// SPDX-License-Identifier: GPL-2.0
/*
 * Keystone NetCP Core driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Sandeep Paulraj <s-paulraj@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Murali Karicheri <m-karicheri2@ti.com>
 *		Wingman Kwok <w-kwok2@ti.com>
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/knav_qmss.h>
#include <linux/soc/ti/knav_dma.h>

#include "netcp.h"

#define NETCP_SOP_OFFSET	(NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_TX_TIMEOUT	(5 * HZ)
#define NETCP_PACKET_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE	ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR	16

#define NETCP_EFUSE_REG_INDEX	0

#define NETCP_MOD_PROBE_SKIPPED	1
#define NETCP_MOD_PROBE_FAILED	2

#define NETCP_DEBUG (NETIF_MSG_HW	| NETIF_MSG_WOL		|	\
		     NETIF_MSG_DRV	| NETIF_MSG_LINK	|	\
		     NETIF_MSG_IFUP	| NETIF_MSG_INTR	|	\
		     NETIF_MSG_PROBE	| NETIF_MSG_TIMER	|	\
		     NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	|	\
		     NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	|	\
		     NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	|	\
		     NETIF_MSG_RX_STATUS)

#define NETCP_EFUSE_ADDR_SWAP	2

#define knav_queue_get_id(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_ID, (unsigned long)NULL)

#define knav_queue_enable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_ENABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_disable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_DISABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_get_count(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)

#define for_each_netcp_module(module)			\
	list_for_each_entry(module, &netcp_modules, module_list)

#define for_each_netcp_device_module(netcp_device, inst_modpriv)	\
	list_for_each_entry(inst_modpriv,				\
			    &((netcp_device)->modpriv_head), inst_list)

#define for_each_module(netcp, intf_modpriv)			\
	list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)

/* Module management structures */
struct netcp_device {
	struct list_head	device_list;
	struct list_head	interface_head;
	struct list_head	modpriv_head;
	struct device		*device;
};

struct netcp_inst_modpriv {
	struct netcp_device	*netcp_device;
	struct netcp_module	*netcp_module;
	struct list_head	inst_list;
	void			*module_priv;
};

struct netcp_intf_modpriv {
	struct netcp_intf	*netcp_priv;
	struct netcp_module	*netcp_module;
	struct list_head	intf_list;
	void			*module_priv;
};

struct netcp_tx_cb {
	void	*ts_context;
	void	(*txtstamp)(void *context, struct sk_buff *skb);
};

static LIST_HEAD(netcp_devices);
static LIST_HEAD(netcp_modules);
static DEFINE_MUTEX(netcp_modules_lock);

static int netcp_debug_level = -1;
module_param(netcp_debug_level, int, 0);
MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");
/* Helper functions - Get/Set */
static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
			 struct knav_dma_desc *desc)
{
	*buff_len = le32_to_cpu(desc->buff_len);
	*buff = le32_to_cpu(desc->buff);
	*ndesc = le32_to_cpu(desc->next_desc);
}

static void get_desc_info(u32 *desc_info, u32 *pkt_info,
			  struct knav_dma_desc *desc)
{
	*desc_info = le32_to_cpu(desc->desc_info);
	*pkt_info = le32_to_cpu(desc->packet_info);
}

static u32 get_sw_data(int index, struct knav_dma_desc *desc)
{
	/* No Endian conversion needed as this data is untouched by hw */
	return desc->sw_data[index];
}

/* use these macros to get sw data */
#define GET_SW_DATA0(desc) get_sw_data(0, desc)
#define GET_SW_DATA1(desc) get_sw_data(1, desc)
#define GET_SW_DATA2(desc) get_sw_data(2, desc)
#define GET_SW_DATA3(desc) get_sw_data(3, desc)

static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
			     struct knav_dma_desc *desc)
{
	*buff = le32_to_cpu(desc->orig_buff);
	*buff_len = le32_to_cpu(desc->orig_len);
}

static void get_words(dma_addr_t *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		words[i] = le32_to_cpu(desc[i]);
}

static void set_pkt_info(dma_addr_t buff, u32 buff_len, u32 ndesc,
			 struct knav_dma_desc *desc)
{
	desc->buff_len = cpu_to_le32(buff_len);
	desc->buff = cpu_to_le32(buff);
	desc->next_desc = cpu_to_le32(ndesc);
}

static void set_desc_info(u32 desc_info, u32 pkt_info,
			  struct knav_dma_desc *desc)
{
	desc->desc_info = cpu_to_le32(desc_info);
	desc->packet_info = cpu_to_le32(pkt_info);
}

static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
{
	/* No Endian conversion needed as this data is untouched by hw */
	desc->sw_data[index] = data;
}

/* use these macros to set sw data */
#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)

static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
			     struct knav_dma_desc *desc)
{
	desc->orig_buff = cpu_to_le32(buff);
	desc->orig_len = cpu_to_le32(buff_len);
}

static void set_words(u32 *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		desc[i] = cpu_to_le32(words[i]);
}

/* Read the e-fuse value as 32 bit values to be endian independent */
static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
{
	unsigned int addr0, addr1;

	addr1 = readl(efuse_mac + 4);
	addr0 = readl(efuse_mac);

	switch (swap) {
	case NETCP_EFUSE_ADDR_SWAP:
		addr0 = addr1;
		addr1 = readl(efuse_mac);
		break;
	default:
		break;
	}

	x[0] = (addr1 & 0x0000ff00) >> 8;
	x[1] = addr1 & 0x000000ff;
	x[2] = (addr0 & 0xff000000) >> 24;
	x[3] = (addr0 & 0x00ff0000) >> 16;
	x[4] = (addr0 & 0x0000ff00) >> 8;
	x[5] = addr0 & 0x000000ff;

	return 0;
}

/* Module management routines */
static int netcp_register_interface(struct netcp_intf *netcp)
{
	int ret;

	ret = register_netdev(netcp->ndev);
	if (!ret)
		netcp->netdev_registered = true;
	return ret;
}

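/* Probe a module against one NetCP device: look the module up by label in
 * the device's "netcp-devices" sub-tree, run its probe routine, and, once a
 * primary module is present, attach the module to each interface and
 * register any netdevs that are still unregistered.
 */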
static int netcp_module_probe(struct netcp_device *netcp_device,
			      struct netcp_module *module)
{
	struct device *dev = netcp_device->device;
	struct device_node *devices, *interface, *node = dev->of_node;
	struct device_node *child;
	struct netcp_inst_modpriv *inst_modpriv;
	struct netcp_intf *netcp_intf;
	struct netcp_module *tmp;
	bool primary_module_registered = false;
	int ret;

	/* Find this module in the sub-tree for this device */
	devices = of_get_child_by_name(node, "netcp-devices");
	if (!devices) {
		dev_err(dev, "could not find netcp-devices node\n");
		return NETCP_MOD_PROBE_SKIPPED;
	}

	for_each_available_child_of_node(devices, child) {
		const char *name;
		char node_name[32];

		if (of_property_read_string(child, "label", &name) < 0) {
			snprintf(node_name, sizeof(node_name), "%pOFn", child);
			name = node_name;
		}
		if (!strcasecmp(module->name, name))
			break;
	}

	of_node_put(devices);
	/* If module not used for this device, skip it */
	if (!child) {
		dev_warn(dev, "module(%s) not used for device\n", module->name);
		return NETCP_MOD_PROBE_SKIPPED;
	}

	inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
	if (!inst_modpriv) {
		of_node_put(child);
		return -ENOMEM;
	}

	inst_modpriv->netcp_device = netcp_device;
	inst_modpriv->netcp_module = module;
	list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);

	ret = module->probe(netcp_device, dev, child,
			    &inst_modpriv->module_priv);
	of_node_put(child);
	if (ret) {
		dev_err(dev, "Probe of module(%s) failed with %d\n",
			module->name, ret);
		list_del(&inst_modpriv->inst_list);
		devm_kfree(dev, inst_modpriv);
		return NETCP_MOD_PROBE_FAILED;
	}

	/* Attach modules only if the primary module is probed */
	for_each_netcp_module(tmp) {
		if (tmp->primary)
			primary_module_registered = true;
	}

	if (!primary_module_registered)
		return 0;

	/* Attach module to interfaces */
	list_for_each_entry(netcp_intf, &netcp_device->interface_head,
			    interface_list) {
		struct netcp_intf_modpriv *intf_modpriv;

		intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
					    GFP_KERNEL);
		if (!intf_modpriv)
			return -ENOMEM;

		interface = of_parse_phandle(netcp_intf->node_interface,
					     module->name, 0);

		if (!interface) {
			devm_kfree(dev, intf_modpriv);
			continue;
		}

		intf_modpriv->netcp_priv = netcp_intf;
		intf_modpriv->netcp_module = module;
		list_add_tail(&intf_modpriv->intf_list,
			      &netcp_intf->module_head);

		ret = module->attach(inst_modpriv->module_priv,
				     netcp_intf->ndev, interface,
				     &intf_modpriv->module_priv);
		of_node_put(interface);
		if (ret) {
			dev_dbg(dev, "Attach of module %s declined with %d\n",
				module->name, ret);
			list_del(&intf_modpriv->intf_list);
			devm_kfree(dev, intf_modpriv);
			continue;
		}
	}

	/* Now register the interface with netdev */
	list_for_each_entry(netcp_intf,
			    &netcp_device->interface_head,
			    interface_list) {
		/* If interface not registered then register now */
		if (!netcp_intf->netdev_registered) {
			ret = netcp_register_interface(netcp_intf);
			if (ret)
				return -ENODEV;
		}
	}
	return 0;
}

int netcp_register_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *tmp;
	int ret;

	if (!module->name) {
		WARN(1, "error registering netcp module: no name\n");
		return -EINVAL;
	}

	if (!module->probe) {
		WARN(1, "error registering netcp module: no probe\n");
		return -EINVAL;
	}

	mutex_lock(&netcp_modules_lock);

	for_each_netcp_module(tmp) {
		if (!strcasecmp(tmp->name, module->name)) {
			mutex_unlock(&netcp_modules_lock);
			return -EEXIST;
		}
	}
	list_add_tail(&module->module_list, &netcp_modules);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			goto fail;
	}
	mutex_unlock(&netcp_modules_lock);
	return 0;

fail:
	mutex_unlock(&netcp_modules_lock);
	netcp_unregister_module(module);
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_register_module);

static void netcp_release_module(struct netcp_device *netcp_device,
				 struct netcp_module *module)
{
	struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device *dev = netcp_device->device;

	/* Release the module from each interface */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;

		list_for_each_entry_safe(intf_modpriv, intf_tmp,
					 &netcp_intf->module_head,
					 intf_list) {
			if (intf_modpriv->netcp_module == module) {
				module->release(intf_modpriv->module_priv);
				list_del(&intf_modpriv->intf_list);
				devm_kfree(dev, intf_modpriv);
				break;
			}
		}
	}

	/* Remove the module from each instance */
	list_for_each_entry_safe(inst_modpriv, inst_tmp,
				 &netcp_device->modpriv_head, inst_list) {
		if (inst_modpriv->netcp_module == module) {
			module->remove(netcp_device,
				       inst_modpriv->module_priv);
			list_del(&inst_modpriv->inst_list);
			devm_kfree(dev, inst_modpriv);
			break;
		}
	}
}

void netcp_unregister_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *module_tmp;

	mutex_lock(&netcp_modules_lock);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		netcp_release_module(netcp_device, module);
	}

	/* Remove the module from the module list */
	for_each_netcp_module(module_tmp) {
		if (module == module_tmp) {
			list_del(&module->module_list);
			break;
		}
	}

	mutex_unlock(&netcp_modules_lock);
}
EXPORT_SYMBOL_GPL(netcp_unregister_module);

void *netcp_module_get_intf_data(struct netcp_module *module,
				 struct netcp_intf *intf)
{
	struct netcp_intf_modpriv *intf_modpriv;

	list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
		if (intf_modpriv->netcp_module == module)
			return intf_modpriv->module_priv;
	return NULL;
}
EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);

/* Module TX and RX Hook management */
struct netcp_hook_list {
	struct list_head	 list;
	netcp_hook_rtn		*hook_rtn;
	void			*hook_data;
	int			 order;
};

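/* Register a TX hook; the list is kept sorted by ascending "order" so that
 * hooks run in a well-defined sequence for every transmitted packet.
 */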
int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn = hook_rtn;
	entry->hook_data = hook_data;
	entry->order = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_txhook);

int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
		if ((next->order == order) &&
		    (next->hook_rtn == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_txhook);

int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn = hook_rtn;
	entry->hook_data = hook_data;
	entry->order = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_rxhook);

int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
		if ((next->order == order) &&
		    (next->hook_rtn == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_rxhook);

static void netcp_frag_free(bool is_frag, void *ptr)
{
	if (is_frag)
		skb_free_frag(ptr);
	else
		kfree(ptr);
}

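/* Walk an Rx descriptor chain, unmapping and freeing each attached buffer
 * and returning every descriptor to the Rx pool.
 */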
static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc)
{
	struct knav_dma_desc *ndesc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len, dma_sz = sizeof(*ndesc);
	void *buf_ptr;
	u32 tmp;

	get_words(&dma_desc, 1, &desc->next_desc);

	while (dma_desc) {
		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			break;
		}
		get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		buf_ptr = (void *)GET_SW_DATA0(ndesc);
		buf_len = (int)GET_SW_DATA1(ndesc);
		dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(buf_ptr);
		knav_pool_desc_put(netcp->rx_pool, ndesc);
	}
	/* warning!!!! We are retrieving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	buf_ptr = (void *)GET_SW_DATA0(desc);
	buf_len = (int)GET_SW_DATA1(desc);

	if (buf_ptr)
		netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
	knav_pool_desc_put(netcp->rx_pool, desc);
}

static void netcp_empty_rx_queue(struct netcp_intf *netcp)
{
	struct netcp_stats *rx_stats = &netcp->stats;
	struct knav_dma_desc *desc;
	unsigned int dma_sz;
	dma_addr_t dma;

	for (;;) {
		dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
		if (!dma)
			break;

		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
				__func__);
			rx_stats->rx_errors++;
			continue;
		}
		netcp_free_rx_desc_chain(netcp, desc);
		rx_stats->rx_dropped++;
	}
}

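/* Pop one completed Rx descriptor, build an skb around the primary buffer,
 * chain any fragment pages onto it, run the Rx hooks, and hand the packet
 * up to the network stack.
 */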
static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
{
	struct netcp_stats *rx_stats = &netcp->stats;
	unsigned int dma_sz, buf_len, org_buf_len;
	struct knav_dma_desc *desc, *ndesc;
	unsigned int pkt_sz = 0, accum_sz;
	struct netcp_hook_list *rx_hook;
	dma_addr_t dma_desc, dma_buff;
	struct netcp_packet p_info;
	struct sk_buff *skb;
	void *org_buf_ptr;
	u32 tmp;

	dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
	if (!dma_desc)
		return -1;

	desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
	if (unlikely(!desc)) {
		dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
		return 0;
	}

	get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
	/* warning!!!! We are retrieving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	org_buf_ptr = (void *)GET_SW_DATA0(desc);
	org_buf_len = (int)GET_SW_DATA1(desc);

	if (unlikely(!org_buf_ptr)) {
		dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
		goto free_desc;
	}

	pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK;
	accum_sz = buf_len;
	dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);

	/* Build a new sk_buff for the primary buffer */
	skb = build_skb(org_buf_ptr, org_buf_len);
	if (unlikely(!skb)) {
		dev_err(netcp->ndev_dev, "build_skb() failed\n");
		goto free_desc;
	}

	/* update data, tail and len */
	skb_reserve(skb, NETCP_SOP_OFFSET);
	__skb_put(skb, buf_len);

	/* Fill in the page fragment list */
	while (dma_desc) {
		struct page *page;

		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			goto free_desc;
		}

		get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		page = (struct page *)GET_SW_DATA0(ndesc);

		if (likely(dma_buff && buf_len && page)) {
			dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		} else {
			dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%pad), len(%d), page(%p)\n",
				&dma_buff, buf_len, page);
			goto free_desc;
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset_in_page(dma_buff), buf_len, PAGE_SIZE);
		accum_sz += buf_len;

		/* Free the descriptor */
		knav_pool_desc_put(netcp->rx_pool, ndesc);
	}

	/* check for packet len and warn */
	if (unlikely(pkt_sz != accum_sz))
		dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
			pkt_sz, accum_sz);

	/* Newer version of the Ethernet switch can trim the Ethernet FCS
	 * from the packet and is indicated in hw_cap. So trim it only for
	 * older h/w
	 */
	if (!(netcp->hw_cap & ETH_SW_CAN_REMOVE_ETH_FCS))
		__pskb_trim(skb, skb->len - ETH_FCS_LEN);

	/* Call each of the RX hooks */
	p_info.skb = skb;
	skb->dev = netcp->ndev;
	p_info.rxtstamp_complete = false;
	get_desc_info(&tmp, &p_info.eflags, desc);
	p_info.epib = desc->epib;
	p_info.psdata = (u32 __force *)desc->psdata;
	p_info.eflags = ((p_info.eflags >> KNAV_DMA_DESC_EFLAGS_SHIFT) &
			 KNAV_DMA_DESC_EFLAGS_MASK);
	list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
		int ret;

		ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
					&p_info);
		if (unlikely(ret)) {
			dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
				rx_hook->order, ret);
			/* Free the primary descriptor */
			rx_stats->rx_dropped++;
			knav_pool_desc_put(netcp->rx_pool, desc);
			dev_kfree_skb(skb);
			return 0;
		}
	}
	/* Free the primary descriptor */
	knav_pool_desc_put(netcp->rx_pool, desc);

	u64_stats_update_begin(&rx_stats->syncp_rx);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	u64_stats_update_end(&rx_stats->syncp_rx);

	/* push skb up the stack */
	skb->protocol = eth_type_trans(skb, netcp->ndev);
	netif_receive_skb(skb);
	return 0;

free_desc:
	netcp_free_rx_desc_chain(netcp, desc);
	rx_stats->rx_errors++;
	return 0;
}

static int netcp_process_rx_packets(struct netcp_intf *netcp,
				    unsigned int budget)
{
	int i;

	for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
		;
	return i;
}

/* Release descriptors and attached buffers from Rx FDQ */
static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *desc;
	unsigned int buf_len, dma_sz;
	dma_addr_t dma;
	void *buf_ptr;

	/* Drain the FDQ, unmapping and freeing each buffer */
	while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			continue;
		}

		get_org_pkt_info(&dma, &buf_len, desc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		buf_ptr = (void *)GET_SW_DATA0(desc);

		if (unlikely(!dma)) {
			dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (unlikely(!buf_ptr)) {
			dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (fdq == 0) {
			dma_unmap_single(netcp->dev, dma, buf_len,
					 DMA_FROM_DEVICE);
			netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
		} else {
			dma_unmap_page(netcp->dev, dma, buf_len,
				       DMA_FROM_DEVICE);
			__free_page(buf_ptr);
		}

		knav_pool_desc_put(netcp->rx_pool, desc);
	}
}

static void netcp_rxpool_free(struct netcp_intf *netcp)
{
	int i;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
		netcp_free_rx_buf(netcp, i);

	if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
		dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
			netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));

	knav_pool_destroy(netcp->rx_pool);
	netcp->rx_pool = NULL;
}

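/* Allocate one Rx buffer for the given FDQ and queue its descriptor: FDQ 0
 * gets a primary linear buffer sized for a full frame, while the remaining
 * FDQs get whole pages for fragment data.
 */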
static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *hwdesc;
	unsigned int buf_len, dma_sz;
	u32 desc_info, pkt_info;
	struct page *page;
	dma_addr_t dma;
	void *bufptr;
	u32 sw_data[2];

	/* Allocate descriptor */
	hwdesc = knav_pool_desc_get(netcp->rx_pool);
	if (IS_ERR_OR_NULL(hwdesc)) {
		dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
		return -ENOMEM;
	}

	if (likely(fdq == 0)) {
		unsigned int primary_buf_len;
		/* Allocate a primary receive queue entry */
		buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
		primary_buf_len = SKB_DATA_ALIGN(buf_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

		bufptr = netdev_alloc_frag(primary_buf_len);
		sw_data[1] = primary_buf_len;

		if (unlikely(!bufptr)) {
			dev_warn_ratelimited(netcp->ndev_dev,
					     "Primary RX buffer alloc failed\n");
			goto fail;
		}
		dma = dma_map_single(netcp->dev, bufptr, buf_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(netcp->dev, dma)))
			goto fail;

		/* warning!!!! We are saving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		sw_data[0] = (u32)bufptr;
	} else {
		/* Allocate a secondary receive queue entry */
		page = alloc_page(GFP_ATOMIC | GFP_DMA);
		if (unlikely(!page)) {
			dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
			goto fail;
		}
		buf_len = PAGE_SIZE;
		dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
		/* warning!!!! We are saving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		sw_data[0] = (u32)page;
		sw_data[1] = 0;
	}

	desc_info =  KNAV_DMA_DESC_PS_INFO_IN_DESC;
	desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
	pkt_info =  KNAV_DMA_DESC_HAS_EPIB;
	pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
	pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
		    KNAV_DMA_DESC_RETQ_SHIFT;
	set_org_pkt_info(dma, buf_len, hwdesc);
	SET_SW_DATA0(sw_data[0], hwdesc);
	SET_SW_DATA1(sw_data[1], hwdesc);
	set_desc_info(desc_info, pkt_info, hwdesc);

	/* Push to FDQs */
	knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
			   &dma_sz);
	knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
	return 0;

fail:
	knav_pool_desc_put(netcp->rx_pool, hwdesc);
	return -ENOMEM;
}

/* Refill Rx FDQ with descriptors & attached buffers */
static void netcp_rxpool_refill(struct netcp_intf *netcp)
{
	u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
	int i, ret = 0;

	/* Calculate the FDQ deficit and refill */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
		fdq_deficit[i] = netcp->rx_queue_depths[i] -
				 knav_queue_get_count(netcp->rx_fdq[i]);

		while (fdq_deficit[i]-- && !ret)
			ret = netcp_allocate_rx_buf(netcp, i);
	} /* end for fdqs */
}

/* NAPI poll */
static int netcp_rx_poll(struct napi_struct *napi, int budget)
{
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						rx_napi);
	unsigned int packets;

	packets = netcp_process_rx_packets(netcp, budget);

	netcp_rxpool_refill(netcp);
	if (packets < budget) {
		napi_complete_done(&netcp->rx_napi, packets);
		knav_queue_enable_notify(netcp->rx_queue);
	}

	return packets;
}

static void netcp_rx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->rx_queue);
	napi_schedule(&netcp->rx_napi);
}

static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc,
				     unsigned int desc_sz)
{
	struct knav_dma_desc *ndesc = desc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len;

	while (ndesc) {
		get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);

		if (dma_buf && buf_len)
			dma_unmap_single(netcp->dev, dma_buf, buf_len,
					 DMA_TO_DEVICE);
		else
			dev_warn(netcp->ndev_dev, "bad Tx desc buf(%pad), len(%d)\n",
				 &dma_buf, buf_len);

		knav_pool_desc_put(netcp->tx_pool, ndesc);
		ndesc = NULL;
		if (dma_desc) {
			ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
						     desc_sz);
			if (!ndesc)
				dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
		}
	}
}

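/* Reap completed Tx descriptors: free each descriptor chain, run any Tx
 * timestamp callback, update stats, and wake a stopped subqueue once the
 * descriptor pool refills past the resume threshold.
 */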
static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
					  unsigned int budget)
{
	struct netcp_stats *tx_stats = &netcp->stats;
	struct knav_dma_desc *desc;
	struct netcp_tx_cb *tx_cb;
	struct sk_buff *skb;
	unsigned int dma_sz;
	dma_addr_t dma;
	int pkts = 0;

	while (budget--) {
		dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
		if (!dma)
			break;
		desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
			tx_stats->tx_errors++;
			continue;
		}

		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		skb = (struct sk_buff *)GET_SW_DATA0(desc);
		netcp_free_tx_desc_chain(netcp, desc, dma_sz);
		if (!skb) {
			dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
			tx_stats->tx_errors++;
			continue;
		}

		tx_cb = (struct netcp_tx_cb *)skb->cb;
		if (tx_cb->txtstamp)
			tx_cb->txtstamp(tx_cb->ts_context, skb);

		if (netif_subqueue_stopped(netcp->ndev, skb) &&
		    netif_running(netcp->ndev) &&
		    (knav_pool_count(netcp->tx_pool) >
		     netcp->tx_resume_threshold)) {
			u16 subqueue = skb_get_queue_mapping(skb);

			netif_wake_subqueue(netcp->ndev, subqueue);
		}

		u64_stats_update_begin(&tx_stats->syncp_tx);
		tx_stats->tx_packets++;
		tx_stats->tx_bytes += skb->len;
		u64_stats_update_end(&tx_stats->syncp_tx);
		dev_kfree_skb(skb);
		pkts++;
	}
	return pkts;
}

static int netcp_tx_poll(struct napi_struct *napi, int budget)
{
	int packets;
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						tx_napi);

	packets = netcp_process_tx_compl_packets(netcp, budget);
	if (packets < budget) {
		napi_complete(&netcp->tx_napi);
		knav_queue_enable_notify(netcp->tx_compl_q);
	}

	return packets;
}

static void netcp_tx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_schedule(&netcp->tx_napi);
}

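/* Map an skb for transmission: DMA-map the linear area into the first
 * descriptor, then chain one descriptor per page fragment. Returns the
 * head descriptor, or NULL on mapping/allocation failure.
 */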
static struct knav_dma_desc*
netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
{
	struct knav_dma_desc *desc, *ndesc, *pdesc;
	unsigned int pkt_len = skb_headlen(skb);
	struct device *dev = netcp->dev;
	dma_addr_t dma_addr;
	unsigned int dma_sz;
	int i;

	/* Map the linear buffer */
	dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma_addr))) {
		dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
		return NULL;
	}

	desc = knav_pool_desc_get(netcp->tx_pool);
	if (IS_ERR_OR_NULL(desc)) {
		dev_err(netcp->ndev_dev, "out of TX desc\n");
		dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
		return NULL;
	}

	set_pkt_info(dma_addr, pkt_len, 0, desc);
	if (skb_is_nonlinear(skb)) {
		prefetchw(skb_shinfo(skb));
	} else {
		desc->next_desc = 0;
		goto upd_pkt_len;
	}

	pdesc = desc;

	/* Handle the case where skb is fragmented in pages */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		struct page *page = skb_frag_page(frag);
		u32 page_offset = skb_frag_off(frag);
		u32 buf_len = skb_frag_size(frag);
		dma_addr_t desc_dma;
		u32 desc_dma_32;

		dma_addr = dma_map_page(dev, page, page_offset, buf_len,
					DMA_TO_DEVICE);
		if (unlikely(!dma_addr)) {
			dev_err(netcp->ndev_dev, "Failed to map skb page\n");
			goto free_descs;
		}

		ndesc = knav_pool_desc_get(netcp->tx_pool);
		if (IS_ERR_OR_NULL(ndesc)) {
			dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
			dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
			goto free_descs;
		}

		desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc);
		set_pkt_info(dma_addr, buf_len, 0, ndesc);
		desc_dma_32 = (u32)desc_dma;
		set_words(&desc_dma_32, 1, &pdesc->next_desc);
		pkt_len += buf_len;
		if (pdesc != desc)
			knav_pool_desc_map(netcp->tx_pool, pdesc,
					   sizeof(*pdesc), &desc_dma, &dma_sz);
		pdesc = ndesc;
	}
	if (pdesc != desc)
		knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
				   &dma_addr, &dma_sz);

	/* frag list based linkage is not supported for now. */
	if (skb_shinfo(skb)->frag_list) {
		dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
		goto free_descs;
	}

upd_pkt_len:
	WARN_ON(pkt_len != skb->len);

	pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
	set_words(&pkt_len, 1, &desc->desc_info);
	return desc;

free_descs:
	netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	return NULL;
}

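/* Run the Tx hooks so a module can claim the packet and pick a Tx pipe,
 * fill in the descriptor's protocol-specific words and return-queue info,
 * then push the mapped descriptor to the pipe's hardware queue.
 */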
static int netcp_tx_submit_skb(struct netcp_intf *netcp,
			       struct sk_buff *skb,
			       struct knav_dma_desc *desc)
{
	struct netcp_tx_pipe *tx_pipe = NULL;
	struct netcp_hook_list *tx_hook;
	struct netcp_packet p_info;
	struct netcp_tx_cb *tx_cb;
	unsigned int dma_sz;
	dma_addr_t dma;
	u32 tmp = 0;
	int ret = 0;

	p_info.netcp = netcp;
	p_info.skb = skb;
	p_info.tx_pipe = NULL;
	p_info.psdata_len = 0;
	p_info.ts_context = NULL;
	p_info.txtstamp = NULL;
	p_info.epib = desc->epib;
	p_info.psdata = (u32 __force *)desc->psdata;
	memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(__le32));

	/* Find out where to inject the packet for transmission */
	list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
		ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
					&p_info);
		if (unlikely(ret != 0)) {
			dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
				tx_hook->order, ret);
			ret = (ret < 0) ? ret : NETDEV_TX_OK;
			goto out;
		}
	}

	/* Make sure some TX hook claimed the packet */
	tx_pipe = p_info.tx_pipe;
	if (!tx_pipe) {
		dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
		ret = -ENXIO;
		goto out;
	}

	tx_cb = (struct netcp_tx_cb *)skb->cb;
	tx_cb->ts_context = p_info.ts_context;
	tx_cb->txtstamp = p_info.txtstamp;

	/* update descriptor */
	if (p_info.psdata_len) {
		/* psdata points to both native-endian and device-endian data */
		__le32 *psdata = (void __force *)p_info.psdata;

		set_words((u32 *)psdata +
			  (KNAV_DMA_NUM_PS_WORDS - p_info.psdata_len),
			  p_info.psdata_len, psdata);
		tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
			KNAV_DMA_DESC_PSLEN_SHIFT;
	}

	tmp |= KNAV_DMA_DESC_HAS_EPIB |
	       ((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
	       KNAV_DMA_DESC_RETQ_SHIFT);

	if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
		tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
			KNAV_DMA_DESC_PSFLAG_SHIFT);
	}

	set_words(&tmp, 1, &desc->packet_info);
	/* warning!!!! We are saving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	SET_SW_DATA0((u32)skb, desc);

	if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
		tmp = tx_pipe->switch_to_port;
		set_words(&tmp, 1, &desc->tag_info);
	}

	/* submit packet descriptor */
	ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
				 &dma_sz);
	if (unlikely(ret)) {
		dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
		ret = -ENOMEM;
		goto out;
	}
	skb_tx_timestamp(skb);
	knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);

out:
	return ret;
}

/* Submit the packet */
static netdev_tx_t netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_stats *tx_stats = &netcp->stats;
	int subqueue = skb_get_queue_mapping(skb);
	struct knav_dma_desc *desc;
	int desc_count, ret = 0;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
		ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
		if (ret < 0) {
			/* If we get here, the skb has already been dropped */
			dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
				 ret);
			tx_stats->tx_dropped++;
			return ret;
		}
		skb->len = NETCP_MIN_PACKET_SIZE;
	}

	desc = netcp_tx_map_skb(skb, netcp);
	if (unlikely(!desc)) {
		netif_stop_subqueue(ndev, subqueue);
		ret = -ENOBUFS;
		goto drop;
	}

	ret = netcp_tx_submit_skb(netcp, skb, desc);
	if (ret)
		goto drop;

	/* Check Tx pool count & stop subqueue if needed */
	desc_count = knav_pool_count(netcp->tx_pool);
	if (desc_count < netcp->tx_pause_threshold) {
		dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
		netif_stop_subqueue(ndev, subqueue);
	}
	return NETDEV_TX_OK;

drop:
	tx_stats->tx_dropped++;
	if (desc)
		netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	dev_kfree_skb(skb);
	return ret;
}

int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
{
	if (tx_pipe->dma_channel) {
		knav_dma_close_channel(tx_pipe->dma_channel);
		tx_pipe->dma_channel = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_close);

int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
{
	struct device *dev = tx_pipe->netcp_device->device;
	struct knav_dma_cfg config;
	int ret = 0;
	u8 name[16];

	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;
	config.u.tx.filt_einfo = false;
	config.u.tx.filt_pswords = false;
	config.u.tx.priority = DMA_PRIO_MED_L;

	tx_pipe->dma_channel = knav_dma_open_channel(dev,
						     tx_pipe->dma_chan_name,
						     &config);
	if (IS_ERR(tx_pipe->dma_channel)) {
		dev_err(dev, "failed opening tx chan(%s)\n",
			tx_pipe->dma_chan_name);
		ret = PTR_ERR(tx_pipe->dma_channel);
		goto err;
	}

	snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
	tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
					     KNAV_QUEUE_SHARED);
	if (IS_ERR(tx_pipe->dma_queue)) {
		dev_err(dev, "Could not open DMA queue for channel \"%s\": %pe\n",
			name, tx_pipe->dma_queue);
		ret = PTR_ERR(tx_pipe->dma_queue);
		goto err;
	}

	dev_dbg(dev, "opened tx pipe %s\n", name);
	return 0;

err:
	if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
		knav_dma_close_channel(tx_pipe->dma_channel);
	tx_pipe->dma_channel = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_open);

int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
		      struct netcp_device *netcp_device,
		      const char *dma_chan_name, unsigned int dma_queue_id)
{
	memset(tx_pipe, 0, sizeof(*tx_pipe));
	tx_pipe->netcp_device = netcp_device;
	tx_pipe->dma_chan_name = dma_chan_name;
	tx_pipe->dma_queue_id = dma_queue_id;
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_init);

static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
					  const u8 *addr,
					  enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node) {
		if (naddr->type != type)
			continue;
		if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
			continue;
		return naddr;
	}

	return NULL;
}

static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
					 const u8 *addr,
					 enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
	if (!naddr)
		return NULL;

	naddr->type = type;
	naddr->flags = 0;
	naddr->netcp = netcp;
	if (addr)
		ether_addr_copy(naddr->addr, addr);
	else
		eth_zero_addr(naddr->addr);
	list_add_tail(&naddr->node, &netcp->addr_list);

	return naddr;
}

static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
{
	list_del(&naddr->node);
	devm_kfree(netcp->dev, naddr);
}

static void netcp_addr_clear_mark(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node)
		naddr->flags = 0;
}

static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
				enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = netcp_addr_find(netcp, addr, type);
	if (naddr) {
		naddr->flags |= ADDR_VALID;
		return;
	}

	naddr = netcp_addr_add(netcp, addr, type);
	if (!WARN_ON(!naddr))
		naddr->flags |= ADDR_NEW;
}

static void netcp_addr_sweep_del(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (naddr->flags & (ADDR_VALID | ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
			naddr->addr, naddr->type);
		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->del_addr)
				continue;
			error = module->del_addr(priv->module_priv,
						 naddr);
			WARN_ON(error);
		}
		netcp_addr_del(netcp, naddr);
	}
}

static void netcp_addr_sweep_add(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (!(naddr->flags & ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
			naddr->addr, naddr->type);

		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->add_addr)
				continue;
			error = module->add_addr(priv->module_priv, naddr);
			WARN_ON(error);
		}
	}
}

static int netcp_set_promiscuous(struct netcp_intf *netcp, bool promisc)
{
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	for_each_module(netcp, priv) {
		module = priv->netcp_module;
		if (!module->set_rx_mode)
			continue;

		error = module->set_rx_mode(priv->module_priv, promisc);
		if (error)
			return error;
	}
	return 0;
}

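/* Synchronize the interface's address list with the stack using a
 * mark-and-sweep pass: clear all marks, mark every address that is still
 * wanted (adding new ones), then delete the unmarked leftovers and push
 * the changes out to the modules.
 */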
static void netcp_set_rx_mode(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netdev_hw_addr *ndev_addr;
	bool promisc;

	promisc = (ndev->flags & IFF_PROMISC ||
		   ndev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);

	spin_lock(&netcp->lock);
	/* first clear all marks */
	netcp_addr_clear_mark(netcp);

	/* next add new entries, mark existing ones */
	netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
	for_each_dev_addr(ndev, ndev_addr)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
	netdev_for_each_uc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
	netdev_for_each_mc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);

	if (promisc)
		netcp_addr_add_mark(netcp, NULL, ADDR_ANY);

	/* finally sweep and callout into modules */
	netcp_addr_sweep_del(netcp);
	netcp_addr_sweep_add(netcp);
	netcp_set_promiscuous(netcp, promisc);
	spin_unlock(&netcp->lock);
}

static void netcp_free_navigator_resources(struct netcp_intf *netcp)
{
	int i;

	if (netcp->rx_channel) {
		knav_dma_close_channel(netcp->rx_channel);
		netcp->rx_channel = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->rx_pool))
		netcp_rxpool_free(netcp);

	if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
		knav_queue_close(netcp->rx_queue);
		netcp->rx_queue = NULL;
	}

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]); ++i) {
		knav_queue_close(netcp->rx_fdq[i]);
		netcp->rx_fdq[i] = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
		knav_queue_close(netcp->tx_compl_q);
		netcp->tx_compl_q = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
		knav_pool_destroy(netcp->tx_pool);
		netcp->tx_pool = NULL;
	}
}

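/* Allocate the Navigator resources an interface needs: Rx/Tx descriptor
 * pools, Tx completion and Rx queues with their notifiers, the Rx
 * free-descriptor queues, and finally the Rx DMA channel wired to those
 * FDQs.
 */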
static int netcp_setup_navigator_resources(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct knav_queue_notify_config notify_cfg;
	struct knav_dma_cfg config;
	u32 last_fdq = 0;
	u8 name[16];
	int ret;
	int i;

	/* Create Rx/Tx descriptor pools */
	snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
	netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
					  netcp->rx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->rx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
		ret = PTR_ERR(netcp->rx_pool);
		goto fail;
	}

	snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
	netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
					  netcp->tx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->tx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
		ret = PTR_ERR(netcp->tx_pool);
		goto fail;
	}

	/* open Tx completion queue */
	snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
	netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
	if (IS_ERR(netcp->tx_compl_q)) {
		ret = PTR_ERR(netcp->tx_compl_q);
		goto fail;
	}
	netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);

	/* Set notification for Tx completion */
	notify_cfg.fn = netcp_tx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->tx_compl_q,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->tx_compl_q);

	/* open Rx completion queue */
	snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
	netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
	if (IS_ERR(netcp->rx_queue)) {
		ret = PTR_ERR(netcp->rx_queue);
		goto fail;
	}
	netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);

	/* Set notification for Rx completion */
	notify_cfg.fn = netcp_rx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->rx_queue,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->rx_queue);

	/* open Rx FDQs */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
	     ++i) {
		snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
		netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
		if (IS_ERR(netcp->rx_fdq[i])) {
			ret = PTR_ERR(netcp->rx_fdq[i]);
			goto fail;
		}
	}

	memset(&config, 0, sizeof(config));
	config.direction = DMA_DEV_TO_MEM;
	config.u.rx.einfo_present = true;
	config.u.rx.psinfo_present = true;
	config.u.rx.err_mode = DMA_DROP;
	config.u.rx.desc_type = DMA_DESC_HOST;
	config.u.rx.psinfo_at_sop = false;
	config.u.rx.sop_offset = NETCP_SOP_OFFSET;
	config.u.rx.dst_q = netcp->rx_queue_id;
	config.u.rx.thresh = DMA_THRESH_NONE;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
		if (netcp->rx_fdq[i])
			last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
		config.u.rx.fdq[i] = last_fdq;
	}

	netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
						  netcp->dma_chan_name, &config);
	if (IS_ERR(netcp->rx_channel)) {
		dev_err(netcp->ndev_dev, "failed opening rx chan(%s)\n",
			netcp->dma_chan_name);
		ret = PTR_ERR(netcp->rx_channel);
		goto fail;
	}

	dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
	return 0;

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}

/* Open the device: bring up the Navigator resources, open every attached
 * module, then enable NAPI and completion notifications before Tx starts.
 */
static int netcp_ndo_open(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret;

	netif_carrier_off(ndev);
	ret = netcp_setup_navigator_resources(ndev);
	if (ret) {
		dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
		goto fail;
	}

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->open) {
			ret = module->open(intf_modpriv->module_priv, ndev);
			if (ret != 0) {
				dev_err(netcp->ndev_dev, "module open failed\n");
				goto fail_open;
			}
		}
	}

	napi_enable(&netcp->rx_napi);
	napi_enable(&netcp->tx_napi);
	knav_queue_enable_notify(netcp->tx_compl_q);
	knav_queue_enable_notify(netcp->rx_queue);
	netcp_rxpool_refill(netcp);
	netif_tx_wake_all_queues(ndev);
	dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
	return 0;

fail_open:
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close)
			module->close(intf_modpriv->module_priv, ndev);
	}

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}

/* Close the device: quiesce Tx and notifications, disable NAPI, let each
 * module close, then drain the completion queues and release resources.
 */
static int netcp_ndo_stop(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netcp_addr_clear_mark(netcp);
	netcp_addr_sweep_del(netcp);
	knav_queue_disable_notify(netcp->rx_queue);
	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_disable(&netcp->rx_napi);
	napi_disable(&netcp->tx_napi);

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close) {
			err = module->close(intf_modpriv->module_priv, ndev);
			if (err != 0)
				dev_err(netcp->ndev_dev, "Close failed\n");
		}
	}

	/* Recycle Rx descriptors from completion queue */
	netcp_empty_rx_queue(netcp);

	/* Recycle Tx descriptors from completion queue */
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);

	if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
		dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
			netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));

	netcp_free_navigator_resources(netcp);
	dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
	return 0;
}

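/*
 * netcp_ndo_ioctl() - delegate ioctls to the attached modules. The request
 * is offered to every module that implements ->ioctl; a real error (other
 * than -EOPNOTSUPP) aborts the walk, while success from any module makes
 * the whole call succeed. If no module claims the request, -EOPNOTSUPP is
 * returned.
 */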
static int netcp_ndo_ioctl(struct net_device *ndev,
			   struct ifreq *req, int cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret = -1, err = -EOPNOTSUPP;

	if (!netif_running(ndev))
		return -EINVAL;

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (!module->ioctl)
			continue;

		err = module->ioctl(intf_modpriv->module_priv, req, cmd);
		if ((err < 0) && (err != -EOPNOTSUPP)) {
			ret = err;
			goto out;
		}
		if (err == 0)
			ret = err;
	}

out:
	return (ret == 0) ? 0 : err;
}

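/*
 * netcp_ndo_tx_timeout() - watchdog hook, invoked by the networking core
 * when a Tx queue stalls for longer than ndev->watchdog_timeo. Reclaims
 * any finished descriptors from the Tx completion queue and restarts the
 * queues rather than resetting the hardware.
 */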
static void netcp_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	unsigned int descs = knav_pool_count(netcp->tx_pool);

	dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
	netif_trans_update(ndev);
	netif_tx_wake_all_queues(ndev);
}

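/*
 * netcp_rx_add_vid() - propagate a new Rx VLAN filter to each module that
 * implements ->add_vid, under the interface lock. VLAN id 0 (priority
 * tagged frames) needs no per-module programming and is skipped.
 */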
static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	unsigned long flags;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);

	spin_lock_irqsave(&netcp->lock, flags);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if ((module->add_vid) && (vid != 0)) {
			err = module->add_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&netcp->lock, flags);

	return err;
}

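/*
 * netcp_rx_kill_vid() - counterpart of netcp_rx_add_vid(); asks each module
 * that implements ->del_vid to drop the VLAN filter, under the same lock.
 */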
static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	unsigned long flags;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);

	spin_lock_irqsave(&netcp->lock, flags);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->del_vid) {
			err = module->del_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&netcp->lock, flags);
	return err;
}

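/*
 * netcp_setup_tc() - mqprio offload hook: maps each requested traffic
 * class to exactly one Tx queue (TC i -> queue i). A hypothetical
 * userspace invocation (device name and values illustrative only):
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 0 0 0 0 1 1 1 1 \
 *		queues 1@0 1@1 hw 1
 */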
static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			  void *type_data)
{
	struct tc_mqprio_qopt *mqprio = type_data;
	u8 num_tc;
	int i;

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_tc = mqprio->num_tc;

	/* Sanity-check the number of traffic classes requested */
	if ((dev->real_num_tx_queues <= 1) ||
	    (dev->real_num_tx_queues < num_tc))
		return -EINVAL;

	/* Configure traffic class to queue mappings */
	if (num_tc) {
		netdev_set_num_tc(dev, num_tc);
		for (i = 0; i < num_tc; i++)
			netdev_set_tc_queue(dev, i, 1, i);
	} else {
		netdev_reset_tc(dev);
	}

	return 0;
}

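/*
 * netcp_get_stats() - snapshot the per-interface counters. The 64-bit
 * packet/byte counters are guarded by u64_stats seqcounts, so each pair is
 * re-read until a consistent snapshot is obtained; the 32-bit error/drop
 * counters are read directly.
 */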
static void
netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_stats *p = &netcp->stats;
	u64 rxpackets, rxbytes, txpackets, txbytes;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&p->syncp_rx);
		rxpackets = p->rx_packets;
		rxbytes = p->rx_bytes;
	} while (u64_stats_fetch_retry(&p->syncp_rx, start));

	do {
		start = u64_stats_fetch_begin(&p->syncp_tx);
		txpackets = p->tx_packets;
		txbytes = p->tx_bytes;
	} while (u64_stats_fetch_retry(&p->syncp_tx, start));

	stats->rx_packets = rxpackets;
	stats->rx_bytes = rxbytes;
	stats->tx_packets = txpackets;
	stats->tx_bytes = txbytes;

	/* The following are stored as 32 bit */
	stats->rx_errors = p->rx_errors;
	stats->rx_dropped = p->rx_dropped;
	stats->tx_dropped = p->tx_dropped;
}

static const struct net_device_ops netcp_netdev_ops = {
	.ndo_open		= netcp_ndo_open,
	.ndo_stop		= netcp_ndo_stop,
	.ndo_start_xmit		= netcp_ndo_start_xmit,
	.ndo_set_rx_mode	= netcp_set_rx_mode,
	.ndo_eth_ioctl		= netcp_ndo_ioctl,
	.ndo_get_stats64	= netcp_get_stats,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= netcp_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= netcp_rx_kill_vid,
	.ndo_tx_timeout		= netcp_ndo_tx_timeout,
	.ndo_select_queue	= dev_pick_tx_zero,
	.ndo_setup_tc		= netcp_setup_tc,
};

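/*
 * netcp_create_interface() - allocate and set up one net_device from a
 * child of the "netcp-interfaces" DT node. The MAC address comes from the
 * efuse when "efuse-mac" is set, else from the node's MAC properties, else
 * it is randomly generated. A sketch of such a node, with purely
 * illustrative queue and pool numbers, might be:
 *
 *	interface-0 {
 *		rx-channel = "netrx0";
 *		rx-pool = <1024 12>;
 *		tx-pool = <1024 12>;
 *		rx-queue-depth = <128 128 0 0>;
 *		rx-queue = <8704>;
 *		tx-completion-queue = <8706>;
 *	};
 */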
static int netcp_create_interface(struct netcp_device *netcp_device,
				  struct device_node *node_interface)
{
	struct device *dev = netcp_device->device;
	struct device_node *node = dev->of_node;
	struct netcp_intf *netcp;
	struct net_device *ndev;
	resource_size_t size;
	struct resource res;
	void __iomem *efuse = NULL;
	u32 efuse_mac = 0;
	u8 efuse_mac_addr[6];
	u32 temp[2];
	int ret = 0;

	ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
	if (!ndev) {
		dev_err(dev, "Error allocating netdev\n");
		return -ENOMEM;
	}

	ndev->features |= NETIF_F_SG;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features = ndev->features;
	ndev->vlan_features |= NETIF_F_SG;

	/* MTU range: 68 - 9486 */
	ndev->min_mtu = ETH_MIN_MTU;
	ndev->max_mtu = NETCP_MAX_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

	netcp = netdev_priv(ndev);
	spin_lock_init(&netcp->lock);
	INIT_LIST_HEAD(&netcp->module_head);
	INIT_LIST_HEAD(&netcp->txhook_list_head);
	INIT_LIST_HEAD(&netcp->rxhook_list_head);
	INIT_LIST_HEAD(&netcp->addr_list);
	u64_stats_init(&netcp->stats.syncp_rx);
	u64_stats_init(&netcp->stats.syncp_tx);
	netcp->netcp_device = netcp_device;
	netcp->dev = netcp_device->device;
	netcp->ndev = ndev;
	netcp->ndev_dev = &ndev->dev;
	netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
	netcp->tx_pause_threshold = MAX_SKB_FRAGS;
	netcp->tx_resume_threshold = netcp->tx_pause_threshold;
	netcp->node_interface = node_interface;

	ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
	if (efuse_mac) {
		if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
			dev_err(dev, "could not find efuse-mac reg resource\n");
			ret = -ENODEV;
			goto quit;
		}
		size = resource_size(&res);

		if (!devm_request_mem_region(dev, res.start, size,
					     dev_name(dev))) {
			dev_err(dev, "could not reserve resource\n");
			ret = -ENOMEM;
			goto quit;
		}

		efuse = devm_ioremap(dev, res.start, size);
		if (!efuse) {
			dev_err(dev, "could not map resource\n");
			devm_release_mem_region(dev, res.start, size);
			ret = -ENOMEM;
			goto quit;
		}

		emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
		if (is_valid_ether_addr(efuse_mac_addr))
			eth_hw_addr_set(ndev, efuse_mac_addr);
		else
			eth_hw_addr_random(ndev);

		devm_iounmap(dev, efuse);
		devm_release_mem_region(dev, res.start, size);
	} else {
		ret = of_get_ethdev_address(node_interface, ndev);
		if (ret)
			eth_hw_addr_random(ndev);
	}

	ret = of_property_read_string(node_interface, "rx-channel",
				      &netcp->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "rx-queue",
				   &netcp->rx_queue_id);
	if (ret < 0) {
		dev_warn(dev, "missing \"rx-queue\" parameter\n");
		netcp->rx_queue_id = KNAV_QUEUE_QPEND;
	}

	ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
					 netcp->rx_queue_depths,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
		netcp->rx_queue_depths[0] = 128;
	}

	ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->rx_pool_size = temp[0];
	netcp->rx_pool_region_id = temp[1];

	ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->tx_pool_size = temp[0];
	netcp->tx_pool_region_id = temp[1];

	if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
		dev_err(dev, "tx-pool size too small, must be at least %u\n",
			(unsigned int)MAX_SKB_FRAGS);
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "tx-completion-queue",
				   &netcp->tx_compl_qid);
	if (ret < 0) {
		dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
		netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
	}

	/* NAPI register */
	netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll);
	netif_napi_add_tx(ndev, &netcp->tx_napi, netcp_tx_poll);

	/* Register the network device */
	ndev->dev_id = 0;
	ndev->watchdog_timeo = NETCP_TX_TIMEOUT;
	ndev->netdev_ops = &netcp_netdev_ops;
	SET_NETDEV_DEV(ndev, dev);

	list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
	return 0;

quit:
	free_netdev(ndev);
	return ret;
}

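/*
 * netcp_delete_interface() - tear down one interface: let every attached
 * module release its per-interface state, then unlink, unregister and free
 * the net_device. The module list is expected to be empty afterwards.
 */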
static void netcp_delete_interface(struct netcp_device *netcp_device,
				   struct net_device *ndev)
{
	struct netcp_intf_modpriv *intf_modpriv, *tmp;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_module *module;

	dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
		ndev->name);

	/* Notify each of the modules that the interface is going away */
	list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
				 intf_list) {
		module = intf_modpriv->netcp_module;
		dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
			module->name);
		if (module->release)
			module->release(intf_modpriv->module_priv);
		list_del(&intf_modpriv->intf_list);
	}
	WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
	     ndev->name);

	list_del(&netcp->interface_list);

	of_node_put(netcp->node_interface);
	unregister_netdev(ndev);
	free_netdev(ndev);
}

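/*
 * netcp_probe() - platform probe. Defers until the Navigator DMA and QMSS
 * drivers are ready, creates a net_device for each child of the
 * "netcp-interfaces" node, then attaches every already-registered NETCP
 * module to the new device instance.
 */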
static int netcp_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device_node *child, *interfaces;
	struct netcp_device *netcp_device;
	struct device *dev = &pdev->dev;
	struct netcp_module *module;
	int ret;

	if (!knav_dma_device_ready() ||
	    !knav_qmss_device_ready())
		return -EPROBE_DEFER;

	if (!node) {
		dev_err(dev, "could not find device info\n");
		return -ENODEV;
	}

	/* Allocate a new NETCP device instance */
	netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
	if (!netcp_device)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable NETCP power-domain\n");
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Initialize the NETCP device instance */
	INIT_LIST_HEAD(&netcp_device->interface_head);
	INIT_LIST_HEAD(&netcp_device->modpriv_head);
	netcp_device->device = dev;
	platform_set_drvdata(pdev, netcp_device);

	/* create interfaces */
	interfaces = of_get_child_by_name(node, "netcp-interfaces");
	if (!interfaces) {
		dev_err(dev, "could not find netcp-interfaces node\n");
		ret = -ENODEV;
		goto probe_quit;
	}

	for_each_available_child_of_node(interfaces, child) {
		ret = netcp_create_interface(netcp_device, child);
		if (ret) {
			dev_err(dev, "could not create interface(%pOFn)\n",
				child);
			goto probe_quit_interface;
		}
	}

	of_node_put(interfaces);

	/* Add the device instance to the list */
	list_add_tail(&netcp_device->device_list, &netcp_devices);

	/* Probe & attach any modules already registered */
	mutex_lock(&netcp_modules_lock);
	for_each_netcp_module(module) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			dev_err(dev, "module(%s) probe failed\n", module->name);
	}
	mutex_unlock(&netcp_modules_lock);
	return 0;

probe_quit_interface:
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

	of_node_put(interfaces);

probe_quit:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return ret;
}

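/*
 * netcp_remove() - platform remove, mirroring netcp_probe(): modules are
 * detached first, and only then are the interfaces deleted and the power
 * domain released.
 */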
static void netcp_remove(struct platform_device *pdev)
{
	struct netcp_device *netcp_device = platform_get_drvdata(pdev);
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct netcp_inst_modpriv *inst_modpriv, *tmp;
	struct netcp_module *module;

	list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
				 inst_list) {
		module = inst_modpriv->netcp_module;
		dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
		module->remove(netcp_device, inst_modpriv->module_priv);
		list_del(&inst_modpriv->inst_list);
	}

	/* now that all modules are removed, clean up the interfaces */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

	WARN(!list_empty(&netcp_device->interface_head),
	     "%s interface list not empty!\n", pdev->name);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
}

static const struct of_device_id of_match[] = {
	{ .compatible = "ti,netcp-1.0", },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver netcp_driver = {
	.driver = {
		.name = "netcp-1.0",
		.of_match_table = of_match,
	},
	.probe = netcp_probe,
	.remove_new = netcp_remove,
};
module_platform_driver(netcp_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");