// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wwan.h>
#include <net/ipv6.h>
#include <net/pkt_sched.h>

#include "t7xx_hif_dpmaif_rx.h"
#include "t7xx_hif_dpmaif_tx.h"
#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#include "t7xx_port_proxy.h"
#include "t7xx_state_monitor.h"

#define IP_MUX_SESSION_DEFAULT	0
#define SBD_PACKET_TYPE_MASK	GENMASK(7, 4)

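/* Enable and schedule NAPI on every RX queue, taking one runtime PM
 * reference per queue so the device stays resumed while polling.
 */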
static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	struct dpmaif_ctrl *ctrl;
	int i, ret;

	ctrl = ctlb->hif_ctrl;

	if (ctlb->is_napi_en)
		return;

	for (i = 0; i < RXQ_NUM; i++) {
		/* The usage count has to be bumped every time before calling
		 * napi_schedule. It will be decreased in the poll routine,
		 * right after napi_complete_done is called.
		 */
		ret = pm_runtime_resume_and_get(ctrl->dev);
		if (ret < 0) {
			dev_err(ctrl->dev, "Failed to resume device: %d\n",
				ret);
			return;
		}
		napi_enable(ctlb->napi[i]);
		napi_schedule(ctlb->napi[i]);
	}
	ctlb->is_napi_en = true;
}

static void t7xx_ccmni_disable_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	int i;

	if (!ctlb->is_napi_en)
		return;

	for (i = 0; i < RXQ_NUM; i++) {
		napi_synchronize(ctlb->napi[i]);
		napi_disable(ctlb->napi[i]);
	}

	ctlb->is_napi_en = false;
}

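/* ndo_open: bring the carrier up, start all TX queues, and enable NAPI
 * when the first network interface is opened.
 */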
static int t7xx_ccmni_open(struct net_device *dev)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
	if (!atomic_fetch_inc(&ccmni_ctl->napi_usr_refcnt))
		t7xx_ccmni_enable_napi(ccmni_ctl);

	atomic_inc(&ccmni->usage);
	return 0;
}

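/* ndo_stop: counterpart of t7xx_ccmni_open(); NAPI is disabled once the
 * last open interface is closed.
 */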
static int t7xx_ccmni_close(struct net_device *dev)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;

	atomic_dec(&ccmni->usage);
	if (atomic_dec_and_test(&ccmni_ctl->napi_usr_refcnt))
		t7xx_ccmni_disable_napi(ccmni_ctl);

	netif_carrier_off(dev);
	netif_tx_disable(dev);
	return 0;
}

static int t7xx_ccmni_send_packet(struct t7xx_ccmni *ccmni, struct sk_buff *skb,
				  unsigned int txq_number)
{
	struct t7xx_ccmni_ctrl *ctlb = ccmni->ctlb;
	struct t7xx_skb_cb *skb_cb = T7XX_SKB_CB(skb);

	skb_cb->netif_idx = ccmni->index;

	if (t7xx_dpmaif_tx_send_skb(ctlb->hif_ctrl, txq_number, skb))
		return NETDEV_TX_BUSY;

	return 0;
}

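/* ndo_start_xmit: drop oversized or headroom-starved packets, otherwise
 * hand the skb to the DPMAIF TX path on the default queue.
 */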
static netdev_tx_t t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	int skb_len = skb->len;

	/* If MTU is changed or there is no headroom, drop the packet */
	if (skb->len > dev->mtu || skb_headroom(skb) < sizeof(struct ccci_header)) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	if (t7xx_ccmni_send_packet(ccmni, skb, DPMAIF_TX_DEFAULT_QUEUE))
		return NETDEV_TX_BUSY;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb_len;

	return NETDEV_TX_OK;
}

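/* ndo_tx_timeout: count the error and restart the TX queues if the
 * interface is still in use.
 */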
static void t7xx_ccmni_tx_timeout(struct net_device *dev, unsigned int __always_unused txqueue)
{
	struct t7xx_ccmni *ccmni = netdev_priv(dev);

	dev->stats.tx_errors++;

	if (atomic_read(&ccmni->usage) > 0)
		netif_tx_wake_all_queues(dev);
}

static const struct net_device_ops ccmni_netdev_ops = {
	.ndo_open = t7xx_ccmni_open,
	.ndo_stop = t7xx_ccmni_close,
	.ndo_start_xmit = t7xx_ccmni_start_xmit,
	.ndo_tx_timeout = t7xx_ccmni_tx_timeout,
};

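/* Restore carrier and TX queues on every in-use interface once the modem
 * is ready, then re-enable NAPI if it has users.
 */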
static void t7xx_ccmni_start(struct t7xx_ccmni_ctrl *ctlb)
{
	struct t7xx_ccmni *ccmni;
	int i;

	for (i = 0; i < ctlb->nic_dev_num; i++) {
		ccmni = ctlb->ccmni_inst[i];
		if (!ccmni)
			continue;

		if (atomic_read(&ccmni->usage) > 0) {
			netif_tx_start_all_queues(ccmni->dev);
			netif_carrier_on(ccmni->dev);
		}
	}

	if (atomic_read(&ctlb->napi_usr_refcnt))
		t7xx_ccmni_enable_napi(ctlb);
}

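/* Quiesce the interfaces around a modem stop: t7xx_ccmni_pre_stop()
 * disables TX before the DPMAIF state callback runs, and
 * t7xx_ccmni_post_stop() drops the carrier afterwards.
 */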
static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb)
{
	struct t7xx_ccmni *ccmni;
	int i;

	for (i = 0; i < ctlb->nic_dev_num; i++) {
		ccmni = ctlb->ccmni_inst[i];
		if (!ccmni)
			continue;

		if (atomic_read(&ccmni->usage) > 0)
			netif_tx_disable(ccmni->dev);
	}
}

static void t7xx_ccmni_post_stop(struct t7xx_ccmni_ctrl *ctlb)
{
	struct t7xx_ccmni *ccmni;
	int i;

	if (atomic_read(&ctlb->napi_usr_refcnt))
		t7xx_ccmni_disable_napi(ctlb);

	for (i = 0; i < ctlb->nic_dev_num; i++) {
		ccmni = ctlb->ccmni_inst[i];
		if (!ccmni)
			continue;

		if (atomic_read(&ccmni->usage) > 0)
			netif_carrier_off(ccmni->dev);
	}
}

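/* Net device setup hook for the WWAN core: reserve headroom for the CCCI
 * header, set the MTU limits, and advertise SG/checksum/GRO features.
 */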
static void t7xx_ccmni_wwan_setup(struct net_device *dev)
{
	dev->needed_headroom += sizeof(struct ccci_header);

	dev->mtu = ETH_DATA_LEN;
	dev->max_mtu = CCMNI_MTU_MAX;
	BUILD_BUG_ON(CCMNI_MTU_MAX > DPMAIF_HW_MTU_SIZE);

	dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
	dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO;

	dev->flags = IFF_POINTOPOINT | IFF_NOARP;

	dev->features = NETIF_F_VLAN_CHALLENGED;

	dev->features |= NETIF_F_SG;
	dev->hw_features |= NETIF_F_SG;

	dev->features |= NETIF_F_HW_CSUM;
	dev->hw_features |= NETIF_F_HW_CSUM;

	dev->features |= NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_RXCSUM;

	dev->features |= NETIF_F_GRO;
	dev->hw_features |= NETIF_F_GRO;

	dev->needs_free_netdev = true;

	dev->type = ARPHRD_NONE;

	dev->netdev_ops = &ccmni_netdev_ops;
}

static void t7xx_init_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	int i;

	/* One HW, but shared with multiple net devices,
	 * so add a dummy device for NAPI.
	 */
	init_dummy_netdev(&ctlb->dummy_dev);
	atomic_set(&ctlb->napi_usr_refcnt, 0);
	ctlb->is_napi_en = false;

	for (i = 0; i < RXQ_NUM; i++) {
		ctlb->napi[i] = &ctlb->hif_ctrl->rxq[i].napi;
		netif_napi_add_weight(&ctlb->dummy_dev, ctlb->napi[i], t7xx_dpmaif_napi_rx_poll,
				      NIC_NAPI_POLL_BUDGET);
	}
}

static void t7xx_uninit_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	int i;

	for (i = 0; i < RXQ_NUM; i++) {
		netif_napi_del(ctlb->napi[i]);
		ctlb->napi[i] = NULL;
	}
}

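/* WWAN core .newlink callback: bind the new netdev to its IP MUX session
 * slot and register it.
 */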
static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id,
				   struct netlink_ext_ack *extack)
{
	struct t7xx_ccmni_ctrl *ctlb = ctxt;
	struct t7xx_ccmni *ccmni;
	int ret;

	if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
		return -EINVAL;

	ccmni = wwan_netdev_drvpriv(dev);
	ccmni->index = if_id;
	ccmni->ctlb = ctlb;
	ccmni->dev = dev;
	atomic_set(&ccmni->usage, 0);
	ctlb->ccmni_inst[if_id] = ccmni;

	ret = register_netdevice(dev);
	if (ret)
		return ret;

	netif_device_attach(dev);
	return 0;
}

static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct list_head *head)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	struct t7xx_ccmni_ctrl *ctlb = ctxt;
	u8 if_id = ccmni->index;

	if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
		return;

	if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni))
		return;

	unregister_netdevice(dev);
}

static const struct wwan_ops ccmni_wwan_ops = {
	.priv_size = sizeof(struct t7xx_ccmni),
	.setup = t7xx_ccmni_wwan_setup,
	.newlink = t7xx_ccmni_wwan_newlink,
	.dellink = t7xx_ccmni_wwan_dellink,
};

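/* Register the WWAN ops once; the WWAN core then creates the netdev for
 * the default IP MUX channel through t7xx_ccmni_wwan_newlink().
 */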
static int t7xx_ccmni_register_wwan(struct t7xx_ccmni_ctrl *ctlb)
{
	struct device *dev = ctlb->hif_ctrl->dev;
	int ret;

	if (ctlb->wwan_is_registered)
		return 0;

	/* WWAN core will create a netdev for the default IP MUX channel */
	ret = wwan_register_ops(dev, &ccmni_wwan_ops, ctlb, IP_MUX_SESSION_DEFAULT);
	if (ret < 0) {
		dev_err(dev, "Unable to register WWAN ops, %d\n", ret);
		return ret;
	}

	ctlb->wwan_is_registered = true;
	return 0;
}

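/* Modem state machine notifier: register the WWAN device and start the
 * network interfaces on READY, quiesce them around EXCEPTION/STOPPED, and
 * forward the state to the DPMAIF layer.
 */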
static int t7xx_ccmni_md_state_callback(enum md_state state, void *para)
{
	struct t7xx_ccmni_ctrl *ctlb = para;
	struct device *dev;
	int ret = 0;

	dev = ctlb->hif_ctrl->dev;
	ctlb->md_sta = state;

	switch (state) {
	case MD_STATE_READY:
		ret = t7xx_ccmni_register_wwan(ctlb);
		if (!ret)
			t7xx_ccmni_start(ctlb);
		break;

	case MD_STATE_EXCEPTION:
	case MD_STATE_STOPPED:
		t7xx_ccmni_pre_stop(ctlb);

		ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
		if (ret < 0)
			dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);

		t7xx_ccmni_post_stop(ctlb);
		break;

	case MD_STATE_WAITING_FOR_HS1:
	case MD_STATE_WAITING_TO_STOP:
		ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
		if (ret < 0)
			dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);

		break;

	default:
		break;
	}

	return ret;
}

static void init_md_status_notifier(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
	struct t7xx_fsm_notifier *md_status_notifier;

	md_status_notifier = &ctlb->md_status_notify;
	INIT_LIST_HEAD(&md_status_notifier->entry);
	md_status_notifier->notifier_fn = t7xx_ccmni_md_state_callback;
	md_status_notifier->data = ctlb;

	t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier);
}

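/* RX callback invoked from the DPMAIF layer: resolve the target netdev
 * from the skb control block, set the L3 protocol, and pass the packet
 * to GRO.
 */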
static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_buff *skb,
				struct napi_struct *napi)
{
	struct t7xx_skb_cb *skb_cb;
	struct net_device *net_dev;
	struct t7xx_ccmni *ccmni;
	int pkt_type, skb_len;
	u8 netif_id;

	skb_cb = T7XX_SKB_CB(skb);
	netif_id = skb_cb->netif_idx;
	ccmni = ccmni_ctlb->ccmni_inst[netif_id];
	if (!ccmni) {
		dev_kfree_skb(skb);
		return;
	}

	net_dev = ccmni->dev;
	pkt_type = skb_cb->rx_pkt_type;
	skb->dev = net_dev;
	if (pkt_type == PKT_TYPE_IP6)
		skb->protocol = htons(ETH_P_IPV6);
	else
		skb->protocol = htons(ETH_P_IP);

	skb_len = skb->len;
	napi_gro_receive(napi, skb);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += skb_len;
}

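/* Wake a stopped TX queue when the DPMAIF layer reports a TX IRQ for it. */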
static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
{
	struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
	struct netdev_queue *net_queue;

	if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) {
		net_queue = netdev_get_tx_queue(ccmni->dev, qno);
		if (netif_tx_queue_stopped(net_queue))
			netif_tx_wake_queue(net_queue);
	}
}

static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
{
	struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
	struct netdev_queue *net_queue;

	if (atomic_read(&ccmni->usage) > 0) {
		netdev_err(ccmni->dev, "TX queue %d is full\n", qno);
		net_queue = netdev_get_tx_queue(ccmni->dev, qno);
		netif_tx_stop_queue(net_queue);
	}
}

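/* TX queue state callback from the DPMAIF layer: wake the queue on
 * DMPAIF_TXQ_STATE_IRQ, stop it on DMPAIF_TXQ_STATE_FULL.
 */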
static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev,
					  enum dpmaif_txq_state state, int qno)
{
	struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;

	if (ctlb->md_sta != MD_STATE_READY)
		return;

	if (!ctlb->ccmni_inst[0]) {
		dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n");
		return;
	}

	if (state == DMPAIF_TXQ_STATE_IRQ)
		t7xx_ccmni_queue_tx_irq_notify(ctlb, qno);
	else if (state == DMPAIF_TXQ_STATE_FULL)
		t7xx_ccmni_queue_tx_full_notify(ctlb, qno);
}

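/**
 * t7xx_ccmni_init() - Initialize the CCMNI network layer.
 * @t7xx_dev: MTK device context.
 *
 * Allocate the control block, initialize the DPMAIF HIF and NAPI contexts,
 * and register the modem state notifier.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */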
int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	struct t7xx_ccmni_ctrl *ctlb;

	ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL);
	if (!ctlb)
		return -ENOMEM;

	t7xx_dev->ccmni_ctlb = ctlb;
	ctlb->t7xx_dev = t7xx_dev;
	ctlb->callbacks.state_notify = t7xx_ccmni_queue_state_notify;
	ctlb->callbacks.recv_skb = t7xx_ccmni_recv_skb;
	ctlb->nic_dev_num = NIC_DEV_DEFAULT;

	ctlb->hif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &ctlb->callbacks);
	if (!ctlb->hif_ctrl)
		return -ENOMEM;

	t7xx_init_netdev_napi(ctlb);
	init_md_status_notifier(t7xx_dev);
	return 0;
}

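/**
 * t7xx_ccmni_exit() - Tear down the CCMNI network layer.
 * @t7xx_dev: MTK device context.
 *
 * Unregister the state notifier and WWAN ops, then release the NAPI and
 * DPMAIF HIF resources.
 */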
void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;

	t7xx_fsm_notifier_unregister(t7xx_dev->md, &ctlb->md_status_notify);

	if (ctlb->wwan_is_registered) {
		wwan_unregister_ops(&t7xx_dev->pdev->dev);
		ctlb->wwan_is_registered = false;
	}

	t7xx_uninit_netdev_napi(ctlb);
	t7xx_dpmaif_hif_exit(ctlb->hif_ctrl);
}