// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/qed/qede_rdma.h>
#include "qede.h"

static struct qedr_driver *qedr_drv;
static LIST_HEAD(qedr_dev_list);
static DEFINE_MUTEX(qedr_dev_list_lock);

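/* True if the underlying qed device reports RDMA support. */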
bool qede_rdma_supported(struct qede_dev *dev)
{
	return dev->dev_info.common.rdma_supported;
}

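/* Probe this device into qedr; a no-op until a qedr driver has registered.
 * Callers hold qedr_dev_list_lock.
 */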
static void _qede_rdma_dev_add(struct qede_dev *edev)
{
	if (!qedr_drv)
		return;

	/* Leftovers from previous error recovery */
	edev->rdma_info.exp_recovery = false;
	edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
						 edev->ndev);
}

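/* Set up the event list, its kref, and the single-threaded workqueue used
 * to deliver RDMA event notifications from process context.
 */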
static int qede_rdma_create_wq(struct qede_dev *edev)
{
	INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
	kref_init(&edev->rdma_info.refcnt);
	init_completion(&edev->rdma_info.event_comp);

	edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
	if (!edev->rdma_info.rdma_wq) {
		DP_NOTICE(edev, "qedr: Could not create workqueue\n");
		return -ENOMEM;
	}

	return 0;
}

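/* Flush pending work, then cancel and free every node left on the list. */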
static void qede_rdma_cleanup_event(struct qede_dev *edev)
{
	struct list_head *head = &edev->rdma_info.rdma_event_list;
	struct qede_rdma_event_work *event_node;

	flush_workqueue(edev->rdma_info.rdma_wq);
	while (!list_empty(head)) {
		event_node = list_entry(head->next, struct qede_rdma_event_work,
					list);
		cancel_work_sync(&event_node->work);
		list_del(&event_node->list);
		kfree(event_node);
	}
}

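/* kref release callback: the last add_event user has finished. */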
static void qede_rdma_complete_event(struct kref *ref)
{
	struct qede_rdma_dev *rdma_dev =
		container_of(ref, struct qede_rdma_dev, refcnt);

	/* no more events will be added after this */
	complete(&rdma_dev->event_comp);
}

static void qede_rdma_destroy_wq(struct qede_dev *edev)
{
	/* Avoid race with add_event flow, make sure it finishes before
	 * we start accessing the list and cleaning up the work
	 */
	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
	wait_for_completion(&edev->rdma_info.event_comp);

	qede_rdma_cleanup_event(edev);
	destroy_workqueue(edev->rdma_info.rdma_wq);
	edev->rdma_info.rdma_wq = NULL;
}

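/* Track the device on qedr_dev_list and register it with qedr. Skipped
 * during error recovery, where qedr was never fully stopped.
 */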
int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
{
	int rc;

	if (!qede_rdma_supported(edev))
		return 0;

	/* Cannot start qedr while recovering since it wasn't fully stopped */
	if (recovery)
		return 0;

	rc = qede_rdma_create_wq(edev);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&edev->rdma_info.entry);
	mutex_lock(&qedr_dev_list_lock);
	list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
	_qede_rdma_dev_add(edev);
	mutex_unlock(&qedr_dev_list_lock);

	return rc;
}

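/* Unregister from qedr; callers hold qedr_dev_list_lock. */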
static void _qede_rdma_dev_remove(struct qede_dev *edev)
{
	if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
		qedr_drv->remove(edev->rdma_info.qedr_dev);
}

void qede_rdma_dev_remove(struct qede_dev *edev, bool recovery)
{
	if (!qede_rdma_supported(edev))
		return;

	/* Cannot remove qedr while recovering since it wasn't fully stopped */
	if (!recovery) {
		qede_rdma_destroy_wq(edev);
		mutex_lock(&qedr_dev_list_lock);
		if (!edev->rdma_info.exp_recovery)
			_qede_rdma_dev_remove(edev);
		edev->rdma_info.qedr_dev = NULL;
		list_del(&edev->rdma_info.entry);
		mutex_unlock(&qedr_dev_list_lock);
	} else {
		if (!edev->rdma_info.exp_recovery) {
			mutex_lock(&qedr_dev_list_lock);
			_qede_rdma_dev_remove(edev);
			mutex_unlock(&qedr_dev_list_lock);
		}
		edev->rdma_info.exp_recovery = true;
	}
}

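/* Link-state notifiers: forward QEDE_UP/QEDE_DOWN/QEDE_CLOSE to qedr
 * under qedr_dev_list_lock.
 */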
static void _qede_rdma_dev_open(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
}

static void qede_rdma_dev_open(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_open(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void _qede_rdma_dev_close(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
}

static void qede_rdma_dev_close(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_close(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void qede_rdma_dev_shutdown(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
	mutex_unlock(&qedr_dev_list_lock);
}

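/* Called by qedr when it registers: latch the driver ops, then probe every
 * qede device discovered so far and open those whose netdev is already up.
 */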
int qede_rdma_register_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;
	u8 qedr_counter = 0;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv) {
		mutex_unlock(&qedr_dev_list_lock);
		return -EINVAL;
	}
	qedr_drv = drv;

	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		struct net_device *ndev;

		qedr_counter++;
		_qede_rdma_dev_add(edev);
		ndev = edev->ndev;
		if (netif_running(ndev) && netif_oper_up(ndev))
			_qede_rdma_dev_open(edev);
	}
	mutex_unlock(&qedr_dev_list_lock);

	pr_notice("qedr: discovered and registered %d RDMA funcs\n",
		  qedr_counter);

	return 0;
}
EXPORT_SYMBOL(qede_rdma_register_driver);

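/* Called by qedr when it unregisters: detach every live device and drop
 * the driver ops.
 */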
void qede_rdma_unregister_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;

	mutex_lock(&qedr_dev_list_lock);
	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		/* If device has experienced recovery it was already removed */
		if (edev->rdma_info.qedr_dev && !edev->rdma_info.exp_recovery)
			_qede_rdma_dev_remove(edev);
	}
	qedr_drv = NULL;
	mutex_unlock(&qedr_dev_list_lock);
}
EXPORT_SYMBOL(qede_rdma_unregister_driver);

static void qede_rdma_changeaddr(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
}

static void qede_rdma_change_mtu(struct qede_dev *edev)
{
	if (qede_rdma_supported(edev)) {
		if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
			qedr_drv->notify(edev->rdma_info.qedr_dev,
					 QEDE_CHANGE_MTU);
	}
}

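/* Reuse an idle node from the event list, or allocate and queue a new one.
 * GFP_ATOMIC since callers may not be able to sleep.
 */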
static struct qede_rdma_event_work *
qede_rdma_get_free_event_node(struct qede_dev *edev)
{
	struct qede_rdma_event_work *event_node = NULL;
	bool found = false;

	list_for_each_entry(event_node, &edev->rdma_info.rdma_event_list,
			    list) {
		if (!work_pending(&event_node->work)) {
			found = true;
			break;
		}
	}

	if (!found) {
		event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC);
		if (!event_node) {
			DP_NOTICE(edev,
				  "qedr: Could not allocate memory for rdma work\n");
			return NULL;
		}
		list_add_tail(&event_node->list,
			      &edev->rdma_info.rdma_event_list);
	}

	return event_node;
}

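/* Work handler: dispatch a queued event to its notification path. */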
static void qede_rdma_handle_event(struct work_struct *work)
{
	struct qede_rdma_event_work *event_node;
	enum qede_rdma_event event;
	struct qede_dev *edev;

	event_node = container_of(work, struct qede_rdma_event_work, work);
	event = event_node->event;
	edev = event_node->ptr;

	switch (event) {
	case QEDE_UP:
		qede_rdma_dev_open(edev);
		break;
	case QEDE_DOWN:
		qede_rdma_dev_close(edev);
		break;
	case QEDE_CLOSE:
		qede_rdma_dev_shutdown(edev);
		break;
	case QEDE_CHANGE_ADDR:
		qede_rdma_changeaddr(edev);
		break;
	case QEDE_CHANGE_MTU:
		qede_rdma_change_mtu(edev);
		break;
	default:
		DP_NOTICE(edev, "Invalid rdma event %d", event);
	}
}

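/* Queue an event for deferred delivery to qedr. The kref pins the
 * workqueue against a concurrent qede_rdma_destroy_wq().
 */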
static void qede_rdma_add_event(struct qede_dev *edev,
				enum qede_rdma_event event)
{
	struct qede_rdma_event_work *event_node;

	/* If a recovery was experienced avoid adding the event */
	if (edev->rdma_info.exp_recovery)
		return;

	if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
		return;

	/* We don't want the cleanup flow to start while we're allocating and
	 * scheduling the work
	 */
	if (!kref_get_unless_zero(&edev->rdma_info.refcnt))
		return; /* already being destroyed */

	event_node = qede_rdma_get_free_event_node(edev);
	if (!event_node)
		goto out;

	event_node->event = event;
	event_node->ptr = edev;

	INIT_WORK(&event_node->work, qede_rdma_handle_event);
	queue_work(edev->rdma_info.rdma_wq, &event_node->work);

out:
	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
}

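/* Entry points used by the qede core to post deferred RDMA events. */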
void qede_rdma_dev_event_open(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_UP);
}

void qede_rdma_dev_event_close(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_DOWN);
}

void qede_rdma_event_changeaddr(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
}

void qede_rdma_event_change_mtu(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_CHANGE_MTU);
}