1 | /* |
2 | * Copyright (c) 2005 Topspin Communications. All rights reserved. |
3 | * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. |
4 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. |
5 | * Copyright (c) 2005 Voltaire, Inc. All rights reserved. |
6 | * Copyright (c) 2005 PathScale, Inc. All rights reserved. |
7 | * |
8 | * This software is available to you under a choice of one of two |
9 | * licenses. You may choose to be licensed under the terms of the GNU |
10 | * General Public License (GPL) Version 2, available from the file |
11 | * COPYING in the main directory of this source tree, or the |
12 | * OpenIB.org BSD license below: |
13 | * |
14 | * Redistribution and use in source and binary forms, with or |
15 | * without modification, are permitted provided that the following |
16 | * conditions are met: |
17 | * |
18 | * - Redistributions of source code must retain the above |
19 | * copyright notice, this list of conditions and the following |
20 | * disclaimer. |
21 | * |
22 | * - Redistributions in binary form must reproduce the above |
23 | * copyright notice, this list of conditions and the following |
24 | * disclaimer in the documentation and/or other materials |
25 | * provided with the distribution. |
26 | * |
27 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
28 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
29 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
30 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
31 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
32 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
33 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
34 | * SOFTWARE. |
35 | */ |
36 | |
37 | #include <linux/module.h> |
38 | #include <linux/init.h> |
39 | #include <linux/device.h> |
40 | #include <linux/err.h> |
41 | #include <linux/fs.h> |
42 | #include <linux/poll.h> |
43 | #include <linux/sched.h> |
44 | #include <linux/file.h> |
45 | #include <linux/cdev.h> |
46 | #include <linux/anon_inodes.h> |
47 | #include <linux/slab.h> |
48 | #include <linux/sched/mm.h> |
49 | |
50 | #include <linux/uaccess.h> |
51 | |
52 | #include <rdma/ib.h> |
53 | #include <rdma/uverbs_std_types.h> |
54 | #include <rdma/rdma_netlink.h> |
55 | |
56 | #include "uverbs.h" |
57 | #include "core_priv.h" |
58 | #include "rdma_core.h" |
59 | |
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");
63 | |
64 | enum { |
65 | IB_UVERBS_MAJOR = 231, |
66 | IB_UVERBS_BASE_MINOR = 192, |
67 | IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS, |
68 | IB_UVERBS_NUM_FIXED_MINOR = 32, |
69 | IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES - IB_UVERBS_NUM_FIXED_MINOR, |
70 | }; |
71 | |
72 | #define IB_UVERBS_BASE_DEV MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR) |
73 | |
74 | static dev_t dynamic_uverbs_dev; |
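
/*
 * Note on minor numbering (summary of the constants above): the first
 * IB_UVERBS_NUM_FIXED_MINOR (32) uverbs devices get minors from the
 * statically registered IB_UVERBS_BASE_DEV region (231:192..231:223);
 * any additional devices are numbered out of the dynamically allocated
 * dynamic_uverbs_dev region (see ib_uverbs_add_one()).
 */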
75 | |
76 | static DEFINE_IDA(uverbs_ida); |
77 | static int ib_uverbs_add_one(struct ib_device *device); |
78 | static void ib_uverbs_remove_one(struct ib_device *device, void *client_data); |
79 | |
80 | static char *uverbs_devnode(const struct device *dev, umode_t *mode) |
81 | { |
82 | if (mode) |
83 | *mode = 0666; |
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
85 | } |
86 | |
87 | static const struct class uverbs_class = { |
	.name = "infiniband_verbs",
89 | .devnode = uverbs_devnode, |
90 | }; |
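
/*
 * The devnode callback above makes the character devices show up under
 * /dev/infiniband/ as uverbs<N>, world readable and writable (0666).
 */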
91 | |
92 | /* |
93 | * Must be called with the ufile->device->disassociate_srcu held, and the lock |
94 | * must be held until use of the ucontext is finished. |
95 | */ |
96 | struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile) |
97 | { |
98 | /* |
99 | * We do not hold the hw_destroy_rwsem lock for this flow, instead |
100 | * srcu is used. It does not matter if someone races this with |
101 | * get_context, we get NULL or valid ucontext. |
102 | */ |
103 | struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext); |
104 | |
	if (!srcu_dereference(ufile->device->ib_dev,
			      &ufile->device->disassociate_srcu))
		return ERR_PTR(-EIO);

	if (!ucontext)
		return ERR_PTR(-EINVAL);
111 | |
112 | return ucontext; |
113 | } |
114 | EXPORT_SYMBOL(ib_uverbs_get_ucontext_file); |
115 | |
116 | int uverbs_dealloc_mw(struct ib_mw *mw) |
117 | { |
118 | struct ib_pd *pd = mw->pd; |
119 | int ret; |
120 | |
121 | ret = mw->device->ops.dealloc_mw(mw); |
122 | if (ret) |
123 | return ret; |
124 | |
	atomic_dec(&pd->usecnt);
	kfree(mw);
127 | return ret; |
128 | } |
129 | |
130 | static void ib_uverbs_release_dev(struct device *device) |
131 | { |
132 | struct ib_uverbs_device *dev = |
133 | container_of(device, struct ib_uverbs_device, dev); |
134 | |
	uverbs_destroy_api(dev->uapi);
	cleanup_srcu_struct(&dev->disassociate_srcu);
	mutex_destroy(&dev->lists_mutex);
	mutex_destroy(&dev->xrcd_tree_mutex);
	kfree(dev);
140 | } |
141 | |
142 | void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file, |
143 | struct ib_ucq_object *uobj) |
144 | { |
145 | struct ib_uverbs_event *evt, *tmp; |
146 | |
147 | if (ev_file) { |
		spin_lock_irq(&ev_file->ev_queue.lock);
		list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
			list_del(&evt->list);
			kfree(evt);
		}
		spin_unlock_irq(&ev_file->ev_queue.lock);

		uverbs_uobject_put(&ev_file->uobj);
	}

	ib_uverbs_release_uevent(&uobj->uevent);
159 | } |
160 | |
161 | void ib_uverbs_release_uevent(struct ib_uevent_object *uobj) |
162 | { |
163 | struct ib_uverbs_async_event_file *async_file = uobj->event_file; |
164 | struct ib_uverbs_event *evt, *tmp; |
165 | |
166 | if (!async_file) |
167 | return; |
168 | |
	spin_lock_irq(&async_file->ev_queue.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&async_file->ev_queue.lock);
	uverbs_uobject_put(&async_file->uobj);
176 | } |
177 | |
178 | void ib_uverbs_detach_umcast(struct ib_qp *qp, |
179 | struct ib_uqp_object *uobj) |
180 | { |
181 | struct ib_uverbs_mcast_entry *mcast, *tmp; |
182 | |
183 | list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) { |
		ib_detach_mcast(qp, &mcast->gid, mcast->lid);
		list_del(&mcast->list);
		kfree(mcast);
187 | } |
188 | } |
189 | |
190 | static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev) |
191 | { |
192 | complete(&dev->comp); |
193 | } |
194 | |
195 | void ib_uverbs_release_file(struct kref *ref) |
196 | { |
197 | struct ib_uverbs_file *file = |
198 | container_of(ref, struct ib_uverbs_file, ref); |
199 | struct ib_device *ib_dev; |
200 | int srcu_key; |
201 | |
	release_ufile_idr_uobject(file);

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (ib_dev && !ib_dev->ops.disassociate_ucontext)
		module_put(ib_dev->ops.owner);
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

	if (refcount_dec_and_test(&file->device->refcount))
		ib_uverbs_comp_dev(file->device);

	if (file->default_async_file)
		uverbs_uobject_put(&file->default_async_file->uobj);
	put_device(&file->device->dev);

	if (file->disassociate_page)
		__free_pages(file->disassociate_page, 0);
	mutex_destroy(&file->umap_lock);
	mutex_destroy(&file->ucontext_lock);
	kfree(file);
223 | } |
224 | |
225 | static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue, |
226 | struct file *filp, char __user *buf, |
227 | size_t count, loff_t *pos, |
228 | size_t eventsz) |
229 | { |
230 | struct ib_uverbs_event *event; |
231 | int ret = 0; |
232 | |
	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		if (ev_queue->is_closed) {
			spin_unlock_irq(&ev_queue->lock);
			return -EIO;
		}

		spin_unlock_irq(&ev_queue->lock);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_queue->poll_wait,
					     (!list_empty(&ev_queue->event_list) ||
					      ev_queue->is_closed)))
			return -ERESTARTSYS;

		spin_lock_irq(&ev_queue->lock);
251 | } |
252 | |
253 | event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list); |
254 | |
255 | if (eventsz > count) { |
256 | ret = -EINVAL; |
257 | event = NULL; |
258 | } else { |
		list_del(ev_queue->event_list.next);
		if (event->counter) {
			++(*event->counter);
			list_del(&event->obj_list);
		}
	}

	spin_unlock_irq(&ev_queue->lock);

	if (event) {
		if (copy_to_user(buf, event, eventsz))
			ret = -EFAULT;
		else
			ret = eventsz;
	}

	kfree(event);
276 | |
277 | return ret; |
278 | } |
279 | |
280 | static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf, |
281 | size_t count, loff_t *pos) |
282 | { |
283 | struct ib_uverbs_async_event_file *file = filp->private_data; |
284 | |
	return ib_uverbs_event_read(&file->ev_queue, filp, buf, count, pos,
				    sizeof(struct ib_uverbs_async_event_desc));
287 | } |
288 | |
289 | static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf, |
290 | size_t count, loff_t *pos) |
291 | { |
292 | struct ib_uverbs_completion_event_file *comp_ev_file = |
293 | filp->private_data; |
294 | |
	return ib_uverbs_event_read(&comp_ev_file->ev_queue, filp, buf, count,
				    pos,
				    sizeof(struct ib_uverbs_comp_event_desc));
298 | } |
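
/*
 * Informational sketch of the userspace side (simplified): consumers poll()
 * the event fd and then issue one read() per event, always with a buffer of
 * at least the fixed descriptor size, e.g.
 *
 *	struct ib_uverbs_comp_event_desc desc;
 *
 *	read(comp_channel_fd, &desc, sizeof(desc));
 *
 * A short buffer gets -EINVAL, and a read on an empty non-blocking fd gets
 * -EAGAIN, as implemented in ib_uverbs_event_read() above.
 */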
299 | |
300 | static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue, |
301 | struct file *filp, |
302 | struct poll_table_struct *wait) |
303 | { |
304 | __poll_t pollflags = 0; |
305 | |
	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	else if (ev_queue->is_closed)
		pollflags = EPOLLERR;
	spin_unlock_irq(&ev_queue->lock);
314 | |
315 | return pollflags; |
316 | } |
317 | |
318 | static __poll_t ib_uverbs_async_event_poll(struct file *filp, |
319 | struct poll_table_struct *wait) |
320 | { |
321 | struct ib_uverbs_async_event_file *file = filp->private_data; |
322 | |
	return ib_uverbs_event_poll(&file->ev_queue, filp, wait);
324 | } |
325 | |
326 | static __poll_t ib_uverbs_comp_event_poll(struct file *filp, |
327 | struct poll_table_struct *wait) |
328 | { |
329 | struct ib_uverbs_completion_event_file *comp_ev_file = |
330 | filp->private_data; |
331 | |
	return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
333 | } |
334 | |
335 | static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on) |
336 | { |
337 | struct ib_uverbs_async_event_file *file = filp->private_data; |
338 | |
339 | return fasync_helper(fd, filp, on, &file->ev_queue.async_queue); |
340 | } |
341 | |
342 | static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on) |
343 | { |
344 | struct ib_uverbs_completion_event_file *comp_ev_file = |
345 | filp->private_data; |
346 | |
347 | return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue); |
348 | } |
349 | |
350 | const struct file_operations uverbs_event_fops = { |
351 | .owner = THIS_MODULE, |
352 | .read = ib_uverbs_comp_event_read, |
353 | .poll = ib_uverbs_comp_event_poll, |
354 | .release = uverbs_uobject_fd_release, |
355 | .fasync = ib_uverbs_comp_event_fasync, |
356 | .llseek = no_llseek, |
357 | }; |
358 | |
359 | const struct file_operations uverbs_async_event_fops = { |
360 | .owner = THIS_MODULE, |
361 | .read = ib_uverbs_async_event_read, |
362 | .poll = ib_uverbs_async_event_poll, |
363 | .release = uverbs_async_event_release, |
364 | .fasync = ib_uverbs_async_event_fasync, |
365 | .llseek = no_llseek, |
366 | }; |
367 | |
368 | void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context) |
369 | { |
370 | struct ib_uverbs_event_queue *ev_queue = cq_context; |
371 | struct ib_ucq_object *uobj; |
372 | struct ib_uverbs_event *entry; |
373 | unsigned long flags; |
374 | |
375 | if (!ev_queue) |
376 | return; |
377 | |
378 | spin_lock_irqsave(&ev_queue->lock, flags); |
379 | if (ev_queue->is_closed) { |
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	uobj = cq->uobject;

	entry->desc.comp.cq_handle = cq->uobject->uevent.uobject.user_handle;
	entry->counter = &uobj->comp_events_reported;

	list_add_tail(&entry->list, &ev_queue->event_list);
	list_add_tail(&entry->obj_list, &uobj->comp_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);
398 | |
399 | wake_up_interruptible(&ev_queue->poll_wait); |
400 | kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN); |
401 | } |
402 | |
403 | void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file, |
404 | __u64 element, __u64 event, |
405 | struct list_head *obj_list, u32 *counter) |
406 | { |
407 | struct ib_uverbs_event *entry; |
408 | unsigned long flags; |
409 | |
410 | if (!async_file) |
411 | return; |
412 | |
413 | spin_lock_irqsave(&async_file->ev_queue.lock, flags); |
414 | if (async_file->ev_queue.is_closed) { |
		spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
		return;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
		return;
	}

	entry->desc.async.element = element;
	entry->desc.async.event_type = event;
	entry->desc.async.reserved = 0;
	entry->counter = counter;

	list_add_tail(&entry->list, &async_file->ev_queue.event_list);
	if (obj_list)
		list_add_tail(&entry->obj_list, obj_list);
	spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
434 | |
435 | wake_up_interruptible(&async_file->ev_queue.poll_wait); |
436 | kill_fasync(&async_file->ev_queue.async_queue, SIGIO, POLL_IN); |
437 | } |
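
/*
 * Note: both event paths above allocate with GFP_ATOMIC while holding the
 * queue lock; if the allocation fails, or the queue has already been closed,
 * the event is silently dropped rather than reported to userspace.
 */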
438 | |
439 | static void uverbs_uobj_event(struct ib_uevent_object *eobj, |
440 | struct ib_event *event) |
441 | { |
	ib_uverbs_async_handler(eobj->event_file, eobj->uobject.user_handle,
				event->event, &eobj->event_list,
				&eobj->events_reported);
445 | } |
446 | |
447 | void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr) |
448 | { |
	uverbs_uobj_event(&event->element.cq->uobject->uevent, event);
450 | } |
451 | |
452 | void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr) |
453 | { |
454 | /* for XRC target qp's, check that qp is live */ |
455 | if (!event->element.qp->uobject) |
456 | return; |
457 | |
	uverbs_uobj_event(&event->element.qp->uobject->uevent, event);
459 | } |
460 | |
461 | void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr) |
462 | { |
	uverbs_uobj_event(&event->element.wq->uobject->uevent, event);
464 | } |
465 | |
466 | void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr) |
467 | { |
	uverbs_uobj_event(&event->element.srq->uobject->uevent, event);
469 | } |
470 | |
471 | static void ib_uverbs_event_handler(struct ib_event_handler *handler, |
472 | struct ib_event *event) |
473 | { |
474 | ib_uverbs_async_handler( |
475 | container_of(handler, struct ib_uverbs_async_event_file, |
476 | event_handler), |
		event->element.port_num, event->event, NULL, NULL);
478 | } |
479 | |
480 | void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue) |
481 | { |
482 | spin_lock_init(&ev_queue->lock); |
	INIT_LIST_HEAD(&ev_queue->event_list);
484 | init_waitqueue_head(&ev_queue->poll_wait); |
485 | ev_queue->is_closed = 0; |
486 | ev_queue->async_queue = NULL; |
487 | } |
488 | |
489 | void ib_uverbs_init_async_event_file( |
490 | struct ib_uverbs_async_event_file *async_file) |
491 | { |
492 | struct ib_uverbs_file *uverbs_file = async_file->uobj.ufile; |
493 | struct ib_device *ib_dev = async_file->uobj.context->device; |
494 | |
	ib_uverbs_init_event_queue(&async_file->ev_queue);

	/* The first async_event_file becomes the default one for the file. */
	mutex_lock(&uverbs_file->ucontext_lock);
	if (!uverbs_file->default_async_file) {
		/* Pairs with the put in ib_uverbs_release_file */
		uverbs_uobject_get(&async_file->uobj);
		smp_store_release(&uverbs_file->default_async_file, async_file);
	}
	mutex_unlock(&uverbs_file->ucontext_lock);

	INIT_IB_EVENT_HANDLER(&async_file->event_handler, ib_dev,
			      ib_uverbs_event_handler);
	ib_register_event_handler(&async_file->event_handler);
509 | } |
510 | |
511 | static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr, |
512 | struct ib_uverbs_ex_cmd_hdr *ex_hdr, size_t count, |
513 | const struct uverbs_api_write_method *method_elm) |
514 | { |
515 | if (method_elm->is_ex) { |
516 | count -= sizeof(*hdr) + sizeof(*ex_hdr); |
517 | |
518 | if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count) |
519 | return -EINVAL; |
520 | |
521 | if (hdr->in_words * 8 < method_elm->req_size) |
522 | return -ENOSPC; |
523 | |
524 | if (ex_hdr->cmd_hdr_reserved) |
525 | return -EINVAL; |
526 | |
527 | if (ex_hdr->response) { |
528 | if (!hdr->out_words && !ex_hdr->provider_out_words) |
529 | return -EINVAL; |
530 | |
531 | if (hdr->out_words * 8 < method_elm->resp_size) |
532 | return -ENOSPC; |
533 | |
534 | if (!access_ok(u64_to_user_ptr(ex_hdr->response), |
535 | (hdr->out_words + ex_hdr->provider_out_words) * 8)) |
536 | return -EFAULT; |
537 | } else { |
538 | if (hdr->out_words || ex_hdr->provider_out_words) |
539 | return -EINVAL; |
540 | } |
541 | |
542 | return 0; |
543 | } |
544 | |
545 | /* not extended command */ |
546 | if (hdr->in_words * 4 != count) |
547 | return -EINVAL; |
548 | |
549 | if (count < method_elm->req_size + sizeof(*hdr)) { |
550 | /* |
551 | * rdma-core v18 and v19 have a bug where they send DESTROY_CQ |
552 | * with a 16 byte write instead of 24. Old kernels didn't |
553 | * check the size so they allowed this. Now that the size is |
554 | * checked provide a compatibility work around to not break |
555 | * those userspaces. |
556 | */ |
557 | if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ && |
558 | count == 16) { |
559 | hdr->in_words = 6; |
560 | return 0; |
561 | } |
562 | return -ENOSPC; |
563 | } |
564 | if (hdr->out_words * 4 < method_elm->resp_size) |
565 | return -ENOSPC; |
566 | |
567 | return 0; |
568 | } |
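
/*
 * Layout of a write() command, as enforced by verify_hdr() above and consumed
 * by ib_uverbs_write() below (informational summary):
 *
 *   legacy commands:   struct ib_uverbs_cmd_hdr followed by the request;
 *                      hdr.in_words counts the total write length (header
 *                      included) in 32-bit words, hdr.out_words the response
 *                      buffer in 32-bit words.
 *   extended commands: struct ib_uverbs_cmd_hdr plus struct
 *                      ib_uverbs_ex_cmd_hdr followed by the request; the core
 *                      word counts are in 64-bit units and exclude both
 *                      headers, with provider_in/out_words sizing the
 *                      driver-private part.
 */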
569 | |
570 | static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, |
571 | size_t count, loff_t *pos) |
572 | { |
573 | struct ib_uverbs_file *file = filp->private_data; |
574 | const struct uverbs_api_write_method *method_elm; |
575 | struct uverbs_api *uapi = file->device->uapi; |
576 | struct ib_uverbs_ex_cmd_hdr ex_hdr; |
577 | struct ib_uverbs_cmd_hdr hdr; |
578 | struct uverbs_attr_bundle bundle; |
579 | int srcu_key; |
580 | ssize_t ret; |
581 | |
582 | if (!ib_safe_file_access(filp)) { |
		pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
585 | return -EACCES; |
586 | } |
587 | |
588 | if (count < sizeof(hdr)) |
589 | return -EINVAL; |
590 | |
	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	method_elm = uapi_get_method(uapi, hdr.command);
	if (IS_ERR(method_elm))
		return PTR_ERR(method_elm);
597 | |
598 | if (method_elm->is_ex) { |
599 | if (count < (sizeof(hdr) + sizeof(ex_hdr))) |
600 | return -EINVAL; |
		if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr)))
			return -EFAULT;
	}

	ret = verify_hdr(&hdr, &ex_hdr, count, method_elm);
	if (ret)
		return ret;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
610 | |
611 | buf += sizeof(hdr); |
612 | |
613 | memset(bundle.attr_present, 0, sizeof(bundle.attr_present)); |
614 | bundle.ufile = file; |
615 | bundle.context = NULL; /* only valid if bundle has uobject */ |
616 | bundle.uobject = NULL; |
617 | if (!method_elm->is_ex) { |
618 | size_t in_len = hdr.in_words * 4 - sizeof(hdr); |
619 | size_t out_len = hdr.out_words * 4; |
620 | u64 response = 0; |
621 | |
622 | if (method_elm->has_udata) { |
623 | bundle.driver_udata.inlen = |
624 | in_len - method_elm->req_size; |
625 | in_len = method_elm->req_size; |
626 | if (bundle.driver_udata.inlen) |
627 | bundle.driver_udata.inbuf = buf + in_len; |
628 | else |
629 | bundle.driver_udata.inbuf = NULL; |
630 | } else { |
631 | memset(&bundle.driver_udata, 0, |
632 | sizeof(bundle.driver_udata)); |
633 | } |
634 | |
635 | if (method_elm->has_resp) { |
636 | /* |
637 | * The macros check that if has_resp is set |
638 | * then the command request structure starts |
639 | * with a '__aligned u64 response' member. |
640 | */ |
641 | ret = get_user(response, (const u64 __user *)buf); |
642 | if (ret) |
643 | goto out_unlock; |
644 | |
645 | if (method_elm->has_udata) { |
646 | bundle.driver_udata.outlen = |
647 | out_len - method_elm->resp_size; |
648 | out_len = method_elm->resp_size; |
649 | if (bundle.driver_udata.outlen) |
650 | bundle.driver_udata.outbuf = |
651 | u64_to_user_ptr(response + |
652 | out_len); |
653 | else |
654 | bundle.driver_udata.outbuf = NULL; |
655 | } |
656 | } else { |
657 | bundle.driver_udata.outlen = 0; |
658 | bundle.driver_udata.outbuf = NULL; |
659 | } |
660 | |
		ib_uverbs_init_udata_buf_or_null(
			&bundle.ucore, buf, u64_to_user_ptr(response),
			in_len, out_len);
	} else {
		buf += sizeof(ex_hdr);

		ib_uverbs_init_udata_buf_or_null(&bundle.ucore, buf,
					u64_to_user_ptr(ex_hdr.response),
					hdr.in_words * 8, hdr.out_words * 8);

		ib_uverbs_init_udata_buf_or_null(
			&bundle.driver_udata, buf + bundle.ucore.inlen,
			u64_to_user_ptr(ex_hdr.response) + bundle.ucore.outlen,
			ex_hdr.provider_in_words * 8,
			ex_hdr.provider_out_words * 8);

	}

	ret = method_elm->handler(&bundle);
	if (bundle.uobject)
		uverbs_finalize_object(bundle.uobject, UVERBS_ACCESS_NEW, true,
				       !ret, &bundle);
out_unlock:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
685 | return (ret) ? : count; |
686 | } |
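
/*
 * Illustrative sketch only (simplified, not authoritative ABI documentation):
 * a userspace library such as rdma-core drives the path above roughly as
 *
 *	write(uverbs_fd, buf, len);
 *
 * where buf starts with a struct ib_uverbs_cmd_hdr (and, for extended
 * commands, a struct ib_uverbs_ex_cmd_hdr) followed by the request body.
 * On success write() returns len, and any response is copied to the user
 * pointer carried in the request's response field.
 */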
687 | |
688 | static const struct vm_operations_struct rdma_umap_ops; |
689 | |
690 | static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma) |
691 | { |
692 | struct ib_uverbs_file *file = filp->private_data; |
693 | struct ib_ucontext *ucontext; |
694 | int ret = 0; |
695 | int srcu_key; |
696 | |
	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ucontext = ib_uverbs_get_ucontext_file(file);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
701 | goto out; |
702 | } |
703 | vma->vm_ops = &rdma_umap_ops; |
704 | ret = ucontext->device->ops.mmap(ucontext, vma); |
705 | out: |
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
707 | return ret; |
708 | } |
709 | |
710 | /* |
711 | * The VMA has been dup'd, initialize the vm_private_data with a new tracking |
712 | * struct |
713 | */ |
714 | static void rdma_umap_open(struct vm_area_struct *vma) |
715 | { |
716 | struct ib_uverbs_file *ufile = vma->vm_file->private_data; |
717 | struct rdma_umap_priv *opriv = vma->vm_private_data; |
718 | struct rdma_umap_priv *priv; |
719 | |
720 | if (!opriv) |
721 | return; |
722 | |
723 | /* We are racing with disassociation */ |
	if (!down_read_trylock(&ufile->hw_destroy_rwsem))
725 | goto out_zap; |
726 | /* |
727 | * Disassociation already completed, the VMA should already be zapped. |
728 | */ |
729 | if (!ufile->ucontext) |
730 | goto out_unlock; |
731 | |
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto out_unlock;
	rdma_umap_priv_init(priv, vma, opriv->entry);

	up_read(&ufile->hw_destroy_rwsem);
	return;

out_unlock:
	up_read(&ufile->hw_destroy_rwsem);
out_zap:
	/*
	 * We can't allow the VMA to be created with the actual IO pages, that
	 * would break our API contract, and it can't be stopped at this
	 * point, so zap it.
	 */
	vma->vm_private_data = NULL;
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
750 | } |
751 | |
752 | static void rdma_umap_close(struct vm_area_struct *vma) |
753 | { |
754 | struct ib_uverbs_file *ufile = vma->vm_file->private_data; |
755 | struct rdma_umap_priv *priv = vma->vm_private_data; |
756 | |
757 | if (!priv) |
758 | return; |
759 | |
760 | /* |
761 | * The vma holds a reference on the struct file that created it, which |
762 | * in turn means that the ib_uverbs_file is guaranteed to exist at |
763 | * this point. |
764 | */ |
765 | mutex_lock(&ufile->umap_lock); |
766 | if (priv->entry) |
		rdma_user_mmap_entry_put(priv->entry);

	list_del(&priv->list);
	mutex_unlock(&ufile->umap_lock);
	kfree(priv);
772 | } |
773 | |
774 | /* |
 * Once zap_vma_ptes() has been called, touches to the VMA will come here and
776 | * we return a dummy writable zero page for all the pfns. |
777 | */ |
778 | static vm_fault_t rdma_umap_fault(struct vm_fault *vmf) |
779 | { |
780 | struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data; |
781 | struct rdma_umap_priv *priv = vmf->vma->vm_private_data; |
782 | vm_fault_t ret = 0; |
783 | |
784 | if (!priv) |
785 | return VM_FAULT_SIGBUS; |
786 | |
787 | /* Read only pages can just use the system zero page. */ |
788 | if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) { |
789 | vmf->page = ZERO_PAGE(vmf->address); |
		get_page(vmf->page);
		return 0;
	}

	mutex_lock(&ufile->umap_lock);
	if (!ufile->disassociate_page)
		ufile->disassociate_page =
			alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);

	if (ufile->disassociate_page) {
		/*
		 * This VMA is forced to always be shared so this doesn't have
		 * to worry about COW.
		 */
		vmf->page = ufile->disassociate_page;
		get_page(vmf->page);
	} else {
		ret = VM_FAULT_SIGBUS;
	}
	mutex_unlock(&ufile->umap_lock);
810 | |
811 | return ret; |
812 | } |
813 | |
814 | static const struct vm_operations_struct rdma_umap_ops = { |
815 | .open = rdma_umap_open, |
816 | .close = rdma_umap_close, |
817 | .fault = rdma_umap_fault, |
818 | }; |
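
/*
 * Summary of the mmap lifecycle above: rdma_umap_open()/rdma_umap_close()
 * keep per-VMA rdma_umap_priv tracking on ufile->umaps so that
 * uverbs_user_mmap_disassociate() can find and zap every mapping when the
 * device is detached. After a zap, faults land in rdma_umap_fault(), which
 * hands back the shared zero page for read-only mappings or a per-ufile
 * zeroed "disassociate" page for writable ones, so userspace keeps running
 * on harmless memory instead of taking SIGBUS.
 */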
819 | |
820 | void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) |
821 | { |
822 | struct rdma_umap_priv *priv, *next_priv; |
823 | |
824 | lockdep_assert_held(&ufile->hw_destroy_rwsem); |
825 | |
826 | while (1) { |
827 | struct mm_struct *mm = NULL; |
828 | |
829 | /* Get an arbitrary mm pointer that hasn't been cleaned yet */ |
830 | mutex_lock(&ufile->umap_lock); |
		while (!list_empty(&ufile->umaps)) {
832 | int ret; |
833 | |
834 | priv = list_first_entry(&ufile->umaps, |
835 | struct rdma_umap_priv, list); |
836 | mm = priv->vma->vm_mm; |
837 | ret = mmget_not_zero(mm); |
838 | if (!ret) { |
				list_del_init(&priv->list);
				if (priv->entry) {
					rdma_user_mmap_entry_put(priv->entry);
					priv->entry = NULL;
				}
				mm = NULL;
				continue;
			}
			break;
		}
		mutex_unlock(&ufile->umap_lock);
850 | if (!mm) |
851 | return; |
852 | |
853 | /* |
		 * The umap_lock is nested under mmap_lock since it is used within
855 | * the vma_ops callbacks, so we have to clean the list one mm |
856 | * at a time to get the lock ordering right. Typically there |
857 | * will only be one mm, so no big deal. |
858 | */ |
859 | mmap_read_lock(mm); |
860 | mutex_lock(&ufile->umap_lock); |
861 | list_for_each_entry_safe (priv, next_priv, &ufile->umaps, |
862 | list) { |
863 | struct vm_area_struct *vma = priv->vma; |
864 | |
865 | if (vma->vm_mm != mm) |
866 | continue; |
			list_del_init(&priv->list);

			zap_vma_ptes(vma, vma->vm_start,
				     vma->vm_end - vma->vm_start);

			if (priv->entry) {
				rdma_user_mmap_entry_put(priv->entry);
				priv->entry = NULL;
			}
		}
		mutex_unlock(&ufile->umap_lock);
878 | mmap_read_unlock(mm); |
879 | mmput(mm); |
880 | } |
881 | } |
882 | |
883 | /* |
884 | * ib_uverbs_open() does not need the BKL: |
885 | * |
886 | * - the ib_uverbs_device structures are properly reference counted and |
887 | * everything else is purely local to the file being created, so |
888 | * races against other open calls are not a problem; |
889 | * - there is no ioctl method to race against; |
 * - the open method will either fail immediately with -ENXIO, or all
 *   required initialization will be done.
892 | */ |
893 | static int ib_uverbs_open(struct inode *inode, struct file *filp) |
894 | { |
895 | struct ib_uverbs_device *dev; |
896 | struct ib_uverbs_file *file; |
897 | struct ib_device *ib_dev; |
898 | int ret; |
899 | int module_dependent; |
900 | int srcu_key; |
901 | |
902 | dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev); |
	if (!refcount_inc_not_zero(&dev->refcount))
		return -ENXIO;

	get_device(&dev->dev);
	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
908 | mutex_lock(&dev->lists_mutex); |
909 | ib_dev = srcu_dereference(dev->ib_dev, |
910 | &dev->disassociate_srcu); |
911 | if (!ib_dev) { |
912 | ret = -EIO; |
913 | goto err; |
914 | } |
915 | |
	if (!rdma_dev_access_netns(ib_dev, current->nsproxy->net_ns)) {
917 | ret = -EPERM; |
918 | goto err; |
919 | } |
920 | |
921 | /* In case IB device supports disassociate ucontext, there is no hard |
922 | * dependency between uverbs device and its low level device. |
923 | */ |
924 | module_dependent = !(ib_dev->ops.disassociate_ucontext); |
925 | |
926 | if (module_dependent) { |
		if (!try_module_get(ib_dev->ops.owner)) {
928 | ret = -ENODEV; |
929 | goto err; |
930 | } |
931 | } |
932 | |
	file = kzalloc(sizeof(*file), GFP_KERNEL);
934 | if (!file) { |
935 | ret = -ENOMEM; |
936 | if (module_dependent) |
937 | goto err_module; |
938 | |
939 | goto err; |
940 | } |
941 | |
942 | file->device = dev; |
	kref_init(&file->ref);
	mutex_init(&file->ucontext_lock);

	spin_lock_init(&file->uobjects_lock);
	INIT_LIST_HEAD(&file->uobjects);
	init_rwsem(&file->hw_destroy_rwsem);
	mutex_init(&file->umap_lock);
	INIT_LIST_HEAD(&file->umaps);

	filp->private_data = file;
	list_add_tail(&file->list, &dev->uverbs_file_list);
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
956 | |
957 | setup_ufile_idr_uobject(file); |
958 | |
959 | return stream_open(inode, filp); |
960 | |
961 | err_module: |
	module_put(ib_dev->ops.owner);

err:
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
	if (refcount_dec_and_test(&dev->refcount))
		ib_uverbs_comp_dev(dev);

	put_device(&dev->dev);
971 | return ret; |
972 | } |
973 | |
974 | static int ib_uverbs_close(struct inode *inode, struct file *filp) |
975 | { |
976 | struct ib_uverbs_file *file = filp->private_data; |
977 | |
	uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);

	mutex_lock(&file->device->lists_mutex);
	list_del_init(&file->list);
	mutex_unlock(&file->device->lists_mutex);

	kref_put(&file->ref, ib_uverbs_release_file);
985 | |
986 | return 0; |
987 | } |
988 | |
989 | static const struct file_operations uverbs_fops = { |
990 | .owner = THIS_MODULE, |
991 | .write = ib_uverbs_write, |
992 | .open = ib_uverbs_open, |
993 | .release = ib_uverbs_close, |
994 | .llseek = no_llseek, |
995 | .unlocked_ioctl = ib_uverbs_ioctl, |
996 | .compat_ioctl = compat_ptr_ioctl, |
997 | }; |
998 | |
999 | static const struct file_operations uverbs_mmap_fops = { |
1000 | .owner = THIS_MODULE, |
1001 | .write = ib_uverbs_write, |
1002 | .mmap = ib_uverbs_mmap, |
1003 | .open = ib_uverbs_open, |
1004 | .release = ib_uverbs_close, |
1005 | .llseek = no_llseek, |
1006 | .unlocked_ioctl = ib_uverbs_ioctl, |
1007 | .compat_ioctl = compat_ptr_ioctl, |
1008 | }; |
1009 | |
1010 | static int ib_uverbs_get_nl_info(struct ib_device *ibdev, void *client_data, |
1011 | struct ib_client_nl_info *res) |
1012 | { |
1013 | struct ib_uverbs_device *uverbs_dev = client_data; |
1014 | int ret; |
1015 | |
1016 | if (res->port != -1) |
1017 | return -EINVAL; |
1018 | |
1019 | res->abi = ibdev->ops.uverbs_abi_ver; |
1020 | res->cdev = &uverbs_dev->dev; |
1021 | |
1022 | /* |
	 * To support DRIVER_ID binding in userspace some of the drivers need
1024 | * upgrading to expose their PCI dependent revision information |
1025 | * through get_context instead of relying on modalias matching. When |
1026 | * the drivers are fixed they can drop this flag. |
1027 | */ |
1028 | if (!ibdev->ops.uverbs_no_driver_id_binding) { |
		ret = nla_put_u32(res->nl_msg, RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID,
				  ibdev->ops.driver_id);
1031 | if (ret) |
1032 | return ret; |
1033 | } |
1034 | return 0; |
1035 | } |
1036 | |
1037 | static struct ib_client uverbs_client = { |
	.name = "uverbs",
1039 | .no_kverbs_req = true, |
1040 | .add = ib_uverbs_add_one, |
1041 | .remove = ib_uverbs_remove_one, |
1042 | .get_nl_info = ib_uverbs_get_nl_info, |
1043 | }; |
MODULE_ALIAS_RDMA_CLIENT("uverbs");
1045 | |
1046 | static ssize_t ibdev_show(struct device *device, struct device_attribute *attr, |
1047 | char *buf) |
1048 | { |
1049 | struct ib_uverbs_device *dev = |
1050 | container_of(device, struct ib_uverbs_device, dev); |
1051 | int ret = -ENODEV; |
1052 | int srcu_key; |
1053 | struct ib_device *ib_dev; |
1054 | |
	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sysfs_emit(buf, "%s\n", dev_name(&ib_dev->dev));
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
1060 | |
1061 | return ret; |
1062 | } |
1063 | static DEVICE_ATTR_RO(ibdev); |
1064 | |
1065 | static ssize_t abi_version_show(struct device *device, |
1066 | struct device_attribute *attr, char *buf) |
1067 | { |
1068 | struct ib_uverbs_device *dev = |
1069 | container_of(device, struct ib_uverbs_device, dev); |
1070 | int ret = -ENODEV; |
1071 | int srcu_key; |
1072 | struct ib_device *ib_dev; |
1073 | |
	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sysfs_emit(buf, "%u\n", ib_dev->ops.uverbs_abi_ver);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
1079 | |
1080 | return ret; |
1081 | } |
1082 | static DEVICE_ATTR_RO(abi_version); |
1083 | |
1084 | static struct attribute *ib_dev_attrs[] = { |
1085 | &dev_attr_abi_version.attr, |
1086 | &dev_attr_ibdev.attr, |
1087 | NULL, |
1088 | }; |
1089 | |
1090 | static const struct attribute_group dev_attr_group = { |
1091 | .attrs = ib_dev_attrs, |
1092 | }; |
1093 | |
1094 | static CLASS_ATTR_STRING(abi_version, S_IRUGO, |
1095 | __stringify(IB_USER_VERBS_ABI_VERSION)); |
1096 | |
1097 | static int ib_uverbs_create_uapi(struct ib_device *device, |
1098 | struct ib_uverbs_device *uverbs_dev) |
1099 | { |
1100 | struct uverbs_api *uapi; |
1101 | |
	uapi = uverbs_alloc_api(device);
	if (IS_ERR(uapi))
		return PTR_ERR(uapi);
1105 | |
1106 | uverbs_dev->uapi = uapi; |
1107 | return 0; |
1108 | } |
1109 | |
1110 | static int ib_uverbs_add_one(struct ib_device *device) |
1111 | { |
1112 | int devnum; |
1113 | dev_t base; |
1114 | struct ib_uverbs_device *uverbs_dev; |
1115 | int ret; |
1116 | |
1117 | if (!device->ops.alloc_ucontext) |
1118 | return -EOPNOTSUPP; |
1119 | |
	uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
	if (!uverbs_dev)
		return -ENOMEM;

	ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
	if (ret) {
		kfree(uverbs_dev);
		return -ENOMEM;
	}

	device_initialize(&uverbs_dev->dev);
1131 | uverbs_dev->dev.class = &uverbs_class; |
1132 | uverbs_dev->dev.parent = device->dev.parent; |
1133 | uverbs_dev->dev.release = ib_uverbs_release_dev; |
1134 | uverbs_dev->groups[0] = &dev_attr_group; |
1135 | uverbs_dev->dev.groups = uverbs_dev->groups; |
	refcount_set(&uverbs_dev->refcount, 1);
	init_completion(&uverbs_dev->comp);
	uverbs_dev->xrcd_tree = RB_ROOT;
	mutex_init(&uverbs_dev->xrcd_tree_mutex);
	mutex_init(&uverbs_dev->lists_mutex);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
	rcu_assign_pointer(uverbs_dev->ib_dev, device);
	uverbs_dev->num_comp_vectors = device->num_comp_vectors;

	devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
			       GFP_KERNEL);
1147 | if (devnum < 0) { |
1148 | ret = -ENOMEM; |
1149 | goto err; |
1150 | } |
1151 | uverbs_dev->devnum = devnum; |
1152 | if (devnum >= IB_UVERBS_NUM_FIXED_MINOR) |
1153 | base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR; |
1154 | else |
1155 | base = IB_UVERBS_BASE_DEV + devnum; |
1156 | |
1157 | ret = ib_uverbs_create_uapi(device, uverbs_dev); |
1158 | if (ret) |
1159 | goto err_uapi; |
1160 | |
1161 | uverbs_dev->dev.devt = base; |
	dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);

	cdev_init(&uverbs_dev->cdev,
		  device->ops.mmap ? &uverbs_mmap_fops : &uverbs_fops);
	uverbs_dev->cdev.owner = THIS_MODULE;

	ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
	if (ret)
		goto err_uapi;

	ib_set_client_data(device, &uverbs_client, uverbs_dev);
1173 | return 0; |
1174 | |
1175 | err_uapi: |
	ida_free(&uverbs_ida, devnum);
err:
	if (refcount_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	wait_for_completion(&uverbs_dev->comp);
	put_device(&uverbs_dev->dev);
1182 | return ret; |
1183 | } |
1184 | |
1185 | static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev, |
1186 | struct ib_device *ib_dev) |
1187 | { |
1188 | struct ib_uverbs_file *file; |
1189 | |
1190 | /* Pending running commands to terminate */ |
1191 | uverbs_disassociate_api_pre(uverbs_dev); |
1192 | |
1193 | mutex_lock(&uverbs_dev->lists_mutex); |
	while (!list_empty(&uverbs_dev->uverbs_file_list)) {
		file = list_first_entry(&uverbs_dev->uverbs_file_list,
					struct ib_uverbs_file, list);
		list_del_init(&file->list);
		kref_get(&file->ref);
1199 | |
1200 | /* We must release the mutex before going ahead and calling |
1201 | * uverbs_cleanup_ufile, as it might end up indirectly calling |
1202 | * uverbs_close, for example due to freeing the resources (e.g |
1203 | * mmput). |
1204 | */ |
		mutex_unlock(&uverbs_dev->lists_mutex);

		uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
		kref_put(&file->ref, ib_uverbs_release_file);

		mutex_lock(&uverbs_dev->lists_mutex);
	}
	mutex_unlock(&uverbs_dev->lists_mutex);

	uverbs_disassociate_api(uverbs_dev->uapi);
1215 | } |
1216 | |
1217 | static void ib_uverbs_remove_one(struct ib_device *device, void *client_data) |
1218 | { |
1219 | struct ib_uverbs_device *uverbs_dev = client_data; |
1220 | int wait_clients = 1; |
1221 | |
	cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
	ida_free(&uverbs_ida, uverbs_dev->devnum);
1224 | |
1225 | if (device->ops.disassociate_ucontext) { |
1226 | /* We disassociate HW resources and immediately return. |
		 * Userspace will see an EIO errno for all future access.
1228 | * Upon returning, ib_device may be freed internally and is not |
1229 | * valid any more. |
1230 | * uverbs_device is still available until all clients close |
1231 | * their files, then the uverbs device ref count will be zero |
1232 | * and its resources will be freed. |
1233 | * Note: At this point no more files can be opened since the |
1234 | * cdev was deleted, however active clients can still issue |
1235 | * commands and close their open files. |
1236 | */ |
		ib_uverbs_free_hw_resources(uverbs_dev, device);
		wait_clients = 0;
	}

	if (refcount_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	if (wait_clients)
		wait_for_completion(&uverbs_dev->comp);

	put_device(&uverbs_dev->dev);
1247 | } |
1248 | |
1249 | static int __init ib_uverbs_init(void) |
1250 | { |
1251 | int ret; |
1252 | |
	ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
				     IB_UVERBS_NUM_FIXED_MINOR,
				     "infiniband_verbs");
	if (ret) {
		pr_err("user_verbs: couldn't register device number\n");
		goto out;
	}

	ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
				  IB_UVERBS_NUM_DYNAMIC_MINOR,
				  "infiniband_verbs");
	if (ret) {
		pr_err("couldn't register dynamic device number\n");
1266 | goto out_alloc; |
1267 | } |
1268 | |
	ret = class_register(&uverbs_class);
	if (ret) {
		pr_err("user_verbs: couldn't create class infiniband_verbs\n");
		goto out_chrdev;
	}

	ret = class_create_file(&uverbs_class, &class_attr_abi_version.attr);
	if (ret) {
		pr_err("user_verbs: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&uverbs_client);
	if (ret) {
		pr_err("user_verbs: couldn't register client\n");
1284 | goto out_class; |
1285 | } |
1286 | |
1287 | return 0; |
1288 | |
1289 | out_class: |
	class_unregister(&uverbs_class);
1291 | |
1292 | out_chrdev: |
1293 | unregister_chrdev_region(dynamic_uverbs_dev, |
1294 | IB_UVERBS_NUM_DYNAMIC_MINOR); |
1295 | |
1296 | out_alloc: |
1297 | unregister_chrdev_region(IB_UVERBS_BASE_DEV, |
1298 | IB_UVERBS_NUM_FIXED_MINOR); |
1299 | |
1300 | out: |
1301 | return ret; |
1302 | } |
1303 | |
1304 | static void __exit ib_uverbs_cleanup(void) |
1305 | { |
	ib_unregister_client(&uverbs_client);
	class_unregister(&uverbs_class);
1308 | unregister_chrdev_region(IB_UVERBS_BASE_DEV, |
1309 | IB_UVERBS_NUM_FIXED_MINOR); |
1310 | unregister_chrdev_region(dynamic_uverbs_dev, |
1311 | IB_UVERBS_NUM_DYNAMIC_MINOR); |
1312 | mmu_notifier_synchronize(); |
1313 | } |
1314 | |
1315 | module_init(ib_uverbs_init); |
1316 | module_exit(ib_uverbs_cleanup); |
1317 | |