1 | /* |
2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. |
3 | * Copyright (c) 2005 Voltaire, Inc. All rights reserved. |
4 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. |
5 | * Copyright (c) 2008 Cisco. All rights reserved. |
6 | * |
7 | * This software is available to you under a choice of one of two |
8 | * licenses. You may choose to be licensed under the terms of the GNU |
9 | * General Public License (GPL) Version 2, available from the file |
10 | * COPYING in the main directory of this source tree, or the |
11 | * OpenIB.org BSD license below: |
12 | * |
13 | * Redistribution and use in source and binary forms, with or |
14 | * without modification, are permitted provided that the following |
15 | * conditions are met: |
16 | * |
17 | * - Redistributions of source code must retain the above |
18 | * copyright notice, this list of conditions and the following |
19 | * disclaimer. |
20 | * |
21 | * - Redistributions in binary form must reproduce the above |
22 | * copyright notice, this list of conditions and the following |
23 | * disclaimer in the documentation and/or other materials |
24 | * provided with the distribution. |
25 | * |
26 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
27 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
28 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
29 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
30 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
31 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
32 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
33 | * SOFTWARE. |
34 | */ |
35 | |
36 | #define pr_fmt(fmt) "user_mad: " fmt |
37 | |
38 | #include <linux/module.h> |
39 | #include <linux/init.h> |
40 | #include <linux/device.h> |
41 | #include <linux/err.h> |
42 | #include <linux/fs.h> |
43 | #include <linux/cdev.h> |
44 | #include <linux/dma-mapping.h> |
45 | #include <linux/poll.h> |
46 | #include <linux/mutex.h> |
47 | #include <linux/kref.h> |
48 | #include <linux/compat.h> |
49 | #include <linux/sched.h> |
50 | #include <linux/semaphore.h> |
51 | #include <linux/slab.h> |
52 | #include <linux/nospec.h> |
53 | |
54 | #include <linux/uaccess.h> |
55 | |
56 | #include <rdma/ib_mad.h> |
57 | #include <rdma/ib_user_mad.h> |
58 | #include <rdma/rdma_netlink.h> |
59 | |
60 | #include "core_priv.h" |
61 | |
62 | MODULE_AUTHOR("Roland Dreier" ); |
63 | MODULE_DESCRIPTION("InfiniBand userspace MAD packet access" ); |
64 | MODULE_LICENSE("Dual BSD/GPL" ); |
65 | |
66 | enum { |
67 | IB_UMAD_MAX_PORTS = RDMA_MAX_PORTS, |
68 | IB_UMAD_MAX_AGENTS = 32, |
69 | |
70 | IB_UMAD_MAJOR = 231, |
71 | IB_UMAD_MINOR_BASE = 0, |
72 | IB_UMAD_NUM_FIXED_MINOR = 64, |
73 | IB_UMAD_NUM_DYNAMIC_MINOR = IB_UMAD_MAX_PORTS - IB_UMAD_NUM_FIXED_MINOR, |
74 | IB_ISSM_MINOR_BASE = IB_UMAD_NUM_FIXED_MINOR, |
75 | }; |
76 | |
77 | /* |
78 | * Our lifetime rules for these structs are the following: |
 * When a device special file is opened, we take a reference on the
80 | * ib_umad_port's struct ib_umad_device. We drop these |
81 | * references in the corresponding close(). |
82 | * |
83 | * In addition to references coming from open character devices, there |
84 | * is one more reference to each ib_umad_device representing the |
85 | * module's reference taken when allocating the ib_umad_device in |
86 | * ib_umad_add_one(). |
87 | * |
88 | * When destroying an ib_umad_device, we drop the module's reference. |
89 | */ |
90 | |
91 | struct ib_umad_port { |
92 | struct cdev cdev; |
93 | struct device dev; |
94 | struct cdev sm_cdev; |
95 | struct device sm_dev; |
96 | struct semaphore sm_sem; |
97 | |
98 | struct mutex file_mutex; |
99 | struct list_head file_list; |
100 | |
101 | struct ib_device *ib_dev; |
102 | struct ib_umad_device *umad_dev; |
103 | int dev_num; |
104 | u32 port_num; |
105 | }; |
106 | |
107 | struct ib_umad_device { |
108 | struct kref kref; |
109 | struct ib_umad_port ports[]; |
110 | }; |
111 | |
112 | struct ib_umad_file { |
113 | struct mutex mutex; |
114 | struct ib_umad_port *port; |
115 | struct list_head recv_list; |
116 | struct list_head send_list; |
117 | struct list_head port_list; |
118 | spinlock_t send_lock; |
119 | wait_queue_head_t recv_wait; |
120 | struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS]; |
121 | int agents_dead; |
122 | u8 use_pkey_index; |
123 | u8 already_used; |
124 | }; |
125 | |
126 | struct ib_umad_packet { |
127 | struct ib_mad_send_buf *msg; |
128 | struct ib_mad_recv_wc *recv_wc; |
129 | struct list_head list; |
130 | int length; |
131 | struct ib_user_mad mad; |
132 | }; |
133 | |
134 | struct ib_rmpp_mad_hdr { |
135 | struct ib_mad_hdr mad_hdr; |
136 | struct ib_rmpp_hdr rmpp_hdr; |
137 | } __packed; |
138 | |
139 | #define CREATE_TRACE_POINTS |
140 | #include <trace/events/ib_umad.h> |
141 | |
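/*
 * Minor number layout (see ib_umad_init() below): the first
 * IB_UMAD_NUM_FIXED_MINOR ports use fixed umad/issm minors under the
 * fixed major IB_UMAD_MAJOR; any further ports get minors from a
 * dynamically allocated char-dev region.
 */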
142 | static const dev_t base_umad_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE); |
143 | static const dev_t base_issm_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE) + |
144 | IB_UMAD_NUM_FIXED_MINOR; |
145 | static dev_t dynamic_umad_dev; |
146 | static dev_t dynamic_issm_dev; |
147 | |
148 | static DEFINE_IDA(umad_ida); |
149 | |
150 | static int ib_umad_add_one(struct ib_device *device); |
151 | static void ib_umad_remove_one(struct ib_device *device, void *client_data); |
152 | |
153 | static void ib_umad_dev_free(struct kref *kref) |
154 | { |
155 | struct ib_umad_device *dev = |
156 | container_of(kref, struct ib_umad_device, kref); |
157 | |
	kfree(dev);
159 | } |
160 | |
161 | static void ib_umad_dev_get(struct ib_umad_device *dev) |
162 | { |
	kref_get(&dev->kref);
164 | } |
165 | |
166 | static void ib_umad_dev_put(struct ib_umad_device *dev) |
167 | { |
	kref_put(&dev->kref, ib_umad_dev_free);
169 | } |
170 | |
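/*
 * The user-visible MAD header exists in two layouts: struct ib_user_mad_hdr
 * carries a pkey_index field, while the _old variant does not.  A file
 * switches to the new layout once P_Key index support is enabled (via
 * IB_USER_MAD_ENABLE_PKEY or IB_USER_MAD_REGISTER_AGENT2).
 */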
171 | static int hdr_size(struct ib_umad_file *file) |
172 | { |
173 | return file->use_pkey_index ? sizeof(struct ib_user_mad_hdr) : |
174 | sizeof(struct ib_user_mad_hdr_old); |
175 | } |
176 | |
177 | /* caller must hold file->mutex */ |
178 | static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id) |
179 | { |
180 | return file->agents_dead ? NULL : file->agent[id]; |
181 | } |
182 | |
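/*
 * Queue a completed packet on the owning file's receive list.  The agent
 * slot matching @agent becomes the packet's hdr.id so userspace can tell
 * which registration it arrived on.  Returns nonzero if the agent is no
 * longer registered with this file, in which case the caller frees the
 * packet.
 */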
183 | static int queue_packet(struct ib_umad_file *file, |
184 | struct ib_mad_agent *agent, |
185 | struct ib_umad_packet *packet) |
186 | { |
187 | int ret = 1; |
188 | |
189 | mutex_lock(&file->mutex); |
190 | |
191 | for (packet->mad.hdr.id = 0; |
192 | packet->mad.hdr.id < IB_UMAD_MAX_AGENTS; |
193 | packet->mad.hdr.id++) |
		if (agent == __get_agent(file, packet->mad.hdr.id)) {
			list_add_tail(&packet->list, &file->recv_list);
196 | wake_up_interruptible(&file->recv_wait); |
197 | ret = 0; |
198 | break; |
199 | } |
200 | |
	mutex_unlock(&file->mutex);
202 | |
203 | return ret; |
204 | } |
205 | |
206 | static void dequeue_send(struct ib_umad_file *file, |
207 | struct ib_umad_packet *packet) |
208 | { |
	spin_lock_irq(&file->send_lock);
	list_del(&packet->list);
	spin_unlock_irq(&file->send_lock);
212 | } |
213 | |
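/*
 * Send completion handler.  The address handle and send buffer are always
 * released here; if the send timed out waiting for a response, the packet
 * is requeued on the receive list with status ETIMEDOUT so that a read()
 * reports the timeout to userspace.
 */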
214 | static void send_handler(struct ib_mad_agent *agent, |
215 | struct ib_mad_send_wc *send_wc) |
216 | { |
217 | struct ib_umad_file *file = agent->context; |
218 | struct ib_umad_packet *packet = send_wc->send_buf->context[0]; |
219 | |
220 | dequeue_send(file, packet); |
	rdma_destroy_ah(packet->msg->ah, RDMA_DESTROY_AH_SLEEPABLE);
	ib_free_send_mad(packet->msg);
223 | |
224 | if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) { |
225 | packet->length = IB_MGMT_MAD_HDR; |
226 | packet->mad.hdr.status = ETIMEDOUT; |
227 | if (!queue_packet(file, agent, packet)) |
228 | return; |
229 | } |
	kfree(packet);
231 | } |
232 | |
233 | static void recv_handler(struct ib_mad_agent *agent, |
234 | struct ib_mad_send_buf *send_buf, |
235 | struct ib_mad_recv_wc *mad_recv_wc) |
236 | { |
237 | struct ib_umad_file *file = agent->context; |
238 | struct ib_umad_packet *packet; |
239 | |
240 | if (mad_recv_wc->wc->status != IB_WC_SUCCESS) |
241 | goto err1; |
242 | |
	packet = kzalloc(sizeof *packet, GFP_KERNEL);
244 | if (!packet) |
245 | goto err1; |
246 | |
247 | packet->length = mad_recv_wc->mad_len; |
248 | packet->recv_wc = mad_recv_wc; |
249 | |
250 | packet->mad.hdr.status = 0; |
251 | packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len; |
252 | packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp); |
253 | /* |
254 | * On OPA devices it is okay to lose the upper 16 bits of LID as this |
255 | * information is obtained elsewhere. Mask off the upper 16 bits. |
256 | */ |
	if (rdma_cap_opa_mad(agent->device, agent->port_num))
		packet->mad.hdr.lid = ib_lid_be16(0xFFFF &
						  mad_recv_wc->wc->slid);
	else
		packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
262 | packet->mad.hdr.sl = mad_recv_wc->wc->sl; |
263 | packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits; |
264 | packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index; |
265 | packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH); |
266 | if (packet->mad.hdr.grh_present) { |
267 | struct rdma_ah_attr ah_attr; |
268 | const struct ib_global_route *grh; |
269 | int ret; |
270 | |
		ret = ib_init_ah_attr_from_wc(agent->device, agent->port_num,
					      mad_recv_wc->wc,
					      mad_recv_wc->recv_buf.grh,
					      &ah_attr);
275 | if (ret) |
276 | goto err2; |
277 | |
		grh = rdma_ah_read_grh(&ah_attr);
279 | packet->mad.hdr.gid_index = grh->sgid_index; |
280 | packet->mad.hdr.hop_limit = grh->hop_limit; |
281 | packet->mad.hdr.traffic_class = grh->traffic_class; |
282 | memcpy(packet->mad.hdr.gid, &grh->dgid, 16); |
283 | packet->mad.hdr.flow_label = cpu_to_be32(grh->flow_label); |
		rdma_destroy_ah_attr(&ah_attr);
285 | } |
286 | |
287 | if (queue_packet(file, agent, packet)) |
288 | goto err2; |
289 | return; |
290 | |
291 | err2: |
	kfree(packet);
293 | err1: |
294 | ib_free_recv_mad(mad_recv_wc); |
295 | } |
296 | |
297 | static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf, |
298 | struct ib_umad_packet *packet, size_t count) |
299 | { |
300 | struct ib_mad_recv_buf *recv_buf; |
301 | int left, seg_payload, offset, max_seg_payload; |
302 | size_t seg_size; |
303 | |
304 | recv_buf = &packet->recv_wc->recv_buf; |
305 | seg_size = packet->recv_wc->mad_seg_size; |
306 | |
307 | /* We need enough room to copy the first (or only) MAD segment. */ |
308 | if ((packet->length <= seg_size && |
309 | count < hdr_size(file) + packet->length) || |
310 | (packet->length > seg_size && |
311 | count < hdr_size(file) + seg_size)) |
312 | return -EINVAL; |
313 | |
	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
315 | return -EFAULT; |
316 | |
317 | buf += hdr_size(file); |
318 | seg_payload = min_t(int, packet->length, seg_size); |
	if (copy_to_user(buf, recv_buf->mad, seg_payload))
320 | return -EFAULT; |
321 | |
322 | if (seg_payload < packet->length) { |
323 | /* |
324 | * Multipacket RMPP MAD message. Copy remainder of message. |
325 | * Note that last segment may have a shorter payload. |
326 | */ |
327 | if (count < hdr_size(file) + packet->length) { |
328 | /* |
329 | * The buffer is too small, return the first RMPP segment, |
330 | * which includes the RMPP message length. |
331 | */ |
332 | return -ENOSPC; |
333 | } |
		offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
335 | max_seg_payload = seg_size - offset; |
336 | |
337 | for (left = packet->length - seg_payload, buf += seg_payload; |
338 | left; left -= seg_payload, buf += seg_payload) { |
339 | recv_buf = container_of(recv_buf->list.next, |
340 | struct ib_mad_recv_buf, list); |
341 | seg_payload = min(left, max_seg_payload); |
			if (copy_to_user(buf, ((void *) recv_buf->mad) + offset,
					 seg_payload))
344 | return -EFAULT; |
345 | } |
346 | } |
347 | |
	trace_ib_umad_read_recv(file, &packet->mad.hdr, &recv_buf->mad->mad_hdr);
349 | |
350 | return hdr_size(file) + packet->length; |
351 | } |
352 | |
353 | static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf, |
354 | struct ib_umad_packet *packet, size_t count) |
355 | { |
356 | ssize_t size = hdr_size(file) + packet->length; |
357 | |
358 | if (count < size) |
359 | return -EINVAL; |
360 | |
	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
362 | return -EFAULT; |
363 | |
364 | buf += hdr_size(file); |
365 | |
	if (copy_to_user(buf, packet->mad.data, packet->length))
367 | return -EFAULT; |
368 | |
	trace_ib_umad_read_send(file, &packet->mad.hdr,
				(struct ib_mad_hdr *)&packet->mad.data);
371 | |
372 | return size; |
373 | } |
374 | |
375 | static ssize_t ib_umad_read(struct file *filp, char __user *buf, |
376 | size_t count, loff_t *pos) |
377 | { |
378 | struct ib_umad_file *file = filp->private_data; |
379 | struct ib_umad_packet *packet; |
380 | ssize_t ret; |
381 | |
382 | if (count < hdr_size(file)) |
383 | return -EINVAL; |
384 | |
385 | mutex_lock(&file->mutex); |
386 | |
387 | if (file->agents_dead) { |
		mutex_unlock(&file->mutex);
389 | return -EIO; |
390 | } |
391 | |
	while (list_empty(&file->recv_list)) {
		mutex_unlock(&file->mutex);
394 | |
395 | if (filp->f_flags & O_NONBLOCK) |
396 | return -EAGAIN; |
397 | |
398 | if (wait_event_interruptible(file->recv_wait, |
399 | !list_empty(&file->recv_list))) |
400 | return -ERESTARTSYS; |
401 | |
402 | mutex_lock(&file->mutex); |
403 | } |
404 | |
405 | if (file->agents_dead) { |
		mutex_unlock(&file->mutex);
407 | return -EIO; |
408 | } |
409 | |
410 | packet = list_entry(file->recv_list.next, struct ib_umad_packet, list); |
	list_del(&packet->list);

	mutex_unlock(&file->mutex);
414 | |
415 | if (packet->recv_wc) |
416 | ret = copy_recv_mad(file, buf, packet, count); |
417 | else |
418 | ret = copy_send_mad(file, buf, packet, count); |
419 | |
420 | if (ret < 0) { |
421 | /* Requeue packet */ |
422 | mutex_lock(&file->mutex); |
		list_add(&packet->list, &file->recv_list);
		mutex_unlock(&file->mutex);
425 | } else { |
426 | if (packet->recv_wc) |
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
429 | } |
430 | return ret; |
431 | } |
432 | |
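/*
 * Copy an RMPP send from userspace into the kernel send buffer.  The user
 * buffer holds the message contiguously, whereas ib_create_send_mad()
 * allocates it as per-segment chunks, so the payload is copied one RMPP
 * segment at a time.
 */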
433 | static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf) |
434 | { |
435 | int left, seg; |
436 | |
437 | /* Copy class specific header */ |
438 | if ((msg->hdr_len > IB_MGMT_RMPP_HDR) && |
	    copy_from_user(msg->mad + IB_MGMT_RMPP_HDR, buf + IB_MGMT_RMPP_HDR,
			   msg->hdr_len - IB_MGMT_RMPP_HDR))
441 | return -EFAULT; |
442 | |
443 | /* All headers are in place. Copy data segments. */ |
444 | for (seg = 1, left = msg->data_len, buf += msg->hdr_len; left > 0; |
445 | seg++, left -= msg->seg_size, buf += msg->seg_size) { |
		if (copy_from_user(ib_get_rmpp_segment(msg, seg), buf,
				   min(left, msg->seg_size)))
448 | return -EFAULT; |
449 | } |
450 | return 0; |
451 | } |
452 | |
453 | static int same_destination(struct ib_user_mad_hdr *hdr1, |
454 | struct ib_user_mad_hdr *hdr2) |
455 | { |
456 | if (!hdr1->grh_present && !hdr2->grh_present) |
457 | return (hdr1->lid == hdr2->lid); |
458 | |
459 | if (hdr1->grh_present && hdr2->grh_present) |
		return !memcmp(hdr1->gid, hdr2->gid, 16);
461 | |
462 | return 0; |
463 | } |
464 | |
465 | static int is_duplicate(struct ib_umad_file *file, |
466 | struct ib_umad_packet *packet) |
467 | { |
468 | struct ib_umad_packet *sent_packet; |
469 | struct ib_mad_hdr *sent_hdr, *hdr; |
470 | |
471 | hdr = (struct ib_mad_hdr *) packet->mad.data; |
472 | list_for_each_entry(sent_packet, &file->send_list, list) { |
473 | sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data; |
474 | |
475 | if ((hdr->tid != sent_hdr->tid) || |
476 | (hdr->mgmt_class != sent_hdr->mgmt_class)) |
477 | continue; |
478 | |
479 | /* |
480 | * No need to be overly clever here. If two new operations have |
481 | * the same TID, reject the second as a duplicate. This is more |
482 | * restrictive than required by the spec. |
483 | */ |
484 | if (!ib_response_mad(hdr)) { |
			if (!ib_response_mad(sent_hdr))
486 | return 1; |
487 | continue; |
		} else if (!ib_response_mad(sent_hdr))
489 | continue; |
490 | |
		if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
492 | return 1; |
493 | } |
494 | |
495 | return 0; |
496 | } |
497 | |
498 | static ssize_t ib_umad_write(struct file *filp, const char __user *buf, |
499 | size_t count, loff_t *pos) |
500 | { |
501 | struct ib_umad_file *file = filp->private_data; |
502 | struct ib_rmpp_mad_hdr *rmpp_mad_hdr; |
503 | struct ib_umad_packet *packet; |
504 | struct ib_mad_agent *agent; |
505 | struct rdma_ah_attr ah_attr; |
506 | struct ib_ah *ah; |
507 | __be64 *tid; |
508 | int ret, data_len, hdr_len, copy_offset, rmpp_active; |
509 | u8 base_version; |
510 | |
511 | if (count < hdr_size(file) + IB_MGMT_RMPP_HDR) |
512 | return -EINVAL; |
513 | |
	packet = kzalloc(sizeof(*packet) + IB_MGMT_RMPP_HDR, GFP_KERNEL);
515 | if (!packet) |
516 | return -ENOMEM; |
517 | |
	if (copy_from_user(&packet->mad, buf, hdr_size(file))) {
519 | ret = -EFAULT; |
520 | goto err; |
521 | } |
522 | |
523 | if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) { |
524 | ret = -EINVAL; |
525 | goto err; |
526 | } |
527 | |
528 | buf += hdr_size(file); |
529 | |
	if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) {
531 | ret = -EFAULT; |
532 | goto err; |
533 | } |
534 | |
535 | mutex_lock(&file->mutex); |
536 | |
	trace_ib_umad_write(file, &packet->mad.hdr,
			    (struct ib_mad_hdr *)&packet->mad.data);
539 | |
	agent = __get_agent(file, packet->mad.hdr.id);
541 | if (!agent) { |
542 | ret = -EIO; |
543 | goto err_up; |
544 | } |
545 | |
546 | memset(&ah_attr, 0, sizeof ah_attr); |
	ah_attr.type = rdma_ah_find_type(agent->device,
					 file->port->port_num);
	rdma_ah_set_dlid(&ah_attr, be16_to_cpu(packet->mad.hdr.lid));
	rdma_ah_set_sl(&ah_attr, packet->mad.hdr.sl);
	rdma_ah_set_path_bits(&ah_attr, packet->mad.hdr.path_bits);
	rdma_ah_set_port_num(&ah_attr, file->port->port_num);
553 | if (packet->mad.hdr.grh_present) { |
		rdma_ah_set_grh(&ah_attr, NULL,
				be32_to_cpu(packet->mad.hdr.flow_label),
				packet->mad.hdr.gid_index,
				packet->mad.hdr.hop_limit,
				packet->mad.hdr.traffic_class);
		rdma_ah_set_dgid_raw(&ah_attr, packet->mad.hdr.gid);
560 | } |
561 | |
	ah = rdma_create_user_ah(agent->qp->pd, &ah_attr, NULL);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
565 | goto err_up; |
566 | } |
567 | |
568 | rmpp_mad_hdr = (struct ib_rmpp_mad_hdr *)packet->mad.data; |
	hdr_len = ib_get_mad_data_offset(rmpp_mad_hdr->mad_hdr.mgmt_class);
570 | |
	if (ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
572 | && ib_mad_kernel_rmpp_agent(agent)) { |
573 | copy_offset = IB_MGMT_RMPP_HDR; |
		rmpp_active = ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) &
575 | IB_MGMT_RMPP_FLAG_ACTIVE; |
576 | } else { |
577 | copy_offset = IB_MGMT_MAD_HDR; |
578 | rmpp_active = 0; |
579 | } |
580 | |
581 | base_version = ((struct ib_mad_hdr *)&packet->mad.data)->base_version; |
582 | data_len = count - hdr_size(file) - hdr_len; |
	packet->msg = ib_create_send_mad(agent,
					 be32_to_cpu(packet->mad.hdr.qpn),
					 packet->mad.hdr.pkey_index, rmpp_active,
					 hdr_len, data_len, GFP_KERNEL,
					 base_version);
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
590 | goto err_ah; |
591 | } |
592 | |
593 | packet->msg->ah = ah; |
594 | packet->msg->timeout_ms = packet->mad.hdr.timeout_ms; |
595 | packet->msg->retries = packet->mad.hdr.retries; |
596 | packet->msg->context[0] = packet; |
597 | |
598 | /* Copy MAD header. Any RMPP header is already in place. */ |
599 | memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR); |
600 | |
601 | if (!rmpp_active) { |
		if (copy_from_user(packet->msg->mad + copy_offset,
				   buf + copy_offset,
				   hdr_len + data_len - copy_offset)) {
605 | ret = -EFAULT; |
606 | goto err_msg; |
607 | } |
608 | } else { |
		ret = copy_rmpp_mad(packet->msg, buf);
610 | if (ret) |
611 | goto err_msg; |
612 | } |
613 | |
614 | /* |
615 | * Set the high-order part of the transaction ID to make MADs from |
616 | * different agents unique, and allow routing responses back to the |
617 | * original requestor. |
618 | */ |
	if (!ib_response_mad(packet->msg->mad)) {
620 | tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid; |
621 | *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 | |
622 | (be64_to_cpup(tid) & 0xffffffff)); |
623 | rmpp_mad_hdr->mad_hdr.tid = *tid; |
624 | } |
625 | |
626 | if (!ib_mad_kernel_rmpp_agent(agent) |
	    && ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
	    && (ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
		spin_lock_irq(&file->send_lock);
		list_add_tail(&packet->list, &file->send_list);
		spin_unlock_irq(&file->send_lock);
632 | } else { |
		spin_lock_irq(&file->send_lock);
		ret = is_duplicate(file, packet);
		if (!ret)
			list_add_tail(&packet->list, &file->send_list);
		spin_unlock_irq(&file->send_lock);
638 | if (ret) { |
639 | ret = -EINVAL; |
640 | goto err_msg; |
641 | } |
642 | } |
643 | |
	ret = ib_post_send_mad(packet->msg, NULL);
645 | if (ret) |
646 | goto err_send; |
647 | |
	mutex_unlock(&file->mutex);
649 | return count; |
650 | |
651 | err_send: |
652 | dequeue_send(file, packet); |
653 | err_msg: |
	ib_free_send_mad(packet->msg);
err_ah:
	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
err_up:
	mutex_unlock(&file->mutex);
err:
	kfree(packet);
661 | return ret; |
662 | } |
663 | |
664 | static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait) |
665 | { |
666 | struct ib_umad_file *file = filp->private_data; |
667 | |
668 | /* we will always be able to post a MAD send */ |
669 | __poll_t mask = EPOLLOUT | EPOLLWRNORM; |
670 | |
671 | mutex_lock(&file->mutex); |
	poll_wait(filp, &file->recv_wait, wait);
673 | |
	if (!list_empty(&file->recv_list))
675 | mask |= EPOLLIN | EPOLLRDNORM; |
676 | if (file->agents_dead) |
677 | mask = EPOLLERR; |
	mutex_unlock(&file->mutex);
679 | |
680 | return mask; |
681 | } |
682 | |
683 | static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, |
684 | int compat_method_mask) |
685 | { |
686 | struct ib_user_mad_reg_req ureq; |
687 | struct ib_mad_reg_req req; |
688 | struct ib_mad_agent *agent = NULL; |
689 | int agent_id; |
690 | int ret; |
691 | |
692 | mutex_lock(&file->port->file_mutex); |
693 | mutex_lock(&file->mutex); |
694 | |
695 | if (!file->port->ib_dev) { |
696 | dev_notice(&file->port->dev, "%s: invalid device\n" , __func__); |
697 | ret = -EPIPE; |
698 | goto out; |
699 | } |
700 | |
	if (copy_from_user(&ureq, arg, sizeof ureq)) {
702 | ret = -EFAULT; |
703 | goto out; |
704 | } |
705 | |
706 | if (ureq.qpn != 0 && ureq.qpn != 1) { |
707 | dev_notice(&file->port->dev, |
708 | "%s: invalid QPN %u specified\n" , __func__, |
709 | ureq.qpn); |
710 | ret = -EINVAL; |
711 | goto out; |
712 | } |
713 | |
714 | for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id) |
		if (!__get_agent(file, agent_id))
716 | goto found; |
717 | |
718 | dev_notice(&file->port->dev, "%s: Max Agents (%u) reached\n" , __func__, |
719 | IB_UMAD_MAX_AGENTS); |
720 | |
721 | ret = -ENOMEM; |
722 | goto out; |
723 | |
724 | found: |
725 | if (ureq.mgmt_class) { |
726 | memset(&req, 0, sizeof(req)); |
727 | req.mgmt_class = ureq.mgmt_class; |
728 | req.mgmt_class_version = ureq.mgmt_class_version; |
729 | memcpy(req.oui, ureq.oui, sizeof req.oui); |
730 | |
731 | if (compat_method_mask) { |
732 | u32 *umm = (u32 *) ureq.method_mask; |
733 | int i; |
734 | |
735 | for (i = 0; i < BITS_TO_LONGS(IB_MGMT_MAX_METHODS); ++i) |
736 | req.method_mask[i] = |
737 | umm[i * 2] | ((u64) umm[i * 2 + 1] << 32); |
738 | } else |
739 | memcpy(req.method_mask, ureq.method_mask, |
740 | sizeof req.method_mask); |
741 | } |
742 | |
	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file, 0);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
750 | agent = NULL; |
751 | goto out; |
752 | } |
753 | |
754 | if (put_user(agent_id, |
755 | (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) { |
756 | ret = -EFAULT; |
757 | goto out; |
758 | } |
759 | |
760 | if (!file->already_used) { |
761 | file->already_used = 1; |
762 | if (!file->use_pkey_index) { |
763 | dev_warn(&file->port->dev, |
764 | "process %s did not enable P_Key index support.\n" , |
765 | current->comm); |
766 | dev_warn(&file->port->dev, |
767 | " Documentation/infiniband/user_mad.rst has info on the new ABI.\n" ); |
768 | } |
769 | } |
770 | |
771 | file->agent[agent_id] = agent; |
772 | ret = 0; |
773 | |
774 | out: |
	mutex_unlock(&file->mutex);

	if (ret && agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);
781 | |
782 | return ret; |
783 | } |
784 | |
785 | static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg) |
786 | { |
787 | struct ib_user_mad_reg_req2 ureq; |
788 | struct ib_mad_reg_req req; |
789 | struct ib_mad_agent *agent = NULL; |
790 | int agent_id; |
791 | int ret; |
792 | |
793 | mutex_lock(&file->port->file_mutex); |
794 | mutex_lock(&file->mutex); |
795 | |
796 | if (!file->port->ib_dev) { |
797 | dev_notice(&file->port->dev, "%s: invalid device\n" , __func__); |
798 | ret = -EPIPE; |
799 | goto out; |
800 | } |
801 | |
	if (copy_from_user(&ureq, arg, sizeof(ureq))) {
803 | ret = -EFAULT; |
804 | goto out; |
805 | } |
806 | |
807 | if (ureq.qpn != 0 && ureq.qpn != 1) { |
808 | dev_notice(&file->port->dev, "%s: invalid QPN %u specified\n" , |
809 | __func__, ureq.qpn); |
810 | ret = -EINVAL; |
811 | goto out; |
812 | } |
813 | |
814 | if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) { |
815 | dev_notice(&file->port->dev, |
816 | "%s failed: invalid registration flags specified 0x%x; supported 0x%x\n" , |
817 | __func__, ureq.flags, IB_USER_MAD_REG_FLAGS_CAP); |
818 | ret = -EINVAL; |
819 | |
820 | if (put_user((u32)IB_USER_MAD_REG_FLAGS_CAP, |
821 | (u32 __user *) (arg + offsetof(struct |
822 | ib_user_mad_reg_req2, flags)))) |
823 | ret = -EFAULT; |
824 | |
825 | goto out; |
826 | } |
827 | |
828 | for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id) |
		if (!__get_agent(file, agent_id))
830 | goto found; |
831 | |
832 | dev_notice(&file->port->dev, "%s: Max Agents (%u) reached\n" , __func__, |
833 | IB_UMAD_MAX_AGENTS); |
834 | ret = -ENOMEM; |
835 | goto out; |
836 | |
837 | found: |
838 | if (ureq.mgmt_class) { |
839 | memset(&req, 0, sizeof(req)); |
840 | req.mgmt_class = ureq.mgmt_class; |
841 | req.mgmt_class_version = ureq.mgmt_class_version; |
842 | if (ureq.oui & 0xff000000) { |
843 | dev_notice(&file->port->dev, |
844 | "%s failed: oui invalid 0x%08x\n" , __func__, |
845 | ureq.oui); |
846 | ret = -EINVAL; |
847 | goto out; |
848 | } |
849 | req.oui[2] = ureq.oui & 0x0000ff; |
850 | req.oui[1] = (ureq.oui & 0x00ff00) >> 8; |
851 | req.oui[0] = (ureq.oui & 0xff0000) >> 16; |
852 | memcpy(req.method_mask, ureq.method_mask, |
853 | sizeof(req.method_mask)); |
854 | } |
855 | |
	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file,
				      ureq.flags);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
864 | agent = NULL; |
865 | goto out; |
866 | } |
867 | |
868 | if (put_user(agent_id, |
869 | (u32 __user *)(arg + |
870 | offsetof(struct ib_user_mad_reg_req2, id)))) { |
871 | ret = -EFAULT; |
872 | goto out; |
873 | } |
874 | |
875 | if (!file->already_used) { |
876 | file->already_used = 1; |
877 | file->use_pkey_index = 1; |
878 | } |
879 | |
880 | file->agent[agent_id] = agent; |
881 | ret = 0; |
882 | |
883 | out: |
	mutex_unlock(&file->mutex);

	if (ret && agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);
890 | |
891 | return ret; |
892 | } |
893 | |
894 | |
895 | static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg) |
896 | { |
897 | struct ib_mad_agent *agent = NULL; |
898 | u32 id; |
899 | int ret = 0; |
900 | |
901 | if (get_user(id, arg)) |
902 | return -EFAULT; |
903 | if (id >= IB_UMAD_MAX_AGENTS) |
904 | return -EINVAL; |
905 | |
906 | mutex_lock(&file->port->file_mutex); |
907 | mutex_lock(&file->mutex); |
908 | |
909 | id = array_index_nospec(id, IB_UMAD_MAX_AGENTS); |
910 | if (!__get_agent(file, id)) { |
911 | ret = -EINVAL; |
912 | goto out; |
913 | } |
914 | |
915 | agent = file->agent[id]; |
916 | file->agent[id] = NULL; |
917 | |
918 | out: |
	mutex_unlock(&file->mutex);

	if (agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);
925 | |
926 | return ret; |
927 | } |
928 | |
929 | static long ib_umad_enable_pkey(struct ib_umad_file *file) |
930 | { |
931 | int ret = 0; |
932 | |
933 | mutex_lock(&file->mutex); |
934 | if (file->already_used) |
935 | ret = -EINVAL; |
936 | else |
937 | file->use_pkey_index = 1; |
	mutex_unlock(&file->mutex);
939 | |
940 | return ret; |
941 | } |
942 | |
943 | static long ib_umad_ioctl(struct file *filp, unsigned int cmd, |
944 | unsigned long arg) |
945 | { |
946 | switch (cmd) { |
947 | case IB_USER_MAD_REGISTER_AGENT: |
		return ib_umad_reg_agent(filp->private_data, (void __user *) arg, 0);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg);
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	case IB_USER_MAD_REGISTER_AGENT2:
		return ib_umad_reg_agent2(filp->private_data, (void __user *) arg);
955 | default: |
956 | return -ENOIOCTLCMD; |
957 | } |
958 | } |
959 | |
960 | #ifdef CONFIG_COMPAT |
961 | static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd, |
962 | unsigned long arg) |
963 | { |
964 | switch (cmd) { |
965 | case IB_USER_MAD_REGISTER_AGENT: |
		return ib_umad_reg_agent(filp->private_data, compat_ptr(arg), 1);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg));
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	case IB_USER_MAD_REGISTER_AGENT2:
		return ib_umad_reg_agent2(filp->private_data, compat_ptr(arg));
973 | default: |
974 | return -ENOIOCTLCMD; |
975 | } |
976 | } |
977 | #endif |
978 | |
979 | /* |
980 | * ib_umad_open() does not need the BKL: |
981 | * |
982 | * - the ib_umad_port structures are properly reference counted, and |
983 | * everything else is purely local to the file being created, so |
984 | * races against other open calls are not a problem; |
985 | * - the ioctl method does not affect any global state outside of the |
986 | * file structure being operated on; |
987 | */ |
988 | static int ib_umad_open(struct inode *inode, struct file *filp) |
989 | { |
990 | struct ib_umad_port *port; |
991 | struct ib_umad_file *file; |
992 | int ret = 0; |
993 | |
994 | port = container_of(inode->i_cdev, struct ib_umad_port, cdev); |
995 | |
996 | mutex_lock(&port->file_mutex); |
997 | |
998 | if (!port->ib_dev) { |
999 | ret = -ENXIO; |
1000 | goto out; |
1001 | } |
1002 | |
	if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) {
1004 | ret = -EPERM; |
1005 | goto out; |
1006 | } |
1007 | |
	file = kzalloc(sizeof(*file), GFP_KERNEL);
1009 | if (!file) { |
1010 | ret = -ENOMEM; |
1011 | goto out; |
1012 | } |
1013 | |
1014 | mutex_init(&file->mutex); |
1015 | spin_lock_init(&file->send_lock); |
	INIT_LIST_HEAD(&file->recv_list);
	INIT_LIST_HEAD(&file->send_list);
1018 | init_waitqueue_head(&file->recv_wait); |
1019 | |
1020 | file->port = port; |
1021 | filp->private_data = file; |
1022 | |
	list_add_tail(&file->port_list, &port->file_list);
1024 | |
1025 | stream_open(inode, filp); |
1026 | out: |
	mutex_unlock(&port->file_mutex);
1028 | return ret; |
1029 | } |
1030 | |
1031 | static int ib_umad_close(struct inode *inode, struct file *filp) |
1032 | { |
1033 | struct ib_umad_file *file = filp->private_data; |
1034 | struct ib_umad_packet *packet, *tmp; |
1035 | int already_dead; |
1036 | int i; |
1037 | |
1038 | mutex_lock(&file->port->file_mutex); |
1039 | mutex_lock(&file->mutex); |
1040 | |
1041 | already_dead = file->agents_dead; |
1042 | file->agents_dead = 1; |
1043 | |
1044 | list_for_each_entry_safe(packet, tmp, &file->recv_list, list) { |
1045 | if (packet->recv_wc) |
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
1048 | } |
1049 | |
	list_del(&file->port_list);

	mutex_unlock(&file->mutex);
1053 | |
1054 | if (!already_dead) |
1055 | for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i) |
1056 | if (file->agent[i]) |
				ib_unregister_mad_agent(file->agent[i]);
1058 | |
	mutex_unlock(&file->port->file_mutex);
	mutex_destroy(&file->mutex);
	kfree(file);
1062 | return 0; |
1063 | } |
1064 | |
1065 | static const struct file_operations umad_fops = { |
1066 | .owner = THIS_MODULE, |
1067 | .read = ib_umad_read, |
1068 | .write = ib_umad_write, |
1069 | .poll = ib_umad_poll, |
1070 | .unlocked_ioctl = ib_umad_ioctl, |
1071 | #ifdef CONFIG_COMPAT |
1072 | .compat_ioctl = ib_umad_compat_ioctl, |
1073 | #endif |
1074 | .open = ib_umad_open, |
1075 | .release = ib_umad_close, |
1076 | .llseek = no_llseek, |
1077 | }; |
1078 | |
1079 | static int ib_umad_sm_open(struct inode *inode, struct file *filp) |
1080 | { |
1081 | struct ib_umad_port *port; |
1082 | struct ib_port_modify props = { |
1083 | .set_port_cap_mask = IB_PORT_SM |
1084 | }; |
1085 | int ret; |
1086 | |
1087 | port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev); |
1088 | |
1089 | if (filp->f_flags & O_NONBLOCK) { |
		if (down_trylock(&port->sm_sem)) {
1091 | ret = -EAGAIN; |
1092 | goto fail; |
1093 | } |
1094 | } else { |
		if (down_interruptible(&port->sm_sem)) {
1096 | ret = -ERESTARTSYS; |
1097 | goto fail; |
1098 | } |
1099 | } |
1100 | |
	if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) {
1102 | ret = -EPERM; |
1103 | goto err_up_sem; |
1104 | } |
1105 | |
	ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
1107 | if (ret) |
1108 | goto err_up_sem; |
1109 | |
1110 | filp->private_data = port; |
1111 | |
1112 | nonseekable_open(inode, filp); |
1113 | return 0; |
1114 | |
1115 | err_up_sem: |
	up(&port->sm_sem);
1117 | |
1118 | fail: |
1119 | return ret; |
1120 | } |
1121 | |
1122 | static int ib_umad_sm_close(struct inode *inode, struct file *filp) |
1123 | { |
1124 | struct ib_umad_port *port = filp->private_data; |
1125 | struct ib_port_modify props = { |
1126 | .clr_port_cap_mask = IB_PORT_SM |
1127 | }; |
1128 | int ret = 0; |
1129 | |
1130 | mutex_lock(&port->file_mutex); |
1131 | if (port->ib_dev) |
		ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	mutex_unlock(&port->file_mutex);

	up(&port->sm_sem);
1136 | |
1137 | return ret; |
1138 | } |
1139 | |
1140 | static const struct file_operations umad_sm_fops = { |
1141 | .owner = THIS_MODULE, |
1142 | .open = ib_umad_sm_open, |
1143 | .release = ib_umad_sm_close, |
1144 | .llseek = no_llseek, |
1145 | }; |
1146 | |
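/*
 * Resolve a port number to its ib_umad_port.  Used by the get_nl_info
 * callbacks below, which report the umad/issm character devices for a
 * port to the RDMA netlink char-dev query interface.
 */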
1147 | static struct ib_umad_port *get_port(struct ib_device *ibdev, |
1148 | struct ib_umad_device *umad_dev, |
1149 | u32 port) |
1150 | { |
1151 | if (!umad_dev) |
		return ERR_PTR(-EOPNOTSUPP);
	if (!rdma_is_port_valid(ibdev, port))
		return ERR_PTR(-EINVAL);
	if (!rdma_cap_ib_mad(ibdev, port))
		return ERR_PTR(-EOPNOTSUPP);

	return &umad_dev->ports[port - rdma_start_port(ibdev)];
1159 | } |
1160 | |
1161 | static int ib_umad_get_nl_info(struct ib_device *ibdev, void *client_data, |
1162 | struct ib_client_nl_info *res) |
1163 | { |
	struct ib_umad_port *port = get_port(ibdev, client_data, res->port);

	if (IS_ERR(port))
		return PTR_ERR(port);
1168 | |
1169 | res->abi = IB_USER_MAD_ABI_VERSION; |
1170 | res->cdev = &port->dev; |
1171 | return 0; |
1172 | } |
1173 | |
1174 | static struct ib_client umad_client = { |
1175 | .name = "umad" , |
1176 | .add = ib_umad_add_one, |
1177 | .remove = ib_umad_remove_one, |
1178 | .get_nl_info = ib_umad_get_nl_info, |
1179 | }; |
1180 | MODULE_ALIAS_RDMA_CLIENT("umad" ); |
1181 | |
1182 | static int ib_issm_get_nl_info(struct ib_device *ibdev, void *client_data, |
1183 | struct ib_client_nl_info *res) |
1184 | { |
	struct ib_umad_port *port = get_port(ibdev, client_data, res->port);

	if (IS_ERR(port))
		return PTR_ERR(port);
1189 | |
1190 | res->abi = IB_USER_MAD_ABI_VERSION; |
1191 | res->cdev = &port->sm_dev; |
1192 | return 0; |
1193 | } |
1194 | |
1195 | static struct ib_client issm_client = { |
1196 | .name = "issm" , |
1197 | .get_nl_info = ib_issm_get_nl_info, |
1198 | }; |
1199 | MODULE_ALIAS_RDMA_CLIENT("issm" ); |
1200 | |
1201 | static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr, |
1202 | char *buf) |
1203 | { |
1204 | struct ib_umad_port *port = dev_get_drvdata(dev); |
1205 | |
1206 | if (!port) |
1207 | return -ENODEV; |
1208 | |
1209 | return sysfs_emit(buf, fmt: "%s\n" , dev_name(dev: &port->ib_dev->dev)); |
1210 | } |
1211 | static DEVICE_ATTR_RO(ibdev); |
1212 | |
1213 | static ssize_t port_show(struct device *dev, struct device_attribute *attr, |
1214 | char *buf) |
1215 | { |
1216 | struct ib_umad_port *port = dev_get_drvdata(dev); |
1217 | |
1218 | if (!port) |
1219 | return -ENODEV; |
1220 | |
1221 | return sysfs_emit(buf, fmt: "%d\n" , port->port_num); |
1222 | } |
1223 | static DEVICE_ATTR_RO(port); |
1224 | |
1225 | static struct attribute *umad_class_dev_attrs[] = { |
1226 | &dev_attr_ibdev.attr, |
1227 | &dev_attr_port.attr, |
1228 | NULL, |
1229 | }; |
1230 | ATTRIBUTE_GROUPS(umad_class_dev); |
1231 | |
1232 | static char *umad_devnode(const struct device *dev, umode_t *mode) |
1233 | { |
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
1235 | } |
1236 | |
1237 | static ssize_t abi_version_show(const struct class *class, |
1238 | const struct class_attribute *attr, char *buf) |
1239 | { |
1240 | return sysfs_emit(buf, fmt: "%d\n" , IB_USER_MAD_ABI_VERSION); |
1241 | } |
1242 | static CLASS_ATTR_RO(abi_version); |
1243 | |
1244 | static struct attribute *umad_class_attrs[] = { |
1245 | &class_attr_abi_version.attr, |
1246 | NULL, |
1247 | }; |
1248 | ATTRIBUTE_GROUPS(umad_class); |
1249 | |
1250 | static struct class umad_class = { |
1251 | .name = "infiniband_mad" , |
1252 | .devnode = umad_devnode, |
1253 | .class_groups = umad_class_groups, |
1254 | .dev_groups = umad_class_dev_groups, |
1255 | }; |
1256 | |
1257 | static void ib_umad_release_port(struct device *device) |
1258 | { |
	struct ib_umad_port *port = dev_get_drvdata(device);
	struct ib_umad_device *umad_dev = port->umad_dev;

	ib_umad_dev_put(umad_dev);
1263 | } |
1264 | |
1265 | static void ib_umad_init_port_dev(struct device *dev, |
1266 | struct ib_umad_port *port, |
1267 | const struct ib_device *device) |
1268 | { |
1269 | device_initialize(dev); |
	ib_umad_dev_get(port->umad_dev);
	dev->class = &umad_class;
	dev->parent = device->dev.parent;
	dev_set_drvdata(dev, port);
1274 | dev->release = ib_umad_release_port; |
1275 | } |
1276 | |
1277 | static int ib_umad_init_port(struct ib_device *device, int port_num, |
1278 | struct ib_umad_device *umad_dev, |
1279 | struct ib_umad_port *port) |
1280 | { |
1281 | int devnum; |
1282 | dev_t base_umad; |
1283 | dev_t base_issm; |
1284 | int ret; |
1285 | |
	devnum = ida_alloc_max(&umad_ida, IB_UMAD_MAX_PORTS - 1, GFP_KERNEL);
1287 | if (devnum < 0) |
1288 | return -1; |
1289 | port->dev_num = devnum; |
1290 | if (devnum >= IB_UMAD_NUM_FIXED_MINOR) { |
1291 | base_umad = dynamic_umad_dev + devnum - IB_UMAD_NUM_FIXED_MINOR; |
1292 | base_issm = dynamic_issm_dev + devnum - IB_UMAD_NUM_FIXED_MINOR; |
1293 | } else { |
1294 | base_umad = devnum + base_umad_dev; |
1295 | base_issm = devnum + base_issm_dev; |
1296 | } |
1297 | |
1298 | port->ib_dev = device; |
1299 | port->umad_dev = umad_dev; |
1300 | port->port_num = port_num; |
	sema_init(&port->sm_sem, 1);
	mutex_init(&port->file_mutex);
	INIT_LIST_HEAD(&port->file_list);
1304 | |
	ib_umad_init_port_dev(&port->dev, port, device);
	port->dev.devt = base_umad;
	dev_set_name(&port->dev, "umad%d", port->dev_num);
1308 | cdev_init(&port->cdev, &umad_fops); |
1309 | port->cdev.owner = THIS_MODULE; |
1310 | |
	ret = cdev_device_add(&port->cdev, &port->dev);
1312 | if (ret) |
1313 | goto err_cdev; |
1314 | |
	ib_umad_init_port_dev(&port->sm_dev, port, device);
	port->sm_dev.devt = base_issm;
	dev_set_name(&port->sm_dev, "issm%d", port->dev_num);
1318 | cdev_init(&port->sm_cdev, &umad_sm_fops); |
1319 | port->sm_cdev.owner = THIS_MODULE; |
1320 | |
	ret = cdev_device_add(&port->sm_cdev, &port->sm_dev);
1322 | if (ret) |
1323 | goto err_dev; |
1324 | |
1325 | return 0; |
1326 | |
1327 | err_dev: |
	put_device(&port->sm_dev);
	cdev_device_del(&port->cdev, &port->dev);
err_cdev:
	put_device(&port->dev);
	ida_free(&umad_ida, devnum);
1333 | return ret; |
1334 | } |
1335 | |
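/*
 * Tear down a port: remove the character devices so no new files can be
 * opened, then, under file_mutex, clear ib_dev and mark every open file's
 * agents dead before unregistering them.  The final put_device() calls
 * balance the device_initialize() done in ib_umad_init_port_dev().
 */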
1336 | static void ib_umad_kill_port(struct ib_umad_port *port) |
1337 | { |
1338 | struct ib_umad_file *file; |
1339 | int id; |
1340 | |
	cdev_device_del(&port->sm_cdev, &port->sm_dev);
	cdev_device_del(&port->cdev, &port->dev);
1343 | |
1344 | mutex_lock(&port->file_mutex); |
1345 | |
	/* Mark ib_dev NULL to block ioctls and other file ops from
	 * progressing further.
	 */
1349 | port->ib_dev = NULL; |
1350 | |
1351 | list_for_each_entry(file, &port->file_list, port_list) { |
1352 | mutex_lock(&file->mutex); |
1353 | file->agents_dead = 1; |
1354 | wake_up_interruptible(&file->recv_wait); |
		mutex_unlock(&file->mutex);
1356 | |
1357 | for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id) |
1358 | if (file->agent[id]) |
				ib_unregister_mad_agent(file->agent[id]);
1360 | } |
1361 | |
	mutex_unlock(&port->file_mutex);

	ida_free(&umad_ida, port->dev_num);
1365 | |
1366 | /* balances device_initialize() */ |
	put_device(&port->sm_dev);
	put_device(&port->dev);
1369 | } |
1370 | |
1371 | static int ib_umad_add_one(struct ib_device *device) |
1372 | { |
1373 | struct ib_umad_device *umad_dev; |
1374 | int s, e, i; |
1375 | int count = 0; |
1376 | int ret; |
1377 | |
1378 | s = rdma_start_port(device); |
1379 | e = rdma_end_port(device); |
1380 | |
1381 | umad_dev = kzalloc(struct_size(umad_dev, ports, |
1382 | size_add(size_sub(e, s), 1)), |
1383 | GFP_KERNEL); |
1384 | if (!umad_dev) |
1385 | return -ENOMEM; |
1386 | |
	kref_init(&umad_dev->kref);
1388 | for (i = s; i <= e; ++i) { |
		if (!rdma_cap_ib_mad(device, i))
1390 | continue; |
1391 | |
		ret = ib_umad_init_port(device, i, umad_dev,
					&umad_dev->ports[i - s]);
1394 | if (ret) |
1395 | goto err; |
1396 | |
1397 | count++; |
1398 | } |
1399 | |
1400 | if (!count) { |
1401 | ret = -EOPNOTSUPP; |
1402 | goto free; |
1403 | } |
1404 | |
	ib_set_client_data(device, &umad_client, umad_dev);
1406 | |
1407 | return 0; |
1408 | |
1409 | err: |
1410 | while (--i >= s) { |
		if (!rdma_cap_ib_mad(device, i))
1412 | continue; |
1413 | |
		ib_umad_kill_port(&umad_dev->ports[i - s]);
1415 | } |
1416 | free: |
1417 | /* balances kref_init */ |
	ib_umad_dev_put(umad_dev);
1419 | return ret; |
1420 | } |
1421 | |
1422 | static void ib_umad_remove_one(struct ib_device *device, void *client_data) |
1423 | { |
1424 | struct ib_umad_device *umad_dev = client_data; |
1425 | unsigned int i; |
1426 | |
1427 | rdma_for_each_port (device, i) { |
		if (rdma_cap_ib_mad(device, i))
			ib_umad_kill_port(
				&umad_dev->ports[i - rdma_start_port(device)]);
1431 | } |
1432 | /* balances kref_init() */ |
	ib_umad_dev_put(umad_dev);
1434 | } |
1435 | |
1436 | static int __init ib_umad_init(void) |
1437 | { |
1438 | int ret; |
1439 | |
1440 | ret = register_chrdev_region(base_umad_dev, |
1441 | IB_UMAD_NUM_FIXED_MINOR * 2, |
1442 | umad_class.name); |
1443 | if (ret) { |
1444 | pr_err("couldn't register device number\n" ); |
1445 | goto out; |
1446 | } |
1447 | |
1448 | ret = alloc_chrdev_region(&dynamic_umad_dev, 0, |
1449 | IB_UMAD_NUM_DYNAMIC_MINOR * 2, |
1450 | umad_class.name); |
1451 | if (ret) { |
1452 | pr_err("couldn't register dynamic device number\n" ); |
1453 | goto out_alloc; |
1454 | } |
1455 | dynamic_issm_dev = dynamic_umad_dev + IB_UMAD_NUM_DYNAMIC_MINOR; |
1456 | |
	ret = class_register(&umad_class);
1458 | if (ret) { |
1459 | pr_err("couldn't create class infiniband_mad\n" ); |
1460 | goto out_chrdev; |
1461 | } |
1462 | |
	ret = ib_register_client(&umad_client);
1464 | if (ret) |
1465 | goto out_class; |
1466 | |
	ret = ib_register_client(&issm_client);
1468 | if (ret) |
1469 | goto out_client; |
1470 | |
1471 | return 0; |
1472 | |
1473 | out_client: |
	ib_unregister_client(&umad_client);
1475 | out_class: |
	class_unregister(&umad_class);
1477 | |
1478 | out_chrdev: |
1479 | unregister_chrdev_region(dynamic_umad_dev, |
1480 | IB_UMAD_NUM_DYNAMIC_MINOR * 2); |
1481 | |
1482 | out_alloc: |
1483 | unregister_chrdev_region(base_umad_dev, |
1484 | IB_UMAD_NUM_FIXED_MINOR * 2); |
1485 | |
1486 | out: |
1487 | return ret; |
1488 | } |
1489 | |
1490 | static void __exit ib_umad_cleanup(void) |
1491 | { |
	ib_unregister_client(&issm_client);
	ib_unregister_client(&umad_client);
	class_unregister(&umad_class);
1495 | unregister_chrdev_region(base_umad_dev, |
1496 | IB_UMAD_NUM_FIXED_MINOR * 2); |
1497 | unregister_chrdev_region(dynamic_umad_dev, |
1498 | IB_UMAD_NUM_DYNAMIC_MINOR * 2); |
1499 | } |
1500 | |
1501 | module_init(ib_umad_init); |
1502 | module_exit(ib_umad_cleanup); |
1503 | |