// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus Firmware Management Protocol Driver.
 *
 * Copyright 2016 Google Inc.
 * Copyright 2016 Linaro Ltd.
 */

#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/greybus.h>

#include "firmware.h"
#include "greybus_firmware.h"

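/*
 * Default timeout for firmware load / backend-update completions. Userspace
 * can override it with the FW_MGMT_IOC_SET_TIMEOUT_MS ioctl.
 */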
#define FW_MGMT_TIMEOUT_MS	1000

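/*
 * Per-connection state shared between the character device and the incoming
 * greybus request handlers. Lifetime is managed with a kref, as the char
 * device may still be open when the connection goes away.
 */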
struct fw_mgmt {
	struct device *parent;
	struct gb_connection *connection;
	struct kref kref;
	struct list_head node;

	/* Common id-map for interface and backend firmware requests */
	struct ida id_map;
	struct mutex mutex;
	struct completion completion;
	struct cdev cdev;
	struct device *class_device;
	dev_t dev_num;
	unsigned int timeout_jiffies;
	bool disabled; /* connection getting disabled */

	/* Interface Firmware specific fields */
	bool mode_switch_started;
	bool intf_fw_loaded;
	u8 intf_fw_request_id;
	u8 intf_fw_status;
	u16 intf_fw_major;
	u16 intf_fw_minor;

	/* Backend Firmware specific fields */
	u8 backend_fw_request_id;
	u8 backend_fw_status;
};

/*
 * Number of minor devices this driver supports.
 * Exactly one minor device is required per interface.
 */
#define NUM_MINORS	U8_MAX

static const struct class fw_mgmt_class = {
	.name = "gb_fw_mgmt",
};

static dev_t fw_mgmt_dev_num;
static DEFINE_IDA(fw_mgmt_minors_map);
static LIST_HEAD(fw_mgmt_list);
static DEFINE_MUTEX(list_mutex);

static void fw_mgmt_kref_release(struct kref *kref)
{
	struct fw_mgmt *fw_mgmt = container_of(kref, struct fw_mgmt, kref);

	ida_destroy(&fw_mgmt->id_map);
	kfree(fw_mgmt);
}

/*
 * All users of fw_mgmt take a reference (while holding list_mutex) before
 * they get a pointer to play with. The structure is freed only after the
 * last user has dropped its reference.
 */
static void put_fw_mgmt(struct fw_mgmt *fw_mgmt)
{
	kref_put(&fw_mgmt->kref, fw_mgmt_kref_release);
}

/* Caller must call put_fw_mgmt() after using struct fw_mgmt */
static struct fw_mgmt *get_fw_mgmt(struct cdev *cdev)
{
	struct fw_mgmt *fw_mgmt;

	mutex_lock(&list_mutex);

	list_for_each_entry(fw_mgmt, &fw_mgmt_list, node) {
		if (&fw_mgmt->cdev == cdev) {
			kref_get(&fw_mgmt->kref);
			goto unlock;
		}
	}

	fw_mgmt = NULL;

unlock:
	mutex_unlock(&list_mutex);

	return fw_mgmt;
}

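/*
 * Query the running interface firmware version and tag over the fw-mgmt
 * connection and fill in the ioctl structure that the caller copies back to
 * userspace.
 */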
static int fw_mgmt_interface_fw_version_operation(struct fw_mgmt *fw_mgmt,
		struct fw_mgmt_ioc_get_intf_version *fw_info)
{
	struct gb_connection *connection = fw_mgmt->connection;
	struct gb_fw_mgmt_interface_fw_version_response response;
	int ret;

	ret = gb_operation_sync(connection,
				GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(fw_mgmt->parent,
			"failed to get interface firmware version (%d)\n", ret);
		return ret;
	}

	fw_info->major = le16_to_cpu(response.major);
	fw_info->minor = le16_to_cpu(response.minor);

	ret = strscpy_pad(fw_info->firmware_tag, response.firmware_tag);
	if (ret == -E2BIG)
		dev_err(fw_mgmt->parent,
			"fw-version: truncated firmware tag: %s\n",
			fw_info->firmware_tag);

	return 0;
}

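/*
 * Ask the interface to load and validate the firmware identified by @tag.
 * This only starts the operation: the interface reports the result later via
 * a GB_FW_MGMT_TYPE_LOADED_FW request, which completes fw_mgmt->completion.
 */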
static int fw_mgmt_load_and_validate_operation(struct fw_mgmt *fw_mgmt,
					       u8 load_method, const char *tag)
{
	struct gb_fw_mgmt_load_and_validate_fw_request request;
	int ret;

	if (load_method != GB_FW_LOAD_METHOD_UNIPRO &&
	    load_method != GB_FW_LOAD_METHOD_INTERNAL) {
		dev_err(fw_mgmt->parent,
			"invalid load-method (%d)\n", load_method);
		return -EINVAL;
	}

	request.load_method = load_method;

	ret = strscpy_pad(request.firmware_tag, tag);
	if (ret == -E2BIG) {
		dev_err(fw_mgmt->parent,
			"load-and-validate: truncated firmware tag: %s\n",
			request.firmware_tag);
		return -EINVAL;
	}

	/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
	ret = ida_alloc_range(&fw_mgmt->id_map, 1, 255, GFP_KERNEL);
	if (ret < 0) {
		dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
			ret);
		return ret;
	}

	fw_mgmt->intf_fw_request_id = ret;
	fw_mgmt->intf_fw_loaded = false;
	request.request_id = ret;

	ret = gb_operation_sync(fw_mgmt->connection,
				GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW, &request,
				sizeof(request), NULL, 0);
	if (ret) {
		ida_free(&fw_mgmt->id_map, fw_mgmt->intf_fw_request_id);
		fw_mgmt->intf_fw_request_id = 0;
		dev_err(fw_mgmt->parent,
			"load and validate firmware request failed (%d)\n",
			ret);
		return ret;
	}

	return 0;
}

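/*
 * Handler for the unsolicited "firmware loaded" request sent by the
 * interface once a load-and-validate operation finishes. It records the
 * status and version, then wakes up the ioctl waiting on fw_mgmt->completion.
 */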
static int fw_mgmt_interface_fw_loaded_operation(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection);
	struct gb_fw_mgmt_loaded_fw_request *request;

	/* No pending load and validate request ? */
	if (!fw_mgmt->intf_fw_request_id) {
		dev_err(fw_mgmt->parent,
			"unexpected firmware loaded request received\n");
		return -ENODEV;
	}

	if (op->request->payload_size != sizeof(*request)) {
		dev_err(fw_mgmt->parent, "illegal size of firmware loaded request (%zu != %zu)\n",
			op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	/* Invalid request-id ? */
	if (request->request_id != fw_mgmt->intf_fw_request_id) {
		dev_err(fw_mgmt->parent, "invalid request id for firmware loaded request (%02u != %02u)\n",
			fw_mgmt->intf_fw_request_id, request->request_id);
		return -ENODEV;
	}

	ida_free(&fw_mgmt->id_map, fw_mgmt->intf_fw_request_id);
	fw_mgmt->intf_fw_request_id = 0;
	fw_mgmt->intf_fw_status = request->status;
	fw_mgmt->intf_fw_major = le16_to_cpu(request->major);
	fw_mgmt->intf_fw_minor = le16_to_cpu(request->minor);

	if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_FAILED)
		dev_err(fw_mgmt->parent,
			"failed to load interface firmware, status:%02x\n",
			fw_mgmt->intf_fw_status);
	else if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_VALIDATION_FAILED)
		dev_err(fw_mgmt->parent,
			"failed to validate interface firmware, status:%02x\n",
			fw_mgmt->intf_fw_status);
	else
		fw_mgmt->intf_fw_loaded = true;

	complete(&fw_mgmt->completion);

	return 0;
}

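/*
 * Query the version of a backend firmware identified by its tag. Unlike the
 * interface version query, the result is reported synchronously in the
 * response, together with a status code that is passed back to userspace.
 */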
static int fw_mgmt_backend_fw_version_operation(struct fw_mgmt *fw_mgmt,
		struct fw_mgmt_ioc_get_backend_version *fw_info)
{
	struct gb_connection *connection = fw_mgmt->connection;
	struct gb_fw_mgmt_backend_fw_version_request request;
	struct gb_fw_mgmt_backend_fw_version_response response;
	int ret;

	ret = strscpy_pad(request.firmware_tag, fw_info->firmware_tag);
	if (ret == -E2BIG) {
		dev_err(fw_mgmt->parent,
			"backend-fw-version: truncated firmware tag: %s\n",
			request.firmware_tag);
		return -EINVAL;
	}

	ret = gb_operation_sync(connection,
				GB_FW_MGMT_TYPE_BACKEND_FW_VERSION, &request,
				sizeof(request), &response, sizeof(response));
	if (ret) {
		dev_err(fw_mgmt->parent, "failed to get version of %s backend firmware (%d)\n",
			fw_info->firmware_tag, ret);
		return ret;
	}

	fw_info->status = response.status;

	/* Reset version as that should be non-zero only for success case */
	fw_info->major = 0;
	fw_info->minor = 0;

	switch (fw_info->status) {
	case GB_FW_BACKEND_VERSION_STATUS_SUCCESS:
		fw_info->major = le16_to_cpu(response.major);
		fw_info->minor = le16_to_cpu(response.minor);
		break;
	case GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE:
	case GB_FW_BACKEND_VERSION_STATUS_RETRY:
		break;
	case GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED:
		dev_err(fw_mgmt->parent,
			"Firmware with tag %s is not supported by Interface\n",
			fw_info->firmware_tag);
		break;
	default:
		dev_err(fw_mgmt->parent, "Invalid status received: %u\n",
			fw_info->status);
	}

	return 0;
}

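/*
 * Start a backend firmware update for the given tag. Like load-and-validate,
 * this only kicks off the operation; the interface reports the outcome later
 * via a GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED request.
 */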
static int fw_mgmt_backend_fw_update_operation(struct fw_mgmt *fw_mgmt,
					       char *tag)
{
	struct gb_fw_mgmt_backend_fw_update_request request;
	int ret;

	ret = strscpy_pad(request.firmware_tag, tag);
	if (ret == -E2BIG) {
		dev_err(fw_mgmt->parent,
			"backend-fw-update: truncated firmware tag: %s\n",
			request.firmware_tag);
		return -EINVAL;
	}

	/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
	ret = ida_alloc_range(&fw_mgmt->id_map, 1, 255, GFP_KERNEL);
	if (ret < 0) {
		dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
			ret);
		return ret;
	}

	fw_mgmt->backend_fw_request_id = ret;
	request.request_id = ret;

	ret = gb_operation_sync(fw_mgmt->connection,
				GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE, &request,
				sizeof(request), NULL, 0);
	if (ret) {
		ida_free(&fw_mgmt->id_map, fw_mgmt->backend_fw_request_id);
		fw_mgmt->backend_fw_request_id = 0;
		dev_err(fw_mgmt->parent,
			"backend %s firmware update request failed (%d)\n", tag,
			ret);
		return ret;
	}

	return 0;
}

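/*
 * Handler for the "backend firmware updated" request from the interface.
 * It records the final status and wakes up the waiting ioctl.
 */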
static int fw_mgmt_backend_fw_updated_operation(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection);
	struct gb_fw_mgmt_backend_fw_updated_request *request;

	/* No pending backend firmware update request ? */
	if (!fw_mgmt->backend_fw_request_id) {
		dev_err(fw_mgmt->parent, "unexpected backend firmware updated request received\n");
		return -ENODEV;
	}

	if (op->request->payload_size != sizeof(*request)) {
		dev_err(fw_mgmt->parent, "illegal size of backend firmware updated request (%zu != %zu)\n",
			op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	/* Invalid request-id ? */
	if (request->request_id != fw_mgmt->backend_fw_request_id) {
		dev_err(fw_mgmt->parent, "invalid request id for backend firmware updated request (%02u != %02u)\n",
			fw_mgmt->backend_fw_request_id, request->request_id);
		return -ENODEV;
	}

	ida_free(&fw_mgmt->id_map, fw_mgmt->backend_fw_request_id);
	fw_mgmt->backend_fw_request_id = 0;
	fw_mgmt->backend_fw_status = request->status;

	if ((fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_SUCCESS) &&
	    (fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_RETRY))
		dev_err(fw_mgmt->parent,
			"failed to load backend firmware: %02x\n",
			fw_mgmt->backend_fw_status);

	complete(&fw_mgmt->completion);

	return 0;
}

/* Char device fops */

static int fw_mgmt_open(struct inode *inode, struct file *file)
{
	struct fw_mgmt *fw_mgmt = get_fw_mgmt(inode->i_cdev);

	/* fw_mgmt structure can't get freed until file descriptor is closed */
	if (fw_mgmt) {
		file->private_data = fw_mgmt;
		return 0;
	}

	return -ENODEV;
}

static int fw_mgmt_release(struct inode *inode, struct file *file)
{
	struct fw_mgmt *fw_mgmt = file->private_data;

	put_fw_mgmt(fw_mgmt);
	return 0;
}

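/*
 * Handle a single fw-mgmt ioctl. Called from fw_mgmt_ioctl_unlocked() with
 * fw_mgmt->mutex held, so only one operation runs at a time.
 *
 * A minimal, hypothetical userspace sequence for an interface firmware
 * update (assuming this interface was assigned minor 0) could be:
 *
 *	fd = open("/dev/gb-fw-mgmt-0", O_RDWR);
 *	ioctl(fd, FW_MGMT_IOC_GET_INTF_FW, &intf_fw_info);
 *	(fill intf_load.load_method and intf_load.firmware_tag first)
 *	ioctl(fd, FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE, &intf_load);
 *	ioctl(fd, FW_MGMT_IOC_MODE_SWITCH);
 */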
static int fw_mgmt_ioctl(struct fw_mgmt *fw_mgmt, unsigned int cmd,
			 void __user *buf)
{
	struct fw_mgmt_ioc_get_intf_version intf_fw_info;
	struct fw_mgmt_ioc_get_backend_version backend_fw_info;
	struct fw_mgmt_ioc_intf_load_and_validate intf_load;
	struct fw_mgmt_ioc_backend_fw_update backend_update;
	unsigned int timeout;
	int ret;

	/* Reject any operations after mode-switch has started */
	if (fw_mgmt->mode_switch_started)
		return -EBUSY;

	switch (cmd) {
	case FW_MGMT_IOC_GET_INTF_FW:
		ret = fw_mgmt_interface_fw_version_operation(fw_mgmt,
							     &intf_fw_info);
		if (ret)
			return ret;

		if (copy_to_user(buf, &intf_fw_info, sizeof(intf_fw_info)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_GET_BACKEND_FW:
		if (copy_from_user(&backend_fw_info, buf,
				   sizeof(backend_fw_info)))
			return -EFAULT;

		ret = fw_mgmt_backend_fw_version_operation(fw_mgmt,
							   &backend_fw_info);
		if (ret)
			return ret;

		if (copy_to_user(buf, &backend_fw_info,
				 sizeof(backend_fw_info)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE:
		if (copy_from_user(&intf_load, buf, sizeof(intf_load)))
			return -EFAULT;

		ret = fw_mgmt_load_and_validate_operation(fw_mgmt,
				intf_load.load_method, intf_load.firmware_tag);
		if (ret)
			return ret;

		if (!wait_for_completion_timeout(&fw_mgmt->completion,
						 fw_mgmt->timeout_jiffies)) {
			dev_err(fw_mgmt->parent, "timed out waiting for firmware load and validation to finish\n");
			return -ETIMEDOUT;
		}

		intf_load.status = fw_mgmt->intf_fw_status;
		intf_load.major = fw_mgmt->intf_fw_major;
		intf_load.minor = fw_mgmt->intf_fw_minor;

		if (copy_to_user(buf, &intf_load, sizeof(intf_load)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE:
		if (copy_from_user(&backend_update, buf,
				   sizeof(backend_update)))
			return -EFAULT;

		ret = fw_mgmt_backend_fw_update_operation(fw_mgmt,
				backend_update.firmware_tag);
		if (ret)
			return ret;

		if (!wait_for_completion_timeout(&fw_mgmt->completion,
						 fw_mgmt->timeout_jiffies)) {
			dev_err(fw_mgmt->parent, "timed out waiting for backend firmware update to finish\n");
			return -ETIMEDOUT;
		}

		backend_update.status = fw_mgmt->backend_fw_status;

		if (copy_to_user(buf, &backend_update, sizeof(backend_update)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_SET_TIMEOUT_MS:
		if (get_user(timeout, (unsigned int __user *)buf))
			return -EFAULT;

		if (!timeout) {
			dev_err(fw_mgmt->parent, "timeout can't be zero\n");
			return -EINVAL;
		}

		fw_mgmt->timeout_jiffies = msecs_to_jiffies(timeout);

		return 0;
	case FW_MGMT_IOC_MODE_SWITCH:
		if (!fw_mgmt->intf_fw_loaded) {
			dev_err(fw_mgmt->parent,
				"Firmware not loaded for mode-switch\n");
			return -EPERM;
		}

		/*
		 * Disallow new ioctls as the fw-core bundle driver is going to
		 * get disconnected soon and the character device will get
		 * removed.
		 */
		fw_mgmt->mode_switch_started = true;

		ret = gb_interface_request_mode_switch(fw_mgmt->connection->intf);
		if (ret) {
			dev_err(fw_mgmt->parent, "Mode-switch failed: %d\n",
				ret);
			fw_mgmt->mode_switch_started = false;
			return ret;
		}

		return 0;
	default:
		return -ENOTTY;
	}
}

static long fw_mgmt_ioctl_unlocked(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	struct fw_mgmt *fw_mgmt = file->private_data;
	struct gb_bundle *bundle = fw_mgmt->connection->bundle;
	int ret = -ENODEV;

	/*
	 * Serialize ioctls.
	 *
	 * We don't want the user to run multiple operations in parallel, for
	 * example updating the interface firmware of the same interface from
	 * two threads at once. There is no need to do things in parallel for
	 * speed, and this keeps the code simple for now.
	 *
	 * This is also used to protect ->disabled, which is used to check if
	 * the connection is getting disconnected, so that we don't start any
	 * new operations.
	 */
	mutex_lock(&fw_mgmt->mutex);
	if (!fw_mgmt->disabled) {
		ret = gb_pm_runtime_get_sync(bundle);
		if (!ret) {
			ret = fw_mgmt_ioctl(fw_mgmt, cmd, (void __user *)arg);
			gb_pm_runtime_put_autosuspend(bundle);
		}
	}
	mutex_unlock(&fw_mgmt->mutex);

	return ret;
}

static const struct file_operations fw_mgmt_fops = {
	.owner = THIS_MODULE,
	.open = fw_mgmt_open,
	.release = fw_mgmt_release,
	.unlocked_ioctl = fw_mgmt_ioctl_unlocked,
};

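/*
 * Incoming requests from the interface on the fw-mgmt connection are
 * dispatched here to the matching handler.
 */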
int gb_fw_mgmt_request_handler(struct gb_operation *op)
{
	u8 type = op->type;

	switch (type) {
	case GB_FW_MGMT_TYPE_LOADED_FW:
		return fw_mgmt_interface_fw_loaded_operation(op);
	case GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED:
		return fw_mgmt_backend_fw_updated_operation(op);
	default:
		dev_err(&op->connection->bundle->dev,
			"unsupported request: %u\n", type);
		return -EINVAL;
	}
}

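/*
 * Set up the per-connection fw_mgmt state, enable the connection and create
 * the "gb-fw-mgmt-<minor>" character device that exposes the ioctl interface
 * to userspace.
 */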
int gb_fw_mgmt_connection_init(struct gb_connection *connection)
{
	struct fw_mgmt *fw_mgmt;
	int ret, minor;

	if (!connection)
		return 0;

	fw_mgmt = kzalloc(sizeof(*fw_mgmt), GFP_KERNEL);
	if (!fw_mgmt)
		return -ENOMEM;

	fw_mgmt->parent = &connection->bundle->dev;
	fw_mgmt->timeout_jiffies = msecs_to_jiffies(FW_MGMT_TIMEOUT_MS);
	fw_mgmt->connection = connection;

	gb_connection_set_data(connection, fw_mgmt);
	init_completion(&fw_mgmt->completion);
	ida_init(&fw_mgmt->id_map);
	mutex_init(&fw_mgmt->mutex);
	kref_init(&fw_mgmt->kref);

	mutex_lock(&list_mutex);
	list_add(&fw_mgmt->node, &fw_mgmt_list);
	mutex_unlock(&list_mutex);

	ret = gb_connection_enable(connection);
	if (ret)
		goto err_list_del;

	minor = ida_alloc_max(&fw_mgmt_minors_map, NUM_MINORS - 1, GFP_KERNEL);
	if (minor < 0) {
		ret = minor;
		goto err_connection_disable;
	}

	/* Add a char device to allow userspace to interact with fw-mgmt */
	fw_mgmt->dev_num = MKDEV(MAJOR(fw_mgmt_dev_num), minor);
	cdev_init(&fw_mgmt->cdev, &fw_mgmt_fops);

	ret = cdev_add(&fw_mgmt->cdev, fw_mgmt->dev_num, 1);
	if (ret)
		goto err_remove_ida;

	/* Add a soft link to the previously added char-dev within the bundle */
	fw_mgmt->class_device = device_create(&fw_mgmt_class, fw_mgmt->parent,
					      fw_mgmt->dev_num, NULL,
					      "gb-fw-mgmt-%d", minor);
	if (IS_ERR(fw_mgmt->class_device)) {
		ret = PTR_ERR(fw_mgmt->class_device);
		goto err_del_cdev;
	}

	return 0;

err_del_cdev:
	cdev_del(&fw_mgmt->cdev);
err_remove_ida:
	ida_free(&fw_mgmt_minors_map, minor);
err_connection_disable:
	gb_connection_disable(connection);
err_list_del:
	mutex_lock(&list_mutex);
	list_del(&fw_mgmt->node);
	mutex_unlock(&list_mutex);

	put_fw_mgmt(fw_mgmt);

	return ret;
}

void gb_fw_mgmt_connection_exit(struct gb_connection *connection)
{
	struct fw_mgmt *fw_mgmt;

	if (!connection)
		return;

	fw_mgmt = gb_connection_get_data(connection);

	device_destroy(&fw_mgmt_class, fw_mgmt->dev_num);
	cdev_del(&fw_mgmt->cdev);
	ida_free(&fw_mgmt_minors_map, MINOR(fw_mgmt->dev_num));

	/*
	 * Disallow any new ioctl operations on the char device and wait for
	 * existing ones to finish.
	 */
	mutex_lock(&fw_mgmt->mutex);
	fw_mgmt->disabled = true;
	mutex_unlock(&fw_mgmt->mutex);

	/* All pending greybus operations should have finished by now */
	gb_connection_disable(fw_mgmt->connection);

	/* Disallow new users to get access to the fw_mgmt structure */
	mutex_lock(&list_mutex);
	list_del(&fw_mgmt->node);
	mutex_unlock(&list_mutex);

	/*
	 * All current users of fw_mgmt have already taken a reference to it.
	 * We can safely drop our own reference now; the structure will be
	 * freed once the last user drops theirs.
	 */
	put_fw_mgmt(fw_mgmt);
}

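/*
 * Module init/exit helpers: register the device class and reserve the char
 * device region shared by all fw-mgmt connections.
 */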
int fw_mgmt_init(void)
{
	int ret;

	ret = class_register(&fw_mgmt_class);
	if (ret)
		return ret;

	ret = alloc_chrdev_region(&fw_mgmt_dev_num, 0, NUM_MINORS,
				  "gb_fw_mgmt");
	if (ret)
		goto err_remove_class;

	return 0;

err_remove_class:
	class_unregister(&fw_mgmt_class);
	return ret;
}

void fw_mgmt_exit(void)
{
	unregister_chrdev_region(fw_mgmt_dev_num, NUM_MINORS);
	class_unregister(&fw_mgmt_class);
	ida_destroy(&fw_mgmt_minors_map);
}