// SPDX-License-Identifier: GPL-2.0+
/*
 * Provides user-space access to the SSAM EC via the /dev/surface/aggregator
 * misc device. Intended for debugging and development.
 *
 * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
 */

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <linux/surface_aggregator/cdev.h>
#include <linux/surface_aggregator/controller.h>
#include <linux/surface_aggregator/serial_hub.h>

#define SSAM_CDEV_DEVICE_NAME	"surface_aggregator_cdev"


/* -- Main structures. ------------------------------------------------------ */

enum ssam_cdev_device_state {
	SSAM_CDEV_DEVICE_SHUTDOWN_BIT = BIT(0),
};

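/*
 * Main device state. Reference-counted (kref) because open client files may
 * outlive the platform device: on removal, SSAM_CDEV_DEVICE_SHUTDOWN_BIT is
 * set and ctrl is cleared under the write-side of @lock, while lingering
 * clients keep this structure alive until their last reference is dropped.
 */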
struct ssam_cdev {
	struct kref kref;
	struct rw_semaphore lock;

	struct device *dev;
	struct ssam_controller *ctrl;
	struct miscdevice mdev;
	unsigned long flags;

	struct rw_semaphore client_lock;	/* Guards client list. */
	struct list_head client_list;
};

struct ssam_cdev_client;

struct ssam_cdev_notifier {
	struct ssam_cdev_client *client;
	struct ssam_event_notifier nf;
};

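/*
 * Per-open-file client state: each open of the misc device gets its own
 * 4 KiB event FIFO, wait queue, and one notifier slot per SSH event target
 * category.
 */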
struct ssam_cdev_client {
	struct ssam_cdev *cdev;
	struct list_head node;

	struct mutex notifier_lock;	/* Guards notifier access for registration */
	struct ssam_cdev_notifier *notifier[SSH_NUM_EVENTS];

	struct mutex read_lock;		/* Guards FIFO buffer read access */
	struct mutex write_lock;	/* Guards FIFO buffer write access */
	DECLARE_KFIFO(buffer, u8, 4096);

	wait_queue_head_t waitq;
	struct fasync_struct *fasync;
};

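/* Free the device state once the last reference (device or client) is gone. */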
static void __ssam_cdev_release(struct kref *kref)
{
	kfree(container_of(kref, struct ssam_cdev, kref));
}

static struct ssam_cdev *ssam_cdev_get(struct ssam_cdev *cdev)
{
	if (cdev)
		kref_get(&cdev->kref);

	return cdev;
}

static void ssam_cdev_put(struct ssam_cdev *cdev)
{
	if (cdev)
		kref_put(&cdev->kref, __ssam_cdev_release);
}


/* -- Notifier handling. ---------------------------------------------------- */

static u32 ssam_cdev_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in)
{
	struct ssam_cdev_notifier *cdev_nf = container_of(nf, struct ssam_cdev_notifier, nf);
	struct ssam_cdev_client *client = cdev_nf->client;
	struct ssam_cdev_event event;
	size_t n = struct_size(&event, data, in->length);

	/* Translate event. */
	event.target_category = in->target_category;
	event.target_id = in->target_id;
	event.command_id = in->command_id;
	event.instance_id = in->instance_id;
	event.length = in->length;

	mutex_lock(&client->write_lock);

	/* Make sure we have enough space. */
	if (kfifo_avail(&client->buffer) < n) {
		dev_warn(client->cdev->dev,
			 "buffer full, dropping event (tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
			 in->target_category, in->target_id, in->command_id, in->instance_id);
		mutex_unlock(&client->write_lock);
		return 0;
	}

	/* Copy event header and payload. */
	kfifo_in(&client->buffer, (const u8 *)&event, struct_size(&event, data, 0));
	kfifo_in(&client->buffer, &in->data[0], in->length);

	mutex_unlock(&client->write_lock);

	/* Notify waiting readers. */
	kill_fasync(&client->fasync, SIGIO, POLL_IN);
	wake_up_interruptible(&client->waitq);

	/*
	 * Don't mark events as handled, this is the job of a proper driver and
	 * not the debugging interface.
	 */
	return 0;
}
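
/*
 * Each record pushed into the client FIFO above is a struct ssam_cdev_event
 * header immediately followed by event->length payload bytes. A minimal
 * user-space sketch for parsing what read() returns (illustrative only;
 * assumes complete records are returned by a single read() and that struct
 * ssam_cdev_event comes from <linux/surface_aggregator/cdev.h>):
 *
 *	u8 buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	size_t offs = 0;
 *
 *	while (len > 0 && offs + sizeof(struct ssam_cdev_event) <= (size_t)len) {
 *		const struct ssam_cdev_event *ev = (const void *)&buf[offs];
 *
 *		printf("event: tc=%#04x cid=%#04x len=%u\n",
 *		       ev->target_category, ev->command_id, ev->length);
 *		offs += sizeof(*ev) + ev->length;
 *	}
 */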

static int ssam_cdev_notifier_register(struct ssam_cdev_client *client, u8 tc, int priority)
{
	const u16 rqid = ssh_tc_to_rqid(tc);
	const u16 event = ssh_rqid_to_event(rqid);
	struct ssam_cdev_notifier *nf;
	int status;

	lockdep_assert_held_read(&client->cdev->lock);

	/* Validate notifier target category. */
	if (!ssh_rqid_is_event(rqid))
		return -EINVAL;

	mutex_lock(&client->notifier_lock);

	/* Check if the notifier has already been registered. */
	if (client->notifier[event]) {
		mutex_unlock(&client->notifier_lock);
		return -EEXIST;
	}

	/* Allocate new notifier. */
	nf = kzalloc(sizeof(*nf), GFP_KERNEL);
	if (!nf) {
		mutex_unlock(&client->notifier_lock);
		return -ENOMEM;
	}

	/*
	 * Create a dummy notifier with the minimal required fields for
	 * observer registration. Note that we can skip fully specifying event
	 * and registry here as we do not need any matching and use silent
	 * registration, which does not enable the corresponding event.
	 */
	nf->client = client;
	nf->nf.base.fn = ssam_cdev_notifier;
	nf->nf.base.priority = priority;
	nf->nf.event.id.target_category = tc;
	nf->nf.event.mask = 0;	/* Do not do any matching. */
	nf->nf.flags = SSAM_EVENT_NOTIFIER_OBSERVER;

	/* Register notifier. */
	status = ssam_notifier_register(client->cdev->ctrl, &nf->nf);
	if (status)
		kfree(nf);
	else
		client->notifier[event] = nf;

	mutex_unlock(&client->notifier_lock);
	return status;
}

static int ssam_cdev_notifier_unregister(struct ssam_cdev_client *client, u8 tc)
{
	const u16 rqid = ssh_tc_to_rqid(tc);
	const u16 event = ssh_rqid_to_event(rqid);
	int status;

	lockdep_assert_held_read(&client->cdev->lock);

	/* Validate notifier target category. */
	if (!ssh_rqid_is_event(rqid))
		return -EINVAL;

	mutex_lock(&client->notifier_lock);

	/* Check if the notifier is currently registered. */
	if (!client->notifier[event]) {
		mutex_unlock(&client->notifier_lock);
		return -ENOENT;
	}

	/* Unregister and free notifier. */
	status = ssam_notifier_unregister(client->cdev->ctrl, &client->notifier[event]->nf);
	kfree(client->notifier[event]);
	client->notifier[event] = NULL;

	mutex_unlock(&client->notifier_lock);
	return status;
}

static void ssam_cdev_notifier_unregister_all(struct ssam_cdev_client *client)
{
	int i;

	down_read(&client->cdev->lock);

	/*
	 * This function may be used during shutdown, thus we need to test for
	 * cdev->ctrl instead of the SSAM_CDEV_DEVICE_SHUTDOWN_BIT bit.
	 */
	if (client->cdev->ctrl) {
		for (i = 0; i < SSH_NUM_EVENTS; i++)
			ssam_cdev_notifier_unregister(client, i + 1);

	} else {
		int count = 0;

		/*
		 * Device has been shut down. Any notifier remaining is a bug,
		 * so warn about that as this would otherwise hardly be
		 * noticeable. Nevertheless, free them as well.
		 */
		mutex_lock(&client->notifier_lock);
		for (i = 0; i < SSH_NUM_EVENTS; i++) {
			count += !!(client->notifier[i]);
			kfree(client->notifier[i]);
			client->notifier[i] = NULL;
		}
		mutex_unlock(&client->notifier_lock);

		WARN_ON(count > 0);
	}

	up_read(&client->cdev->lock);
}


/* -- IOCTL functions. ------------------------------------------------------ */

static long ssam_cdev_request(struct ssam_cdev_client *client, struct ssam_cdev_request __user *r)
{
	struct ssam_cdev_request rqst;
	struct ssam_request spec = {};
	struct ssam_response rsp = {};
	const void __user *plddata;
	void __user *rspdata;
	int status = 0, ret = 0, tmp;

	lockdep_assert_held_read(&client->cdev->lock);

	ret = copy_struct_from_user(&rqst, sizeof(rqst), r, sizeof(*r));
	if (ret)
		goto out;

	plddata = u64_to_user_ptr(rqst.payload.data);
	rspdata = u64_to_user_ptr(rqst.response.data);

	/* Setup basic request fields. */
	spec.target_category = rqst.target_category;
	spec.target_id = rqst.target_id;
	spec.command_id = rqst.command_id;
	spec.instance_id = rqst.instance_id;
	spec.flags = 0;
	spec.length = rqst.payload.length;
	spec.payload = NULL;

	if (rqst.flags & SSAM_CDEV_REQUEST_HAS_RESPONSE)
		spec.flags |= SSAM_REQUEST_HAS_RESPONSE;

	if (rqst.flags & SSAM_CDEV_REQUEST_UNSEQUENCED)
		spec.flags |= SSAM_REQUEST_UNSEQUENCED;

	rsp.capacity = rqst.response.length;
	rsp.length = 0;
	rsp.pointer = NULL;

	/* Get request payload from user-space. */
	if (spec.length) {
		if (!plddata) {
			ret = -EINVAL;
			goto out;
		}

		/*
		 * Note: spec.length is limited to U16_MAX bytes via struct
		 * ssam_cdev_request. This is slightly larger than the
		 * theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the
		 * underlying protocol (note that nothing remotely this size
		 * should ever be allocated in any normal case). This size is
		 * validated later in ssam_request_do_sync(), for allocation
		 * the bound imposed by u16 should be enough.
		 */
		spec.payload = kzalloc(spec.length, GFP_KERNEL);
		if (!spec.payload) {
			ret = -ENOMEM;
			goto out;
		}

		if (copy_from_user((void *)spec.payload, plddata, spec.length)) {
			ret = -EFAULT;
			goto out;
		}
	}

	/* Allocate response buffer. */
	if (rsp.capacity) {
		if (!rspdata) {
			ret = -EINVAL;
			goto out;
		}

		/*
		 * Note: rsp.capacity is limited to U16_MAX bytes via struct
		 * ssam_cdev_request. This is slightly larger than the
		 * theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the
		 * underlying protocol (note that nothing remotely this size
		 * should ever be allocated in any normal case). In later use,
		 * this capacity does not have to be strictly bounded, as it
		 * is only used as an output buffer to be written to. For
		 * allocation the bound imposed by u16 should be enough.
		 */
		rsp.pointer = kzalloc(rsp.capacity, GFP_KERNEL);
		if (!rsp.pointer) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Perform request. */
	status = ssam_request_do_sync(client->cdev->ctrl, &spec, &rsp);
	if (status)
		goto out;

	/* Copy response to user-space. */
	if (rsp.length && copy_to_user(rspdata, rsp.pointer, rsp.length))
		ret = -EFAULT;

out:
	/* Always try to set response-length and status. */
	tmp = put_user(rsp.length, &r->response.length);
	if (tmp)
		ret = tmp;

	tmp = put_user(status, &r->status);
	if (tmp)
		ret = tmp;

	/* Cleanup. */
	kfree(spec.payload);
	kfree(rsp.pointer);

	return ret;
}
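
/*
 * A minimal user-space sketch of issuing a request via SSAM_CDEV_REQUEST
 * (illustrative only; the target/command IDs are placeholders, and struct
 * ssam_cdev_request plus the ioctl number come from
 * <linux/surface_aggregator/cdev.h>):
 *
 *	struct ssam_cdev_request rqst = {};
 *	__u8 response[64];                          // arbitrary buffer size
 *
 *	rqst.target_category = 0x01;                // placeholder TC
 *	rqst.target_id = 0x01;                      // placeholder TID
 *	rqst.command_id = 0x01;                     // placeholder CID
 *	rqst.instance_id = 0x00;
 *	rqst.flags = SSAM_CDEV_REQUEST_HAS_RESPONSE;
 *	rqst.response.data = (__u64)(uintptr_t)response;
 *	rqst.response.length = sizeof(response);
 *	// rqst.payload left zeroed: this request carries no payload
 *
 *	if (ioctl(fd, SSAM_CDEV_REQUEST, &rqst) || rqst.status)
 *		;  // handle failure; rqst.status holds the request status
 *	else
 *		;  // rqst.response.length bytes of response are valid
 */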

static long ssam_cdev_notif_register(struct ssam_cdev_client *client,
				     const struct ssam_cdev_notifier_desc __user *d)
{
	struct ssam_cdev_notifier_desc desc;
	long ret;

	lockdep_assert_held_read(&client->cdev->lock);

	ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
	if (ret)
		return ret;

	return ssam_cdev_notifier_register(client, desc.target_category, desc.priority);
}

static long ssam_cdev_notif_unregister(struct ssam_cdev_client *client,
				       const struct ssam_cdev_notifier_desc __user *d)
{
	struct ssam_cdev_notifier_desc desc;
	long ret;

	lockdep_assert_held_read(&client->cdev->lock);

	ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
	if (ret)
		return ret;

	return ssam_cdev_notifier_unregister(client, desc.target_category);
}

static long ssam_cdev_event_enable(struct ssam_cdev_client *client,
				   const struct ssam_cdev_event_desc __user *d)
{
	struct ssam_cdev_event_desc desc;
	struct ssam_event_registry reg;
	struct ssam_event_id id;
	long ret;

	lockdep_assert_held_read(&client->cdev->lock);

	/* Read descriptor from user-space. */
	ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
	if (ret)
		return ret;

	/* Translate descriptor. */
	reg.target_category = desc.reg.target_category;
	reg.target_id = desc.reg.target_id;
	reg.cid_enable = desc.reg.cid_enable;
	reg.cid_disable = desc.reg.cid_disable;

	id.target_category = desc.id.target_category;
	id.instance = desc.id.instance;

	/* Enable event. */
	return ssam_controller_event_enable(client->cdev->ctrl, reg, id, desc.flags);
}

static long ssam_cdev_event_disable(struct ssam_cdev_client *client,
				    const struct ssam_cdev_event_desc __user *d)
{
	struct ssam_cdev_event_desc desc;
	struct ssam_event_registry reg;
	struct ssam_event_id id;
	long ret;

	lockdep_assert_held_read(&client->cdev->lock);

	/* Read descriptor from user-space. */
	ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
	if (ret)
		return ret;

	/* Translate descriptor. */
	reg.target_category = desc.reg.target_category;
	reg.target_id = desc.reg.target_id;
	reg.cid_enable = desc.reg.cid_enable;
	reg.cid_disable = desc.reg.cid_disable;

	id.target_category = desc.id.target_category;
	id.instance = desc.id.instance;

	/* Disable event. */
	return ssam_controller_event_disable(client->cdev->ctrl, reg, id, desc.flags);
}
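
/*
 * Receiving events from user-space typically takes both of the calls above:
 * SSAM_CDEV_EVENT_ENABLE tells the EC to generate the event, while
 * SSAM_CDEV_NOTIF_REGISTER installs the observer notifier that forwards it
 * into the client FIFO. A rough sketch (illustrative only; descriptor values
 * are placeholders, types come from <linux/surface_aggregator/cdev.h>):
 *
 *	struct ssam_cdev_event_desc evt = { ... };  // registry + event ID
 *	struct ssam_cdev_notifier_desc nfd = {
 *		.target_category = evt.id.target_category,
 *		.priority = 0,
 *	};
 *
 *	ioctl(fd, SSAM_CDEV_NOTIF_REGISTER, &nfd);
 *	ioctl(fd, SSAM_CDEV_EVENT_ENABLE, &evt);
 *	// ... read(fd, ...) now yields matching events ...
 *	ioctl(fd, SSAM_CDEV_EVENT_DISABLE, &evt);
 *	ioctl(fd, SSAM_CDEV_NOTIF_UNREGISTER, &nfd);
 */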


/* -- File operations. ------------------------------------------------------ */

static int ssam_cdev_device_open(struct inode *inode, struct file *filp)
{
	struct miscdevice *mdev = filp->private_data;
	struct ssam_cdev_client *client;
	struct ssam_cdev *cdev = container_of(mdev, struct ssam_cdev, mdev);

	/* Initialize client */
	client = vzalloc(sizeof(*client));
	if (!client)
		return -ENOMEM;

	client->cdev = ssam_cdev_get(cdev);

	INIT_LIST_HEAD(&client->node);

	mutex_init(&client->notifier_lock);

	mutex_init(&client->read_lock);
	mutex_init(&client->write_lock);
	INIT_KFIFO(client->buffer);
	init_waitqueue_head(&client->waitq);

	filp->private_data = client;

	/* Attach client. */
	down_write(&cdev->client_lock);

	if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) {
		up_write(&cdev->client_lock);
		mutex_destroy(&client->write_lock);
		mutex_destroy(&client->read_lock);
		mutex_destroy(&client->notifier_lock);
		ssam_cdev_put(client->cdev);
		vfree(client);
		return -ENODEV;
	}
	list_add_tail(&client->node, &cdev->client_list);

	up_write(&cdev->client_lock);

	stream_open(inode, filp);
	return 0;
}

static int ssam_cdev_device_release(struct inode *inode, struct file *filp)
{
	struct ssam_cdev_client *client = filp->private_data;

	/* Force-unregister all remaining notifiers of this client. */
	ssam_cdev_notifier_unregister_all(client);

	/* Detach client. */
	down_write(&client->cdev->client_lock);
	list_del(&client->node);
	up_write(&client->cdev->client_lock);

	/* Free client. */
	mutex_destroy(&client->write_lock);
	mutex_destroy(&client->read_lock);

	mutex_destroy(&client->notifier_lock);

	ssam_cdev_put(client->cdev);
	vfree(client);

	return 0;
}

static long __ssam_cdev_device_ioctl(struct ssam_cdev_client *client, unsigned int cmd,
				     unsigned long arg)
{
	lockdep_assert_held_read(&client->cdev->lock);

	switch (cmd) {
	case SSAM_CDEV_REQUEST:
		return ssam_cdev_request(client, (struct ssam_cdev_request __user *)arg);

	case SSAM_CDEV_NOTIF_REGISTER:
		return ssam_cdev_notif_register(client,
						(struct ssam_cdev_notifier_desc __user *)arg);

	case SSAM_CDEV_NOTIF_UNREGISTER:
		return ssam_cdev_notif_unregister(client,
						  (struct ssam_cdev_notifier_desc __user *)arg);

	case SSAM_CDEV_EVENT_ENABLE:
		return ssam_cdev_event_enable(client, (struct ssam_cdev_event_desc __user *)arg);

	case SSAM_CDEV_EVENT_DISABLE:
		return ssam_cdev_event_disable(client, (struct ssam_cdev_event_desc __user *)arg);

	default:
		return -ENOTTY;
	}
}

static long ssam_cdev_device_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ssam_cdev_client *client = file->private_data;
	long status;

	/* Ensure that controller is valid for as long as we need it. */
	if (down_read_killable(&client->cdev->lock))
		return -ERESTARTSYS;

	if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &client->cdev->flags)) {
		up_read(&client->cdev->lock);
		return -ENODEV;
	}

	status = __ssam_cdev_device_ioctl(client, cmd, arg);

	up_read(&client->cdev->lock);
	return status;
}

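/*
 * Read copies raw event records (struct ssam_cdev_event header plus payload,
 * see the format comment next to ssam_cdev_notifier() above) out of the
 * client FIFO. Without O_NONBLOCK, the call blocks until at least one event
 * is available or the device is shut down.
 */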
static ssize_t ssam_cdev_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
{
	struct ssam_cdev_client *client = file->private_data;
	struct ssam_cdev *cdev = client->cdev;
	unsigned int copied;
	int status = 0;

	if (down_read_killable(&cdev->lock))
		return -ERESTARTSYS;

	/* Make sure we're not shut down. */
	if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) {
		up_read(&cdev->lock);
		return -ENODEV;
	}

	do {
		/* Check availability, wait if necessary. */
		if (kfifo_is_empty(&client->buffer)) {
			up_read(&cdev->lock);

			if (file->f_flags & O_NONBLOCK)
				return -EAGAIN;

			status = wait_event_interruptible(client->waitq,
							  !kfifo_is_empty(&client->buffer) ||
							  test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT,
								   &cdev->flags));
			if (status < 0)
				return status;

			if (down_read_killable(&cdev->lock))
				return -ERESTARTSYS;

			/* Need to check that we're not shut down again. */
			if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) {
				up_read(&cdev->lock);
				return -ENODEV;
			}
		}

		/* Try to read from FIFO. */
		if (mutex_lock_interruptible(&client->read_lock)) {
			up_read(&cdev->lock);
			return -ERESTARTSYS;
		}

		status = kfifo_to_user(&client->buffer, buf, count, &copied);
		mutex_unlock(&client->read_lock);

		if (status < 0) {
			up_read(&cdev->lock);
			return status;
		}

		/* We might not have gotten anything, check this here. */
		if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
			up_read(&cdev->lock);
			return -EAGAIN;
		}
	} while (copied == 0);

	up_read(&cdev->lock);
	return copied;
}

static __poll_t ssam_cdev_poll(struct file *file, struct poll_table_struct *pt)
{
	struct ssam_cdev_client *client = file->private_data;
	__poll_t events = 0;

	if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &client->cdev->flags))
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &client->waitq, pt);

	if (!kfifo_is_empty(&client->buffer))
		events |= EPOLLIN | EPOLLRDNORM;

	return events;
}

static int ssam_cdev_fasync(int fd, struct file *file, int on)
{
	struct ssam_cdev_client *client = file->private_data;

	return fasync_helper(fd, file, on, &client->fasync);
}

static const struct file_operations ssam_controller_fops = {
	.owner          = THIS_MODULE,
	.open           = ssam_cdev_device_open,
	.release        = ssam_cdev_device_release,
	.read           = ssam_cdev_read,
	.poll           = ssam_cdev_poll,
	.fasync         = ssam_cdev_fasync,
	.unlocked_ioctl = ssam_cdev_device_ioctl,
	.compat_ioctl   = ssam_cdev_device_ioctl,
	.llseek         = no_llseek,
};


/* -- Device and driver setup ----------------------------------------------- */

static int ssam_dbg_device_probe(struct platform_device *pdev)
{
	struct ssam_controller *ctrl;
	struct ssam_cdev *cdev;
	int status;

	ctrl = ssam_client_bind(&pdev->dev);
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return -ENOMEM;

	kref_init(&cdev->kref);
	init_rwsem(&cdev->lock);
	cdev->ctrl = ctrl;
	cdev->dev = &pdev->dev;

	cdev->mdev.parent   = &pdev->dev;
	cdev->mdev.minor    = MISC_DYNAMIC_MINOR;
	cdev->mdev.name     = "surface_aggregator";
	cdev->mdev.nodename = "surface/aggregator";
	cdev->mdev.fops     = &ssam_controller_fops;

	init_rwsem(&cdev->client_lock);
	INIT_LIST_HEAD(&cdev->client_list);

	status = misc_register(&cdev->mdev);
	if (status) {
		kfree(cdev);
		return status;
	}

	platform_set_drvdata(pdev, cdev);
	return 0;
}

static void ssam_dbg_device_remove(struct platform_device *pdev)
{
	struct ssam_cdev *cdev = platform_get_drvdata(pdev);
	struct ssam_cdev_client *client;

	/*
	 * Mark device as shut-down. Prevent new clients from being added and
	 * new operations from being executed.
	 */
	set_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags);

	down_write(&cdev->client_lock);

	/* Remove all notifiers registered by us. */
	list_for_each_entry(client, &cdev->client_list, node) {
		ssam_cdev_notifier_unregister_all(client);
	}

	/* Wake up async clients. */
	list_for_each_entry(client, &cdev->client_list, node) {
		kill_fasync(&client->fasync, SIGIO, POLL_HUP);
	}

	/* Wake up blocking clients. */
	list_for_each_entry(client, &cdev->client_list, node) {
		wake_up_interruptible(&client->waitq);
	}

	up_write(&cdev->client_lock);

	/*
	 * The controller is only guaranteed to be valid for as long as the
	 * driver is bound. Remove controller so that any lingering open files
	 * cannot access it any more after we're gone.
	 */
	down_write(&cdev->lock);
	cdev->ctrl = NULL;
	cdev->dev = NULL;
	up_write(&cdev->lock);

	misc_deregister(&cdev->mdev);

	ssam_cdev_put(cdev);
}

static struct platform_device *ssam_cdev_device;

static struct platform_driver ssam_cdev_driver = {
	.probe = ssam_dbg_device_probe,
	.remove_new = ssam_dbg_device_remove,
	.driver = {
		.name = SSAM_CDEV_DEVICE_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

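/*
 * There is no firmware node for this debug device: the module instantiates
 * the platform device itself so that the driver above can bind to it.
 */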
static int __init ssam_debug_init(void)
{
	int status;

	ssam_cdev_device = platform_device_alloc(SSAM_CDEV_DEVICE_NAME,
						 PLATFORM_DEVID_NONE);
	if (!ssam_cdev_device)
		return -ENOMEM;

	status = platform_device_add(ssam_cdev_device);
	if (status)
		goto err_device;

	status = platform_driver_register(&ssam_cdev_driver);
	if (status)
		goto err_driver;

	return 0;

err_driver:
	platform_device_del(ssam_cdev_device);
err_device:
	platform_device_put(ssam_cdev_device);
	return status;
}
module_init(ssam_debug_init);

static void __exit ssam_debug_exit(void)
{
	platform_driver_unregister(&ssam_cdev_driver);
	platform_device_unregister(ssam_cdev_device);
}
module_exit(ssam_debug_exit);

MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
MODULE_DESCRIPTION("User-space interface for Surface System Aggregator Module");
MODULE_LICENSE("GPL");
