// SPDX-License-Identifier: GPL-2.0-only
/* Industrial I/O event handling
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>

/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait: wait queue to allow blocking reads of events
 * @det_events: list of detected events
 * @dev_attr_list: list of event interface sysfs attributes
 * @flags: file operations related flags including busy flag.
 * @group: event interface sysfs attribute group
 * @read_lock: lock to protect kfifo read operations
 * @ioctl_handler: handler for event ioctl() calls
 */
struct iio_event_interface {
	wait_queue_head_t	wait;
	DECLARE_KFIFO(det_events, struct iio_event_data, 16);

	struct list_head	dev_attr_list;
	unsigned long		flags;
	struct attribute_group	group;
	struct mutex		read_lock;
	struct iio_ioctl_handler	ioctl_handler;
};

bool iio_event_enabled(const struct iio_event_interface *ev_int)
{
	return !!test_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
}

/**
 * iio_push_event() - try to add event to the list for userspace reading
 * @indio_dev: IIO device structure
 * @ev_code: What event
 * @timestamp: When the event occurred
 *
 * Note: The caller must make sure that this function is not running
 * concurrently for the same indio_dev more than once.
 *
 * This function may be safely used as soon as a valid reference to iio_dev has
 * been obtained via iio_device_alloc(), but any events that are submitted
 * before iio_device_register() has successfully completed will be silently
 * discarded.
 **/
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	struct iio_event_data ev;
	int copied;

	if (!ev_int)
		return 0;

	/* Does anyone care? */
	if (iio_event_enabled(ev_int)) {

		ev.id = ev_code;
		ev.timestamp = timestamp;

		copied = kfifo_put(&ev_int->det_events, ev);
		if (copied != 0)
			wake_up_poll(&ev_int->wait, EPOLLIN);
	}

	return 0;
}
EXPORT_SYMBOL(iio_push_event);
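
/*
 * Illustrative (hypothetical driver) usage, assuming a proximity channel
 * that signals a rising threshold event from its interrupt handler:
 *
 *	iio_push_event(indio_dev,
 *		       IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
 *					    IIO_EV_TYPE_THRESH,
 *					    IIO_EV_DIR_RISING),
 *		       iio_get_time_ns(indio_dev));
 */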

/**
 * iio_event_poll() - poll the event queue to find out if it has data
 * @filep: File structure pointer to identify the device
 * @wait: Poll table pointer to add the wait queue on
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 *	   or a negative error code on failure
 */
static __poll_t iio_event_poll(struct file *filep,
			       struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	__poll_t events = 0;

	if (!indio_dev->info)
		return events;

	poll_wait(filep, &ev_int->wait, wait);

	if (!kfifo_is_empty(&ev_int->det_events))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	unsigned int copied;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (count < sizeof(struct iio_event_data))
		return -EINVAL;

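	/*
	 * Loop until at least one event has been copied to userspace or,
	 * for non-blocking file descriptors, until we can report -EAGAIN.
	 */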
	do {
		if (kfifo_is_empty(&ev_int->det_events)) {
			if (filep->f_flags & O_NONBLOCK)
				return -EAGAIN;

			ret = wait_event_interruptible(ev_int->wait,
					!kfifo_is_empty(&ev_int->det_events) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		if (mutex_lock_interruptible(&ev_int->read_lock))
			return -ERESTARTSYS;
		ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
		mutex_unlock(&ev_int->read_lock);

		if (ret)
			return ret;

		/*
		 * If we couldn't read anything from the fifo (a different
		 * thread might have been faster), we return -EAGAIN if the
		 * file descriptor is non-blocking, otherwise we go back to
		 * sleep and wait for more data to arrive.
		 */
		if (copied == 0 && (filep->f_flags & O_NONBLOCK))
			return -EAGAIN;

	} while (copied == 0);

	return copied;
}

static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;

	clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);

	iio_device_put(indio_dev);

	return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
	.read = iio_event_chrdev_read,
	.poll = iio_event_poll,
	.release = iio_event_chrdev_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

static int iio_event_getfd(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	int fd;

	if (ev_int == NULL)
		return -ENODEV;

	fd = mutex_lock_interruptible(&iio_dev_opaque->mlock);
	if (fd)
		return fd;

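	/*
	 * Only one event file descriptor may be open at a time; the busy
	 * bit is cleared again when that descriptor is released.
	 */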
	if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		fd = -EBUSY;
		goto unlock;
	}

	iio_device_get(indio_dev);

	fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
			      indio_dev, O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		iio_device_put(indio_dev);
	} else {
		kfifo_reset_out(&ev_int->det_events);
	}

unlock:
	mutex_unlock(&iio_dev_opaque->mlock);
	return fd;
}

static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
	[IIO_EV_TYPE_CHANGE] = "change",
	[IIO_EV_TYPE_MAG_REFERENCED] = "mag_referenced",
	[IIO_EV_TYPE_GESTURE] = "gesture",
};

static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling",
	[IIO_EV_DIR_SINGLETAP] = "singletap",
	[IIO_EV_DIR_DOUBLETAP] = "doubletap",
};

static const char * const iio_ev_info_text[] = {
	[IIO_EV_INFO_ENABLE] = "en",
	[IIO_EV_INFO_VALUE] = "value",
	[IIO_EV_INFO_HYSTERESIS] = "hysteresis",
	[IIO_EV_INFO_PERIOD] = "period",
	[IIO_EV_INFO_HIGH_PASS_FILTER_3DB] = "high_pass_filter_3db",
	[IIO_EV_INFO_LOW_PASS_FILTER_3DB] = "low_pass_filter_3db",
	[IIO_EV_INFO_TIMEOUT] = "timeout",
	[IIO_EV_INFO_RESET_TIMEOUT] = "reset_timeout",
	[IIO_EV_INFO_TAP2_MIN_DELAY] = "tap2_min_delay",
	[IIO_EV_INFO_RUNNING_PERIOD] = "runningperiod",
	[IIO_EV_INFO_RUNNING_COUNT] = "runningcount",
};

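/*
 * The address field of each event attribute packs the index of the
 * channel's event_spec entry in the lower 16 bits and the iio_event_info
 * element in the upper 16 bits (see iio_device_add_event() below).
 */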
static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].dir;
}

static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].type;
}

static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr)
{
	return (attr->address >> 16) & 0xffff;
}

static ssize_t iio_ev_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = indio_dev->info->write_event_config(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), val);

	return (ret < 0) ? ret : len;
}

static ssize_t iio_ev_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val;

	val = indio_dev->info->read_event_config(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr));
	if (val < 0)
		return val;
	else
		return sysfs_emit(buf, "%d\n", val);
}

static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2, val_arr[2];
	int ret;

	ret = indio_dev->info->read_event_value(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
		&val, &val2);
	if (ret < 0)
		return ret;
	val_arr[0] = val;
	val_arr[1] = val2;
	return iio_format_value(buf, ret, 2, val_arr);
}

static ssize_t iio_ev_value_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2;
	int ret;

	if (!indio_dev->info->write_event_value)
		return -EINVAL;

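	/*
	 * Values are written as fixed-point numbers; iio_str_to_fixpoint()
	 * splits them into an integer part (val) and a micro-unit fractional
	 * part (val2), matching the usual IIO val/val2 pair.
	 */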
	ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
	if (ret)
		return ret;
	ret = indio_dev->info->write_event_value(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
		val, val2);
	if (ret < 0)
		return ret;

	return len;
}

static ssize_t iio_ev_label_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	if (indio_dev->info->read_event_label)
		return indio_dev->info->read_event_label(indio_dev,
			this_attr->c, iio_ev_attr_type(this_attr),
			iio_ev_attr_dir(this_attr), buf);

	return -EINVAL;
}

static int iio_device_add_event(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan, unsigned int spec_index,
	enum iio_event_type type, enum iio_event_direction dir,
	enum iio_shared_by shared_by, const unsigned long *mask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	ssize_t (*show)(struct device *dev, struct device_attribute *attr,
		char *buf);
	ssize_t (*store)(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len);
	unsigned int attrcount = 0;
	unsigned int i;
	char *postfix;
	int ret;

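	/*
	 * Create one sysfs attribute per info element set in @mask, with a
	 * postfix of <type>_<direction>_<info> (or <type>_<info> when the
	 * direction is IIO_EV_DIR_NONE).
	 */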
	for_each_set_bit(i, mask, sizeof(*mask)*8) {
		if (i >= ARRAY_SIZE(iio_ev_info_text))
			return -EINVAL;
		if (dir != IIO_EV_DIR_NONE)
			postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
					iio_ev_type_text[type],
					iio_ev_dir_text[dir],
					iio_ev_info_text[i]);
		else
			postfix = kasprintf(GFP_KERNEL, "%s_%s",
					iio_ev_type_text[type],
					iio_ev_info_text[i]);
		if (postfix == NULL)
			return -ENOMEM;

		if (i == IIO_EV_INFO_ENABLE) {
			show = iio_ev_state_show;
			store = iio_ev_state_store;
		} else {
			show = iio_ev_value_show;
			store = iio_ev_value_store;
		}

		ret = __iio_add_chan_devattr(postfix, chan, show, store,
			(i << 16) | spec_index, shared_by, &indio_dev->dev,
			NULL,
			&iio_dev_opaque->event_interface->dev_attr_list);
		kfree(postfix);

		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;

		if (ret)
			return ret;

		attrcount++;
	}

	return attrcount;
}

static int iio_device_add_event_label(struct iio_dev *indio_dev,
				      const struct iio_chan_spec *chan,
				      unsigned int spec_index,
				      enum iio_event_type type,
				      enum iio_event_direction dir)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	char *postfix;
	int ret;

	if (!indio_dev->info->read_event_label)
		return 0;

	if (dir != IIO_EV_DIR_NONE)
		postfix = kasprintf(GFP_KERNEL, "%s_%s_label",
				iio_ev_type_text[type],
				iio_ev_dir_text[dir]);
	else
		postfix = kasprintf(GFP_KERNEL, "%s_label",
				iio_ev_type_text[type]);
	if (postfix == NULL)
		return -ENOMEM;

	ret = __iio_add_chan_devattr(postfix, chan, &iio_ev_label_show, NULL,
			spec_index, IIO_SEPARATE, &indio_dev->dev, NULL,
			&iio_dev_opaque->event_interface->dev_attr_list);

	kfree(postfix);

	if (ret < 0)
		return ret;

	return 1;
}

static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
				      struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	enum iio_event_direction dir;
	enum iio_event_type type;

	for (i = 0; i < chan->num_event_specs; i++) {
		type = chan->event_spec[i].type;
		dir = chan->event_spec[i].dir;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SEPARATE, &chan->event_spec[i].mask_separate);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_TYPE,
			&chan->event_spec[i].mask_shared_by_type);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_DIR,
			&chan->event_spec[i].mask_shared_by_dir);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_ALL,
			&chan->event_spec[i].mask_shared_by_all);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event_label(indio_dev, chan, i, type, dir);
		if (ret < 0)
			return ret;
		attrcount += ret;
	}
	ret = attrcount;
	return ret;
}

static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
	int j, ret, attrcount = 0;

	/* Dynamically created from the channels array */
	for (j = 0; j < indio_dev->num_channels; j++) {
		ret = iio_device_add_event_sysfs(indio_dev,
						 &indio_dev->channels[j]);
		if (ret < 0)
			return ret;
		attrcount += ret;
	}
	return attrcount;
}

static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
	int j;

	for (j = 0; j < indio_dev->num_channels; j++) {
		if (indio_dev->channels[j].num_event_specs != 0)
			return true;
	}
	return false;
}

static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
	INIT_KFIFO(ev_int->det_events);
	init_waitqueue_head(&ev_int->wait);
	mutex_init(&ev_int->read_lock);
}

static long iio_event_ioctl(struct iio_dev *indio_dev, struct file *filp,
			    unsigned int cmd, unsigned long arg)
{
	int __user *ip = (int __user *)arg;
	int fd;

	if (cmd == IIO_GET_EVENT_FD_IOCTL) {
		fd = iio_event_getfd(indio_dev);
		if (fd < 0)
			return fd;
		if (copy_to_user(ip, &fd, sizeof(fd)))
			return -EFAULT;
		return 0;
	}

	return IIO_IOCTL_UNHANDLED;
}

static const char *iio_event_group_name = "events";
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int;
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	ev_int = kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (ev_int == NULL)
		return -ENOMEM;

	iio_dev_opaque->event_interface = ev_int;

	INIT_LIST_HEAD(&ev_int->dev_attr_list);

	iio_setup_ev_int(ev_int);
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	ev_int->group.name = iio_event_group_name;
	ev_int->group.attrs = kcalloc(attrcount + 1,
				      sizeof(ev_int->group.attrs[0]),
				      GFP_KERNEL);
	if (ev_int->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
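	/* Copy any static driver-supplied event attributes first. */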
	if (indio_dev->info->event_attrs)
		memcpy(ev_int->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(ev_int->group.attrs[0]) * attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &ev_int->dev_attr_list, l)
		ev_int->group.attrs[attrn++] = &p->dev_attr.attr;

	ret = iio_device_register_sysfs_group(indio_dev, &ev_int->group);
	if (ret)
		goto error_free_group_attrs;

	ev_int->ioctl_handler.ioctl = iio_event_ioctl;
	iio_device_ioctl_handler_register(&iio_dev_opaque->indio_dev,
					  &ev_int->ioctl_handler);

	return 0;

error_free_group_attrs:
	kfree(ev_int->group.attrs);
error_free_setup_event_lines:
	iio_free_chan_devattr_list(&ev_int->dev_attr_list);
	kfree(ev_int);
	iio_dev_opaque->event_interface = NULL;
	return ret;
}

/**
 * iio_device_wakeup_eventset - Wakes up the event waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll() and blocking read().
 * Should usually be called when the device is unregistered.
 */
void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	if (iio_dev_opaque->event_interface == NULL)
		return;
	wake_up(&iio_dev_opaque->event_interface->wait);
}

void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;

	if (ev_int == NULL)
		return;

	iio_device_ioctl_handler_unregister(&ev_int->ioctl_handler);
	iio_free_chan_devattr_list(&ev_int->dev_attr_list);
	kfree(ev_int->group.attrs);
	kfree(ev_int);
	iio_dev_opaque->event_interface = NULL;
}