// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, Microsoft Corporation.
 *
 * Authors:
 *   Beau Belgrave <beaub@linux.microsoft.com>
 */

#include <linux/bitmap.h>
#include <linux/cdev.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/ioctl.h>
#include <linux/jhash.h>
#include <linux/refcount.h>
#include <linux/trace_events.h>
#include <linux/tracefs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/user_events.h>
#include "trace_dynevent.h"
#include "trace_output.h"
#include "trace.h"

#define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)

#define FIELD_DEPTH_TYPE 0
#define FIELD_DEPTH_NAME 1
#define FIELD_DEPTH_SIZE 2

/* Limit how long an event name plus args can be within the subsystem. */
#define MAX_EVENT_DESC 512
#define EVENT_NAME(user_event) ((user_event)->reg_name)
#define EVENT_TP_NAME(user_event) ((user_event)->tracepoint.name)
#define MAX_FIELD_ARRAY_SIZE 1024

/*
 * Internal bits (kernel side only) to keep track of connected probes:
 * These are used when status is requested in text form about an event. These
 * bits are compared against an internal byte on the event to determine which
 * probes to print out to the user.
 *
 * These do not reflect the mapped bytes between the user and kernel space.
 */
#define EVENT_STATUS_FTRACE BIT(0)
#define EVENT_STATUS_PERF BIT(1)
#define EVENT_STATUS_OTHER BIT(7)
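
/*
 * For example (illustrative), an event with probes attached by both
 * ftrace and perf carries a status of
 * EVENT_STATUS_FTRACE | EVENT_STATUS_PERF (0x3); any non-standard
 * probe shows up as EVENT_STATUS_OTHER.
 */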

/*
 * Stores the system name, tables, and locks for a group of events. This
 * allows isolation for events by various means.
 */
struct user_event_group {
	char *system_name;
	char *system_multi_name;
	struct hlist_node node;
	struct mutex reg_mutex;
	DECLARE_HASHTABLE(register_table, 8);
	/* ID that moves forward within the group for multi-event names */
	u64 multi_id;
};

/* Group for init_user_ns mapping, top-most group */
static struct user_event_group *init_group;

/* Max allowed events for the whole system */
static unsigned int max_user_events = 32768;

/* Current number of events on the whole system */
static unsigned int current_user_events;

/*
 * Stores per-event properties. As users register events within a file,
 * a user_event might be created if it does not already exist. These are
 * globally used and their lifetime is tied to the refcnt member. These
 * cannot go away until the refcnt reaches one.
 */
struct user_event {
	struct user_event_group *group;
	char *reg_name;
	struct tracepoint tracepoint;
	struct trace_event_call call;
	struct trace_event_class class;
	struct dyn_event devent;
	struct hlist_node node;
	struct list_head fields;
	struct list_head validators;
	struct work_struct put_work;
	refcount_t refcnt;
	int min_size;
	int reg_flags;
	char status;
};

/*
 * Stores per-mm/event properties that enable an address to be
 * updated properly for each task. As tasks are forked, we use
 * these to track enablement sites that are tied to an event.
 */
struct user_event_enabler {
	struct list_head mm_enablers_link;
	struct user_event *event;
	unsigned long addr;

	/* Track enable bit, flags, etc. Aligned for bitops. */
	unsigned long values;
};

/* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
#define ENABLE_VAL_BIT_MASK 0x3F

/* Bit 6 is for faulting status of enablement */
#define ENABLE_VAL_FAULTING_BIT 6

/* Bit 7 is for freeing status of enablement */
#define ENABLE_VAL_FREEING_BIT 7

/* Bit 8 is for marking 32-bit on 64-bit */
#define ENABLE_VAL_32_ON_64_BIT 8

#define ENABLE_VAL_COMPAT_MASK (1 << ENABLE_VAL_32_ON_64_BIT)

/* Only duplicate the bit and compat values */
#define ENABLE_VAL_DUP_MASK (ENABLE_VAL_BIT_MASK | ENABLE_VAL_COMPAT_MASK)

#define ENABLE_BITOPS(e) (&(e)->values)

#define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK))
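
/*
 * Worked example (illustrative): a values word of 0x47 decodes to
 * enable bit 7 (0x47 & ENABLE_VAL_BIT_MASK) with ENABLE_VAL_FAULTING_BIT
 * (bit 6) set, meaning a fault fixup for this enabler is in flight.
 */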

#define EVENT_MULTI_FORMAT(f) ((f) & USER_EVENT_REG_MULTI_FORMAT)

/* Used for asynchronous faulting in of pages */
struct user_event_enabler_fault {
	struct work_struct work;
	struct user_event_mm *mm;
	struct user_event_enabler *enabler;
	int attempt;
};

static struct kmem_cache *fault_cache;

/* Global list of memory descriptors using user_events */
static LIST_HEAD(user_event_mms);
static DEFINE_SPINLOCK(user_event_mms_lock);

/*
 * Stores per-file event references. As users register events within a
 * file, this structure is modified and freed via RCU.
 * The lifetime of this struct is tied to the lifetime of the file.
 * These are not shared and only accessible by the file that created it.
 */
struct user_event_refs {
	struct rcu_head rcu;
	int count;
	struct user_event *events[];
};

struct user_event_file_info {
	struct user_event_group *group;
	struct user_event_refs *refs;
};

#define VALIDATOR_ENSURE_NULL (1 << 0)
#define VALIDATOR_REL (1 << 1)

struct user_event_validator {
	struct list_head user_event_link;
	int offset;
	int flags;
};

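/*
 * Worked example (illustrative): on a 64-bit little-endian kernel, a
 * 32-bit enable word at uaddr 0x1004 with bit 1 is rewritten below to
 * uaddr 0x1000 and bit 33, so the long-sized bitop still lands on the
 * exact bit the process registered.
 */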
static inline void align_addr_bit(unsigned long *addr, int *bit,
				  unsigned long *flags)
{
	if (IS_ALIGNED(*addr, sizeof(long))) {
#ifdef __BIG_ENDIAN
		/* 32 bit on BE 64 bit requires a 32 bit offset when aligned. */
		if (test_bit(ENABLE_VAL_32_ON_64_BIT, flags))
			*bit += 32;
#endif
		return;
	}

	*addr = ALIGN_DOWN(*addr, sizeof(long));

	/*
	 * We only support 32 and 64 bit values. The only time we need
	 * to align is a 32 bit value on a 64 bit kernel, which on LE
	 * is always 32 bits, and on BE requires no change when unaligned.
	 */
#ifdef __LITTLE_ENDIAN
	*bit += 32;
#endif
}

typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
				   void *tpdata, bool *faulted);

static int user_event_parse(struct user_event_group *group, char *name,
			    char *args, char *flags,
			    struct user_event **newuser, int reg_flags);

static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm);
static struct user_event_mm *user_event_mm_get_all(struct user_event *user);
static void user_event_mm_put(struct user_event_mm *mm);
static int destroy_user_event(struct user_event *user);
static bool user_fields_match(struct user_event *user, int argc,
			      const char **argv);

static u32 user_event_key(char *name)
{
	return jhash(name, strlen(name), 0);
}

static bool user_event_capable(u16 reg_flags)
{
	/* Persistent events require CAP_PERFMON / CAP_SYS_ADMIN */
	if (reg_flags & USER_EVENT_REG_PERSIST) {
		if (!perfmon_capable())
			return false;
	}

	return true;
}

static struct user_event *user_event_get(struct user_event *user)
{
	refcount_inc(&user->refcnt);

	return user;
}

static void delayed_destroy_user_event(struct work_struct *work)
{
	struct user_event *user = container_of(
		work, struct user_event, put_work);

	mutex_lock(&event_mutex);

	if (!refcount_dec_and_test(&user->refcnt))
		goto out;

	if (destroy_user_event(user)) {
		/*
		 * The only reason this would fail here is if we cannot
		 * update the visibility of the event. In this case the
		 * event stays in the hashtable, waiting for someone to
		 * attempt to delete it later.
		 */
		pr_warn("user_events: Unable to delete event\n");
		refcount_set(&user->refcnt, 1);
	}
out:
	mutex_unlock(&event_mutex);
}

static void user_event_put(struct user_event *user, bool locked)
{
	bool delete;

	if (unlikely(!user))
		return;

	/*
	 * When the event is not enabled for auto-delete there will always
	 * be at least 1 reference to the event. During the event creation
	 * we initially set the refcnt to 2 to achieve this. In those cases
	 * the caller must acquire event_mutex and after decrement check if
	 * the refcnt is 1, meaning this is the last reference. When auto
	 * delete is enabled, there will only be 1 ref, i.e. the refcnt is
	 * only set to 1 during creation to allow the below checks to go
	 * through upon the last put. The last put must always be done with
	 * the event mutex held.
	 */
	if (!locked) {
		lockdep_assert_not_held(&event_mutex);
		delete = refcount_dec_and_mutex_lock(&user->refcnt, &event_mutex);
	} else {
		lockdep_assert_held(&event_mutex);
		delete = refcount_dec_and_test(&user->refcnt);
	}

	if (!delete)
		return;

	/*
	 * We now have the event_mutex in all cases, which ensures that
	 * no new references will be taken until event_mutex is released.
	 * New references come through find_user_event(), which requires
	 * the event_mutex to be held.
	 */

	if (user->reg_flags & USER_EVENT_REG_PERSIST) {
		/* We should not get here when persist flag is set */
		pr_alert("BUG: Auto-delete engaged on persistent event\n");
		goto out;
	}

	/*
	 * Unfortunately we have to attempt the actual destroy in a work
	 * queue. This is because not all cases handle a trace_event_call
	 * being removed within the class->reg() operation for unregister.
	 */
	INIT_WORK(&user->put_work, delayed_destroy_user_event);

	/*
	 * Since the event is still in the hashtable, we have to re-inc
	 * the ref count to 1. This count will be decremented and checked
	 * in the work queue to ensure it's still the last ref. This is
	 * needed because a user-process could register the same event in
	 * between the time of event_mutex release and the work queue
	 * running the delayed destroy. If we removed the item now from
	 * the hashtable, this would result in a timing window where a
	 * user process would fail a register because the trace_event_call
	 * register would fail in the tracing layers.
	 */
	refcount_set(&user->refcnt, 1);

	if (WARN_ON_ONCE(!schedule_work(&user->put_work))) {
		/*
		 * If we fail we must wait for an admin to attempt delete or
		 * another register/close of the event, whichever is first.
		 */
		pr_warn("user_events: Unable to queue delayed destroy\n");
	}
out:
	/* If we didn't hold event_mutex on entry, unlock it here */
	if (!locked)
		mutex_unlock(&event_mutex);
}

static void user_event_group_destroy(struct user_event_group *group)
{
	kfree(group->system_name);
	kfree(group->system_multi_name);
	kfree(group);
}

static char *user_event_group_system_name(void)
{
	char *system_name;
	int len = sizeof(USER_EVENTS_SYSTEM) + 1;

	system_name = kmalloc(len, GFP_KERNEL);

	if (!system_name)
		return NULL;

	snprintf(system_name, len, "%s", USER_EVENTS_SYSTEM);

	return system_name;
}

static char *user_event_group_system_multi_name(void)
{
	return kstrdup(USER_EVENTS_MULTI_SYSTEM, GFP_KERNEL);
}

static struct user_event_group *current_user_event_group(void)
{
	return init_group;
}

static struct user_event_group *user_event_group_create(void)
{
	struct user_event_group *group;

	group = kzalloc(sizeof(*group), GFP_KERNEL);

	if (!group)
		return NULL;

	group->system_name = user_event_group_system_name();

	if (!group->system_name)
		goto error;

	group->system_multi_name = user_event_group_system_multi_name();

	if (!group->system_multi_name)
		goto error;

	mutex_init(&group->reg_mutex);
	hash_init(group->register_table);

	return group;
error:
	if (group)
		user_event_group_destroy(group);

	return NULL;
}

static void user_event_enabler_destroy(struct user_event_enabler *enabler,
				       bool locked)
{
	list_del_rcu(&enabler->mm_enablers_link);

	/* No longer tracking the event via the enabler */
	user_event_put(enabler->event, locked);

	kfree(enabler);
}

static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr,
				  int attempt)
{
	bool unlocked;
	int ret;

	/*
	 * Normally the attempt count is low; ensure it cannot be abused by
	 * bad user processes to cause excessive looping.
	 */
	if (attempt > 10)
		return -EFAULT;

	mmap_read_lock(mm->mm);

	/* Ensure MM has tasks, cannot use after exit_mm() */
	if (refcount_read(&mm->tasks) == 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = fixup_user_fault(mm->mm, uaddr, FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
			       &unlocked);
out:
	mmap_read_unlock(mm->mm);

	return ret;
}

static int user_event_enabler_write(struct user_event_mm *mm,
				    struct user_event_enabler *enabler,
				    bool fixup_fault, int *attempt);

static void user_event_enabler_fault_fixup(struct work_struct *work)
{
	struct user_event_enabler_fault *fault = container_of(
		work, struct user_event_enabler_fault, work);
	struct user_event_enabler *enabler = fault->enabler;
	struct user_event_mm *mm = fault->mm;
	unsigned long uaddr = enabler->addr;
	int attempt = fault->attempt;
	int ret;

	ret = user_event_mm_fault_in(mm, uaddr, attempt);

	if (ret && ret != -ENOENT) {
		struct user_event *user = enabler->event;

		pr_warn("user_events: Fault for mm: 0x%pK @ 0x%llx event: %s\n",
			mm->mm, (unsigned long long)uaddr, EVENT_NAME(user));
	}

	/* Prevent state changes from racing */
	mutex_lock(&event_mutex);

	/* User asked for enabler to be removed during fault */
	if (test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))) {
		user_event_enabler_destroy(enabler, true);
		goto out;
	}

	/*
	 * If we managed to get the page, re-issue the write. We do not
	 * want to get into a possible infinite loop, which is why we only
	 * attempt again directly if the page came in. If we couldn't get
	 * the page here, then we will try again the next time the event is
	 * enabled/disabled.
	 */
	clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

	if (!ret) {
		mmap_read_lock(mm->mm);
		user_event_enabler_write(mm, enabler, true, &attempt);
		mmap_read_unlock(mm->mm);
	}
out:
	mutex_unlock(&event_mutex);

	/* In all cases we no longer need the mm or fault */
	user_event_mm_put(mm);
	kmem_cache_free(fault_cache, fault);
}

static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
					   struct user_event_enabler *enabler,
					   int attempt)
{
	struct user_event_enabler_fault *fault;

	fault = kmem_cache_zalloc(fault_cache, GFP_NOWAIT | __GFP_NOWARN);

	if (!fault)
		return false;

	INIT_WORK(&fault->work, user_event_enabler_fault_fixup);
	fault->mm = user_event_mm_get(mm);
	fault->enabler = enabler;
	fault->attempt = attempt;

	/* Don't try to queue in again while we have a pending fault */
	set_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

	if (!schedule_work(&fault->work)) {
		/* Allow another attempt later */
		clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

		user_event_mm_put(mm);
		kmem_cache_free(fault_cache, fault);

		return false;
	}

	return true;
}

static int user_event_enabler_write(struct user_event_mm *mm,
				    struct user_event_enabler *enabler,
				    bool fixup_fault, int *attempt)
{
	unsigned long uaddr = enabler->addr;
	unsigned long *ptr;
	struct page *page;
	void *kaddr;
	int bit = ENABLE_BIT(enabler);
	int ret;

	lockdep_assert_held(&event_mutex);
	mmap_assert_locked(mm->mm);

	*attempt += 1;

	/* Ensure MM has tasks, cannot use after exit_mm() */
	if (refcount_read(&mm->tasks) == 0)
		return -ENOENT;

	if (unlikely(test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)) ||
		     test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))))
		return -EBUSY;

	align_addr_bit(&uaddr, &bit, ENABLE_BITOPS(enabler));

	ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT,
				    &page, NULL);

	if (unlikely(ret <= 0)) {
		if (!fixup_fault)
			return -EFAULT;

		if (!user_event_enabler_queue_fault(mm, enabler, *attempt))
			pr_warn("user_events: Unable to queue fault handler\n");

		return -EFAULT;
	}

	kaddr = kmap_local_page(page);
	ptr = kaddr + (uaddr & ~PAGE_MASK);

	/* Update bit atomically, user tracers must be atomic as well */
	if (enabler->event && enabler->event->status)
		set_bit(bit, ptr);
	else
		clear_bit(bit, ptr);

	kunmap_local(kaddr);
	unpin_user_pages_dirty_lock(&page, 1, true);

	return 0;
}

static bool user_event_enabler_exists(struct user_event_mm *mm,
				      unsigned long uaddr, unsigned char bit)
{
	struct user_event_enabler *enabler;

	list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
		if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit)
			return true;
	}

	return false;
}

static void user_event_enabler_update(struct user_event *user)
{
	struct user_event_enabler *enabler;
	struct user_event_mm *next;
	struct user_event_mm *mm;
	int attempt;

	lockdep_assert_held(&event_mutex);

	/*
	 * We need to build a one-shot list of all the mms that have an
	 * enabler for the user_event passed in. This list is only valid
	 * while holding the event_mutex. The only reason for this is due
	 * to the global mm list being RCU protected and we use methods
	 * which can wait (mmap_read_lock and pin_user_pages_remote).
	 *
	 * NOTE: user_event_mm_get_all() increments the ref count of each
	 * mm that is added to the list to prevent removal timing windows.
	 * We must always put each mm after they are used, which may wait.
	 */
	mm = user_event_mm_get_all(user);

	while (mm) {
		next = mm->next;
		mmap_read_lock(mm->mm);

		list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
			if (enabler->event == user) {
				attempt = 0;
				user_event_enabler_write(mm, enabler, true, &attempt);
			}
		}

		mmap_read_unlock(mm->mm);
		user_event_mm_put(mm);
		mm = next;
	}
}

static bool user_event_enabler_dup(struct user_event_enabler *orig,
				   struct user_event_mm *mm)
{
	struct user_event_enabler *enabler;

	/* Skip pending frees */
	if (unlikely(test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(orig))))
		return true;

	enabler = kzalloc(sizeof(*enabler), GFP_NOWAIT | __GFP_ACCOUNT);

	if (!enabler)
		return false;

	enabler->event = user_event_get(orig->event);
	enabler->addr = orig->addr;

	/* Only dup part of value (ignore future flags, etc) */
	enabler->values = orig->values & ENABLE_VAL_DUP_MASK;

	/* Enablers not exposed yet, RCU not required */
	list_add(&enabler->mm_enablers_link, &mm->enablers);

	return true;
}

static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm)
{
	refcount_inc(&mm->refcnt);

	return mm;
}

static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
{
	struct user_event_mm *found = NULL;
	struct user_event_enabler *enabler;
	struct user_event_mm *mm;

	/*
	 * We use the mm->next field to build a one-shot list from the global
	 * RCU protected list. To build this list the event_mutex must be held.
	 * This lets us build a list without requiring allocs that could fail
	 * when user based events are most wanted for diagnostics.
	 */
	lockdep_assert_held(&event_mutex);

	/*
	 * We do not want to block fork/exec while enablements are being
	 * updated, so we use RCU to walk the current tasks that have used
	 * user_events ABI for 1 or more events. Each enabler found in each
	 * task that matches the event being updated has a write to reflect
	 * the kernel state back into the process. Waits/faults must not occur
	 * during this. So we scan the list under RCU for all the mm that have
	 * the event within it. This is needed because mmap_read_lock() can wait.
	 * Each user mm returned has a ref inc to handle remove RCU races.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(mm, &user_event_mms, mms_link) {
		list_for_each_entry_rcu(enabler, &mm->enablers, mm_enablers_link) {
			if (enabler->event == user) {
				mm->next = found;
				found = user_event_mm_get(mm);
				break;
			}
		}
	}

	rcu_read_unlock();

	return found;
}

static struct user_event_mm *user_event_mm_alloc(struct task_struct *t)
{
	struct user_event_mm *user_mm;

	user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);

	if (!user_mm)
		return NULL;

	user_mm->mm = t->mm;
	INIT_LIST_HEAD(&user_mm->enablers);
	refcount_set(&user_mm->refcnt, 1);
	refcount_set(&user_mm->tasks, 1);

	/*
	 * The lifetime of the memory descriptor can slightly outlast
	 * the task lifetime if a ref to the user_event_mm is taken
	 * between list_del_rcu() and call_rcu(). Therefore we need
	 * to take a reference to it to ensure it can live this long
	 * under this corner case. This can also occur in clones that
	 * outlast the parent.
	 */
	mmgrab(user_mm->mm);

	return user_mm;
}

static void user_event_mm_attach(struct user_event_mm *user_mm, struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&user_event_mms_lock, flags);
	list_add_rcu(&user_mm->mms_link, &user_event_mms);
	spin_unlock_irqrestore(&user_event_mms_lock, flags);

	t->user_event_mm = user_mm;
}

static struct user_event_mm *current_user_event_mm(void)
{
	struct user_event_mm *user_mm = current->user_event_mm;

	if (user_mm)
		goto inc;

	user_mm = user_event_mm_alloc(current);

	if (!user_mm)
		goto error;

	user_event_mm_attach(user_mm, current);
inc:
	refcount_inc(&user_mm->refcnt);
error:
	return user_mm;
}

static void user_event_mm_destroy(struct user_event_mm *mm)
{
	struct user_event_enabler *enabler, *next;

	list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link)
		user_event_enabler_destroy(enabler, false);

	mmdrop(mm->mm);
	kfree(mm);
}

static void user_event_mm_put(struct user_event_mm *mm)
{
	if (mm && refcount_dec_and_test(&mm->refcnt))
		user_event_mm_destroy(mm);
}

static void delayed_user_event_mm_put(struct work_struct *work)
{
	struct user_event_mm *mm;

	mm = container_of(to_rcu_work(work), struct user_event_mm, put_rwork);
	user_event_mm_put(mm);
}

void user_event_mm_remove(struct task_struct *t)
{
	struct user_event_mm *mm;
	unsigned long flags;

	might_sleep();

	mm = t->user_event_mm;
	t->user_event_mm = NULL;

	/* Clone will increment the tasks, only remove if last clone */
	if (!refcount_dec_and_test(&mm->tasks))
		return;

	/* Remove the mm from the list, so it can no longer be enabled */
	spin_lock_irqsave(&user_event_mms_lock, flags);
	list_del_rcu(&mm->mms_link);
	spin_unlock_irqrestore(&user_event_mms_lock, flags);

	/*
	 * We need to wait for currently occurring writes to stop within
	 * the mm. This is required since exit_mm() snaps the current rss
	 * stats and clears them. On the final mmdrop(), check_mm() will
	 * report a bug if these increment.
	 *
	 * All writes/pins are done under mmap_read lock, take the write
	 * lock to ensure in-progress faults have completed. Faults that
	 * are pending but yet to run will check the task count and skip
	 * the fault since the mm is going away.
	 */
	mmap_write_lock(mm->mm);
	mmap_write_unlock(mm->mm);

	/*
	 * Put for mm must be done after RCU delay to handle new refs in
	 * between the list_del_rcu() and now. This ensures any get refs
	 * during rcu_read_lock() are accounted for during list removal.
	 *
	 * CPU A                        |  CPU B
	 * ---------------------------------------------------------------
	 * user_event_mm_remove()       |  rcu_read_lock();
	 *  list_del_rcu()              |   list_for_each_entry_rcu();
	 *  call_rcu()                  |   refcount_inc();
	 *  .                           |  rcu_read_unlock();
	 *  schedule_work()             |  .
	 *   user_event_mm_put()        |  .
	 *
	 * mmdrop() cannot be called in the softirq context of call_rcu()
	 * so we use a work queue after call_rcu() to run within.
	 */
	INIT_RCU_WORK(&mm->put_rwork, delayed_user_event_mm_put);
	queue_rcu_work(system_wq, &mm->put_rwork);
}

void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
{
	struct user_event_mm *mm = user_event_mm_alloc(t);
	struct user_event_enabler *enabler;

	if (!mm)
		return;

	rcu_read_lock();

	list_for_each_entry_rcu(enabler, &old_mm->enablers, mm_enablers_link) {
		if (!user_event_enabler_dup(enabler, mm))
			goto error;
	}

	rcu_read_unlock();

	user_event_mm_attach(mm, t);
	return;
error:
	rcu_read_unlock();
	user_event_mm_destroy(mm);
}

static bool current_user_event_enabler_exists(unsigned long uaddr,
					      unsigned char bit)
{
	struct user_event_mm *user_mm = current_user_event_mm();
	bool exists;

	if (!user_mm)
		return false;

	exists = user_event_enabler_exists(user_mm, uaddr, bit);

	user_event_mm_put(user_mm);

	return exists;
}

static struct user_event_enabler
*user_event_enabler_create(struct user_reg *reg, struct user_event *user,
			   int *write_result)
{
	struct user_event_enabler *enabler;
	struct user_event_mm *user_mm;
	unsigned long uaddr = (unsigned long)reg->enable_addr;
	int attempt = 0;

	user_mm = current_user_event_mm();

	if (!user_mm)
		return NULL;

	enabler = kzalloc(sizeof(*enabler), GFP_KERNEL_ACCOUNT);

	if (!enabler)
		goto out;

	enabler->event = user;
	enabler->addr = uaddr;
	enabler->values = reg->enable_bit;

#if BITS_PER_LONG >= 64
	if (reg->enable_size == 4)
		set_bit(ENABLE_VAL_32_ON_64_BIT, ENABLE_BITOPS(enabler));
#endif

retry:
	/* Prevents state changes from racing with new enablers */
	mutex_lock(&event_mutex);

	/* Attempt to reflect the current state within the process */
	mmap_read_lock(user_mm->mm);
	*write_result = user_event_enabler_write(user_mm, enabler, false,
						 &attempt);
	mmap_read_unlock(user_mm->mm);

	/*
	 * If the write works, then we will track the enabler. A ref to the
	 * underlying user_event is held by the enabler to prevent it going
	 * away while the enabler is still in use by a process. The ref is
	 * removed when the enabler is destroyed. This means an event cannot
	 * be forcefully deleted from the system until all tasks using it
	 * exit or run exec(), which includes forks and clones.
	 */
	if (!*write_result) {
		user_event_get(user);
		list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers);
	}

	mutex_unlock(&event_mutex);

	if (*write_result) {
		/* Attempt to fault-in and retry if it worked */
		if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
			goto retry;

		kfree(enabler);
		enabler = NULL;
	}
out:
	user_event_mm_put(user_mm);

	return enabler;
}

static __always_inline __must_check
bool user_event_last_ref(struct user_event *user)
{
	int last = 0;

	if (user->reg_flags & USER_EVENT_REG_PERSIST)
		last = 1;

	return refcount_read(&user->refcnt) == last;
}

static __always_inline __must_check
size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t ret;

	pagefault_disable();

	ret = copy_from_iter_nocache(addr, bytes, i);

	pagefault_enable();

	return ret;
}

static struct list_head *user_event_get_fields(struct trace_event_call *call)
{
	struct user_event *user = (struct user_event *)call->data;

	return &user->fields;
}

/*
 * Parses a register command for user_events
 * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
 *
 * Example event named 'test' with a 20 char 'msg' field with an unsigned int
 * 'id' field after:
 * test char[20] msg;unsigned int id
 *
 * NOTE: Offsets are from the user data perspective, they are not from the
 * trace_entry/buffer perspective. We automatically add the common properties
 * sizes to the offset for the user.
 * Upon success user_event has its ref count increased by 1.
 */
static int user_event_parse_cmd(struct user_event_group *group,
				char *raw_command, struct user_event **newuser,
				int reg_flags)
{
	char *name = raw_command;
	char *args = strpbrk(name, " ");
	char *flags;

	if (args)
		*args++ = '\0';

	flags = strpbrk(name, ":");

	if (flags)
		*flags++ = '\0';

	return user_event_parse(group, name, args, flags, newuser, reg_flags);
}

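/*
 * Parses the bracketed count out of an array type, e.g. "char[20]"
 * yields 20. Counts above MAX_FIELD_ARRAY_SIZE are rejected.
 */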
static int user_field_array_size(const char *type)
{
	const char *start = strchr(type, '[');
	char val[8];
	char *bracket;
	int size = 0;

	if (start == NULL)
		return -EINVAL;

	if (strscpy(val, start + 1, sizeof(val)) <= 0)
		return -EINVAL;

	bracket = strchr(val, ']');

	if (!bracket)
		return -EINVAL;

	*bracket = '\0';

	if (kstrtouint(val, 0, &size))
		return -EINVAL;

	if (size > MAX_FIELD_ARRAY_SIZE)
		return -EINVAL;

	return size;
}

static int user_field_size(const char *type)
{
	/* long is not allowed from a user, since it's ambiguous in size */
	if (strcmp(type, "s64") == 0)
		return sizeof(s64);
	if (strcmp(type, "u64") == 0)
		return sizeof(u64);
	if (strcmp(type, "s32") == 0)
		return sizeof(s32);
	if (strcmp(type, "u32") == 0)
		return sizeof(u32);
	if (strcmp(type, "int") == 0)
		return sizeof(int);
	if (strcmp(type, "unsigned int") == 0)
		return sizeof(unsigned int);
	if (strcmp(type, "s16") == 0)
		return sizeof(s16);
	if (strcmp(type, "u16") == 0)
		return sizeof(u16);
	if (strcmp(type, "short") == 0)
		return sizeof(short);
	if (strcmp(type, "unsigned short") == 0)
		return sizeof(unsigned short);
	if (strcmp(type, "s8") == 0)
		return sizeof(s8);
	if (strcmp(type, "u8") == 0)
		return sizeof(u8);
	if (strcmp(type, "char") == 0)
		return sizeof(char);
	if (strcmp(type, "unsigned char") == 0)
		return sizeof(unsigned char);
	if (str_has_prefix(type, "char["))
		return user_field_array_size(type);
	if (str_has_prefix(type, "unsigned char["))
		return user_field_array_size(type);
	if (str_has_prefix(type, "__data_loc "))
		return sizeof(u32);
	if (str_has_prefix(type, "__rel_loc "))
		return sizeof(u32);

	/* Unknown basic type, error */
	return -EINVAL;
}

static void user_event_destroy_validators(struct user_event *user)
{
	struct user_event_validator *validator, *next;
	struct list_head *head = &user->validators;

	list_for_each_entry_safe(validator, next, head, user_event_link) {
		list_del(&validator->user_event_link);
		kfree(validator);
	}
}

static void user_event_destroy_fields(struct user_event *user)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;

	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field);
	}
}

static int user_event_add_field(struct user_event *user, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct user_event_validator *validator;
	struct ftrace_event_field *field;
	int validator_flags = 0;

	field = kmalloc(sizeof(*field), GFP_KERNEL_ACCOUNT);

	if (!field)
		return -ENOMEM;

	if (str_has_prefix(type, "__data_loc "))
		goto add_validator;

	if (str_has_prefix(type, "__rel_loc ")) {
		validator_flags |= VALIDATOR_REL;
		goto add_validator;
	}

	goto add_field;

add_validator:
	if (strstr(type, "char") != NULL)
		validator_flags |= VALIDATOR_ENSURE_NULL;

	validator = kmalloc(sizeof(*validator), GFP_KERNEL_ACCOUNT);

	if (!validator) {
		kfree(field);
		return -ENOMEM;
	}

	validator->flags = validator_flags;
	validator->offset = offset;

	/* Want sequential access when validating */
	list_add_tail(&validator->user_event_link, &user->validators);

add_field:
	field->type = type;
	field->name = name;
	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	field->filter_type = filter_type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);

	list_add(&field->link, &user->fields);

	/*
	 * Min size from user writes that are required, this does not include
	 * the size of trace_entry (common fields).
	 */
	user->min_size = (offset + size) - sizeof(struct trace_entry);

	return 0;
}

/*
 * Parses the values of a field within the description
 * Format: type name [size]
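 *
 * e.g. "u32 count", "char[20] msg", or "struct my_struct data 24" (the
 * struct name here is just illustrative); the trailing size is only
 * accepted, and required, for struct types, whose size cannot be inferred.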
 */
static int user_event_parse_field(char *field, struct user_event *user,
				  u32 *offset)
{
	char *part, *type, *name;
	u32 depth = 0, saved_offset = *offset;
	int len, size = -EINVAL;
	bool is_struct = false;

	field = skip_spaces(field);

	if (*field == '\0')
		return 0;

	/* Handle types that have a space within */
	len = str_has_prefix(field, "unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "struct ");
	if (len) {
		is_struct = true;
		goto skip_next;
	}

	len = str_has_prefix(field, "__data_loc unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__data_loc ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__rel_loc unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__rel_loc ");
	if (len)
		goto skip_next;

	goto parse;
skip_next:
	type = field;
	field = strpbrk(field + len, " ");

	if (field == NULL)
		return -EINVAL;

	*field++ = '\0';
	depth++;
parse:
	name = NULL;

	while ((part = strsep(&field, " ")) != NULL) {
		switch (depth++) {
		case FIELD_DEPTH_TYPE:
			type = part;
			break;
		case FIELD_DEPTH_NAME:
			name = part;
			break;
		case FIELD_DEPTH_SIZE:
			if (!is_struct)
				return -EINVAL;

			if (kstrtou32(part, 10, &size))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	}

	if (depth < FIELD_DEPTH_SIZE || !name)
		return -EINVAL;

	if (depth == FIELD_DEPTH_SIZE)
		size = user_field_size(type);

	if (size == 0)
		return -EINVAL;

	if (size < 0)
		return size;

	*offset = saved_offset + size;

	return user_event_add_field(user, type, name, saved_offset, size,
				    type[0] != 'u', FILTER_OTHER);
}

static int user_event_parse_fields(struct user_event *user, char *args)
{
	char *field;
	u32 offset = sizeof(struct trace_entry);
	int ret = -EINVAL;

	if (args == NULL)
		return 0;

	while ((field = strsep(&args, ";")) != NULL) {
		ret = user_event_parse_field(field, user, &offset);

		if (ret)
			break;
	}

	return ret;
}

static struct trace_event_fields user_event_fields_array[1];

static const char *user_field_format(const char *type)
{
	if (strcmp(type, "s64") == 0)
		return "%lld";
	if (strcmp(type, "u64") == 0)
		return "%llu";
	if (strcmp(type, "s32") == 0)
		return "%d";
	if (strcmp(type, "u32") == 0)
		return "%u";
	if (strcmp(type, "int") == 0)
		return "%d";
	if (strcmp(type, "unsigned int") == 0)
		return "%u";
	if (strcmp(type, "s16") == 0)
		return "%d";
	if (strcmp(type, "u16") == 0)
		return "%u";
	if (strcmp(type, "short") == 0)
		return "%d";
	if (strcmp(type, "unsigned short") == 0)
		return "%u";
	if (strcmp(type, "s8") == 0)
		return "%d";
	if (strcmp(type, "u8") == 0)
		return "%u";
	if (strcmp(type, "char") == 0)
		return "%d";
	if (strcmp(type, "unsigned char") == 0)
		return "%u";
	if (strstr(type, "char[") != NULL)
		return "%s";

	/* Unknown type, likely a struct; allowed, treat as 64-bit */
	return "%llu";
}

static bool user_field_is_dyn_string(const char *type, const char **str_func)
{
	if (str_has_prefix(type, "__data_loc ")) {
		*str_func = "__get_str";
		goto check;
	}

	if (str_has_prefix(type, "__rel_loc ")) {
		*str_func = "__get_rel_str";
		goto check;
	}

	return false;
check:
	return strstr(type, "char") != NULL;
}

#define LEN_OR_ZERO (len ? len - pos : 0)
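
/*
 * The string-building helpers below are run twice: first with a NULL
 * buffer and a len of zero, so LEN_OR_ZERO clamps every snprintf() to a
 * pure length computation, then again with a buffer sized from the
 * first pass. The returned count includes the terminating NUL.
 */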
static int user_dyn_field_set_string(int argc, const char **argv, int *iout,
				     char *buf, int len, bool *colon)
{
	int pos = 0, i = *iout;

	*colon = false;

	for (; i < argc; ++i) {
		if (i != *iout)
			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", argv[i]);

		if (strchr(argv[i], ';')) {
			++i;
			*colon = true;
			break;
		}
	}

	/* Actual set, advance i */
	if (len != 0)
		*iout = i;

	return pos + 1;
}

static int user_field_set_string(struct ftrace_event_field *field,
				 char *buf, int len, bool colon)
{
	int pos = 0;

	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->type);
	pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);

	if (str_has_prefix(field->type, "struct "))
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %d", field->size);

	if (colon)
		pos += snprintf(buf + pos, LEN_OR_ZERO, ";");

	return pos + 1;
}

static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
{
	struct ftrace_event_field *field;
	struct list_head *head = &user->fields;
	int pos = 0, depth = 0;
	const char *str_func;

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	list_for_each_entry_reverse(field, head, link) {
		if (depth != 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
				field->name, user_field_format(field->type));

		depth++;
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	list_for_each_entry_reverse(field, head, link) {
		if (user_field_is_dyn_string(field->type, &str_func))
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", %s(%s)", str_func, field->name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", REC->%s", field->name);
	}

	return pos + 1;
}
#undef LEN_OR_ZERO

static int user_event_create_print_fmt(struct user_event *user)
{
	char *print_fmt;
	int len;

	len = user_event_set_print_fmt(user, NULL, 0);

	print_fmt = kmalloc(len, GFP_KERNEL_ACCOUNT);

	if (!print_fmt)
		return -ENOMEM;

	user_event_set_print_fmt(user, print_fmt, len);

	user->call.print_fmt = print_fmt;

	return 0;
}

static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
						int flags,
						struct trace_event *event)
{
	return print_event_fields(iter, event);
}

static struct trace_event_functions user_event_funcs = {
	.trace = user_event_print_trace,
};

static int user_event_set_call_visible(struct user_event *user, bool visible)
{
	int ret;
	const struct cred *old_cred;
	struct cred *cred;

	cred = prepare_creds();

	if (!cred)
		return -ENOMEM;

	/*
	 * While by default tracefs is locked down, systems can be configured
	 * to allow user_event files to be less locked down. The extreme case
	 * being "other" has read/write access to user_events_data/status.
	 *
	 * When not locked down, processes may not have permissions to
	 * add/remove calls themselves to tracefs. We need to temporarily
	 * switch to root file permission to allow for this scenario.
	 */
	cred->fsuid = GLOBAL_ROOT_UID;

	old_cred = override_creds(cred);

	if (visible)
		ret = trace_add_event_call(&user->call);
	else
		ret = trace_remove_event_call(&user->call);

	revert_creds(old_cred);
	put_cred(cred);

	return ret;
}

static int destroy_user_event(struct user_event *user)
{
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	/* Must destroy fields before call removal */
	user_event_destroy_fields(user);

	ret = user_event_set_call_visible(user, false);

	if (ret)
		return ret;

	dyn_event_remove(&user->devent);
	hash_del(&user->node);

	user_event_destroy_validators(user);

	/* If we have different names, both must be freed */
	if (EVENT_NAME(user) != EVENT_TP_NAME(user))
		kfree(EVENT_TP_NAME(user));

	kfree(user->call.print_fmt);
	kfree(EVENT_NAME(user));
	kfree(user);

	if (current_user_events > 0)
		current_user_events--;
	else
		pr_alert("BUG: Bad current_user_events\n");

	return ret;
}

static struct user_event *find_user_event(struct user_event_group *group,
					  char *name, int argc, const char **argv,
					  u32 flags, u32 *outkey)
{
	struct user_event *user;
	u32 key = user_event_key(name);

	*outkey = key;

	hash_for_each_possible(group->register_table, user, node, key) {
		/*
		 * Single-format events shouldn't return multi-format
		 * events. Callers expect the underlying tracepoint to match
		 * the name exactly in these cases. Only check like-formats.
		 */
		if (EVENT_MULTI_FORMAT(flags) != EVENT_MULTI_FORMAT(user->reg_flags))
			continue;

		if (strcmp(EVENT_NAME(user), name))
			continue;

		if (user_fields_match(user, argc, argv))
			return user_event_get(user);

		/* Scan others if this is a multi-format event */
		if (EVENT_MULTI_FORMAT(flags))
			continue;

		return ERR_PTR(-EADDRINUSE);
	}

	return NULL;
}

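/*
 * Checks __data_loc/__rel_loc references in the payload. The low 16 bits
 * of each loc word hold the offset and the high 16 bits hold the size,
 * e.g. a loc of 0x00050008 refers to 5 bytes starting at offset 8.
 */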
static int user_event_validate(struct user_event *user, void *data, int len)
{
	struct list_head *head = &user->validators;
	struct user_event_validator *validator;
	void *pos, *end = data + len;
	u32 loc, offset, size;

	list_for_each_entry(validator, head, user_event_link) {
		pos = data + validator->offset;

		/* Already done min_size check, no bounds check here */
		loc = *(u32 *)pos;
		offset = loc & 0xffff;
		size = loc >> 16;

		if (likely(validator->flags & VALIDATOR_REL))
			pos += offset + sizeof(loc);
		else
			pos = data + offset;

		pos += size;

		if (unlikely(pos > end))
			return -EFAULT;

		if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
			if (unlikely(*(char *)(pos - 1) != '\0'))
				return -EFAULT;
	}

	return 0;
}

/*
 * Writes the user supplied payload out to a trace file.
 */
static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
			      void *tpdata, bool *faulted)
{
	struct trace_event_file *file;
	struct trace_entry *entry;
	struct trace_event_buffer event_buffer;
	size_t size = sizeof(*entry) + i->count;

	file = (struct trace_event_file *)tpdata;

	if (!file ||
	    !(file->flags & EVENT_FILE_FL_ENABLED) ||
	    trace_trigger_soft_disabled(file))
		return;

	/* Allocates and fills trace_entry, + 1 of this is data payload */
	entry = trace_event_buffer_reserve(&event_buffer, file, size);

	if (unlikely(!entry))
		return;

	if (unlikely(i->count != 0 && !copy_nofault(entry + 1, i->count, i)))
		goto discard;

	if (!list_empty(&user->validators) &&
	    unlikely(user_event_validate(user, entry, size)))
		goto discard;

	trace_event_buffer_commit(&event_buffer);

	return;
discard:
	*faulted = true;
	__trace_event_discard_commit(event_buffer.buffer,
				     event_buffer.event);
}

#ifdef CONFIG_PERF_EVENTS
/*
 * Writes the user supplied payload out to perf ring buffer.
 */
static void user_event_perf(struct user_event *user, struct iov_iter *i,
			    void *tpdata, bool *faulted)
{
	struct hlist_head *perf_head;

	perf_head = this_cpu_ptr(user->call.perf_events);

	if (perf_head && !hlist_empty(perf_head)) {
		struct trace_entry *perf_entry;
		struct pt_regs *regs;
		size_t size = sizeof(*perf_entry) + i->count;
		int context;

		perf_entry = perf_trace_buf_alloc(ALIGN(size, 8),
						  &regs, &context);

		if (unlikely(!perf_entry))
			return;

		perf_fetch_caller_regs(regs);

		if (unlikely(i->count != 0 && !copy_nofault(perf_entry + 1, i->count, i)))
			goto discard;

		if (!list_empty(&user->validators) &&
		    unlikely(user_event_validate(user, perf_entry, size)))
			goto discard;

		perf_trace_buf_submit(perf_entry, size, context,
				      user->call.event.type, 1, regs,
				      perf_head, NULL);

		return;
discard:
		*faulted = true;
		perf_swevent_put_recursion_context(context);
	}
}
#endif
1670 | |
1671 | /* |
1672 | * Update the enabled bit among all user processes. |
1673 | */ |
1674 | static void update_enable_bit_for(struct user_event *user) |
1675 | { |
1676 | struct tracepoint *tp = &user->tracepoint; |
1677 | char status = 0; |
1678 | |
1679 | if (atomic_read(v: &tp->key.enabled) > 0) { |
1680 | struct tracepoint_func *probe_func_ptr; |
1681 | user_event_func_t probe_func; |
1682 | |
1683 | rcu_read_lock_sched(); |
1684 | |
1685 | probe_func_ptr = rcu_dereference_sched(tp->funcs); |
1686 | |
1687 | if (probe_func_ptr) { |
1688 | do { |
1689 | probe_func = probe_func_ptr->func; |
1690 | |
1691 | if (probe_func == user_event_ftrace) |
1692 | status |= EVENT_STATUS_FTRACE; |
1693 | #ifdef CONFIG_PERF_EVENTS |
1694 | else if (probe_func == user_event_perf) |
1695 | status |= EVENT_STATUS_PERF; |
1696 | #endif |
1697 | else |
1698 | status |= EVENT_STATUS_OTHER; |
1699 | } while ((++probe_func_ptr)->func); |
1700 | } |
1701 | |
1702 | rcu_read_unlock_sched(); |
1703 | } |
1704 | |
1705 | user->status = status; |
1706 | |
1707 | user_event_enabler_update(user); |
1708 | } |
1709 | |
1710 | /* |
1711 | * Register callback for our events from tracing sub-systems. |
1712 | */ |
1713 | static int user_event_reg(struct trace_event_call *call, |
1714 | enum trace_reg type, |
1715 | void *data) |
1716 | { |
1717 | struct user_event *user = (struct user_event *)call->data; |
1718 | int ret = 0; |
1719 | |
1720 | if (!user) |
1721 | return -ENOENT; |
1722 | |
1723 | switch (type) { |
1724 | case TRACE_REG_REGISTER: |
1725 | ret = tracepoint_probe_register(tp: call->tp, |
1726 | probe: call->class->probe, |
1727 | data); |
1728 | if (!ret) |
1729 | goto inc; |
1730 | break; |
1731 | |
1732 | case TRACE_REG_UNREGISTER: |
1733 | tracepoint_probe_unregister(tp: call->tp, |
1734 | probe: call->class->probe, |
1735 | data); |
1736 | goto dec; |
1737 | |
1738 | #ifdef CONFIG_PERF_EVENTS |
1739 | case TRACE_REG_PERF_REGISTER: |
1740 | ret = tracepoint_probe_register(tp: call->tp, |
1741 | probe: call->class->perf_probe, |
1742 | data); |
1743 | if (!ret) |
1744 | goto inc; |
1745 | break; |
1746 | |
1747 | case TRACE_REG_PERF_UNREGISTER: |
1748 | tracepoint_probe_unregister(tp: call->tp, |
1749 | probe: call->class->perf_probe, |
1750 | data); |
1751 | goto dec; |
1752 | |
1753 | case TRACE_REG_PERF_OPEN: |
1754 | case TRACE_REG_PERF_CLOSE: |
1755 | case TRACE_REG_PERF_ADD: |
1756 | case TRACE_REG_PERF_DEL: |
1757 | break; |
1758 | #endif |
1759 | } |
1760 | |
1761 | return ret; |
1762 | inc: |
1763 | user_event_get(user); |
1764 | update_enable_bit_for(user); |
1765 | return 0; |
1766 | dec: |
1767 | update_enable_bit_for(user); |
1768 | user_event_put(user, true);
1769 | return 0; |
1770 | } |
1771 | |
1772 | static int user_event_create(const char *raw_command) |
1773 | { |
1774 | struct user_event_group *group; |
1775 | struct user_event *user; |
1776 | char *name; |
1777 | int ret; |
1778 | |
1779 | if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
1780 | return -ECANCELED; |
1781 | |
1782 | raw_command += USER_EVENTS_PREFIX_LEN; |
1783 | raw_command = skip_spaces(raw_command); |
1784 | |
1785 | name = kstrdup(raw_command, GFP_KERNEL_ACCOUNT);
1786 | |
1787 | if (!name) |
1788 | return -ENOMEM; |
1789 | |
1790 | group = current_user_event_group(); |
1791 | |
1792 | if (!group) { |
1793 | kfree(name);
1794 | return -ENOENT; |
1795 | } |
1796 | |
1797 | mutex_lock(&group->reg_mutex); |
1798 | |
1799 | /* Dyn events persist, otherwise they would be cleaned up immediately */
1800 | ret = user_event_parse_cmd(group, name, &user, USER_EVENT_REG_PERSIST);
1801 | |
1802 | if (!ret) |
1803 | user_event_put(user, false);
1804 | |
1805 | mutex_unlock(&group->reg_mutex);
1806 | |
1807 | if (ret) |
1808 | kfree(name);
1809 | |
1810 | return ret; |
1811 | } |
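
/*
 * Sketch of how user_event_create() is typically reached (illustrative,
 * assuming tracefs is mounted at /sys/kernel/tracing): the dyn_event layer
 * feeds it commands written to the dynamic_events file, e.g.:
 *
 *	echo 'u:mytest u32 count' >> /sys/kernel/tracing/dynamic_events
 *
 * "u:" is USER_EVENTS_PREFIX; the remainder is the event name and its typed
 * arguments. The name "mytest" is a hypothetical example.
 */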
1812 | |
1813 | static int user_event_show(struct seq_file *m, struct dyn_event *ev) |
1814 | { |
1815 | struct user_event *user = container_of(ev, struct user_event, devent); |
1816 | struct ftrace_event_field *field; |
1817 | struct list_head *head; |
1818 | int depth = 0; |
1819 | |
1820 | seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));
1821 | 
1822 | head = trace_get_fields(&user->call);
1823 | 
1824 | list_for_each_entry_reverse(field, head, link) {
1825 | if (depth == 0)
1826 | seq_puts(m, " ");
1827 | else
1828 | seq_puts(m, "; ");
1829 | 
1830 | seq_printf(m, "%s %s", field->type, field->name);
1831 | 
1832 | if (str_has_prefix(field->type, "struct "))
1833 | seq_printf(m, " %d", field->size);
1834 | 
1835 | depth++;
1836 | }
1837 | 
1838 | seq_puts(m, "\n");
1839 | |
1840 | return 0; |
1841 | } |
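
/*
 * Illustrative user_event_show() output for a hypothetical event named
 * "mytest" with two fields:
 *
 *	u:mytest u32 count; char flag
 *
 * Fields are walked in reverse list order so they print in the order the
 * user originally declared them.
 */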
1842 | |
1843 | static bool user_event_is_busy(struct dyn_event *ev) |
1844 | { |
1845 | struct user_event *user = container_of(ev, struct user_event, devent); |
1846 | |
1847 | return !user_event_last_ref(user); |
1848 | } |
1849 | |
1850 | static int user_event_free(struct dyn_event *ev) |
1851 | { |
1852 | struct user_event *user = container_of(ev, struct user_event, devent); |
1853 | |
1854 | if (!user_event_last_ref(user)) |
1855 | return -EBUSY; |
1856 | |
1857 | if (!user_event_capable(user->reg_flags))
1858 | return -EPERM; |
1859 | |
1860 | return destroy_user_event(user); |
1861 | } |
1862 | |
1863 | static bool user_field_match(struct ftrace_event_field *field, int argc, |
1864 | const char **argv, int *iout) |
1865 | { |
1866 | char *field_name = NULL, *dyn_field_name = NULL; |
1867 | bool colon = false, match = false; |
1868 | int dyn_len, len; |
1869 | |
1870 | if (*iout >= argc) |
1871 | return false; |
1872 | |
1873 | dyn_len = user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
1874 | 0, &colon);
1875 | 
1876 | len = user_field_set_string(field, field_name, 0, colon);
1877 | |
1878 | if (dyn_len != len) |
1879 | return false; |
1880 | |
1881 | dyn_field_name = kmalloc(dyn_len, GFP_KERNEL);
1882 | field_name = kmalloc(len, GFP_KERNEL);
1883 | |
1884 | if (!dyn_field_name || !field_name) |
1885 | goto out; |
1886 | |
1887 | user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
1888 | dyn_len, &colon);
1889 | 
1890 | user_field_set_string(field, field_name, len, colon);
1891 | |
1892 | match = strcmp(dyn_field_name, field_name) == 0; |
1893 | out: |
1894 | kfree(dyn_field_name);
1895 | kfree(field_name);
1896 | |
1897 | return match; |
1898 | } |
1899 | |
1900 | static bool user_fields_match(struct user_event *user, int argc, |
1901 | const char **argv) |
1902 | { |
1903 | struct ftrace_event_field *field; |
1904 | struct list_head *head = &user->fields; |
1905 | int i = 0; |
1906 | |
1907 | if (argc == 0) |
1908 | return list_empty(head); |
1909 | |
1910 | list_for_each_entry_reverse(field, head, link) { |
1911 | if (!user_field_match(field, argc, argv, &i))
1912 | return false; |
1913 | } |
1914 | |
1915 | if (i != argc) |
1916 | return false; |
1917 | |
1918 | return true; |
1919 | } |
1920 | |
1921 | static bool user_event_match(const char *system, const char *event, |
1922 | int argc, const char **argv, struct dyn_event *ev) |
1923 | { |
1924 | struct user_event *user = container_of(ev, struct user_event, devent); |
1925 | bool match; |
1926 | |
1927 | match = strcmp(EVENT_NAME(user), event) == 0; |
1928 | |
1929 | if (match && system) { |
1930 | match = strcmp(system, user->group->system_name) == 0 || |
1931 | strcmp(system, user->group->system_multi_name) == 0; |
1932 | } |
1933 | |
1934 | if (match) |
1935 | match = user_fields_match(user, argc, argv); |
1936 | |
1937 | return match; |
1938 | } |
1939 | |
1940 | static struct dyn_event_operations user_event_dops = { |
1941 | .create = user_event_create, |
1942 | .show = user_event_show, |
1943 | .is_busy = user_event_is_busy, |
1944 | .free = user_event_free, |
1945 | .match = user_event_match, |
1946 | }; |
1947 | |
1948 | static int user_event_trace_register(struct user_event *user) |
1949 | { |
1950 | int ret; |
1951 | |
1952 | ret = register_trace_event(&user->call.event);
1953 | |
1954 | if (!ret) |
1955 | return -ENODEV; |
1956 | |
1957 | ret = user_event_set_call_visible(user, true);
1958 | |
1959 | if (ret) |
1960 | unregister_trace_event(&user->call.event);
1961 | |
1962 | return ret; |
1963 | } |
1964 | |
1965 | static int user_event_set_tp_name(struct user_event *user) |
1966 | { |
1967 | lockdep_assert_held(&user->group->reg_mutex); |
1968 | |
1969 | if (EVENT_MULTI_FORMAT(user->reg_flags)) { |
1970 | char *multi_name; |
1971 | |
1972 | multi_name = kasprintf(GFP_KERNEL_ACCOUNT, "%s.%llx",
1973 | user->reg_name, user->group->multi_id);
1974 | |
1975 | if (!multi_name) |
1976 | return -ENOMEM; |
1977 | |
1978 | user->call.name = multi_name; |
1979 | user->tracepoint.name = multi_name; |
1980 | |
1981 | /* Inc to ensure unique multi-event name next time */ |
1982 | user->group->multi_id++; |
1983 | } else { |
1984 | /* Non Multi-format uses register name */ |
1985 | user->call.name = user->reg_name; |
1986 | user->tracepoint.name = user->reg_name; |
1987 | } |
1988 | |
1989 | return 0; |
1990 | } |
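
/*
 * Illustrative naming: with EVENT_MULTI_FORMAT set, two registrations of a
 * hypothetical "mytest" event receive unique tracepoint names such as
 * "mytest.0" and "mytest.1" (the suffix is the group's multi_id rendered in
 * hex), while EVENT_NAME() remains "mytest" for both.
 */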
1991 | |
1992 | /* |
1993 | * Parses the event name, arguments and flags, then registers if successful.
1994 | * On success this function takes ownership of the name buffer; the caller
1995 | * frees it only on failure. On success the returned user_event's ref count is increased by 1.
1996 | */ |
1997 | static int user_event_parse(struct user_event_group *group, char *name, |
1998 | char *args, char *flags, |
1999 | struct user_event **newuser, int reg_flags) |
2000 | { |
2001 | struct user_event *user; |
2002 | char **argv = NULL; |
2003 | int argc = 0; |
2004 | int ret; |
2005 | u32 key; |
2006 | |
2007 | /* Currently don't support any text based flags */ |
2008 | if (flags != NULL) |
2009 | return -EINVAL; |
2010 | |
2011 | if (!user_event_capable(reg_flags)) |
2012 | return -EPERM; |
2013 | |
2014 | if (args) { |
2015 | argv = argv_split(GFP_KERNEL, args, &argc);
2016 | |
2017 | if (!argv) |
2018 | return -ENOMEM; |
2019 | } |
2020 | |
2021 | /* Prevent dyn_event from racing */ |
2022 | mutex_lock(&event_mutex); |
2023 | user = find_user_event(group, name, argc, (const char **)argv,
2024 | reg_flags, &key);
2025 | mutex_unlock(&event_mutex);
2026 | |
2027 | if (argv) |
2028 | argv_free(argv); |
2029 | |
2030 | if (IS_ERR(user))
2031 | return PTR_ERR(user);
2032 | |
2033 | if (user) { |
2034 | *newuser = user; |
2035 | /*
2036 | * The name was allocated by the caller; free it here since the
2037 | * event already exists. The caller frees the name only on failure.
2038 | */
2039 | kfree(name);
2040 | |
2041 | return 0; |
2042 | } |
2043 | |
2044 | user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT);
2045 | |
2046 | if (!user) |
2047 | return -ENOMEM; |
2048 | |
2049 | INIT_LIST_HEAD(&user->class.fields);
2050 | INIT_LIST_HEAD(&user->fields);
2051 | INIT_LIST_HEAD(&user->validators);
2052 | |
2053 | user->group = group; |
2054 | user->reg_name = name; |
2055 | user->reg_flags = reg_flags; |
2056 | |
2057 | ret = user_event_set_tp_name(user); |
2058 | |
2059 | if (ret) |
2060 | goto put_user; |
2061 | |
2062 | ret = user_event_parse_fields(user, args); |
2063 | |
2064 | if (ret) |
2065 | goto put_user; |
2066 | |
2067 | ret = user_event_create_print_fmt(user); |
2068 | |
2069 | if (ret) |
2070 | goto put_user; |
2071 | |
2072 | user->call.data = user; |
2073 | user->call.class = &user->class; |
2074 | user->call.flags = TRACE_EVENT_FL_TRACEPOINT; |
2075 | user->call.tp = &user->tracepoint; |
2076 | user->call.event.funcs = &user_event_funcs; |
2077 | |
2078 | if (EVENT_MULTI_FORMAT(user->reg_flags)) |
2079 | user->class.system = group->system_multi_name; |
2080 | else |
2081 | user->class.system = group->system_name; |
2082 | |
2083 | user->class.fields_array = user_event_fields_array; |
2084 | user->class.get_fields = user_event_get_fields; |
2085 | user->class.reg = user_event_reg; |
2086 | user->class.probe = user_event_ftrace; |
2087 | #ifdef CONFIG_PERF_EVENTS |
2088 | user->class.perf_probe = user_event_perf; |
2089 | #endif |
2090 | |
2091 | mutex_lock(&event_mutex); |
2092 | |
2093 | if (current_user_events >= max_user_events) { |
2094 | ret = -EMFILE; |
2095 | goto put_user_lock; |
2096 | } |
2097 | |
2098 | ret = user_event_trace_register(user); |
2099 | |
2100 | if (ret) |
2101 | goto put_user_lock; |
2102 | |
2103 | if (user->reg_flags & USER_EVENT_REG_PERSIST) { |
2104 | /* Ensure we track self ref and caller ref (2) */ |
2105 | refcount_set(&user->refcnt, 2);
2106 | } else { |
2107 | /* Ensure we track only caller ref (1) */ |
2108 | refcount_set(&user->refcnt, 1);
2109 | } |
2110 | |
2111 | dyn_event_init(&user->devent, &user_event_dops);
2112 | dyn_event_add(&user->devent, &user->call);
2113 | hash_add(group->register_table, &user->node, key); |
2114 | current_user_events++; |
2115 | |
2116 | mutex_unlock(&event_mutex);
2117 | |
2118 | *newuser = user; |
2119 | return 0; |
2120 | put_user_lock: |
2121 | mutex_unlock(&event_mutex);
2122 | put_user: |
2123 | user_event_destroy_fields(user); |
2124 | user_event_destroy_validators(user); |
2125 | kfree(user->call.print_fmt);
2126 | |
2127 | /* Caller frees reg_name on error, but not multi-name */ |
2128 | if (EVENT_NAME(user) != EVENT_TP_NAME(user)) |
2129 | kfree(EVENT_TP_NAME(user)); |
2130 | |
2131 | kfree(user);
2132 | return ret; |
2133 | } |
2134 | |
2135 | /* |
2136 | * Deletes previously created events if they are no longer being used. |
2137 | */ |
2138 | static int delete_user_event(struct user_event_group *group, char *name) |
2139 | { |
2140 | struct user_event *user; |
2141 | struct hlist_node *tmp; |
2142 | u32 key = user_event_key(name); |
2143 | int ret = -ENOENT; |
2144 | |
2145 | /* Attempt to delete all event(s) with the name passed in */ |
2146 | hash_for_each_possible_safe(group->register_table, user, tmp, node, key) { |
2147 | if (strcmp(EVENT_NAME(user), name)) |
2148 | continue; |
2149 | |
2150 | if (!user_event_last_ref(user)) |
2151 | return -EBUSY; |
2152 | |
2153 | if (!user_event_capable(user->reg_flags))
2154 | return -EPERM; |
2155 | |
2156 | ret = destroy_user_event(user); |
2157 | |
2158 | if (ret) |
2159 | goto out; |
2160 | } |
2161 | out: |
2162 | return ret; |
2163 | } |
2164 | |
2165 | /* |
2166 | * Validates the user payload and writes via iterator. |
2167 | */ |
2168 | static ssize_t user_events_write_core(struct file *file, struct iov_iter *i) |
2169 | { |
2170 | struct user_event_file_info *info = file->private_data; |
2171 | struct user_event_refs *refs; |
2172 | struct user_event *user = NULL; |
2173 | struct tracepoint *tp; |
2174 | ssize_t ret = i->count; |
2175 | int idx; |
2176 | |
2177 | if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx))) |
2178 | return -EFAULT; |
2179 | |
2180 | if (idx < 0) |
2181 | return -EINVAL; |
2182 | |
2183 | rcu_read_lock_sched(); |
2184 | |
2185 | refs = rcu_dereference_sched(info->refs); |
2186 | |
2187 | /* |
2188 | * The refs->events array is protected by RCU, and new items may be |
2189 | * added. But the user retrieved from indexing into the events array |
2190 | * shall be immutable while the file is open.
2191 | */ |
2192 | if (likely(refs && idx < refs->count)) |
2193 | user = refs->events[idx]; |
2194 | |
2195 | rcu_read_unlock_sched(); |
2196 | |
2197 | if (unlikely(user == NULL)) |
2198 | return -ENOENT; |
2199 | |
2200 | if (unlikely(i->count < user->min_size)) |
2201 | return -EINVAL; |
2202 | |
2203 | tp = &user->tracepoint; |
2204 | |
2205 | /*
2206 | * It's possible key.enabled becomes disabled after this check; we
2207 | * don't mind if a few events slip through during that window.
2208 | */
2209 | if (likely(atomic_read(&tp->key.enabled) > 0)) { |
2210 | struct tracepoint_func *probe_func_ptr; |
2211 | user_event_func_t probe_func; |
2212 | struct iov_iter copy; |
2213 | void *tpdata; |
2214 | bool faulted; |
2215 | |
2216 | if (unlikely(fault_in_iov_iter_readable(i, i->count))) |
2217 | return -EFAULT; |
2218 | |
2219 | faulted = false; |
2220 | |
2221 | rcu_read_lock_sched(); |
2222 | |
2223 | probe_func_ptr = rcu_dereference_sched(tp->funcs); |
2224 | |
2225 | if (probe_func_ptr) { |
2226 | do { |
2227 | copy = *i; |
2228 | probe_func = probe_func_ptr->func; |
2229 | tpdata = probe_func_ptr->data; |
2230 | probe_func(user, ©, tpdata, &faulted); |
2231 | } while ((++probe_func_ptr)->func); |
2232 | } |
2233 | |
2234 | rcu_read_unlock_sched(); |
2235 | |
2236 | if (unlikely(faulted)) |
2237 | return -EFAULT; |
2238 | } else |
2239 | return -EBADF; |
2240 | |
2241 | return ret; |
2242 | } |
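
/*
 * Minimal user-space sketch of the payload consumed above (illustrative,
 * following Documentation/trace/user_events.rst): the first 4 bytes are the
 * write_index returned by DIAG_IOCSREG, followed by the raw event data.
 *
 *	struct iovec io[2];
 *	__u32 count = 1;			// hypothetical "u32 count" field
 *
 *	io[0].iov_base = &write_index;		// from reg.write_index
 *	io[0].iov_len = sizeof(write_index);
 *	io[1].iov_base = &count;
 *	io[1].iov_len = sizeof(count);
 *
 *	writev(data_fd, io, 2);			// fd of user_events_data
 */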
2243 | |
2244 | static int user_events_open(struct inode *node, struct file *file) |
2245 | { |
2246 | struct user_event_group *group; |
2247 | struct user_event_file_info *info; |
2248 | |
2249 | group = current_user_event_group(); |
2250 | |
2251 | if (!group) |
2252 | return -ENOENT; |
2253 | |
2254 | info = kzalloc(sizeof(*info), GFP_KERNEL_ACCOUNT);
2255 | |
2256 | if (!info) |
2257 | return -ENOMEM; |
2258 | |
2259 | info->group = group; |
2260 | |
2261 | file->private_data = info; |
2262 | |
2263 | return 0; |
2264 | } |
2265 | |
2266 | static ssize_t user_events_write(struct file *file, const char __user *ubuf, |
2267 | size_t count, loff_t *ppos) |
2268 | { |
2269 | struct iov_iter i; |
2270 | |
2271 | if (unlikely(*ppos != 0)) |
2272 | return -EFAULT; |
2273 | |
2274 | if (unlikely(import_ubuf(ITER_SOURCE, (char __user *)ubuf, count, &i))) |
2275 | return -EFAULT; |
2276 | |
2277 | return user_events_write_core(file, &i);
2278 | } |
2279 | |
2280 | static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i) |
2281 | { |
2282 | return user_events_write_core(kp->ki_filp, i);
2283 | } |
2284 | |
2285 | static int user_events_ref_add(struct user_event_file_info *info, |
2286 | struct user_event *user) |
2287 | { |
2288 | struct user_event_group *group = info->group; |
2289 | struct user_event_refs *refs, *new_refs; |
2290 | int i, size, count = 0; |
2291 | |
2292 | refs = rcu_dereference_protected(info->refs, |
2293 | lockdep_is_held(&group->reg_mutex)); |
2294 | |
2295 | if (refs) { |
2296 | count = refs->count; |
2297 | |
2298 | for (i = 0; i < count; ++i) |
2299 | if (refs->events[i] == user) |
2300 | return i; |
2301 | } |
2302 | |
2303 | size = struct_size(refs, events, count + 1); |
2304 | |
2305 | new_refs = kzalloc(size, GFP_KERNEL_ACCOUNT); |
2306 | |
2307 | if (!new_refs) |
2308 | return -ENOMEM; |
2309 | |
2310 | new_refs->count = count + 1; |
2311 | |
2312 | for (i = 0; i < count; ++i) |
2313 | new_refs->events[i] = refs->events[i]; |
2314 | |
2315 | new_refs->events[i] = user_event_get(user); |
2316 | |
2317 | rcu_assign_pointer(info->refs, new_refs); |
2318 | |
2319 | if (refs) |
2320 | kfree_rcu(refs, rcu); |
2321 | |
2322 | return i; |
2323 | } |
2324 | |
2325 | static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg) |
2326 | { |
2327 | u32 size; |
2328 | long ret; |
2329 | |
2330 | ret = get_user(size, &ureg->size); |
2331 | |
2332 | if (ret) |
2333 | return ret; |
2334 | |
2335 | if (size > PAGE_SIZE) |
2336 | return -E2BIG; |
2337 | |
2338 | if (size < offsetofend(struct user_reg, write_index)) |
2339 | return -EINVAL; |
2340 | |
2341 | ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
2342 | |
2343 | if (ret) |
2344 | return ret; |
2345 | |
2346 | /* Ensure only valid flags */ |
2347 | if (kreg->flags & ~(USER_EVENT_REG_MAX-1)) |
2348 | return -EINVAL; |
2349 | |
2350 | /* Ensure supported size */ |
2351 | switch (kreg->enable_size) { |
2352 | case 4: |
2353 | /* 32-bit */ |
2354 | break; |
2355 | #if BITS_PER_LONG >= 64 |
2356 | case 8: |
2357 | /* 64-bit */ |
2358 | break; |
2359 | #endif |
2360 | default: |
2361 | return -EINVAL; |
2362 | } |
2363 | |
2364 | /* Ensure natural alignment */ |
2365 | if (kreg->enable_addr % kreg->enable_size) |
2366 | return -EINVAL; |
2367 | |
2368 | /* Ensure bit range for size */ |
2369 | if (kreg->enable_bit > (kreg->enable_size * BITS_PER_BYTE) - 1) |
2370 | return -EINVAL; |
2371 | |
2372 | /* Ensure accessible */ |
2373 | if (!access_ok((const void __user *)(uintptr_t)kreg->enable_addr, |
2374 | kreg->enable_size)) |
2375 | return -EFAULT; |
2376 | |
2377 | kreg->size = size; |
2378 | |
2379 | return 0; |
2380 | } |
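
/*
 * Minimal sketch of a struct user_reg that passes the checks above
 * (illustrative values): a 32-bit enable word, naturally aligned, bit 0.
 *
 *	struct user_reg reg = {0};
 *	__u32 enabled = 0;			// user-space enable word
 *
 *	reg.size = sizeof(reg);
 *	reg.enable_bit = 0;			// must be < 32 for 4 bytes
 *	reg.enable_size = sizeof(enabled);	// only 4 (or 8) is valid
 *	reg.enable_addr = (__u64)&enabled;	// must be size-aligned
 *	reg.name_args = (__u64)"mytest u32 count";
 */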
2381 | |
2382 | /* |
2383 | * Registers a user_event on behalf of a user process. |
2384 | */ |
2385 | static long user_events_ioctl_reg(struct user_event_file_info *info, |
2386 | unsigned long uarg) |
2387 | { |
2388 | struct user_reg __user *ureg = (struct user_reg __user *)uarg; |
2389 | struct user_reg reg; |
2390 | struct user_event *user; |
2391 | struct user_event_enabler *enabler; |
2392 | char *name; |
2393 | long ret; |
2394 | int write_result; |
2395 | |
2396 | ret = user_reg_get(ureg, &reg);
2397 | |
2398 | if (ret) |
2399 | return ret; |
2400 | |
2401 | /*
2402 | * Prevent users from using the same address and bit multiple times
2403 | * within the same mm address space. This can cause unexpected behavior
2404 | * in user processes that is far easier to debug if it is explicitly
2405 | * an error upon registering.
2406 | */
2407 | if (current_user_event_enabler_exists((unsigned long)reg.enable_addr,
2408 | reg.enable_bit))
2409 | return -EADDRINUSE;
2410 | |
2411 | name = strndup_user((const char __user *)(uintptr_t)reg.name_args, |
2412 | MAX_EVENT_DESC); |
2413 | |
2414 | if (IS_ERR(name)) {
2415 | ret = PTR_ERR(name);
2416 | return ret; |
2417 | } |
2418 | |
2419 | ret = user_event_parse_cmd(info->group, name, &user, reg.flags);
2420 | |
2421 | if (ret) { |
2422 | kfree(name);
2423 | return ret; |
2424 | } |
2425 | |
2426 | ret = user_events_ref_add(info, user); |
2427 | |
2428 | /* No longer need parse ref, ref_add either worked or not */ |
2429 | user_event_put(user, false);
2430 | |
2431 | /* Positive number is index and valid */ |
2432 | if (ret < 0) |
2433 | return ret; |
2434 | |
2435 | /*
2436 | * user_events_ref_add succeeded:
2437 | * At this point we have a user_event; its lifetime is bound by the
2438 | * reference count, not this file. If anything fails, the user_event
2439 | * still has a reference until the file is released. During release
2440 | * any remaining references (from user_events_ref_add) are decremented.
2441 | *
2442 | * Attempt to create an enabler, whose lifetime is tied to the event
2443 | * in the same way. Once the task that caused the enabler to be
2444 | * created exits or issues exec(), the enablers it created are
2445 | * destroyed and the ref to the event is decremented.
2446 | */
2447 | enabler = user_event_enabler_create(&reg, user, &write_result);
2448 | |
2449 | if (!enabler) |
2450 | return -ENOMEM; |
2451 | |
2452 | /* Write failed/faulted, give error back to caller */ |
2453 | if (write_result) |
2454 | return write_result; |
2455 | |
2456 | put_user((u32)ret, &ureg->write_index); |
2457 | |
2458 | return 0; |
2459 | } |
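
/*
 * Illustrative usage, continuing the struct user_reg sketch above: register
 * via DIAG_IOCSREG, then test the enable bit before building event data.
 *
 *	if (ioctl(data_fd, DIAG_IOCSREG, &reg) == 0 &&
 *	    (enabled & (1 << reg.enable_bit)))
 *		writev(data_fd, io, 2);		// see the payload sketch
 *
 * On success reg.write_index holds the index to place in the first 4 bytes
 * of each subsequent write for this event.
 */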
2460 | |
2461 | /* |
2462 | * Deletes a user_event on behalf of a user process. |
2463 | */ |
2464 | static long user_events_ioctl_del(struct user_event_file_info *info, |
2465 | unsigned long uarg) |
2466 | { |
2467 | void __user *ubuf = (void __user *)uarg; |
2468 | char *name; |
2469 | long ret; |
2470 | |
2471 | name = strndup_user(ubuf, MAX_EVENT_DESC); |
2472 | |
2473 | if (IS_ERR(name))
2474 | return PTR_ERR(name);
2475 | |
2476 | /* event_mutex prevents dyn_event from racing */ |
2477 | mutex_lock(&event_mutex); |
2478 | ret = delete_user_event(info->group, name);
2479 | mutex_unlock(&event_mutex);
2480 | |
2481 | kfree(name);
2482 | |
2483 | return ret; |
2484 | } |
2485 | |
2486 | static long user_unreg_get(struct user_unreg __user *ureg, |
2487 | struct user_unreg *kreg) |
2488 | { |
2489 | u32 size; |
2490 | long ret; |
2491 | |
2492 | ret = get_user(size, &ureg->size); |
2493 | |
2494 | if (ret) |
2495 | return ret; |
2496 | |
2497 | if (size > PAGE_SIZE) |
2498 | return -E2BIG; |
2499 | |
2500 | if (size < offsetofend(struct user_unreg, disable_addr)) |
2501 | return -EINVAL; |
2502 | |
2503 | ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
2504 | |
2505 | /* Ensure no reserved values, since we don't support any yet */ |
2506 | if (kreg->__reserved || kreg->__reserved2) |
2507 | return -EINVAL; |
2508 | |
2509 | return ret; |
2510 | } |
2511 | |
2512 | static int user_event_mm_clear_bit(struct user_event_mm *user_mm, |
2513 | unsigned long uaddr, unsigned char bit, |
2514 | unsigned long flags) |
2515 | { |
2516 | struct user_event_enabler enabler; |
2517 | int result; |
2518 | int attempt = 0; |
2519 | |
2520 | memset(&enabler, 0, sizeof(enabler)); |
2521 | enabler.addr = uaddr; |
2522 | enabler.values = bit | flags; |
2523 | retry: |
2524 | /* Prevents state changes from racing with new enablers */ |
2525 | mutex_lock(&event_mutex); |
2526 | |
2527 | /* Force the bit to be cleared, since no event is attached */ |
2528 | mmap_read_lock(user_mm->mm);
2529 | result = user_event_enabler_write(user_mm, &enabler, false, &attempt);
2530 | mmap_read_unlock(user_mm->mm);
2531 | |
2532 | mutex_unlock(&event_mutex);
2533 | |
2534 | if (result) { |
2535 | /* Attempt to fault-in and retry if it worked */ |
2536 | if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
2537 | goto retry; |
2538 | } |
2539 | |
2540 | return result; |
2541 | } |
2542 | |
2543 | /* |
2544 | * Unregisters an enablement address/bit within a task/user mm. |
2545 | */ |
2546 | static long user_events_ioctl_unreg(unsigned long uarg) |
2547 | { |
2548 | struct user_unreg __user *ureg = (struct user_unreg __user *)uarg; |
2549 | struct user_event_mm *mm = current->user_event_mm; |
2550 | struct user_event_enabler *enabler, *next; |
2551 | struct user_unreg reg; |
2552 | unsigned long flags; |
2553 | long ret; |
2554 | |
2555 | ret = user_unreg_get(ureg, &reg);
2556 | |
2557 | if (ret) |
2558 | return ret; |
2559 | |
2560 | if (!mm) |
2561 | return -ENOENT; |
2562 | |
2563 | flags = 0; |
2564 | ret = -ENOENT; |
2565 | |
2566 | /*
2567 | * The freeing and faulting flags indicate whether the enabler is in use
2568 | * at all. When faulting is set, a page-fault is being handled
2569 | * asynchronously; if freeing is set during that async fault, the
2570 | * enabler will be destroyed when the fault completes. If no async fault
2571 | * is in flight, we can destroy it now, since we hold the event_mutex.
2572 | */
2573 | mutex_lock(&event_mutex); |
2574 | |
2575 | list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) { |
2576 | if (enabler->addr == reg.disable_addr && |
2577 | ENABLE_BIT(enabler) == reg.disable_bit) { |
2578 | set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler)); |
2579 | |
2580 | /* We must keep compat flags for the clear */ |
2581 | flags |= enabler->values & ENABLE_VAL_COMPAT_MASK; |
2582 | |
2583 | if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler))) |
2584 | user_event_enabler_destroy(enabler, true);
2585 | |
2586 | /* Removed at least one */ |
2587 | ret = 0; |
2588 | } |
2589 | } |
2590 | |
2591 | mutex_unlock(&event_mutex);
2592 | |
2593 | /* Ensure bit is now cleared for user, regardless of event status */ |
2594 | if (!ret) |
2595 | ret = user_event_mm_clear_bit(mm, reg.disable_addr,
2596 | reg.disable_bit, flags);
2597 | |
2598 | return ret; |
2599 | } |
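
/*
 * Illustrative unregister sketch matching the lookup above: the address and
 * bit must be the same ones given at DIAG_IOCSREG time.
 *
 *	struct user_unreg unreg = {0};
 *
 *	unreg.size = sizeof(unreg);
 *	unreg.disable_bit = reg.enable_bit;
 *	unreg.disable_addr = reg.enable_addr;
 *
 *	ioctl(data_fd, DIAG_IOCSUNREG, &unreg);
 */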
2600 | |
2601 | /* |
2602 | * Handles the ioctl from user mode to register or alter operations. |
2603 | */ |
2604 | static long user_events_ioctl(struct file *file, unsigned int cmd, |
2605 | unsigned long uarg) |
2606 | { |
2607 | struct user_event_file_info *info = file->private_data; |
2608 | struct user_event_group *group = info->group; |
2609 | long ret = -ENOTTY; |
2610 | |
2611 | switch (cmd) { |
2612 | case DIAG_IOCSREG: |
2613 | mutex_lock(&group->reg_mutex); |
2614 | ret = user_events_ioctl_reg(info, uarg); |
2615 | mutex_unlock(&group->reg_mutex);
2616 | break; |
2617 | |
2618 | case DIAG_IOCSDEL: |
2619 | mutex_lock(&group->reg_mutex); |
2620 | ret = user_events_ioctl_del(info, uarg); |
2621 | mutex_unlock(&group->reg_mutex);
2622 | break; |
2623 | |
2624 | case DIAG_IOCSUNREG: |
2625 | mutex_lock(&group->reg_mutex); |
2626 | ret = user_events_ioctl_unreg(uarg); |
2627 | mutex_unlock(&group->reg_mutex);
2628 | break; |
2629 | } |
2630 | |
2631 | return ret; |
2632 | } |
2633 | |
2634 | /* |
2635 | * Handles the final close of the file from user mode. |
2636 | */ |
2637 | static int user_events_release(struct inode *node, struct file *file) |
2638 | { |
2639 | struct user_event_file_info *info = file->private_data; |
2640 | struct user_event_group *group; |
2641 | struct user_event_refs *refs; |
2642 | int i; |
2643 | |
2644 | if (!info) |
2645 | return -EINVAL; |
2646 | |
2647 | group = info->group; |
2648 | |
2649 | /* |
2650 | * Ensure refs cannot change under any situation by taking the |
2651 | * register mutex during the final freeing of the references. |
2652 | */ |
2653 | mutex_lock(&group->reg_mutex); |
2654 | |
2655 | refs = info->refs; |
2656 | |
2657 | if (!refs) |
2658 | goto out; |
2659 | |
2660 | /* |
2661 | * The lifetime of refs has reached an end, it's tied to this file. |
2662 | * The underlying user_events are ref counted, and cannot be freed. |
2663 | * After this decrement, the user_events may be freed elsewhere. |
2664 | */ |
2665 | for (i = 0; i < refs->count; ++i) |
2666 | user_event_put(refs->events[i], false);
2667 | |
2668 | out: |
2669 | file->private_data = NULL; |
2670 | |
2671 | mutex_unlock(&group->reg_mutex);
2672 | |
2673 | kfree(refs);
2674 | kfree(info);
2675 | |
2676 | return 0; |
2677 | } |
2678 | |
2679 | static const struct file_operations user_data_fops = { |
2680 | .open = user_events_open, |
2681 | .write = user_events_write, |
2682 | .write_iter = user_events_write_iter, |
2683 | .unlocked_ioctl = user_events_ioctl, |
2684 | .release = user_events_release, |
2685 | }; |
2686 | |
2687 | static void *user_seq_start(struct seq_file *m, loff_t *pos) |
2688 | { |
2689 | if (*pos) |
2690 | return NULL; |
2691 | |
2692 | return (void *)1; |
2693 | } |
2694 | |
2695 | static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos) |
2696 | { |
2697 | ++*pos; |
2698 | return NULL; |
2699 | } |
2700 | |
2701 | static void user_seq_stop(struct seq_file *m, void *p) |
2702 | { |
2703 | } |
2704 | |
2705 | static int user_seq_show(struct seq_file *m, void *p) |
2706 | { |
2707 | struct user_event_group *group = m->private; |
2708 | struct user_event *user; |
2709 | char status; |
2710 | int i, active = 0, busy = 0; |
2711 | |
2712 | if (!group) |
2713 | return -EINVAL; |
2714 | |
2715 | mutex_lock(&group->reg_mutex); |
2716 | |
2717 | hash_for_each(group->register_table, i, user, node) { |
2718 | status = user->status; |
2719 | |
2720 | seq_printf(m, "%s", EVENT_TP_NAME(user));
2721 | 
2722 | if (status != 0)
2723 | seq_puts(m, " #");
2724 | 
2725 | if (status != 0) {
2726 | seq_puts(m, " Used by");
2727 | if (status & EVENT_STATUS_FTRACE)
2728 | seq_puts(m, " ftrace");
2729 | if (status & EVENT_STATUS_PERF)
2730 | seq_puts(m, " perf");
2731 | if (status & EVENT_STATUS_OTHER)
2732 | seq_puts(m, " other");
2733 | busy++;
2734 | }
2735 | 
2736 | seq_puts(m, "\n");
2737 | active++;
2738 | }
2739 | 
2740 | mutex_unlock(&group->reg_mutex);
2741 | 
2742 | seq_puts(m, "\n");
2743 | seq_printf(m, "Active: %d\n", active);
2744 | seq_printf(m, "Busy: %d\n", busy);
2745 | |
2746 | return 0; |
2747 | } |
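
/*
 * Illustrative user_events_status output for a single hypothetical event
 * currently attached by ftrace:
 *
 *	mytest # Used by ftrace
 *
 *	Active: 1
 *	Busy: 1
 */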
2748 | |
2749 | static const struct seq_operations user_seq_ops = { |
2750 | .start = user_seq_start, |
2751 | .next = user_seq_next, |
2752 | .stop = user_seq_stop, |
2753 | .show = user_seq_show, |
2754 | }; |
2755 | |
2756 | static int user_status_open(struct inode *node, struct file *file) |
2757 | { |
2758 | struct user_event_group *group; |
2759 | int ret; |
2760 | |
2761 | group = current_user_event_group(); |
2762 | |
2763 | if (!group) |
2764 | return -ENOENT; |
2765 | |
2766 | ret = seq_open(file, &user_seq_ops); |
2767 | |
2768 | if (!ret) { |
2769 | /* Chain group to seq_file */ |
2770 | struct seq_file *m = file->private_data; |
2771 | |
2772 | m->private = group; |
2773 | } |
2774 | |
2775 | return ret; |
2776 | } |
2777 | |
2778 | static const struct file_operations user_status_fops = { |
2779 | .open = user_status_open, |
2780 | .read = seq_read, |
2781 | .llseek = seq_lseek, |
2782 | .release = seq_release, |
2783 | }; |
2784 | |
2785 | /* |
2786 | * Creates a set of tracefs files to allow user mode interactions. |
2787 | */ |
2788 | static int create_user_tracefs(void) |
2789 | { |
2790 | struct dentry *edata, *emmap; |
2791 | |
2792 | edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE,
2793 | NULL, NULL, &user_data_fops);
2794 | 
2795 | if (!edata) {
2796 | pr_warn("Could not create tracefs 'user_events_data' entry\n");
2797 | goto err; |
2798 | } |
2799 | |
2800 | emmap = tracefs_create_file("user_events_status", TRACE_MODE_READ,
2801 | NULL, NULL, &user_status_fops);
2802 | 
2803 | if (!emmap) {
2804 | tracefs_remove(edata);
2805 | pr_warn("Could not create tracefs 'user_events_status' entry\n");
2806 | goto err; |
2807 | } |
2808 | |
2809 | return 0; |
2810 | err: |
2811 | return -ENODEV; |
2812 | } |
2813 | |
2814 | static int set_max_user_events_sysctl(struct ctl_table *table, int write, |
2815 | void *buffer, size_t *lenp, loff_t *ppos) |
2816 | { |
2817 | int ret; |
2818 | |
2819 | mutex_lock(&event_mutex); |
2820 | |
2821 | ret = proc_douintvec(table, write, buffer, lenp, ppos); |
2822 | |
2823 | mutex_unlock(&event_mutex);
2824 | |
2825 | return ret; |
2826 | } |
2827 | |
2828 | static struct ctl_table user_event_sysctls[] = { |
2829 | { |
2830 | .procname = "user_events_max",
2831 | .data = &max_user_events, |
2832 | .maxlen = sizeof(unsigned int), |
2833 | .mode = 0644, |
2834 | .proc_handler = set_max_user_events_sysctl, |
2835 | }, |
2836 | {} |
2837 | }; |
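
/*
 * The cap above is tunable at runtime, e.g. (illustrative):
 *
 *	# sysctl kernel.user_events_max=65536
 *
 * Lowering the value does not remove existing events; it only constrains
 * new registrations, since the check occurs at parse time under event_mutex.
 */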
2838 | |
2839 | static int __init trace_events_user_init(void) |
2840 | { |
2841 | int ret; |
2842 | |
2843 | fault_cache = KMEM_CACHE(user_event_enabler_fault, 0); |
2844 | |
2845 | if (!fault_cache) |
2846 | return -ENOMEM; |
2847 | |
2848 | init_group = user_event_group_create(); |
2849 | |
2850 | if (!init_group) { |
2851 | kmem_cache_destroy(fault_cache);
2852 | return -ENOMEM; |
2853 | } |
2854 | |
2855 | ret = create_user_tracefs(); |
2856 | |
2857 | if (ret) { |
2858 | pr_warn("user_events could not register with tracefs\n");
2859 | user_event_group_destroy(init_group);
2860 | kmem_cache_destroy(fault_cache);
2861 | init_group = NULL; |
2862 | return ret; |
2863 | } |
2864 | |
2865 | if (dyn_event_register(&user_event_dops))
2866 | pr_warn("user_events could not register with dyn_events\n");
2867 | |
2868 | register_sysctl_init("kernel", user_event_sysctls);
2869 | |
2870 | return 0; |
2871 | } |
2872 | |
2873 | fs_initcall(trace_events_user_init); |
2874 | |