// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* folio_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
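
/*
 * Worked example (a sketch; UPROBE_XOL_SLOT_BYTES is arch-defined):
 * with 4 KiB pages and 128-byte slots, as on x86, this yields
 * 4096 / 128 = 32 XOL slots per page.
 */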

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * Allows us to skip the uprobe_mmap if there are no uprobe events active
 * at this time. Probably a fine-grained per-inode count would be better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

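/*
 * dup_mmap_sem is read-locked around dup_mmap() (see
 * uprobe_start_dup_mmap()) and write-locked in register_for_each_vma(),
 * so a concurrent fork() cannot miss a breakpoint that is being
 * installed or removed while the parent's mm is duplicated.
 */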
DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	refcount_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	loff_t			ref_ctr_offset;
	unsigned long		flags;

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 *	insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 *	ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

struct delayed_uprobe {
	struct list_head list;
	struct uprobe *uprobe;
	struct mm_struct *mm;
};

static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);
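
/*
 * A reference counter (typically an SDT semaphore in the probed binary)
 * may live in a vma that is not mapped yet when the probe is registered;
 * such increments are parked on delayed_uprobe_list and applied later
 * from uprobe_mmap() via delayed_ref_ctr_inc().
 */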

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, a thread contends for a slot.  It frees the
 * slot after singlestep. Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
	wait_queue_head_t		wq;		/* if all slots are busy */
	atomic_t			slot_count;	/* number of in-use slots */
	unsigned long			*bitmap;	/* 0 = free slot */

	struct vm_special_mapping	xol_mapping;
	struct page			*pages[2];
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long			vaddr;		/* Page(s) of instruction slots */
};
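
/*
 * Slot i of an xol_area lives at area->vaddr + i * UPROBE_XOL_SLOT_BYTES;
 * see xol_take_insn_slot() and xol_free_insn_slot() for the matching
 * bitmap arithmetic. Slot 0 is reserved for the uretprobe trampoline,
 * see get_trampoline_vaddr().
 */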

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return true if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
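
/*
 * Worked example for the two helpers above: with vma->vm_start == 0x400000
 * and vma->vm_pgoff == 0, file offset 0x1234 maps to vaddr 0x401234, and
 * vaddr_to_offset() inverts that mapping.
 */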

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @old_page: the page to be replaced
 * @new_page: the modified page replacing @old_page
 *
 * If @new_page is NULL, only unmap @old_page.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct folio *old_folio = page_folio(old_page);
	struct folio *new_folio;
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
	int err;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + PAGE_SIZE);

	if (new_page) {
		new_folio = page_folio(new_page);
		err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL);
		if (err)
			return err;
	}

	/* For folio_free_swap() below */
	folio_lock(old_folio);

	mmu_notifier_invalidate_range_start(&range);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw))
		goto unlock;
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	if (new_page) {
		folio_get(new_folio);
		page_add_new_anon_rmap(new_page, vma, addr);
		folio_add_lru_vma(new_folio, vma);
	} else
		/* no new page, just dec_mm_counter for old_page */
		dec_mm_counter(mm, MM_ANONPAGES);

	if (!folio_test_anon(old_folio)) {
		dec_mm_counter(mm, mm_counter_file(old_page));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
	ptep_clear_flush(vma, addr, pvmw.pte);
	if (new_page)
		set_pte_at_notify(mm, addr, pvmw.pte,
				  mk_pte(new_page, vma->vm_page_prot));

	page_remove_rmap(old_page, vma, false);
	if (!folio_mapped(old_folio))
		folio_free_swap(old_folio);
	page_vma_mapped_walk_done(&pvmw);
	folio_put(old_folio);

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(&range);
	folio_unlock(old_folio);
	return err;
}

/**
 * is_swbp_insn - check if instruction is a breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is a trap instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a trap instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

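/*
 * verify_opcode - check whether writing @new_opcode at @vaddr is still
 * required. Returns 0 if the page already holds the desired state
 * (register: breakpoint already installed; unregister: no breakpoint
 * of ours to remove), 1 if the caller should go ahead and write.
 */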
static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}

static struct delayed_uprobe *
delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	list_for_each_entry(du, &delayed_uprobe_list, list)
		if (du->uprobe == uprobe && du->mm == mm)
			return du;
	return NULL;
}

static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	if (delayed_uprobe_check(uprobe, mm))
		return 0;

	du = kzalloc(sizeof(*du), GFP_KERNEL);
	if (!du)
		return -ENOMEM;

	du->uprobe = uprobe;
	du->mm = mm;
	list_add(&du->list, &delayed_uprobe_list);
	return 0;
}

static void delayed_uprobe_delete(struct delayed_uprobe *du)
{
	if (WARN_ON(!du))
		return;
	list_del(&du->list);
	kfree(du);
}

static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;

	if (!uprobe && !mm)
		return;

	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (uprobe && du->uprobe != uprobe)
			continue;
		if (mm && du->mm != mm)
			continue;

		delayed_uprobe_delete(du);
	}
}

static bool valid_ref_ctr_vma(struct uprobe *uprobe,
			      struct vm_area_struct *vma)
{
	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);

	return uprobe->ref_ctr_offset &&
		vma->vm_file &&
		file_inode(vma->vm_file) == uprobe->inode &&
		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
		vma->vm_start <= vaddr &&
		vma->vm_end > vaddr;
}

static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *tmp;

	for_each_vma(vmi, tmp)
		if (valid_ref_ctr_vma(uprobe, tmp))
			return tmp;

	return NULL;
}

static int
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
{
	void *kaddr;
	struct page *page;
	int ret;
	short *ptr;

	if (!vaddr || !d)
		return -EINVAL;

	ret = get_user_pages_remote(mm, vaddr, 1,
				    FOLL_WRITE, &page, NULL);
	if (unlikely(ret <= 0)) {
		/*
		 * We are asking for 1 page. If get_user_pages_remote() fails,
		 * it may return 0, in that case we have to return error.
		 */
		return ret == 0 ? -EBUSY : ret;
	}

	kaddr = kmap_atomic(page);
	ptr = kaddr + (vaddr & ~PAGE_MASK);

	if (unlikely(*ptr + d < 0)) {
		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
		ret = -EINVAL;
		goto out;
	}

	*ptr += d;
	ret = 0;
out:
	kunmap_atomic(kaddr);
	put_page(page);
	return ret;
}

static void update_ref_ctr_warn(struct uprobe *uprobe,
				struct mm_struct *mm, short d)
{
	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
		(unsigned long long) uprobe->offset,
		(unsigned long long) uprobe->ref_ctr_offset, mm);
}

static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
			  short d)
{
	struct vm_area_struct *rc_vma;
	unsigned long rc_vaddr;
	int ret = 0;

	rc_vma = find_ref_ctr_vma(uprobe, mm);

	if (rc_vma) {
		rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(mm, rc_vaddr, d);
		if (ret)
			update_ref_ctr_warn(uprobe, mm, d);

		if (d > 0)
			return ret;
	}

	mutex_lock(&delayed_uprobe_lock);
	if (d > 0)
		ret = delayed_uprobe_add(uprobe, mm);
	else
		delayed_uprobe_remove(uprobe, mm);
	mutex_unlock(&delayed_uprobe_lock);

	return ret;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable-length instructions and the
 * breakpoint instruction is not of the smallest length supported by that
 * architecture, then we need to modify is_trap_at_addr() and
 * uprobe_write_opcode() accordingly. This would never be a problem for archs
 * that have fixed-length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_lock held for write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct uprobe *uprobe;
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret, is_register, ref_ctr_updated = 0;
	bool orig_page_huge = false;
	unsigned int gup_flags = FOLL_FORCE;

	is_register = is_swbp_insn(&opcode);
	uprobe = container_of(auprobe, struct uprobe, arch);

retry:
	if (is_register)
		gup_flags |= FOLL_SPLIT_PMD;
	/* Read the page with vaddr into memory */
	old_page = get_user_page_vma_remote(mm, vaddr, gup_flags, &vma);
	if (IS_ERR(old_page))
		return PTR_ERR(old_page);

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	if (WARN(!is_register && PageCompound(old_page),
		 "uprobe unregister should never work on compound page\n")) {
		ret = -EINVAL;
		goto put_old;
	}

	/* We are going to replace instruction, update ref_ctr. */
	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
		if (ret)
			goto put_old;

		ref_ctr_updated = 1;
	}

	ret = 0;
	if (!is_register && !PageAnon(old_page))
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	if (!is_register) {
		struct page *orig_page;
		pgoff_t index;

		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);

		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
					  index);

		if (orig_page) {
			if (PageUptodate(orig_page) &&
			    pages_identical(new_page, orig_page)) {
				/* let go new_page */
				put_page(new_page);
				new_page = NULL;

				if (PageCompound(orig_page))
					orig_page_huge = true;
			}
			put_page(orig_page);
		}
	}

	ret = __replace_page(vma, vaddr, old_page, new_page);
	if (new_page)
		put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;

	/* Revert back reference counter if instruction update failed. */
	if (ret && is_register && ref_ctr_updated)
		update_ref_ctr(uprobe, mm, -1);

	/* try collapse pmd for compound page */
	if (!ret && orig_page_huge)
		collapse_pte_mapped_thp(mm, vaddr, false);

	return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr,
			*(uprobe_opcode_t *)&auprobe->insn);
}

static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	refcount_inc(&uprobe->ref);
	return uprobe;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (refcount_dec_and_test(&uprobe->ref)) {
		/*
		 * If application munmap(exec_vma) before uprobe_unregister()
		 * gets called, we don't get a chance to remove uprobe from
		 * delayed_uprobe_list from remove_breakpoint(). Do it here.
		 */
		mutex_lock(&delayed_uprobe_lock);
		delayed_uprobe_remove(uprobe, NULL);
		mutex_unlock(&delayed_uprobe_lock);
		kfree(uprobe);
	}
}

static __always_inline
int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
	       const struct uprobe *r)
{
	if (l_inode < r->inode)
		return -1;

	if (l_inode > r->inode)
		return 1;

	if (l_offset < r->offset)
		return -1;

	if (l_offset > r->offset)
		return 1;

	return 0;
}

#define __node_2_uprobe(node) \
	rb_entry((node), struct uprobe, rb_node)

struct __uprobe_key {
	struct inode *inode;
	loff_t offset;
};

static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
{
	const struct __uprobe_key *a = key;
	return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
}

static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
{
	struct uprobe *u = __node_2_uprobe(a);
	return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct __uprobe_key key = {
		.inode = inode,
		.offset = offset,
	};
	struct rb_node *node = rb_find(&key, &uprobes_tree, __uprobe_cmp_key);

	if (node)
		return get_uprobe(__node_2_uprobe(node));

	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node *node;

	node = rb_find_add(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
	if (node)
		return get_uprobe(__node_2_uprobe(node));

	/* get access + creation ref */
	refcount_set(&uprobe->ref, 2);
	return NULL;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	return u;
}

static void
ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
{
	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
		(unsigned long long) cur_uprobe->ref_ctr_offset,
		(unsigned long long) uprobe->ref_ctr_offset);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
				   loff_t ref_ctr_offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = inode;
	uprobe->offset = offset;
	uprobe->ref_ctr_offset = ref_ctr_offset;
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
			put_uprobe(cur_uprobe);
			kfree(uprobe);
			return ERR_PTR(-EINVAL);
		}
		kfree(uprobe);
		uprobe = cur_uprobe;
	}

	return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if @uc was found and deleted, false otherwise.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->read_folio)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc,
				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, ctx, mm);
}

static bool filter_chain(struct uprobe *uprobe,
			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		ret = consumer_filter(uc, ctx, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}
/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	if (WARN_ON(!uprobe_is_active(uprobe)))
		return;

	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
	put_uprobe(uprobe);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!mmget_not_zero(vma->vm_mm))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}

static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;

		mmap_write_lock(mm);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new,
					UPROBE_FILTER_REGISTER, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe,
					UPROBE_FILTER_UNREGISTER, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

 unlock:
		mmap_write_unlock(mm);
 free:
		mmput(mm);
		info = free_map_info(info);
	}
 out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}

static void
__uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	if (WARN_ON(!consumer_del(uprobe, uc)))
		return;

	err = register_for_each_vma(uprobe, NULL);
	/* TODO: can't unregister? schedule a worker thread */
	if (!uprobe->consumers && !err)
		delete_uprobe(uprobe);
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return;

	down_write(&uprobe->register_rwsem);
	__uprobe_unregister(uprobe, uc);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);

/*
 * __uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, __uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. first consumer for a @inode:@offset
 * tuple). Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters. Caller of __uprobe_register() is required to keep @inode
 * (and the containing mount) referenced.
 *
 * Return errno if it cannot successfully install probes
 * else return 0 (success)
 */
static int __uprobe_register(struct inode *inode, loff_t offset,
			     loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Uprobe must have at least one set consumer */
	if (!uc->handler && !uc->ret_handler)
		return -EINVAL;

	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
	if (!inode->i_mapping->a_ops->read_folio &&
	    !shmem_mapping(inode->i_mapping))
		return -EIO;
	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return -EINVAL;

	/*
	 * This ensures that copy_from_page(), copy_to_page() and
	 * __update_ref_ctr() can't cross page boundary.
	 */
	if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
		return -EINVAL;
	if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
		return -EINVAL;

 retry:
	uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
	if (!uprobe)
		return -ENOMEM;
	if (IS_ERR(uprobe))
		return PTR_ERR(uprobe);

	/*
	 * We can race with uprobe_unregister()->delete_uprobe().
	 * Check uprobe_is_active() and retry if it is false.
	 */
	down_write(&uprobe->register_rwsem);
	ret = -EAGAIN;
	if (likely(uprobe_is_active(uprobe))) {
		consumer_add(uprobe, uc);
		ret = register_for_each_vma(uprobe, uc);
		if (ret)
			__uprobe_unregister(uprobe, uc);
	}
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}

int uprobe_register(struct inode *inode, loff_t offset,
		    struct uprobe_consumer *uc)
{
	return __uprobe_register(inode, offset, 0, uc);
}
EXPORT_SYMBOL_GPL(uprobe_register);
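
/*
 * A minimal usage sketch (hypothetical consumer; assumes the caller keeps
 * a reference on @inode as required above):
 *
 *	static int my_handler(struct uprobe_consumer *self,
 *			      struct pt_regs *regs)
 *	{
 *		pr_info("probe hit at 0x%lx\n", instruction_pointer(regs));
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_uc = { .handler = my_handler };
 *
 *	err = uprobe_register(inode, offset, &my_uc);
 *	...
 *	uprobe_unregister(inode, offset, &my_uc);
 */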

int uprobe_register_refctr(struct inode *inode, loff_t offset,
			   loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
	return __uprobe_register(inode, offset, ref_ctr_offset, uc);
}
EXPORT_SYMBOL_GPL(uprobe_register_refctr);

/*
 * uprobe_apply - add or remove the breakpoints of an already registered
 * probe, according to @add.
 * @inode: the file in which the probe resides.
 * @offset: offset from the start of the file.
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 */
int uprobe_apply(struct inode *inode, loff_t offset,
			struct uprobe_consumer *uc, bool add)
{
	struct uprobe *uprobe;
	struct uprobe_consumer *con;
	int ret = -ENOENT;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return ret;

	down_write(&uprobe->register_rwsem);
	for (con = uprobe->consumers; con && con != uc; con = con->next)
		;
	if (con)
		ret = register_for_each_vma(uprobe, add ? uc : NULL);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	return ret;
}

static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	int err = 0;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		unsigned long vaddr;
		loff_t offset;

		if (!valid_vma(vma, false) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			continue;

		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
		if (uprobe->offset < offset ||
		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
			continue;

		vaddr = offset_to_vaddr(vma, uprobe->offset);
		err |= remove_breakpoint(uprobe, mm, vaddr);
	}
	mmap_read_unlock(mm);

	return err;
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}

/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
	}
	spin_unlock(&uprobes_treelock);
}

/* @vma contains reference counter, not the probed instruction. */
static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;
	unsigned long vaddr;
	int ret = 0, err = 0;

	mutex_lock(&delayed_uprobe_lock);
	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (du->mm != vma->vm_mm ||
		    !valid_ref_ctr_vma(du->uprobe, vma))
			continue;

		vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
		if (ret) {
			update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
			if (!err)
				err = ret;
		}
		delayed_uprobe_delete(du);
	}
	mutex_unlock(&delayed_uprobe_lock);
	return err;
}

/*
 * Called from mmap_region/vma_merge with mm->mmap_lock acquired.
 *
 * Currently we ignore all errors and always return 0, the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (no_uprobe_events())
		return 0;

	if (vma->vm_file &&
	    (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
	    test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
		delayed_ref_ctr_inc(vma);

	if (!valid_vma(vma, true))
		return 0;

	inode = file_inode(vma->vm_file);
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
	/*
	 * We can race with uprobe_unregister(), this uprobe can be already
	 * removed. But in this case filter_chain() must return false, all
	 * consumers have gone away.
	 */
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current) &&
		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = file_inode(vma->vm_file);

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	spin_unlock(&uprobes_treelock);

	return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (no_uprobe_events() || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}

/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
	struct vm_area_struct *vma;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (mm->uprobes_state.xol_area) {
		ret = -EALREADY;
		goto fail;
	}

	if (!area->vaddr) {
		/* Try to map as high as possible, this is only a hint. */
		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
						PAGE_SIZE, 0, 0);
		if (IS_ERR_VALUE(area->vaddr)) {
			ret = area->vaddr;
			goto fail;
		}
	}

	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
				&area->xol_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto fail;
	}

	ret = 0;
	/* pairs with get_xol_area() */
	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
 fail:
	mmap_write_unlock(mm);

	return ret;
}

static struct xol_area *__create_xol_area(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
	struct xol_area *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		goto out;

	area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
			       GFP_KERNEL);
	if (!area->bitmap)
		goto free_area;

	area->xol_mapping.name = "[uprobes]";
	area->xol_mapping.fault = NULL;
	area->xol_mapping.pages = area->pages;
	area->pages[0] = alloc_page(GFP_HIGHUSER);
	if (!area->pages[0])
		goto free_bitmap;
	area->pages[1] = NULL;

	area->vaddr = vaddr;
	init_waitqueue_head(&area->wq);
	/* Reserve the 1st slot for get_trampoline_vaddr() */
	set_bit(0, area->bitmap);
	atomic_set(&area->slot_count, 1);
	arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);

	if (!xol_add_vma(mm, area))
		return area;

	__free_page(area->pages[0]);
 free_bitmap:
	kfree(area->bitmap);
 free_area:
	kfree(area);
 out:
	return NULL;
}

/*
 * get_xol_area - Allocate process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	if (!mm->uprobes_state.xol_area)
		__create_xol_area(0);

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
	return area;
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	mutex_lock(&delayed_uprobe_lock);
	delayed_uprobe_remove(NULL, mm);
	mutex_unlock(&delayed_uprobe_lock);

	if (!area)
		return;

	put_page(area->pages[0]);
	kfree(area->bitmap);
	kfree(area);
}

void uprobe_start_dup_mmap(void)
{
	percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
	percpu_up_read(&dup_mmap_sem);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}

/*
 * Search the bitmap for a free slot; if all slots are busy, sleep on
 * area->wq until one is released.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
	unsigned long slot_addr;
	int slot_nr;

	do {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE) {
			if (!test_and_set_bit(slot_nr, area->bitmap))
				break;

			slot_nr = UINSNS_PER_PAGE;
			continue;
		}
		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
	} while (slot_nr >= UINSNS_PER_PAGE);

	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
	atomic_inc(&area->slot_count);

	return slot_addr;
}

/*
 * xol_get_insn_slot - allocate a slot for xol.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
{
	struct xol_area *area;
	unsigned long xol_vaddr;

	area = get_xol_area();
	if (!area)
		return 0;

	xol_vaddr = xol_take_insn_slot(area);
	if (unlikely(!xol_vaddr))
		return 0;

	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));

	return xol_vaddr;
}

/*
 * xol_free_insn_slot - If slot was earlier allocated by
 * @xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
	struct xol_area *area;
	unsigned long vma_end;
	unsigned long slot_addr;

	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
		return;

	slot_addr = tsk->utask->xol_vaddr;
	if (unlikely(!slot_addr))
		return;

	area = tsk->mm->uprobes_state.xol_area;
	vma_end = area->vaddr + PAGE_SIZE;
	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
		unsigned long offset;
		int slot_nr;

		offset = slot_addr - area->vaddr;
		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
		if (slot_nr >= UINSNS_PER_PAGE)
			return;

		clear_bit(slot_nr, area->bitmap);
		atomic_dec(&area->slot_count);
		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
		if (waitqueue_active(&area->wq))
			wake_up(&area->wq);

		tsk->utask->xol_vaddr = 0;
	}
}

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
				  void *src, unsigned long len)
{
	/* Initialize the slot */
	copy_to_page(page, vaddr, src, len);

	/*
	 * We probably need flush_icache_user_page() but it needs vma.
	 * This should work on most of architectures by default. If
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}
1678 | |
1679 | /** |
1680 | * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs |
1681 | * @regs: Reflects the saved state of the task after it has hit a breakpoint |
1682 | * instruction. |
1683 | * Return the address of the breakpoint instruction. |
1684 | */ |
1685 | unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs) |
1686 | { |
1687 | return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE; |
1688 | } |
1689 | |
1690 | unsigned long uprobe_get_trap_addr(struct pt_regs *regs) |
1691 | { |
1692 | struct uprobe_task *utask = current->utask; |
1693 | |
1694 | if (unlikely(utask && utask->active_uprobe)) |
1695 | return utask->vaddr; |
1696 | |
1697 | return instruction_pointer(regs); |
1698 | } |
1699 | |
1700 | static struct return_instance *free_ret_instance(struct return_instance *ri) |
1701 | { |
1702 | struct return_instance *next = ri->next; |
1703 | put_uprobe(uprobe: ri->uprobe); |
1704 | kfree(objp: ri); |
1705 | return next; |
1706 | } |
1707 | |
1708 | /* |
1709 | * Called with no locks held. |
1710 | * Called in context of an exiting or an exec-ing thread. |
1711 | */ |
1712 | void uprobe_free_utask(struct task_struct *t) |
1713 | { |
1714 | struct uprobe_task *utask = t->utask; |
1715 | struct return_instance *ri; |
1716 | |
1717 | if (!utask) |
1718 | return; |
1719 | |
1720 | if (utask->active_uprobe) |
1721 | put_uprobe(uprobe: utask->active_uprobe); |
1722 | |
1723 | ri = utask->return_instances; |
1724 | while (ri) |
1725 | ri = free_ret_instance(ri); |
1726 | |
1727 | xol_free_insn_slot(tsk: t); |
1728 | kfree(objp: utask); |
1729 | t->utask = NULL; |
1730 | } |
1731 | |
1732 | /* |
1733 | * Allocate a uprobe_task object for the task if necessary. |
1734 | * Called when the thread hits a breakpoint. |
1735 | * |
1736 | * Returns: |
1737 | * - pointer to new uprobe_task on success |
1738 | * - NULL otherwise |
1739 | */ |
1740 | static struct uprobe_task *get_utask(void) |
1741 | { |
1742 | if (!current->utask) |
		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1744 | return current->utask; |
1745 | } |
1746 | |
1747 | static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask) |
1748 | { |
1749 | struct uprobe_task *n_utask; |
1750 | struct return_instance **p, *o, *n; |
1751 | |
	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1753 | if (!n_utask) |
1754 | return -ENOMEM; |
1755 | t->utask = n_utask; |
1756 | |
1757 | p = &n_utask->return_instances; |
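	/*
	 * Walk the parent's chain and append a copy of each instance through
	 * the tail pointer 'p', so the child sees the instances in the same
	 * (deepest-first) order as the parent.
	 */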
1758 | for (o = o_utask->return_instances; o; o = o->next) { |
		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1760 | if (!n) |
1761 | return -ENOMEM; |
1762 | |
1763 | *n = *o; |
		get_uprobe(n->uprobe);
1765 | n->next = NULL; |
1766 | |
1767 | *p = n; |
1768 | p = &n->next; |
1769 | n_utask->depth++; |
1770 | } |
1771 | |
1772 | return 0; |
1773 | } |
1774 | |
1775 | static void uprobe_warn(struct task_struct *t, const char *msg) |
1776 | { |
	pr_warn("uprobe: %s:%d failed to %s\n",
			current->comm, current->pid, msg);
1779 | } |
1780 | |
1781 | static void dup_xol_work(struct callback_head *work) |
1782 | { |
1783 | if (current->flags & PF_EXITING) |
1784 | return; |
1785 | |
1786 | if (!__create_xol_area(current->utask->dup_xol_addr) && |
1787 | !fatal_signal_pending(current)) |
		uprobe_warn(current, "dup xol area");
1789 | } |
1790 | |
1791 | /* |
1792 | * Called in context of a new clone/fork from copy_process. |
1793 | */ |
1794 | void uprobe_copy_process(struct task_struct *t, unsigned long flags) |
1795 | { |
1796 | struct uprobe_task *utask = current->utask; |
1797 | struct mm_struct *mm = current->mm; |
1798 | struct xol_area *area; |
1799 | |
1800 | t->utask = NULL; |
1801 | |
1802 | if (!utask || !utask->return_instances) |
1803 | return; |
1804 | |
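	/*
	 * A plain CLONE_VM thread starts on a fresh stack and thus carries
	 * none of our hijacked return addresses; only fork() (separate mm)
	 * and vfork() (shared mm and stack) children inherit them.
	 */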
1805 | if (mm == t->mm && !(flags & CLONE_VFORK)) |
1806 | return; |
1807 | |
	if (dup_utask(t, utask))
		return uprobe_warn(t, "dup ret instances");
1810 | |
1811 | /* The task can fork() after dup_xol_work() fails */ |
1812 | area = mm->uprobes_state.xol_area; |
1813 | if (!area) |
		return uprobe_warn(t, "dup xol area");
1815 | |
1816 | if (mm == t->mm) |
1817 | return; |
1818 | |
1819 | t->utask->dup_xol_addr = area->vaddr; |
	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
	task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME);
1822 | } |
1823 | |
1824 | /* |
 * The current area->vaddr notion assumes the trampoline address is always
 * equal to area->vaddr.
1827 | * |
1828 | * Returns -1 in case the xol_area is not allocated. |
1829 | */ |
1830 | static unsigned long get_trampoline_vaddr(void) |
1831 | { |
1832 | struct xol_area *area; |
1833 | unsigned long trampoline_vaddr = -1; |
1834 | |
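	/*
	 * The xol setup code (__create_xol_area() earlier in this file)
	 * reserves the first slot of the xol page for the uretprobe
	 * trampoline, which is why the trampoline address coincides with
	 * area->vaddr.
	 */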
1835 | /* Pairs with xol_add_vma() smp_store_release() */ |
1836 | area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */ |
1837 | if (area) |
1838 | trampoline_vaddr = area->vaddr; |
1839 | |
1840 | return trampoline_vaddr; |
1841 | } |
1842 | |
1843 | static void cleanup_return_instances(struct uprobe_task *utask, bool chained, |
1844 | struct pt_regs *regs) |
1845 | { |
1846 | struct return_instance *ri = utask->return_instances; |
1847 | enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL; |
1848 | |
	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
1850 | ri = free_ret_instance(ri); |
1851 | utask->depth--; |
1852 | } |
1853 | utask->return_instances = ri; |
1854 | } |
1855 | |
1856 | static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) |
1857 | { |
1858 | struct return_instance *ri; |
1859 | struct uprobe_task *utask; |
1860 | unsigned long orig_ret_vaddr, trampoline_vaddr; |
1861 | bool chained; |
1862 | |
1863 | if (!get_xol_area()) |
1864 | return; |
1865 | |
1866 | utask = get_utask(); |
1867 | if (!utask) |
1868 | return; |
1869 | |
1870 | if (utask->depth >= MAX_URETPROBE_DEPTH) { |
		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
				" nestedness limit pid/tgid=%d/%d\n",
				current->pid, current->tgid);
1874 | return; |
1875 | } |
1876 | |
	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1878 | if (!ri) |
1879 | return; |
1880 | |
1881 | trampoline_vaddr = get_trampoline_vaddr(); |
1882 | orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs); |
1883 | if (orig_ret_vaddr == -1) |
1884 | goto fail; |
1885 | |
1886 | /* drop the entries invalidated by longjmp() */ |
1887 | chained = (orig_ret_vaddr == trampoline_vaddr); |
1888 | cleanup_return_instances(utask, chained, regs); |
1889 | |
1890 | /* |
1891 | * We don't want to keep trampoline address in stack, rather keep the |
1892 | * original return address of first caller thru all the consequent |
1893 | * instances. This also makes breakpoint unwrapping easier. |
1894 | */ |
1895 | if (chained) { |
1896 | if (!utask->return_instances) { |
1897 | /* |
1898 | * This situation is not possible. Likely we have an |
1899 | * attack from user-space. |
1900 | */ |
			uprobe_warn(current, "handle tail call");
1902 | goto fail; |
1903 | } |
1904 | orig_ret_vaddr = utask->return_instances->orig_ret_vaddr; |
1905 | } |
1906 | |
1907 | ri->uprobe = get_uprobe(uprobe); |
1908 | ri->func = instruction_pointer(regs); |
1909 | ri->stack = user_stack_pointer(regs); |
1910 | ri->orig_ret_vaddr = orig_ret_vaddr; |
1911 | ri->chained = chained; |
1912 | |
1913 | utask->depth++; |
1914 | ri->next = utask->return_instances; |
1915 | utask->return_instances = ri; |
1916 | |
1917 | return; |
1918 | fail: |
	kfree(ri);
1920 | } |
1921 | |
1922 | /* Prepare to single-step probed instruction out of line. */ |
1923 | static int |
1924 | pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) |
1925 | { |
1926 | struct uprobe_task *utask; |
1927 | unsigned long xol_vaddr; |
1928 | int err; |
1929 | |
1930 | utask = get_utask(); |
1931 | if (!utask) |
1932 | return -ENOMEM; |
1933 | |
1934 | xol_vaddr = xol_get_insn_slot(uprobe); |
1935 | if (!xol_vaddr) |
1936 | return -ENOMEM; |
1937 | |
1938 | utask->xol_vaddr = xol_vaddr; |
1939 | utask->vaddr = bp_vaddr; |
1940 | |
	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
1942 | if (unlikely(err)) { |
1943 | xol_free_insn_slot(current); |
1944 | return err; |
1945 | } |
1946 | |
1947 | utask->active_uprobe = uprobe; |
1948 | utask->state = UTASK_SSTEP; |
1949 | return 0; |
1950 | } |
1951 | |
1952 | /* |
1953 | * If we are singlestepping, then ensure this thread is not connected to |
1954 | * non-fatal signals until completion of singlestep. When xol insn itself |
1955 | * triggers the signal, restart the original insn even if the task is |
1956 | * already SIGKILL'ed (since coredump should report the correct ip). This |
1957 | * is even more important if the task has a handler for SIGSEGV/etc, The |
1958 | * _same_ instruction should be repeated again after return from the signal |
1959 | * handler, and SSTEP can never finish in this case. |
1960 | */ |
1961 | bool uprobe_deny_signal(void) |
1962 | { |
1963 | struct task_struct *t = current; |
1964 | struct uprobe_task *utask = t->utask; |
1965 | |
1966 | if (likely(!utask || !utask->active_uprobe)) |
1967 | return false; |
1968 | |
1969 | WARN_ON_ONCE(utask->state != UTASK_SSTEP); |
1970 | |
	if (task_sigpending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
1979 | } |
1980 | } |
1981 | |
1982 | return true; |
1983 | } |
1984 | |
1985 | static void mmf_recalc_uprobes(struct mm_struct *mm) |
1986 | { |
1987 | VMA_ITERATOR(vmi, mm, 0); |
1988 | struct vm_area_struct *vma; |
1989 | |
1990 | for_each_vma(vmi, vma) { |
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate; we can race with
		 * uprobe_unregister() and see the already-removed
		 * uprobe if delete_uprobe() was not yet called.
		 * Or this uprobe can be filtered out.
		 */
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
2000 | return; |
2001 | } |
2002 | |
	clear_bit(MMF_HAS_UPROBES, &mm->flags);
2004 | } |
2005 | |
2006 | static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) |
2007 | { |
2008 | struct page *page; |
2009 | uprobe_opcode_t opcode; |
2010 | int result; |
2011 | |
2012 | if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE))) |
2013 | return -EINVAL; |
2014 | |
2015 | pagefault_disable(); |
2016 | result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr); |
2017 | pagefault_enable(); |
2018 | |
2019 | if (likely(result == 0)) |
2020 | goto out; |
2021 | |
2022 | /* |
2023 | * The NULL 'tsk' here ensures that any faults that occur here |
2024 | * will not be accounted to the task. 'mm' *is* current->mm, |
2025 | * but we treat this as a 'remote' access since it is |
2026 | * essentially a kernel access to the memory. |
2027 | */ |
	result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page, NULL);
2029 | if (result < 0) |
2030 | return result; |
2031 | |
	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
2033 | put_page(page); |
2034 | out: |
2035 | /* This needs to return true for any variant of the trap insn */ |
	return is_trap_insn(&opcode);
2037 | } |
2038 | |
2039 | static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) |
2040 | { |
2041 | struct mm_struct *mm = current->mm; |
2042 | struct uprobe *uprobe = NULL; |
2043 | struct vm_area_struct *vma; |
2044 | |
2045 | mmap_read_lock(mm); |
	vma = vma_lookup(mm, bp_vaddr);
2047 | if (vma) { |
		if (valid_vma(vma, false)) {
			struct inode *inode = file_inode(vma->vm_file);
			loff_t offset = vaddr_to_offset(vma, bp_vaddr);
2051 | |
2052 | uprobe = find_uprobe(inode, offset); |
2053 | } |
2054 | |
2055 | if (!uprobe) |
			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
2057 | } else { |
2058 | *is_swbp = -EFAULT; |
2059 | } |
2060 | |
	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
2062 | mmf_recalc_uprobes(mm); |
2063 | mmap_read_unlock(mm); |
2064 | |
2065 | return uprobe; |
2066 | } |
2067 | |
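/*
 * A minimal sketch of a consumer as handler_chain() below sees it. The
 * struct fields, the return-code convention and the uprobe_register() call
 * come from <linux/uprobes.h>; the handler bodies and names are
 * hypothetical:
 *
 *	static int my_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		return 0;	// 0: keep; UPROBE_HANDLER_REMOVE: unapply
 *	}
 *
 *	static int my_ret_handler(struct uprobe_consumer *uc,
 *				  unsigned long func, struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_uc = {
 *		.handler	= my_handler,
 *		.ret_handler	= my_ret_handler,	// makes need_prep true
 *	};
 *
 * A consumer is attached elsewhere via uprobe_register(inode, offset, &my_uc).
 */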
2068 | static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) |
2069 | { |
2070 | struct uprobe_consumer *uc; |
2071 | int remove = UPROBE_HANDLER_REMOVE; |
2072 | bool need_prep = false; /* prepare return uprobe, when needed */ |
2073 | |
	down_read(&uprobe->register_rwsem);
2075 | for (uc = uprobe->consumers; uc; uc = uc->next) { |
2076 | int rc = 0; |
2077 | |
2078 | if (uc->handler) { |
2079 | rc = uc->handler(uc, regs); |
			WARN(rc & ~UPROBE_HANDLER_MASK,
				"bad rc=0x%x from %ps()\n", rc, uc->handler);
2082 | } |
2083 | |
2084 | if (uc->ret_handler) |
2085 | need_prep = true; |
2086 | |
2087 | remove &= rc; |
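		/* remove stays set only if every handler asked for removal */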
2088 | } |
2089 | |
2090 | if (need_prep && !remove) |
2091 | prepare_uretprobe(uprobe, regs); /* put bp at return */ |
2092 | |
2093 | if (remove && uprobe->consumers) { |
2094 | WARN_ON(!uprobe_is_active(uprobe)); |
2095 | unapply_uprobe(uprobe, current->mm); |
2096 | } |
	up_read(&uprobe->register_rwsem);
2098 | } |
2099 | |
2100 | static void |
2101 | handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs) |
2102 | { |
2103 | struct uprobe *uprobe = ri->uprobe; |
2104 | struct uprobe_consumer *uc; |
2105 | |
	down_read(&uprobe->register_rwsem);
2107 | for (uc = uprobe->consumers; uc; uc = uc->next) { |
2108 | if (uc->ret_handler) |
2109 | uc->ret_handler(uc, ri->func, regs); |
2110 | } |
	up_read(&uprobe->register_rwsem);
2112 | } |
2113 | |
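/*
 * Skip past one "chain" of return instances: consecutive instances created
 * by tail calls share a single real return address, and only the last one
 * in the run has ->chained == false. E.g. for
 *
 *	ri(f3, chained) -> ri(f2, chained) -> ri(f1) -> ri(f0) -> ...
 *
 * this returns ri(f0), the head of the next chain.
 */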
2114 | static struct return_instance *find_next_ret_chain(struct return_instance *ri) |
2115 | { |
2116 | bool chained; |
2117 | |
2118 | do { |
2119 | chained = ri->chained; |
2120 | ri = ri->next; /* can't be NULL if chained */ |
2121 | } while (chained); |
2122 | |
2123 | return ri; |
2124 | } |
2125 | |
2126 | static void handle_trampoline(struct pt_regs *regs) |
2127 | { |
2128 | struct uprobe_task *utask; |
2129 | struct return_instance *ri, *next; |
2130 | bool valid; |
2131 | |
2132 | utask = current->utask; |
2133 | if (!utask) |
2134 | goto sigill; |
2135 | |
2136 | ri = utask->return_instances; |
2137 | if (!ri) |
2138 | goto sigill; |
2139 | |
2140 | do { |
2141 | /* |
2142 | * We should throw out the frames invalidated by longjmp(). |
2143 | * If this chain is valid, then the next one should be alive |
2144 | * or NULL; the latter case means that nobody but ri->func |
2145 | * could hit this trampoline on return. TODO: sigaltstack(). |
2146 | */ |
2147 | next = find_next_ret_chain(ri); |
		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);
2149 | |
		instruction_pointer_set(regs, ri->orig_ret_vaddr);
2151 | do { |
2152 | if (valid) |
2153 | handle_uretprobe_chain(ri, regs); |
2154 | ri = free_ret_instance(ri); |
2155 | utask->depth--; |
2156 | } while (ri != next); |
2157 | } while (!valid); |
2158 | |
2159 | utask->return_instances = ri; |
2160 | return; |
2161 | |
2162 | sigill: |
	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
	force_sig(SIGILL);
}
2167 | |
2168 | bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs) |
2169 | { |
2170 | return false; |
2171 | } |
2172 | |
2173 | bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, |
2174 | struct pt_regs *regs) |
2175 | { |
2176 | return true; |
2177 | } |
2178 | |
2179 | /* |
2180 | * Run handler and ask thread to singlestep. |
2181 | * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. |
2182 | */ |
2183 | static void handle_swbp(struct pt_regs *regs) |
2184 | { |
2185 | struct uprobe *uprobe; |
2186 | unsigned long bp_vaddr; |
2187 | int is_swbp; |
2188 | |
2189 | bp_vaddr = uprobe_get_swbp_addr(regs); |
2190 | if (bp_vaddr == get_trampoline_vaddr()) |
2191 | return handle_trampoline(regs); |
2192 | |
	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
2194 | if (!uprobe) { |
2195 | if (is_swbp > 0) { |
2196 | /* No matching uprobe; signal SIGTRAP. */ |
2197 | force_sig(SIGTRAP); |
2198 | } else { |
2199 | /* |
2200 | * Either we raced with uprobe_unregister() or we can't |
2201 | * access this memory. The latter is only possible if |
2202 | * another thread plays with our ->mm. In both cases |
2203 | * we can simply restart. If this vma was unmapped we |
2204 | * can pretend this insn was not executed yet and get |
2205 | * the (correct) SIGSEGV after restart. |
2206 | */ |
			instruction_pointer_set(regs, bp_vaddr);
2208 | } |
2209 | return; |
2210 | } |
2211 | |
2212 | /* change it in advance for ->handler() and restart */ |
	instruction_pointer_set(regs, bp_vaddr);
2214 | |
2215 | /* |
2216 | * TODO: move copy_insn/etc into _register and remove this hack. |
2217 | * After we hit the bp, _unregister + _register can install the |
2218 | * new and not-yet-analyzed uprobe at the same address, restart. |
2219 | */ |
2220 | if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags))) |
2221 | goto out; |
2222 | |
2223 | /* |
2224 | * Pairs with the smp_wmb() in prepare_uprobe(). |
2225 | * |
2226 | * Guarantees that if we see the UPROBE_COPY_INSN bit set, then |
2227 | * we must also see the stores to &uprobe->arch performed by the |
2228 | * prepare_uprobe() call. |
2229 | */ |
2230 | smp_rmb(); |
2231 | |
2232 | /* Tracing handlers use ->utask to communicate with fetch methods */ |
2233 | if (!get_utask()) |
2234 | goto out; |
2235 | |
	if (arch_uprobe_ignore(&uprobe->arch, regs))
2237 | goto out; |
2238 | |
2239 | handler_chain(uprobe, regs); |
2240 | |
	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
2242 | goto out; |
2243 | |
2244 | if (!pre_ssout(uprobe, regs, bp_vaddr)) |
2245 | return; |
2246 | |
2247 | /* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */ |
2248 | out: |
2249 | put_uprobe(uprobe); |
2250 | } |
2251 | |
2252 | /* |
2253 | * Perform required fix-ups and disable singlestep. |
2254 | * Allow pending signals to take effect. |
2255 | */ |
2256 | static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs) |
2257 | { |
2258 | struct uprobe *uprobe; |
2259 | int err = 0; |
2260 | |
2261 | uprobe = utask->active_uprobe; |
2262 | if (utask->state == UTASK_SSTEP_ACK) |
		err = arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
2266 | else |
2267 | WARN_ON_ONCE(1); |
2268 | |
2269 | put_uprobe(uprobe); |
2270 | utask->active_uprobe = NULL; |
2271 | utask->state = UTASK_RUNNING; |
2272 | xol_free_insn_slot(current); |
2273 | |
	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);
2277 | |
2278 | if (unlikely(err)) { |
		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
2280 | force_sig(SIGILL); |
2281 | } |
2282 | } |
2283 | |
2284 | /* |
2285 | * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and |
2286 | * allows the thread to return from interrupt. After that handle_swbp() |
2287 | * sets utask->active_uprobe. |
2288 | * |
2289 | * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag |
2290 | * and allows the thread to return from interrupt. |
2291 | * |
 * While returning to userspace, the thread notices the TIF_UPROBE flag and
 * calls uprobe_notify_resume().
2294 | */ |
2295 | void uprobe_notify_resume(struct pt_regs *regs) |
2296 | { |
2297 | struct uprobe_task *utask; |
2298 | |
2299 | clear_thread_flag(TIF_UPROBE); |
2300 | |
2301 | utask = current->utask; |
2302 | if (utask && utask->active_uprobe) |
2303 | handle_singlestep(utask, regs); |
2304 | else |
2305 | handle_swbp(regs); |
2306 | } |
2307 | |
2308 | /* |
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * the notifier mechanism. Set the TIF_UPROBE flag and indicate a breakpoint
 * hit.
2311 | */ |
2312 | int uprobe_pre_sstep_notifier(struct pt_regs *regs) |
2313 | { |
2314 | if (!current->mm) |
2315 | return 0; |
2316 | |
2317 | if (!test_bit(MMF_HAS_UPROBES, ¤t->mm->flags) && |
2318 | (!current->utask || !current->utask->return_instances)) |
2319 | return 0; |
2320 | |
2321 | set_thread_flag(TIF_UPROBE); |
2322 | return 1; |
2323 | } |
2324 | |
2325 | /* |
 * uprobe_post_sstep_notifier gets called in interrupt context as part of the
 * notifier mechanism. Set the TIF_UPROBE flag and indicate completion of the
 * single-step.
2328 | */ |
2329 | int uprobe_post_sstep_notifier(struct pt_regs *regs) |
2330 | { |
2331 | struct uprobe_task *utask = current->utask; |
2332 | |
2333 | if (!current->mm || !utask || !utask->active_uprobe) |
2334 | /* task is currently not uprobed */ |
2335 | return 0; |
2336 | |
2337 | utask->state = UTASK_SSTEP_ACK; |
2338 | set_thread_flag(TIF_UPROBE); |
2339 | return 1; |
2340 | } |
2341 | |
2342 | static struct notifier_block uprobe_exception_nb = { |
2343 | .notifier_call = arch_uprobe_exception_notify, |
2344 | .priority = INT_MAX-1, /* notified after kprobes, kgdb */ |
2345 | }; |
2346 | |
2347 | void __init uprobes_init(void) |
2348 | { |
2349 | int i; |
2350 | |
2351 | for (i = 0; i < UPROBES_HASH_SZ; i++) |
2352 | mutex_init(&uprobes_mmap_mutex[i]); |
2353 | |
2354 | BUG_ON(register_die_notifier(&uprobe_exception_nb)); |
2355 | } |
2356 | |