// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
 */
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <asm/cacheflush.h>

#include "decode-insn.h"

#define UPROBE_INV_FAULT_CODE	UINT_MAX

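/*
 * Copy the probed instruction into its execute-out-of-line (XOL) slot.
 * The slot is only rewritten, and the D/I caches only synced, when its
 * contents actually change; initial cache maintenance of the XOL page
 * is done via set_pte_at(), as the comment below notes.
 */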
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
			   void *src, unsigned long len)
{
	void *xol_page_kaddr = kmap_atomic(page);
	void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);

	/*
	 * Initial cache maintenance of the xol page done via set_pte_at().
	 * Subsequent CMOs only needed if the xol slot changes.
	 */
	if (!memcmp(dst, src, len))
		goto done;

	/* Initialize the slot */
	memcpy(dst, src, len);

	/* flush caches (dcache/icache) */
	sync_icache_aliases((unsigned long)dst, (unsigned long)dst + len);

done:
	kunmap_atomic(xol_page_kaddr);
}

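/*
 * On arm64 the BRK exception leaves the PC at the breakpoint
 * instruction itself, so the software-breakpoint address is simply the
 * current instruction pointer; no adjustment is needed, unlike
 * architectures whose breakpoint trap reports the following address.
 */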
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

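/*
 * Check whether the instruction at @addr in @mm may be probed: AArch32
 * tasks are rejected outright, the address must be 4-byte aligned, and
 * the decoder must accept the instruction. INSN_GOOD_NO_SLOT means the
 * instruction will be simulated in the kernel instead of being
 * single-stepped out of line.
 */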
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
			     unsigned long addr)
{
	u32 insn;

	/* TODO: Currently we do not support AARCH32 instruction probing */
	if (mm->context.flags & MMCF_AARCH32)
		return -EOPNOTSUPP;
	else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
		return -EINVAL;

	insn = le32_to_cpu(auprobe->insn);

	switch (arm_probe_decode_insn(insn, &auprobe->api)) {
	case INSN_REJECTED:
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:
		auprobe->simulate = true;
		break;

	default:
		break;
	}

	return 0;
}

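/*
 * Prepare to single-step the original instruction from its XOL slot:
 * seed thread.fault_code with a sentinel so a fault during the step can
 * be recognised later, redirect the PC to the slot, and arm hardware
 * single-stepping for the task.
 */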
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/* Initialize with an invalid fault code to detect if ol insn trapped */
	current->thread.fault_code = UPROBE_INV_FAULT_CODE;

	/* Point the instruction pointer at the XOL slot */
	instruction_pointer_set(regs, utask->xol_vaddr);

	user_enable_single_step(current);

	return 0;
}

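/*
 * Completion path after a successful XOL single-step: the sentinel must
 * still be intact (no fault occurred during the step), so advance the
 * PC past the probed instruction and disarm single-stepping.
 */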
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.fault_code != UPROBE_INV_FAULT_CODE);

	/* Point the instruction pointer at the insn following the breakpoint */
	instruction_pointer_set(regs, utask->vaddr + 4);

	user_disable_single_step(current);

	return 0;
}

bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	/*
	 * Between arch_uprobe_pre_xol() and arch_uprobe_post_xol(), if the
	 * XOL insn itself traps, detect it via the invalid fault code set
	 * in arch_uprobe_pre_xol().
	 */
	if (t->thread.fault_code != UPROBE_INV_FAULT_CODE)
		return true;

	return false;
}

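/*
 * For instructions the decoder flagged as simulable, skip the XOL step
 * entirely and emulate the instruction in the kernel via the
 * decoder-supplied handler. Returning true tells the core uprobes code
 * that no single-step is required.
 */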
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	u32 insn;
	unsigned long addr;

	if (!auprobe->simulate)
		return false;

	insn = le32_to_cpu(auprobe->insn);
	addr = instruction_pointer(regs);

	if (auprobe->api.handler)
		auprobe->api.handler(insn, addr, regs);

	return true;
}

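/*
 * Abort an in-progress XOL step (e.g. on a fatal signal): rewind the PC
 * to the original probed address and disarm single-stepping.
 */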
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/*
	 * Task has received a fatal signal, so reset back to the probed
	 * address.
	 */
	instruction_pointer_set(regs, utask->vaddr);

	user_disable_single_step(current);
}

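/*
 * Decide whether a pending return-probe instance is still live by
 * comparing the current stack pointer against the SP recorded when the
 * return address was hijacked.
 */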
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
			     struct pt_regs *regs)
{
	/*
	 * If a plain branch (B) was used to reach a retprobed assembly
	 * label, return true even when regs->sp and ret->stack are equal.
	 * This ensures that return instances for the callee label are
	 * cleaned up and reported when handle_trampoline runs for the
	 * calling function.
	 */
	if (ctx == RP_CHECK_CHAIN_CALL)
		return regs->sp <= ret->stack;
	else
		return regs->sp < ret->stack;
}

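/*
 * Swap the return address in the link register (x30) for the uretprobe
 * trampoline, returning the original address so it can be restored when
 * the probed function returns.
 */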
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
				  struct pt_regs *regs)
{
	unsigned long orig_ret_vaddr;

	orig_ret_vaddr = procedure_link_pointer(regs);
	/* Replace the return addr with trampoline addr */
	procedure_link_pointer_set(regs, trampoline_vaddr);

	return orig_ret_vaddr;
}

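/*
 * Nothing to do here: arm64 dispatches uprobe BRK and single-step
 * events through the break/step hooks registered below, not through the
 * die-notifier chain.
 */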
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

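/*
 * Entry point for the uprobes BRK immediate: notify the generic uprobes
 * layer, which flags the task so the breakpoint is processed on the way
 * back to userspace.
 */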
static int uprobe_breakpoint_handler(struct pt_regs *regs,
				     unsigned long esr)
{
	if (uprobe_pre_sstep_notifier(regs))
		return DBG_HOOK_HANDLED;

	return DBG_HOOK_ERROR;
}

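/*
 * Completion of the XOL single-step. The WARN_ON checks that the step
 * stopped exactly one instruction (4 bytes) past the start of the XOL
 * slot before the generic layer is notified to finish the probe.
 */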
static int uprobe_single_step_handler(struct pt_regs *regs,
				      unsigned long esr)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON(utask && (instruction_pointer(regs) != utask->xol_vaddr + 4));
	if (uprobe_post_sstep_notifier(regs))
		return DBG_HOOK_HANDLED;

	return DBG_HOOK_ERROR;
}

/* uprobe breakpoint handler hook */
static struct break_hook uprobes_break_hook = {
	.imm = UPROBES_BRK_IMM,
	.fn = uprobe_breakpoint_handler,
};

/* uprobe single step handler hook */
static struct step_hook uprobes_step_hook = {
	.fn = uprobe_single_step_handler,
};

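/*
 * Register the BRK and single-step hooks with the arm64 debug-exception
 * layer at boot; device_initcall() runs this once during kernel init.
 */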
static int __init arch_init_uprobes(void)
{
	register_user_break_hook(&uprobes_break_hook);
	register_user_step_hook(&uprobes_step_hook);

	return 0;
}

device_initcall(arch_init_uprobes);

source code of linux/arch/arm64/kernel/probes/uprobes.c
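
The file above only implements the arch-specific hooks; probes themselves are created by the generic uprobes core. As a rough usage sketch (not part of the kernel source above): on kernels since v4.17, userspace can arm a uprobe through the perf_event_open() syscall and the kernel's dynamic "uprobe" PMU. The target binary path and file offset below are illustrative placeholders; the uprobe_path and probe_offset fields do exist in the uapi struct perf_event_attr, but error handling and privileges (perf_event_paranoid) are glossed over.

/* Hedged userspace sketch: arm a uprobe via perf_event_open().
 * The probed path and offset are placeholders, not a real target.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	const char *path = "/bin/bash";	/* placeholder target binary */
	unsigned long long offset = 0x1000;	/* placeholder file offset of probed insn */
	FILE *f;
	int type, fd;

	/* The uprobe PMU's dynamic type id is published by the kernel in sysfs */
	f = fopen("/sys/bus/event_source/devices/uprobe/type", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%d", &type) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.uprobe_path = (unsigned long long)(uintptr_t)path;
	attr.probe_offset = offset;	/* config bit 0 would request a uretprobe instead */

	/* System-wide on CPU 0; requires sufficient perf privileges */
	fd = perf_event_open(&attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* The breakpoint is now armed; hits flow through the arch hooks above. */
	close(fd);
	return 0;
}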