// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code for kernel probes jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
#include <asm/inst.h>

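/*
 * The detour buffer is built from a template of position-independent
 * code (optprobe_template_entry .. optprobe_template_end, defined in
 * optprobes_head.S). The offsets below locate, in kprobe_opcode_t
 * units, the template slots that get patched at probe registration
 * time: the call to optimized_callback(), the call to emulate_step(),
 * the return branch, and the immediate-load sequences for the
 * optimized_kprobe address and the probed instruction.
 */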
#define TMPL_CALL_HDLR_IDX	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX		(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX		(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX		(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX		(optprobe_template_end - optprobe_template_entry)

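/*
 * There is a single static slot for detour buffers (optinsn_slot,
 * reserved in optprobes_head.S within the kernel .text section so it
 * stays within direct-branch range of probed kernel addresses); this
 * flag serializes its allocation.
 */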
static bool insn_page_in_use;

void *alloc_optinsn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

void free_optinsn_page(void *page)
{
	insn_page_in_use = false;
}

/*
 * Check if we can optimize this probe. Returns the post-emulation NIP
 * if the probe can be optimized, and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;
	unsigned long addr = (unsigned long)p->addr;

	/*
	 * A kprobe placed on the kretprobe trampoline at boot time
	 * covers a 'nop' instruction, which can always be emulated,
	 * so further checks can be skipped.
	 */
	if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
		return addr + sizeof(kprobe_opcode_t);

	/*
	 * We only support optimizing kernel addresses, not module
	 * addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */
	if (!is_kernel_addr(addr))
		return 0;

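	/*
	 * Build a dummy pt_regs with just the fields analyse_instr()
	 * and emulate_update_regs() need, so the post-emulation NIP
	 * can be computed without touching any live state.
	 */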
	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = addr;
	regs.trap = 0x0;
	regs.msr = MSR_KERNEL;

	/*
	 * Kprobes placed on conditional branch instructions are not
	 * optimized, as we cannot predict the NIP ahead of time with a
	 * dummy pt_regs, and so cannot ensure that the return branch
	 * from the detour buffer falls within the 32MB branch range.
	 * The detour buffer gets a branch back from the trampoline to
	 * the NIP returned by analyse_instr() here.
	 *
	 * So, ensure that the instruction is not a conditional branch
	 * and that it can be emulated.
	 */
	if (!is_conditional_branch(ppc_inst_read(p->ainsn.insn)) &&
	    analyse_instr(&op, &regs, ppc_inst_read(p->ainsn.insn)) == 1) {
		emulate_update_regs(&regs, &op);
		nip = regs.nip;
	}

	return nip;
}

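/*
 * Out-of-line pre-handler, reached from the detour buffer. The
 * template loads the address of the optimized_kprobe into r3 (the
 * first argument) before branching here; see the TMPL_OP_IDX fixup
 * in arch_prepare_optimized_kprobe() below.
 */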
static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs_set_return_ip(regs, (unsigned long)op->kp.addr);
		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, 1 /* dirty */);
		op->optinsn.insn = NULL;
	}
}

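/*
 * Generate instructions to load the provided 32-bit immediate value
 * into register 'reg' (a lis/ori pair) and patch them at 'addr'.
 */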
static void patch_imm32_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
	patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HI(val))));
	patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}

/*
 * Generate instructions to load the provided 64-bit immediate value
 * into register 'reg' and patch these instructions at 'addr'.
 */
static void patch_imm64_load_insns(unsigned long long val, int reg, kprobe_opcode_t *addr)
{
	patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HIGHEST(val))));
	patch_instruction(addr++, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_HIGHER(val))));
	patch_instruction(addr++, ppc_inst(PPC_RAW_SLDI(reg, reg, 32)));
	patch_instruction(addr++, ppc_inst(PPC_RAW_ORIS(reg, reg, PPC_HI(val))));
	patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}

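/*
 * Load a native-word-sized immediate into 'reg'. For a given platform
 * the emitted sequence has a fixed length (two instructions on 32-bit,
 * five on 64-bit), matching the space the template reserves for it.
 */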
static void patch_imm_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
	if (IS_ENABLED(CONFIG_PPC64))
		patch_imm64_load_insns(val, reg, addr);
	else
		patch_imm32_load_insns(val, reg, addr);
}

int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	ppc_inst_t branch_op_callback, branch_emulate_step, temp;
	unsigned long op_callback_addr, emulate_step_addr;
	kprobe_opcode_t *buff;
	long b_offset;
	unsigned long nip, size;
	int rc, i;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate instruction slot for detour buffer */
	buff = get_optinsn_slot();
	if (!buff)
		return -ENOMEM;

	/*
	 * OPTPROBE uses a 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively close by to permit
	 * use of the branch instruction on powerpc, because the address
	 * is specified in an immediate field of the instruction opcode
	 * itself, i.e. 24 bits in the opcode specify the address.
	 * Therefore the address should be within 32MB on either side of
	 * the current instruction.
	 */
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Check if the return address is also within the 32MB range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) - nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Setup template */
	/* We can optimize this via patch_instruction_window later */
	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
	pr_devel("Copying template to %p, size %lu\n", buff, size);
	for (i = 0; i < size; i++) {
		rc = patch_instruction(buff + i, ppc_inst(*(optprobe_template_entry + i)));
		if (rc < 0)
			goto error;
	}

	/*
	 * Fixup the template with instructions to:
	 * 1. load the address of the actual probepoint
	 */
	patch_imm_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX);

	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	op_callback_addr = ppc_kallsyms_lookup_name("optimized_callback");
	emulate_step_addr = ppc_kallsyms_lookup_name("emulate_step");
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
		goto error;
	}

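	/*
	 * Create the two branch-and-link ('bl') instructions for the
	 * template: one to optimized_callback() and one to
	 * emulate_step(). create_branch() fails if the target is out of
	 * direct-branch range.
	 */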
	rc = create_branch(&branch_op_callback, buff + TMPL_CALL_HDLR_IDX,
			   op_callback_addr, BRANCH_SET_LINK);

	rc |= create_branch(&branch_emulate_step, buff + TMPL_EMULATE_IDX,
			    emulate_step_addr, BRANCH_SET_LINK);

	if (rc)
		goto error;

	patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
	patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);

	/*
	 * 3. load instruction to be emulated into relevant register, and
	 */
	temp = ppc_inst_read(p->ainsn.insn);
	patch_imm_load_insns(ppc_inst_as_ulong(temp), 4, buff + TMPL_INSN_IDX);

	/*
	 * 4. branch back from trampoline
	 */
	patch_branch(buff + TMPL_RET_IDX, nip, 0);

	flush_icache_range((unsigned long)buff, (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;

	return 0;

error:
	free_optinsn_slot(buff, 0 /* not dirty */);
	return -ERANGE;
}

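/* Whether the detour buffer has been prepared for this probe. */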
int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * On powerpc, optprobes always replace a single instruction that is
 * 4 bytes long and 4-byte aligned, so another kprobe can never lie
 * within that address range. Always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

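/*
 * Commit the optimization: overwrite each probed instruction with a
 * direct branch into its detour buffer. The probe is already armed at
 * this point, so a single patch_instruction() switches the probe from
 * trap-based to branch-based.
 */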
void arch_optimize_kprobes(struct list_head *oplist)
{
	ppc_inst_t instr;
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Back up the instruction that will be replaced by the
		 * branch to the detour buffer.
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr, RELATIVEJUMP_SIZE);
		create_branch(&instr, op->kp.addr, (unsigned long)op->optinsn.insn, 0);
		patch_instruction(op->kp.addr, instr);
		list_del_init(&op->list);
	}
}

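/*
 * Undo the optimization by re-arming the probe: arch_arm_kprobe()
 * puts the breakpoint instruction back over the branch.
 */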
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

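/*
 * Return whether 'addr' falls inside the region this optprobe
 * replaces; on powerpc that is a single 4-byte instruction.
 */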
int arch_within_optimized_kprobe(struct optimized_kprobe *op, kprobe_opcode_t *addr)
{
	return (op->kp.addr <= addr &&
		op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr);
}