// SPDX-License-Identifier: GPL-2.0
/*
 * Common functionality for RV32 and RV64 BPF JIT compilers
 *
 * Copyright (c) 2019 Björn Töpel <bjorn.topel@gmail.com>
 *
 */

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/memory.h>
#include <asm/patch.h>
#include <asm/cfi.h>
#include "bpf_jit.h"

/* Number of iterations to try until offsets converge. */
#define NR_JIT_ITERATIONS	32

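/*
 * Walk the BPF program and emit native code for each instruction. When
 * @offset is non-NULL, record the cumulative number of emitted RISC-V
 * instructions after each BPF instruction, which is what branch
 * resolution uses. A positive return from bpf_jit_emit_insn() means the
 * instruction occupied two BPF instruction slots.
 */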
static int build_body(struct rv_jit_context *ctx, bool extra_pass, int *offset)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = bpf_jit_emit_insn(insn, ctx, extra_pass);
		/* BPF_LD | BPF_IMM | BPF_DW: skip the next instruction. */
		if (ret > 0)
			i++;
		if (offset)
			offset[i] = ctx->ninsns;
		if (ret < 0)
			return ret;
	}
	return 0;
}

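/*
 * Returning true makes the verifier insert explicit zero-extension
 * instructions after 32-bit subregister writes, so the JIT does not have
 * to zero the upper bits itself.
 */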
bool bpf_jit_needs_zext(void)
{
	return true;
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	unsigned int prog_size = 0, extable_size = 0;
	bool tmp_blinded = false, extra_pass = false;
	struct bpf_prog *tmp, *orig_prog = prog;
	int pass = 0, prev_ninsns = 0, i;
	struct rv_jit_data *jit_data;
	struct rv_jit_context *ctx;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}

	ctx = &jit_data->ctx;

	if (ctx->offset) {
		extra_pass = true;
		prog_size = sizeof(*ctx->insns) * ctx->ninsns;
		goto skip_init_ctx;
	}

	ctx->prog = prog;
	ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (!ctx->offset) {
		prog = orig_prog;
		goto out_offset;
	}

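	/*
	 * No image has been allocated at this point (ctx->insns is NULL),
	 * so this first pass only validates the program and rejects
	 * instructions the JIT cannot translate.
	 */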
	if (build_body(ctx, extra_pass, NULL)) {
		prog = orig_prog;
		goto out_offset;
	}

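	/*
	 * Seed the offset table with a pessimistic estimate of 32 emitted
	 * instructions per BPF instruction, so that branch distances start
	 * out large and can only shrink in later passes.
	 */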
	for (i = 0; i < prog->len; i++) {
		prev_ninsns += 32;
		ctx->offset[i] = prev_ninsns;
	}

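	/*
	 * Re-emit the program until its size converges. Once two
	 * consecutive passes produce the same instruction count, the image
	 * is allocated, and further passes may still shrink it (see the
	 * auipc/jalr -> jal note below) before it converges again.
	 */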
	for (i = 0; i < NR_JIT_ITERATIONS; i++) {
		pass++;
		ctx->ninsns = 0;

		bpf_jit_build_prologue(ctx, bpf_is_subprog(prog));
		ctx->prologue_len = ctx->ninsns;

		if (build_body(ctx, extra_pass, ctx->offset)) {
			prog = orig_prog;
			goto out_offset;
		}

		ctx->epilogue_offset = ctx->ninsns;
		bpf_jit_build_epilogue(ctx);

		if (ctx->ninsns == prev_ninsns) {
			if (jit_data->header)
				break;
			/* obtain the actual image size */
			extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);
			prog_size = sizeof(*ctx->insns) * ctx->ninsns;

			jit_data->ro_header =
				bpf_jit_binary_pack_alloc(prog_size + extable_size,
							  &jit_data->ro_image, sizeof(u32),
							  &jit_data->header, &jit_data->image,
							  bpf_fill_ill_insns);
			if (!jit_data->ro_header) {
				prog = orig_prog;
				goto out_offset;
			}

			/*
			 * Use the RW image for writing the JITed instructions, but keep
			 * the RX ro_image for calculating offsets, since the program
			 * will run from there. bpf_jit_binary_pack_finalize() copies
			 * the RW image over to the RX image as the final step.
			 */
			ctx->ro_insns = (u16 *)jit_data->ro_image;
			ctx->insns = (u16 *)jit_data->image;
			/*
			 * Now, when the image is allocated, the image can
			 * potentially shrink more (auipc/jalr -> jal).
			 */
		}
		prev_ninsns = ctx->ninsns;
	}

	if (i == NR_JIT_ITERATIONS) {
		pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
		prog = orig_prog;
		goto out_free_hdr;
	}

	if (extable_size)
		prog->aux->extable = (void *)ctx->ro_insns + prog_size;

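	/*
	 * Final pass: the offsets have converged, and on the extra pass the
	 * addresses of subprogram calls are known, so emit the instructions
	 * into the RW image for real.
	 */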
skip_init_ctx:
	pass++;
	ctx->ninsns = 0;
	ctx->nexentries = 0;

	bpf_jit_build_prologue(ctx, bpf_is_subprog(prog));
	if (build_body(ctx, extra_pass, NULL)) {
		prog = orig_prog;
		goto out_free_hdr;
	}
	bpf_jit_build_epilogue(ctx);

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, prog_size, pass, ctx->insns);

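	/*
	 * With kCFI, the prologue starts with a type hash; point bpf_func
	 * past it so that indirect calls land on the actual entry point.
	 */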
	prog->bpf_func = (void *)ctx->ro_insns + cfi_get_offset();
	prog->jited = 1;
	prog->jited_len = prog_size - cfi_get_offset();

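	/*
	 * Programs with subprograms are finalized on the extra pass invoked
	 * from jit_subprogs(); everything else is finalized immediately.
	 */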
	if (!prog->is_func || extra_pass) {
		if (WARN_ON(bpf_jit_binary_pack_finalize(prog, jit_data->ro_header,
							 jit_data->header))) {
			/* ro_header has been freed */
			jit_data->ro_header = NULL;
			prog = orig_prog;
			goto out_offset;
		}
		/*
		 * The instructions have now been copied to the ROX region from
		 * where they will execute.
		 * Write any modified data cache blocks out to memory and
		 * invalidate the corresponding blocks in the instruction cache.
		 */
		bpf_flush_icache(jit_data->ro_header, ctx->ro_insns + ctx->ninsns);
		for (i = 0; i < prog->len; i++)
			ctx->offset[i] = ninsns_rvoff(ctx->offset[i]);
		bpf_prog_fill_jited_linfo(prog, ctx->offset);
out_offset:
		kfree(ctx->offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:

	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;

out_free_hdr:
	if (jit_data->header) {
		bpf_arch_text_copy(&jit_data->ro_header->size, &jit_data->header->size,
				   sizeof(jit_data->header->size));
		bpf_jit_binary_pack_free(jit_data->ro_header, jit_data->header);
	}
	goto out_offset;
}

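/*
 * JIT images are allocated from the dedicated BPF_JIT_REGION of the
 * virtual address space (see bpf_jit_alloc_exec() below), so no single
 * allocation can exceed the region's size.
 */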
u64 bpf_jit_alloc_exec_limit(void)
{
	return BPF_JIT_REGION_SIZE;
}

void *bpf_jit_alloc_exec(unsigned long size)
{
	return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
				    BPF_JIT_REGION_END, GFP_KERNEL,
				    PAGE_KERNEL, 0, NUMA_NO_NODE,
				    __builtin_return_address(0));
}

void bpf_jit_free_exec(void *addr)
{
	return vfree(addr);
}

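/*
 * Copy @len bytes into the read-only image. Writes to the RX region must
 * go through the text-patching helper, serialized by text_mutex.
 */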
void *bpf_arch_text_copy(void *dst, void *src, size_t len)
{
	int ret;

	mutex_lock(&text_mutex);
	ret = patch_text_nosync(dst, src, len);
	mutex_unlock(&text_mutex);

	if (ret)
		return ERR_PTR(-EINVAL);

	return dst;
}

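/*
 * Poison a range of the image by filling it with zero bytes, a pattern
 * the RISC-V ISA defines as an illegal instruction.
 */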
int bpf_arch_text_invalidate(void *dst, size_t len)
{
	int ret;

	mutex_lock(&text_mutex);
	ret = patch_text_set_nosync(dst, 0, len);
	mutex_unlock(&text_mutex);

	return ret;
}

void bpf_jit_free(struct bpf_prog *prog)
{
	if (prog->jited) {
		struct rv_jit_data *jit_data = prog->aux->jit_data;
		struct bpf_binary_header *hdr;

		/*
		 * If we fail the final pass of JIT (from jit_subprogs),
		 * the program may not be finalized yet. Call finalize here
		 * before freeing it.
		 */
		if (jit_data) {
			bpf_jit_binary_pack_finalize(prog, jit_data->ro_header, jit_data->header);
			kfree(jit_data);
		}
		hdr = bpf_jit_binary_pack_hdr(prog);
		bpf_jit_binary_pack_free(hdr, NULL);
		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
	}

	bpf_prog_unlock_free(prog);
}

