// SPDX-License-Identifier: GPL-2.0
/*
 * Common functionality for RV32 and RV64 BPF JIT compilers
 *
 * Copyright (c) 2019 Björn Töpel <bjorn.topel@gmail.com>
 *
 */

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/memory.h>
#include <asm/text-patching.h>
#include <asm/cfi.h>
#include "bpf_jit.h"

/* Number of iterations to try until offsets converge. */
#define NR_JIT_ITERATIONS	32

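/*
 * JIT the BPF program body by emitting RISC-V instructions for each BPF
 * instruction via bpf_jit_emit_insn(). When @offset is non-NULL, the running
 * count of emitted instructions (ctx->ninsns) is recorded per BPF
 * instruction, so that later passes can resolve jump and branch offsets.
 */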
static int build_body(struct rv_jit_context *ctx, bool extra_pass, int *offset)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = bpf_jit_emit_insn(insn, ctx, extra_pass);
		if (ret > 0)
			i++; /* skip the next instruction */
		if (offset)
			offset[i] = ctx->ninsns;
		if (ret < 0)
			return ret;
	}
	return 0;
}

bool bpf_jit_needs_zext(void)
{
	return true;
}

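/*
 * Main JIT entry point: size the image iteratively until the offsets
 * converge, allocate the RW/RX image pair, and emit the final instructions
 * into the RW image, which is later copied to the RX region it runs from.
 */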
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	unsigned int prog_size = 0, extable_size = 0;
	bool tmp_blinded = false, extra_pass = false;
	struct bpf_prog *tmp, *orig_prog = prog;
	int pass = 0, prev_ninsns = 0, i;
	struct rv_jit_data *jit_data;
	struct rv_jit_context *ctx;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}

	ctx = &jit_data->ctx;

	if (ctx->offset) {
		extra_pass = true;
		prog_size = sizeof(*ctx->insns) * ctx->ninsns;
		goto skip_init_ctx;
	}

	ctx->arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
	ctx->user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
	ctx->prog = prog;
	ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (!ctx->offset) {
		prog = orig_prog;
		goto out_offset;
	}

	if (build_body(ctx, extra_pass, NULL)) {
		prog = orig_prog;
		goto out_offset;
	}

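	/*
	 * Seed the offsets with a pessimistic estimate of 32 emitted
	 * instructions per BPF instruction, so that branch and jump offsets
	 * only need to shrink as the image converges below.
	 */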
	for (i = 0; i < prog->len; i++) {
		prev_ninsns += 32;
		ctx->offset[i] = prev_ninsns;
	}

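	/*
	 * Re-emit the program until its size stops changing. Once the size
	 * is stable, allocate the image; with the final addresses known, the
	 * image may shrink a bit further before converging again.
	 */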
	for (i = 0; i < NR_JIT_ITERATIONS; i++) {
		pass++;
		ctx->ninsns = 0;

		bpf_jit_build_prologue(ctx, bpf_is_subprog(prog));
		ctx->prologue_len = ctx->ninsns;

		if (build_body(ctx, extra_pass, ctx->offset)) {
			prog = orig_prog;
			goto out_offset;
		}

		ctx->epilogue_offset = ctx->ninsns;
		bpf_jit_build_epilogue(ctx);

		if (ctx->ninsns == prev_ninsns) {
			if (jit_data->header)
				break;
			/* obtain the actual image size */
			extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);
			prog_size = sizeof(*ctx->insns) * ctx->ninsns;

			jit_data->ro_header =
				bpf_jit_binary_pack_alloc(prog_size + extable_size,
							  &jit_data->ro_image, sizeof(u32),
							  &jit_data->header, &jit_data->image,
							  bpf_fill_ill_insns);
			if (!jit_data->ro_header) {
				prog = orig_prog;
				goto out_offset;
			}

			/*
			 * Use the RW image for writing the JITed instructions, but also
			 * keep the RX ro_image for calculating offsets within the image.
			 * The RW image is later copied to the RX image, from which the
			 * program will run; bpf_jit_binary_pack_finalize() performs that
			 * copy as the final step.
			 */
			ctx->ro_insns = (u16 *)jit_data->ro_image;
			ctx->insns = (u16 *)jit_data->image;
			/*
			 * Now that the image is allocated, it can potentially
			 * shrink further (auipc/jalr -> jal).
			 */
		}
		prev_ninsns = ctx->ninsns;
	}

	if (i == NR_JIT_ITERATIONS) {
		pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
		prog = orig_prog;
		goto out_free_hdr;
	}

	if (extable_size)
		prog->aux->extable = (void *)ctx->ro_insns + prog_size;

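	/*
	 * Final emission pass: write the converged image into the allocated
	 * RW buffer. The extra pass for programs with BPF-to-BPF calls also
	 * lands here, re-emitting with the final call addresses.
	 */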
skip_init_ctx:
	pass++;
	ctx->ninsns = 0;
	ctx->nexentries = 0;

	bpf_jit_build_prologue(ctx, bpf_is_subprog(prog));
	if (build_body(ctx, extra_pass, NULL)) {
		prog = orig_prog;
		goto out_free_hdr;
	}
	bpf_jit_build_epilogue(ctx);

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, prog_size, pass, ctx->insns);

	prog->bpf_func = (void *)ctx->ro_insns + cfi_get_offset();
	prog->jited = 1;
	prog->jited_len = prog_size - cfi_get_offset();

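	/*
	 * Finalize only on the last JIT pass for this program: either it is
	 * JITed in one go (!prog->is_func), or this is the extra pass with
	 * final call addresses. Otherwise keep jit_data around for the
	 * upcoming extra pass.
	 */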
	if (!prog->is_func || extra_pass) {
		if (WARN_ON(bpf_jit_binary_pack_finalize(jit_data->ro_header, jit_data->header))) {
			/* ro_header has been freed */
			jit_data->ro_header = NULL;
			prog = orig_prog;
			goto out_offset;
		}
		/*
		 * The instructions have now been copied to the ROX region from
		 * where they will execute.
		 * Write any modified data cache blocks out to memory and
		 * invalidate the corresponding blocks in the instruction cache.
		 */
		bpf_flush_icache(jit_data->ro_header, ctx->ro_insns + ctx->ninsns);
		for (i = 0; i < prog->len; i++)
			ctx->offset[i] = ninsns_rvoff(ctx->offset[i]);
		bpf_prog_fill_jited_linfo(prog, ctx->offset);
out_offset:
		kfree(ctx->offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:

	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;

out_free_hdr:
	if (jit_data->header) {
		bpf_arch_text_copy(&jit_data->ro_header->size, &jit_data->header->size,
				   sizeof(jit_data->header->size));
		bpf_jit_binary_pack_free(jit_data->ro_header, jit_data->header);
	}
	goto out_offset;
}

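/* Upper bound on the executable memory available to the BPF JIT. */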
u64 bpf_jit_alloc_exec_limit(void)
{
	return BPF_JIT_REGION_SIZE;
}

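/*
 * Copy @len bytes from @src into the read-only executable region at @dst,
 * serialized against other text patching by text_mutex.
 */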
void *bpf_arch_text_copy(void *dst, void *src, size_t len)
{
	int ret;

	mutex_lock(&text_mutex);
	ret = patch_text_nosync(dst, src, len);
	mutex_unlock(&text_mutex);

	if (ret)
		return ERR_PTR(-EINVAL);

	return dst;
}

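/*
 * Invalidate previously JITed instructions by overwriting @len bytes at
 * @dst with zeroes, again under text_mutex.
 */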
int bpf_arch_text_invalidate(void *dst, size_t len)
{
	int ret;

	mutex_lock(&text_mutex);
	ret = patch_text_set_nosync(dst, 0, len);
	mutex_unlock(&text_mutex);

	return ret;
}

void bpf_jit_free(struct bpf_prog *prog)
{
	if (prog->jited) {
		struct rv_jit_data *jit_data = prog->aux->jit_data;
		struct bpf_binary_header *hdr;

		/*
		 * If we fail the final pass of JIT (from jit_subprogs),
		 * the program may not be finalized yet. Call finalize here
		 * before freeing it.
		 */
		if (jit_data) {
			bpf_jit_binary_pack_finalize(jit_data->ro_header, jit_data->header);
			kfree(jit_data);
		}
		hdr = bpf_jit_binary_pack_hdr(prog);
		bpf_jit_binary_pack_free(hdr, NULL);
		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
	}

	bpf_prog_unlock_free(prog);
}