// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */

#define BPF_NO_KFUNC_PROTOTYPES
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"
#include "bpf_arena_common.h"

#define ARENA_SIZE (1ull << 32)

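/*
 * A single 4GB arena backs every test below. BPF_F_MMAPABLE lets user
 * space mmap() the same pages, and max_entries sizes the arena in pages.
 */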
struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, ARENA_SIZE / PAGE_SIZE);
} arena SEC(".maps");

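/*
 * Exercise basic page allocation: fixed-address and any-address
 * allocations, reads after free, and reuse of a freed slot.
 */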
SEC("syscall")
__success __retval(0)
int big_alloc1(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	volatile char __arena *page1, *page2, *no_page, *page3;
	void __arena *base;

	page1 = base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (!page1)
		return 1;
	*page1 = 1;
	page2 = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE * 2,
				      1, NUMA_NO_NODE, 0);
	if (!page2)
		return 2;
	*page2 = 2;
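	/* Allocating the very last arena page is expected to fail. */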
	no_page = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE,
					1, NUMA_NO_NODE, 0);
	if (no_page)
		return 3;
	if (*page1 != 1)
		return 4;
	if (*page2 != 2)
		return 5;
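	/*
	 * Freeing page1 must not disturb page2. Loads from freed (unmapped)
	 * arena memory are expected to read back as zero rather than fault.
	 */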
	bpf_arena_free_pages(&arena, (void __arena *)page1, 1);
	if (*page2 != 2)
		return 6;
	if (*page1 != 0) /* use-after-free should return 0 */
		return 7;
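	/* The freed slot should be handed back out by the next allocation. */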
	page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (!page3)
		return 8;
	*page3 = 3;
	if (page1 != page3)
		return 9;
	if (*page2 != 2)
		return 10;
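	/* Neighboring pages were never allocated and must read as zero. */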
	if (*(page1 + PAGE_SIZE) != 0)
		return 11;
	if (*(page1 - PAGE_SIZE) != 0)
		return 12;
	if (*(page2 + PAGE_SIZE) != 0)
		return 13;
	if (*(page2 - PAGE_SIZE) != 0)
		return 14;
#endif
	return 0;
}

/* Try to access a reserved page. Behavior should be identical with accessing unallocated pages. */
SEC("syscall")
__success __retval(0)
int access_reserved(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	volatile char __arena *page;
	char __arena *base;
	const size_t len = 4;
	int ret, i;

	/* Get a separate region of the arena. */
	page = base = arena_base(&arena) + 16384 * PAGE_SIZE;

	ret = bpf_arena_reserve_pages(&arena, base, len);
	if (ret)
		return 1;

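	/*
	 * Reserved pages are never mapped, so, as with unallocated arena
	 * memory, stores should be silently dropped and loads should
	 * return zero.
	 */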
	/* Try to dirty each reserved page. */
	for (i = 0; i < len && can_loop; i++)
		*(page + i * PAGE_SIZE) = 0x5a;

	for (i = 0; i < len && can_loop; i++) {
		page = (volatile char __arena *)(base + i * PAGE_SIZE);

		/*
		 * Error out in case either the write went through,
		 * or the address has random garbage.
		 */
		if (*page == 0x5a)
			return 2 + 2 * i;

		if (*page)
			return 2 + 2 * i + 1;
	}
#endif
	return 0;
}

/* Try to allocate a region overlapping with a reservation. */
SEC("syscall")
__success __retval(0)
int request_partially_reserved(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	volatile char __arena *page;
	char __arena *base;
	int ret;

	/* Add an arbitrary page offset. */
	page = base = arena_base(&arena) + 4096 * __PAGE_SIZE;

	ret = bpf_arena_reserve_pages(&arena, base + 3 * __PAGE_SIZE, 4);
	if (ret)
		return 1;

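	/*
	 * Request 5 pages starting at base; pages 3-6 are reserved, so the
	 * allocation overlaps the reservation and must fail.
	 */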
	page = bpf_arena_alloc_pages(&arena, base, 5, NUMA_NO_NODE, 0);
	if ((u64)page != 0ULL)
		return 2;
#endif
	return 0;
}

SEC("syscall")
__success __retval(0)
int free_reserved(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	char __arena *addr;
	char __arena *page;
	int ret;

	/* Add an arbitrary page offset. */
	addr = arena_base(&arena) + 32768 * __PAGE_SIZE;

	page = bpf_arena_alloc_pages(&arena, addr, 2, NUMA_NO_NODE, 0);
	if (!page)
		return 1;
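	/* Reserve the two pages immediately following the allocation. */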

	ret = bpf_arena_reserve_pages(&arena, addr + 2 * __PAGE_SIZE, 2);
	if (ret)
		return 2;

	/*
	 * Reserved and allocated pages should be interchangeable for
	 * bpf_arena_free_pages(). Free a reserved and an allocated
	 * page with a single call.
	 */
	bpf_arena_free_pages(&arena, addr + __PAGE_SIZE, 2);

	/* The free call above should have succeeded, so this allocation should too. */
	page = bpf_arena_alloc_pages(&arena, addr + __PAGE_SIZE, 2, NUMA_NO_NODE, 0);
	if (!page)
		return 3;
#endif
	return 0;
}

#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
#define PAGE_CNT 100
__u8 __arena * __arena page[PAGE_CNT]; /* occupies the first page */
__u8 __arena *base;

/*
 * Check that arena's range_tree algorithm allocates pages sequentially
 * on the first pass and then fills in all gaps on the second pass.
 */
__noinline int alloc_pages(int page_cnt, int pages_atonce, bool first_pass,
			   int max_idx, int step)
{
	__u8 __arena *pg;
	int i, pg_idx;

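	/*
	 * 'step' seeds the error codes so a failure can be attributed to a
	 * specific pass: step for an allocation failure, step + 100 for a
	 * first-pass ordering violation, step + 200 for a gap-fill violation.
	 */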
	for (i = 0; i < page_cnt; i++) {
		pg = bpf_arena_alloc_pages(&arena, NULL, pages_atonce,
					   NUMA_NO_NODE, 0);
		if (!pg)
			return step;
		pg_idx = (unsigned long) (pg - base) / PAGE_SIZE;
		if (first_pass) {
			/* Pages must be allocated sequentially */
			if (pg_idx != i)
				return step + 100;
		} else {
			/* Allocator must fill into gaps */
			if (pg_idx >= max_idx || (pg_idx & 1))
				return step + 200;
		}
		*pg = pg_idx;
		page[pg_idx] = pg;
		cond_break;
	}
	return 0;
}

SEC("syscall")
__success __retval(0)
int big_alloc2(void *ctx)
{
	__u8 __arena *pg;
	int i, err;

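	/*
	 * Allocate and immediately free one page purely to learn the arena
	 * base address; pg_idx in alloc_pages() is computed relative to it.
	 */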
	base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (!base)
		return 1;
	bpf_arena_free_pages(&arena, (void __arena *)base, 1);

	err = alloc_pages(PAGE_CNT, 1, true, PAGE_CNT, 2);
	if (err)
		return err;

	/* Clear all even pages */
	for (i = 0; i < PAGE_CNT; i += 2) {
		pg = page[i];
		if (*pg != i)
			return 3;
		bpf_arena_free_pages(&arena, (void __arena *)pg, 1);
		page[i] = NULL;
		cond_break;
	}

	/* Allocate into freed gaps */
	err = alloc_pages(PAGE_CNT / 2, 1, false, PAGE_CNT, 4);
	if (err)
		return err;

	/* Free pairs of pages */
	for (i = 0; i < PAGE_CNT; i += 4) {
		pg = page[i];
		if (*pg != i)
			return 5;
		bpf_arena_free_pages(&arena, (void __arena *)pg, 2);
		page[i] = NULL;
		page[i + 1] = NULL;
		cond_break;
	}

	/* Allocate 2 pages at a time into freed gaps */
	err = alloc_pages(PAGE_CNT / 4, 2, false, PAGE_CNT, 6);
	if (err)
		return err;

	/* Check pages without freeing */
	for (i = 0; i < PAGE_CNT; i += 2) {
		pg = page[i];
		if (*pg != i)
			return 7;
		cond_break;
	}

	pg = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (!pg)
		return 8;
	/*
	 * The first PAGE_CNT pages are occupied. The new page
	 * must be above.
	 */
	if ((pg - base) / PAGE_SIZE < PAGE_CNT)
		return 9;
	return 0;
}
#endif
char _license[] SEC("license") = "GPL";