1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 */
4 #include <linux/bpf.h>
5 #include <linux/btf.h>
6 #include <linux/bpf-cgroup.h>
7 #include <linux/cgroup.h>
8 #include <linux/rcupdate.h>
9 #include <linux/random.h>
10 #include <linux/smp.h>
11 #include <linux/topology.h>
12 #include <linux/ktime.h>
13 #include <linux/sched.h>
14 #include <linux/uidgid.h>
15 #include <linux/filter.h>
16 #include <linux/ctype.h>
17 #include <linux/jiffies.h>
18 #include <linux/pid_namespace.h>
19 #include <linux/poison.h>
20 #include <linux/proc_ns.h>
21 #include <linux/security.h>
22 #include <linux/btf_ids.h>
23 #include <linux/bpf_mem_alloc.h>
24
25 #include "../../lib/kstrtox.h"
26
27 /* If a kernel subsystem allows eBPF programs to call this function, then
28 * inside its own verifier_ops->get_func_proto() callback it should return
29 * bpf_map_lookup_elem_proto, so that the verifier can properly check the arguments.
30 *
31 * Different map implementations will rely on rcu in map methods
32 * lookup/update/delete, therefore eBPF programs must run under rcu lock
33 * if program is allowed to access maps, so check rcu_read_lock_held in
34 * all three functions.
35 */
36 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
37 {
38 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
39 return (unsigned long) map->ops->map_lookup_elem(map, key);
40 }
41
42 const struct bpf_func_proto bpf_map_lookup_elem_proto = {
43 .func = bpf_map_lookup_elem,
44 .gpl_only = false,
45 .pkt_access = true,
46 .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
47 .arg1_type = ARG_CONST_MAP_PTR,
48 .arg2_type = ARG_PTR_TO_MAP_KEY,
49 };
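/* Example (BPF program side) -- an illustrative sketch, not part of this
 * file: the returned pointer must be NULL-checked before the verifier
 * allows any access to the map value.
 *
 *	struct counter *val;
 *
 *	val = bpf_map_lookup_elem(&my_map, &key);
 *	if (val)
 *		__sync_fetch_and_add(&val->packets, 1);
 *
 * 'my_map', 'key' and 'struct counter' are hypothetical names used only
 * for illustration.
 */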
50
51 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
52 void *, value, u64, flags)
53 {
54 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
55 return map->ops->map_update_elem(map, key, value, flags);
56 }
57
58 const struct bpf_func_proto bpf_map_update_elem_proto = {
59 .func = bpf_map_update_elem,
60 .gpl_only = false,
61 .pkt_access = true,
62 .ret_type = RET_INTEGER,
63 .arg1_type = ARG_CONST_MAP_PTR,
64 .arg2_type = ARG_PTR_TO_MAP_KEY,
65 .arg3_type = ARG_PTR_TO_MAP_VALUE,
66 .arg4_type = ARG_ANYTHING,
67 };
68
69 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
70 {
71 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
72 return map->ops->map_delete_elem(map, key);
73 }
74
75 const struct bpf_func_proto bpf_map_delete_elem_proto = {
76 .func = bpf_map_delete_elem,
77 .gpl_only = false,
78 .pkt_access = true,
79 .ret_type = RET_INTEGER,
80 .arg1_type = ARG_CONST_MAP_PTR,
81 .arg2_type = ARG_PTR_TO_MAP_KEY,
82 };
83
84 BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
85 {
86 return map->ops->map_push_elem(map, value, flags);
87 }
88
89 const struct bpf_func_proto bpf_map_push_elem_proto = {
90 .func = bpf_map_push_elem,
91 .gpl_only = false,
92 .pkt_access = true,
93 .ret_type = RET_INTEGER,
94 .arg1_type = ARG_CONST_MAP_PTR,
95 .arg2_type = ARG_PTR_TO_MAP_VALUE,
96 .arg3_type = ARG_ANYTHING,
97 };
98
99 BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
100 {
101 return map->ops->map_pop_elem(map, value);
102 }
103
104 const struct bpf_func_proto bpf_map_pop_elem_proto = {
105 .func = bpf_map_pop_elem,
106 .gpl_only = false,
107 .ret_type = RET_INTEGER,
108 .arg1_type = ARG_CONST_MAP_PTR,
109 .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
110 };
111
112 BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
113 {
114 return map->ops->map_peek_elem(map, value);
115 }
116
117 const struct bpf_func_proto bpf_map_peek_elem_proto = {
118 .func = bpf_map_peek_elem,
119 .gpl_only = false,
120 .ret_type = RET_INTEGER,
121 .arg1_type = ARG_CONST_MAP_PTR,
122 .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
123 };
124
125 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
126 {
127 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
128 return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
129 }
130
131 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
132 .func = bpf_map_lookup_percpu_elem,
133 .gpl_only = false,
134 .pkt_access = true,
135 .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
136 .arg1_type = ARG_CONST_MAP_PTR,
137 .arg2_type = ARG_PTR_TO_MAP_KEY,
138 .arg3_type = ARG_ANYTHING,
139 };
140
141 const struct bpf_func_proto bpf_get_prandom_u32_proto = {
142 .func = bpf_user_rnd_u32,
143 .gpl_only = false,
144 .ret_type = RET_INTEGER,
145 };
146
147 BPF_CALL_0(bpf_get_smp_processor_id)
148 {
149 return smp_processor_id();
150 }
151
152 const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
153 .func = bpf_get_smp_processor_id,
154 .gpl_only = false,
155 .ret_type = RET_INTEGER,
156 };
157
158 BPF_CALL_0(bpf_get_numa_node_id)
159 {
160 return numa_node_id();
161 }
162
163 const struct bpf_func_proto bpf_get_numa_node_id_proto = {
164 .func = bpf_get_numa_node_id,
165 .gpl_only = false,
166 .ret_type = RET_INTEGER,
167 };
168
169 BPF_CALL_0(bpf_ktime_get_ns)
170 {
171 /* NMI safe access to clock monotonic */
172 return ktime_get_mono_fast_ns();
173 }
174
175 const struct bpf_func_proto bpf_ktime_get_ns_proto = {
176 .func = bpf_ktime_get_ns,
177 .gpl_only = false,
178 .ret_type = RET_INTEGER,
179 };
180
181 BPF_CALL_0(bpf_ktime_get_boot_ns)
182 {
183 /* NMI safe access to clock boottime */
184 return ktime_get_boot_fast_ns();
185 }
186
187 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
188 .func = bpf_ktime_get_boot_ns,
189 .gpl_only = false,
190 .ret_type = RET_INTEGER,
191 };
192
193 BPF_CALL_0(bpf_ktime_get_coarse_ns)
194 {
195 return ktime_get_coarse_ns();
196 }
197
198 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
199 .func = bpf_ktime_get_coarse_ns,
200 .gpl_only = false,
201 .ret_type = RET_INTEGER,
202 };
203
204 BPF_CALL_0(bpf_ktime_get_tai_ns)
205 {
206 /* NMI safe access to clock tai */
207 return ktime_get_tai_fast_ns();
208 }
209
210 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = {
211 .func = bpf_ktime_get_tai_ns,
212 .gpl_only = false,
213 .ret_type = RET_INTEGER,
214 };
215
216 BPF_CALL_0(bpf_get_current_pid_tgid)
217 {
218 struct task_struct *task = current;
219
220 if (unlikely(!task))
221 return -EINVAL;
222
223 return (u64) task->tgid << 32 | task->pid;
224 }
225
226 const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
227 .func = bpf_get_current_pid_tgid,
228 .gpl_only = false,
229 .ret_type = RET_INTEGER,
230 };
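/* Example (BPF program side) -- an illustrative sketch, not part of this
 * file: the tgid (userspace PID) sits in the upper 32 bits and the
 * per-thread pid in the lower 32 bits of the returned value.
 *
 *	u64 pid_tgid = bpf_get_current_pid_tgid();
 *	u32 tgid = pid_tgid >> 32;
 *	u32 pid = (u32)pid_tgid;
 */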
231
232 BPF_CALL_0(bpf_get_current_uid_gid)
233 {
234 struct task_struct *task = current;
235 kuid_t uid;
236 kgid_t gid;
237
238 if (unlikely(!task))
239 return -EINVAL;
240
241 current_uid_gid(&uid, &gid);
242 return (u64) from_kgid(&init_user_ns, gid) << 32 |
243 from_kuid(&init_user_ns, uid);
244 }
245
246 const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
247 .func = bpf_get_current_uid_gid,
248 .gpl_only = false,
249 .ret_type = RET_INTEGER,
250 };
251
252 BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
253 {
254 struct task_struct *task = current;
255
256 if (unlikely(!task))
257 goto err_clear;
258
259 /* Verifier guarantees that size > 0 */
260 strscpy(buf, task->comm, size);
261 return 0;
262 err_clear:
263 memset(buf, 0, size);
264 return -EINVAL;
265 }
266
267 const struct bpf_func_proto bpf_get_current_comm_proto = {
268 .func = bpf_get_current_comm,
269 .gpl_only = false,
270 .ret_type = RET_INTEGER,
271 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
272 .arg2_type = ARG_CONST_SIZE,
273 };
274
275 #if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)
276
277 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
278 {
279 arch_spinlock_t *l = (void *)lock;
280 union {
281 __u32 val;
282 arch_spinlock_t lock;
283 } u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };
284
285 compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
286 BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
287 BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
288 arch_spin_lock(l);
289 }
290
291 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
292 {
293 arch_spinlock_t *l = (void *)lock;
294
295 arch_spin_unlock(l);
296 }
297
298 #else
299
300 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
301 {
302 atomic_t *l = (void *)lock;
303
304 BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
305 do {
306 atomic_cond_read_relaxed(l, !VAL);
307 } while (atomic_xchg(l, 1));
308 }
309
310 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
311 {
312 atomic_t *l = (void *)lock;
313
314 atomic_set_release(l, 0);
315 }
316
317 #endif
318
319 static DEFINE_PER_CPU(unsigned long, irqsave_flags);
320
321 static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
322 {
323 unsigned long flags;
324
325 local_irq_save(flags);
326 __bpf_spin_lock(lock);
327 __this_cpu_write(irqsave_flags, flags);
328 }
329
330 notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
331 {
332 __bpf_spin_lock_irqsave(lock);
333 return 0;
334 }
335
336 const struct bpf_func_proto bpf_spin_lock_proto = {
337 .func = bpf_spin_lock,
338 .gpl_only = false,
339 .ret_type = RET_VOID,
340 .arg1_type = ARG_PTR_TO_SPIN_LOCK,
341 .arg1_btf_id = BPF_PTR_POISON,
342 };
343
344 static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
345 {
346 unsigned long flags;
347
348 flags = __this_cpu_read(irqsave_flags);
349 __bpf_spin_unlock(lock);
350 local_irq_restore(flags);
351 }
352
353 notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
354 {
355 __bpf_spin_unlock_irqrestore(lock);
356 return 0;
357 }
358
359 const struct bpf_func_proto bpf_spin_unlock_proto = {
360 .func = bpf_spin_unlock,
361 .gpl_only = false,
362 .ret_type = RET_VOID,
363 .arg1_type = ARG_PTR_TO_SPIN_LOCK,
364 .arg1_btf_id = BPF_PTR_POISON,
365 };
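/* Example (BPF program side) -- an illustrative sketch, not part of this
 * file: bpf_spin_lock protects fields of a single map value and must be
 * paired with bpf_spin_unlock before the program returns.
 *
 *	struct hmap_elem {
 *		int cnt;
 *		struct bpf_spin_lock lock;
 *	};
 *
 *	val = bpf_map_lookup_elem(&hmap, &key);
 *	if (val) {
 *		bpf_spin_lock(&val->lock);
 *		val->cnt++;
 *		bpf_spin_unlock(&val->lock);
 *	}
 *
 * 'hmap', 'key' and 'struct hmap_elem' are hypothetical names.
 */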
366
367 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
368 bool lock_src)
369 {
370 struct bpf_spin_lock *lock;
371
372 if (lock_src)
373 lock = src + map->record->spin_lock_off;
374 else
375 lock = dst + map->record->spin_lock_off;
376 preempt_disable();
377 __bpf_spin_lock_irqsave(lock);
378 copy_map_value(map, dst, src);
379 __bpf_spin_unlock_irqrestore(lock);
380 preempt_enable();
381 }
382
383 BPF_CALL_0(bpf_jiffies64)
384 {
385 return get_jiffies_64();
386 }
387
388 const struct bpf_func_proto bpf_jiffies64_proto = {
389 .func = bpf_jiffies64,
390 .gpl_only = false,
391 .ret_type = RET_INTEGER,
392 };
393
394 #ifdef CONFIG_CGROUPS
395 BPF_CALL_0(bpf_get_current_cgroup_id)
396 {
397 struct cgroup *cgrp;
398 u64 cgrp_id;
399
400 rcu_read_lock();
401 cgrp = task_dfl_cgroup(current);
402 cgrp_id = cgroup_id(cgrp);
403 rcu_read_unlock();
404
405 return cgrp_id;
406 }
407
408 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
409 .func = bpf_get_current_cgroup_id,
410 .gpl_only = false,
411 .ret_type = RET_INTEGER,
412 };
413
414 BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
415 {
416 struct cgroup *cgrp;
417 struct cgroup *ancestor;
418 u64 cgrp_id;
419
420 rcu_read_lock();
421 cgrp = task_dfl_cgroup(current);
422 ancestor = cgroup_ancestor(cgrp, ancestor_level);
423 cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
424 rcu_read_unlock();
425
426 return cgrp_id;
427 }
428
429 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
430 .func = bpf_get_current_ancestor_cgroup_id,
431 .gpl_only = false,
432 .ret_type = RET_INTEGER,
433 .arg1_type = ARG_ANYTHING,
434 };
435 #endif /* CONFIG_CGROUPS */
436
437 #define BPF_STRTOX_BASE_MASK 0x1F
438
439 static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
440 unsigned long long *res, bool *is_negative)
441 {
442 unsigned int base = flags & BPF_STRTOX_BASE_MASK;
443 const char *cur_buf = buf;
444 size_t cur_len = buf_len;
445 unsigned int consumed;
446 size_t val_len;
447 char str[64];
448
449 if (!buf || !buf_len || !res || !is_negative)
450 return -EINVAL;
451
452 if (base != 0 && base != 8 && base != 10 && base != 16)
453 return -EINVAL;
454
455 if (flags & ~BPF_STRTOX_BASE_MASK)
456 return -EINVAL;
457
458 while (cur_buf < buf + buf_len && isspace(*cur_buf))
459 ++cur_buf;
460
461 *is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
462 if (*is_negative)
463 ++cur_buf;
464
465 consumed = cur_buf - buf;
466 cur_len -= consumed;
467 if (!cur_len)
468 return -EINVAL;
469
470 cur_len = min(cur_len, sizeof(str) - 1);
471 memcpy(str, cur_buf, cur_len);
472 str[cur_len] = '\0';
473 cur_buf = str;
474
475 cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
476 val_len = _parse_integer(cur_buf, base, res);
477
478 if (val_len & KSTRTOX_OVERFLOW)
479 return -ERANGE;
480
481 if (val_len == 0)
482 return -EINVAL;
483
484 cur_buf += val_len;
485 consumed += cur_buf - str;
486
487 return consumed;
488 }
489
490 static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
491 long long *res)
492 {
493 unsigned long long _res;
494 bool is_negative;
495 int err;
496
497 err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
498 if (err < 0)
499 return err;
500 if (is_negative) {
501 if ((long long)-_res > 0)
502 return -ERANGE;
503 *res = -_res;
504 } else {
505 if ((long long)_res < 0)
506 return -ERANGE;
507 *res = _res;
508 }
509 return err;
510 }
511
512 BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
513 long *, res)
514 {
515 long long _res;
516 int err;
517
518 err = __bpf_strtoll(buf, buf_len, flags, &_res);
519 if (err < 0)
520 return err;
521 if (_res != (long)_res)
522 return -ERANGE;
523 *res = _res;
524 return err;
525 }
526
527 const struct bpf_func_proto bpf_strtol_proto = {
528 .func = bpf_strtol,
529 .gpl_only = false,
530 .ret_type = RET_INTEGER,
531 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
532 .arg2_type = ARG_CONST_SIZE,
533 .arg3_type = ARG_ANYTHING,
534 .arg4_type = ARG_PTR_TO_LONG,
535 };
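/* Example (BPF program side) -- an illustrative sketch, not part of this
 * file: the low bits of 'flags' select the numeric base (0 means
 * auto-detect), and on success the helper returns the number of
 * characters consumed.
 *
 *	long val;
 *	int n;
 *
 *	n = bpf_strtol(buf, sizeof(buf), 0, &val);
 *	if (n < 0)
 *		return n;
 *
 * 'buf' is a hypothetical character buffer readable by the program.
 */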
536
537 BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
538 unsigned long *, res)
539 {
540 unsigned long long _res;
541 bool is_negative;
542 int err;
543
544 err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
545 if (err < 0)
546 return err;
547 if (is_negative)
548 return -EINVAL;
549 if (_res != (unsigned long)_res)
550 return -ERANGE;
551 *res = _res;
552 return err;
553 }
554
555 const struct bpf_func_proto bpf_strtoul_proto = {
556 .func = bpf_strtoul,
557 .gpl_only = false,
558 .ret_type = RET_INTEGER,
559 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
560 .arg2_type = ARG_CONST_SIZE,
561 .arg3_type = ARG_ANYTHING,
562 .arg4_type = ARG_PTR_TO_LONG,
563 };
564
565 BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
566 {
567 return strncmp(s1, s2, s1_sz);
568 }
569
570 static const struct bpf_func_proto bpf_strncmp_proto = {
571 .func = bpf_strncmp,
572 .gpl_only = false,
573 .ret_type = RET_INTEGER,
574 .arg1_type = ARG_PTR_TO_MEM,
575 .arg2_type = ARG_CONST_SIZE,
576 .arg3_type = ARG_PTR_TO_CONST_STR,
577 };
578
579 BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
580 struct bpf_pidns_info *, nsdata, u32, size)
581 {
582 struct task_struct *task = current;
583 struct pid_namespace *pidns;
584 int err = -EINVAL;
585
586 if (unlikely(size != sizeof(struct bpf_pidns_info)))
587 goto clear;
588
589 if (unlikely((u64)(dev_t)dev != dev))
590 goto clear;
591
592 if (unlikely(!task))
593 goto clear;
594
595 pidns = task_active_pid_ns(task);
596 if (unlikely(!pidns)) {
597 err = -ENOENT;
598 goto clear;
599 }
600
601 if (!ns_match(&pidns->ns, (dev_t)dev, ino))
602 goto clear;
603
604 nsdata->pid = task_pid_nr_ns(task, pidns);
605 nsdata->tgid = task_tgid_nr_ns(task, pidns);
606 return 0;
607 clear:
608 memset((void *)nsdata, 0, (size_t) size);
609 return err;
610 }
611
612 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
613 .func = bpf_get_ns_current_pid_tgid,
614 .gpl_only = false,
615 .ret_type = RET_INTEGER,
616 .arg1_type = ARG_ANYTHING,
617 .arg2_type = ARG_ANYTHING,
618 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
619 .arg4_type = ARG_CONST_SIZE,
620 };
621
622 static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
623 .func = bpf_get_raw_cpu_id,
624 .gpl_only = false,
625 .ret_type = RET_INTEGER,
626 };
627
628 BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
629 u64, flags, void *, data, u64, size)
630 {
631 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
632 return -EINVAL;
633
634 return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
635 }
636
637 const struct bpf_func_proto bpf_event_output_data_proto = {
638 .func = bpf_event_output_data,
639 .gpl_only = true,
640 .ret_type = RET_INTEGER,
641 .arg1_type = ARG_PTR_TO_CTX,
642 .arg2_type = ARG_CONST_MAP_PTR,
643 .arg3_type = ARG_ANYTHING,
644 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
645 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
646 };
647
648 BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
649 const void __user *, user_ptr)
650 {
651 int ret = copy_from_user(dst, user_ptr, size);
652
653 if (unlikely(ret)) {
654 memset(dst, 0, size);
655 ret = -EFAULT;
656 }
657
658 return ret;
659 }
660
661 const struct bpf_func_proto bpf_copy_from_user_proto = {
662 .func = bpf_copy_from_user,
663 .gpl_only = false,
664 .might_sleep = true,
665 .ret_type = RET_INTEGER,
666 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
667 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
668 .arg3_type = ARG_ANYTHING,
669 };
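/* Example (BPF program side) -- an illustrative sketch, not part of this
 * file: the helper may fault in user pages, so it is only available to
 * sleepable programs; on failure the destination buffer is zeroed.
 *
 *	char buf[32];
 *
 *	if (bpf_copy_from_user(buf, sizeof(buf), user_ptr))
 *		return 0;
 *
 * 'user_ptr' is a hypothetical user-space address taken from the
 * program's context.
 */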
670
671 BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
672 const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
673 {
674 int ret;
675
676 /* flags is not used yet */
677 if (unlikely(flags))
678 return -EINVAL;
679
680 if (unlikely(!size))
681 return 0;
682
683 ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
684 if (ret == size)
685 return 0;
686
687 memset(dst, 0, size);
688 /* Return -EFAULT for partial read */
689 return ret < 0 ? ret : -EFAULT;
690 }
691
692 const struct bpf_func_proto bpf_copy_from_user_task_proto = {
693 .func = bpf_copy_from_user_task,
694 .gpl_only = true,
695 .might_sleep = true,
696 .ret_type = RET_INTEGER,
697 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
698 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
699 .arg3_type = ARG_ANYTHING,
700 .arg4_type = ARG_PTR_TO_BTF_ID,
701 .arg4_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
702 .arg5_type = ARG_ANYTHING
703 };
704
705 BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
706 {
707 if (cpu >= nr_cpu_ids)
708 return (unsigned long)NULL;
709
710 return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
711 }
712
713 const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
714 .func = bpf_per_cpu_ptr,
715 .gpl_only = false,
716 .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
717 .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
718 .arg2_type = ARG_ANYTHING,
719 };
720
721 BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
722 {
723 return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
724 }
725
726 const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
727 .func = bpf_this_cpu_ptr,
728 .gpl_only = false,
729 .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
730 .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
731 };
732
733 static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
734 size_t bufsz)
735 {
736 void __user *user_ptr = (__force void __user *)unsafe_ptr;
737
738 buf[0] = 0;
739
740 switch (fmt_ptype) {
741 case 's':
742 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
743 if ((unsigned long)unsafe_ptr < TASK_SIZE)
744 return strncpy_from_user_nofault(buf, user_ptr, bufsz);
745 fallthrough;
746 #endif
747 case 'k':
748 return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
749 case 'u':
750 return strncpy_from_user_nofault(buf, user_ptr, bufsz);
751 }
752
753 return -EINVAL;
754 }
755
756 /* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
757 * arguments representation.
758 */
759 #define MAX_BPRINTF_BIN_ARGS 512
760
761 /* Support executing three nested bprintf helper calls on a given CPU */
762 #define MAX_BPRINTF_NEST_LEVEL 3
763 struct bpf_bprintf_buffers {
764 char bin_args[MAX_BPRINTF_BIN_ARGS];
765 char buf[MAX_BPRINTF_BUF];
766 };
767
768 static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
769 static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);
770
771 static int try_get_buffers(struct bpf_bprintf_buffers **bufs)
772 {
773 int nest_level;
774
775 preempt_disable();
776 nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
777 if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
778 this_cpu_dec(bpf_bprintf_nest_level);
779 preempt_enable();
780 return -EBUSY;
781 }
782 *bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);
783
784 return 0;
785 }
786
787 void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
788 {
789 if (!data->bin_args && !data->buf)
790 return;
791 if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
792 return;
793 this_cpu_dec(bpf_bprintf_nest_level);
794 preempt_enable();
795 }
796
797 /*
798 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
799 *
800 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
801 *
802 * This can be used in two ways:
803 * - Format string verification only: when data->get_bin_args is false
804 * - Arguments preparation: in addition to the above verification, it writes in
805 * data->bin_args a binary representation of arguments usable by bstr_printf
806 * where pointers from BPF have been sanitized.
807 *
808 * In argument preparation mode, if 0 is returned, safe temporary buffers are
809 * allocated and bpf_bprintf_cleanup should be called to free them after use.
810 */
811 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
812 u32 num_args, struct bpf_bprintf_data *data)
813 {
814 bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
815 char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
816 struct bpf_bprintf_buffers *buffers = NULL;
817 size_t sizeof_cur_arg, sizeof_cur_ip;
818 int err, i, num_spec = 0;
819 u64 cur_arg;
820 char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";
821
822 fmt_end = strnchr(fmt, fmt_size, 0);
823 if (!fmt_end)
824 return -EINVAL;
825 fmt_size = fmt_end - fmt;
826
827 if (get_buffers && try_get_buffers(&buffers))
828 return -EBUSY;
829
830 if (data->get_bin_args) {
831 if (num_args)
832 tmp_buf = buffers->bin_args;
833 tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
834 data->bin_args = (u32 *)tmp_buf;
835 }
836
837 if (data->get_buf)
838 data->buf = buffers->buf;
839
840 for (i = 0; i < fmt_size; i++) {
841 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
842 err = -EINVAL;
843 goto out;
844 }
845
846 if (fmt[i] != '%')
847 continue;
848
849 if (fmt[i + 1] == '%') {
850 i++;
851 continue;
852 }
853
854 if (num_spec >= num_args) {
855 err = -EINVAL;
856 goto out;
857 }
858
859 /* The string is zero-terminated so if fmt[i] != 0, we can
860 * always access fmt[i + 1], in the worst case it will be a 0
861 */
862 i++;
863
864 /* skip optional "[0 +-][num]" width formatting field */
865 while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
866 fmt[i] == ' ')
867 i++;
868 if (fmt[i] >= '1' && fmt[i] <= '9') {
869 i++;
870 while (fmt[i] >= '0' && fmt[i] <= '9')
871 i++;
872 }
873
874 if (fmt[i] == 'p') {
875 sizeof_cur_arg = sizeof(long);
876
877 if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
878 fmt[i + 2] == 's') {
879 fmt_ptype = fmt[i + 1];
880 i += 2;
881 goto fmt_str;
882 }
883
884 if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
885 ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
886 fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
887 fmt[i + 1] == 'S') {
888 /* just kernel pointers */
889 if (tmp_buf)
890 cur_arg = raw_args[num_spec];
891 i++;
892 goto nocopy_fmt;
893 }
894
895 if (fmt[i + 1] == 'B') {
896 if (tmp_buf) {
897 err = snprintf(tmp_buf,
898 (tmp_buf_end - tmp_buf),
899 "%pB",
900 (void *)(long)raw_args[num_spec]);
901 tmp_buf += (err + 1);
902 }
903
904 i++;
905 num_spec++;
906 continue;
907 }
908
909 /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
910 if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
911 (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
912 err = -EINVAL;
913 goto out;
914 }
915
916 i += 2;
917 if (!tmp_buf)
918 goto nocopy_fmt;
919
920 sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
921 if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
922 err = -ENOSPC;
923 goto out;
924 }
925
926 unsafe_ptr = (char *)(long)raw_args[num_spec];
927 err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
928 sizeof_cur_ip);
929 if (err < 0)
930 memset(cur_ip, 0, sizeof_cur_ip);
931
932 /* hack: bstr_printf expects IP addresses to be
933 * pre-formatted as strings, ironically, the easiest way
934 * to do that is to call snprintf.
935 */
936 ip_spec[2] = fmt[i - 1];
937 ip_spec[3] = fmt[i];
938 err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
939 ip_spec, &cur_ip);
940
941 tmp_buf += err + 1;
942 num_spec++;
943
944 continue;
945 } else if (fmt[i] == 's') {
946 fmt_ptype = fmt[i];
947 fmt_str:
948 if (fmt[i + 1] != 0 &&
949 !isspace(fmt[i + 1]) &&
950 !ispunct(fmt[i + 1])) {
951 err = -EINVAL;
952 goto out;
953 }
954
955 if (!tmp_buf)
956 goto nocopy_fmt;
957
958 if (tmp_buf_end == tmp_buf) {
959 err = -ENOSPC;
960 goto out;
961 }
962
963 unsafe_ptr = (char *)(long)raw_args[num_spec];
964 err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
965 fmt_ptype,
966 tmp_buf_end - tmp_buf);
967 if (err < 0) {
968 tmp_buf[0] = '\0';
969 err = 1;
970 }
971
972 tmp_buf += err;
973 num_spec++;
974
975 continue;
976 } else if (fmt[i] == 'c') {
977 if (!tmp_buf)
978 goto nocopy_fmt;
979
980 if (tmp_buf_end == tmp_buf) {
981 err = -ENOSPC;
982 goto out;
983 }
984
985 *tmp_buf = raw_args[num_spec];
986 tmp_buf++;
987 num_spec++;
988
989 continue;
990 }
991
992 sizeof_cur_arg = sizeof(int);
993
994 if (fmt[i] == 'l') {
995 sizeof_cur_arg = sizeof(long);
996 i++;
997 }
998 if (fmt[i] == 'l') {
999 sizeof_cur_arg = sizeof(long long);
1000 i++;
1001 }
1002
1003 if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
1004 fmt[i] != 'x' && fmt[i] != 'X') {
1005 err = -EINVAL;
1006 goto out;
1007 }
1008
1009 if (tmp_buf)
1010 cur_arg = raw_args[num_spec];
1011 nocopy_fmt:
1012 if (tmp_buf) {
1013 tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
1014 if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
1015 err = -ENOSPC;
1016 goto out;
1017 }
1018
1019 if (sizeof_cur_arg == 8) {
1020 *(u32 *)tmp_buf = *(u32 *)&cur_arg;
1021 *(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
1022 } else {
1023 *(u32 *)tmp_buf = (u32)(long)cur_arg;
1024 }
1025 tmp_buf += sizeof_cur_arg;
1026 }
1027 num_spec++;
1028 }
1029
1030 err = 0;
1031 out:
1032 if (err)
1033 bpf_bprintf_cleanup(data);
1034 return err;
1035 }
1036
1037 BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
1038 const void *, args, u32, data_len)
1039 {
1040 struct bpf_bprintf_data data = {
1041 .get_bin_args = true,
1042 };
1043 int err, num_args;
1044
1045 if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
1046 (data_len && !args))
1047 return -EINVAL;
1048 num_args = data_len / 8;
1049
1050 /* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
1051 * can safely give an unbounded size.
1052 */
1053 err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
1054 if (err < 0)
1055 return err;
1056
1057 err = bstr_printf(str, str_size, fmt, data.bin_args);
1058
1059 bpf_bprintf_cleanup(&data);
1060
1061 return err + 1;
1062 }
1063
1064 const struct bpf_func_proto bpf_snprintf_proto = {
1065 .func = bpf_snprintf,
1066 .gpl_only = true,
1067 .ret_type = RET_INTEGER,
1068 .arg1_type = ARG_PTR_TO_MEM_OR_NULL,
1069 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
1070 .arg3_type = ARG_PTR_TO_CONST_STR,
1071 .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
1072 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1073 };
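/* Example (BPF program side) -- an illustrative sketch, not part of this
 * file: 'args' carries the format arguments as an array of u64 and
 * 'data_len' is its size in bytes (a multiple of 8); libbpf's
 * BPF_SNPRINTF() macro wraps this calling convention.
 *
 *	char out[64];
 *	u64 args[] = { (u64)cpu, (u64)ts };
 *
 *	bpf_snprintf(out, sizeof(out), "cpu %u ts %llu", args, sizeof(args));
 *
 * 'cpu' and 'ts' are hypothetical variables.
 */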
1074
1075 /* BPF map elements can contain 'struct bpf_timer'.
1076 * Such map owns all of its BPF timers.
1077 * 'struct bpf_timer' is allocated as part of map element allocation
1078 * and it's zero initialized.
1079 * That space is used to keep 'struct bpf_timer_kern'.
1080 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
1081 * remembers 'struct bpf_map *' pointer it's part of.
1082 * bpf_timer_set_callback() increments prog refcnt and assign bpf callback_fn.
1083 * bpf_timer_start() arms the timer.
1084 * If user space reference to a map goes to zero at this point
1085 * ops->map_release_uref callback is responsible for cancelling the timers,
1086 * freeing their memory, and decrementing prog's refcnts.
1087 * bpf_timer_cancel() cancels the timer and decrements prog's refcnt.
1088 * Inner maps can contain bpf timers as well. ops->map_release_uref is
1089 * freeing the timers when inner map is replaced or deleted by user space.
1090 */
1091 struct bpf_hrtimer {
1092 struct hrtimer timer;
1093 struct bpf_map *map;
1094 struct bpf_prog *prog;
1095 void __rcu *callback_fn;
1096 void *value;
1097 };
1098
1099 /* the actual struct hidden inside uapi struct bpf_timer */
1100 struct bpf_timer_kern {
1101 struct bpf_hrtimer *timer;
1102 /* bpf_spin_lock is used here instead of spinlock_t to make
1103 * sure that it always fits into space reserved by struct bpf_timer
1104 * regardless of LOCKDEP and spinlock debug flags.
1105 */
1106 struct bpf_spin_lock lock;
1107 } __attribute__((aligned(8)));
1108
1109 static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
1110
1111 static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
1112 {
1113 struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
1114 struct bpf_map *map = t->map;
1115 void *value = t->value;
1116 bpf_callback_t callback_fn;
1117 void *key;
1118 u32 idx;
1119
1120 BTF_TYPE_EMIT(struct bpf_timer);
1121 callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
1122 if (!callback_fn)
1123 goto out;
1124
1125 /* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
1126 * cannot be preempted by another bpf_timer_cb() on the same cpu.
1127 * Remember the timer this callback is servicing to prevent
1128 * deadlock if callback_fn() calls bpf_timer_cancel() or
1129 * bpf_map_delete_elem() on the same timer.
1130 */
1131 this_cpu_write(hrtimer_running, t);
1132 if (map->map_type == BPF_MAP_TYPE_ARRAY) {
1133 struct bpf_array *array = container_of(map, struct bpf_array, map);
1134
1135 /* compute the key */
1136 idx = ((char *)value - array->value) / array->elem_size;
1137 key = &idx;
1138 } else { /* hash or lru */
1139 key = value - round_up(map->key_size, 8);
1140 }
1141
1142 callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
1143 /* The verifier checked that return value is zero. */
1144
1145 this_cpu_write(hrtimer_running, NULL);
1146 out:
1147 return HRTIMER_NORESTART;
1148 }
1149
1150 BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
1151 u64, flags)
1152 {
1153 clockid_t clockid = flags & (MAX_CLOCKS - 1);
1154 struct bpf_hrtimer *t;
1155 int ret = 0;
1156
1157 BUILD_BUG_ON(MAX_CLOCKS != 16);
1158 BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer));
1159 BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer));
1160
1161 if (in_nmi())
1162 return -EOPNOTSUPP;
1163
1164 if (flags >= MAX_CLOCKS ||
1165 /* similar to timerfd except _ALARM variants are not supported */
1166 (clockid != CLOCK_MONOTONIC &&
1167 clockid != CLOCK_REALTIME &&
1168 clockid != CLOCK_BOOTTIME))
1169 return -EINVAL;
1170 __bpf_spin_lock_irqsave(&timer->lock);
1171 t = timer->timer;
1172 if (t) {
1173 ret = -EBUSY;
1174 goto out;
1175 }
1176 if (!atomic64_read(&map->usercnt)) {
1177 /* maps with timers must be either held by user space
1178 * or pinned in bpffs.
1179 */
1180 ret = -EPERM;
1181 goto out;
1182 }
1183 /* allocate hrtimer via map_kmalloc to use memcg accounting */
1184 t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
1185 if (!t) {
1186 ret = -ENOMEM;
1187 goto out;
1188 }
1189 t->value = (void *)timer - map->record->timer_off;
1190 t->map = map;
1191 t->prog = NULL;
1192 rcu_assign_pointer(t->callback_fn, NULL);
1193 hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
1194 t->timer.function = bpf_timer_cb;
1195 timer->timer = t;
1196 out:
1197 __bpf_spin_unlock_irqrestore(&timer->lock);
1198 return ret;
1199 }
1200
1201 static const struct bpf_func_proto bpf_timer_init_proto = {
1202 .func = bpf_timer_init,
1203 .gpl_only = true,
1204 .ret_type = RET_INTEGER,
1205 .arg1_type = ARG_PTR_TO_TIMER,
1206 .arg2_type = ARG_CONST_MAP_PTR,
1207 .arg3_type = ARG_ANYTHING,
1208 };
1209
1210 BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn,
1211 struct bpf_prog_aux *, aux)
1212 {
1213 struct bpf_prog *prev, *prog = aux->prog;
1214 struct bpf_hrtimer *t;
1215 int ret = 0;
1216
1217 if (in_nmi())
1218 return -EOPNOTSUPP;
1219 __bpf_spin_lock_irqsave(&timer->lock);
1220 t = timer->timer;
1221 if (!t) {
1222 ret = -EINVAL;
1223 goto out;
1224 }
1225 if (!atomic64_read(&t->map->usercnt)) {
1226 /* maps with timers must be either held by user space
1227 * or pinned in bpffs. Otherwise timer might still be
1228 * running even when bpf prog is detached and user space
1229 * is gone, since map_release_uref won't ever be called.
1230 */
1231 ret = -EPERM;
1232 goto out;
1233 }
1234 prev = t->prog;
1235 if (prev != prog) {
1236 /* Bump prog refcnt once. Every bpf_timer_set_callback()
1237 * can pick different callback_fn-s within the same prog.
1238 */
1239 prog = bpf_prog_inc_not_zero(prog);
1240 if (IS_ERR(prog)) {
1241 ret = PTR_ERR(prog);
1242 goto out;
1243 }
1244 if (prev)
1245 /* Drop prev prog refcnt when swapping with new prog */
1246 bpf_prog_put(prev);
1247 t->prog = prog;
1248 }
1249 rcu_assign_pointer(t->callback_fn, callback_fn);
1250 out:
1251 __bpf_spin_unlock_irqrestore(&timer->lock);
1252 return ret;
1253 }
1254
1255 static const struct bpf_func_proto bpf_timer_set_callback_proto = {
1256 .func = bpf_timer_set_callback,
1257 .gpl_only = true,
1258 .ret_type = RET_INTEGER,
1259 .arg1_type = ARG_PTR_TO_TIMER,
1260 .arg2_type = ARG_PTR_TO_FUNC,
1261 };
1262
1263 BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags)
1264 {
1265 struct bpf_hrtimer *t;
1266 int ret = 0;
1267
1268 if (in_nmi())
1269 return -EOPNOTSUPP;
1270 if (flags)
1271 return -EINVAL;
1272 __bpf_spin_lock_irqsave(&timer->lock);
1273 t = timer->timer;
1274 if (!t || !t->prog) {
1275 ret = -EINVAL;
1276 goto out;
1277 }
1278 hrtimer_start(&t->timer, ns_to_ktime(nsecs), HRTIMER_MODE_REL_SOFT);
1279 out:
1280 __bpf_spin_unlock_irqrestore(&timer->lock);
1281 return ret;
1282 }
1283
1284 static const struct bpf_func_proto bpf_timer_start_proto = {
1285 .func = bpf_timer_start,
1286 .gpl_only = true,
1287 .ret_type = RET_INTEGER,
1288 .arg1_type = ARG_PTR_TO_TIMER,
1289 .arg2_type = ARG_ANYTHING,
1290 .arg3_type = ARG_ANYTHING,
1291 };
1292
1293 static void drop_prog_refcnt(struct bpf_hrtimer *t)
1294 {
1295 struct bpf_prog *prog = t->prog;
1296
1297 if (prog) {
1298 bpf_prog_put(prog);
1299 t->prog = NULL;
1300 rcu_assign_pointer(t->callback_fn, NULL);
1301 }
1302 }
1303
1304 BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
1305 {
1306 struct bpf_hrtimer *t;
1307 int ret = 0;
1308
1309 if (in_nmi())
1310 return -EOPNOTSUPP;
1311 __bpf_spin_lock_irqsave(&timer->lock);
1312 t = timer->timer;
1313 if (!t) {
1314 ret = -EINVAL;
1315 goto out;
1316 }
1317 if (this_cpu_read(hrtimer_running) == t) {
1318 /* If bpf callback_fn is trying to bpf_timer_cancel()
1319 * its own timer the hrtimer_cancel() will deadlock
1320 * since it waits for callback_fn to finish
1321 */
1322 ret = -EDEADLK;
1323 goto out;
1324 }
1325 drop_prog_refcnt(t);
1326 out:
1327 __bpf_spin_unlock_irqrestore(&timer->lock);
1328 /* Cancel the timer and wait for associated callback to finish
1329 * if it was running.
1330 */
1331 ret = ret ?: hrtimer_cancel(&t->timer);
1332 return ret;
1333 }
1334
1335 static const struct bpf_func_proto bpf_timer_cancel_proto = {
1336 .func = bpf_timer_cancel,
1337 .gpl_only = true,
1338 .ret_type = RET_INTEGER,
1339 .arg1_type = ARG_PTR_TO_TIMER,
1340 };
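/* Example (BPF program side) -- an illustrative sketch, not part of this
 * file: the usual lifecycle is init -> set_callback -> start, with the
 * timer embedded in a map value and the callback returning 0.
 *
 *	struct map_elem {
 *		struct bpf_timer t;
 *	};
 *
 *	static int timer_cb(void *map, int *key, struct map_elem *val)
 *	{
 *		return 0;
 *	}
 *
 *	val = bpf_map_lookup_elem(&my_map, &key);
 *	if (val) {
 *		bpf_timer_init(&val->t, &my_map, CLOCK_MONOTONIC);
 *		bpf_timer_set_callback(&val->t, timer_cb);
 *		bpf_timer_start(&val->t, 1000000, 0);
 *	}
 *
 * 'my_map', 'key', 'struct map_elem' and 'timer_cb' are hypothetical names.
 */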
1341
1342 /* This function is called by map_delete/update_elem for individual element and
1343 * by ops->map_release_uref when the user space reference to a map reaches zero.
1344 */
1345 void bpf_timer_cancel_and_free(void *val)
1346 {
1347 struct bpf_timer_kern *timer = val;
1348 struct bpf_hrtimer *t;
1349
1350 /* Performance optimization: read timer->timer without lock first. */
1351 if (!READ_ONCE(timer->timer))
1352 return;
1353
1354 __bpf_spin_lock_irqsave(&timer->lock);
1355 /* re-read it under lock */
1356 t = timer->timer;
1357 if (!t)
1358 goto out;
1359 drop_prog_refcnt(t);
1360 /* The subsequent bpf_timer_start/cancel() helpers won't be able to use
1361 * this timer, since it won't be initialized.
1362 */
1363 timer->timer = NULL;
1364 out:
1365 __bpf_spin_unlock_irqrestore(&timer->lock);
1366 if (!t)
1367 return;
1368 /* Cancel the timer and wait for callback to complete if it was running.
1369 * If hrtimer_cancel() can be safely called it's safe to call kfree(t)
1370 * right after for both preallocated and non-preallocated maps.
1371 * The timer->timer = NULL was already done and no code path can
1372 * see address 't' anymore.
1373 *
1374 * Check that bpf_map_delete/update_elem() wasn't called from timer
1375 * callback_fn. In such case don't call hrtimer_cancel() (since it will
1376 * deadlock) and don't call hrtimer_try_to_cancel() (since it will just
1377 * return -1). Though callback_fn is still running on this cpu it's
1378 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
1379 * from 't'. The bpf subprog callback_fn won't be able to access 't',
1380 * since timer->timer = NULL was already done. The timer will be
1381 * effectively cancelled because bpf_timer_cb() will return
1382 * HRTIMER_NORESTART.
1383 */
1384 if (this_cpu_read(hrtimer_running) != t)
1385 hrtimer_cancel(&t->timer);
1386 kfree(t);
1387 }
1388
1389 BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
1390 {
1391 unsigned long *kptr = map_value;
1392
1393 return xchg(kptr, (unsigned long)ptr);
1394 }
1395
1396 /* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
1397 * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
1398 * denote type that verifier will determine.
1399 */
1400 static const struct bpf_func_proto bpf_kptr_xchg_proto = {
1401 .func = bpf_kptr_xchg,
1402 .gpl_only = false,
1403 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
1404 .ret_btf_id = BPF_PTR_POISON,
1405 .arg1_type = ARG_PTR_TO_KPTR,
1406 .arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
1407 .arg2_btf_id = BPF_PTR_POISON,
1408 };
1409
1410 /* Since the upper 8 bits of dynptr->size is reserved, the
1411 * maximum supported size is 2^24 - 1.
1412 */
1413 #define DYNPTR_MAX_SIZE ((1UL << 24) - 1)
1414 #define DYNPTR_TYPE_SHIFT 28
1415 #define DYNPTR_SIZE_MASK 0xFFFFFF
1416 #define DYNPTR_RDONLY_BIT BIT(31)
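/* For illustration: bits 0-23 of 'size' hold the actual size
 * (DYNPTR_SIZE_MASK), bits starting at DYNPTR_TYPE_SHIFT hold the
 * bpf_dynptr_type and bit 31 is the read-only flag, e.g. a read-only
 * local dynptr of size 16 stores
 * DYNPTR_RDONLY_BIT | (BPF_DYNPTR_TYPE_LOCAL << DYNPTR_TYPE_SHIFT) | 16.
 */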
1417
1418 static bool bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
1419 {
1420 return ptr->size & DYNPTR_RDONLY_BIT;
1421 }
1422
1423 static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
1424 {
1425 ptr->size |= type << DYNPTR_TYPE_SHIFT;
1426 }
1427
1428 u32 bpf_dynptr_get_size(const struct bpf_dynptr_kern *ptr)
1429 {
1430 return ptr->size & DYNPTR_SIZE_MASK;
1431 }
1432
1433 int bpf_dynptr_check_size(u32 size)
1434 {
1435 return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
1436 }
1437
1438 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
1439 enum bpf_dynptr_type type, u32 offset, u32 size)
1440 {
1441 ptr->data = data;
1442 ptr->offset = offset;
1443 ptr->size = size;
1444 bpf_dynptr_set_type(ptr, type);
1445 }
1446
1447 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
1448 {
1449 memset(ptr, 0, sizeof(*ptr));
1450 }
1451
1452 static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len)
1453 {
1454 u32 size = bpf_dynptr_get_size(ptr);
1455
1456 if (len > size || offset > size - len)
1457 return -E2BIG;
1458
1459 return 0;
1460 }
1461
1462 BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr)
1463 {
1464 int err;
1465
1466 BTF_TYPE_EMIT(struct bpf_dynptr);
1467
1468 err = bpf_dynptr_check_size(size);
1469 if (err)
1470 goto error;
1471
1472 /* flags is currently unsupported */
1473 if (flags) {
1474 err = -EINVAL;
1475 goto error;
1476 }
1477
1478 bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);
1479
1480 return 0;
1481
1482 error:
1483 bpf_dynptr_set_null(ptr);
1484 return err;
1485 }
1486
1487 static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
1488 .func = bpf_dynptr_from_mem,
1489 .gpl_only = false,
1490 .ret_type = RET_INTEGER,
1491 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
1492 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
1493 .arg3_type = ARG_ANYTHING,
1494 .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT,
1495 };
1496
1497 BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
1498 u32, offset, u64, flags)
1499 {
1500 int err;
1501
1502 if (!src->data || flags)
1503 return -EINVAL;
1504
1505 err = bpf_dynptr_check_off_len(src, offset, len);
1506 if (err)
1507 return err;
1508
1509 /* Source and destination may possibly overlap, hence use memmove to
1510 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
1511 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1512 */
1513 memmove(dst, src->data + src->offset + offset, len);
1514
1515 return 0;
1516 }
1517
1518 static const struct bpf_func_proto bpf_dynptr_read_proto = {
1519 .func = bpf_dynptr_read,
1520 .gpl_only = false,
1521 .ret_type = RET_INTEGER,
1522 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
1523 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
1524 .arg3_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1525 .arg4_type = ARG_ANYTHING,
1526 .arg5_type = ARG_ANYTHING,
1527 };
1528
1529 BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src,
1530 u32, len, u64, flags)
1531 {
1532 int err;
1533
1534 if (!dst->data || flags || bpf_dynptr_is_rdonly(dst))
1535 return -EINVAL;
1536
1537 err = bpf_dynptr_check_off_len(dst, offset, len);
1538 if (err)
1539 return err;
1540
1541 /* Source and destination may possibly overlap, hence use memmove to
1542 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
1543 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1544 */
1545 memmove(dst->data + dst->offset + offset, src, len);
1546
1547 return 0;
1548 }
1549
1550 static const struct bpf_func_proto bpf_dynptr_write_proto = {
1551 .func = bpf_dynptr_write,
1552 .gpl_only = false,
1553 .ret_type = RET_INTEGER,
1554 .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1555 .arg2_type = ARG_ANYTHING,
1556 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1557 .arg4_type = ARG_CONST_SIZE_OR_ZERO,
1558 .arg5_type = ARG_ANYTHING,
1559 };
1560
1561 BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
1562 {
1563 int err;
1564
1565 if (!ptr->data)
1566 return 0;
1567
1568 err = bpf_dynptr_check_off_len(ptr, offset, len);
1569 if (err)
1570 return 0;
1571
1572 if (bpf_dynptr_is_rdonly(ptr))
1573 return 0;
1574
1575 return (unsigned long)(ptr->data + ptr->offset + offset);
1576 }
1577
1578 static const struct bpf_func_proto bpf_dynptr_data_proto = {
1579 .func = bpf_dynptr_data,
1580 .gpl_only = false,
1581 .ret_type = RET_PTR_TO_DYNPTR_MEM_OR_NULL,
1582 .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1583 .arg2_type = ARG_ANYTHING,
1584 .arg3_type = ARG_CONST_ALLOC_SIZE_OR_ZERO,
1585 };
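/* Example (BPF program side) -- an illustrative sketch, not part of this
 * file: a local dynptr is created over map-backed memory (e.g. a global
 * buffer, which libbpf places in an internal map) and then accessed
 * through the read/data helpers.
 *
 *	static char raw[16];	(global, i.e. map-backed)
 *
 *	struct bpf_dynptr dptr;
 *	char copy[8];
 *
 *	if (bpf_dynptr_from_mem(raw, sizeof(raw), 0, &dptr))
 *		return 0;
 *	if (bpf_dynptr_read(copy, sizeof(copy), &dptr, 0, 0))
 *		return 0;
 *
 * Buffer names are hypothetical.
 */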
1586
1587 const struct bpf_func_proto bpf_get_current_task_proto __weak;
1588 const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
1589 const struct bpf_func_proto bpf_probe_read_user_proto __weak;
1590 const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
1591 const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
1592 const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
1593 const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
1594
1595 const struct bpf_func_proto *
1596 bpf_base_func_proto(enum bpf_func_id func_id)
1597 {
1598 switch (func_id) {
1599 case BPF_FUNC_map_lookup_elem:
1600 return &bpf_map_lookup_elem_proto;
1601 case BPF_FUNC_map_update_elem:
1602 return &bpf_map_update_elem_proto;
1603 case BPF_FUNC_map_delete_elem:
1604 return &bpf_map_delete_elem_proto;
1605 case BPF_FUNC_map_push_elem:
1606 return &bpf_map_push_elem_proto;
1607 case BPF_FUNC_map_pop_elem:
1608 return &bpf_map_pop_elem_proto;
1609 case BPF_FUNC_map_peek_elem:
1610 return &bpf_map_peek_elem_proto;
1611 case BPF_FUNC_map_lookup_percpu_elem:
1612 return &bpf_map_lookup_percpu_elem_proto;
1613 case BPF_FUNC_get_prandom_u32:
1614 return &bpf_get_prandom_u32_proto;
1615 case BPF_FUNC_get_smp_processor_id:
1616 return &bpf_get_raw_smp_processor_id_proto;
1617 case BPF_FUNC_get_numa_node_id:
1618 return &bpf_get_numa_node_id_proto;
1619 case BPF_FUNC_tail_call:
1620 return &bpf_tail_call_proto;
1621 case BPF_FUNC_ktime_get_ns:
1622 return &bpf_ktime_get_ns_proto;
1623 case BPF_FUNC_ktime_get_boot_ns:
1624 return &bpf_ktime_get_boot_ns_proto;
1625 case BPF_FUNC_ktime_get_tai_ns:
1626 return &bpf_ktime_get_tai_ns_proto;
1627 case BPF_FUNC_ringbuf_output:
1628 return &bpf_ringbuf_output_proto;
1629 case BPF_FUNC_ringbuf_reserve:
1630 return &bpf_ringbuf_reserve_proto;
1631 case BPF_FUNC_ringbuf_submit:
1632 return &bpf_ringbuf_submit_proto;
1633 case BPF_FUNC_ringbuf_discard:
1634 return &bpf_ringbuf_discard_proto;
1635 case BPF_FUNC_ringbuf_query:
1636 return &bpf_ringbuf_query_proto;
1637 case BPF_FUNC_strncmp:
1638 return &bpf_strncmp_proto;
1639 case BPF_FUNC_strtol:
1640 return &bpf_strtol_proto;
1641 case BPF_FUNC_strtoul:
1642 return &bpf_strtoul_proto;
1643 default:
1644 break;
1645 }
1646
1647 if (!bpf_capable())
1648 return NULL;
1649
1650 switch (func_id) {
1651 case BPF_FUNC_spin_lock:
1652 return &bpf_spin_lock_proto;
1653 case BPF_FUNC_spin_unlock:
1654 return &bpf_spin_unlock_proto;
1655 case BPF_FUNC_jiffies64:
1656 return &bpf_jiffies64_proto;
1657 case BPF_FUNC_per_cpu_ptr:
1658 return &bpf_per_cpu_ptr_proto;
1659 case BPF_FUNC_this_cpu_ptr:
1660 return &bpf_this_cpu_ptr_proto;
1661 case BPF_FUNC_timer_init:
1662 return &bpf_timer_init_proto;
1663 case BPF_FUNC_timer_set_callback:
1664 return &bpf_timer_set_callback_proto;
1665 case BPF_FUNC_timer_start:
1666 return &bpf_timer_start_proto;
1667 case BPF_FUNC_timer_cancel:
1668 return &bpf_timer_cancel_proto;
1669 case BPF_FUNC_kptr_xchg:
1670 return &bpf_kptr_xchg_proto;
1671 case BPF_FUNC_for_each_map_elem:
1672 return &bpf_for_each_map_elem_proto;
1673 case BPF_FUNC_loop:
1674 return &bpf_loop_proto;
1675 case BPF_FUNC_user_ringbuf_drain:
1676 return &bpf_user_ringbuf_drain_proto;
1677 case BPF_FUNC_ringbuf_reserve_dynptr:
1678 return &bpf_ringbuf_reserve_dynptr_proto;
1679 case BPF_FUNC_ringbuf_submit_dynptr:
1680 return &bpf_ringbuf_submit_dynptr_proto;
1681 case BPF_FUNC_ringbuf_discard_dynptr:
1682 return &bpf_ringbuf_discard_dynptr_proto;
1683 case BPF_FUNC_dynptr_from_mem:
1684 return &bpf_dynptr_from_mem_proto;
1685 case BPF_FUNC_dynptr_read:
1686 return &bpf_dynptr_read_proto;
1687 case BPF_FUNC_dynptr_write:
1688 return &bpf_dynptr_write_proto;
1689 case BPF_FUNC_dynptr_data:
1690 return &bpf_dynptr_data_proto;
1691 #ifdef CONFIG_CGROUPS
1692 case BPF_FUNC_cgrp_storage_get:
1693 return &bpf_cgrp_storage_get_proto;
1694 case BPF_FUNC_cgrp_storage_delete:
1695 return &bpf_cgrp_storage_delete_proto;
1696 #endif
1697 default:
1698 break;
1699 }
1700
1701 if (!perfmon_capable())
1702 return NULL;
1703
1704 switch (func_id) {
1705 case BPF_FUNC_trace_printk:
1706 return bpf_get_trace_printk_proto();
1707 case BPF_FUNC_get_current_task:
1708 return &bpf_get_current_task_proto;
1709 case BPF_FUNC_get_current_task_btf:
1710 return &bpf_get_current_task_btf_proto;
1711 case BPF_FUNC_probe_read_user:
1712 return &bpf_probe_read_user_proto;
1713 case BPF_FUNC_probe_read_kernel:
1714 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1715 NULL : &bpf_probe_read_kernel_proto;
1716 case BPF_FUNC_probe_read_user_str:
1717 return &bpf_probe_read_user_str_proto;
1718 case BPF_FUNC_probe_read_kernel_str:
1719 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1720 NULL : &bpf_probe_read_kernel_str_proto;
1721 case BPF_FUNC_snprintf_btf:
1722 return &bpf_snprintf_btf_proto;
1723 case BPF_FUNC_snprintf:
1724 return &bpf_snprintf_proto;
1725 case BPF_FUNC_task_pt_regs:
1726 return &bpf_task_pt_regs_proto;
1727 case BPF_FUNC_trace_vprintk:
1728 return bpf_get_trace_vprintk_proto();
1729 default:
1730 return NULL;
1731 }
1732 }
1733
1734 void bpf_list_head_free(const struct btf_field *field, void *list_head,
1735 struct bpf_spin_lock *spin_lock)
1736 {
1737 struct list_head *head = list_head, *orig_head = list_head;
1738
1739 BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head));
1740 BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head));
1741
1742 /* Do the actual list draining outside the lock to not hold the lock for
1743 * too long, and also prevent deadlocks if tracing programs end up
1744 * executing on entry/exit of functions called inside the critical
1745 * section, and end up doing map ops that call bpf_list_head_free for
1746 * the same map value again.
1747 */
1748 __bpf_spin_lock_irqsave(spin_lock);
1749 if (!head->next || list_empty(head))
1750 goto unlock;
1751 head = head->next;
1752 unlock:
1753 INIT_LIST_HEAD(orig_head);
1754 __bpf_spin_unlock_irqrestore(spin_lock);
1755
1756 while (head != orig_head) {
1757 void *obj = head;
1758
1759 obj -= field->graph_root.node_offset;
1760 head = head->next;
1761 /* The contained type can also have resources, including a
1762 * bpf_list_head which needs to be freed.
1763 */
1764 bpf_obj_free_fields(field->graph_root.value_rec, obj);
1765 /* bpf_mem_free requires migrate_disable(), since we can be
1766 * called from map free path as well apart from BPF program (as
1767 * part of map ops doing bpf_obj_free_fields).
1768 */
1769 migrate_disable();
1770 bpf_mem_free(&bpf_global_ma, obj);
1771 migrate_enable();
1772 }
1773 }
1774
1775 /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
1776 * 'rb_node *', so field name of rb_node within containing struct is not
1777 * needed.
1778 *
1779 * Since bpf_rb_tree's node type has a corresponding struct btf_field with
1780 * graph_root.node_offset, it's not necessary to know field name
1781 * or type of node struct
1782 */
1783 #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \
1784 for (pos = rb_first_postorder(root); \
1785 pos && ({ n = rb_next_postorder(pos); 1; }); \
1786 pos = n)
1787
1788 void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
1789 struct bpf_spin_lock *spin_lock)
1790 {
1791 struct rb_root_cached orig_root, *root = rb_root;
1792 struct rb_node *pos, *n;
1793 void *obj;
1794
1795 BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root));
1796 BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root));
1797
1798 __bpf_spin_lock_irqsave(spin_lock);
1799 orig_root = *root;
1800 *root = RB_ROOT_CACHED;
1801 __bpf_spin_unlock_irqrestore(spin_lock);
1802
1803 bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) {
1804 obj = pos;
1805 obj -= field->graph_root.node_offset;
1806
1807 bpf_obj_free_fields(field->graph_root.value_rec, obj);
1808
1809 migrate_disable();
1810 bpf_mem_free(&bpf_global_ma, obj);
1811 migrate_enable();
1812 }
1813 }
1814
1815 __diag_push();
1816 __diag_ignore_all("-Wmissing-prototypes",
1817 "Global functions as their definitions will be in vmlinux BTF");
1818
1819 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
1820 {
1821 struct btf_struct_meta *meta = meta__ign;
1822 u64 size = local_type_id__k;
1823 void *p;
1824
1825 p = bpf_mem_alloc(&bpf_global_ma, size);
1826 if (!p)
1827 return NULL;
1828 if (meta)
1829 bpf_obj_init(meta->field_offs, p);
1830 return p;
1831 }
1832
1833 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
1834 {
1835 struct btf_struct_meta *meta = meta__ign;
1836 void *p = p__alloc;
1837
1838 if (meta)
1839 bpf_obj_free_fields(meta->record, p);
1840 bpf_mem_free(&bpf_global_ma, p);
1841 }
1842
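/* Illustrative sketch (not part of this file): from the BPF program side these
 * two kfuncs are normally reached through convenience macros such as
 * bpf_obj_new()/bpf_obj_drop() in the selftests' bpf_experimental.h, which
 * supply the local type id and btf_struct_meta arguments for the verifier.
 * The struct and variable names below are made up for illustration:
 *
 *	struct node_data {
 *		long key;
 *		struct bpf_list_node node;
 *	};
 *
 *	struct node_data *n = bpf_obj_new(typeof(*n));
 *	if (!n)
 *		return 0;
 *	// ... use n, or insert it into a bpf_list_head / bpf_rb_root ...
 *	bpf_obj_drop(n);
 */
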
1843 static void __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *head, bool tail)
1844 {
1845 struct list_head *n = (void *)node, *h = (void *)head;
1846
1847 if (unlikely(!h->next))
1848 INIT_LIST_HEAD(h);
1849 if (unlikely(!n->next))
1850 INIT_LIST_HEAD(n);
1851 tail ? list_add_tail(n, h) : list_add(n, h);
1852 }
1853
1854 __bpf_kfunc void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node)
1855 {
1856 return __bpf_list_add(node, head, false);
1857 }
1858
1859 __bpf_kfunc void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node)
1860 {
1861 return __bpf_list_add(node, head, true);
1862 }
1863
1864 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
1865 {
1866 struct list_head *n, *h = (void *)head;
1867
1868 if (unlikely(!h->next))
1869 INIT_LIST_HEAD(h);
1870 if (list_empty(h))
1871 return NULL;
1872 n = tail ? h->prev : h->next;
1873 list_del_init(n);
1874 return (struct bpf_list_node *)n;
1875 }
1876
1877 __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
1878 {
1879 return __bpf_list_del(head, false);
1880 }
1881
1882 __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
1883 {
1884 return __bpf_list_del(head, true);
1885 }
1886
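/* Illustrative sketch (not part of this file): typical BPF-program-side use of
 * the list kfuncs above, loosely following the kernel BPF selftests. The list
 * head and its bpf_spin_lock live in a map value or global data, and the value
 * type holding the bpf_list_node is tied to the head via a BTF decl tag (the
 * selftests' __contains() macro is one way to spell it). All names below are
 * illustrative:
 *
 *	struct elem {
 *		long data;
 *		struct bpf_list_node node;
 *	};
 *
 *	struct bpf_spin_lock glock;
 *	struct bpf_list_head ghead __contains(elem, node);
 *
 *	struct bpf_list_node *n;
 *	struct elem *e;
 *
 *	e = bpf_obj_new(typeof(*e));
 *	if (!e)
 *		return 0;
 *	bpf_spin_lock(&glock);
 *	bpf_list_push_front(&ghead, &e->node);
 *	bpf_spin_unlock(&glock);
 *
 *	bpf_spin_lock(&glock);
 *	n = bpf_list_pop_back(&ghead);
 *	bpf_spin_unlock(&glock);
 *	if (n)
 *		bpf_obj_drop(container_of(n, struct elem, node));
 */
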
1887 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
1888 struct bpf_rb_node *node)
1889 {
1890 struct rb_root_cached *r = (struct rb_root_cached *)root;
1891 struct rb_node *n = (struct rb_node *)node;
1892
1893 rb_erase_cached(n, r);
1894 RB_CLEAR_NODE(n);
1895 return (struct bpf_rb_node *)n;
1896 }
1897
1898 /* Need to copy rb_add_cached()'s logic here because our 'less' is a BPF
1899  * program.
1900  */
1901 static void __bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
1902 void *less)
1903 {
1904 struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
1905 bpf_callback_t cb = (bpf_callback_t)less;
1906 struct rb_node *parent = NULL;
1907 bool leftmost = true;
1908
1909 while (*link) {
1910 parent = *link;
1911 if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
1912 link = &parent->rb_left;
1913 } else {
1914 link = &parent->rb_right;
1915 leftmost = false;
1916 }
1917 }
1918
1919 rb_link_node((struct rb_node *)node, parent, link);
1920 rb_insert_color_cached((struct rb_node *)node,
1921 (struct rb_root_cached *)root, leftmost);
1922 }
1923
1924 __bpf_kfunc void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
1925 bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b))
1926 {
1927 __bpf_rbtree_add(root, node, (void *)less);
1928 }
1929
1930 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
1931 {
1932 struct rb_root_cached *r = (struct rb_root_cached *)root;
1933
1934 return (struct bpf_rb_node *)rb_first_cached(r);
1935 }
1936
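/* Illustrative sketch (not part of this file): BPF-program-side use of the
 * rbtree kfuncs above. The 'less' callback is an ordinary BPF function; the
 * root, lock and node declarations mirror the list sketch and are illustrative:
 *
 *	struct node_data {
 *		long key;
 *		struct bpf_rb_node node;
 *	};
 *
 *	struct bpf_spin_lock glock;
 *	struct bpf_rb_root groot __contains(node_data, node);
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na = container_of(a, struct node_data, node);
 *		struct node_data *nb = container_of(b, struct node_data, node);
 *
 *		return na->key < nb->key;
 *	}
 *
 *	struct bpf_rb_node *res;
 *	struct node_data *n = bpf_obj_new(typeof(*n));
 *
 *	if (!n)
 *		return 0;
 *	n->key = 42;
 *	bpf_spin_lock(&glock);
 *	bpf_rbtree_add(&groot, &n->node, less);
 *	res = bpf_rbtree_first(&groot);
 *	if (res)
 *		res = bpf_rbtree_remove(&groot, res);
 *	bpf_spin_unlock(&glock);
 *	if (res)
 *		bpf_obj_drop(container_of(res, struct node_data, node));
 */
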
1937 /**
1938 * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
1939  * kfunc that is not stored in a map as a kptr must be released by calling
1940 * bpf_task_release().
1941 * @p: The task on which a reference is being acquired.
1942 */
1943 __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
1944 {
1945 return get_task_struct(p);
1946 }
1947
1948 /**
1949  * bpf_task_acquire_not_zero - Acquire a reference to an RCU-protected task. A task
1950  * acquired by this kfunc that is not stored in a map as a kptr must be
1951  * released by calling bpf_task_release().
1952 * @p: The task on which a reference is being acquired.
1953 */
1954 __bpf_kfunc struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
1955 {
1956 /* For the time being this function returns NULL, as it's not currently
1957 * possible to safely acquire a reference to a task with RCU protection
1958 * using get_task_struct() and put_task_struct(). This is due to the
1959 * slightly odd mechanics of p->rcu_users, and how task RCU protection
1960 * works.
1961 *
1962 * A struct task_struct is refcounted by two different refcount_t
1963 * fields:
1964 *
1965 * 1. p->usage: The "true" refcount field which tracks a task's
1966 * lifetime. The task is freed as soon as this
1967 * refcount drops to 0.
1968 *
1969 * 2. p->rcu_users: An "RCU users" refcount field which is statically
1970 * initialized to 2, and is co-located in a union with
1971 * a struct rcu_head field (p->rcu). p->rcu_users
1972 * essentially encapsulates a single p->usage
1973 * refcount, and when p->rcu_users goes to 0, an RCU
1974 * callback is scheduled on the struct rcu_head which
1975 * decrements the p->usage refcount.
1976 *
1977 * There are two important implications to this task refcounting logic
1978 * described above. The first is that
1979 * refcount_inc_not_zero(&p->rcu_users) cannot be used anywhere, as
1980 * after the refcount goes to 0, the RCU callback being scheduled will
1981 * cause the memory backing the refcount to again be nonzero due to the
1982 * fields sharing a union. The other is that we can't rely on RCU to
1983 * guarantee that a task is valid in a BPF program. This is because a
1984 * task could have already transitioned to being in the TASK_DEAD
1985 * state, had its rcu_users refcount go to 0, and its rcu callback
1986 * invoked in which it drops its single p->usage reference. At this
1987 * point the task will be freed as soon as the last p->usage reference
1988 * goes to 0, without waiting for another RCU gp to elapse. The only
1989 	 * way that a BPF program can guarantee that a task is valid in this
1990 * scenario is to hold a p->usage refcount itself.
1991 *
1992 * Until we're able to resolve this issue, either by pulling
1993 * p->rcu_users and p->rcu out of the union, or by getting rid of
1994 * p->usage and just using p->rcu_users for refcounting, we'll just
1995 * return NULL here.
1996 */
1997 return NULL;
1998 }
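
/* For reference, a simplified view of the task_struct fields discussed above
 * (a rough sketch of the layout, not an exact copy of the definition in
 * include/linux/sched.h); rcu_users sharing storage with the rcu_head is what
 * makes refcount_inc_not_zero(&p->rcu_users) unsafe once the RCU callback has
 * been queued:
 *
 *	struct task_struct {
 *		...
 *		refcount_t usage;
 *		...
 *		union {
 *			refcount_t rcu_users;
 *			struct rcu_head rcu;
 *		};
 *		...
 *	};
 */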
1999
2000 /**
2001 * bpf_task_kptr_get - Acquire a reference on a struct task_struct kptr. A task
2002  * kptr acquired by this kfunc that is not subsequently stored in a map must
2003 * be released by calling bpf_task_release().
2004 * @pp: A pointer to a task kptr on which a reference is being acquired.
2005 */
2006 __bpf_kfunc struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
2007 {
2008 /* We must return NULL here until we have clarity on how to properly
2009 * leverage RCU for ensuring a task's lifetime. See the comment above
2010 * in bpf_task_acquire_not_zero() for more details.
2011 */
2012 return NULL;
2013 }
2014
2015 /**
2016 * bpf_task_release - Release the reference acquired on a task.
2017 * @p: The task on which a reference is being released.
2018 */
2019 __bpf_kfunc void bpf_task_release(struct task_struct *p)
2020 {
2021 if (!p)
2022 return;
2023
2024 put_task_struct(p);
2025 }
2026
2027 #ifdef CONFIG_CGROUPS
2028 /**
2029 * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
2030  * this kfunc that is not stored in a map as a kptr must be released by
2031 * calling bpf_cgroup_release().
2032 * @cgrp: The cgroup on which a reference is being acquired.
2033 */
2034 __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
2035 {
2036 cgroup_get(cgrp);
2037 return cgrp;
2038 }
2039
2040 /**
2041 * bpf_cgroup_kptr_get - Acquire a reference on a struct cgroup kptr. A cgroup
2042  * kptr acquired by this kfunc that is not subsequently stored in a map must
2043 * be released by calling bpf_cgroup_release().
2044 * @cgrpp: A pointer to a cgroup kptr on which a reference is being acquired.
2045 */
2046 __bpf_kfunc struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
2047 {
2048 struct cgroup *cgrp;
2049
2050 rcu_read_lock();
2051 /* Another context could remove the cgroup from the map and release it
2052 * at any time, including after we've done the lookup above. This is
2053 * safe because we're in an RCU read region, so the cgroup is
2054 * guaranteed to remain valid until at least the rcu_read_unlock()
2055 * below.
2056 */
2057 cgrp = READ_ONCE(*cgrpp);
2058
2059 if (cgrp && !cgroup_tryget(cgrp))
2060 /* If the cgroup had been removed from the map and freed as
2061 * described above, cgroup_tryget() will return false. The
2062 * cgroup will be freed at some point after the current RCU gp
2063 * has ended, so just return NULL to the user.
2064 */
2065 cgrp = NULL;
2066 rcu_read_unlock();
2067
2068 return cgrp;
2069 }
2070
2071 /**
2072 * bpf_cgroup_release - Release the reference acquired on a cgroup.
2073 * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to
2074 * not be freed until the current grace period has ended, even if its refcount
2075 * drops to 0.
2076 * @cgrp: The cgroup on which a reference is being released.
2077 */
2078 __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
2079 {
2080 if (!cgrp)
2081 return;
2082
2083 cgroup_put(cgrp);
2084 }
2085
2086 /**
2087 * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
2088  * array. A cgroup returned by this kfunc that is not subsequently stored in a
2089  * map must be released by calling bpf_cgroup_release().
2090 * @cgrp: The cgroup for which we're performing a lookup.
2091 * @level: The level of ancestor to look up.
2092 */
2093 __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
2094 {
2095 struct cgroup *ancestor;
2096
2097 if (level > cgrp->level || level < 0)
2098 return NULL;
2099
2100 ancestor = cgrp->ancestors[level];
2101 cgroup_get(ancestor);
2102 return ancestor;
2103 }
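
/* Illustrative sketch (not part of this file): walking to an ancestor from a
 * BPF program and dropping the acquired reference. Level semantics follow the
 * ancestors[] array: level 0 is the root, and cgrp->level is the cgroup itself:
 *
 *	struct cgroup *ancestor = bpf_cgroup_ancestor(cgrp, cgrp->level - 1);
 *
 *	if (ancestor) {
 *		// ... inspect ancestor ...
 *		bpf_cgroup_release(ancestor);
 *	}
 */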
2104 #endif /* CONFIG_CGROUPS */
2105
2106 /**
2107 * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
2108 * in the root pid namespace idr. If a task is returned, it must either be
2109 * stored in a map, or released with bpf_task_release().
2110 * @pid: The pid of the task being looked up.
2111 */
2112 __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
2113 {
2114 struct task_struct *p;
2115
2116 rcu_read_lock();
2117 p = find_task_by_pid_ns(pid, &init_pid_ns);
2118 if (p)
2119 bpf_task_acquire(p);
2120 rcu_read_unlock();
2121
2122 return p;
2123 }
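
/* Illustrative sketch (not part of this file): acquiring a task by pid from a
 * BPF program and releasing it. The __ksym declarations and the SEC()/BPF_PROG()
 * macros come from libbpf; the program body and attach point are only an example:
 *
 *	extern struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
 *	extern void bpf_task_release(struct task_struct *p) __ksym;
 *
 *	SEC("tp_btf/task_newtask")
 *	int BPF_PROG(handle_new_task, struct task_struct *task, u64 clone_flags)
 *	{
 *		struct task_struct *p = bpf_task_from_pid(1);
 *
 *		if (!p)
 *			return 0;
 *		// ... use p ...
 *		bpf_task_release(p);
 *		return 0;
 *	}
 */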
2124
2125 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
2126 {
2127 return obj;
2128 }
2129
2130 __bpf_kfunc void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
2131 {
2132 return obj__ign;
2133 }
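
/* Illustrative sketch (not part of this file): both casts above are no-ops at
 * runtime; the verifier does the real work. For example, a tc program might use
 * bpf_cast_to_kern_ctx() (declared via __ksym on the BPF side) to view its
 * __sk_buff context as the in-kernel struct sk_buff; the program below is an
 * assumption for illustration:
 *
 *	SEC("tc")
 *	int cast_ctx(struct __sk_buff *ctx)
 *	{
 *		struct sk_buff *kskb = bpf_cast_to_kern_ctx(ctx);
 *
 *		bpf_printk("skb len %u", kskb->len);
 *		return 0;
 *	}
 */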
2134
2135 __bpf_kfunc void bpf_rcu_read_lock(void)
2136 {
2137 rcu_read_lock();
2138 }
2139
2140 __bpf_kfunc void bpf_rcu_read_unlock(void)
2141 {
2142 rcu_read_unlock();
2143 }
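
/* Illustrative sketch (not part of this file): in a BPF program these two
 * kfuncs bracket access to RCU-protected pointers, e.g. an RCU-tagged task
 * field or a KF_RCU kfunc such as bpf_task_acquire_not_zero() above:
 *
 *	bpf_rcu_read_lock();
 *	parent = task->real_parent;
 *	// ... read-only access to parent while under the RCU read lock ...
 *	bpf_rcu_read_unlock();
 */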
2144
2145 __diag_pop();
2146
2147 BTF_SET8_START(generic_btf_ids)
2148 #ifdef CONFIG_KEXEC_CORE
2149 BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
2150 #endif
2151 BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
2152 BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
2153 BTF_ID_FLAGS(func, bpf_list_push_front)
2154 BTF_ID_FLAGS(func, bpf_list_push_back)
2155 BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
2156 BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
2157 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
2158 BTF_ID_FLAGS(func, bpf_task_acquire_not_zero, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
2159 BTF_ID_FLAGS(func, bpf_task_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
2160 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
2161 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE)
2162 BTF_ID_FLAGS(func, bpf_rbtree_add)
2163 BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
2164
2165 #ifdef CONFIG_CGROUPS
2166 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
2167 BTF_ID_FLAGS(func, bpf_cgroup_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
2168 BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
2169 BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_TRUSTED_ARGS | KF_RET_NULL)
2170 #endif
2171 BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
2172 BTF_SET8_END(generic_btf_ids)
2173
2174 static const struct btf_kfunc_id_set generic_kfunc_set = {
2175 .owner = THIS_MODULE,
2176 .set = &generic_btf_ids,
2177 };
2178
2180 BTF_ID_LIST(generic_dtor_ids)
2181 BTF_ID(struct, task_struct)
2182 BTF_ID(func, bpf_task_release)
2183 #ifdef CONFIG_CGROUPS
2184 BTF_ID(struct, cgroup)
2185 BTF_ID(func, bpf_cgroup_release)
2186 #endif
2187
2188 BTF_SET8_START(common_btf_ids)
2189 BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx)
2190 BTF_ID_FLAGS(func, bpf_rdonly_cast)
2191 BTF_ID_FLAGS(func, bpf_rcu_read_lock)
2192 BTF_ID_FLAGS(func, bpf_rcu_read_unlock)
2193 BTF_SET8_END(common_btf_ids)
2194
2195 static const struct btf_kfunc_id_set common_kfunc_set = {
2196 .owner = THIS_MODULE,
2197 .set = &common_btf_ids,
2198 };
2199
2200 static int __init kfunc_init(void)
2201 {
2202 int ret;
2203 const struct btf_id_dtor_kfunc generic_dtors[] = {
2204 {
2205 .btf_id = generic_dtor_ids[0],
2206 .kfunc_btf_id = generic_dtor_ids[1]
2207 },
2208 #ifdef CONFIG_CGROUPS
2209 {
2210 .btf_id = generic_dtor_ids[2],
2211 .kfunc_btf_id = generic_dtor_ids[3]
2212 },
2213 #endif
2214 };
2215
2216 ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
2217 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
2218 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
2219 ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
2220 ARRAY_SIZE(generic_dtors),
2221 THIS_MODULE);
2222 return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set);
2223 }
2224
2225 late_initcall(kfunc_init);
2226