Lines matching refs:c (references to the struct bpf_mem_cache pointer c throughout the BPF memory allocator, kernel/bpf/memalloc.c)
140 static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags) in __alloc() argument
142 if (c->percpu_size) { in __alloc()
143 void __percpu **obj = kmalloc_node(c->percpu_size, flags, node); in __alloc()
144 void __percpu *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags); in __alloc()
155 return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node); in __alloc()
158 static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c) in get_memcg() argument
161 if (c->objcg) in get_memcg()
162 return get_mem_cgroup_from_objcg(c->objcg); in get_memcg()
169 static void inc_active(struct bpf_mem_cache *c, unsigned long *flags) in inc_active() argument
184 WARN_ON_ONCE(local_inc_return(&c->active) != 1); in inc_active()
187 static void dec_active(struct bpf_mem_cache *c, unsigned long *flags) in dec_active() argument
189 local_dec(&c->active); in dec_active()
194 static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj) in add_obj_to_free_list() argument
198 inc_active(c, &flags); in add_obj_to_free_list()
199 __llist_add(obj, &c->free_llist); in add_obj_to_free_list()
200 c->free_cnt++; in add_obj_to_free_list()
201 dec_active(c, &flags); in add_obj_to_free_list()
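
Aside: inc_active()/dec_active() bracket every touch of the per-CPU free_llist. Interrupts may be disabled via the flags argument, a cheap local counter is bumped, and WARN_ON_ONCE fires if the count is not exactly 1, meaning a reentrant context (for example an NMI-time BPF program) is already inside. The sketch below is a minimal user-space model of that "exclusive owner counter around a per-CPU free list" idea; struct cache, guard_enter() and guard_exit() are invented names, and C11 atomics plus an assert stand in for local_t and WARN_ON_ONCE.

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct node { struct node *next; };

struct cache {
        atomic_int active;      /* models the local_t c->active      */
        struct node *free_list; /* models the per-CPU c->free_llist  */
        int free_cnt;           /* models c->free_cnt                */
};

/* guard_enter()/guard_exit(): sketch of inc_active()/dec_active().
 * The kernel disables interrupts where needed and uses a per-CPU
 * local_t; a plain C11 atomic stands in for it here.
 */
static void guard_enter(struct cache *c)
{
        int prev = atomic_fetch_add(&c->active, 1);

        /* must be the only writer of the list while the guard is held */
        assert(prev == 0);
        (void)prev;
}

static void guard_exit(struct cache *c)
{
        atomic_fetch_sub(&c->active, 1);
}

/* sketch of add_obj_to_free_list(): push one object under the guard */
static void add_obj(struct cache *c, struct node *obj)
{
        guard_enter(c);
        obj->next = c->free_list;
        c->free_list = obj;
        c->free_cnt++;
        guard_exit(c);
}

int main(void)
{
        struct cache c = { .active = 0 };
        struct node n1, n2;

        add_obj(&c, &n1);
        add_obj(&c, &n2);
        printf("cached objects: %d\n", c.free_cnt);     /* prints 2 */
        return 0;
}

In the real allocator, unit_free() additionally falls back to the lockless free_llist_extra list when the counter shows that the current context interrupted another user of the list, so nothing ever spins on this guard.
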
205 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic) in alloc_bulk() argument
221 obj = llist_del_first(&c->free_by_rcu_ttrace); in alloc_bulk()
224 add_obj_to_free_list(c, obj); in alloc_bulk()
230 obj = llist_del_first(&c->waiting_for_gp_ttrace); in alloc_bulk()
233 add_obj_to_free_list(c, obj); in alloc_bulk()
238 memcg = get_memcg(c); in alloc_bulk()
246 obj = __alloc(c, node, gfp); in alloc_bulk()
249 add_obj_to_free_list(c, obj); in alloc_bulk()
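
Aside: alloc_bulk() refills the cache but prefers recycling: it first pops objects that already sat out an RCU-tasks-trace grace period (free_by_rcu_ttrace, plus waiting_for_gp_ttrace during non-atomic prefill) and only calls __alloc() with memcg accounting for whatever is still missing. A rough user-space model of that reuse-before-allocate loop; reuse_pop() and bulk_fill() are made-up stand-ins, not kernel API.

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

static struct node *reuse_stack;        /* stands in for free_by_rcu_ttrace */

static struct node *reuse_pop(void)
{
        struct node *n = reuse_stack;

        if (n)
                reuse_stack = n->next;
        return n;
}

/* sketch of alloc_bulk(): take cnt objects, recycling where possible */
static int bulk_fill(struct node **out, int cnt)
{
        int i, reused = 0;

        for (i = 0; i < cnt; i++) {
                struct node *n = reuse_pop();

                if (n)
                        reused++;
                else if (!(n = calloc(1, sizeof(*n))))  /* __alloc() fallback */
                        break;
                out[i] = n;
        }
        printf("filled %d objects, %d recycled\n", i, reused);
        return i;
}

int main(void)
{
        static struct node spare;
        struct node *batch[4];

        reuse_stack = &spare;   /* one object already waited out its GP */
        bulk_fill(batch, 4);    /* recycles 1, allocates 3 */
        return 0;
}
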
280 struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace); in __free_rcu() local
282 free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size); in __free_rcu()
283 atomic_set(&c->call_rcu_ttrace_in_progress, 0); in __free_rcu()
297 static void enque_to_free(struct bpf_mem_cache *c, void *obj) in enque_to_free() argument
304 llist_add(llnode, &c->free_by_rcu_ttrace); in enque_to_free()
307 static void do_call_rcu_ttrace(struct bpf_mem_cache *c) in do_call_rcu_ttrace() argument
311 if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) { in do_call_rcu_ttrace()
312 if (unlikely(READ_ONCE(c->draining))) { in do_call_rcu_ttrace()
313 llnode = llist_del_all(&c->free_by_rcu_ttrace); in do_call_rcu_ttrace()
314 free_all(llnode, !!c->percpu_size); in do_call_rcu_ttrace()
319 WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace)); in do_call_rcu_ttrace()
320 llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace)) in do_call_rcu_ttrace()
321 llist_add(llnode, &c->waiting_for_gp_ttrace); in do_call_rcu_ttrace()
323 if (unlikely(READ_ONCE(c->draining))) { in do_call_rcu_ttrace()
324 __free_rcu(&c->rcu_ttrace); in do_call_rcu_ttrace()
333 call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace); in do_call_rcu_ttrace()
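
Aside: do_call_rcu_ttrace() allows a single RCU-tasks-trace callback in flight per cache. atomic_xchg() on call_rcu_ttrace_in_progress acts as the gate, and the draining flag short-circuits to an immediate free during teardown instead of waiting for a grace period. A hedged C11 sketch of that single-flight gate; try_queue_gp_callback() and gp_callback_done() are invented names, and printf stands in for the real queueing and freeing.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int gp_in_progress;       /* models call_rcu_ttrace_in_progress */
static atomic_bool draining;            /* models c->draining                 */

/* sketch of the entry check in do_call_rcu_ttrace() */
static bool try_queue_gp_callback(void)
{
        if (atomic_exchange(&gp_in_progress, 1)) {
                /* a callback is already pending; during teardown the real
                 * code frees the backlog immediately instead of waiting
                 */
                if (atomic_load(&draining))
                        printf("draining: free backlog now\n");
                return false;
        }
        printf("queued one grace-period callback\n");
        return true;
}

/* the callback clears the gate once the grace period has elapsed */
static void gp_callback_done(void)
{
        atomic_store(&gp_in_progress, 0);
}

int main(void)
{
        try_queue_gp_callback();        /* queues       */
        try_queue_gp_callback();        /* rejected     */
        gp_callback_done();
        try_queue_gp_callback();        /* queues again */
        return 0;
}
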
336 static void free_bulk(struct bpf_mem_cache *c) in free_bulk() argument
338 struct bpf_mem_cache *tgt = c->tgt; in free_bulk()
343 WARN_ON_ONCE(tgt->unit_size != c->unit_size); in free_bulk()
344 WARN_ON_ONCE(tgt->percpu_size != c->percpu_size); in free_bulk()
347 inc_active(c, &flags); in free_bulk()
348 llnode = __llist_del_first(&c->free_llist); in free_bulk()
350 cnt = --c->free_cnt; in free_bulk()
353 dec_active(c, &flags); in free_bulk()
356 } while (cnt > (c->high_watermark + c->low_watermark) / 2); in free_bulk()
359 llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra)) in free_bulk()
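
Aside: free_bulk() trims an over-full cache: under the inc_active() guard it keeps popping objects while free_cnt stays above the midpoint of the two watermarks, then drains the lockless free_llist_extra overflow as well. The stop condition is plain arithmetic; a small worked check, assuming the default 32/96 watermarks used for objects up to 256 bytes.

#include <stdio.h>

/* sketch of the free_bulk() stop condition:
 * keep releasing objects while cnt > (high + low) / 2
 */
static int trim_target(int low, int high)
{
        return (high + low) / 2;
}

int main(void)
{
        int low = 32, high = 96;        /* defaults for unit_size <= 256 */
        int cnt = 120;                  /* an over-full cache            */
        int released = 0;

        while (cnt > trim_target(low, high)) {
                cnt--;                  /* one enque_to_free() per object */
                released++;
        }
        /* releases 56 objects and stops at 64, halfway between 32 and 96 */
        printf("released %d, %d left cached\n", released, cnt);
        return 0;
}
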
366 struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu); in __free_by_rcu() local
367 struct bpf_mem_cache *tgt = c->tgt; in __free_by_rcu()
370 WARN_ON_ONCE(tgt->unit_size != c->unit_size); in __free_by_rcu()
371 WARN_ON_ONCE(tgt->percpu_size != c->percpu_size); in __free_by_rcu()
373 llnode = llist_del_all(&c->waiting_for_gp); in __free_by_rcu()
377 llist_add_batch(llnode, c->waiting_for_gp_tail, &tgt->free_by_rcu_ttrace); in __free_by_rcu()
382 atomic_set(&c->call_rcu_in_progress, 0); in __free_by_rcu()
385 static void check_free_by_rcu(struct bpf_mem_cache *c) in check_free_by_rcu() argument
391 if (unlikely(!llist_empty(&c->free_llist_extra_rcu))) { in check_free_by_rcu()
392 inc_active(c, &flags); in check_free_by_rcu()
393 llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra_rcu)) in check_free_by_rcu()
394 if (__llist_add(llnode, &c->free_by_rcu)) in check_free_by_rcu()
395 c->free_by_rcu_tail = llnode; in check_free_by_rcu()
396 dec_active(c, &flags); in check_free_by_rcu()
399 if (llist_empty(&c->free_by_rcu)) in check_free_by_rcu()
402 if (atomic_xchg(&c->call_rcu_in_progress, 1)) { in check_free_by_rcu()
415 WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp)); in check_free_by_rcu()
417 inc_active(c, &flags); in check_free_by_rcu()
418 WRITE_ONCE(c->waiting_for_gp.first, __llist_del_all(&c->free_by_rcu)); in check_free_by_rcu()
419 c->waiting_for_gp_tail = c->free_by_rcu_tail; in check_free_by_rcu()
420 dec_active(c, &flags); in check_free_by_rcu()
422 if (unlikely(READ_ONCE(c->draining))) { in check_free_by_rcu()
423 free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size); in check_free_by_rcu()
424 atomic_set(&c->call_rcu_in_progress, 0); in check_free_by_rcu()
426 call_rcu_hurry(&c->rcu, __free_by_rcu); in check_free_by_rcu()
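
Aside: check_free_by_rcu() first splices the lockless free_llist_extra_rcu overflow into free_by_rcu while tracking a tail pointer, so that the whole batch can later be handed to waiting_for_gp in O(1) and passed to call_rcu_hurry(). The tail-tracking splice is the part worth seeing on its own; below is a user-space sketch with invented names (struct q, splice_in(), hand_off()), not the kernel's llist API.

#include <stdio.h>

struct node { struct node *next; int id; };

struct q {
        struct node *extra;             /* free_llist_extra_rcu (lockless overflow) */
        struct node *pending;           /* free_by_rcu                              */
        struct node *pending_tail;      /* free_by_rcu_tail                         */
        struct node *waiting;           /* waiting_for_gp                           */
};

/* move the overflow list onto the pending list, remembering the tail:
 * the first node pushed onto an empty pending list becomes its tail
 */
static void splice_in(struct q *q)
{
        while (q->extra) {
                struct node *n = q->extra;

                q->extra = n->next;
                n->next = q->pending;
                if (!q->pending)
                        q->pending_tail = n;
                q->pending = n;
        }
}

/* hand the whole pending batch to the grace-period list in O(1) */
static void hand_off(struct q *q)
{
        if (!q->pending)
                return;
        q->pending_tail->next = q->waiting;
        q->waiting = q->pending;
        q->pending = NULL;
        q->pending_tail = NULL;
}

int main(void)
{
        struct node a = { .id = 1 }, b = { .id = 2 };
        struct q q = { 0 };

        a.next = &b;
        q.extra = &a;           /* two objects freed from odd contexts */
        splice_in(&q);
        hand_off(&q);           /* both now wait for the grace period  */
        for (struct node *n = q.waiting; n; n = n->next)
                printf("waiting: %d\n", n->id);
        return 0;
}
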
432 struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work); in bpf_mem_refill() local
436 cnt = c->free_cnt; in bpf_mem_refill()
437 if (cnt < c->low_watermark) in bpf_mem_refill()
441 alloc_bulk(c, c->batch, NUMA_NO_NODE, true); in bpf_mem_refill()
442 else if (cnt > c->high_watermark) in bpf_mem_refill()
443 free_bulk(c); in bpf_mem_refill()
445 check_free_by_rcu(c); in bpf_mem_refill()
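
Aside: bpf_mem_refill() is the irq_work handler raised by irq_work_raise(). It samples the cached count once, refills by c->batch objects when below the low watermark, trims via free_bulk() when above the high one, and then lets check_free_by_rcu() make progress on the RCU backlog. A tiny sketch of just the decision; refill() and trim() are placeholders for alloc_bulk() and free_bulk().

#include <stdio.h>

struct cache { int free_cnt, low, high, batch; };

static void refill(struct cache *c) { c->free_cnt += c->batch; }              /* alloc_bulk() */
static void trim(struct cache *c)   { c->free_cnt = (c->high + c->low) / 2; } /* free_bulk()  */

/* sketch of the decision made by bpf_mem_refill() */
static void mem_refill(struct cache *c)
{
        int cnt = c->free_cnt;

        if (cnt < c->low)
                refill(c);
        else if (cnt > c->high)
                trim(c);
}

int main(void)
{
        struct cache c = { .free_cnt = 10, .low = 32, .high = 96, .batch = 48 };

        mem_refill(&c);                         /* 10 < 32: refill to 58 */
        printf("after refill: %d\n", c.free_cnt);
        c.free_cnt = 120;
        mem_refill(&c);                         /* 120 > 96: trim to 64  */
        printf("after trim:   %d\n", c.free_cnt);
        return 0;
}
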
448 static void notrace irq_work_raise(struct bpf_mem_cache *c) in irq_work_raise() argument
450 irq_work_queue(&c->refill_work); in irq_work_raise()
470 static void init_refill_work(struct bpf_mem_cache *c) in init_refill_work() argument
472 init_irq_work(&c->refill_work, bpf_mem_refill); in init_refill_work()
473 if (c->percpu_size) { in init_refill_work()
474 c->low_watermark = 1; in init_refill_work()
475 c->high_watermark = 3; in init_refill_work()
476 } else if (c->unit_size <= 256) { in init_refill_work()
477 c->low_watermark = 32; in init_refill_work()
478 c->high_watermark = 96; in init_refill_work()
485 c->low_watermark = max(32 * 256 / c->unit_size, 1); in init_refill_work()
486 c->high_watermark = max(96 * 256 / c->unit_size, 3); in init_refill_work()
488 c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1); in init_refill_work()
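
Aside: init_refill_work() scales the watermarks with object size so that roughly the same number of bytes stays cached per CPU: per-CPU caches keep only 1..3 objects, objects up to 256 bytes use the fixed 32/96 pair, larger sizes use max(32*256/unit_size, 1) and max(96*256/unit_size, 3), and the refill batch is three quarters of the gap. A worked table for a few unit sizes (non-percpu case):

#include <stdio.h>

/* reproduce the watermark arithmetic of init_refill_work() for
 * ordinary (non-percpu) caches
 */
static void watermarks(int unit_size, int *low, int *high, int *batch)
{
        if (unit_size <= 256) {
                *low = 32;
                *high = 96;
        } else {
                *low = 32 * 256 / unit_size;
                if (*low < 1)
                        *low = 1;
                *high = 96 * 256 / unit_size;
                if (*high < 3)
                        *high = 3;
        }
        *batch = (*high - *low) / 4 * 3;
        if (*batch < 1)
                *batch = 1;
}

int main(void)
{
        int sizes[] = { 64, 256, 512, 1024, 4096 };

        for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                int low, high, batch;

                watermarks(sizes[i], &low, &high, &batch);
                printf("unit_size %4d: low %2d high %2d batch %2d\n",
                       sizes[i], low, high, batch);
        }
        return 0;
}
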
491 static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu) in prefill_mem_cache() argument
500 if (!c->percpu_size && c->unit_size <= 256) in prefill_mem_cache()
502 alloc_bulk(c, cnt, cpu_to_node(cpu), false); in prefill_mem_cache()
515 struct bpf_mem_cache *c; struct bpf_mem_cache __percpu *pc; in bpf_mem_alloc_init() local
543 c = per_cpu_ptr(pc, cpu); in bpf_mem_alloc_init()
544 c->unit_size = unit_size; in bpf_mem_alloc_init()
545 c->objcg = objcg; in bpf_mem_alloc_init()
546 c->percpu_size = percpu_size; in bpf_mem_alloc_init()
547 c->tgt = c; in bpf_mem_alloc_init()
548 init_refill_work(c); in bpf_mem_alloc_init()
549 prefill_mem_cache(c, cpu); in bpf_mem_alloc_init()
565 c = &cc->cache[i]; in bpf_mem_alloc_init()
566 c->unit_size = sizes[i]; in bpf_mem_alloc_init()
567 c->objcg = objcg; in bpf_mem_alloc_init()
568 c->percpu_size = percpu_size; in bpf_mem_alloc_init()
569 c->tgt = c; in bpf_mem_alloc_init()
571 init_refill_work(c); in bpf_mem_alloc_init()
572 prefill_mem_cache(c, cpu); in bpf_mem_alloc_init()
599 struct bpf_mem_cache *c; in bpf_mem_alloc_percpu_unit_init() local
614 c = &cc->cache[i]; in bpf_mem_alloc_percpu_unit_init()
615 if (c->unit_size) in bpf_mem_alloc_percpu_unit_init()
618 c->unit_size = unit_size; in bpf_mem_alloc_percpu_unit_init()
619 c->objcg = objcg; in bpf_mem_alloc_percpu_unit_init()
620 c->percpu_size = percpu_size; in bpf_mem_alloc_percpu_unit_init()
621 c->tgt = c; in bpf_mem_alloc_percpu_unit_init()
623 init_refill_work(c); in bpf_mem_alloc_percpu_unit_init()
624 prefill_mem_cache(c, cpu); in bpf_mem_alloc_percpu_unit_init()
630 static void drain_mem_cache(struct bpf_mem_cache *c) in drain_mem_cache() argument
632 bool percpu = !!c->percpu_size; in drain_mem_cache()
641 free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu); in drain_mem_cache()
642 free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu); in drain_mem_cache()
643 free_all(__llist_del_all(&c->free_llist), percpu); in drain_mem_cache()
644 free_all(__llist_del_all(&c->free_llist_extra), percpu); in drain_mem_cache()
645 free_all(__llist_del_all(&c->free_by_rcu), percpu); in drain_mem_cache()
646 free_all(__llist_del_all(&c->free_llist_extra_rcu), percpu); in drain_mem_cache()
647 free_all(llist_del_all(&c->waiting_for_gp), percpu); in drain_mem_cache()
650 static void check_mem_cache(struct bpf_mem_cache *c) in check_mem_cache() argument
652 WARN_ON_ONCE(!llist_empty(&c->free_by_rcu_ttrace)); in check_mem_cache()
653 WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace)); in check_mem_cache()
654 WARN_ON_ONCE(!llist_empty(&c->free_llist)); in check_mem_cache()
655 WARN_ON_ONCE(!llist_empty(&c->free_llist_extra)); in check_mem_cache()
656 WARN_ON_ONCE(!llist_empty(&c->free_by_rcu)); in check_mem_cache()
657 WARN_ON_ONCE(!llist_empty(&c->free_llist_extra_rcu)); in check_mem_cache()
658 WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp)); in check_mem_cache()
664 struct bpf_mem_cache *c; in check_leaked_objs() local
669 c = per_cpu_ptr(ma->cache, cpu); in check_leaked_objs()
670 check_mem_cache(c); in check_leaked_objs()
677 c = &cc->cache[i]; in check_leaked_objs()
678 check_mem_cache(c); in check_leaked_objs()
748 struct bpf_mem_cache *c; in bpf_mem_alloc_destroy() local
754 c = per_cpu_ptr(ma->cache, cpu); in bpf_mem_alloc_destroy()
755 WRITE_ONCE(c->draining, true); in bpf_mem_alloc_destroy()
756 irq_work_sync(&c->refill_work); in bpf_mem_alloc_destroy()
757 drain_mem_cache(c); in bpf_mem_alloc_destroy()
758 rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress); in bpf_mem_alloc_destroy()
759 rcu_in_progress += atomic_read(&c->call_rcu_in_progress); in bpf_mem_alloc_destroy()
769 c = &cc->cache[i]; in bpf_mem_alloc_destroy()
770 WRITE_ONCE(c->draining, true); in bpf_mem_alloc_destroy()
771 irq_work_sync(&c->refill_work); in bpf_mem_alloc_destroy()
772 drain_mem_cache(c); in bpf_mem_alloc_destroy()
773 rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress); in bpf_mem_alloc_destroy()
774 rcu_in_progress += atomic_read(&c->call_rcu_in_progress); in bpf_mem_alloc_destroy()
785 static void notrace *unit_alloc(struct bpf_mem_cache *c) in unit_alloc() argument
802 if (local_inc_return(&c->active) == 1) { in unit_alloc()
803 llnode = __llist_del_first(&c->free_llist); in unit_alloc()
805 cnt = --c->free_cnt; in unit_alloc()
806 *(struct bpf_mem_cache **)llnode = c; in unit_alloc()
809 local_dec(&c->active); in unit_alloc()
813 if (cnt < c->low_watermark) in unit_alloc()
814 irq_work_raise(c); in unit_alloc()
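
Aside: unit_alloc() is the fast path: with the active guard held it pops the first node off the per-CPU free_llist and stamps the owning cache pointer into the node's first word; the caller hands out the memory just past that hidden header, which doubles as the llist node while the object is free and lets the free path find the right cache later. Below is a user-space sketch of the hidden-header trick; HDR_SZ, struct cache and the simplified guard are illustrative, not the kernel's definitions.

#include <stdio.h>
#include <stdlib.h>

#define HDR_SZ sizeof(void *)           /* stands in for LLIST_NODE_SZ */

struct cache {
        int active;                     /* models local_t c->active          */
        void **free_list;               /* each free object's first word
                                         * links to the next free object     */
        int free_cnt, low_watermark;
};

/* sketch of unit_alloc(): pop under the guard, stamp the owning cache
 * into the node's first word, hand out the memory past the hidden header
 */
static void *unit_alloc_sketch(struct cache *c)
{
        void *obj = NULL;

        if (++c->active == 1 && c->free_list) {
                void **node = c->free_list;

                c->free_list = (void **)*node;  /* unlink               */
                c->free_cnt--;
                *node = c;                      /* remember owner cache */
                obj = (char *)node + HDR_SZ;    /* skip hidden header   */
        }
        c->active--;

        if (c->free_cnt < c->low_watermark)
                printf("would irq_work_raise() to refill\n");
        return obj;
}

int main(void)
{
        struct cache c = { .low_watermark = 2 };
        void **slab = calloc(1, HDR_SZ + 64);   /* one 64-byte object */

        if (!slab)
                return 1;
        c.free_list = slab;                     /* single-entry free list */
        c.free_cnt = 1;

        void *p = unit_alloc_sketch(&c);
        printf("object %p, stamped owner %p, cache at %p\n",
               p, *(void **)((char *)p - HDR_SZ), (void *)&c);
        free(slab);
        return 0;
}
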
828 static void notrace unit_free(struct bpf_mem_cache *c, void *ptr) in unit_free() argument
840 c->tgt = *(struct bpf_mem_cache **)llnode; in unit_free()
843 if (local_inc_return(&c->active) == 1) { in unit_free()
844 __llist_add(llnode, &c->free_llist); in unit_free()
845 cnt = ++c->free_cnt; in unit_free()
853 llist_add(llnode, &c->free_llist_extra); in unit_free()
855 local_dec(&c->active); in unit_free()
857 if (cnt > c->high_watermark) in unit_free()
859 irq_work_raise(c); in unit_free()
868 static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr) in unit_free_rcu() argument
873 c->tgt = *(struct bpf_mem_cache **)llnode; in unit_free_rcu()
876 if (local_inc_return(&c->active) == 1) { in unit_free_rcu()
877 if (__llist_add(llnode, &c->free_by_rcu)) in unit_free_rcu()
878 c->free_by_rcu_tail = llnode; in unit_free_rcu()
880 llist_add(llnode, &c->free_llist_extra_rcu); in unit_free_rcu()
882 local_dec(&c->active); in unit_free_rcu()
884 if (!atomic_read(&c->call_rcu_in_progress)) in unit_free_rcu()
885 irq_work_raise(c); in unit_free_rcu()
912 struct bpf_mem_cache *c; in bpf_mem_free() local
918 c = *(void **)(ptr - LLIST_NODE_SZ); in bpf_mem_free()
919 idx = bpf_mem_cache_idx(c->unit_size); in bpf_mem_free()
928 struct bpf_mem_cache *c; in bpf_mem_free_rcu() local
934 c = *(void **)(ptr - LLIST_NODE_SZ); in bpf_mem_free_rcu()
935 idx = bpf_mem_cache_idx(c->unit_size); in bpf_mem_free_rcu()
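
Aside: on free, the stamped header is read back: bpf_mem_free() and bpf_mem_free_rcu() load the cache pointer from ptr - LLIST_NODE_SZ, take its unit_size, and map it to a size-class index so the object returns to the matching per-CPU cache (immediately, or after an RCU grace period for the _rcu variant). The sketch below reuses the hidden-header convention from the previous example; the size-to-index mapping here is a simplified power-of-two placeholder, not the kernel's actual bpf_mem_cache_idx() table.

#include <stdio.h>

#define HDR_SZ sizeof(void *)           /* stands in for LLIST_NODE_SZ */

struct cache { int unit_size; };

/* simplified size-class lookup: index of the smallest power of two >= size,
 * starting at 16 bytes (the real table also has extra classes, so this is
 * only an approximation of bpf_mem_cache_idx())
 */
static int cache_idx(int size)
{
        int idx = 0, cap = 16;

        while (cap < size) {
                cap <<= 1;
                idx++;
        }
        return idx;
}

/* sketch of the bpf_mem_free() entry: the hidden header in front of the
 * object names the cache it came from
 */
static void mem_free(void *ptr)
{
        struct cache *c = *(struct cache **)((char *)ptr - HDR_SZ);

        printf("returning %d-byte object to size class %d\n",
               c->unit_size, cache_idx(c->unit_size));
        /* the real code then calls unit_free() (or unit_free_rcu()) on
         * this CPU's cache with the same index
         */
}

int main(void)
{
        static struct cache c = { .unit_size = 512 };
        static union { void *align; char bytes[HDR_SZ + 512]; } obj;

        *(struct cache **)obj.bytes = &c;       /* what unit_alloc() stamped */
        mem_free(obj.bytes + HDR_SZ);
        return 0;
}
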
990 struct bpf_mem_cache *c; in bpf_mem_cache_alloc_flags() local
993 c = this_cpu_ptr(ma->cache); in bpf_mem_cache_alloc_flags()
995 ret = unit_alloc(c); in bpf_mem_cache_alloc_flags()
999 memcg = get_memcg(c); in bpf_mem_cache_alloc_flags()
1001 ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT); in bpf_mem_cache_alloc_flags()
1003 *(struct bpf_mem_cache **)ret = c; in bpf_mem_cache_alloc_flags()