Lines matching references to 'c' in kernel/bpf/memalloc.c
140 static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags) in __alloc() argument
142 if (c->percpu_size) { in __alloc()
143 void __percpu **obj = kmalloc_node(c->percpu_size, flags, node); in __alloc()
144 void __percpu *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags); in __alloc()
155 return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node); in __alloc()
158 static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c) in get_memcg() argument
161 if (c->objcg) in get_memcg()
162 return get_mem_cgroup_from_objcg(c->objcg); in get_memcg()
169 static void inc_active(struct bpf_mem_cache *c, unsigned long *flags) in inc_active() argument
184 WARN_ON_ONCE(local_inc_return(&c->active) != 1); in inc_active()
187 static void dec_active(struct bpf_mem_cache *c, unsigned long *flags) in dec_active() argument
189 local_dec(&c->active); in dec_active()
194 static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj) in add_obj_to_free_list() argument
198 inc_active(c, &flags); in add_obj_to_free_list()
199 __llist_add(obj, &c->free_llist); in add_obj_to_free_list()
200 c->free_cnt++; in add_obj_to_free_list()
201 dec_active(c, &flags); in add_obj_to_free_list()
205 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic) in alloc_bulk() argument
221 obj = llist_del_first(&c->free_by_rcu_ttrace); in alloc_bulk()
224 add_obj_to_free_list(c, obj); in alloc_bulk()
230 obj = llist_del_first(&c->waiting_for_gp_ttrace); in alloc_bulk()
233 add_obj_to_free_list(c, obj); in alloc_bulk()
238 memcg = get_memcg(c); in alloc_bulk()
246 obj = __alloc(c, node, gfp); in alloc_bulk()
249 add_obj_to_free_list(c, obj); in alloc_bulk()
277 struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace); in __free_rcu() local
279 free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size); in __free_rcu()
280 atomic_set(&c->call_rcu_ttrace_in_progress, 0); in __free_rcu()
294 static void enque_to_free(struct bpf_mem_cache *c, void *obj) in enque_to_free() argument
301 llist_add(llnode, &c->free_by_rcu_ttrace); in enque_to_free()
304 static void do_call_rcu_ttrace(struct bpf_mem_cache *c) in do_call_rcu_ttrace() argument
308 if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) { in do_call_rcu_ttrace()
309 if (unlikely(READ_ONCE(c->draining))) { in do_call_rcu_ttrace()
310 llnode = llist_del_all(&c->free_by_rcu_ttrace); in do_call_rcu_ttrace()
311 free_all(llnode, !!c->percpu_size); in do_call_rcu_ttrace()
316 WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace)); in do_call_rcu_ttrace()
317 llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace)) in do_call_rcu_ttrace()
318 llist_add(llnode, &c->waiting_for_gp_ttrace); in do_call_rcu_ttrace()
320 if (unlikely(READ_ONCE(c->draining))) { in do_call_rcu_ttrace()
321 __free_rcu(&c->rcu_ttrace); in do_call_rcu_ttrace()
330 call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace); in do_call_rcu_ttrace()
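The do_call_rcu_ttrace() lines above (304-330) show a single-flight guard: atomic_xchg() on call_rcu_ttrace_in_progress lets only one tasks-trace RCU callback per cache be in flight, while frees that arrive in the meantime simply pile up on free_by_rcu_ttrace for the next round. Below is a minimal userspace model of that guard using C11 atomics; it is not kernel code, and cache_model, try_schedule_callback and callback_done are illustrative names only.

/* Userspace model (not kernel code) of the atomic_xchg() single-flight
 * guard used by do_call_rcu_ttrace(): only one deferred-free callback per
 * cache may be in flight; concurrent callers just leave their objects on
 * the list for the next round.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cache_model {
    atomic_int callback_in_progress;   /* models c->call_rcu_ttrace_in_progress */
};

/* Returns true if this caller won the right to schedule the callback. */
static bool try_schedule_callback(struct cache_model *c)
{
    /* atomic_exchange plays the role of atomic_xchg(): the first caller
     * sees 0 and flips it to 1; everyone else sees 1 and backs off.
     */
    if (atomic_exchange(&c->callback_in_progress, 1))
        return false;
    /* ... here the kernel would call call_rcu_tasks_trace() ... */
    return true;
}

/* Models __free_rcu() clearing the flag once the pending objects are
 * drained, which allows the next round to be scheduled.
 */
static void callback_done(struct cache_model *c)
{
    atomic_store(&c->callback_in_progress, 0);
}

int main(void)
{
    struct cache_model c = { .callback_in_progress = 0 };

    printf("first caller schedules: %d\n", try_schedule_callback(&c));  /* 1 */
    printf("second caller backs off: %d\n", try_schedule_callback(&c)); /* 0 */
    callback_done(&c);
    printf("after completion, scheduling works again: %d\n",
           try_schedule_callback(&c));                                  /* 1 */
    return 0;
}

Built with any C11 compiler, this prints 1, 0, 1, mirroring the first-caller-wins behaviour.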
333 static void free_bulk(struct bpf_mem_cache *c) in free_bulk() argument
335 struct bpf_mem_cache *tgt = c->tgt; in free_bulk()
340 WARN_ON_ONCE(tgt->unit_size != c->unit_size); in free_bulk()
341 WARN_ON_ONCE(tgt->percpu_size != c->percpu_size); in free_bulk()
344 inc_active(c, &flags); in free_bulk()
345 llnode = __llist_del_first(&c->free_llist); in free_bulk()
347 cnt = --c->free_cnt; in free_bulk()
350 dec_active(c, &flags); in free_bulk()
353 } while (cnt > (c->high_watermark + c->low_watermark) / 2); in free_bulk()
356 llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra)) in free_bulk()
363 struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu); in __free_by_rcu() local
364 struct bpf_mem_cache *tgt = c->tgt; in __free_by_rcu()
367 WARN_ON_ONCE(tgt->unit_size != c->unit_size); in __free_by_rcu()
368 WARN_ON_ONCE(tgt->percpu_size != c->percpu_size); in __free_by_rcu()
370 llnode = llist_del_all(&c->waiting_for_gp); in __free_by_rcu()
374 llist_add_batch(llnode, c->waiting_for_gp_tail, &tgt->free_by_rcu_ttrace); in __free_by_rcu()
379 atomic_set(&c->call_rcu_in_progress, 0); in __free_by_rcu()
382 static void check_free_by_rcu(struct bpf_mem_cache *c) in check_free_by_rcu() argument
388 if (unlikely(!llist_empty(&c->free_llist_extra_rcu))) { in check_free_by_rcu()
389 inc_active(c, &flags); in check_free_by_rcu()
390 llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra_rcu)) in check_free_by_rcu()
391 if (__llist_add(llnode, &c->free_by_rcu)) in check_free_by_rcu()
392 c->free_by_rcu_tail = llnode; in check_free_by_rcu()
393 dec_active(c, &flags); in check_free_by_rcu()
396 if (llist_empty(&c->free_by_rcu)) in check_free_by_rcu()
399 if (atomic_xchg(&c->call_rcu_in_progress, 1)) { in check_free_by_rcu()
412 WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp)); in check_free_by_rcu()
414 inc_active(c, &flags); in check_free_by_rcu()
415 WRITE_ONCE(c->waiting_for_gp.first, __llist_del_all(&c->free_by_rcu)); in check_free_by_rcu()
416 c->waiting_for_gp_tail = c->free_by_rcu_tail; in check_free_by_rcu()
417 dec_active(c, &flags); in check_free_by_rcu()
419 if (unlikely(READ_ONCE(c->draining))) { in check_free_by_rcu()
420 free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size); in check_free_by_rcu()
421 atomic_set(&c->call_rcu_in_progress, 0); in check_free_by_rcu()
423 call_rcu_hurry(&c->rcu, __free_by_rcu); in check_free_by_rcu()
429 struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work); in bpf_mem_refill() local
433 cnt = c->free_cnt; in bpf_mem_refill()
434 if (cnt < c->low_watermark) in bpf_mem_refill()
438 alloc_bulk(c, c->batch, NUMA_NO_NODE, true); in bpf_mem_refill()
439 else if (cnt > c->high_watermark) in bpf_mem_refill()
440 free_bulk(c); in bpf_mem_refill()
442 check_free_by_rcu(c); in bpf_mem_refill()
445 static void notrace irq_work_raise(struct bpf_mem_cache *c) in irq_work_raise() argument
447 irq_work_queue(&c->refill_work); in irq_work_raise()
467 static void init_refill_work(struct bpf_mem_cache *c) in init_refill_work() argument
469 init_irq_work(&c->refill_work, bpf_mem_refill); in init_refill_work()
470 if (c->percpu_size) { in init_refill_work()
471 c->low_watermark = 1; in init_refill_work()
472 c->high_watermark = 3; in init_refill_work()
473 } else if (c->unit_size <= 256) { in init_refill_work()
474 c->low_watermark = 32; in init_refill_work()
475 c->high_watermark = 96; in init_refill_work()
482 c->low_watermark = max(32 * 256 / c->unit_size, 1); in init_refill_work()
483 c->high_watermark = max(96 * 256 / c->unit_size, 3); in init_refill_work()
485 c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1); in init_refill_work()
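For unit sizes above 256 bytes, lines 482-485 scale the default (32, 96) watermarks down by 256/unit_size, with floors of 1 and 3, and set the refill/free batch to three quarters of the gap between them. A standalone arithmetic check of those formulas (plain C, not kernel code):

/* Evaluates the watermark/batch formulas shown at lines 482-485 for a few
 * unit sizes above 256 bytes.
 */
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    int sizes[] = { 512, 1024, 2048, 4096 };

    for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
        int unit_size = sizes[i];
        int low   = MAX(32 * 256 / unit_size, 1);
        int high  = MAX(96 * 256 / unit_size, 3);
        int batch = MAX((high - low) / 4 * 3, 1);

        printf("unit_size=%4d low=%2d high=%2d batch=%2d\n",
               unit_size, low, high, batch);
    }
    return 0;
}

For a 4096-byte cache this yields low_watermark=2, high_watermark=6, batch=3.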
488 static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu) in prefill_mem_cache() argument
497 if (!c->percpu_size && c->unit_size <= 256) in prefill_mem_cache()
499 alloc_bulk(c, cnt, cpu_to_node(cpu), false); in prefill_mem_cache()
512 struct bpf_mem_cache *c; struct bpf_mem_cache __percpu *pc; in bpf_mem_alloc_init() local
540 c = per_cpu_ptr(pc, cpu); in bpf_mem_alloc_init()
541 c->unit_size = unit_size; in bpf_mem_alloc_init()
542 c->objcg = objcg; in bpf_mem_alloc_init()
543 c->percpu_size = percpu_size; in bpf_mem_alloc_init()
544 c->tgt = c; in bpf_mem_alloc_init()
545 init_refill_work(c); in bpf_mem_alloc_init()
546 prefill_mem_cache(c, cpu); in bpf_mem_alloc_init()
562 c = &cc->cache[i]; in bpf_mem_alloc_init()
563 c->unit_size = sizes[i]; in bpf_mem_alloc_init()
564 c->objcg = objcg; in bpf_mem_alloc_init()
565 c->percpu_size = percpu_size; in bpf_mem_alloc_init()
566 c->tgt = c; in bpf_mem_alloc_init()
568 init_refill_work(c); in bpf_mem_alloc_init()
569 prefill_mem_cache(c, cpu); in bpf_mem_alloc_init()
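Lines 512-569 are the setup path: bpf_mem_alloc_init() either configures a single fixed-size per-CPU cache (size != 0, lines 540-546) or one cache per size class (size == 0, lines 562-569), wiring watermarks and prefilling each CPU. Below is a minimal kernel-side usage sketch, assuming the declarations in include/linux/bpf_mem_alloc.h; my_elem, my_map_setup, my_map_update and my_map_teardown are hypothetical names and error handling is trimmed.

/* Kernel-side sketch (not from memalloc.c) of how a map implementation
 * might drive this allocator from any context.
 */
#include <linux/types.h>
#include <linux/bpf_mem_alloc.h>

struct my_elem {                        /* illustrative element type */
	int key;
	int val;
};

static struct bpf_mem_alloc ma;

static int my_map_setup(void)
{
	/* size != 0: one fixed-size cache (the ma->cache path above);
	 * size == 0 would build the full set of size classes instead.
	 */
	return bpf_mem_alloc_init(&ma, sizeof(struct my_elem), false);
}

static void my_map_update(int key, int val)
{
	struct my_elem *e = bpf_mem_cache_alloc(&ma);

	if (!e)
		return;                 /* per-CPU freelist empty, refill pending */
	e->key = key;
	e->val = val;
	/* ... publish e ... */
	bpf_mem_cache_free(&ma, e);     /* back onto the per-CPU freelist */
}

static void my_map_teardown(void)
{
	bpf_mem_alloc_destroy(&ma);
}

The teardown step corresponds to the bpf_mem_alloc_destroy() lines further down (745-771), which set c->draining, sync the irq_work and drain every llist before the per-CPU caches go away.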
596 struct bpf_mem_cache *c; in bpf_mem_alloc_percpu_unit_init() local
611 c = &cc->cache[i]; in bpf_mem_alloc_percpu_unit_init()
612 if (c->unit_size) in bpf_mem_alloc_percpu_unit_init()
615 c->unit_size = unit_size; in bpf_mem_alloc_percpu_unit_init()
616 c->objcg = objcg; in bpf_mem_alloc_percpu_unit_init()
617 c->percpu_size = percpu_size; in bpf_mem_alloc_percpu_unit_init()
618 c->tgt = c; in bpf_mem_alloc_percpu_unit_init()
620 init_refill_work(c); in bpf_mem_alloc_percpu_unit_init()
621 prefill_mem_cache(c, cpu); in bpf_mem_alloc_percpu_unit_init()
627 static void drain_mem_cache(struct bpf_mem_cache *c) in drain_mem_cache() argument
629 bool percpu = !!c->percpu_size; in drain_mem_cache()
638 free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu); in drain_mem_cache()
639 free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu); in drain_mem_cache()
640 free_all(__llist_del_all(&c->free_llist), percpu); in drain_mem_cache()
641 free_all(__llist_del_all(&c->free_llist_extra), percpu); in drain_mem_cache()
642 free_all(__llist_del_all(&c->free_by_rcu), percpu); in drain_mem_cache()
643 free_all(__llist_del_all(&c->free_llist_extra_rcu), percpu); in drain_mem_cache()
644 free_all(llist_del_all(&c->waiting_for_gp), percpu); in drain_mem_cache()
647 static void check_mem_cache(struct bpf_mem_cache *c) in check_mem_cache() argument
649 WARN_ON_ONCE(!llist_empty(&c->free_by_rcu_ttrace)); in check_mem_cache()
650 WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace)); in check_mem_cache()
651 WARN_ON_ONCE(!llist_empty(&c->free_llist)); in check_mem_cache()
652 WARN_ON_ONCE(!llist_empty(&c->free_llist_extra)); in check_mem_cache()
653 WARN_ON_ONCE(!llist_empty(&c->free_by_rcu)); in check_mem_cache()
654 WARN_ON_ONCE(!llist_empty(&c->free_llist_extra_rcu)); in check_mem_cache()
655 WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp)); in check_mem_cache()
661 struct bpf_mem_cache *c; in check_leaked_objs() local
666 c = per_cpu_ptr(ma->cache, cpu); in check_leaked_objs()
667 check_mem_cache(c); in check_leaked_objs()
674 c = &cc->cache[i]; in check_leaked_objs()
675 check_mem_cache(c); in check_leaked_objs()
745 struct bpf_mem_cache *c; in bpf_mem_alloc_destroy() local
751 c = per_cpu_ptr(ma->cache, cpu); in bpf_mem_alloc_destroy()
752 WRITE_ONCE(c->draining, true); in bpf_mem_alloc_destroy()
753 irq_work_sync(&c->refill_work); in bpf_mem_alloc_destroy()
754 drain_mem_cache(c); in bpf_mem_alloc_destroy()
755 rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress); in bpf_mem_alloc_destroy()
756 rcu_in_progress += atomic_read(&c->call_rcu_in_progress); in bpf_mem_alloc_destroy()
766 c = &cc->cache[i]; in bpf_mem_alloc_destroy()
767 WRITE_ONCE(c->draining, true); in bpf_mem_alloc_destroy()
768 irq_work_sync(&c->refill_work); in bpf_mem_alloc_destroy()
769 drain_mem_cache(c); in bpf_mem_alloc_destroy()
770 rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress); in bpf_mem_alloc_destroy()
771 rcu_in_progress += atomic_read(&c->call_rcu_in_progress); in bpf_mem_alloc_destroy()
782 static void notrace *unit_alloc(struct bpf_mem_cache *c) in unit_alloc() argument
799 if (local_inc_return(&c->active) == 1) { in unit_alloc()
800 llnode = __llist_del_first(&c->free_llist); in unit_alloc()
802 cnt = --c->free_cnt; in unit_alloc()
803 *(struct bpf_mem_cache **)llnode = c; in unit_alloc()
806 local_dec(&c->active); in unit_alloc()
810 if (cnt < c->low_watermark) in unit_alloc()
811 irq_work_raise(c); in unit_alloc()
825 static void notrace unit_free(struct bpf_mem_cache *c, void *ptr) in unit_free() argument
837 c->tgt = *(struct bpf_mem_cache **)llnode; in unit_free()
840 if (local_inc_return(&c->active) == 1) { in unit_free()
841 __llist_add(llnode, &c->free_llist); in unit_free()
842 cnt = ++c->free_cnt; in unit_free()
850 llist_add(llnode, &c->free_llist_extra); in unit_free()
852 local_dec(&c->active); in unit_free()
854 if (cnt > c->high_watermark) in unit_free()
856 irq_work_raise(c); in unit_free()
865 static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr) in unit_free_rcu() argument
870 c->tgt = *(struct bpf_mem_cache **)llnode; in unit_free_rcu()
873 if (local_inc_return(&c->active) == 1) { in unit_free_rcu()
874 if (__llist_add(llnode, &c->free_by_rcu)) in unit_free_rcu()
875 c->free_by_rcu_tail = llnode; in unit_free_rcu()
877 llist_add(llnode, &c->free_llist_extra_rcu); in unit_free_rcu()
879 local_dec(&c->active); in unit_free_rcu()
881 if (!atomic_read(&c->call_rcu_in_progress)) in unit_free_rcu()
882 irq_work_raise(c); in unit_free_rcu()
909 struct bpf_mem_cache *c; in bpf_mem_free() local
915 c = *(void **)(ptr - LLIST_NODE_SZ); in bpf_mem_free()
916 idx = bpf_mem_cache_idx(c->unit_size); in bpf_mem_free()
925 struct bpf_mem_cache *c; in bpf_mem_free_rcu() local
931 c = *(void **)(ptr - LLIST_NODE_SZ); in bpf_mem_free_rcu()
932 idx = bpf_mem_cache_idx(c->unit_size); in bpf_mem_free_rcu()
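bpf_mem_free() at line 915 recovers the owning cache by reading the pointer stored just before the object, which unit_alloc() wrote at line 803 and unit_free() reads back at line 837. In other words, every unit carries a hidden LLIST_NODE_SZ header that doubles as the llist_node while the object sits on a freelist. A userspace model of that layout (not kernel code; cache_model, model_alloc and model_free are illustrative names, and malloc/free stand in for the per-CPU freelists purely to keep the model runnable):

/* Userspace model of the hidden object header implied by lines 803, 837
 * and 915: the first LLIST_NODE_SZ bytes of every unit hold the llist_node
 * while the object is on a freelist, and the owning bpf_mem_cache pointer
 * while the object is handed out; the user pointer starts right after it.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NODE_SZ sizeof(void *)          /* stands in for LLIST_NODE_SZ (8 on 64-bit) */

struct cache_model { int unit_size; };  /* stands in for struct bpf_mem_cache */

/* Models unit_alloc() plus the "+ LLIST_NODE_SZ" offset applied before
 * returning the pointer to the user.
 */
static void *model_alloc(struct cache_model *c)
{
    void *unit = malloc(NODE_SZ + c->unit_size);

    if (!unit)
        return NULL;
    *(struct cache_model **)unit = c;   /* models *(struct bpf_mem_cache **)llnode = c */
    return (char *)unit + NODE_SZ;      /* user pointer starts after the header */
}

/* Models bpf_mem_free(): step back over the header to find the owning cache. */
static void model_free(void *ptr)
{
    struct cache_model *c = *(struct cache_model **)((char *)ptr - NODE_SZ);

    printf("freeing into cache with unit_size=%d\n", c->unit_size);
    free((char *)ptr - NODE_SZ);
}

int main(void)
{
    struct cache_model c = { .unit_size = 64 };
    void *p = model_alloc(&c);

    if (p) {
        memset(p, 0, c.unit_size);      /* user payload never touches the header */
        model_free(p);
    }
    return 0;
}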
987 struct bpf_mem_cache *c; in bpf_mem_cache_alloc_flags() local
990 c = this_cpu_ptr(ma->cache); in bpf_mem_cache_alloc_flags()
992 ret = unit_alloc(c); in bpf_mem_cache_alloc_flags()
996 memcg = get_memcg(c); in bpf_mem_cache_alloc_flags()
998 ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT); in bpf_mem_cache_alloc_flags()
1000 *(struct bpf_mem_cache **)ret = c; in bpf_mem_cache_alloc_flags()
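The closing bpf_mem_cache_alloc_flags() lines (987-1000) show the only path that may block: unit_alloc() is tried first, and only when the freelist is empty and the caller passed GFP_KERNEL does it fall back to a direct __alloc() accounted to the cache's memcg, stamping the cache pointer into the hidden header before returning. A hedged caller-side sketch, assuming a sleepable kernel context and the declarations in include/linux/bpf_mem_alloc.h (grab_elem is a hypothetical helper, and the sleepable/non-sleepable split is an assumption about the surrounding code):

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/bpf_mem_alloc.h>

static void *grab_elem(struct bpf_mem_alloc *ma, bool can_sleep)
{
	if (can_sleep)
		/* may take the memcg-charged GFP_KERNEL fallback above */
		return bpf_mem_cache_alloc_flags(ma, GFP_KERNEL);

	/* any-context path: per-CPU freelist only, never blocks */
	return bpf_mem_cache_alloc(ma);
}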