Lines matching refs:lru in kernel/bpf/bpf_lru_list.c
Each entry below is: source line number, the matching code, and the enclosing function; every hit is a use of the struct bpf_lru *lru argument.

139 static void __bpf_lru_list_rotate_active(struct bpf_lru *lru, in __bpf_lru_list_rotate_active() argument
153 if (++i == lru->nr_scans || node == first_node) in __bpf_lru_list_rotate_active()
166 static void __bpf_lru_list_rotate_inactive(struct bpf_lru *lru, in __bpf_lru_list_rotate_inactive() argument
182 while (i < lru->nr_scans) { in __bpf_lru_list_rotate_inactive()
206 __bpf_lru_list_shrink_inactive(struct bpf_lru *lru, in __bpf_lru_list_shrink_inactive() argument
220 } else if (lru->del_from_htab(lru->del_arg, node)) { in __bpf_lru_list_shrink_inactive()
227 if (++i == lru->nr_scans) in __bpf_lru_list_shrink_inactive()
237 static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l) in __bpf_lru_list_rotate() argument
240 __bpf_lru_list_rotate_active(lru, l); in __bpf_lru_list_rotate()
242 __bpf_lru_list_rotate_inactive(lru, l); in __bpf_lru_list_rotate()
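Note: the rotate helpers above implement a clock-style LRU approximation. Each pass is capped at lru->nr_scans; a referenced node has its reference bit cleared and stays on the active list (second chance), anything else is demoted to the inactive list. A minimal userspace model of that demotion follows; the node layout, list shape, and names are simplified stand-ins for the kernel's bpf_lru_node and bpf_lru_list, not the real types.

#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *next;
	bool ref;	/* stand-in for the node's reference bit */
	int key;
};

struct two_lists {
	struct node *active;
	struct node *inactive;
	unsigned int nr_scans;	/* scan budget, like lru->nr_scans */
};

/* Model of the active-list rotation: scan at most nr_scans nodes;
 * a referenced node is spared (ref cleared, second chance), an
 * unreferenced one is demoted to the inactive list. */
static void rotate_active(struct two_lists *l)
{
	struct node **pp = &l->active;
	unsigned int i = 0;

	while (*pp && i++ < l->nr_scans) {
		struct node *n = *pp;

		if (n->ref) {
			n->ref = false;
			pp = &n->next;
		} else {
			*pp = n->next;		/* unlink from active */
			n->next = l->inactive;	/* push onto inactive */
			l->inactive = n;
		}
	}
}

int main(void)
{
	struct node b = { .next = NULL, .ref = false, .key = 2 };
	struct node a = { .next = &b, .ref = true, .key = 1 };
	struct two_lists l = { .active = &a, .inactive = NULL, .nr_scans = 8 };

	rotate_active(&l);
	printf("demoted key: %d\n", l.inactive->key);	/* prints 2 */
	return 0;
}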
255 static unsigned int __bpf_lru_list_shrink(struct bpf_lru *lru, in __bpf_lru_list_shrink() argument
266 nshrinked = __bpf_lru_list_shrink_inactive(lru, l, tgt_nshrink, in __bpf_lru_list_shrink()
279 if (lru->del_from_htab(lru->del_arg, node)) { in __bpf_lru_list_shrink()
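Note: the shrink entries show the eviction policy. __bpf_lru_list_shrink_inactive() scans the inactive list first, and a node only becomes free once the owner's lru->del_from_htab(lru->del_arg, node) callback agrees; __bpf_lru_list_shrink() falls back to another pick only when that scan frees nothing. A hedged sketch of the two-phase idea, with simplifications: the callback type and names are stand-ins for del_from_htab_func, and the kernel's forced fallback pass also ignores reference bits, which this model does not.

#include <stdbool.h>
#include <stddef.h>

struct node { struct node *next; bool ref; };

/* Eviction gate in the style of lru->del_from_htab(lru->del_arg, node):
 * returns true once the owner has unlinked the element and the node
 * may be recycled. */
typedef bool (*del_fn)(void *arg, struct node *n);

/* One bounded scan of a list for a victim: referenced nodes are
 * spared, unreferenced ones are offered to the callback. */
static struct node *shrink_one(struct node **list, del_fn del, void *arg,
			       unsigned int nr_scans)
{
	struct node **pp = list;
	unsigned int i = 0;

	while (*pp && i++ < nr_scans) {
		struct node *n = *pp;

		if (n->ref) {
			n->ref = false;		/* second chance */
			pp = &n->next;
		} else if (del(arg, n)) {
			*pp = n->next;		/* evicted */
			return n;
		} else {
			pp = &n->next;		/* owner refused */
		}
	}
	return NULL;
}

/* Two-phase policy: prefer an inactive victim; only if that scan
 * yields nothing, try the active list as a fallback. */
static struct node *shrink(struct node **inactive, struct node **active,
			   del_fn del, void *arg, unsigned int nr_scans)
{
	struct node *n = shrink_one(inactive, del, arg, nr_scans);

	return n ? n : shrink_one(active, del, arg, nr_scans);
}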
318 static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru, in bpf_lru_list_pop_free_to_local() argument
321 struct bpf_lru_list *l = &lru->common_lru.lru_list; in bpf_lru_list_pop_free_to_local()
329 __bpf_lru_list_rotate(lru, l); in bpf_lru_list_pop_free_to_local()
340 __bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree, in bpf_lru_list_pop_free_to_local()
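Note: bpf_lru_list_pop_free_to_local() takes the global lru_list lock once, rotates, moves free nodes onto the caller's per-CPU local list, and shrinks for the shortfall (LOCAL_FREE_TARGET - nfree) while still holding the lock; the batching amortizes the contended global lock across many allocations. A userspace sketch of the batch move only; LOCAL_FREE_TARGET's value, the pthread lock, and the struct names are stand-ins.

#include <pthread.h>
#include <stddef.h>

#define LOCAL_FREE_TARGET 16	/* stand-in; the kernel derives its own */

struct node { struct node *next; };

struct global_list {
	pthread_mutex_t lock;
	struct node *free;
};

struct local_list {
	struct node *free;	/* CPU-private, cheap to take from */
};

/* Refill a local free list with a single acquisition of the global
 * lock; the batch size amortizes the lock over future allocations. */
static unsigned int pop_free_to_local(struct global_list *g,
				      struct local_list *loc)
{
	unsigned int moved = 0;

	pthread_mutex_lock(&g->lock);
	while (moved < LOCAL_FREE_TARGET && g->free) {
		struct node *n = g->free;

		g->free = n->next;
		n->next = loc->free;
		loc->free = n;
		moved++;
	}
	pthread_mutex_unlock(&g->lock);
	return moved;	/* the kernel shrinks to cover any shortfall */
}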
347 static void __local_list_add_pending(struct bpf_lru *lru, in __local_list_add_pending() argument
353 *(u32 *)((void *)node + lru->hash_offset) = hash; in __local_list_add_pending()
375 __local_list_pop_pending(struct bpf_lru *lru, struct bpf_lru_locallist *loc_l) in __local_list_pop_pending() argument
385 lru->del_from_htab(lru->del_arg, node)) { in __local_list_pop_pending()
399 static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru, in bpf_percpu_lru_pop_free() argument
408 l = per_cpu_ptr(lru->percpu_lru, cpu); in bpf_percpu_lru_pop_free()
412 __bpf_lru_list_rotate(lru, l); in bpf_percpu_lru_pop_free()
416 __bpf_lru_list_shrink(lru, l, PERCPU_FREE_TARGET, free_list, in bpf_percpu_lru_pop_free()
421 *(u32 *)((void *)node + lru->hash_offset) = hash; in bpf_percpu_lru_pop_free()
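Note: both pop paths stamp the requested hash into the element that embeds the node using nothing but a byte offset, as in the line above: *(u32 *)((void *)node + lru->hash_offset) = hash. The owner computes that offset once at init time, so the LRU layer never learns the element layout. A small runnable demonstration of the trick; the element struct is invented for illustration.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct lru_node { struct lru_node *next; };

/* Hypothetical element embedding the node, as a hash table might do. */
struct elem {
	struct lru_node lru;
	uint32_t hash;
	char key[16];
};

/* What the owner would pass as hash_offset at init time: the offset
 * of ->hash relative to the embedded node. */
static const size_t hash_offset =
	offsetof(struct elem, hash) - offsetof(struct elem, lru);

static void stamp_hash(struct lru_node *node, uint32_t hash)
{
	/* Mirrors: *(u32 *)((void *)node + lru->hash_offset) = hash; */
	*(uint32_t *)((char *)node + hash_offset) = hash;
}

int main(void)
{
	struct elem e = { 0 };

	stamp_hash(&e.lru, 0xdeadbeef);
	printf("stamped hash: 0x%x\n", (unsigned)e.hash);	/* 0xdeadbeef */
	return 0;
}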
431 static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru, in bpf_common_lru_pop_free() argument
435 struct bpf_common_lru *clru = &lru->common_lru; in bpf_common_lru_pop_free()
447 bpf_lru_list_pop_free_to_local(lru, loc_l); in bpf_common_lru_pop_free()
452 __local_list_add_pending(lru, loc_l, cpu, node, hash); in bpf_common_lru_pop_free()
476 node = __local_list_pop_pending(lru, steal_loc_l); in bpf_common_lru_pop_free()
487 __local_list_add_pending(lru, loc_l, cpu, node, hash); in bpf_common_lru_pop_free()
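Note: when its own local list stays empty even after a refill, bpf_common_lru_pop_free() steals: it walks the other CPUs' local lists and takes a free node, or a pending one that __local_list_pop_pending() can still evict via del_from_htab. A sketch of the round-robin steal loop; NR_CPUS, the list shapes, and the elided del_from_htab check are simplifications.

#include <stddef.h>

#define NR_CPUS 4	/* stand-in for the real CPU count */

struct node { struct node *next; };

struct local_list {
	struct node *free;
	struct node *pending;	/* handed out, not yet committed */
};

/* Try the victim's free list first, then its pending list. In the
 * kernel a pending node may only be stolen if del_from_htab()
 * succeeds; that check is elided here. */
static struct node *steal_from(struct local_list *v)
{
	struct node *n = v->free;

	if (n) {
		v->free = n->next;
		return n;
	}
	n = v->pending;
	if (n)
		v->pending = n->next;
	return n;
}

/* Round-robin over the other CPUs, starting just after our own. */
static struct node *steal(struct local_list lists[NR_CPUS], int cpu)
{
	for (int i = 1; i < NR_CPUS; i++) {
		struct node *n = steal_from(&lists[(cpu + i) % NR_CPUS]);

		if (n)
			return n;
	}
	return NULL;
}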
494 struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash) in bpf_lru_pop_free() argument
496 if (lru->percpu) in bpf_lru_pop_free()
497 return bpf_percpu_lru_pop_free(lru, hash); in bpf_lru_pop_free()
499 return bpf_common_lru_pop_free(lru, hash); in bpf_lru_pop_free()
502 static void bpf_common_lru_push_free(struct bpf_lru *lru, in bpf_common_lru_push_free() argument
515 loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu); in bpf_common_lru_push_free()
533 bpf_lru_list_push_free(&lru->common_lru.lru_list, node); in bpf_common_lru_push_free()
536 static void bpf_percpu_lru_push_free(struct bpf_lru *lru, in bpf_percpu_lru_push_free() argument
542 l = per_cpu_ptr(lru->percpu_lru, node->cpu); in bpf_percpu_lru_push_free()
551 void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node) in bpf_lru_push_free() argument
553 if (lru->percpu) in bpf_lru_push_free()
554 bpf_percpu_lru_push_free(lru, node); in bpf_lru_push_free()
556 bpf_common_lru_push_free(lru, node); in bpf_lru_push_free()
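Note: on the free side, the node itself remembers who allocated it; both push paths resolve the target list via node->cpu (per_cpu_ptr(..., node->cpu) above), so a node freed on another CPU still returns to its owner. A tiny model of that routing; NR_CPUS and the structs are stand-ins.

#include <stddef.h>

#define NR_CPUS 4

struct node {
	struct node *next;
	int cpu;	/* like node->cpu: the CPU that popped it */
};

struct local_list { struct node *free; };

/* A freed node goes back to the CPU that allocated it, not to the
 * CPU that happens to be freeing it. */
static void push_free(struct local_list lists[NR_CPUS], struct node *n)
{
	struct local_list *loc = &lists[n->cpu];

	n->next = loc->free;
	loc->free = n;
}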
559 static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf, in bpf_common_lru_populate() argument
563 struct bpf_lru_list *l = &lru->common_lru.lru_list; in bpf_common_lru_populate()
577 static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf, in bpf_percpu_lru_populate() argument
592 l = per_cpu_ptr(lru->percpu_lru, cpu); in bpf_percpu_lru_populate()
608 void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset, in bpf_lru_populate() argument
611 if (lru->percpu) in bpf_lru_populate()
612 bpf_percpu_lru_populate(lru, buf, node_offset, elem_size, in bpf_lru_populate()
615 bpf_common_lru_populate(lru, buf, node_offset, elem_size, in bpf_lru_populate()
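Note: both populate paths carve a flat preallocated buffer into elements and link each element's embedded node onto a free list: element i lives at buf + i * elem_size, its node at node_offset inside it. A runnable userspace model of that walk; the node struct and parameter types are simplified.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct lru_node { struct lru_node *next; };

/* Link every embedded node in a flat allocation onto a free list,
 * mirroring the buf/node_offset/elem_size walk of the populate paths. */
static struct lru_node *populate(void *buf, size_t node_offset,
				 size_t elem_size, size_t nr_elems)
{
	struct lru_node *head = NULL;

	for (size_t i = 0; i < nr_elems; i++) {
		struct lru_node *n =
			(struct lru_node *)((char *)buf + i * elem_size +
					    node_offset);

		n->next = head;
		head = n;
	}
	return head;
}

int main(void)
{
	enum { NR_ELEMS = 4, ELEM_SIZE = 64, NODE_OFFSET = 0 };
	void *buf = calloc(NR_ELEMS, ELEM_SIZE);
	struct lru_node *n;
	unsigned int count = 0;

	for (n = populate(buf, NODE_OFFSET, ELEM_SIZE, NR_ELEMS); n;
	     n = n->next)
		count++;
	printf("%u nodes on the free list\n", count);	/* prints 4 */
	free(buf);
	return 0;
}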
646 int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset, in bpf_lru_init() argument
652 lru->percpu_lru = alloc_percpu(struct bpf_lru_list); in bpf_lru_init()
653 if (!lru->percpu_lru) in bpf_lru_init()
659 l = per_cpu_ptr(lru->percpu_lru, cpu); in bpf_lru_init()
662 lru->nr_scans = PERCPU_NR_SCANS; in bpf_lru_init()
664 struct bpf_common_lru *clru = &lru->common_lru; in bpf_lru_init()
678 lru->nr_scans = LOCAL_NR_SCANS; in bpf_lru_init()
681 lru->percpu = percpu; in bpf_lru_init()
682 lru->del_from_htab = del_from_htab; in bpf_lru_init()
683 lru->del_arg = del_arg; in bpf_lru_init()
684 lru->hash_offset = hash_offset; in bpf_lru_init()
689 void bpf_lru_destroy(struct bpf_lru *lru) in bpf_lru_destroy() argument
691 if (lru->percpu) in bpf_lru_destroy()
692 free_percpu(lru->percpu_lru); in bpf_lru_destroy()
694 free_percpu(lru->common_lru.local_list); in bpf_lru_destroy()
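Note: taken together, bpf_lru_init(), bpf_lru_populate(), bpf_lru_pop_free(), bpf_lru_push_free(), and bpf_lru_destroy() are the whole surface a caller sees. Below is a hedged in-kernel sketch of a caller, modeled loosely on how the BPF hash table drives this API; the element layout, the nr_elems parameter of bpf_lru_populate(), and the always-true del callback are assumptions for illustration, and the snippet only compiles inside kernel/bpf.

/* Kernel context only: compiles next to kernel/bpf/bpf_lru_list.c. */
#include "bpf_lru_list.h"

/* Hypothetical element embedding the LRU node, htab_elem-style. */
struct my_elem {
	struct bpf_lru_node lru_node;
	u32 hash;
	char key[16];
};

/* del_from_htab callback: must unlink the element from the owner's
 * table and report whether the node may be recycled. Always-true
 * here purely for illustration. */
static bool my_del(void *arg, struct bpf_lru_node *node)
{
	return true;
}

static int my_setup(struct bpf_lru *lru, void *buf, u32 nr_elems)
{
	int err;

	/* hash_offset: where the u32 hash sits relative to the node. */
	err = bpf_lru_init(lru, false /* common, not per-CPU */,
			   offsetof(struct my_elem, hash) -
			   offsetof(struct my_elem, lru_node),
			   my_del, NULL);
	if (err)
		return err;

	/* Carve buf into nr_elems elements and seed the free list. */
	bpf_lru_populate(lru, buf, offsetof(struct my_elem, lru_node),
			 sizeof(struct my_elem), nr_elems);
	return 0;
}

static void my_get_put(struct bpf_lru *lru, u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(lru, hash);

	if (node)
		bpf_lru_push_free(lru, node);	/* return it unused */
}

static void my_teardown(struct bpf_lru *lru)
{
	bpf_lru_destroy(lru);	/* frees percpu_lru or local_list */
}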