Lines matching refs: bc
49 struct btree_cache *bc = container_of(list, struct btree_cache, live[list->idx]); in btree_cache_can_free() local
53 can_free = max_t(ssize_t, 0, can_free - bc->nr_reserve); in btree_cache_can_free()
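The two references above sit in the shrinker's count path: btree_cache_can_free() recovers the containing btree_cache from its list (line 49) and then clamps the freeable count against bc->nr_reserve (the max_t(ssize_t, 0, ...) on line 53), so a cache at or below its reserve reports nothing to free. A minimal standalone sketch of just that clamp; the function and parameter names here are illustrative, not the kernel's:

    #include <stddef.h>
    #include <sys/types.h>      /* ssize_t */

    /* Report how many nodes a shrinker may take: the count minus the
     * reserve, clamped at zero so it never wraps to a huge unsigned value. */
    static size_t cache_can_free(size_t nr_on_list, size_t nr_reserve)
    {
            ssize_t n = (ssize_t) nr_on_list - (ssize_t) nr_reserve;

            return n > 0 ? (size_t) n : 0;
    }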
57 static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b) in btree_node_to_freedlist() argument
62 list_add(&b->list, &bc->freed_pcpu); in btree_node_to_freedlist()
64 list_add(&b->list, &bc->freed_nonpcpu); in btree_node_to_freedlist()
67 static void __bch2_btree_node_to_freelist(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_to_freelist() argument
72 bc->nr_freeable++; in __bch2_btree_node_to_freelist()
73 list_add(&b->list, &bc->freeable); in __bch2_btree_node_to_freelist()
78 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_to_freelist() local
80 mutex_lock(&bc->lock); in bch2_btree_node_to_freelist()
81 __bch2_btree_node_to_freelist(bc, b); in bch2_btree_node_to_freelist()
82 mutex_unlock(&bc->lock); in bch2_btree_node_to_freelist()
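The three functions referenced above maintain two different idle pools: bc->freeable holds nodes that still own their data buffer (and are counted in bc->nr_freeable), while bc->freed_pcpu / bc->freed_nonpcpu hold empty shells kept for reuse; the public bch2_btree_node_to_freelist() is the same list add bracketed by bc->lock. Below is a simplified userspace model of that split, assuming the freed-list choice keys off whether the node uses per-cpu reader locks (that condition is not visible in this listing), with all types and helpers illustrative:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* List heads are circular and, like the mutex, assumed initialized. */
    struct node  { struct node *prev, *next; bool pcpu_readers; void *data; };
    struct cache {
            pthread_mutex_t lock;
            struct node freeable, freed_pcpu, freed_nonpcpu;   /* list heads */
            size_t nr_freeable;
    };

    static void list_add_head(struct node *head, struct node *n)
    {
            n->next = head->next; n->prev = head;
            head->next->prev = n; head->next = n;
    }

    /* Node keeps its data; caller already holds bc->lock. */
    static void to_freelist(struct cache *bc, struct node *b)
    {
            bc->nr_freeable++;
            list_add_head(&bc->freeable, b);
    }

    /* Empty shell goes to the pool matching its lock flavour. */
    static void to_freedlist(struct cache *bc, struct node *b)
    {
            list_add_head(b->pcpu_readers ? &bc->freed_pcpu : &bc->freed_nonpcpu, b);
    }

    /* Public wrapper: same operation, bracketed by the cache lock. */
    static void node_to_freelist(struct cache *bc, struct node *b)
    {
            pthread_mutex_lock(&bc->lock);
            to_freelist(bc, b);
            pthread_mutex_unlock(&bc->lock);
    }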
117 static void btree_node_data_free(struct btree_cache *bc, struct btree *b) in btree_node_data_free() argument
124 --bc->nr_freeable; in btree_node_data_free()
125 btree_node_to_freedlist(bc, b); in btree_node_data_free()
202 static inline bool __btree_node_pinned(struct btree_cache *bc, struct btree *b) in __btree_node_pinned() argument
206 u64 mask = bc->pinned_nodes_mask[!!b->c.level]; in __btree_node_pinned()
209 bbpos_cmp(bc->pinned_nodes_start, pos) < 0 && in __btree_node_pinned()
210 bbpos_cmp(bc->pinned_nodes_end, pos) >= 0); in __btree_node_pinned()
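__btree_node_pinned(), referenced above, decides whether a node belongs on the pinned list: it picks a 64-bit mask by level (bc->pinned_nodes_mask[!!b->c.level], i.e. one mask for leaves and one for interior nodes) and requires the node's position to fall inside (pinned_nodes_start, pinned_nodes_end]. The per-btree mask test is not visible in the lines shown, so treat that term as an assumption; the sketch below also uses a simplified position type in place of bbpos:

    #include <stdbool.h>
    #include <stdint.h>

    /* Simplified stand-in for a bbpos: btree id plus a position. */
    struct xpos { uint32_t btree_id; uint64_t inode, offset; };

    static int xpos_cmp(struct xpos a, struct xpos b)
    {
            if (a.btree_id != b.btree_id) return a.btree_id < b.btree_id ? -1 : 1;
            if (a.inode    != b.inode)    return a.inode    < b.inode    ? -1 : 1;
            if (a.offset   != b.offset)   return a.offset   < b.offset   ? -1 : 1;
            return 0;
    }

    struct pin_cfg { uint64_t mask[2]; struct xpos start, end; };

    /* btree_id must be < 64 for the bit test to be well defined. */
    static bool node_pinned(const struct pin_cfg *cfg, unsigned level,
                            unsigned btree_id, struct xpos pos)
    {
            uint64_t mask = cfg->mask[!!level];   /* [0] leaves, [1] interior */

            return (mask & (1ULL << btree_id)) &&
                   xpos_cmp(cfg->start, pos) < 0 &&    /* start <  pos */
                   xpos_cmp(cfg->end,   pos) >= 0;     /* end   >= pos */
    }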
215 struct btree_cache *bc = &c->btree_cache; in bch2_node_pin() local
217 mutex_lock(&bc->lock); in bch2_node_pin()
220 list_move(&b->list, &bc->live[1].list); in bch2_node_pin()
221 bc->live[0].nr--; in bch2_node_pin()
222 bc->live[1].nr++; in bch2_node_pin()
224 mutex_unlock(&bc->lock); in bch2_node_pin()
229 struct btree_cache *bc = &c->btree_cache; in bch2_btree_cache_unpin() local
232 mutex_lock(&bc->lock); in bch2_btree_cache_unpin()
236 list_for_each_entry_safe(b, n, &bc->live[1].list, list) { in bch2_btree_cache_unpin()
238 list_move(&b->list, &bc->live[0].list); in bch2_btree_cache_unpin()
239 bc->live[0].nr++; in bch2_btree_cache_unpin()
240 bc->live[1].nr--; in bch2_btree_cache_unpin()
243 mutex_unlock(&bc->lock); in bch2_btree_cache_unpin()
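bch2_node_pin() and bch2_btree_cache_unpin(), whose references appear above, implement pinning as moving a node between the two live lists while keeping both counters in step: live[0] is the list the shrinker works, live[1] holds pinned nodes. The sketch below models just that movement; the pinned flag, the guard against double-pinning, and any re-check of __btree_node_pinned() during unpin are omitted, and all names are illustrative:

    #include <pthread.h>
    #include <stddef.h>

    struct node      { struct node *prev, *next; };
    struct live_list { struct node head; size_t nr; };     /* head is circular */
    struct cache     { pthread_mutex_t lock; struct live_list live[2]; };

    static void list_move_head(struct node *head, struct node *n)
    {
            n->prev->next = n->next; n->next->prev = n->prev;   /* unlink */
            n->next = head->next; n->prev = head;               /* add at head */
            head->next->prev = n; head->next = n;
    }

    /* Pin: node leaves the shrinkable list for the pinned one. */
    static void node_pin(struct cache *bc, struct node *b)
    {
            pthread_mutex_lock(&bc->lock);
            list_move_head(&bc->live[1].head, b);
            bc->live[0].nr--;
            bc->live[1].nr++;
            pthread_mutex_unlock(&bc->lock);
    }

    /* Unpin everything: drain live[1] back onto live[0]. */
    static void cache_unpin(struct cache *bc)
    {
            pthread_mutex_lock(&bc->lock);
            while (bc->live[1].head.next != &bc->live[1].head) {
                    list_move_head(&bc->live[0].head, bc->live[1].head.next);
                    bc->live[0].nr++;
                    bc->live[1].nr--;
            }
            pthread_mutex_unlock(&bc->lock);
    }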
248 void __bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_hash_remove() argument
250 lockdep_assert_held(&bc->lock); in __bch2_btree_node_hash_remove()
252 int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params); in __bch2_btree_node_hash_remove()
259 --bc->nr_by_btree[b->c.btree_id]; in __bch2_btree_node_hash_remove()
260 --bc->live[btree_node_pinned(b)].nr; in __bch2_btree_node_hash_remove()
264 void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) in bch2_btree_node_hash_remove() argument
266 __bch2_btree_node_hash_remove(bc, b); in bch2_btree_node_hash_remove()
267 __bch2_btree_node_to_freelist(bc, b); in bch2_btree_node_hash_remove()
270 int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_hash_insert() argument
276 int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash, in __bch2_btree_node_hash_insert()
282 bc->nr_by_btree[b->c.btree_id]++; in __bch2_btree_node_hash_insert()
284 bool p = __btree_node_pinned(bc, b); in __bch2_btree_node_hash_insert()
287 list_add_tail(&b->list, &bc->live[p].list); in __bch2_btree_node_hash_insert()
288 bc->live[p].nr++; in __bch2_btree_node_hash_insert()
292 int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b, in bch2_btree_node_hash_insert() argument
298 mutex_lock(&bc->lock); in bch2_btree_node_hash_insert()
299 int ret = __bch2_btree_node_hash_insert(bc, b); in bch2_btree_node_hash_insert()
300 mutex_unlock(&bc->lock); in bch2_btree_node_hash_insert()
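The hash insert/remove pairs referenced above keep three pieces of state consistent under bc->lock: the rhashtable that indexes nodes by key, the per-btree counts in bc->nr_by_btree[], and membership of (plus the counter for) the pinned or unpinned live list chosen via __btree_node_pinned(). A compact model of that bookkeeping follows; the index itself is stubbed so the surrounding accounting stays visible, and every identifier here is illustrative:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    enum { MAX_BTREE_IDS = 64 };

    struct node      { struct node *prev, *next; unsigned btree_id; bool pinned; };
    struct live_list { struct node head; size_t nr; };       /* head is circular */
    struct cache {
            pthread_mutex_t lock;
            size_t nr_by_btree[MAX_BTREE_IDS];
            struct live_list live[2];                         /* [0] normal, [1] pinned */
    };

    /* Stand-ins for the rhashtable insert/remove. */
    static int  index_insert(struct cache *bc, struct node *b) { (void)bc; (void)b; return 0; }
    static void index_remove(struct cache *bc, struct node *b) { (void)bc; (void)b; }

    static void list_add_tail_(struct node *head, struct node *n)
    {
            n->next = head; n->prev = head->prev;
            head->prev->next = n; head->prev = n;
    }

    static void list_del_(struct node *n)
    {
            n->prev->next = n->next; n->next->prev = n->prev;
    }

    /* Insert: index the node, charge its btree, park it on the right live
     * list.  Caller holds bc->lock, mirroring the locked wrapper above. */
    static int hash_insert(struct cache *bc, struct node *b)
    {
            int ret = index_insert(bc, b);
            if (ret)
                    return ret;

            bc->nr_by_btree[b->btree_id]++;
            list_add_tail_(&bc->live[b->pinned].head, b);
            bc->live[b->pinned].nr++;
            return 0;
    }

    /* Remove undoes the same three pieces of state.  (The real code leaves
     * moving the node onto its next list to the caller.) */
    static void hash_remove(struct cache *bc, struct node *b)
    {
            index_remove(bc, b);
            bc->nr_by_btree[b->btree_id]--;
            bc->live[b->pinned].nr--;
            list_del_(b);
    }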
335 static inline struct btree *btree_cache_find(struct btree_cache *bc, in btree_cache_find() argument
340 return rhashtable_lookup_fast(&bc->table, &v, bch_btree_cache_params); in btree_cache_find()
346 struct btree_cache *bc = &c->btree_cache; in __btree_node_reclaim_checks() local
348 lockdep_assert_held(&bc->lock); in __btree_node_reclaim_checks()
351 bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_noevict]++; in __btree_node_reclaim_checks()
355 bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_write_blocked]++; in __btree_node_reclaim_checks()
359 bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_will_make_reachable]++; in __btree_node_reclaim_checks()
365 bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_dirty]++; in __btree_node_reclaim_checks()
389 bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_read_in_flight]++; in __btree_node_reclaim_checks()
391 bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_write_in_flight]++; in __btree_node_reclaim_checks()
412 struct btree_cache *bc = &c->btree_cache; in __btree_node_reclaim() local
415 lockdep_assert_held(&bc->lock); in __btree_node_reclaim()
422 bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_lock_intent]++; in __btree_node_reclaim()
427 bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_lock_write]++; in __btree_node_reclaim()
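The not_freed[] references above are diagnostic counters: each time reclaim looks at a node and cannot free it, the reason is bucketed (noevict, write_blocked, will_make_reachable, dirty, read/write in flight from the check helper; lock_intent / lock_write from __btree_node_reclaim() when trylocks fail), and the totals later feed bch2_btree_cache_to_text(). Below is a reduced model of that pattern; the real code may write back a dirty node rather than merely counting it, which is dropped here, and the flag names are simplified stand-ins:

    #include <stddef.h>

    enum not_freed_reason {
            NF_noevict, NF_write_blocked, NF_will_make_reachable, NF_dirty,
            NF_read_in_flight, NF_write_in_flight,
            NF_lock_intent, NF_lock_write,      /* bumped by the caller on trylock failure */
            NF_NR,
    };

    struct node_state {
            unsigned noevict:1, write_blocked:1, will_make_reachable:1,
                     dirty:1, read_in_flight:1, write_in_flight:1;
    };

    /* 0 if the node looks reclaimable, -1 after recording why it is not. */
    static int reclaim_checks(size_t not_freed[NF_NR], const struct node_state *b)
    {
            if (b->noevict)             { not_freed[NF_noevict]++;             return -1; }
            if (b->write_blocked)       { not_freed[NF_write_blocked]++;       return -1; }
            if (b->will_make_reachable) { not_freed[NF_will_make_reachable]++; return -1; }
            if (b->dirty)               { not_freed[NF_dirty]++;               return -1; }
            if (b->read_in_flight)      { not_freed[NF_read_in_flight]++;      return -1; }
            if (b->write_in_flight)     { not_freed[NF_write_in_flight]++;     return -1; }
            return 0;
    }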
461 struct btree_cache *bc = container_of(list, struct btree_cache, live[list->idx]); in bch2_btree_cache_scan() local
462 struct bch_fs *c = container_of(bc, struct bch_fs, btree_cache); in bch2_btree_cache_scan()
470 bool trigger_writes = atomic_long_read(&bc->nr_dirty) + nr >= list->nr * 3 / 4; in bch2_btree_cache_scan()
475 mutex_lock(&bc->lock); in bch2_btree_cache_scan()
487 bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_cache_reserve] += nr - can_free; in bch2_btree_cache_scan()
492 list_for_each_entry_safe(b, t, &bc->freeable, list) { in bch2_btree_cache_scan()
506 btree_node_data_free(bc, b); in bch2_btree_cache_scan()
510 bc->nr_freed++; in bch2_btree_cache_scan()
519 bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_access_bit]++; in bch2_btree_cache_scan()
522 __bch2_btree_node_hash_remove(bc, b); in bch2_btree_cache_scan()
524 btree_node_to_freedlist(bc, b); in bch2_btree_cache_scan()
527 bc->nr_freed++; in bch2_btree_cache_scan()
540 mutex_unlock(&bc->lock); in bch2_btree_cache_scan()
545 mutex_lock(&bc->lock); in bch2_btree_cache_scan()
556 mutex_unlock(&bc->lock); in bch2_btree_cache_scan()
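bch2_btree_cache_scan(), whose references run from line 461 to 556 above, is the shrinker's scan callback. Reading the listed lines together: it caps its work at btree_cache_can_free() (charging the shortfall to the cache_reserve bucket on line 487), first releases data buffers held by nodes parked on bc->freeable, then walks the live list evicting cold nodes (hash remove plus freed-list handoff), skipping recently used ones via the access_bit bucket, and computes trigger_writes (line 470) to kick off writeback when dirty nodes dominate the list. The skeleton below keeps only the two-phase shape and a second-chance access bit; reclaim eligibility checks, the freed-list handoff, writeback triggering, and the lock drop/retake are all omitted, and every name is illustrative:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct node { struct node *prev, *next; bool accessed; void *data; };
    struct scan_cache {
            pthread_mutex_t lock;
            struct node freeable, live;                 /* circular list heads */
            size_t nr_freeable, nr_freed, not_freed_access_bit;
    };

    static void list_del_(struct node *n)
    { n->prev->next = n->next; n->next->prev = n->prev; }

    static size_t cache_scan(struct scan_cache *bc, size_t nr_to_free)
    {
            size_t freed = 0;
            struct node *b, *next;

            pthread_mutex_lock(&bc->lock);

            /* Phase 1: "freeable" nodes hold data but are already out of use. */
            for (b = bc->freeable.next; b != &bc->freeable && freed < nr_to_free; b = next) {
                    next = b->next;
                    list_del_(b);
                    bc->nr_freeable--;
                    b->data = NULL;             /* stands in for btree_node_data_free() */
                    bc->nr_freed++;
                    freed++;
            }

            /* Phase 2: evict cold nodes from the live list, second-chance style. */
            for (b = bc->live.next; b != &bc->live && freed < nr_to_free; b = next) {
                    next = b->next;
                    if (b->accessed) {
                            b->accessed = false;
                            bc->not_freed_access_bit++;
                            continue;
                    }
                    list_del_(b);               /* stands in for hash remove + freed list */
                    bc->nr_freed++;
                    freed++;
            }

            pthread_mutex_unlock(&bc->lock);
            return freed;
    }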
577 struct btree_cache *bc = &c->btree_cache; in bch2_fs_btree_cache_exit() local
581 shrinker_free(bc->live[1].shrink); in bch2_fs_btree_cache_exit()
582 shrinker_free(bc->live[0].shrink); in bch2_fs_btree_cache_exit()
586 mutex_lock(&bc->lock); in bch2_fs_btree_cache_exit()
589 list_move(&c->verify_data->list, &bc->live[0].list); in bch2_fs_btree_cache_exit()
597 list_add(&r->b->list, &bc->live[0].list); in bch2_fs_btree_cache_exit()
600 list_for_each_entry_safe(b, t, &bc->live[1].list, list) in bch2_fs_btree_cache_exit()
601 bch2_btree_node_hash_remove(bc, b); in bch2_fs_btree_cache_exit()
602 list_for_each_entry_safe(b, t, &bc->live[0].list, list) in bch2_fs_btree_cache_exit()
603 bch2_btree_node_hash_remove(bc, b); in bch2_fs_btree_cache_exit()
605 list_for_each_entry_safe(b, t, &bc->freeable, list) { in bch2_fs_btree_cache_exit()
609 btree_node_data_free(bc, b); in bch2_fs_btree_cache_exit()
616 list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu); in bch2_fs_btree_cache_exit()
618 list_for_each_entry_safe(b, t, &bc->freed_nonpcpu, list) { in bch2_fs_btree_cache_exit()
624 mutex_unlock(&bc->lock); in bch2_fs_btree_cache_exit()
627 for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++) in bch2_fs_btree_cache_exit()
628 BUG_ON(bc->nr_by_btree[i]); in bch2_fs_btree_cache_exit()
629 BUG_ON(bc->live[0].nr); in bch2_fs_btree_cache_exit()
630 BUG_ON(bc->live[1].nr); in bch2_fs_btree_cache_exit()
631 BUG_ON(bc->nr_freeable); in bch2_fs_btree_cache_exit()
633 if (bc->table_init_done) in bch2_fs_btree_cache_exit()
634 rhashtable_destroy(&bc->table); in bch2_fs_btree_cache_exit()
639 struct btree_cache *bc = &c->btree_cache; in bch2_fs_btree_cache_init() local
644 ret = rhashtable_init(&bc->table, &bch_btree_cache_params); in bch2_fs_btree_cache_init()
648 bc->table_init_done = true; in bch2_fs_btree_cache_init()
652 for (i = 0; i < bc->nr_reserve; i++) { in bch2_fs_btree_cache_init()
656 __bch2_btree_node_to_freelist(bc, b); in bch2_fs_btree_cache_init()
659 list_splice_init(&bc->live[0].list, &bc->freeable); in bch2_fs_btree_cache_init()
666 bc->live[0].shrink = shrink; in bch2_fs_btree_cache_init()
670 shrink->private_data = &bc->live[0]; in bch2_fs_btree_cache_init()
676 bc->live[1].shrink = shrink; in bch2_fs_btree_cache_init()
680 shrink->private_data = &bc->live[1]; in bch2_fs_btree_cache_init()
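In bch2_fs_btree_cache_init(), referenced above, the cache pre-populates its reserve: once the rhashtable is set up it allocates bc->nr_reserve nodes, pushes each onto the freelist, splices anything left on live[0] over to bc->freeable, then registers one shrinker per live list with private_data pointing at that list. A minimal sketch of just the reserve prefill, so later allocations have something to fall back on; error unwinding, the hash table, and shrinker registration are left out, and the names are illustrative:

    #include <stdlib.h>
    #include <stddef.h>

    struct node       { struct node *prev, *next; void *data; };
    struct init_cache { struct node freeable; size_t nr_reserve, nr_freeable; };

    static void list_add_head(struct node *head, struct node *n)
    { n->next = head->next; n->prev = head; head->next->prev = n; head->next = n; }

    /* Allocate nr_reserve fully populated nodes and park them on "freeable". */
    static int cache_prefill_reserve(struct init_cache *bc, size_t node_bytes)
    {
            for (size_t i = 0; i < bc->nr_reserve; i++) {
                    struct node *b = calloc(1, sizeof(*b));
                    if (!b)
                            return -1;
                    b->data = malloc(node_bytes);
                    if (!b->data) {
                            free(b);
                            return -1;
                    }
                    list_add_head(&bc->freeable, b);
                    bc->nr_freeable++;
            }
            return 0;
    }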
688 void bch2_fs_btree_cache_init_early(struct btree_cache *bc) in bch2_fs_btree_cache_init_early() argument
690 mutex_init(&bc->lock); in bch2_fs_btree_cache_init_early()
691 for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) { in bch2_fs_btree_cache_init_early()
692 bc->live[i].idx = i; in bch2_fs_btree_cache_init_early()
693 INIT_LIST_HEAD(&bc->live[i].list); in bch2_fs_btree_cache_init_early()
695 INIT_LIST_HEAD(&bc->freeable); in bch2_fs_btree_cache_init_early()
696 INIT_LIST_HEAD(&bc->freed_pcpu); in bch2_fs_btree_cache_init_early()
697 INIT_LIST_HEAD(&bc->freed_nonpcpu); in bch2_fs_btree_cache_init_early()
709 struct btree_cache *bc = &c->btree_cache; in bch2_btree_cache_cannibalize_unlock() local
711 if (bc->alloc_lock == current) { in bch2_btree_cache_cannibalize_unlock()
713 bc->alloc_lock = NULL; in bch2_btree_cache_cannibalize_unlock()
714 closure_wake_up(&bc->alloc_wait); in bch2_btree_cache_cannibalize_unlock()
721 struct btree_cache *bc = &c->btree_cache; in bch2_btree_cache_cannibalize_lock() local
725 if (try_cmpxchg(&bc->alloc_lock, &old, current) || old == current) in bch2_btree_cache_cannibalize_lock()
733 closure_wait(&bc->alloc_wait, cl); in bch2_btree_cache_cannibalize_lock()
737 if (try_cmpxchg(&bc->alloc_lock, &old, current) || old == current) { in bch2_btree_cache_cannibalize_lock()
739 closure_wake_up(&bc->alloc_wait); in bch2_btree_cache_cannibalize_lock()
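The cannibalize lock referenced above (alloc_lock / alloc_wait) is a single-owner slot: bch2_btree_cache_cannibalize_lock() claims it with try_cmpxchg(&bc->alloc_lock, &old, current) and treats old == current as success (so it is re-entrant for the owning task), parking losers on the alloc_wait closure list; unlock clears the slot and wakes waiters. A reduced C11 model of the claim/release protocol follows; the closure waitlist is left out, so a failed trylock simply returns false, and all names are illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct cannibalize_lock { _Atomic(void *) owner; };

    /* Claim the slot if it is free, or succeed immediately if this task
     * already owns it (matching the old == current check above). */
    static bool cannibalize_trylock(struct cannibalize_lock *l, void *task)
    {
            void *old = NULL;

            return atomic_compare_exchange_strong(&l->owner, &old, task) ||
                   old == task;
    }

    /* Only the owner releases; waking waiters is omitted in this model. */
    static void cannibalize_unlock(struct cannibalize_lock *l, void *task)
    {
            void *expected = task;

            atomic_compare_exchange_strong(&l->owner, &expected, NULL);
    }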
753 struct btree_cache *bc = &c->btree_cache; in btree_node_cannibalize() local
756 for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) in btree_node_cannibalize()
757 list_for_each_entry_reverse(b, &bc->live[i].list, list) in btree_node_cannibalize()
762 for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) in btree_node_cannibalize()
763 list_for_each_entry_reverse(b, &bc->live[i].list, list) in btree_node_cannibalize()
779 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_mem_alloc() local
781 ? &bc->freed_pcpu in bch2_btree_node_mem_alloc()
782 : &bc->freed_nonpcpu; in bch2_btree_node_mem_alloc()
786 mutex_lock(&bc->lock); in bch2_btree_node_mem_alloc()
802 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
808 mutex_lock(&bc->lock); in bch2_btree_node_mem_alloc()
819 list_for_each_entry(b2, &bc->freeable, list) in bch2_btree_node_mem_alloc()
825 --bc->nr_freeable; in bch2_btree_node_mem_alloc()
826 btree_node_to_freedlist(bc, b2); in bch2_btree_node_mem_alloc()
827 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
834 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
867 mutex_lock(&bc->lock); in bch2_btree_node_mem_alloc()
870 if (bc->alloc_lock == current) { in bch2_btree_node_mem_alloc()
873 __bch2_btree_node_hash_remove(bc, b2); in bch2_btree_node_mem_alloc()
878 btree_node_to_freedlist(bc, b2); in bch2_btree_node_mem_alloc()
886 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
892 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
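bch2_btree_node_mem_alloc(), whose references span lines 779 to 892 above, layers its fallbacks: reuse a shell from the matching freed list (pcpu vs non-pcpu), otherwise allocate one; take a data buffer from a node parked on bc->freeable (moving that donor to the freed list and dropping nr_freeable), otherwise allocate fresh data; and if memory is truly unavailable, the thread holding the cannibalize lock may evict a live node (the hash-remove plus freed-list references around line 873). A very rough decision-flow sketch with every helper stubbed; this is an interpretation of the listed lines, not the kernel code:

    #include <stdlib.h>

    struct nstub { void *data; };

    /* Stubs standing in for the real steps named in the listing. */
    static struct nstub *shell_from_freed_list(void) { return NULL; }
    static struct nstub *shell_fresh(void)           { return calloc(1, sizeof(struct nstub)); }
    static int data_from_freeable(struct nstub *b)   { (void)b; return -1; }
    static int data_fresh(struct nstub *b)           { b->data = malloc(4096); return b->data ? 0 : -1; }
    static struct nstub *cannibalize_live(void)      { return NULL; }   /* needs the cannibalize lock */

    static struct nstub *node_mem_alloc(int have_cannibalize_lock)
    {
            struct nstub *b = shell_from_freed_list();
            if (!b)
                    b = shell_fresh();
            if (b && (data_from_freeable(b) == 0 || data_fresh(b) == 0))
                    return b;

            /* Out of memory: evicting a live node is only legal for the
             * thread that holds the cannibalize lock. */
            free(b);
            return have_cannibalize_lock ? cannibalize_live() : NULL;
    }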
906 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_fill() local
957 if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) { in bch2_btree_node_fill()
963 mutex_lock(&bc->lock); in bch2_btree_node_fill()
964 __bch2_btree_node_to_freelist(bc, b); in bch2_btree_node_fill()
965 mutex_unlock(&bc->lock); in bch2_btree_node_fill()
1045 struct btree_cache *bc = &c->btree_cache; in __bch2_btree_node_get() local
1052 b = btree_cache_find(bc, k); in __bch2_btree_node_get()
1244 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_get_noiter() local
1256 b = btree_cache_find(bc, k); in bch2_btree_node_get_noiter()
1327 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_prefetch() local
1332 struct btree *b = btree_cache_find(bc, k); in bch2_btree_node_prefetch()
1349 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_evict() local
1352 b = btree_cache_find(bc, k); in bch2_btree_node_evict()
1380 mutex_lock(&bc->lock); in bch2_btree_node_evict()
1381 bch2_btree_node_hash_remove(bc, b); in bch2_btree_node_evict()
1382 btree_node_data_free(bc, b); in bch2_btree_node_evict()
1383 mutex_unlock(&bc->lock); in bch2_btree_node_evict()
1486 void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc) in bch2_btree_cache_to_text() argument
1488 struct bch_fs *c = container_of(bc, struct bch_fs, btree_cache); in bch2_btree_cache_to_text()
1493 prt_btree_cache_line(out, c, "live:", bc->live[0].nr); in bch2_btree_cache_to_text()
1494 prt_btree_cache_line(out, c, "pinned:", bc->live[1].nr); in bch2_btree_cache_to_text()
1495 prt_btree_cache_line(out, c, "reserve:", bc->nr_reserve); in bch2_btree_cache_to_text()
1496 prt_btree_cache_line(out, c, "freed:", bc->nr_freeable); in bch2_btree_cache_to_text()
1497 prt_btree_cache_line(out, c, "dirty:", atomic_long_read(&bc->nr_dirty)); in bch2_btree_cache_to_text()
1498 prt_printf(out, "cannibalize lock:\t%s\n", bc->alloc_lock ? "held" : "not held"); in bch2_btree_cache_to_text()
1501 for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++) { in bch2_btree_cache_to_text()
1504 prt_human_readable_u64(out, bc->nr_by_btree[i] * c->opts.btree_node_size); in bch2_btree_cache_to_text()
1505 prt_printf(out, " (%zu)\n", bc->nr_by_btree[i]); in bch2_btree_cache_to_text()
1510 prt_printf(out, "freed:\t%zu\n", bc->nr_freed); in bch2_btree_cache_to_text()
1513 for (unsigned i = 0; i < ARRAY_SIZE(bc->not_freed); i++) in bch2_btree_cache_to_text()
1515 bch2_btree_cache_not_freed_reasons_strs[i], bc->not_freed[i]); in bch2_btree_cache_to_text()