Lines Matching refs:b — cross-reference hits for the symbol b in the bcache allocator (drivers/md/bcache/alloc.c)

76 uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)  in bch_inc_gen()  argument
78 uint8_t ret = ++b->gen; in bch_inc_gen()
80 ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b)); in bch_inc_gen()
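
The bch_inc_gen() hits above bump an 8-bit per-bucket generation counter and feed its distance from the last GC into need_gc. Below is a minimal userspace sketch of that wraparound arithmetic; bucket_sketch and gc_gen_sketch() are illustrative stand-ins, assuming bucket_gc_gen() is essentially gen minus last_gc done in uint8_t.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct bucket: only the generation fields. */
struct bucket_sketch {
    uint8_t gen;      /* bumped each time the bucket is invalidated */
    uint8_t last_gc;  /* value of gen recorded at the last garbage collection */
};

/* Distance travelled since the last GC; doing the subtraction in uint8_t
 * keeps the result correct across wraparound. */
static uint8_t gc_gen_sketch(const struct bucket_sketch *b)
{
    return (uint8_t)(b->gen - b->last_gc);
}

int main(void)
{
    struct bucket_sketch b = { .gen = 250, .last_gc = 250 };

    for (int i = 0; i < 10; i++)
        b.gen++;                          /* wraps from 255 back to 0 */

    printf("gen=%u last_gc=%u gc_gen=%u\n",
           (unsigned)b.gen, (unsigned)b.last_gc, (unsigned)gc_gen_sketch(&b));
    /* prints: gen=4 last_gc=250 gc_gen=10 */
    return 0;
}
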
89 struct bucket *b; in bch_rescale_priorities() local
107 for_each_bucket(b, ca) in bch_rescale_priorities()
108 if (b->prio && in bch_rescale_priorities()
109 b->prio != BTREE_PRIO && in bch_rescale_priorities()
110 !atomic_read(&b->pin)) { in bch_rescale_priorities()
111 b->prio--; in bch_rescale_priorities()
112 c->min_prio = min(c->min_prio, b->prio); in bch_rescale_priorities()
125 static inline bool can_inc_bucket_gen(struct bucket *b) in can_inc_bucket_gen() argument
127 return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX; in can_inc_bucket_gen()
130 bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b) in bch_can_invalidate_bucket() argument
132 return (ca->set->gc_mark_valid || b->reclaimable_in_gc) && in bch_can_invalidate_bucket()
133 ((!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) && in bch_can_invalidate_bucket()
134 !atomic_read(&b->pin) && can_inc_bucket_gen(b)); in bch_can_invalidate_bucket()
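
bch_can_invalidate_bucket() combines three tests: GC must consider the bucket reclaimable (or it carries no mark), nobody may hold a pin on it, and its generation must still be far enough from last_gc that another bump is safe. Below is a compressed sketch of that predicate over simplified fields; GC_GEN_MAX_SKETCH, the mark enum and the plain int pin are illustrative stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GC_GEN_MAX_SKETCH 96   /* stand-in for BUCKET_GC_GEN_MAX */

enum mark_sketch { MARK_NONE, MARK_RECLAIMABLE, MARK_DIRTY, MARK_METADATA };

struct bucket_sketch {
    uint8_t gen, last_gc;
    int pin;                   /* stand-in for the atomic pin count */
    enum mark_sketch mark;
};

/* Reclaimable only if unmarked or explicitly reclaimable, unpinned,
 * and the generation can still be bumped without exceeding the GC distance. */
static bool can_invalidate(const struct bucket_sketch *b)
{
    uint8_t gc_gen = (uint8_t)(b->gen - b->last_gc);

    return (b->mark == MARK_NONE || b->mark == MARK_RECLAIMABLE) &&
           b->pin == 0 &&
           gc_gen < GC_GEN_MAX_SKETCH;
}

int main(void)
{
    struct bucket_sketch ok  = { .gen = 10, .last_gc = 10, .mark = MARK_RECLAIMABLE };
    struct bucket_sketch pin = { .gen = 10, .last_gc = 10, .pin = 1 };

    printf("%d %d\n", can_invalidate(&ok), can_invalidate(&pin));  /* 1 0 */
    return 0;
}
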
137 void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) in __bch_invalidate_one_bucket() argument
140 BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE); in __bch_invalidate_one_bucket()
142 if (GC_SECTORS_USED(b)) in __bch_invalidate_one_bucket()
143 trace_bcache_invalidate(ca, b - ca->buckets); in __bch_invalidate_one_bucket()
145 bch_inc_gen(ca, b); in __bch_invalidate_one_bucket()
146 b->prio = INITIAL_PRIO; in __bch_invalidate_one_bucket()
147 atomic_inc(&b->pin); in __bch_invalidate_one_bucket()
148 b->reclaimable_in_gc = 0; in __bch_invalidate_one_bucket()
151 static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) in bch_invalidate_one_bucket() argument
153 __bch_invalidate_one_bucket(ca, b); in bch_invalidate_one_bucket()
155 fifo_push(&ca->free_inc, b - ca->buckets); in bch_invalidate_one_bucket()
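
bch_invalidate_one_bucket() pushes the freed bucket's index onto the free_inc FIFO for the allocator thread to drain later. Here is a tiny ring-buffer sketch of that hand-off; fifo_sketch and its helpers are illustrations, not bcache's fifo implementation.

#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

#define FIFO_SIZE 8   /* power of two so the index mask works */

/* Illustrative ring buffer of bucket indexes. */
struct fifo_sketch {
    size_t front, back;
    long data[FIFO_SIZE];
};

static bool fifo_push_sketch(struct fifo_sketch *f, long bucket_index)
{
    if (f->back - f->front == FIFO_SIZE)
        return false;                          /* full */
    f->data[f->back++ & (FIFO_SIZE - 1)] = bucket_index;
    return true;
}

static bool fifo_pop_sketch(struct fifo_sketch *f, long *bucket_index)
{
    if (f->back == f->front)
        return false;                          /* empty */
    *bucket_index = f->data[f->front++ & (FIFO_SIZE - 1)];
    return true;
}

int main(void)
{
    struct fifo_sketch free_inc = { 0 };
    long b;

    fifo_push_sketch(&free_inc, 42);           /* invalidated bucket 42 */
    while (fifo_pop_sketch(&free_inc, &b))     /* allocator side drains it */
        printf("popped bucket %ld\n", b);
    return 0;
}
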
167 #define bucket_prio(b) \ argument
171 (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
179 struct bucket *b; in invalidate_buckets_lru() local
184 for_each_bucket(b, ca) { in invalidate_buckets_lru()
185 if (!bch_can_invalidate_bucket(ca, b)) in invalidate_buckets_lru()
189 heap_add(&ca->heap, b, bucket_max_cmp); in invalidate_buckets_lru()
190 else if (bucket_max_cmp(b, heap_peek(&ca->heap))) { in invalidate_buckets_lru()
191 ca->heap.data[0] = b; in invalidate_buckets_lru()
200 if (!heap_pop(&ca->heap, b, bucket_min_cmp)) { in invalidate_buckets_lru()
210 bch_invalidate_one_bucket(ca, b); in invalidate_buckets_lru()
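
invalidate_buckets_lru() keeps a bounded heap of candidate victims ordered by bucket_prio() (age-adjusted priority scaled by the sectors still in use) and then invalidates the cheapest ones first. The sketch below shows the same selection order with a plain qsort() over made-up values instead of the kernel heap; the weight() helper is only in the spirit of bucket_prio(), not a copy of it.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative fields only: prio is the cached-data priority, sectors_used
 * the amount of live data still in the bucket. */
struct bucket_sketch {
    long     index;
    uint16_t prio;
    unsigned sectors_used;
};

/* Colder (lower prio) and emptier buckets get a smaller key and go first. */
static unsigned long weight(const struct bucket_sketch *b, uint16_t min_prio)
{
    return (unsigned long)(b->prio - min_prio) * b->sectors_used;
}

static uint16_t g_min_prio;      /* stand-in for ca->set->min_prio */

static int cmp_weight(const void *a, const void *b)
{
    unsigned long wa = weight(a, g_min_prio), wb = weight(b, g_min_prio);

    return (wa > wb) - (wa < wb);
}

int main(void)
{
    struct bucket_sketch buckets[] = {
        { 0, 200, 1000 }, { 1, 150,   10 }, { 2, 180,  500 }, { 3, 150, 2000 },
    };
    size_t n = sizeof(buckets) / sizeof(buckets[0]);

    g_min_prio = 150;

    /* The kernel keeps a bounded heap; sorting a small array gives the
     * same order: cheapest victims first. */
    qsort(buckets, n, sizeof(buckets[0]), cmp_weight);

    for (size_t i = 0; i < n && i < 2; i++)   /* pretend free_inc has room for 2 */
        printf("invalidate bucket %ld\n", buckets[i].index);
    return 0;
}
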
216 struct bucket *b; in invalidate_buckets_fifo() local
224 b = ca->buckets + ca->fifo_last_bucket++; in invalidate_buckets_fifo()
226 if (bch_can_invalidate_bucket(ca, b)) in invalidate_buckets_fifo()
227 bch_invalidate_one_bucket(ca, b); in invalidate_buckets_fifo()
239 struct bucket *b; in invalidate_buckets_random() local
250 b = ca->buckets + n; in invalidate_buckets_random()
252 if (bch_can_invalidate_bucket(ca, b)) in invalidate_buckets_random()
253 bch_invalidate_one_bucket(ca, b); in invalidate_buckets_random()
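
The FIFO and random policies skip the weighting: one walks the bucket array with a wrapping cursor (fifo_last_bucket), the other probes random indexes, and both invalidate whatever passes the eligibility check. Below is a sketch of the wrapping-cursor walk; can_invalidate_sketch() and invalidate_sketch() are stubs standing in for the real bch_* calls.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NBUCKETS 16

static size_t fifo_last_bucket;            /* persistent wrapping cursor */

static bool can_invalidate_sketch(size_t i) { return (i % 3) == 0; }  /* fake check */
static void invalidate_sketch(size_t i)     { printf("invalidate %zu\n", i); }

/* Walk the array in order, wrapping back to 0, and invalidate up to
 * `wanted` eligible buckets per pass. */
static void invalidate_fifo_sketch(size_t wanted)
{
    for (size_t scanned = 0; scanned < NBUCKETS && wanted; scanned++) {
        if (fifo_last_bucket >= NBUCKETS)
            fifo_last_bucket = 0;

        size_t i = fifo_last_bucket++;

        if (can_invalidate_sketch(i)) {
            invalidate_sketch(i);
            wanted--;
        }
    }
}

int main(void)
{
    invalidate_fifo_sketch(3);   /* buckets 0, 3, 6 */
    invalidate_fifo_sketch(3);   /* continues from where the cursor stopped */
    return 0;
}
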
392 struct bucket *b; in bch_bucket_alloc() local
442 b = ca->buckets + r; in bch_bucket_alloc()
444 BUG_ON(atomic_read(&b->pin) != 1); in bch_bucket_alloc()
446 SET_GC_SECTORS_USED(b, ca->sb.bucket_size); in bch_bucket_alloc()
449 SET_GC_MARK(b, GC_MARK_METADATA); in bch_bucket_alloc()
450 SET_GC_MOVE(b, 0); in bch_bucket_alloc()
451 b->prio = BTREE_PRIO; in bch_bucket_alloc()
453 SET_GC_MARK(b, GC_MARK_RECLAIMABLE); in bch_bucket_alloc()
454 SET_GC_MOVE(b, 0); in bch_bucket_alloc()
455 b->prio = INITIAL_PRIO; in bch_bucket_alloc()
466 void __bch_bucket_free(struct cache *ca, struct bucket *b) in __bch_bucket_free() argument
468 SET_GC_MARK(b, 0); in __bch_bucket_free()
469 SET_GC_SECTORS_USED(b, 0); in __bch_bucket_free()
489 long b; in __bch_bucket_alloc_set() local
500 b = bch_bucket_alloc(ca, reserve, wait); in __bch_bucket_alloc_set()
501 if (b < 0) in __bch_bucket_alloc_set()
504 k->ptr[0] = MAKE_PTR(ca->buckets[b].gen, in __bch_bucket_alloc_set()
505 bucket_to_sector(c, b), in __bch_bucket_alloc_set()
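
__bch_bucket_alloc_set() turns the bucket index returned by bch_bucket_alloc() into an on-disk pointer built from the bucket's current generation and its starting sector; bucket_to_sector() amounts to a shift by the cache set's bucket_bits (log2 of the bucket size in sectors). A small sketch of that conversion with made-up geometry; geom_sketch and the numbers are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Illustrative geometry: bucket size is a power-of-two number of 512-byte
 * sectors, so the conversion is a shift. */
struct geom_sketch {
    unsigned bucket_bits;       /* log2(bucket size in sectors) */
};

static uint64_t bucket_to_sector_sketch(const struct geom_sketch *g, long bucket)
{
    return (uint64_t)bucket << g->bucket_bits;
}

int main(void)
{
    struct geom_sketch g = { .bucket_bits = 10 };   /* 1024-sector (512 KiB) buckets */
    long b = 123;                                   /* index from the allocator */
    uint8_t gen = 7;                                /* the bucket's current generation */

    printf("ptr: gen=%u offset=%llu\n",
           (unsigned)gen,
           (unsigned long long)bucket_to_sector_sketch(&g, b));
    return 0;
}
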
606 struct open_bucket *b; in bch_alloc_sectors() local
620 while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) { in bch_alloc_sectors()
641 for (i = 0; i < KEY_PTRS(&b->key); i++) in bch_alloc_sectors()
642 EBUG_ON(ptr_stale(c, &b->key, i)); in bch_alloc_sectors()
646 for (i = 0; i < KEY_PTRS(&b->key); i++) in bch_alloc_sectors()
647 k->ptr[i] = b->key.ptr[i]; in bch_alloc_sectors()
649 sectors = min(sectors, b->sectors_free); in bch_alloc_sectors()
653 SET_KEY_PTRS(k, KEY_PTRS(&b->key)); in bch_alloc_sectors()
659 list_move_tail(&b->list, &c->data_buckets); in bch_alloc_sectors()
660 bkey_copy_key(&b->key, k); in bch_alloc_sectors()
661 b->last_write_point = write_point; in bch_alloc_sectors()
663 b->sectors_free -= sectors; in bch_alloc_sectors()
665 for (i = 0; i < KEY_PTRS(&b->key); i++) { in bch_alloc_sectors()
666 SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors); in bch_alloc_sectors()
672 if (b->sectors_free < c->cache->sb.block_size) in bch_alloc_sectors()
673 b->sectors_free = 0; in bch_alloc_sectors()
680 if (b->sectors_free) in bch_alloc_sectors()
681 for (i = 0; i < KEY_PTRS(&b->key); i++) in bch_alloc_sectors()
682 atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin); in bch_alloc_sectors()
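
The bch_alloc_sectors() hits show the bookkeeping on an open bucket: clamp the request to what is left, advance each pointer's offset by the amount granted, and close the bucket once less than a full block remains. Below is a userspace sketch of just that arithmetic; open_bucket_sketch and its fields are simplified stand-ins.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for an open bucket: space left plus one pointer offset. */
struct open_bucket_sketch {
    unsigned sectors_free;
    uint64_t ptr_offset;
};

/* Grant up to `want` sectors and return how many were actually granted;
 * close the bucket when less than one block remains. */
static unsigned alloc_sectors_sketch(struct open_bucket_sketch *b,
                                     unsigned want, unsigned block_size)
{
    unsigned granted = want < b->sectors_free ? want : b->sectors_free;

    b->ptr_offset   += granted;      /* like advancing PTR_OFFSET by sectors */
    b->sectors_free -= granted;

    if (b->sectors_free < block_size)
        b->sectors_free = 0;         /* too small to keep the bucket open */

    return granted;
}

int main(void)
{
    struct open_bucket_sketch b = { .sectors_free = 1024, .ptr_offset = 8192 };
    unsigned granted = alloc_sectors_sketch(&b, 1000, 32);

    /* 1000 granted; the 24 sectors left are under one 32-sector block, so 0 remain. */
    printf("granted=%u sectors_free=%u offset=%llu\n",
           granted, b.sectors_free, (unsigned long long)b.ptr_offset);
    return 0;
}
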
692 struct open_bucket *b; in bch_open_buckets_free() local
695 b = list_first_entry(&c->data_buckets, in bch_open_buckets_free()
697 list_del(&b->list); in bch_open_buckets_free()
698 kfree(b); in bch_open_buckets_free()
709 struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL); in bch_open_buckets_alloc() local
711 if (!b) in bch_open_buckets_alloc()
714 list_add(&b->list, &c->data_buckets); in bch_open_buckets_alloc()
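
bch_open_buckets_alloc() and bch_open_buckets_free() maintain a small pool of open_bucket descriptors on c->data_buckets, allocated up front and torn down at shutdown. Here is a minimal userspace analogue using a hand-rolled singly linked list in place of the kernel's list_head; the pool size of 6 is illustrative.

#include <stdlib.h>
#include <stdio.h>

/* Userspace analogue of an open_bucket sitting on a pool list. */
struct open_bucket_sketch {
    unsigned sectors_free;
    struct open_bucket_sketch *next;
};

struct pool_sketch {
    struct open_bucket_sketch *head;
};

static int pool_alloc_sketch(struct pool_sketch *p, int count)
{
    for (int i = 0; i < count; i++) {
        struct open_bucket_sketch *b = calloc(1, sizeof(*b));

        if (!b)
            return -1;              /* like bailing out with -ENOMEM */
        b->next = p->head;          /* like list_add(&b->list, &c->data_buckets) */
        p->head = b;
    }
    return 0;
}

static void pool_free_sketch(struct pool_sketch *p)
{
    while (p->head) {               /* like list_first_entry + list_del + kfree */
        struct open_bucket_sketch *b = p->head;

        p->head = b->next;
        free(b);
    }
}

int main(void)
{
    struct pool_sketch pool = { 0 };

    if (pool_alloc_sketch(&pool, 6) == 0)
        puts("allocated open bucket pool");
    pool_free_sketch(&pool);
    return 0;
}
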