Lines matching refs: b (uses of the identifier b in dm-bufio.c, almost all of them struct dm_buffer *b)
350 void (*end_io)(struct dm_buffer *b, blk_status_t bs);
500 static void lh_next(struct lock_history *lh, sector_t b) in lh_next() argument
502 unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */ in lh_next()
585 struct dm_buffer *b; in __cache_get() local
588 b = container_of(n, struct dm_buffer, node); in __cache_get()
590 if (b->block == block) in __cache_get()
591 return b; in __cache_get()
593 n = block < b->block ? n->rb_left : n->rb_right; in __cache_get()
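The fragments at 585-593 are the body of a standard rb-tree walk keyed by block number. Assembled into a full function (the loop header, declarations, and the not-found return are not part of this listing and are filled in here as a sketch):

	static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block)
	{
		struct rb_node *n = root->rb_node;
		struct dm_buffer *b;

		while (n) {
			b = container_of(n, struct dm_buffer, node);

			if (b->block == block)
				return b;

			/* descend left for smaller keys, right for larger ones */
			n = block < b->block ? n->rb_left : n->rb_right;
		}

		return NULL;	/* block is not cached */
	}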
599 static void __cache_inc_buffer(struct dm_buffer *b) in __cache_inc_buffer() argument
601 atomic_inc(&b->hold_count); in __cache_inc_buffer()
602 WRITE_ONCE(b->last_accessed, jiffies); in __cache_inc_buffer()
607 struct dm_buffer *b; in cache_get() local
610 b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block); in cache_get()
611 if (b) { in cache_get()
612 lru_reference(&b->lru); in cache_get()
613 __cache_inc_buffer(b); in cache_get()
617 return b; in cache_get()
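cache_get() (607-617) wraps that lookup: take the per-tree read lock, look the block up, and if it is found touch its LRU entry and bump hold_count before dropping the lock. A sketch; the signature and the unlock call are inferred from the matching cache_read_lock()/cache_read_unlock() pair visible at 630/633:

	static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
	{
		struct dm_buffer *b;

		cache_read_lock(bc, block);
		b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
		if (b) {
			lru_reference(&b->lru);	/* keep the buffer hot in its LRU */
			__cache_inc_buffer(b);	/* hold_count++, refresh last_accessed */
		}
		cache_read_unlock(bc, block);

		return b;
	}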
626 static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b) in cache_put() argument
630 cache_read_lock(bc, b->block); in cache_put()
631 BUG_ON(!atomic_read(&b->hold_count)); in cache_put()
632 r = atomic_dec_and_test(&b->hold_count); in cache_put()
633 cache_read_unlock(bc, b->block); in cache_put()
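The matching cache_put() (626-633) only needs a return value added: it drops one hold under the same per-block read lock and reports whether that was the last reference (a sketch):

	static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
	{
		bool r;

		cache_read_lock(bc, b->block);
		BUG_ON(!atomic_read(&b->hold_count));	/* putting an unheld buffer is a bug */
		r = atomic_dec_and_test(&b->hold_count);
		cache_read_unlock(bc, b->block);

		return r;	/* true if this was the last hold */
	}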
660 struct dm_buffer *b = le_to_buffer(le); in __evict_pred() local
662 lh_next(w->lh, b->block); in __evict_pred()
664 if (atomic_read(&b->hold_count)) in __evict_pred()
667 return w->pred(b, w->context); in __evict_pred()
676 struct dm_buffer *b; in __cache_evict() local
682 b = le_to_buffer(le); in __cache_evict()
684 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root); in __cache_evict()
686 return b; in __cache_evict()
692 struct dm_buffer *b; in cache_evict() local
696 b = __cache_evict(bc, list_mode, pred, context, &lh); in cache_evict()
699 return b; in cache_evict()
707 static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode) in cache_mark() argument
709 cache_write_lock(bc, b->block); in cache_mark()
710 if (list_mode != b->list_mode) { in cache_mark()
711 lru_remove(&bc->lru[b->list_mode], &b->lru); in cache_mark()
712 b->list_mode = list_mode; in cache_mark()
713 lru_insert(&bc->lru[b->list_mode], &b->lru); in cache_mark()
715 cache_write_unlock(bc, b->block); in cache_mark()
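Assembled as written (only the braces are missing from the listing), the fragments at 707-715 form cache_mark(): under the per-block write lock, a buffer whose list mode changes is unlinked from its old LRU and inserted into the new one:

	static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
	{
		cache_write_lock(bc, b->block);
		if (list_mode != b->list_mode) {
			lru_remove(&bc->lru[b->list_mode], &b->lru);
			b->list_mode = list_mode;
			lru_insert(&bc->lru[b->list_mode], &b->lru);
		}
		cache_write_unlock(bc, b->block);
	}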
728 struct dm_buffer *b; in __cache_mark_many() local
736 b = le_to_buffer(le); in __cache_mark_many()
737 b->list_mode = new_mode; in __cache_mark_many()
738 lru_insert(&bc->lru[b->list_mode], &b->lru); in __cache_mark_many()
768 typedef enum it_action (*iter_fn)(struct dm_buffer *b, void *context);
781 struct dm_buffer *b = le_to_buffer(le);
783 lh_next(lh, b->block);
785 switch (fn(b, context)) {
819 static bool __cache_insert(struct rb_root *root, struct dm_buffer *b) in __cache_insert() argument
827 if (found->block == b->block) in __cache_insert()
831 new = b->block < found->block ? in __cache_insert()
835 rb_link_node(&b->node, parent, new); in __cache_insert()
836 rb_insert_color(&b->node, root); in __cache_insert()
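Lines 819-836 are the insertion counterpart of the lookup above: walk down the tree comparing block numbers, refuse duplicates, then link and re-colour the new node. The walk itself and the local declarations are not in this listing, so the following is a sketch of how the shown lines fit together:

	static bool __cache_insert(struct rb_root *root, struct dm_buffer *b)
	{
		struct rb_node **new = &root->rb_node, *parent = NULL;
		struct dm_buffer *found;

		while (*new) {
			found = container_of(*new, struct dm_buffer, node);

			if (found->block == b->block)
				return false;	/* the block is already cached */

			parent = *new;
			new = b->block < found->block ?
				&found->node.rb_left : &found->node.rb_right;
		}

		rb_link_node(&b->node, parent, new);
		rb_insert_color(&b->node, root);

		return true;
	}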
841 static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b) in cache_insert() argument
845 if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE)) in cache_insert()
848 cache_write_lock(bc, b->block); in cache_insert()
849 BUG_ON(atomic_read(&b->hold_count) != 1); in cache_insert()
850 r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b); in cache_insert()
852 lru_insert(&bc->lru[b->list_mode], &b->lru); in cache_insert()
853 cache_write_unlock(bc, b->block); in cache_insert()
866 static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b) in cache_remove() argument
870 cache_write_lock(bc, b->block); in cache_remove()
872 if (atomic_read(&b->hold_count) != 1) { in cache_remove()
876 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root); in cache_remove()
877 lru_remove(&bc->lru[b->list_mode], &b->lru); in cache_remove()
880 cache_write_unlock(bc, b->block); in cache_remove()
892 struct dm_buffer *b; in __find_next() local
896 b = container_of(n, struct dm_buffer, node); in __find_next()
898 if (b->block == block) in __find_next()
899 return b; in __find_next()
901 if (block <= b->block) { in __find_next()
903 best = b; in __find_next()
917 struct dm_buffer *b; in __remove_range() local
922 b = __find_next(root, begin); in __remove_range()
923 if (!b || (b->block >= end)) in __remove_range()
926 begin = b->block + 1; in __remove_range()
928 if (atomic_read(&b->hold_count)) in __remove_range()
931 if (pred(b, NULL) == ER_EVICT) { in __remove_range()
932 rb_erase(&b->node, root); in __remove_range()
933 lru_remove(&bc->lru[b->list_mode], &b->lru); in __remove_range()
934 release(b); in __remove_range()
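__remove_range() (917-934) reads as a cursor loop over one tree: __find_next() returns the first buffer at or after begin, the loop stops once it passes end, buffers that are still held are skipped, and the rest are erased from the tree and LRU and handed to release(). The loop scaffolding and the b_predicate/b_release parameter types are not in this listing and are assumed in the sketch below:

	static void __remove_range(struct dm_buffer_cache *bc, struct rb_root *root,
				   sector_t begin, sector_t end,
				   b_predicate pred, b_release release)
	{
		struct dm_buffer *b;

		while (true) {
			b = __find_next(root, begin);
			if (!b || (b->block >= end))
				break;

			begin = b->block + 1;	/* advance the cursor past this buffer */

			if (atomic_read(&b->hold_count))
				continue;	/* never remove a buffer someone holds */

			if (pred(b, NULL) == ER_EVICT) {
				rb_erase(&b->node, root);
				lru_remove(&bc->lru[b->list_mode], &b->lru);
				release(b);
			}
		}
	}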
1083 static void buffer_record_stack(struct dm_buffer *b) in buffer_record_stack() argument
1085 b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2); in buffer_record_stack()
1091 static void adjust_total_allocated(struct dm_buffer *b, bool unlink) in adjust_total_allocated() argument
1103 data_mode = b->data_mode; in adjust_total_allocated()
1104 diff = (long)b->c->block_size; in adjust_total_allocated()
1229 struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask); in alloc_buffer() local
1231 if (!b) in alloc_buffer()
1234 b->c = c; in alloc_buffer()
1236 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode); in alloc_buffer()
1237 if (!b->data) { in alloc_buffer()
1238 kmem_cache_free(c->slab_buffer, b); in alloc_buffer()
1241 adjust_total_allocated(b, false); in alloc_buffer()
1244 b->stack_len = 0; in alloc_buffer()
1246 return b; in alloc_buffer()
1252 static void free_buffer(struct dm_buffer *b) in free_buffer() argument
1254 struct dm_bufio_client *c = b->c; in free_buffer()
1256 adjust_total_allocated(b, true); in free_buffer()
1257 free_buffer_data(c, b->data, b->data_mode); in free_buffer()
1258 kmem_cache_free(c->slab_buffer, b); in free_buffer()
1287 struct dm_buffer *b = context; in dmio_complete() local
1289 b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0); in dmio_complete()
1292 static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector, in use_dmio() argument
1300 .notify.context = b, in use_dmio()
1301 .client = b->c->dm_io, in use_dmio()
1304 .bdev = b->c->bdev, in use_dmio()
1309 if (b->data_mode != DATA_MODE_VMALLOC) { in use_dmio()
1311 io_req.mem.ptr.addr = (char *)b->data + offset; in use_dmio()
1314 io_req.mem.ptr.vma = (char *)b->data + offset; in use_dmio()
1319 b->end_io(b, errno_to_blk_status(r)); in use_dmio()
1324 struct dm_buffer *b = bio->bi_private; in bio_complete() local
1329 b->end_io(b, status); in bio_complete()
1332 static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector, in use_bio() argument
1342 use_dmio(b, op, sector, n_sectors, offset, ioprio); in use_bio()
1345 bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op); in use_bio()
1348 bio->bi_private = b; in use_bio()
1351 ptr = (char *)b->data + offset; in use_bio()
1372 static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio, in submit_io() argument
1379 b->end_io = end_io; in submit_io()
1381 sector = block_to_sector(b->c, b->block); in submit_io()
1384 n_sectors = b->c->block_size >> SECTOR_SHIFT; in submit_io()
1387 if (b->c->write_callback) in submit_io()
1388 b->c->write_callback(b); in submit_io()
1389 offset = b->write_start; in submit_io()
1390 end = b->write_end; in submit_io()
1394 if (unlikely(end > b->c->block_size)) in submit_io()
1395 end = b->c->block_size; in submit_io()
1401 if (b->data_mode != DATA_MODE_VMALLOC) in submit_io()
1402 use_bio(b, op, sector, n_sectors, offset, ioprio); in submit_io()
1404 use_dmio(b, op, sector, n_sectors, offset, ioprio); in submit_io()
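submit_io() (1372-1404) is the common dispatch for both directions: reads always cover the full block, writes are trimmed to the recorded write_start..write_end range (clamped to block_size), and buffers whose data lives in vmalloc space go through dm-io instead of a plain bio. The branch structure, the write-range alignment between lines 1390 and 1394, and the sector/offset arithmetic are not all visible in this listing, so this is a simplified reconstruction:

	/* Simplified sketch; the write-range alignment done between lines 1390
	 * and 1394 of the source is omitted here. */
	static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio,
			      void (*end_io)(struct dm_buffer *, blk_status_t))
	{
		sector_t sector;
		unsigned int n_sectors, offset;

		b->end_io = end_io;
		sector = block_to_sector(b->c, b->block);

		if (op != REQ_OP_WRITE) {
			/* reads transfer the whole block */
			offset = 0;
			n_sectors = b->c->block_size >> SECTOR_SHIFT;
		} else {
			unsigned int end;

			if (b->c->write_callback)
				b->c->write_callback(b);	/* let the client fix up the data */
			offset = b->write_start;
			end = b->write_end;
			if (unlikely(end > b->c->block_size))
				end = b->c->block_size;
			sector += offset >> SECTOR_SHIFT;
			n_sectors = (end - offset) >> SECTOR_SHIFT;
		}

		if (b->data_mode != DATA_MODE_VMALLOC)
			use_bio(b, op, sector, n_sectors, offset, ioprio);
		else
			use_dmio(b, op, sector, n_sectors, offset, ioprio);
	}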
1419 static void write_endio(struct dm_buffer *b, blk_status_t status) in write_endio() argument
1421 b->write_error = status; in write_endio()
1423 struct dm_bufio_client *c = b->c; in write_endio()
1429 BUG_ON(!test_bit(B_WRITING, &b->state)); in write_endio()
1432 clear_bit(B_WRITING, &b->state); in write_endio()
1435 wake_up_bit(&b->state, B_WRITING); in write_endio()
1447 static void __write_dirty_buffer(struct dm_buffer *b, in __write_dirty_buffer() argument
1450 if (!test_bit(B_DIRTY, &b->state)) in __write_dirty_buffer()
1453 clear_bit(B_DIRTY, &b->state); in __write_dirty_buffer()
1454 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in __write_dirty_buffer()
1456 b->write_start = b->dirty_start; in __write_dirty_buffer()
1457 b->write_end = b->dirty_end; in __write_dirty_buffer()
1460 submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio); in __write_dirty_buffer()
1462 list_add_tail(&b->write_list, write_list); in __write_dirty_buffer()
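__write_dirty_buffer() (1447-1462) is the write-back handoff: a buffer that is not dirty is left alone; otherwise the dirty bit is cleared, B_WRITING is taken as a bit-lock (waiting out any write already in flight), the dirty range is snapshotted into write_start/write_end, and the I/O is either submitted immediately or queued on the caller's write_list. The branch between 1460 and 1462 is inferred to key off whether a write_list was supplied:

	static void __write_dirty_buffer(struct dm_buffer *b,
					 struct list_head *write_list)
	{
		if (!test_bit(B_DIRTY, &b->state))
			return;

		clear_bit(B_DIRTY, &b->state);
		wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

		/* freeze the dirty range for this write; later dirtying starts a fresh range */
		b->write_start = b->dirty_start;
		b->write_end = b->dirty_end;

		if (!write_list)
			submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
		else
			list_add_tail(&b->write_list, write_list);
	}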
1471 struct dm_buffer *b = in __flush_write_list() local
1473 list_del(&b->write_list); in __flush_write_list()
1474 submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio); in __flush_write_list()
1485 static void __make_buffer_clean(struct dm_buffer *b) in __make_buffer_clean() argument
1487 BUG_ON(atomic_read(&b->hold_count)); in __make_buffer_clean()
1490 if (!smp_load_acquire(&b->state)) /* fast case */ in __make_buffer_clean()
1493 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); in __make_buffer_clean()
1494 __write_dirty_buffer(b, NULL); in __make_buffer_clean()
1495 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in __make_buffer_clean()
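Assembled (only the early return and braces are missing from the listing), 1485-1495 give __make_buffer_clean(): it must only run on unheld buffers, takes a fast exit when no state bits are set, and otherwise waits out a read, forces out any dirty data, and waits for the resulting write:

	static void __make_buffer_clean(struct dm_buffer *b)
	{
		BUG_ON(atomic_read(&b->hold_count));

		if (!smp_load_acquire(&b->state))	/* fast case: no I/O, not dirty */
			return;

		wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
		__write_dirty_buffer(b, NULL);
		wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
	}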
1498 static enum evict_result is_clean(struct dm_buffer *b, void *context) in is_clean() argument
1503 if (WARN_ON_ONCE(test_bit(B_WRITING, &b->state))) in is_clean()
1505 if (WARN_ON_ONCE(test_bit(B_DIRTY, &b->state))) in is_clean()
1507 if (WARN_ON_ONCE(b->list_mode != LIST_CLEAN)) in is_clean()
1511 unlikely(test_bit(B_READING, &b->state))) in is_clean()
1517 static enum evict_result is_dirty(struct dm_buffer *b, void *context) in is_dirty() argument
1520 if (WARN_ON_ONCE(test_bit(B_READING, &b->state))) in is_dirty()
1522 if (WARN_ON_ONCE(b->list_mode != LIST_DIRTY)) in is_dirty()
1534 struct dm_buffer *b; in __get_unclaimed_buffer() local
1536 b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c); in __get_unclaimed_buffer()
1537 if (b) { in __get_unclaimed_buffer()
1539 __make_buffer_clean(b); in __get_unclaimed_buffer()
1540 return b; in __get_unclaimed_buffer()
1546 b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL); in __get_unclaimed_buffer()
1547 if (b) { in __get_unclaimed_buffer()
1548 __make_buffer_clean(b); in __get_unclaimed_buffer()
1549 return b; in __get_unclaimed_buffer()
1597 struct dm_buffer *b; in __alloc_buffer_wait_no_callback() local
1615 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
1616 if (b) in __alloc_buffer_wait_no_callback()
1617 return b; in __alloc_buffer_wait_no_callback()
1625 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
1627 if (b) in __alloc_buffer_wait_no_callback()
1628 return b; in __alloc_buffer_wait_no_callback()
1633 b = list_to_buffer(c->reserved_buffers.next); in __alloc_buffer_wait_no_callback()
1634 list_del(&b->lru.list); in __alloc_buffer_wait_no_callback()
1637 return b; in __alloc_buffer_wait_no_callback()
1640 b = __get_unclaimed_buffer(c); in __alloc_buffer_wait_no_callback()
1641 if (b) in __alloc_buffer_wait_no_callback()
1642 return b; in __alloc_buffer_wait_no_callback()
1650 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf); in __alloc_buffer_wait() local
1652 if (!b) in __alloc_buffer_wait()
1656 c->alloc_callback(b); in __alloc_buffer_wait()
1658 return b; in __alloc_buffer_wait()
1664 static void __free_buffer_wake(struct dm_buffer *b) in __free_buffer_wake() argument
1666 struct dm_bufio_client *c = b->c; in __free_buffer_wake()
1668 b->block = -1; in __free_buffer_wake()
1670 free_buffer(b); in __free_buffer_wake()
1672 list_add(&b->lru.list, &c->reserved_buffers); in __free_buffer_wake()
1684 static enum evict_result cleaned(struct dm_buffer *b, void *context) in cleaned() argument
1686 if (WARN_ON_ONCE(test_bit(B_READING, &b->state))) in cleaned()
1689 if (test_bit(B_DIRTY, &b->state) || test_bit(B_WRITING, &b->state)) in cleaned()
1705 static enum it_action write_one(struct dm_buffer *b, void *context) in write_one() argument
1709 if (wc->no_wait && test_bit(B_WRITING, &b->state)) in write_one()
1712 __write_dirty_buffer(b, wc->write_list); in write_one()
1744 static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b) in cache_put_and_wake() argument
1750 if (cache_put(&c->cache, b) && in cache_put_and_wake()
1763 struct dm_buffer *b, *new_b = NULL; in __bufio_new() local
1779 b = cache_get(&c->cache, block); in __bufio_new()
1780 if (b) { in __bufio_new()
1787 b = new_b; in __bufio_new()
1788 atomic_set(&b->hold_count, 1); in __bufio_new()
1789 WRITE_ONCE(b->last_accessed, jiffies); in __bufio_new()
1790 b->block = block; in __bufio_new()
1791 b->read_error = 0; in __bufio_new()
1792 b->write_error = 0; in __bufio_new()
1793 b->list_mode = LIST_CLEAN; in __bufio_new()
1796 b->state = 0; in __bufio_new()
1798 b->state = 1 << B_READING; in __bufio_new()
1807 cache_insert(&c->cache, b); in __bufio_new()
1809 return b; in __bufio_new()
1813 cache_put_and_wake(c, b); in __bufio_new()
1824 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) { in __bufio_new()
1825 cache_put_and_wake(c, b); in __bufio_new()
1829 return b; in __bufio_new()
1836 static void read_endio(struct dm_buffer *b, blk_status_t status) in read_endio() argument
1838 b->read_error = status; in read_endio()
1840 BUG_ON(!test_bit(B_READING, &b->state)); in read_endio()
1843 clear_bit(B_READING, &b->state); in read_endio()
1846 wake_up_bit(&b->state, B_READING); in read_endio()
1860 struct dm_buffer *b; in new_read() local
1870 b = cache_get(&c->cache, block); in new_read()
1871 if (b) { in new_read()
1873 cache_put_and_wake(c, b); in new_read()
1884 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) { in new_read()
1885 cache_put_and_wake(c, b); in new_read()
1890 if (!b) { in new_read()
1895 b = __bufio_new(c, block, nf, &need_submit, &write_list); in new_read()
1900 if (b && (atomic_read(&b->hold_count) == 1)) in new_read()
1901 buffer_record_stack(b); in new_read()
1906 if (!b) in new_read()
1910 submit_io(b, REQ_OP_READ, ioprio, read_endio); in new_read()
1913 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); in new_read()
1915 if (b->read_error) { in new_read()
1916 int error = blk_status_to_errno(b->read_error); in new_read()
1918 dm_bufio_release(b); in new_read()
1923 *bp = b; in new_read()
1925 return b->data; in new_read()
1983 struct dm_buffer *b; in __dm_bufio_prefetch() local
1985 b = cache_get(&c->cache, block); in __dm_bufio_prefetch()
1986 if (b) { in __dm_bufio_prefetch()
1988 cache_put_and_wake(c, b); in __dm_bufio_prefetch()
1993 b = __bufio_new(c, block, NF_PREFETCH, &need_submit, in __dm_bufio_prefetch()
2002 if (unlikely(b != NULL)) { in __dm_bufio_prefetch()
2006 submit_io(b, REQ_OP_READ, ioprio, read_endio); in __dm_bufio_prefetch()
2007 dm_bufio_release(b); in __dm_bufio_prefetch()
2035 void dm_bufio_release(struct dm_buffer *b) in dm_bufio_release() argument
2037 struct dm_bufio_client *c = b->c; in dm_bufio_release()
2044 if ((b->read_error || b->write_error) && in dm_bufio_release()
2045 !test_bit_acquire(B_READING, &b->state) && in dm_bufio_release()
2046 !test_bit(B_WRITING, &b->state) && in dm_bufio_release()
2047 !test_bit(B_DIRTY, &b->state)) { in dm_bufio_release()
2051 if (cache_remove(&c->cache, b)) { in dm_bufio_release()
2052 __free_buffer_wake(b); in dm_bufio_release()
2060 cache_put_and_wake(c, b); in dm_bufio_release()
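dm_bufio_release() (2035-2060) distinguishes two outcomes for a put: a buffer that hit a read or write error and has settled (no read, write, or dirty bit set) is worthless as cache, so it is removed and freed; anything else just drops the hold and wakes waiters. The client-level locking around the error branch is not part of this listing, so the sketch below omits it:

	void dm_bufio_release(struct dm_buffer *b)
	{
		struct dm_bufio_client *c = b->c;

		if ((b->read_error || b->write_error) &&
		    !test_bit_acquire(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			/* cache_remove() can still fail if another holder exists */
			if (cache_remove(&c->cache, b)) {
				__free_buffer_wake(b);
				return;
			}
		}

		cache_put_and_wake(c, b);
	}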
2064 void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, in dm_bufio_mark_partial_buffer_dirty() argument
2067 struct dm_bufio_client *c = b->c; in dm_bufio_mark_partial_buffer_dirty()
2070 BUG_ON(end > b->c->block_size); in dm_bufio_mark_partial_buffer_dirty()
2074 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_mark_partial_buffer_dirty()
2076 if (!test_and_set_bit(B_DIRTY, &b->state)) { in dm_bufio_mark_partial_buffer_dirty()
2077 b->dirty_start = start; in dm_bufio_mark_partial_buffer_dirty()
2078 b->dirty_end = end; in dm_bufio_mark_partial_buffer_dirty()
2079 cache_mark(&c->cache, b, LIST_DIRTY); in dm_bufio_mark_partial_buffer_dirty()
2081 if (start < b->dirty_start) in dm_bufio_mark_partial_buffer_dirty()
2082 b->dirty_start = start; in dm_bufio_mark_partial_buffer_dirty()
2083 if (end > b->dirty_end) in dm_bufio_mark_partial_buffer_dirty()
2084 b->dirty_end = end; in dm_bufio_mark_partial_buffer_dirty()
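dm_bufio_mark_partial_buffer_dirty() (2064-2084) keeps one dirty byte range per buffer: the first dirtier of a clean buffer records the range and moves the buffer onto the dirty LRU via cache_mark(); later callers only widen the existing range. The client lock taken around this and the exact signature are not shown in the listing, so this is a sketch:

	void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
						unsigned int start, unsigned int end)
	{
		struct dm_bufio_client *c = b->c;

		BUG_ON(end > b->c->block_size);
		BUG_ON(test_bit(B_READING, &b->state));	/* never dirty a buffer still being read */

		if (!test_and_set_bit(B_DIRTY, &b->state)) {
			/* first dirtier: record the range and move to the dirty list */
			b->dirty_start = start;
			b->dirty_end = end;
			cache_mark(&c->cache, b, LIST_DIRTY);
		} else {
			/* already dirty: just widen the tracked range */
			if (start < b->dirty_start)
				b->dirty_start = start;
			if (end > b->dirty_end)
				b->dirty_end = end;
		}
	}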
2091 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b) in dm_bufio_mark_buffer_dirty() argument
2093 dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size); in dm_bufio_mark_buffer_dirty()
2120 struct dm_buffer *b = le_to_buffer(e); in is_writing() local
2122 return test_bit(B_WRITING, &b->state); in is_writing()
2143 struct dm_buffer *b = le_to_buffer(e); in dm_bufio_write_dirty_buffers() local
2144 __cache_inc_buffer(b); in dm_bufio_write_dirty_buffers()
2146 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_write_dirty_buffers()
2151 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in dm_bufio_write_dirty_buffers()
2154 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in dm_bufio_write_dirty_buffers()
2157 if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state)) in dm_bufio_write_dirty_buffers()
2158 cache_mark(&c->cache, b, LIST_CLEAN); in dm_bufio_write_dirty_buffers()
2160 cache_put_and_wake(c, b); in dm_bufio_write_dirty_buffers()
2228 struct dm_buffer *b; in forget_buffer() local
2230 b = cache_get(&c->cache, block); in forget_buffer()
2231 if (b) { in forget_buffer()
2232 if (likely(!smp_load_acquire(&b->state))) { in forget_buffer()
2233 if (cache_remove(&c->cache, b)) in forget_buffer()
2234 __free_buffer_wake(b); in forget_buffer()
2236 cache_put_and_wake(c, b); in forget_buffer()
2238 cache_put_and_wake(c, b); in forget_buffer()
2257 static enum evict_result idle(struct dm_buffer *b, void *context) in idle() argument
2259 return b->state ? ER_DONT_EVICT : ER_EVICT; in idle()
2304 sector_t dm_bufio_get_block_number(struct dm_buffer *b) in dm_bufio_get_block_number() argument
2306 return b->block; in dm_bufio_get_block_number()
2310 void *dm_bufio_get_block_data(struct dm_buffer *b) in dm_bufio_get_block_data() argument
2312 return b->data; in dm_bufio_get_block_data()
2316 void *dm_bufio_get_aux_data(struct dm_buffer *b) in dm_bufio_get_aux_data() argument
2318 return b + 1; in dm_bufio_get_aux_data()
2322 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b) in dm_bufio_get_client() argument
2324 return b->c; in dm_bufio_get_client()
2328 static enum it_action warn_leak(struct dm_buffer *b, void *context) in warn_leak() argument
2335 (unsigned long long)b->block, atomic_read(&b->hold_count), b->list_mode); in warn_leak()
2337 stack_trace_print(b->stack_entries, b->stack_len, 1); in warn_leak()
2339 atomic_set(&b->hold_count, 0); in warn_leak()
2347 struct dm_buffer *b; in drop_buffers() local
2359 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
2360 __free_buffer_wake(b); in drop_buffers()
2369 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
2370 __free_buffer_wake(b); in drop_buffers()
2394 struct dm_buffer *b; in __scan() local
2406 b = cache_evict(&c->cache, l, in __scan()
2408 if (!b) in __scan()
2411 __make_buffer_clean(b); in __scan()
2412 __free_buffer_wake(b); in __scan()
2552 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL); in dm_bufio_client_create() local
2554 if (!b) { in dm_bufio_client_create()
2558 __free_buffer_wake(b); in dm_bufio_client_create()
2589 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next); in dm_bufio_client_create() local
2591 list_del(&b->lru.list); in dm_bufio_client_create()
2592 free_buffer(b); in dm_bufio_client_create()
2631 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next); in dm_bufio_client_destroy() local
2633 list_del(&b->lru.list); in dm_bufio_client_destroy()
2634 free_buffer(b); in dm_bufio_client_destroy()
2707 static enum evict_result select_for_evict(struct dm_buffer *b, void *context) in select_for_evict() argument
2710 if (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep) { in select_for_evict()
2711 if (test_bit_acquire(B_READING, &b->state) || in select_for_evict()
2712 test_bit(B_WRITING, &b->state) || in select_for_evict()
2713 test_bit(B_DIRTY, &b->state)) in select_for_evict()
2725 struct dm_buffer *b; in __evict_a_few() local
2734 b = cache_evict(&c->cache, LIST_CLEAN, select_for_evict, NULL); in __evict_a_few()
2735 if (!b) in __evict_a_few()
2738 last_accessed = READ_ONCE(b->last_accessed); in __evict_a_few()
2742 __make_buffer_clean(b); in __evict_a_few()
2743 __free_buffer_wake(b); in __evict_a_few()