Lines matching refs: c (uses of the struct dm_bufio_client pointer in drivers/md/dm-bufio.c)
(each entry: source line number, the matching code, the enclosing function, and the kind of reference: member, argument, or local)

349 	struct dm_bufio_client *c;  member (of struct dm_buffer)
1014 static void dm_bufio_lock(struct dm_bufio_client *c) in dm_bufio_lock() argument
1016 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) in dm_bufio_lock()
1017 spin_lock_bh(&c->spinlock); in dm_bufio_lock()
1019 mutex_lock_nested(&c->lock, dm_bufio_in_request()); in dm_bufio_lock()
1022 static void dm_bufio_unlock(struct dm_bufio_client *c) in dm_bufio_unlock() argument
1024 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) in dm_bufio_unlock()
1025 spin_unlock_bh(&c->spinlock); in dm_bufio_unlock()
1027 mutex_unlock(&c->lock); in dm_bufio_unlock()
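
Note: lines 1014-1027 are the lock/unlock pair that lets one client implementation serve both sleepable and atomic callers. Reconstructed from the excerpted lines (only the else branches are hidden by the match filter), the pair reads:

    static void dm_bufio_lock(struct dm_bufio_client *c)
    {
            /* clients created with DM_BUFIO_CLIENT_NO_SLEEP take a
             * bottom-half spinlock so they can run in atomic context */
            if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
                    spin_lock_bh(&c->spinlock);
            else
                    mutex_lock_nested(&c->lock, dm_bufio_in_request());
    }

    static void dm_bufio_unlock(struct dm_bufio_client *c)
    {
            if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
                    spin_unlock_bh(&c->spinlock);
            else
                    mutex_unlock(&c->lock);
    }

The no_sleep_enabled static key keeps the common (sleepable) path essentially branch-free until at least one no-sleep client exists.
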
1104 diff = (long)b->c->block_size; in adjust_total_allocated()
1168 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, in alloc_buffer_data() argument
1171 if (unlikely(c->slab_cache != NULL)) { in alloc_buffer_data()
1173 return kmem_cache_alloc(c->slab_cache, gfp_mask); in alloc_buffer_data()
1176 if (unlikely(c->block_size < PAGE_SIZE)) { in alloc_buffer_data()
1178 return kmalloc(c->block_size, gfp_mask | __GFP_RECLAIMABLE); in alloc_buffer_data()
1181 if (c->block_size <= KMALLOC_MAX_SIZE && in alloc_buffer_data()
1185 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT)); in alloc_buffer_data()
1190 return __vmalloc(c->block_size, gfp_mask); in alloc_buffer_data()
1196 static void free_buffer_data(struct dm_bufio_client *c, in free_buffer_data() argument
1201 kmem_cache_free(c->slab_cache, data); in free_buffer_data()
1210 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT)); in free_buffer_data()
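
Note: alloc_buffer_data() (lines 1168-1190) picks one of four allocation strategies by block size and allocation pressure, and free_buffer_data() must release the memory the same way, which is why the chosen mode is reported through the *data_mode out-parameter. A sketch of the decision ladder, reconstructed from the excerpted lines with the DATA_MODE_* bookkeeping elided:

    if (unlikely(c->slab_cache != NULL))            /* dedicated slab for odd sizes */
            return kmem_cache_alloc(c->slab_cache, gfp_mask);

    if (unlikely(c->block_size < PAGE_SIZE))        /* sub-page blocks */
            return kmalloc(c->block_size, gfp_mask | __GFP_RECLAIMABLE);

    if (c->block_size <= KMALLOC_MAX_SIZE &&        /* opportunistic (__GFP_NORETRY) */
        gfp_mask & __GFP_NORETRY)                   /* requests get whole pages */
            return (void *)__get_free_pages(gfp_mask,
                            c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));

    return __vmalloc(c->block_size, gfp_mask);      /* fallback for large blocks */
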
1227 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask) in alloc_buffer() argument
1229 struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask); in alloc_buffer()
1234 b->c = c; in alloc_buffer()
1236 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode); in alloc_buffer()
1238 kmem_cache_free(c->slab_buffer, b); in alloc_buffer()
1254 struct dm_bufio_client *c = b->c; in free_buffer() local
1257 free_buffer_data(c, b->data, b->data_mode); in free_buffer()
1258 kmem_cache_free(c->slab_buffer, b); in free_buffer()
1301 .client = b->c->dm_io, in use_dmio()
1304 .bdev = b->c->bdev, in use_dmio()
1345 bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op); in use_bio()
1359 static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block) in block_to_sector() argument
1363 if (likely(c->sectors_per_block_bits >= 0)) in block_to_sector()
1364 sector = block << c->sectors_per_block_bits; in block_to_sector()
1366 sector = block * (c->block_size >> SECTOR_SHIFT); in block_to_sector()
1367 sector += c->start; in block_to_sector()
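
Note: block_to_sector() (lines 1359-1367) maps a client block number to an absolute device sector: a shift when the block size is a power of two (sectors_per_block_bits >= 0, set in dm_bufio_client_create()), a multiply otherwise, plus the client's start offset. A worked example:

    /* block_size = 4096: sectors_per_block_bits = __ffs(4096) - SECTOR_SHIFT
     *                                           = 12 - 9 = 3 (8 sectors/block)
     * block 10  ->  sector = (10 << 3) + c->start = 80 + c->start
     *
     * block_size = 3072 (not a power of two, sectors_per_block_bits = -1):
     * block 10  ->  sector = 10 * (3072 >> 9) + c->start = 60 + c->start */
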
1381 sector = block_to_sector(b->c, b->block); in submit_io()
1384 n_sectors = b->c->block_size >> SECTOR_SHIFT; in submit_io()
1387 if (b->c->write_callback) in submit_io()
1388 b->c->write_callback(b); in submit_io()
1394 if (unlikely(end > b->c->block_size)) in submit_io()
1395 end = b->c->block_size; in submit_io()
1423 struct dm_bufio_client *c = b->c; in write_endio() local
1425 (void)cmpxchg(&c->async_write_error, 0, in write_endio()
1500 struct dm_bufio_client *c = context; in is_clean() local
1510 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep && in is_clean()
1532 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c) in __get_unclaimed_buffer() argument
1536 b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c); in __get_unclaimed_buffer()
1543 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) in __get_unclaimed_buffer()
1546 b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL); in __get_unclaimed_buffer()
1562 static void __wait_for_free_buffer(struct dm_bufio_client *c) in __wait_for_free_buffer() argument
1566 add_wait_queue(&c->free_buffer_wait, &wait); in __wait_for_free_buffer()
1568 dm_bufio_unlock(c); in __wait_for_free_buffer()
1577 remove_wait_queue(&c->free_buffer_wait, &wait); in __wait_for_free_buffer()
1579 dm_bufio_lock(c); in __wait_for_free_buffer()
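
Note: __wait_for_free_buffer() (lines 1562-1579) is the classic wait-queue idiom: register on the queue and set the task state while the client lock is still held, drop the lock, sleep, then reacquire. Per the upstream source it reads roughly:

    DECLARE_WAITQUEUE(wait, current);

    add_wait_queue(&c->free_buffer_wait, &wait);
    set_current_state(TASK_UNINTERRUPTIBLE);
    dm_bufio_unlock(c);             /* never sleep with the client lock held */

    io_schedule_timeout(5 * HZ);    /* timeout guards against a missed wakeup,
                                     * since wake_up can race with the unlock */

    remove_wait_queue(&c->free_buffer_wait, &wait);
    dm_bufio_lock(c);
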
1595 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf) in __alloc_buffer_wait_no_callback() argument
1615 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
1624 dm_bufio_unlock(c); in __alloc_buffer_wait_no_callback()
1625 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
1626 dm_bufio_lock(c); in __alloc_buffer_wait_no_callback()
1632 if (!list_empty(&c->reserved_buffers)) { in __alloc_buffer_wait_no_callback()
1633 b = list_to_buffer(c->reserved_buffers.next); in __alloc_buffer_wait_no_callback()
1635 c->need_reserved_buffers++; in __alloc_buffer_wait_no_callback()
1640 b = __get_unclaimed_buffer(c); in __alloc_buffer_wait_no_callback()
1644 __wait_for_free_buffer(c); in __alloc_buffer_wait_no_callback()
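
Note: __alloc_buffer_wait_no_callback() (lines 1595-1644) escalates through progressively more expensive ways of obtaining a buffer, looping until one succeeds:

    /* 1. alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | ...)  - cheap, lock held
     * 2. drop the client lock, alloc_buffer(c, GFP_NOIO | ...), retake it
     *    (reclaim may now run without deadlocking on c->lock)
     * 3. pull a buffer off c->reserved_buffers, bumping need_reserved_buffers
     *    so the reserve pool is refilled later
     * 4. evict one via __get_unclaimed_buffer(c)
     * 5. __wait_for_free_buffer(c) and start over
     * (NF_PREFETCH callers give up after step 1 rather than block) */
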
1648 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf) in __alloc_buffer_wait() argument
1650 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf); in __alloc_buffer_wait()
1655 if (c->alloc_callback) in __alloc_buffer_wait()
1656 c->alloc_callback(b); in __alloc_buffer_wait()
1666 struct dm_bufio_client *c = b->c; in __free_buffer_wake() local
1669 if (!c->need_reserved_buffers) in __free_buffer_wake()
1672 list_add(&b->lru.list, &c->reserved_buffers); in __free_buffer_wake()
1673 c->need_reserved_buffers--; in __free_buffer_wake()
1680 if (unlikely(waitqueue_active(&c->free_buffer_wait))) in __free_buffer_wake()
1681 wake_up(&c->free_buffer_wait); in __free_buffer_wake()
1695 static void __move_clean_buffers(struct dm_bufio_client *c) in __move_clean_buffers() argument
1697 cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL); in __move_clean_buffers()
1716 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait, in __write_dirty_buffers_async() argument
1721 __move_clean_buffers(c); in __write_dirty_buffers_async()
1722 cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc); in __write_dirty_buffers_async()
1730 static void __check_watermark(struct dm_bufio_client *c, in __check_watermark() argument
1733 if (cache_count(&c->cache, LIST_DIRTY) > in __check_watermark()
1734 cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO) in __check_watermark()
1735 __write_dirty_buffers_async(c, 1, write_list); in __check_watermark()
1744 static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b) in cache_put_and_wake() argument
1750 if (cache_put(&c->cache, b) && in cache_put_and_wake()
1751 unlikely(waitqueue_active(&c->free_buffer_wait))) in cache_put_and_wake()
1752 wake_up(&c->free_buffer_wait); in cache_put_and_wake()
1759 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block, in __bufio_new() argument
1771 new_b = __alloc_buffer_wait(c, nf); in __bufio_new()
1779 b = cache_get(&c->cache, block); in __bufio_new()
1785 __check_watermark(c, write_list); in __bufio_new()
1807 cache_insert(&c->cache, b); in __bufio_new()
1813 cache_put_and_wake(c, b); in __bufio_new()
1825 cache_put_and_wake(c, b); in __bufio_new()
1855 static void *new_read(struct dm_bufio_client *c, sector_t block, in new_read() argument
1870 b = cache_get(&c->cache, block); in new_read()
1873 cache_put_and_wake(c, b); in new_read()
1885 cache_put_and_wake(c, b); in new_read()
1894 dm_bufio_lock(c); in new_read()
1895 b = __bufio_new(c, block, nf, &need_submit, &write_list); in new_read()
1896 dm_bufio_unlock(c); in new_read()
1928 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block, in dm_bufio_get() argument
1931 return new_read(c, block, NF_GET, bp, IOPRIO_DEFAULT); in dm_bufio_get()
1935 static void *__dm_bufio_read(struct dm_bufio_client *c, sector_t block, in __dm_bufio_read() argument
1941 return new_read(c, block, NF_READ, bp, ioprio); in __dm_bufio_read()
1944 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, in dm_bufio_read() argument
1947 return __dm_bufio_read(c, block, bp, IOPRIO_DEFAULT); in dm_bufio_read()
1951 void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block, in dm_bufio_read_with_ioprio() argument
1954 return __dm_bufio_read(c, block, bp, ioprio); in dm_bufio_read_with_ioprio()
1958 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, in dm_bufio_new() argument
1964 return new_read(c, block, NF_FRESH, bp, IOPRIO_DEFAULT); in dm_bufio_new()
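
Note: dm_bufio_get(), dm_bufio_read() and dm_bufio_new() differ only in the new_flag handed to new_read(): NF_GET never issues I/O (cache lookup only), NF_READ reads the block from disk if it is not cached, and NF_FRESH returns the buffer without reading it, for blocks about to be completely overwritten. A hypothetical caller (the 'block' value is illustrative, error handling trimmed):

    struct dm_buffer *bp;
    void *data;

    data = dm_bufio_read(c, block, &bp);    /* cache hit or synchronous read */
    if (IS_ERR(data))
            return PTR_ERR(data);

    /* ... inspect the block_size bytes at data ... */

    dm_bufio_release(bp);                   /* drop the hold; data stays cached */
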
1968 static void __dm_bufio_prefetch(struct dm_bufio_client *c, in __dm_bufio_prefetch() argument
1985 b = cache_get(&c->cache, block); in __dm_bufio_prefetch()
1988 cache_put_and_wake(c, b); in __dm_bufio_prefetch()
1992 dm_bufio_lock(c); in __dm_bufio_prefetch()
1993 b = __bufio_new(c, block, NF_PREFETCH, &need_submit, in __dm_bufio_prefetch()
1996 dm_bufio_unlock(c); in __dm_bufio_prefetch()
2000 dm_bufio_lock(c); in __dm_bufio_prefetch()
2003 dm_bufio_unlock(c); in __dm_bufio_prefetch()
2013 dm_bufio_lock(c); in __dm_bufio_prefetch()
2015 dm_bufio_unlock(c); in __dm_bufio_prefetch()
2022 void dm_bufio_prefetch(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks) in dm_bufio_prefetch() argument
2024 return __dm_bufio_prefetch(c, block, n_blocks, IOPRIO_DEFAULT); in dm_bufio_prefetch()
2028 void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c, sector_t block, in dm_bufio_prefetch_with_ioprio() argument
2031 return __dm_bufio_prefetch(c, block, n_blocks, ioprio); in dm_bufio_prefetch_with_ioprio()
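
Note: prefetch takes the same __bufio_new() path with NF_PREFETCH, so it never blocks waiting for a free buffer and quietly skips blocks it cannot service. A hypothetical warm-up before a sequential scan:

    dm_bufio_prefetch(c, first_block, 128); /* hint only; may do nothing under pressure */
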
2037 struct dm_bufio_client *c = b->c; in dm_bufio_release() local
2048 dm_bufio_lock(c); in dm_bufio_release()
2051 if (cache_remove(&c->cache, b)) { in dm_bufio_release()
2053 dm_bufio_unlock(c); in dm_bufio_release()
2057 dm_bufio_unlock(c); in dm_bufio_release()
2060 cache_put_and_wake(c, b); in dm_bufio_release()
2067 struct dm_bufio_client *c = b->c; in dm_bufio_mark_partial_buffer_dirty() local
2070 BUG_ON(end > b->c->block_size); in dm_bufio_mark_partial_buffer_dirty()
2072 dm_bufio_lock(c); in dm_bufio_mark_partial_buffer_dirty()
2079 cache_mark(&c->cache, b, LIST_DIRTY); in dm_bufio_mark_partial_buffer_dirty()
2087 dm_bufio_unlock(c); in dm_bufio_mark_partial_buffer_dirty()
2093 dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size); in dm_bufio_mark_buffer_dirty()
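
Note: dm_bufio_mark_buffer_dirty() (line 2093) is just the whole-buffer case of the partial variant, which records a dirty byte range so writeback can touch only the modified sectors. A hypothetical in-place update of one 64-byte record ('offset' and 'record' are illustrative):

    struct dm_buffer *bp;
    void *data = dm_bufio_read(c, block, &bp);

    if (!IS_ERR(data)) {
            memcpy(data + offset, record, 64);
            dm_bufio_mark_partial_buffer_dirty(bp, offset, offset + 64);
            dm_bufio_release(bp);   /* writeback happens later, or explicitly
                                     * via dm_bufio_write_dirty_buffers() */
    }
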
2097 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c) in dm_bufio_write_dirty_buffers_async() argument
2104 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers_async()
2105 __write_dirty_buffers_async(c, 0, &write_list); in dm_bufio_write_dirty_buffers_async()
2106 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers_async()
2125 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) in dm_bufio_write_dirty_buffers() argument
2134 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers()
2135 __write_dirty_buffers_async(c, 0, &write_list); in dm_bufio_write_dirty_buffers()
2136 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers()
2138 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers()
2140 nr_buffers = cache_count(&c->cache, LIST_DIRTY); in dm_bufio_write_dirty_buffers()
2141 lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it); in dm_bufio_write_dirty_buffers()
2142 while ((e = lru_iter_next(&it, is_writing, c))) { in dm_bufio_write_dirty_buffers()
2150 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers()
2152 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers()
2158 cache_mark(&c->cache, b, LIST_CLEAN); in dm_bufio_write_dirty_buffers()
2160 cache_put_and_wake(c, b); in dm_bufio_write_dirty_buffers()
2166 wake_up(&c->free_buffer_wait); in dm_bufio_write_dirty_buffers()
2167 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers()
2169 a = xchg(&c->async_write_error, 0); in dm_bufio_write_dirty_buffers()
2170 f = dm_bufio_issue_flush(c); in dm_bufio_write_dirty_buffers()
2181 int dm_bufio_issue_flush(struct dm_bufio_client *c) in dm_bufio_issue_flush() argument
2187 .client = c->dm_io, in dm_bufio_issue_flush()
2190 .bdev = c->bdev, in dm_bufio_issue_flush()
2205 int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count) in dm_bufio_issue_discard() argument
2211 .client = c->dm_io, in dm_bufio_issue_discard()
2214 .bdev = c->bdev, in dm_bufio_issue_discard()
2215 .sector = block_to_sector(c, block), in dm_bufio_issue_discard()
2216 .count = block_to_sector(c, count), in dm_bufio_issue_discard()
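
Note: both helpers build a one-shot dm_io request against the whole client device; the discard variant additionally converts the block range to sectors via block_to_sector(). A typical ordering when punching out stale blocks:

    int r = dm_bufio_issue_flush(c);                 /* preflush, no payload */

    if (!r)
            r = dm_bufio_issue_discard(c, block, 1); /* discard one client block */
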
2226 static void forget_buffer(struct dm_bufio_client *c, sector_t block) in forget_buffer() argument
2230 b = cache_get(&c->cache, block); in forget_buffer()
2233 if (cache_remove(&c->cache, b)) in forget_buffer()
2236 cache_put_and_wake(c, b); in forget_buffer()
2238 cache_put_and_wake(c, b); in forget_buffer()
2249 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block) in dm_bufio_forget() argument
2251 dm_bufio_lock(c); in dm_bufio_forget()
2252 forget_buffer(c, block); in dm_bufio_forget()
2253 dm_bufio_unlock(c); in dm_bufio_forget()
2262 void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks) in dm_bufio_forget_buffers() argument
2264 dm_bufio_lock(c); in dm_bufio_forget_buffers()
2265 cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake); in dm_bufio_forget_buffers()
2266 dm_bufio_unlock(c); in dm_bufio_forget_buffers()
2270 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n) in dm_bufio_set_minimum_buffers() argument
2272 c->minimum_buffers = n; in dm_bufio_set_minimum_buffers()
2276 unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c) in dm_bufio_get_block_size() argument
2278 return c->block_size; in dm_bufio_get_block_size()
2282 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c) in dm_bufio_get_device_size() argument
2284 sector_t s = bdev_nr_sectors(c->bdev); in dm_bufio_get_device_size()
2286 if (s >= c->start) in dm_bufio_get_device_size()
2287 s -= c->start; in dm_bufio_get_device_size()
2290 if (likely(c->sectors_per_block_bits >= 0)) in dm_bufio_get_device_size()
2291 s >>= c->sectors_per_block_bits; in dm_bufio_get_device_size()
2293 sector_div(s, c->block_size >> SECTOR_SHIFT); in dm_bufio_get_device_size()
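
Note: dm_bufio_get_device_size() reports the size in client blocks past the start offset. For example:

    /* bdev of 1,000,000 sectors, c->start = 16, block_size = 4096 (8 sectors):
     * s = 1000000 - 16 = 999984;  s >>= 3;  ->  124998 blocks */
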
2298 struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c) in dm_bufio_get_dm_io_client() argument
2300 return c->dm_io; in dm_bufio_get_dm_io_client()
2324 return b->c; in dm_bufio_get_client()
2344 static void drop_buffers(struct dm_bufio_client *c) in drop_buffers() argument
2355 dm_bufio_write_dirty_buffers_async(c); in drop_buffers()
2357 dm_bufio_lock(c); in drop_buffers()
2359 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
2365 cache_iterate(&c->cache, i, warn_leak, &warned); in drop_buffers()
2369 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
2374 WARN_ON(cache_count(&c->cache, i)); in drop_buffers()
2376 dm_bufio_unlock(c); in drop_buffers()
2379 static unsigned long get_retain_buffers(struct dm_bufio_client *c) in get_retain_buffers() argument
2383 if (likely(c->sectors_per_block_bits >= 0)) in get_retain_buffers()
2384 retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT; in get_retain_buffers()
2386 retain_bytes /= c->block_size; in get_retain_buffers()
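
Note: get_retain_buffers() converts the dm_bufio_retain_bytes module parameter (256 KiB by default upstream) into a per-client buffer count, again shifting for power-of-two block sizes and dividing otherwise:

    /* block_size = 4096: 262144 >> (sectors_per_block_bits(3) + SECTOR_SHIFT(9))
     *                  = 262144 >> 12 = 64 buffers retained */
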
2391 static void __scan(struct dm_bufio_client *c) in __scan() argument
2396 unsigned long retain_target = get_retain_buffers(c); in __scan()
2397 unsigned long count = cache_total(&c->cache); in __scan()
2402 atomic_long_set(&c->need_shrink, 0); in __scan()
2403 if (!atomic_long_read(&c->need_shrink)) in __scan()
2406 b = cache_evict(&c->cache, l, in __scan()
2407 l == LIST_CLEAN ? is_clean : is_dirty, c); in __scan()
2414 atomic_long_dec(&c->need_shrink); in __scan()
2418 dm_bufio_unlock(c); in __scan()
2420 dm_bufio_lock(c); in __scan()
2428 struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work); in shrink_work() local
2430 dm_bufio_lock(c); in shrink_work()
2431 __scan(c); in shrink_work()
2432 dm_bufio_unlock(c); in shrink_work()
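
Note: the shrinker never evicts inline. dm_bufio_shrink_scan() (line 2440 below) only adds sc->nr_to_scan to c->need_shrink and queues shrink_work; __scan() then evicts under the client lock, clean buffers first, clearing need_shrink as soon as the retain target is met (which is what the reset at line 2402 implements). Its core loop, per the upstream source, is roughly:

    for (l = 0; l < LIST_SIZE; l++) {       /* LIST_CLEAN, then LIST_DIRTY */
            while (true) {
                    if (count - freed <= retain_target)
                            atomic_long_set(&c->need_shrink, 0);
                    if (!atomic_long_read(&c->need_shrink))
                            break;
                    b = cache_evict(&c->cache, l,
                                    l == LIST_CLEAN ? is_clean : is_dirty, c);
                    if (!b)
                            break;
                    __make_buffer_clean(b);
                    __free_buffer_wake(b);
                    atomic_long_dec(&c->need_shrink);
                    freed++;
                    /* periodically drop the lock and cond_resched() */
            }
    }
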
2437 struct dm_bufio_client *c; in dm_bufio_shrink_scan() local
2439 c = shrink->private_data; in dm_bufio_shrink_scan()
2440 atomic_long_add(sc->nr_to_scan, &c->need_shrink); in dm_bufio_shrink_scan()
2441 queue_work(dm_bufio_wq, &c->shrink_work); in dm_bufio_shrink_scan()
2448 struct dm_bufio_client *c = shrink->private_data; in dm_bufio_shrink_count() local
2449 unsigned long count = cache_total(&c->cache); in dm_bufio_shrink_count()
2450 unsigned long retain_target = get_retain_buffers(c); in dm_bufio_shrink_count()
2451 unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink); in dm_bufio_shrink_count()
2477 struct dm_bufio_client *c; in dm_bufio_client_create() local
2488 c = kzalloc(sizeof(*c) + (num_locks * sizeof(struct buffer_tree)), GFP_KERNEL); in dm_bufio_client_create()
2489 if (!c) { in dm_bufio_client_create()
2493 cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0); in dm_bufio_client_create()
2495 c->bdev = bdev; in dm_bufio_client_create()
2496 c->block_size = block_size; in dm_bufio_client_create()
2498 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT; in dm_bufio_client_create()
2500 c->sectors_per_block_bits = -1; in dm_bufio_client_create()
2502 c->alloc_callback = alloc_callback; in dm_bufio_client_create()
2503 c->write_callback = write_callback; in dm_bufio_client_create()
2506 c->no_sleep = true; in dm_bufio_client_create()
2510 mutex_init(&c->lock); in dm_bufio_client_create()
2511 spin_lock_init(&c->spinlock); in dm_bufio_client_create()
2512 INIT_LIST_HEAD(&c->reserved_buffers); in dm_bufio_client_create()
2513 c->need_reserved_buffers = reserved_buffers; in dm_bufio_client_create()
2515 dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS); in dm_bufio_client_create()
2517 init_waitqueue_head(&c->free_buffer_wait); in dm_bufio_client_create()
2518 c->async_write_error = 0; in dm_bufio_client_create()
2520 c->dm_io = dm_io_client_create(); in dm_bufio_client_create()
2521 if (IS_ERR(c->dm_io)) { in dm_bufio_client_create()
2522 r = PTR_ERR(c->dm_io); in dm_bufio_client_create()
2531 c->slab_cache = kmem_cache_create(slab_name, block_size, align, in dm_bufio_client_create()
2533 if (!c->slab_cache) { in dm_bufio_client_create()
2544 c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size, in dm_bufio_client_create()
2546 if (!c->slab_buffer) { in dm_bufio_client_create()
2551 while (c->need_reserved_buffers) { in dm_bufio_client_create()
2552 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL); in dm_bufio_client_create()
2561 INIT_WORK(&c->shrink_work, shrink_work); in dm_bufio_client_create()
2562 atomic_long_set(&c->need_shrink, 0); in dm_bufio_client_create()
2564 c->shrinker = shrinker_alloc(0, "dm-bufio:(%u:%u)", in dm_bufio_client_create()
2566 if (!c->shrinker) { in dm_bufio_client_create()
2571 c->shrinker->count_objects = dm_bufio_shrink_count; in dm_bufio_client_create()
2572 c->shrinker->scan_objects = dm_bufio_shrink_scan; in dm_bufio_client_create()
2573 c->shrinker->seeks = 1; in dm_bufio_client_create()
2574 c->shrinker->batch = 0; in dm_bufio_client_create()
2575 c->shrinker->private_data = c; in dm_bufio_client_create()
2577 shrinker_register(c->shrinker); in dm_bufio_client_create()
2581 list_add(&c->client_list, &dm_bufio_all_clients); in dm_bufio_client_create()
2585 return c; in dm_bufio_client_create()
2588 while (!list_empty(&c->reserved_buffers)) { in dm_bufio_client_create()
2589 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next); in dm_bufio_client_create()
2594 kmem_cache_destroy(c->slab_cache); in dm_bufio_client_create()
2595 kmem_cache_destroy(c->slab_buffer); in dm_bufio_client_create()
2596 dm_io_client_destroy(c->dm_io); in dm_bufio_client_create()
2598 mutex_destroy(&c->lock); in dm_bufio_client_create()
2599 if (c->no_sleep) in dm_bufio_client_create()
2601 kfree(c); in dm_bufio_client_create()
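
Note: a hypothetical client setup against the exported constructor (the bdev and sizes are illustrative). reserved_buffers keeps a minimal pool that guarantees forward progress under memory pressure; aux_size reserves per-buffer private space:

    struct dm_bufio_client *c;

    c = dm_bufio_client_create(bdev, 4096 /* block_size */,
                               1 /* reserved_buffers */, 0 /* aux_size */,
                               NULL /* alloc_callback */,
                               NULL /* write_callback */,
                               0 /* flags, e.g. DM_BUFIO_CLIENT_NO_SLEEP */);
    if (IS_ERR(c))
            return PTR_ERR(c);

    /* ... use the client ... */

    dm_bufio_client_destroy(c);     /* all buffers must be released first */
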
2611 void dm_bufio_client_destroy(struct dm_bufio_client *c) in dm_bufio_client_destroy() argument
2615 drop_buffers(c); in dm_bufio_client_destroy()
2617 shrinker_free(c->shrinker); in dm_bufio_client_destroy()
2618 flush_work(&c->shrink_work); in dm_bufio_client_destroy()
2622 list_del(&c->client_list); in dm_bufio_client_destroy()
2628 WARN_ON(c->need_reserved_buffers); in dm_bufio_client_destroy()
2630 while (!list_empty(&c->reserved_buffers)) { in dm_bufio_client_destroy()
2631 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next); in dm_bufio_client_destroy()
2638 if (cache_count(&c->cache, i)) in dm_bufio_client_destroy()
2639 DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i)); in dm_bufio_client_destroy()
2642 WARN_ON(cache_count(&c->cache, i)); in dm_bufio_client_destroy()
2644 cache_destroy(&c->cache); in dm_bufio_client_destroy()
2645 kmem_cache_destroy(c->slab_cache); in dm_bufio_client_destroy()
2646 kmem_cache_destroy(c->slab_buffer); in dm_bufio_client_destroy()
2647 dm_io_client_destroy(c->dm_io); in dm_bufio_client_destroy()
2648 mutex_destroy(&c->lock); in dm_bufio_client_destroy()
2649 if (c->no_sleep) in dm_bufio_client_destroy()
2651 kfree(c); in dm_bufio_client_destroy()
2655 void dm_bufio_client_reset(struct dm_bufio_client *c) in dm_bufio_client_reset() argument
2657 drop_buffers(c); in dm_bufio_client_reset()
2658 flush_work(&c->shrink_work); in dm_bufio_client_reset()
2662 void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start) in dm_bufio_set_sector_offset() argument
2664 c->start = start; in dm_bufio_set_sector_offset()
2694 struct dm_bufio_client *c; in __insert_client() local
2698 c = container_of(h, struct dm_bufio_client, client_list); in __insert_client()
2699 if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer)) in __insert_client()
2710 if (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep) { in select_for_evict()
2721 struct dm_bufio_client *c; in __evict_a_few() local
2727 c = __pop_client(); in __evict_a_few()
2728 if (!c) in __evict_a_few()
2731 dm_bufio_lock(c); in __evict_a_few()
2734 b = cache_evict(&c->cache, LIST_CLEAN, select_for_evict, NULL); in __evict_a_few()
2746 dm_bufio_unlock(c); in __evict_a_few()
2748 dm_bufio_lock(c); in __evict_a_few()
2752 dm_bufio_unlock(c); in __evict_a_few()
2755 c->oldest_buffer = oldest_buffer; in __evict_a_few()
2756 __insert_client(c); in __evict_a_few()
2764 struct dm_bufio_client *c; in check_watermarks() local
2767 list_for_each_entry(c, &dm_bufio_all_clients, client_list) { in check_watermarks()
2768 dm_bufio_lock(c); in check_watermarks()
2769 __check_watermark(c, &write_list); in check_watermarks()
2770 dm_bufio_unlock(c); in check_watermarks()