1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2009-2011 Red Hat, Inc.
4 *
5 * Author: Mikulas Patocka <mpatocka@redhat.com>
6 *
7 * This file is released under the GPL.
8 */
9
10 #include <linux/dm-bufio.h>
11
12 #include <linux/device-mapper.h>
13 #include <linux/dm-io.h>
14 #include <linux/slab.h>
15 #include <linux/sched/mm.h>
16 #include <linux/jiffies.h>
17 #include <linux/vmalloc.h>
18 #include <linux/shrinker.h>
19 #include <linux/module.h>
20 #include <linux/rbtree.h>
21 #include <linux/stacktrace.h>
22 #include <linux/jump_label.h>
23
24 #define DM_MSG_PREFIX "bufio"
25
26 /*
27 * Memory management policy:
28 * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
29 * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
30 * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
31 * Start background writeback when the number of dirty buffers exceeds
32 * DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
33 */
34 #define DM_BUFIO_MIN_BUFFERS 8
35
36 #define DM_BUFIO_MEMORY_PERCENT 2
37 #define DM_BUFIO_VMALLOC_PERCENT 25
38 #define DM_BUFIO_WRITEBACK_RATIO 3
39 #define DM_BUFIO_LOW_WATERMARK_RATIO 16
40
41 /*
42 * Check buffer ages in this interval (seconds)
43 */
44 #define DM_BUFIO_WORK_TIMER_SECS 30
45
46 /*
47 * Free buffers when they are older than this (seconds)
48 */
49 #define DM_BUFIO_DEFAULT_AGE_SECS 300
50
51 /*
52 * The number of bytes of cached data to keep around.
53 */
54 #define DM_BUFIO_DEFAULT_RETAIN_BYTES (256 * 1024)
55
56 /*
57 * Align buffer writes to this boundary.
58 * Tests show that SSDs have the highest IOPS when using 4k writes.
59 */
60 #define DM_BUFIO_WRITE_ALIGN 4096
61
62 /*
63 * dm_buffer->list_mode
64 */
65 #define LIST_CLEAN 0
66 #define LIST_DIRTY 1
67 #define LIST_SIZE 2
68
69 /*
70 * Linking of buffers:
71 * All buffers are linked to buffer_tree with their node field.
72 *
73 * Clean buffers that are not being written (B_WRITING not set)
74 * are linked to lru[LIST_CLEAN] with their lru_list field.
75 *
76 * Dirty and clean buffers that are being written are linked to
77 * lru[LIST_DIRTY] with their lru_list field. When the write
78 * finishes, the buffer cannot be relinked immediately (because we
79 * are in an interrupt context and relinking requires process
80 * context), so some clean-not-writing buffers can be held on
81 * dirty_lru too. They are later added to lru in the process
82 * context.
83 */
84 struct dm_bufio_client {
85 struct mutex lock;
86 spinlock_t spinlock;
87 bool no_sleep;
88
89 struct list_head lru[LIST_SIZE];
90 unsigned long n_buffers[LIST_SIZE];
91
92 struct block_device *bdev;
93 unsigned int block_size;
94 s8 sectors_per_block_bits; /* log2 of sectors per block, or -1 if block_size is not a power of 2 */
95 void (*alloc_callback)(struct dm_buffer *buf);
96 void (*write_callback)(struct dm_buffer *buf);
97 struct kmem_cache *slab_buffer;
98 struct kmem_cache *slab_cache;
99 struct dm_io_client *dm_io;
100
101 struct list_head reserved_buffers;
102 unsigned int need_reserved_buffers;
103
104 unsigned int minimum_buffers;
105
106 struct rb_root buffer_tree;
107 wait_queue_head_t free_buffer_wait;
108
109 sector_t start;
110
111 int async_write_error;
112
113 struct list_head client_list;
114
115 struct shrinker shrinker;
116 struct work_struct shrink_work;
117 atomic_long_t need_shrink;
118 };
119
120 /*
121 * Buffer state bits.
122 */
123 #define B_READING 0
124 #define B_WRITING 1
125 #define B_DIRTY 2
126
127 /*
128 * Describes how the block was allocated:
129 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
130 * See the comment at alloc_buffer_data.
131 */
132 enum data_mode {
133 DATA_MODE_SLAB = 0,
134 DATA_MODE_GET_FREE_PAGES = 1,
135 DATA_MODE_VMALLOC = 2,
136 DATA_MODE_LIMIT = 3
137 };
138
139 struct dm_buffer {
140 struct rb_node node;
141 struct list_head lru_list;
142 struct list_head global_list;
143 sector_t block;
144 void *data;
145 unsigned char data_mode; /* DATA_MODE_* */
146 unsigned char list_mode; /* LIST_* */
147 blk_status_t read_error;
148 blk_status_t write_error;
149 unsigned int accessed;
150 unsigned int hold_count;
151 unsigned long state;
152 unsigned long last_accessed;
153 unsigned int dirty_start;
154 unsigned int dirty_end;
155 unsigned int write_start;
156 unsigned int write_end;
157 struct dm_bufio_client *c;
158 struct list_head write_list;
159 void (*end_io)(struct dm_buffer *buf, blk_status_t stat);
160 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
161 #define MAX_STACK 10
162 unsigned int stack_len;
163 unsigned long stack_entries[MAX_STACK];
164 #endif
165 };
166
167 static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
168
169 /*----------------------------------------------------------------*/
170
171 #define dm_bufio_in_request() (!!current->bio_list)
172
173 static void dm_bufio_lock(struct dm_bufio_client *c)
174 {
175 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
176 spin_lock_bh(&c->spinlock);
177 else
178 mutex_lock_nested(&c->lock, dm_bufio_in_request());
179 }
180
181 static int dm_bufio_trylock(struct dm_bufio_client *c)
182 {
183 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
184 return spin_trylock_bh(&c->spinlock);
185 else
186 return mutex_trylock(&c->lock);
187 }
188
189 static void dm_bufio_unlock(struct dm_bufio_client *c)
190 {
191 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
192 spin_unlock_bh(&c->spinlock);
193 else
194 mutex_unlock(&c->lock);
195 }
196
197 /*----------------------------------------------------------------*/
198
199 /*
200 * Default cache size: available memory divided by the ratio.
201 */
202 static unsigned long dm_bufio_default_cache_size;
203
204 /*
205 * Total cache size set by the user.
206 */
207 static unsigned long dm_bufio_cache_size;
208
209 /*
210 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
211 * at any time. If it disagrees, the user has changed cache size.
212 */
213 static unsigned long dm_bufio_cache_size_latch;
214
215 static DEFINE_SPINLOCK(global_spinlock);
216
217 static LIST_HEAD(global_queue);
218
219 static unsigned long global_num;
220
221 /*
222 * Buffers are freed after this timeout
223 */
224 static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
225 static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
226
227 static unsigned long dm_bufio_peak_allocated;
228 static unsigned long dm_bufio_allocated_kmem_cache;
229 static unsigned long dm_bufio_allocated_get_free_pages;
230 static unsigned long dm_bufio_allocated_vmalloc;
231 static unsigned long dm_bufio_current_allocated;
232
233 /*----------------------------------------------------------------*/
234
235 /*
236 * The current number of clients.
237 */
238 static int dm_bufio_client_count;
239
240 /*
241 * The list of all clients.
242 */
243 static LIST_HEAD(dm_bufio_all_clients);
244
245 /*
246 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
247 */
248 static DEFINE_MUTEX(dm_bufio_clients_lock);
249
250 static struct workqueue_struct *dm_bufio_wq;
251 static struct delayed_work dm_bufio_cleanup_old_work;
252 static struct work_struct dm_bufio_replacement_work;
253
254
255 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
256 static void buffer_record_stack(struct dm_buffer *b)
257 {
258 b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
259 }
260 #endif
261
262 /*
263 *----------------------------------------------------------------
264 * A red/black tree acts as an index for all the buffers.
265 *----------------------------------------------------------------
266 */
267 static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
268 {
269 struct rb_node *n = c->buffer_tree.rb_node;
270 struct dm_buffer *b;
271
272 while (n) {
273 b = container_of(n, struct dm_buffer, node);
274
275 if (b->block == block)
276 return b;
277
278 n = block < b->block ? n->rb_left : n->rb_right;
279 }
280
281 return NULL;
282 }
283
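/*
 * Like __find(), but if the exact block is not cached, return the buffer
 * with the lowest block number greater than the requested one (or NULL).
 */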
284 static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
285 {
286 struct rb_node *n = c->buffer_tree.rb_node;
287 struct dm_buffer *b;
288 struct dm_buffer *best = NULL;
289
290 while (n) {
291 b = container_of(n, struct dm_buffer, node);
292
293 if (b->block == block)
294 return b;
295
296 if (block <= b->block) {
297 n = n->rb_left;
298 best = b;
299 } else {
300 n = n->rb_right;
301 }
302 }
303
304 return best;
305 }
306
307 static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
308 {
309 struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
310 struct dm_buffer *found;
311
312 while (*new) {
313 found = container_of(*new, struct dm_buffer, node);
314
315 if (found->block == b->block) {
316 BUG_ON(found != b);
317 return;
318 }
319
320 parent = *new;
321 new = b->block < found->block ?
322 &found->node.rb_left : &found->node.rb_right;
323 }
324
325 rb_link_node(&b->node, parent, new);
326 rb_insert_color(&b->node, &c->buffer_tree);
327 }
328
329 static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
330 {
331 rb_erase(&b->node, &c->buffer_tree);
332 }
333
334 /*----------------------------------------------------------------*/
335
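/*
 * Account the buffer's data in the per-allocation-mode and global counters,
 * and link it to (or unlink it from) the global LRU used for cleanup.
 */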
336 static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
337 {
338 unsigned char data_mode;
339 long diff;
340
341 static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
342 &dm_bufio_allocated_kmem_cache,
343 &dm_bufio_allocated_get_free_pages,
344 &dm_bufio_allocated_vmalloc,
345 };
346
347 data_mode = b->data_mode;
348 diff = (long)b->c->block_size;
349 if (unlink)
350 diff = -diff;
351
352 spin_lock(&global_spinlock);
353
354 *class_ptr[data_mode] += diff;
355
356 dm_bufio_current_allocated += diff;
357
358 if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
359 dm_bufio_peak_allocated = dm_bufio_current_allocated;
360
361 b->accessed = 1;
362
363 if (!unlink) {
364 list_add(&b->global_list, &global_queue);
365 global_num++;
366 if (dm_bufio_current_allocated > dm_bufio_cache_size)
367 queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
368 } else {
369 list_del(&b->global_list);
370 global_num--;
371 }
372
373 spin_unlock(&global_spinlock);
374 }
375
376 /*
377 * Change the number of clients and recalculate per-client limit.
378 */
379 static void __cache_size_refresh(void)
380 {
381 BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
382 BUG_ON(dm_bufio_client_count < 0);
383
384 dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
385
386 /*
387 * Use default if set to 0 and report the actual cache size used.
388 */
389 if (!dm_bufio_cache_size_latch) {
390 (void)cmpxchg(&dm_bufio_cache_size, 0,
391 dm_bufio_default_cache_size);
392 dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
393 }
394 }
395
396 /*
397 * Allocating buffer data.
398 *
399 * Small buffers are allocated with kmem_cache, to use space optimally.
400 *
401 * For large buffers, we choose between get_free_pages and vmalloc.
402 * Each has advantages and disadvantages.
403 *
404 * __get_free_pages can randomly fail if the memory is fragmented.
405 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
406 * as low as 128M) so using it for caching is not appropriate.
407 *
408 * If the allocation may fail we use __get_free_pages. Memory fragmentation
409 * won't have a fatal effect here, but it just causes flushes of some other
410 * buffers and more I/O will be performed. Don't use __get_free_pages if it
411 * always fails (i.e. order >= MAX_ORDER).
412 *
413 * If the allocation shouldn't fail we use __vmalloc. This is only for the
414 * initial reserve allocation, so there's no risk of wasting all vmalloc
415 * space.
416 */
417 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
418 unsigned char *data_mode)
419 {
420 if (unlikely(c->slab_cache != NULL)) {
421 *data_mode = DATA_MODE_SLAB;
422 return kmem_cache_alloc(c->slab_cache, gfp_mask);
423 }
424
425 if (c->block_size <= KMALLOC_MAX_SIZE &&
426 gfp_mask & __GFP_NORETRY) {
427 *data_mode = DATA_MODE_GET_FREE_PAGES;
428 return (void *)__get_free_pages(gfp_mask,
429 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
430 }
431
432 *data_mode = DATA_MODE_VMALLOC;
433
434 /*
435 * __vmalloc allocates the data pages and auxiliary structures with
436 * gfp_flags that were specified, but pagetables are always allocated
437 * with GFP_KERNEL, no matter what was specified as gfp_mask.
438 *
439 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
440 * all allocations done by this process (including pagetables) are done
441 * as if GFP_NOIO was specified.
442 */
443 if (gfp_mask & __GFP_NORETRY) {
444 unsigned int noio_flag = memalloc_noio_save();
445 void *ptr = __vmalloc(c->block_size, gfp_mask);
446
447 memalloc_noio_restore(noio_flag);
448 return ptr;
449 }
450
451 return __vmalloc(c->block_size, gfp_mask);
452 }
453
454 /*
455 * Free buffer's data.
456 */
457 static void free_buffer_data(struct dm_bufio_client *c,
458 void *data, unsigned char data_mode)
459 {
460 switch (data_mode) {
461 case DATA_MODE_SLAB:
462 kmem_cache_free(c->slab_cache, data);
463 break;
464
465 case DATA_MODE_GET_FREE_PAGES:
466 free_pages((unsigned long)data,
467 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
468 break;
469
470 case DATA_MODE_VMALLOC:
471 vfree(data);
472 break;
473
474 default:
475 DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
476 data_mode);
477 BUG();
478 }
479 }
480
481 /*
482 * Allocate buffer and its data.
483 */
484 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
485 {
486 struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);
487
488 if (!b)
489 return NULL;
490
491 b->c = c;
492
493 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
494 if (!b->data) {
495 kmem_cache_free(c->slab_buffer, b);
496 return NULL;
497 }
498
499 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
500 b->stack_len = 0;
501 #endif
502 return b;
503 }
504
505 /*
506 * Free buffer and its data.
507 */
508 static void free_buffer(struct dm_buffer *b)
509 {
510 struct dm_bufio_client *c = b->c;
511
512 free_buffer_data(c, b->data, b->data_mode);
513 kmem_cache_free(c->slab_buffer, b);
514 }
515
516 /*
517 * Link buffer to the buffer tree and clean or dirty queue.
518 */
519 static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
520 {
521 struct dm_bufio_client *c = b->c;
522
523 c->n_buffers[dirty]++;
524 b->block = block;
525 b->list_mode = dirty;
526 list_add(&b->lru_list, &c->lru[dirty]);
527 __insert(b->c, b);
528 b->last_accessed = jiffies;
529
530 adjust_total_allocated(b, false);
531 }
532
533 /*
534 * Unlink buffer from the buffer tree and dirty or clean queue.
535 */
536 static void __unlink_buffer(struct dm_buffer *b)
537 {
538 struct dm_bufio_client *c = b->c;
539
540 BUG_ON(!c->n_buffers[b->list_mode]);
541
542 c->n_buffers[b->list_mode]--;
543 __remove(b->c, b);
544 list_del(&b->lru_list);
545
546 adjust_total_allocated(b, true);
547 }
548
549 /*
550 * Place the buffer to the head of dirty or clean LRU queue.
551 */
552 static void __relink_lru(struct dm_buffer *b, int dirty)
553 {
554 struct dm_bufio_client *c = b->c;
555
556 b->accessed = 1;
557
558 BUG_ON(!c->n_buffers[b->list_mode]);
559
560 c->n_buffers[b->list_mode]--;
561 c->n_buffers[dirty]++;
562 b->list_mode = dirty;
563 list_move(&b->lru_list, &c->lru[dirty]);
564 b->last_accessed = jiffies;
565 }
566
567 /*
568 *--------------------------------------------------------------------------
569 * Submit I/O on the buffer.
570 *
571 * Bio interface is faster but it has some problems:
572 * the vector list is limited (increasing this limit increases
573 * memory-consumption per buffer, so it is not viable);
574 *
575 * the memory must be direct-mapped, not vmalloced;
576 *
577 * If the buffer is not vmalloced, try using the bio
578 * interface.
579 *
580 * If the buffer is big, if it is vmalloced or if the underlying device
581 * rejects the bio because it is too large, use dm-io layer to do the I/O.
582 * The dm-io layer splits the I/O into multiple requests, avoiding the above
583 * shortcomings.
584 *--------------------------------------------------------------------------
585 */
586
587 /*
588 * dm-io completion routine. It just calls b->end_io, pretending
589 * that the request was handled directly with bio interface.
590 */
591 static void dmio_complete(unsigned long error, void *context)
592 {
593 struct dm_buffer *b = context;
594
595 b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
596 }
597
598 static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
599 unsigned int n_sectors, unsigned int offset)
600 {
601 int r;
602 struct dm_io_request io_req = {
603 .bi_opf = op,
604 .notify.fn = dmio_complete,
605 .notify.context = b,
606 .client = b->c->dm_io,
607 };
608 struct dm_io_region region = {
609 .bdev = b->c->bdev,
610 .sector = sector,
611 .count = n_sectors,
612 };
613
614 if (b->data_mode != DATA_MODE_VMALLOC) {
615 io_req.mem.type = DM_IO_KMEM;
616 io_req.mem.ptr.addr = (char *)b->data + offset;
617 } else {
618 io_req.mem.type = DM_IO_VMA;
619 io_req.mem.ptr.vma = (char *)b->data + offset;
620 }
621
622 r = dm_io(&io_req, 1, &region, NULL);
623 if (unlikely(r))
624 b->end_io(b, errno_to_blk_status(r));
625 }
626
627 static void bio_complete(struct bio *bio)
628 {
629 struct dm_buffer *b = bio->bi_private;
630 blk_status_t status = bio->bi_status;
631
632 bio_uninit(bio);
633 kfree(bio);
634 b->end_io(b, status);
635 }
636
637 static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
638 unsigned int n_sectors, unsigned int offset)
639 {
640 struct bio *bio;
641 char *ptr;
642 unsigned int vec_size, len;
643
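/*
 * A buffer smaller than a page, or one whose data is not page-aligned,
 * can straddle page boundaries, so reserve two extra bio vectors for it.
 */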
644 vec_size = b->c->block_size >> PAGE_SHIFT;
645 if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
646 vec_size += 2;
647
648 bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
649 if (!bio) {
650 dmio:
651 use_dmio(b, op, sector, n_sectors, offset);
652 return;
653 }
654 bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, op);
655 bio->bi_iter.bi_sector = sector;
656 bio->bi_end_io = bio_complete;
657 bio->bi_private = b;
658
659 ptr = (char *)b->data + offset;
660 len = n_sectors << SECTOR_SHIFT;
661
662 do {
663 unsigned int this_step = min((unsigned int)(PAGE_SIZE - offset_in_page(ptr)), len);
664
665 if (!bio_add_page(bio, virt_to_page(ptr), this_step,
666 offset_in_page(ptr))) {
667 bio_put(bio);
668 goto dmio;
669 }
670
671 len -= this_step;
672 ptr += this_step;
673 } while (len > 0);
674
675 submit_bio(bio);
676 }
677
678 static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
679 {
680 sector_t sector;
681
682 if (likely(c->sectors_per_block_bits >= 0))
683 sector = block << c->sectors_per_block_bits;
684 else
685 sector = block * (c->block_size >> SECTOR_SHIFT);
686 sector += c->start;
687
688 return sector;
689 }
690
691 static void submit_io(struct dm_buffer *b, enum req_op op,
692 void (*end_io)(struct dm_buffer *, blk_status_t))
693 {
694 unsigned int n_sectors;
695 sector_t sector;
696 unsigned int offset, end;
697
698 b->end_io = end_io;
699
700 sector = block_to_sector(b->c, b->block);
701
702 if (op != REQ_OP_WRITE) {
703 n_sectors = b->c->block_size >> SECTOR_SHIFT;
704 offset = 0;
705 } else {
706 if (b->c->write_callback)
707 b->c->write_callback(b);
708 offset = b->write_start;
709 end = b->write_end;
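/*
 * Round the dirty range out to DM_BUFIO_WRITE_ALIGN boundaries:
 * the start is rounded down, the end up and clamped to the block size.
 */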
710 offset &= -DM_BUFIO_WRITE_ALIGN;
711 end += DM_BUFIO_WRITE_ALIGN - 1;
712 end &= -DM_BUFIO_WRITE_ALIGN;
713 if (unlikely(end > b->c->block_size))
714 end = b->c->block_size;
715
716 sector += offset >> SECTOR_SHIFT;
717 n_sectors = (end - offset) >> SECTOR_SHIFT;
718 }
719
720 if (b->data_mode != DATA_MODE_VMALLOC)
721 use_bio(b, op, sector, n_sectors, offset);
722 else
723 use_dmio(b, op, sector, n_sectors, offset);
724 }
725
726 /*
727 *--------------------------------------------------------------
728 * Writing dirty buffers
729 *--------------------------------------------------------------
730 */
731
732 /*
733 * The endio routine for write.
734 *
735 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
736 * it.
737 */
738 static void write_endio(struct dm_buffer *b, blk_status_t status)
739 {
740 b->write_error = status;
741 if (unlikely(status)) {
742 struct dm_bufio_client *c = b->c;
743
744 (void)cmpxchg(&c->async_write_error, 0,
745 blk_status_to_errno(status));
746 }
747
748 BUG_ON(!test_bit(B_WRITING, &b->state));
749
750 smp_mb__before_atomic();
751 clear_bit(B_WRITING, &b->state);
752 smp_mb__after_atomic();
753
754 wake_up_bit(&b->state, B_WRITING);
755 }
756
757 /*
758 * Initiate a write on a dirty buffer, but don't wait for it.
759 *
760 * - If the buffer is not dirty, exit.
761 * - If there is some previous write going on, wait for it to finish (we can't
762 * have two writes on the same buffer simultaneously).
763 * - Submit our write and don't wait on it. We set B_WRITING indicating
764 * that there is a write in progress.
765 */
766 static void __write_dirty_buffer(struct dm_buffer *b,
767 struct list_head *write_list)
768 {
769 if (!test_bit(B_DIRTY, &b->state))
770 return;
771
772 clear_bit(B_DIRTY, &b->state);
773 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
774
775 b->write_start = b->dirty_start;
776 b->write_end = b->dirty_end;
777
778 if (!write_list)
779 submit_io(b, REQ_OP_WRITE, write_endio);
780 else
781 list_add_tail(&b->write_list, write_list);
782 }
783
784 static void __flush_write_list(struct list_head *write_list)
785 {
786 struct blk_plug plug;
787
788 blk_start_plug(&plug);
789 while (!list_empty(write_list)) {
790 struct dm_buffer *b =
791 list_entry(write_list->next, struct dm_buffer, write_list);
792 list_del(&b->write_list);
793 submit_io(b, REQ_OP_WRITE, write_endio);
794 cond_resched();
795 }
796 blk_finish_plug(&plug);
797 }
798
799 /*
800 * Wait until any activity on the buffer finishes. Possibly write the
801 * buffer if it is dirty. When this function finishes, there is no I/O
802 * running on the buffer and the buffer is not dirty.
803 */
804 static void __make_buffer_clean(struct dm_buffer *b)
805 {
806 BUG_ON(b->hold_count);
807
808 /* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
809 if (!smp_load_acquire(&b->state)) /* fast case */
810 return;
811
812 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
813 __write_dirty_buffer(b, NULL);
814 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
815 }
816
817 /*
818 * Find some buffer that is not held by anybody, clean it, unlink it and
819 * return it.
820 */
821 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
822 {
823 struct dm_buffer *b;
824
825 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
826 BUG_ON(test_bit(B_WRITING, &b->state));
827 BUG_ON(test_bit(B_DIRTY, &b->state));
828
829 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
830 unlikely(test_bit_acquire(B_READING, &b->state)))
831 continue;
832
833 if (!b->hold_count) {
834 __make_buffer_clean(b);
835 __unlink_buffer(b);
836 return b;
837 }
838 cond_resched();
839 }
840
841 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
842 return NULL;
843
844 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
845 BUG_ON(test_bit(B_READING, &b->state));
846
847 if (!b->hold_count) {
848 __make_buffer_clean(b);
849 __unlink_buffer(b);
850 return b;
851 }
852 cond_resched();
853 }
854
855 return NULL;
856 }
857
858 /*
859 * Wait until some other thread frees a buffer or releases its hold count
860 * on some buffer.
861 *
862 * This function is entered with c->lock held, drops it and regains it
863 * before exiting.
864 */
865 static void __wait_for_free_buffer(struct dm_bufio_client *c)
866 {
867 DECLARE_WAITQUEUE(wait, current);
868
869 add_wait_queue(&c->free_buffer_wait, &wait);
870 set_current_state(TASK_UNINTERRUPTIBLE);
871 dm_bufio_unlock(c);
872
873 io_schedule();
874
875 remove_wait_queue(&c->free_buffer_wait, &wait);
876
877 dm_bufio_lock(c);
878 }
879
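/*
 * How a buffer is being requested: NF_FRESH callers overwrite the whole
 * block, NF_READ waits for the data to be read, NF_GET only returns
 * already-cached buffers and NF_PREFETCH starts a read without waiting.
 */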
880 enum new_flag {
881 NF_FRESH = 0,
882 NF_READ = 1,
883 NF_GET = 2,
884 NF_PREFETCH = 3
885 };
886
887 /*
888 * Allocate a new buffer. If the allocation is not possible, wait until
889 * some other thread frees a buffer.
890 *
891 * May drop the lock and regain it.
892 */
893 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
894 {
895 struct dm_buffer *b;
896 bool tried_noio_alloc = false;
897
898 /*
899 * dm-bufio is resistant to allocation failures (it just keeps
900 * one buffer reserved in case all the allocations fail).
901 * So set flags to not try too hard:
902 * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
903 * mutex and wait ourselves.
904 * __GFP_NORETRY: don't retry and rather return failure
905 * __GFP_NOMEMALLOC: don't use emergency reserves
906 * __GFP_NOWARN: don't print a warning in case of failure
907 *
908 * For debugging, if we set the cache size to 1, no new buffers will
909 * be allocated.
910 */
911 while (1) {
912 if (dm_bufio_cache_size_latch != 1) {
913 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
914 if (b)
915 return b;
916 }
917
918 if (nf == NF_PREFETCH)
919 return NULL;
920
921 if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
922 dm_bufio_unlock(c);
923 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
924 dm_bufio_lock(c);
925 if (b)
926 return b;
927 tried_noio_alloc = true;
928 }
929
930 if (!list_empty(&c->reserved_buffers)) {
931 b = list_entry(c->reserved_buffers.next,
932 struct dm_buffer, lru_list);
933 list_del(&b->lru_list);
934 c->need_reserved_buffers++;
935
936 return b;
937 }
938
939 b = __get_unclaimed_buffer(c);
940 if (b)
941 return b;
942
943 __wait_for_free_buffer(c);
944 }
945 }
946
947 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
948 {
949 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
950
951 if (!b)
952 return NULL;
953
954 if (c->alloc_callback)
955 c->alloc_callback(b);
956
957 return b;
958 }
959
960 /*
961 * Free a buffer and wake other threads waiting for free buffers.
962 */
963 static void __free_buffer_wake(struct dm_buffer *b)
964 {
965 struct dm_bufio_client *c = b->c;
966
967 if (!c->need_reserved_buffers)
968 free_buffer(b);
969 else {
970 list_add(&b->lru_list, &c->reserved_buffers);
971 c->need_reserved_buffers--;
972 }
973
974 wake_up(&c->free_buffer_wait);
975 }
976
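/*
 * Walk the dirty list from the oldest buffer: relink already-clean buffers
 * to the clean list and start asynchronous writes on the dirty ones.
 */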
977 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
978 struct list_head *write_list)
979 {
980 struct dm_buffer *b, *tmp;
981
982 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
983 BUG_ON(test_bit(B_READING, &b->state));
984
985 if (!test_bit(B_DIRTY, &b->state) &&
986 !test_bit(B_WRITING, &b->state)) {
987 __relink_lru(b, LIST_CLEAN);
988 continue;
989 }
990
991 if (no_wait && test_bit(B_WRITING, &b->state))
992 return;
993
994 __write_dirty_buffer(b, write_list);
995 cond_resched();
996 }
997 }
998
999 /*
1000 * Check if we're over the writeback watermark.
1001 * If the number of dirty buffers exceeds DM_BUFIO_WRITEBACK_RATIO times
1002 * the number of clean buffers, start writing them back asynchronously.
1003 */
1004 static void __check_watermark(struct dm_bufio_client *c,
1005 struct list_head *write_list)
1006 {
1007 if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO)
1008 __write_dirty_buffers_async(c, 1, write_list);
1009 }
1010
1011 /*
1012 *--------------------------------------------------------------
1013 * Getting a buffer
1014 *--------------------------------------------------------------
1015 */
1016
1017 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
1018 enum new_flag nf, int *need_submit,
1019 struct list_head *write_list)
1020 {
1021 struct dm_buffer *b, *new_b = NULL;
1022
1023 *need_submit = 0;
1024
1025 b = __find(c, block);
1026 if (b)
1027 goto found_buffer;
1028
1029 if (nf == NF_GET)
1030 return NULL;
1031
1032 new_b = __alloc_buffer_wait(c, nf);
1033 if (!new_b)
1034 return NULL;
1035
1036 /*
1037 * We've had a period where the mutex was unlocked, so need to
1038 * recheck the buffer tree.
1039 */
1040 b = __find(c, block);
1041 if (b) {
1042 __free_buffer_wake(new_b);
1043 goto found_buffer;
1044 }
1045
1046 __check_watermark(c, write_list);
1047
1048 b = new_b;
1049 b->hold_count = 1;
1050 b->read_error = 0;
1051 b->write_error = 0;
1052 __link_buffer(b, block, LIST_CLEAN);
1053
1054 if (nf == NF_FRESH) {
1055 b->state = 0;
1056 return b;
1057 }
1058
1059 b->state = 1 << B_READING;
1060 *need_submit = 1;
1061
1062 return b;
1063
1064 found_buffer:
1065 if (nf == NF_PREFETCH)
1066 return NULL;
1067 /*
1068 * Note: it is essential that we don't wait for the buffer to be
1069 * read if dm_bufio_get function is used. Both dm_bufio_get and
1070 * dm_bufio_prefetch can be used in the driver request routine.
1071 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1072 * the same buffer, it would deadlock if we waited.
1073 */
1074 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state)))
1075 return NULL;
1076
1077 b->hold_count++;
1078 __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
1079 test_bit(B_WRITING, &b->state));
1080 return b;
1081 }
1082
1083 /*
1084 * The endio routine for reading: set the error, clear the bit and wake up
1085 * anyone waiting on the buffer.
1086 */
1087 static void read_endio(struct dm_buffer *b, blk_status_t status)
1088 {
1089 b->read_error = status;
1090
1091 BUG_ON(!test_bit(B_READING, &b->state));
1092
1093 smp_mb__before_atomic();
1094 clear_bit(B_READING, &b->state);
1095 smp_mb__after_atomic();
1096
1097 wake_up_bit(&b->state, B_READING);
1098 }
1099
1100 /*
1101 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
1102 * functions is similar except that dm_bufio_new doesn't read the
1103 * buffer from the disk (assuming that the caller overwrites all the data
1104 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1105 */
1106 static void *new_read(struct dm_bufio_client *c, sector_t block,
1107 enum new_flag nf, struct dm_buffer **bp)
1108 {
1109 int need_submit;
1110 struct dm_buffer *b;
1111
1112 LIST_HEAD(write_list);
1113
1114 dm_bufio_lock(c);
1115 b = __bufio_new(c, block, nf, &need_submit, &write_list);
1116 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1117 if (b && b->hold_count == 1)
1118 buffer_record_stack(b);
1119 #endif
1120 dm_bufio_unlock(c);
1121
1122 __flush_write_list(&write_list);
1123
1124 if (!b)
1125 return NULL;
1126
1127 if (need_submit)
1128 submit_io(b, REQ_OP_READ, read_endio);
1129
1130 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1131
1132 if (b->read_error) {
1133 int error = blk_status_to_errno(b->read_error);
1134
1135 dm_bufio_release(b);
1136
1137 return ERR_PTR(error);
1138 }
1139
1140 *bp = b;
1141
1142 return b->data;
1143 }
1144
1145 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1146 struct dm_buffer **bp)
1147 {
1148 return new_read(c, block, NF_GET, bp);
1149 }
1150 EXPORT_SYMBOL_GPL(dm_bufio_get);
1151
1152 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1153 struct dm_buffer **bp)
1154 {
1155 BUG_ON(dm_bufio_in_request());
1156
1157 return new_read(c, block, NF_READ, bp);
1158 }
1159 EXPORT_SYMBOL_GPL(dm_bufio_read);
1160
1161 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1162 struct dm_buffer **bp)
1163 {
1164 BUG_ON(dm_bufio_in_request());
1165
1166 return new_read(c, block, NF_FRESH, bp);
1167 }
1168 EXPORT_SYMBOL_GPL(dm_bufio_new);
1169
1170 void dm_bufio_prefetch(struct dm_bufio_client *c,
1171 sector_t block, unsigned int n_blocks)
1172 {
1173 struct blk_plug plug;
1174
1175 LIST_HEAD(write_list);
1176
1177 BUG_ON(dm_bufio_in_request());
1178
1179 blk_start_plug(&plug);
1180 dm_bufio_lock(c);
1181
1182 for (; n_blocks--; block++) {
1183 int need_submit;
1184 struct dm_buffer *b;
1185
1186 b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1187 &write_list);
1188 if (unlikely(!list_empty(&write_list))) {
1189 dm_bufio_unlock(c);
1190 blk_finish_plug(&plug);
1191 __flush_write_list(&write_list);
1192 blk_start_plug(&plug);
1193 dm_bufio_lock(c);
1194 }
1195 if (unlikely(b != NULL)) {
1196 dm_bufio_unlock(c);
1197
1198 if (need_submit)
1199 submit_io(b, REQ_OP_READ, read_endio);
1200 dm_bufio_release(b);
1201
1202 cond_resched();
1203
1204 if (!n_blocks)
1205 goto flush_plug;
1206 dm_bufio_lock(c);
1207 }
1208 }
1209
1210 dm_bufio_unlock(c);
1211
1212 flush_plug:
1213 blk_finish_plug(&plug);
1214 }
1215 EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
1216
1217 void dm_bufio_release(struct dm_buffer *b)
1218 {
1219 struct dm_bufio_client *c = b->c;
1220
1221 dm_bufio_lock(c);
1222
1223 BUG_ON(!b->hold_count);
1224
1225 b->hold_count--;
1226 if (!b->hold_count) {
1227 wake_up(&c->free_buffer_wait);
1228
1229 /*
1230 * If there were errors on the buffer, and the buffer is not
1231 * to be written, free the buffer. There is no point in caching
1232 * an invalid buffer.
1233 */
1234 if ((b->read_error || b->write_error) &&
1235 !test_bit_acquire(B_READING, &b->state) &&
1236 !test_bit(B_WRITING, &b->state) &&
1237 !test_bit(B_DIRTY, &b->state)) {
1238 __unlink_buffer(b);
1239 __free_buffer_wake(b);
1240 }
1241 }
1242
1243 dm_bufio_unlock(c);
1244 }
1245 EXPORT_SYMBOL_GPL(dm_bufio_release);
1246
1247 void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
1248 unsigned int start, unsigned int end)
1249 {
1250 struct dm_bufio_client *c = b->c;
1251
1252 BUG_ON(start >= end);
1253 BUG_ON(end > b->c->block_size);
1254
1255 dm_bufio_lock(c);
1256
1257 BUG_ON(test_bit(B_READING, &b->state));
1258
1259 if (!test_and_set_bit(B_DIRTY, &b->state)) {
1260 b->dirty_start = start;
1261 b->dirty_end = end;
1262 __relink_lru(b, LIST_DIRTY);
1263 } else {
1264 if (start < b->dirty_start)
1265 b->dirty_start = start;
1266 if (end > b->dirty_end)
1267 b->dirty_end = end;
1268 }
1269
1270 dm_bufio_unlock(c);
1271 }
1272 EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
1273
1274 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1275 {
1276 dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
1277 }
1278 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
1279
1280 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1281 {
1282 LIST_HEAD(write_list);
1283
1284 BUG_ON(dm_bufio_in_request());
1285
1286 dm_bufio_lock(c);
1287 __write_dirty_buffers_async(c, 0, &write_list);
1288 dm_bufio_unlock(c);
1289 __flush_write_list(&write_list);
1290 }
1291 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1292
1293 /*
1294 * For performance, it is essential that the buffers are written asynchronously
1295 * and simultaneously (so that the block layer can merge the writes) and then
1296 * waited upon.
1297 *
1298 * Finally, we flush hardware disk cache.
1299 */
1300 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1301 {
1302 int a, f;
1303 unsigned long buffers_processed = 0;
1304 struct dm_buffer *b, *tmp;
1305
1306 LIST_HEAD(write_list);
1307
1308 dm_bufio_lock(c);
1309 __write_dirty_buffers_async(c, 0, &write_list);
1310 dm_bufio_unlock(c);
1311 __flush_write_list(&write_list);
1312 dm_bufio_lock(c);
1313
1314 again:
1315 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1316 int dropped_lock = 0;
1317
1318 if (buffers_processed < c->n_buffers[LIST_DIRTY])
1319 buffers_processed++;
1320
1321 BUG_ON(test_bit(B_READING, &b->state));
1322
1323 if (test_bit(B_WRITING, &b->state)) {
1324 if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1325 dropped_lock = 1;
1326 b->hold_count++;
1327 dm_bufio_unlock(c);
1328 wait_on_bit_io(&b->state, B_WRITING,
1329 TASK_UNINTERRUPTIBLE);
1330 dm_bufio_lock(c);
1331 b->hold_count--;
1332 } else
1333 wait_on_bit_io(&b->state, B_WRITING,
1334 TASK_UNINTERRUPTIBLE);
1335 }
1336
1337 if (!test_bit(B_DIRTY, &b->state) &&
1338 !test_bit(B_WRITING, &b->state))
1339 __relink_lru(b, LIST_CLEAN);
1340
1341 cond_resched();
1342
1343 /*
1344 * If we dropped the lock, the list is no longer consistent,
1345 * so we must restart the search.
1346 *
1347 * In the most common case, the buffer just processed is
1348 * relinked to the clean list, so we won't loop scanning the
1349 * same buffer again and again.
1350 *
1351 * This may livelock if there is another thread simultaneously
1352 * dirtying buffers, so we count the number of buffers walked
1353 * and if it exceeds the total number of buffers, it means that
1354 * someone is doing some writes simultaneously with us. In
1355 * this case, stop, dropping the lock.
1356 */
1357 if (dropped_lock)
1358 goto again;
1359 }
1360 wake_up(&c->free_buffer_wait);
1361 dm_bufio_unlock(c);
1362
1363 a = xchg(&c->async_write_error, 0);
1364 f = dm_bufio_issue_flush(c);
1365 if (a)
1366 return a;
1367
1368 return f;
1369 }
1370 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
1371
1372 /*
1373 * Use dm-io to send an empty barrier to flush the device.
1374 */
1375 int dm_bufio_issue_flush(struct dm_bufio_client *c)
1376 {
1377 struct dm_io_request io_req = {
1378 .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
1379 .mem.type = DM_IO_KMEM,
1380 .mem.ptr.addr = NULL,
1381 .client = c->dm_io,
1382 };
1383 struct dm_io_region io_reg = {
1384 .bdev = c->bdev,
1385 .sector = 0,
1386 .count = 0,
1387 };
1388
1389 BUG_ON(dm_bufio_in_request());
1390
1391 return dm_io(&io_req, 1, &io_reg, NULL);
1392 }
1393 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1394
1395 /*
1396 * Use dm-io to send a discard request to flush the device.
1397 */
1398 int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
1399 {
1400 struct dm_io_request io_req = {
1401 .bi_opf = REQ_OP_DISCARD | REQ_SYNC,
1402 .mem.type = DM_IO_KMEM,
1403 .mem.ptr.addr = NULL,
1404 .client = c->dm_io,
1405 };
1406 struct dm_io_region io_reg = {
1407 .bdev = c->bdev,
1408 .sector = block_to_sector(c, block),
1409 .count = block_to_sector(c, count),
1410 };
1411
1412 BUG_ON(dm_bufio_in_request());
1413
1414 return dm_io(&io_req, 1, &io_reg, NULL);
1415 }
1416 EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
1417
1418 /*
1419 * We first delete any other buffer that may be at that new location.
1420 *
1421 * Then, we write the buffer to the original location if it was dirty.
1422 *
1423 * Then, if we are the only one who is holding the buffer, relink the buffer
1424 * in the buffer tree for the new location.
1425 *
1426 * If there was someone else holding the buffer, we write it to the new
1427 * location but not relink it, because that other user needs to have the buffer
1428 * at the same place.
1429 */
1430 void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1431 {
1432 struct dm_bufio_client *c = b->c;
1433 struct dm_buffer *new;
1434
1435 BUG_ON(dm_bufio_in_request());
1436
1437 dm_bufio_lock(c);
1438
1439 retry:
1440 new = __find(c, new_block);
1441 if (new) {
1442 if (new->hold_count) {
1443 __wait_for_free_buffer(c);
1444 goto retry;
1445 }
1446
1447 /*
1448 * FIXME: Is there any point waiting for a write that's going
1449 * to be overwritten in a bit?
1450 */
1451 __make_buffer_clean(new);
1452 __unlink_buffer(new);
1453 __free_buffer_wake(new);
1454 }
1455
1456 BUG_ON(!b->hold_count);
1457 BUG_ON(test_bit(B_READING, &b->state));
1458
1459 __write_dirty_buffer(b, NULL);
1460 if (b->hold_count == 1) {
1461 wait_on_bit_io(&b->state, B_WRITING,
1462 TASK_UNINTERRUPTIBLE);
1463 set_bit(B_DIRTY, &b->state);
1464 b->dirty_start = 0;
1465 b->dirty_end = c->block_size;
1466 __unlink_buffer(b);
1467 __link_buffer(b, new_block, LIST_DIRTY);
1468 } else {
1469 sector_t old_block;
1470
1471 wait_on_bit_lock_io(&b->state, B_WRITING,
1472 TASK_UNINTERRUPTIBLE);
1473 /*
1474 * Relink buffer to "new_block" so that write_callback
1475 * sees "new_block" as a block number.
1476 * After the write, link the buffer back to old_block.
1477 * All this must be done in bufio lock, so that block number
1478 * change isn't visible to other threads.
1479 */
1480 old_block = b->block;
1481 __unlink_buffer(b);
1482 __link_buffer(b, new_block, b->list_mode);
1483 submit_io(b, REQ_OP_WRITE, write_endio);
1484 wait_on_bit_io(&b->state, B_WRITING,
1485 TASK_UNINTERRUPTIBLE);
1486 __unlink_buffer(b);
1487 __link_buffer(b, old_block, b->list_mode);
1488 }
1489
1490 dm_bufio_unlock(c);
1491 dm_bufio_release(b);
1492 }
1493 EXPORT_SYMBOL_GPL(dm_bufio_release_move);
1494
1495 static void forget_buffer_locked(struct dm_buffer *b)
1496 {
1497 if (likely(!b->hold_count) && likely(!smp_load_acquire(&b->state))) {
1498 __unlink_buffer(b);
1499 __free_buffer_wake(b);
1500 }
1501 }
1502
1503 /*
1504 * Free the given buffer.
1505 *
1506 * This is just a hint, if the buffer is in use or dirty, this function
1507 * does nothing.
1508 */
1509 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1510 {
1511 struct dm_buffer *b;
1512
1513 dm_bufio_lock(c);
1514
1515 b = __find(c, block);
1516 if (b)
1517 forget_buffer_locked(b);
1518
1519 dm_bufio_unlock(c);
1520 }
1521 EXPORT_SYMBOL_GPL(dm_bufio_forget);
1522
1523 void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
1524 {
1525 struct dm_buffer *b;
1526 sector_t end_block = block + n_blocks;
1527
1528 while (block < end_block) {
1529 dm_bufio_lock(c);
1530
1531 b = __find_next(c, block);
1532 if (b) {
1533 block = b->block + 1;
1534 forget_buffer_locked(b);
1535 }
1536
1537 dm_bufio_unlock(c);
1538
1539 if (!b)
1540 break;
1541 }
1542
1543 }
1544 EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
1545
1546 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
1547 {
1548 c->minimum_buffers = n;
1549 }
1550 EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
1551
1552 unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
1553 {
1554 return c->block_size;
1555 }
1556 EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1557
1558 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1559 {
1560 sector_t s = bdev_nr_sectors(c->bdev);
1561
1562 if (s >= c->start)
1563 s -= c->start;
1564 else
1565 s = 0;
1566 if (likely(c->sectors_per_block_bits >= 0))
1567 s >>= c->sectors_per_block_bits;
1568 else
1569 sector_div(s, c->block_size >> SECTOR_SHIFT);
1570 return s;
1571 }
1572 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1573
1574 struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
1575 {
1576 return c->dm_io;
1577 }
1578 EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
1579
1580 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1581 {
1582 return b->block;
1583 }
1584 EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1585
1586 void *dm_bufio_get_block_data(struct dm_buffer *b)
1587 {
1588 return b->data;
1589 }
1590 EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1591
1592 void *dm_bufio_get_aux_data(struct dm_buffer *b)
1593 {
1594 return b + 1;
1595 }
1596 EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1597
1598 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1599 {
1600 return b->c;
1601 }
1602 EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1603
1604 static void drop_buffers(struct dm_bufio_client *c)
1605 {
1606 struct dm_buffer *b;
1607 int i;
1608 bool warned = false;
1609
1610 BUG_ON(dm_bufio_in_request());
1611
1612 /*
1613 * An optimization so that the buffers are not written one-by-one.
1614 */
1615 dm_bufio_write_dirty_buffers_async(c);
1616
1617 dm_bufio_lock(c);
1618
1619 while ((b = __get_unclaimed_buffer(c)))
1620 __free_buffer_wake(b);
1621
1622 for (i = 0; i < LIST_SIZE; i++)
1623 list_for_each_entry(b, &c->lru[i], lru_list) {
1624 WARN_ON(!warned);
1625 warned = true;
1626 DMERR("leaked buffer %llx, hold count %u, list %d",
1627 (unsigned long long)b->block, b->hold_count, i);
1628 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1629 stack_trace_print(b->stack_entries, b->stack_len, 1);
1630 /* mark unclaimed to avoid BUG_ON below */
1631 b->hold_count = 0;
1632 #endif
1633 }
1634
1635 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1636 while ((b = __get_unclaimed_buffer(c)))
1637 __free_buffer_wake(b);
1638 #endif
1639
1640 for (i = 0; i < LIST_SIZE; i++)
1641 BUG_ON(!list_empty(&c->lru[i]));
1642
1643 dm_bufio_unlock(c);
1644 }
1645
1646 /*
1647 * We may not be able to evict this buffer if IO is pending or the client
1648 * is still using it. The caller is expected to know the buffer is too old.
1649 *
1650 * And if GFP_NOFS is used, we must not do any I/O because we hold
1651 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1652 * rerouted to a different bufio client.
1653 */
1654 static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
1655 {
1656 if (!(gfp & __GFP_FS) ||
1657 (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
1658 if (test_bit_acquire(B_READING, &b->state) ||
1659 test_bit(B_WRITING, &b->state) ||
1660 test_bit(B_DIRTY, &b->state))
1661 return false;
1662 }
1663
1664 if (b->hold_count)
1665 return false;
1666
1667 __make_buffer_clean(b);
1668 __unlink_buffer(b);
1669 __free_buffer_wake(b);
1670
1671 return true;
1672 }
1673
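/*
 * Convert the dm_bufio_retain_bytes parameter into a number of buffers
 * of this client's block size.
 */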
1674 static unsigned long get_retain_buffers(struct dm_bufio_client *c)
1675 {
1676 unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
1677
1678 if (likely(c->sectors_per_block_bits >= 0))
1679 retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
1680 else
1681 retain_bytes /= c->block_size;
1682
1683 return retain_bytes;
1684 }
1685
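/*
 * Evict unused buffers, oldest first, until c->need_shrink is satisfied
 * or the cache has shrunk to the retain target. Runs from the shrinker
 * work item with the client lock held.
 */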
1686 static void __scan(struct dm_bufio_client *c)
1687 {
1688 int l;
1689 struct dm_buffer *b, *tmp;
1690 unsigned long freed = 0;
1691 unsigned long count = c->n_buffers[LIST_CLEAN] +
1692 c->n_buffers[LIST_DIRTY];
1693 unsigned long retain_target = get_retain_buffers(c);
1694
1695 for (l = 0; l < LIST_SIZE; l++) {
1696 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1697 if (count - freed <= retain_target)
1698 atomic_long_set(&c->need_shrink, 0);
1699 if (!atomic_long_read(&c->need_shrink))
1700 return;
1701 if (__try_evict_buffer(b, GFP_KERNEL)) {
1702 atomic_long_dec(&c->need_shrink);
1703 freed++;
1704 }
1705 cond_resched();
1706 }
1707 }
1708 }
1709
1710 static void shrink_work(struct work_struct *w)
1711 {
1712 struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
1713
1714 dm_bufio_lock(c);
1715 __scan(c);
1716 dm_bufio_unlock(c);
1717 }
1718
1719 static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1720 {
1721 struct dm_bufio_client *c;
1722
1723 c = container_of(shrink, struct dm_bufio_client, shrinker);
1724 atomic_long_add(sc->nr_to_scan, &c->need_shrink);
1725 queue_work(dm_bufio_wq, &c->shrink_work);
1726
1727 return sc->nr_to_scan;
1728 }
1729
1730 static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1731 {
1732 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
1733 unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
1734 READ_ONCE(c->n_buffers[LIST_DIRTY]);
1735 unsigned long retain_target = get_retain_buffers(c);
1736 unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
1737
1738 if (unlikely(count < retain_target))
1739 count = 0;
1740 else
1741 count -= retain_target;
1742
1743 if (unlikely(count < queued_for_cleanup))
1744 count = 0;
1745 else
1746 count -= queued_for_cleanup;
1747
1748 return count;
1749 }
1750
1751 /*
1752 * Create the buffering interface
1753 */
1754 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
1755 unsigned int reserved_buffers, unsigned int aux_size,
1756 void (*alloc_callback)(struct dm_buffer *),
1757 void (*write_callback)(struct dm_buffer *),
1758 unsigned int flags)
1759 {
1760 int r;
1761 struct dm_bufio_client *c;
1762 unsigned int i;
1763 char slab_name[27];
1764
1765 if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
1766 DMERR("%s: block size not specified or is not multiple of 512b", __func__);
1767 r = -EINVAL;
1768 goto bad_client;
1769 }
1770
1771 c = kzalloc(sizeof(*c), GFP_KERNEL);
1772 if (!c) {
1773 r = -ENOMEM;
1774 goto bad_client;
1775 }
1776 c->buffer_tree = RB_ROOT;
1777
1778 c->bdev = bdev;
1779 c->block_size = block_size;
1780 if (is_power_of_2(block_size))
1781 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
1782 else
1783 c->sectors_per_block_bits = -1;
1784
1785 c->alloc_callback = alloc_callback;
1786 c->write_callback = write_callback;
1787
1788 if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
1789 c->no_sleep = true;
1790 static_branch_inc(&no_sleep_enabled);
1791 }
1792
1793 for (i = 0; i < LIST_SIZE; i++) {
1794 INIT_LIST_HEAD(&c->lru[i]);
1795 c->n_buffers[i] = 0;
1796 }
1797
1798 mutex_init(&c->lock);
1799 spin_lock_init(&c->spinlock);
1800 INIT_LIST_HEAD(&c->reserved_buffers);
1801 c->need_reserved_buffers = reserved_buffers;
1802
1803 dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);
1804
1805 init_waitqueue_head(&c->free_buffer_wait);
1806 c->async_write_error = 0;
1807
1808 c->dm_io = dm_io_client_create();
1809 if (IS_ERR(c->dm_io)) {
1810 r = PTR_ERR(c->dm_io);
1811 goto bad_dm_io;
1812 }
1813
1814 if (block_size <= KMALLOC_MAX_SIZE &&
1815 (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
1816 unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
1817
1818 snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u", block_size);
1819 c->slab_cache = kmem_cache_create(slab_name, block_size, align,
1820 SLAB_RECLAIM_ACCOUNT, NULL);
1821 if (!c->slab_cache) {
1822 r = -ENOMEM;
1823 goto bad;
1824 }
1825 }
1826 if (aux_size)
1827 snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u", aux_size);
1828 else
1829 snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer");
1830 c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
1831 0, SLAB_RECLAIM_ACCOUNT, NULL);
1832 if (!c->slab_buffer) {
1833 r = -ENOMEM;
1834 goto bad;
1835 }
1836
1837 while (c->need_reserved_buffers) {
1838 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1839
1840 if (!b) {
1841 r = -ENOMEM;
1842 goto bad;
1843 }
1844 __free_buffer_wake(b);
1845 }
1846
1847 INIT_WORK(&c->shrink_work, shrink_work);
1848 atomic_long_set(&c->need_shrink, 0);
1849
1850 c->shrinker.count_objects = dm_bufio_shrink_count;
1851 c->shrinker.scan_objects = dm_bufio_shrink_scan;
1852 c->shrinker.seeks = 1;
1853 c->shrinker.batch = 0;
1854 r = register_shrinker(&c->shrinker, "dm-bufio:(%u:%u)",
1855 MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
1856 if (r)
1857 goto bad;
1858
1859 mutex_lock(&dm_bufio_clients_lock);
1860 dm_bufio_client_count++;
1861 list_add(&c->client_list, &dm_bufio_all_clients);
1862 __cache_size_refresh();
1863 mutex_unlock(&dm_bufio_clients_lock);
1864
1865 return c;
1866
1867 bad:
1868 while (!list_empty(&c->reserved_buffers)) {
1869 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1870 struct dm_buffer, lru_list);
1871 list_del(&b->lru_list);
1872 free_buffer(b);
1873 }
1874 kmem_cache_destroy(c->slab_cache);
1875 kmem_cache_destroy(c->slab_buffer);
1876 dm_io_client_destroy(c->dm_io);
1877 bad_dm_io:
1878 mutex_destroy(&c->lock);
1879 if (c->no_sleep)
1880 static_branch_dec(&no_sleep_enabled);
1881 kfree(c);
1882 bad_client:
1883 return ERR_PTR(r);
1884 }
1885 EXPORT_SYMBOL_GPL(dm_bufio_client_create);
1886
1887 /*
1888 * Free the buffering interface.
1889 * It is required that there are no references on any buffers.
1890 */
1891 void dm_bufio_client_destroy(struct dm_bufio_client *c)
1892 {
1893 unsigned int i;
1894
1895 drop_buffers(c);
1896
1897 unregister_shrinker(&c->shrinker);
1898 flush_work(&c->shrink_work);
1899
1900 mutex_lock(&dm_bufio_clients_lock);
1901
1902 list_del(&c->client_list);
1903 dm_bufio_client_count--;
1904 __cache_size_refresh();
1905
1906 mutex_unlock(&dm_bufio_clients_lock);
1907
1908 BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
1909 BUG_ON(c->need_reserved_buffers);
1910
1911 while (!list_empty(&c->reserved_buffers)) {
1912 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1913 struct dm_buffer, lru_list);
1914 list_del(&b->lru_list);
1915 free_buffer(b);
1916 }
1917
1918 for (i = 0; i < LIST_SIZE; i++)
1919 if (c->n_buffers[i])
1920 DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1921
1922 for (i = 0; i < LIST_SIZE; i++)
1923 BUG_ON(c->n_buffers[i]);
1924
1925 kmem_cache_destroy(c->slab_cache);
1926 kmem_cache_destroy(c->slab_buffer);
1927 dm_io_client_destroy(c->dm_io);
1928 mutex_destroy(&c->lock);
1929 if (c->no_sleep)
1930 static_branch_dec(&no_sleep_enabled);
1931 kfree(c);
1932 }
1933 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
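
/*
 * Illustrative usage sketch (not part of this file): a device-mapper target
 * typically creates a bufio client in its constructor and destroys it in its
 * destructor, releasing every buffer in between, e.g.
 *
 *	c = dm_bufio_client_create(bdev, block_size, reserved, aux_size,
 *				   alloc_cb, write_cb, 0);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	data = dm_bufio_read(c, block, &b);
 *	...
 *	dm_bufio_release(b);
 *	...
 *	dm_bufio_client_destroy(c);	(only valid once all buffers are released)
 *
 * The exact prototypes are declared in include/linux/dm-bufio.h; the names
 * above (reserved, aux_size, alloc_cb, write_cb, block, b) are placeholders.
 */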
1934
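/*
 * Shift all I/O issued on behalf of this client by @start sectors from the
 * beginning of the underlying device.
 */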
1935 void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
1936 {
1937 c->start = start;
1938 }
1939 EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
1940
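/*
 * Convert the max_age_seconds module parameter to jiffies, clamped so that
 * the multiplication by HZ cannot overflow an unsigned int.
 */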
1941 static unsigned int get_max_age_hz(void)
1942 {
1943 unsigned int max_age = READ_ONCE(dm_bufio_max_age);
1944
1945 if (max_age > UINT_MAX / HZ)
1946 max_age = UINT_MAX / HZ;
1947
1948 return max_age * HZ;
1949 }
1950
1951 static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1952 {
1953 return time_after_eq(jiffies, b->last_accessed + age_hz);
1954 }
1955
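/*
 * Flush dirty buffers if the dirty watermark has been exceeded, then walk
 * the clean LRU from its cold end and evict buffers that have not been
 * accessed for age_hz jiffies, stopping once the total buffer count drops
 * to the retain target.
 */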
1956 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1957 {
1958 struct dm_buffer *b, *tmp;
1959 unsigned long retain_target = get_retain_buffers(c);
1960 unsigned long count;
1961 LIST_HEAD(write_list);
1962
1963 dm_bufio_lock(c);
1964
1965 __check_watermark(c, &write_list);
1966 if (unlikely(!list_empty(&write_list))) {
1967 dm_bufio_unlock(c);
1968 __flush_write_list(&write_list);
1969 dm_bufio_lock(c);
1970 }
1971
1972 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1973 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
1974 if (count <= retain_target)
1975 break;
1976
1977 if (!older_than(b, age_hz))
1978 break;
1979
1980 if (__try_evict_buffer(b, 0))
1981 count--;
1982
1983 cond_resched();
1984 }
1985
1986 dm_bufio_unlock(c);
1987 }
1988
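/*
 * Global cleanup, run from dm_bufio_replacement_work when the total amount
 * of cached data grows past the configured limit: walk the global LRU from
 * its cold end, give recently accessed buffers a second chance (clear the
 * accessed flag and requeue them), and try to evict the rest until the
 * allocation drops below the low watermark or the loop budget runs out.
 */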
1989 static void do_global_cleanup(struct work_struct *w)
1990 {
1991 struct dm_bufio_client *locked_client = NULL;
1992 struct dm_bufio_client *current_client;
1993 struct dm_buffer *b;
1994 unsigned int spinlock_hold_count;
1995 unsigned long threshold = dm_bufio_cache_size -
1996 dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
1997 unsigned long loops = global_num * 2;
1998
1999 mutex_lock(&dm_bufio_clients_lock);
2000
2001 while (1) {
2002 cond_resched();
2003
2004 spin_lock(&global_spinlock);
2005 if (unlikely(dm_bufio_current_allocated <= threshold))
2006 break;
2007
2008 spinlock_hold_count = 0;
2009 get_next:
2010 if (!loops--)
2011 break;
2012 if (unlikely(list_empty(&global_queue)))
2013 break;
2014 b = list_entry(global_queue.prev, struct dm_buffer, global_list);
2015
2016 if (b->accessed) {
2017 b->accessed = 0;
2018 list_move(&b->global_list, &global_queue);
2019 if (likely(++spinlock_hold_count < 16))
2020 goto get_next;
2021 spin_unlock(&global_spinlock);
2022 continue;
2023 }
2024
2025 current_client = b->c;
2026 if (unlikely(current_client != locked_client)) {
2027 if (locked_client)
2028 dm_bufio_unlock(locked_client);
2029
2030 if (!dm_bufio_trylock(current_client)) {
2031 spin_unlock(&global_spinlock);
2032 dm_bufio_lock(current_client);
2033 locked_client = current_client;
2034 continue;
2035 }
2036
2037 locked_client = current_client;
2038 }
2039
2040 spin_unlock(&global_spinlock);
2041
2042 if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) {
2043 spin_lock(&global_spinlock);
2044 list_move(&b->global_list, &global_queue);
2045 spin_unlock(&global_spinlock);
2046 }
2047 }
2048
2049 spin_unlock(&global_spinlock);
2050
2051 if (locked_client)
2052 dm_bufio_unlock(locked_client);
2053
2054 mutex_unlock(&dm_bufio_clients_lock);
2055 }
2056
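/*
 * Periodic aging: refresh the cache size limit and evict buffers older than
 * max_age_seconds from every registered client.
 */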
2057 static void cleanup_old_buffers(void)
2058 {
2059 unsigned long max_age_hz = get_max_age_hz();
2060 struct dm_bufio_client *c;
2061
2062 mutex_lock(&dm_bufio_clients_lock);
2063
2064 __cache_size_refresh();
2065
2066 list_for_each_entry(c, &dm_bufio_all_clients, client_list)
2067 __evict_old_buffers(c, max_age_hz);
2068
2069 mutex_unlock(&dm_bufio_clients_lock);
2070 }
2071
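/*
 * Delayed-work handler; it reschedules itself every DM_BUFIO_WORK_TIMER_SECS
 * seconds.
 */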
2072 static void work_fn(struct work_struct *w)
2073 {
2074 cleanup_old_buffers();
2075
2076 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2077 DM_BUFIO_WORK_TIMER_SECS * HZ);
2078 }
2079
2080 /*
2081 *--------------------------------------------------------------
2082 * Module setup
2083 *--------------------------------------------------------------
2084 */
2085
2086 /*
2087  * This is called only once for the whole dm_bufio module.
2088  * It initializes the memory limit for the buffer cache.
2089  */
2090 static int __init dm_bufio_init(void)
2091 {
2092 __u64 mem;
2093
2094 dm_bufio_allocated_kmem_cache = 0;
2095 dm_bufio_allocated_get_free_pages = 0;
2096 dm_bufio_allocated_vmalloc = 0;
2097 dm_bufio_current_allocated = 0;
2098
2099 mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
2100 DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
2101
2102 if (mem > ULONG_MAX)
2103 mem = ULONG_MAX;
2104
2105 #ifdef CONFIG_MMU
2106 if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
2107 mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
2108 #endif
2109
2110 dm_bufio_default_cache_size = mem;
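	/*
	 * Worked example (approximate, assuming a 64-bit machine with 16 GiB
	 * of RAM and no highmem): 2% of 16 GiB is roughly 327 MiB, far below
	 * 25% of the vmalloc area, so the default cache size is ~327 MiB.
	 */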
2111
2112 mutex_lock(&dm_bufio_clients_lock);
2113 __cache_size_refresh();
2114 mutex_unlock(&dm_bufio_clients_lock);
2115
2116 dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
2117 if (!dm_bufio_wq)
2118 return -ENOMEM;
2119
2120 INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
2121 INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
2122 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2123 DM_BUFIO_WORK_TIMER_SECS * HZ);
2124
2125 return 0;
2126 }
2127
2128 /*
2129 * This is called once when unloading the dm_bufio module.
2130 */
2131 static void __exit dm_bufio_exit(void)
2132 {
2133 int bug = 0;
2134
2135 cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
2136 destroy_workqueue(dm_bufio_wq);
2137
2138 if (dm_bufio_client_count) {
2139 DMCRIT("%s: dm_bufio_client_count leaked: %d",
2140 __func__, dm_bufio_client_count);
2141 bug = 1;
2142 }
2143
2144 if (dm_bufio_current_allocated) {
2145 DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
2146 __func__, dm_bufio_current_allocated);
2147 bug = 1;
2148 }
2149
2150 if (dm_bufio_allocated_get_free_pages) {
2151 DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
2152 __func__, dm_bufio_allocated_get_free_pages);
2153 bug = 1;
2154 }
2155
2156 if (dm_bufio_allocated_vmalloc) {
2157 		DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
2158 __func__, dm_bufio_allocated_vmalloc);
2159 bug = 1;
2160 }
2161
2162 BUG_ON(bug);
2163 }
2164
2165 module_init(dm_bufio_init)
2166 module_exit(dm_bufio_exit)
2167
2168 module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
2169 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
2170
2171 module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
2172 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
2173
2174 module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
2175 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
2176
2177 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, 0644);
2178 MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
2179
2180 module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444);
2181 MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
2182
2183 module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444);
2184 MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
2185
2186 module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, 0444);
2187 MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
2188
2189 module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
2190 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
2191
2192 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2193 MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
2194 MODULE_LICENSE("GPL");
2195