// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"
#include "file.h"
#include "super.h"

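/* Slab cache backing all struct btrfs_ordered_extent allocations. */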
static struct kmem_cache *btrfs_ordered_extent_cache;

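/*
 * Return the exclusive end offset of the ordered extent's file range,
 * clamped to (u64)-1 on overflow.
 */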
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/*
 * Return NULL if the insertion worked, or the existing node that overlaps
 * the given offset.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * Look for a given offset in the tree. If it can't be found, return NULL
 * and set *prev_ret to the closest preceding node.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

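/*
 * Return 1 if the range [file_offset, file_offset + len) overlaps the
 * entry's file range, 0 otherwise.
 */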
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * Look for the first ordered struct that covers this offset, otherwise
 * the closest one before it.
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/*
 * Add an ordered extent to the per-inode tree.
 *
 * @inode:           Inode that this extent is for.
 * @file_offset:     Logical offset in file where the extent starts.
 * @num_bytes:       Logical length of extent in file.
 * @ram_bytes:       Full length of unencoded data.
 * @disk_bytenr:     Offset of extent on disk.
 * @disk_num_bytes:  Size of extent on disk.
 * @offset:          Offset into unencoded data where file data starts.
 * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 * @compress_type:   Compression algorithm used for data.
 *
 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 * tree is given a single reference on the ordered extent that was inserted.
 *
 * Return: 0 or -ENOMEM.
 */
int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
			     u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
			     u64 disk_num_bytes, u64 offset, unsigned flags,
			     int compress_type)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	int ret;

	if (flags &
	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
		if (ret < 0)
			return ret;
		ret = 0;
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
		if (ret < 0)
			return ret;
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->num_bytes = num_bytes;
	entry->ram_bytes = ram_bytes;
	entry->disk_bytenr = disk_bytenr;
	entry->disk_num_bytes = disk_num_bytes;
	entry->offset = offset;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = ret;
	entry->physical = (u64)-1;

	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);
	entry->flags = flags;

	percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
				 fs_info->delalloc_batch);

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "inconsistency in ordered tree at offset %llu",
			    file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return 0;
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished. If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

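/*
 * Work item callback: finish the IO accounting and metadata updates for the
 * ordered extent embedding this work item.
 */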
static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}

/*
 * Mark all ordered extents io inside the specified range finished.
 *
 * @page:	 The involved page for the operation.
 *		 For uncompressed buffered IO, the page status also needs to be
 *		 updated to indicate whether the pending ordered io is finished.
 *		 Can be NULL for direct IO and compressed write.
 *		 In those cases, the callers ensure they won't execute the
 *		 endio function twice.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				    struct page *page, u64 file_offset,
				    u64 num_bytes, bool uptodate)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	if (btrfs_is_free_space_inode(inode))
		wq = fs_info->endio_freespace_worker;
	else
		wq = fs_info->endio_write_workers;

	if (page)
		ASSERT(page->mapping && page_offset(page) <= file_offset &&
		       file_offset + num_bytes <= page_offset(page) + PAGE_SIZE);

	spin_lock_irqsave(&tree->lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = tree_search(tree, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/*
		 * |<-- OE --->|  |
		 *		cur
		 * Go to next OE.
		 */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * |	|<--- OE --->|
		 * cur
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 *
		 * |<--- OE --->|
		 *	|
		 *	cur
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (page) {
			/*
			 * Ordered (Private2) bit indicates whether we still
			 * have pending io unfinished for the ordered extent.
			 *
			 * If there's no such bit, we need to skip to next range.
			 */
			if (!btrfs_page_test_ordered(fs_info, page, cur, len)) {
				cur += len;
				continue;
			}
			btrfs_page_clear_ordered(fs_info, page, cur, len);
		}

		/* Now we're fine to update the accounting */
		if (unlikely(len > entry->bytes_left)) {
			WARN_ON(1);
			btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%u left=%llu",
				   inode->root->root_key.objectid,
				   btrfs_ino(inode),
				   entry->file_offset,
				   entry->num_bytes,
				   len, entry->bytes_left);
			entry->bytes_left = 0;
		} else {
			entry->bytes_left -= len;
		}

		if (!uptodate)
			set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

		/*
		 * All the IO of the ordered extent is finished, we need to
		 * queue finish_ordered_fn to be executed.
		 */
		if (entry->bytes_left == 0) {
			set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
			cond_wake_up(&entry->wait);
			refcount_inc(&entry->refs);
			trace_btrfs_ordered_extent_mark_finished(inode, entry);
			spin_unlock_irqrestore(&tree->lock, flags);
			btrfs_init_work(&entry->work, finish_ordered_fn, NULL, NULL);
			btrfs_queue_work(wq, &entry->work);
			spin_lock_irqsave(&tree->lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&tree->lock, flags);
}

/*
 * Finish IO for one ordered extent across a given range. The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the finished IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus the caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and update
		 * finished accordingly.
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return finished;
}

/*
 * Used to drop a reference on an ordered extent. This will free
 * the extent if the last reference is dropped.
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(BTRFS_I(entry->inode));
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * Remove an ordered extent from the tree. No references are dropped,
 * but waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;
	bool freespace_inode;

	/*
	 * If this is a free space inode the thread has not acquired the ordered
	 * extents lockdep map.
	 */
	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);

	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root) {
		u64 release;

		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
			release = entry->disk_num_bytes;
		else
			release = entry->num_bytes;
		btrfs_delalloc_release_metadata(btrfs_inode, release, false);
	}

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
	if (!freespace_inode)
		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}

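/*
 * Work item callback used by btrfs_wait_ordered_extents(): start IO and wait
 * for one ordered extent, then signal its completion.
 */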
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered);
	complete(&ordered->completion);
}

/*
 * Wait for all the ordered extents in a root. This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

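/*
 * Wait for ordered extents across all roots, limited to at most @nr extents
 * intersecting the byte range [@range_start, @range_start + @range_len).
 */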
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Start IO and wait for a given ordered extent to finish.
 *
 * Wait on page writeback for all the pages in the extent and the IO completion
 * code to insert metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	bool freespace_inode;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	/*
	 * Pages in the range can be dirty, clean or under writeback. We start
	 * IO on any dirty ones so the wait doesn't stall waiting for the
	 * flusher thread to find them.
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);

	if (!freespace_inode)
		btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
	wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags));
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = OFFSET_MAX;
	} else {
		orig_end = start + len - 1;
		if (orig_end > OFFSET_MAX)
			orig_end = OFFSET_MAX;
	}

	/*
	 * Start IO across the range first to instantiate any delalloc
	 * extents.
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * Find an ordered extent corresponding to @file_offset. Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	tree = &inode->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup(inode, entry);
	}
out:
	spin_unlock_irqrestore(&tree->lock, flags);
	return entry;
}

/*
 * Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_range(inode, entry);
	}
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
	}
	spin_unlock_irq(&tree->lock);
}

/*
 * Lookup and return any ordered extent found at or before 'file_offset'.
 * NULL is returned if none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
	trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the range.
 * And the difference against btrfs_lookup_ordered_extent() is, this function
 * ensures the first ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
			struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&tree->lock);
	node = tree->tree.rb_node;
	/*
	 * Here we don't want to use tree_search() which will use tree->last
	 * and screw up the search order.
	 * And __tree_search() can't return the adjacent ordered extents
	 * either, thus here we do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that starts at
			 * @file_offset
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
	}

	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * Always return with the given range locked, ensuring that after it's called
 * no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion in nowait mode.
 *
 * Return true if the range was locked and no pending ordered extents were
 * found, otherwise false.
 */
bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
				  struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;

	if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
		return false;

	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
	if (!ordered)
		return true;

	btrfs_put_ordered_extent(ordered);
	unlock_extent(&inode->io_tree, start, end, cached_state);

	return false;
}

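/*
 * Clone a trimmed sub-range of @ordered as a new ordered extent, starting
 * @pos bytes into its range and covering @len bytes.
 */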
static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
				u64 len)
{
	struct inode *inode = ordered->inode;
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	u64 file_offset = ordered->file_offset + pos;
	u64 disk_bytenr = ordered->disk_bytenr + pos;
	unsigned long flags = ordered->flags & BTRFS_ORDERED_TYPE_FLAGS;

	/*
	 * The splitting extent is already counted and will be added again in
	 * btrfs_add_ordered_extent(). Subtract len to avoid double counting.
	 */
	percpu_counter_add_batch(&fs_info->ordered_bytes, -len,
				 fs_info->delalloc_batch);
	WARN_ON_ONCE(flags & (1 << BTRFS_ORDERED_COMPRESSED));
	return btrfs_add_ordered_extent(BTRFS_I(inode), file_offset, len, len,
					disk_bytenr, len, 0, flags,
					ordered->compress_type);
}

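/*
 * Split @ordered by trimming @pre bytes from the front and @post bytes from
 * the tail, then clone the trimmed ranges as separate ordered extents.
 *
 * E.g. (illustrative numbers): splitting a 1MiB ordered extent with pre=4K
 * and post=8K shrinks it to 1MiB - 12K and creates two new ordered extents
 * of 4K and 8K covering the trimmed ranges.
 */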
int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
			       u64 post)
{
	struct inode *inode = ordered->inode;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct rb_node *node;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret = 0;

	trace_btrfs_ordered_extent_split(BTRFS_I(inode), ordered);

	spin_lock_irq(&tree->lock);
	/* Remove from tree once */
	node = &ordered->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;

	ordered->file_offset += pre;
	ordered->disk_bytenr += pre;
	ordered->num_bytes -= (pre + post);
	ordered->disk_num_bytes -= (pre + post);
	ordered->bytes_left -= (pre + post);

	/* Re-insert the node */
	node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "zoned: inconsistency in ordered tree at offset %llu",
			    ordered->file_offset);

	spin_unlock_irq(&tree->lock);

	if (pre)
		ret = clone_ordered_extent(ordered, 0, pre);
	if (ret == 0 && post)
		ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
					   post);

	return ret;
}

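/* Create the slab cache for ordered extents, called once at module load. */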
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
					sizeof(struct btrfs_ordered_extent), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}