// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "file-item.h"
#include "btrfs_inode.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

#define BTRFS_STRIPE_HASH_TABLE_BITS	11

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
	struct list_head hash_list;
	spinlock_t lock;
};

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
	struct list_head stripe_cache;
	spinlock_t cache_lock;
	int cache_size;
	struct btrfs_stripe_hash table[];
};

/*
 * A bvec like structure to present a sector inside a page.
 *
 * Unlike bvec we don't need bvlen, as it's fixed to sectorsize.
 */
struct sector_ptr {
	struct page *page;
	unsigned int pgoff:24;
	unsigned int uptodate:8;
};

static void rmw_rbio_work(struct work_struct *work);
static void rmw_rbio_work_locked(struct work_struct *work);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static int finish_parity_scrub(struct btrfs_raid_bio *rbio, int need_check);
static void scrub_rbio_work_locked(struct work_struct *work);

static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio)
{
	bitmap_free(rbio->error_bitmap);
	kfree(rbio->stripe_pages);
	kfree(rbio->bio_sectors);
	kfree(rbio->stripe_sectors);
	kfree(rbio->finish_pointers);
}

static void free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bioc(rbio->bioc);
	free_raid_bio_pointers(rbio);
	kfree(rbio);
}

static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func)
{
	INIT_WORK(&rbio->work, work_func);
	queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
}

/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
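	/*
	 * Rough sizing sketch (illustrative, assuming a 64-bit build
	 * without lock debugging): 1 << 11 = 2048 buckets, each holding a
	 * list_head (two pointers) plus a spinlock_t, roughly 24 bytes, so
	 * about 2048 * 24 = 48KiB, which needs an order-4 (64KiB)
	 * contiguous allocation.  Debug spinlocks inflate each bucket
	 * several times over, which is where the order-7 figure above
	 * comes from.
	 */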
	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	kvfree(x);
	return 0;
}

/*
 * caching an rbio means to copy anything from the
 * bio_sectors array into the stripe_pages array. We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_sectors; i++) {
		/* Some range not covered by bio (partial write), skip it */
		if (!rbio->bio_sectors[i].page) {
			/*
			 * Even if the sector is not covered by bio, if it is
			 * a data sector it should still be uptodate as it is
			 * read from disk.
			 */
			if (i < rbio->nr_data * rbio->stripe_nsectors)
				ASSERT(rbio->stripe_sectors[i].uptodate);
			continue;
		}

		ASSERT(rbio->stripe_sectors[i].page);
		memcpy_page(rbio->stripe_sectors[i].page,
			    rbio->stripe_sectors[i].pgoff,
			    rbio->bio_sectors[i].page,
			    rbio->bio_sectors[i].pgoff,
			    rbio->bioc->fs_info->sectorsize);
		rbio->stripe_sectors[i].uptodate = 1;
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bioc->raid_map[0];

	/*
	 * we shift down quite a bit. We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
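
/*
 * Worked example for rbio_bucket() above (illustrative): full stripe
 * addresses are at least BTRFS_STRIPE_LEN (64KiB) aligned in practice, so
 * a raid_map[0] of 0x1234560000 has its low 16 bits all zero.  Feeding
 * the raw value to hash_64() clusters the results, while hashing
 * 0x1234560000 >> 16 == 0x123456 spreads them out:
 *
 *	bucket = hash_64(0x123456, 11);		// result in [0, 2047]
 */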

static bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
				       unsigned int page_nr)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
	int i;

	ASSERT(page_nr < rbio->nr_pages);

	for (i = sectors_per_page * page_nr;
	     i < sectors_per_page * page_nr + sectors_per_page;
	     i++) {
		if (!rbio->stripe_sectors[i].uptodate)
			return false;
	}
	return true;
}

/*
 * Update the stripe_sectors[] array to use correct page and pgoff
 *
 * Should be called every time any page pointer in stripe_pages[] is modified.
 */
static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	u32 offset;
	int i;

	for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
		int page_index = offset >> PAGE_SHIFT;

		ASSERT(page_index < rbio->nr_pages);
		rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
		rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
	}
}
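
/*
 * Worked example for index_stripe_sectors() above (illustrative): with
 * 4KiB sectors and 64KiB pages (the subpage case), sectors 0-15 all map
 * into stripe_pages[0] at pgoff 0, 4096, ..., 61440.  With 4KiB pages the
 * mapping degenerates to sector i -> stripe_pages[i] at pgoff 0.
 */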

static void steal_rbio_page(struct btrfs_raid_bio *src,
			    struct btrfs_raid_bio *dest, int page_nr)
{
	const u32 sectorsize = src->bioc->fs_info->sectorsize;
	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
	int i;

	if (dest->stripe_pages[page_nr])
		__free_page(dest->stripe_pages[page_nr]);
	dest->stripe_pages[page_nr] = src->stripe_pages[page_nr];
	src->stripe_pages[page_nr] = NULL;

	/* Also update the sector->uptodate bits. */
	for (i = sectors_per_page * page_nr;
	     i < sectors_per_page * page_nr + sectors_per_page; i++)
		dest->stripe_sectors[i].uptodate = true;
}

static bool is_data_stripe_page(struct btrfs_raid_bio *rbio, int page_nr)
{
	const int sector_nr = (page_nr << PAGE_SHIFT) >>
			      rbio->bioc->fs_info->sectorsize_bits;

	/*
	 * We have ensured PAGE_SIZE is aligned with sectorsize, thus
	 * we won't have a page which is half data half parity.
	 *
	 * Thus if the first sector of the page belongs to data stripes, then
	 * the full page belongs to data stripes.
	 */
	return (sector_nr < rbio->nr_data * rbio->stripe_nsectors);
}

/*
 * Stealing an rbio means taking all the uptodate pages from the stripe array
 * in the source rbio and putting them into the destination rbio.
 *
 * This will also update the involved stripe_sectors[] which are referring to
 * the old pages.
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		struct page *p = src->stripe_pages[i];

		/*
		 * We don't need to steal P/Q pages as they will always be
		 * regenerated for RMW or full write anyway.
		 */
		if (!is_data_stripe_page(src, i))
			continue;

		/*
		 * If @src already has RBIO_CACHE_READY_BIT, it should have
		 * all data stripe pages present and uptodate.
		 */
		ASSERT(p);
		ASSERT(full_page_sectors_uptodate(src, i));
		steal_rbio_page(src, dest, i);
	}
	index_stripe_sectors(dest);
	index_stripe_sectors(src);
}

/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination. The victim should
 * be discarded afterwards.
 *
 * must be called with dest->bio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	/* Also inherit the bitmaps from @victim. */
	bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap,
		  dest->stripe_nsectors);
	bio_list_init(&victim->bio_list);
}

/*
 * used to prune items that are in the cache. The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO. We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}

/*
 * insert an rbio into the stripe cache. It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * helper function to run the xor_blocks api. It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}
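
/*
 * Worked example for run_xor() above (illustrative): MAX_XOR_BLOCKS is
 * small (4 at the time of writing), so xoring 6 sources into dest takes
 * two passes, pages[0..3] first and then pages[4..5].  Note that dest
 * (pages[src_cnt]) is expected to be pre-seeded: the callers in this file
 * memcpy() the first source into it and pass only the remaining sources
 * here.
 */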

/*
 * Returns true if the bio list inside this rbio covers an entire stripe (no
 * rmw required).
 */
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	if (size != rbio->nr_data * BTRFS_STRIPE_LEN)
		ret = 0;
	BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}
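
/*
 * Example for rbio_is_full() above (illustrative): on a 4-device RAID5
 * (nr_data == 3) a full stripe covers 3 * BTRFS_STRIPE_LEN = 192KiB of
 * data, so the rbio is "full" only once its queued bios add up to exactly
 * that many bytes.
 */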

/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us. We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bioc->raid_map[0] != cur->bioc->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * A parity scrub reads the full stripe from the drive, then checks
	 * and repairs the parity and writes out the new results.
	 *
	 * We're not allowed to add any new bios to its bio list here:
	 * anyone else that wants to change this stripe needs to do their
	 * own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
	    last->operation == BTRFS_RBIO_READ_REBUILD)
		return 0;

	return 1;
}

static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
					     unsigned int stripe_nr,
					     unsigned int sector_nr)
{
	ASSERT(stripe_nr < rbio->real_stripes);
	ASSERT(sector_nr < rbio->stripe_nsectors);

	return stripe_nr * rbio->stripe_nsectors + sector_nr;
}

/* Return a sector from rbio->stripe_sectors, not from the bio list */
static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio,
					     unsigned int stripe_nr,
					     unsigned int sector_nr)
{
	return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr,
							      sector_nr)];
}

/* Grab a sector inside P stripe */
static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio,
					      unsigned int sector_nr)
{
	return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr);
}

/* Grab a sector inside Q stripe, return NULL if not RAID6 */
static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio,
					      unsigned int sector_nr)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr);
}
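
/*
 * Worked example for the indexing helpers above (illustrative): a
 * 6-device RAID6 has nr_data == 4 and real_stripes == 6.  With 4KiB
 * sectors, stripe_nsectors == 16, so vertical stripe 3 lives at
 * stripe_sectors[s * 16 + 3]: the data copies at s = 0..3, P at
 * 4 * 16 + 3 == 67 and Q at 5 * 16 + 3 == 83.
 */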

/*
 * The first stripe in the table for a logical address
 * has the lock. rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet. The rbio is given
 * the lock and 0 is returned. The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner. The rbio is freed and the IO will
 * start automatically along with the existing rbio. 1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list. When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned.
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission. If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash *h;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bioc->raid_map[0] != rbio->bioc->raid_map[0])
			continue;

		spin_lock(&cur->bio_list_lock);

		/* Can we steal this cached rbio's pages? */
		if (bio_list_empty(&cur->bio_list) &&
		    list_empty(&cur->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
			list_del_init(&cur->hash_list);
			refcount_dec(&cur->refs);

			steal_rbio(cur, rbio);
			cache_drop = cur;
			spin_unlock(&cur->bio_list_lock);

			goto lockit;
		}

		/* Can we merge into the lock owner? */
		if (rbio_can_merge(cur, rbio)) {
			merge_rbio(cur, rbio);
			spin_unlock(&cur->bio_list_lock);
			freeit = rbio;
			ret = 1;
			goto out;
		}

		/*
		 * We couldn't merge with the running rbio, see if we can merge
		 * with the pending ones. We don't have to check for rmw_locked
		 * because there is no way they are inside finish_rmw right now
		 */
		list_for_each_entry(pending, &cur->plug_list, plug_list) {
			if (rbio_can_merge(pending, rbio)) {
				merge_rbio(pending, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}
		}
		/*
		 * No merging, put us on the tail of the plug list, our rbio
		 * will be started when the currently running rbio unlocks
		 */
		list_add_tail(&rbio->plug_list, &cur->plug_list);
		spin_unlock(&cur->bio_list_lock);
		ret = 1;
		goto out;
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		free_raid_bio(freeit);
	return ret;
}
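
/*
 * Typical caller shape for lock_stripe_add() above (sketch only, not a
 * verbatim caller):
 *
 *	if (lock_stripe_add(rbio) == 0) {
 *		// We hold the stripe lock and still own the rbio, so we
 *		// must kick off the IO ourselves.
 *	}
 *	// On 1 the rbio was merged or queued on a plug list and must not
 *	// be touched again here.
 */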

static void recover_rbio_work_locked(struct work_struct *work);

/*
 * called as rmw or parity rebuild is completed. If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				start_async_work(next, recover_rbio_work_locked);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				start_async_work(next, recover_rbio_work_locked);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				start_async_work(next, rmw_rbio_work_locked);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				start_async_work(next, scrub_rbio_work_locked);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	kfree(rbio->csum_buf);
	bitmap_free(rbio->csum_bitmap);
	rbio->csum_buf = NULL;
	rbio->csum_bitmap = NULL;

	/*
	 * Clear the data bitmap, as the rbio may be cached for later usage.
	 * Do this before unlock_stripe() so there will be no new bio for
	 * this rbio.
	 */
	bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}

/*
 * Get a sector pointer specified by its @stripe_nr and @sector_nr.
 *
 * @rbio:		The raid bio
 * @stripe_nr:		Stripe number, valid range [0, real_stripes)
 * @sector_nr:		Sector number inside the stripe,
 *			valid range [0, stripe_nsectors)
 * @bio_list_only:	Whether to use sectors inside the bio list only.
 *
 * The read/modify/write code wants to reuse the original bio page as much
 * as possible, and only use stripe_sectors as fallback.
 */
static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
					 int stripe_nr, int sector_nr,
					 bool bio_list_only)
{
	struct sector_ptr *sector;
	int index;

	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes);
	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);

	index = stripe_nr * rbio->stripe_nsectors + sector_nr;
	ASSERT(index >= 0 && index < rbio->nr_sectors);

	spin_lock_irq(&rbio->bio_list_lock);
	sector = &rbio->bio_sectors[index];
	if (sector->page || bio_list_only) {
		/* Don't return sector without a valid page pointer */
		if (!sector->page)
			sector = NULL;
		spin_unlock_irq(&rbio->bio_list_lock);
		return sector;
	}
	spin_unlock_irq(&rbio->bio_list_lock);

	return &rbio->stripe_sectors[index];
}

/*
 * Allocation and initial setup for the btrfs_raid_bio.  Note that this
 * does not allocate any pages for rbio->stripe_pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_io_context *bioc)
{
	const unsigned int real_stripes = bioc->num_stripes - bioc->num_tgtdevs;
	const unsigned int stripe_npages = BTRFS_STRIPE_LEN >> PAGE_SHIFT;
	const unsigned int num_pages = stripe_npages * real_stripes;
	const unsigned int stripe_nsectors =
		BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
	const unsigned int num_sectors = stripe_nsectors * real_stripes;
	struct btrfs_raid_bio *rbio;

	/* PAGE_SIZE must also be aligned to sectorsize for subpage support */
	ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize));
	/*
	 * Our current stripe len should be fixed to 64k thus stripe_nsectors
	 * (at most 16) should be no larger than BITS_PER_LONG.
	 */
	ASSERT(stripe_nsectors <= BITS_PER_LONG);

	rbio = kzalloc(sizeof(*rbio), GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);
	rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *),
				     GFP_NOFS);
	rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
				    GFP_NOFS);
	rbio->stripe_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
				       GFP_NOFS);
	rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS);
	rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS);

	if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors ||
	    !rbio->finish_pointers || !rbio->error_bitmap) {
		free_raid_bio_pointers(rbio);
		kfree(rbio);
		return ERR_PTR(-ENOMEM);
	}

	bio_list_init(&rbio->bio_list);
	init_waitqueue_head(&rbio->io_wait);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	btrfs_get_bioc(bioc);
	rbio->bioc = bioc;
	rbio->nr_pages = num_pages;
	rbio->nr_sectors = num_sectors;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->stripe_nsectors = stripe_nsectors;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->stripes_pending, 0);

	ASSERT(btrfs_nr_parity_stripes(bioc->map_type));
	rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type);

	return rbio;
}

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages);
	if (ret < 0)
		return ret;
	/* Mapping all sectors */
	index_stripe_sectors(rbio);
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	const int data_pages = rbio->nr_data * rbio->stripe_npages;
	int ret;

	ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
				     rbio->stripe_pages + data_pages);
	if (ret < 0)
		return ret;

	index_stripe_sectors(rbio);
	return 0;
}

/*
 * Return the total number of errors found in the vertical stripe of @sector_nr.
 *
 * @faila and @failb will also be updated to the first and second stripe
 * number of the errors.
 */
static int get_rbio_veritical_errors(struct btrfs_raid_bio *rbio, int sector_nr,
				     int *faila, int *failb)
{
	int stripe_nr;
	int found_errors = 0;

	if (faila || failb) {
		/*
		 * Both @faila and @failb should be valid pointers if any of
		 * them is specified.
		 */
		ASSERT(faila && failb);
		*faila = -1;
		*failb = -1;
	}

	for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
		int total_sector_nr = stripe_nr * rbio->stripe_nsectors + sector_nr;

		if (test_bit(total_sector_nr, rbio->error_bitmap)) {
			found_errors++;
			if (faila) {
				/* Update faila and failb. */
				if (*faila < 0)
					*faila = stripe_nr;
				else if (*failb < 0)
					*failb = stripe_nr;
			}
		}
	}
	return found_errors;
}
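
/*
 * Worked example for get_rbio_veritical_errors() above (illustrative):
 * with stripe_nsectors == 16 and errors recorded at total sector numbers
 * 3 (stripe 0) and 35 (stripe 2), querying sector_nr == 3 walks the
 * bitmap at indexes 3, 19, 35, ... and reports found_errors == 2 with
 * *faila == 0 and *failb == 2.
 */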

/*
 * Add a single sector @sector into our list of bios for IO.
 *
 * Return 0 if everything went well.
 * Return <0 for error.
 */
static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
			      struct bio_list *bio_list,
			      struct sector_ptr *sector,
			      unsigned int stripe_nr,
			      unsigned int sector_nr,
			      enum req_op op)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio *last = bio_list->tail;
	int ret;
	struct bio *bio;
	struct btrfs_io_stripe *stripe;
	u64 disk_start;

	/*
	 * Note: here stripe_nr has taken device replace into consideration,
	 * thus it can be larger than rbio->real_stripes.
	 * So here we check against bioc->num_stripes, not rbio->real_stripes.
	 */
	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes);
	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
	ASSERT(sector->page);

	stripe = &rbio->bioc->stripes[stripe_nr];
	disk_start = stripe->physical + sector_nr * sectorsize;

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev) {
		int found_errors;

		set_bit(stripe_nr * rbio->stripe_nsectors + sector_nr,
			rbio->error_bitmap);

		/* Check if we have reached tolerance early. */
		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
							 NULL, NULL);
		if (found_errors > rbio->bioc->max_errors)
			return -EIO;
		return 0;
	}

	/* see if we can add this page onto our existing bio */
	if (last) {
		u64 last_end = last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && !last->bi_status &&
		    last->bi_bdev == stripe->dev->bdev) {
			ret = bio_add_page(last, sector->page, sectorsize,
					   sector->pgoff);
			if (ret == sectorsize)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = bio_alloc(stripe->dev->bdev,
			max(BTRFS_STRIPE_LEN >> PAGE_SHIFT, 1),
			op, GFP_NOFS);
	bio->bi_iter.bi_sector = disk_start >> 9;
	bio->bi_private = rbio;

	bio_add_page(bio, sector->page, sectorsize, sector->pgoff);
	bio_list_add(bio_list, bio);
	return 0;
}

static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio_vec bvec;
	struct bvec_iter iter;
	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
		     rbio->bioc->raid_map[0];

	bio_for_each_segment(bvec, bio, iter) {
		u32 bvec_offset;

		for (bvec_offset = 0; bvec_offset < bvec.bv_len;
		     bvec_offset += sectorsize, offset += sectorsize) {
			int index = offset / sectorsize;
			struct sector_ptr *sector = &rbio->bio_sectors[index];

			sector->page = bvec.bv_page;
			sector->pgoff = bvec.bv_offset + bvec_offset;
			ASSERT(sector->pgoff < PAGE_SIZE);
		}
	}
}

/*
 * helper function to walk our bio list and populate the bio_sectors array
 * with the result. This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from sector_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list)
		index_one_bio(rbio, bio);

	spin_unlock_irq(&rbio->bio_list_lock);
}

static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio,
			       struct raid56_bio_trace_info *trace_info)
{
	const struct btrfs_io_context *bioc = rbio->bioc;
	int i;

	ASSERT(bioc);

	/* We rely on bio->bi_bdev to find the stripe number. */
	if (!bio->bi_bdev)
		goto not_found;

	for (i = 0; i < bioc->num_stripes; i++) {
		if (bio->bi_bdev != bioc->stripes[i].dev->bdev)
			continue;
		trace_info->stripe_nr = i;
		trace_info->devid = bioc->stripes[i].dev->devid;
		trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
				     bioc->stripes[i].physical;
		return;
	}

not_found:
	trace_info->devid = -1;
	trace_info->offset = -1;
	trace_info->stripe_nr = -1;
}

static inline void bio_list_put(struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		bio_put(bio);
}

/* Generate PQ for one vertical stripe. */
static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr)
{
	void **pointers = rbio->finish_pointers;
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct sector_ptr *sector;
	int stripe;
	const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6;

	/* First collect one sector from each data stripe */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		sector = sector_in_rbio(rbio, stripe, sectornr, 0);
		pointers[stripe] = kmap_local_page(sector->page) +
				   sector->pgoff;
	}

	/* Then add the parity stripe */
	sector = rbio_pstripe_sector(rbio, sectornr);
	sector->uptodate = 1;
	pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff;

	if (has_qstripe) {
		/*
		 * RAID6, add the qstripe and call the library function
		 * to fill in our p/q
		 */
		sector = rbio_qstripe_sector(rbio, sectornr);
		sector->uptodate = 1;
		pointers[stripe++] = kmap_local_page(sector->page) +
				     sector->pgoff;

		raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
					pointers);
	} else {
		/* raid5 */
		memcpy(pointers[rbio->nr_data], pointers[0], sectorsize);
		run_xor(pointers + 1, rbio->nr_data - 1, sectorsize);
	}
	for (stripe = stripe - 1; stripe >= 0; stripe--)
		kunmap_local(pointers[stripe]);
}
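
/*
 * The math behind generate_pq_vertical() above (sketch): P is the plain
 * xor of the data sectors, P = D0 ^ D1 ^ ... ^ Dn-1, while the RAID6 Q
 * syndrome weights each data sector by a power of the generator {02} in
 * GF(2^8): Q = g^0*D0 ^ g^1*D1 ^ ... ^ g^(n-1)*Dn-1.  The latter is what
 * raid6_call.gen_syndrome() computes for us.
 */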

static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
				   struct bio_list *bio_list)
{
	/* The total sector number inside the full stripe. */
	int total_sector_nr;
	int sectornr;
	int stripe;
	int ret;

	ASSERT(bio_list_size(bio_list) == 0);

	/* We should have at least one data sector. */
	ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors));

	/*
	 * Reset errors, as we may have errors inherited from a degraded
	 * write.
	 */
	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	/*
	 * Start assembly. Make bios for everything from the higher layers (the
	 * bio_list in our rbio) and our P/Q. Ignore everything else.
	 */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct sector_ptr *sector;

		stripe = total_sector_nr / rbio->stripe_nsectors;
		sectornr = total_sector_nr % rbio->stripe_nsectors;

		/* This vertical stripe has no data, skip it. */
		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;

		if (stripe < rbio->nr_data) {
			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
			if (!sector)
				continue;
		} else {
			sector = rbio_stripe_sector(rbio, stripe, sectornr);
		}

		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto error;
	}

	if (likely(!rbio->bioc->num_tgtdevs))
		return 0;

	/* Make a copy for the replace target device. */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct sector_ptr *sector;

		stripe = total_sector_nr / rbio->stripe_nsectors;
		sectornr = total_sector_nr % rbio->stripe_nsectors;

		if (!rbio->bioc->tgtdev_map[stripe]) {
			/*
			 * We can skip the whole stripe completely, note
			 * total_sector_nr will be increased by one anyway.
			 */
			ASSERT(sectornr == 0);
			total_sector_nr += rbio->stripe_nsectors - 1;
			continue;
		}

		/* This vertical stripe has no data, skip it. */
		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;

		if (stripe < rbio->nr_data) {
			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
			if (!sector)
				continue;
		} else {
			sector = rbio_stripe_sector(rbio, stripe, sectornr);
		}

		ret = rbio_add_io_sector(rbio, bio_list, sector,
					 rbio->bioc->tgtdev_map[stripe],
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto error;
	}

	return 0;
error:
	bio_list_put(bio_list);
	return -EIO;
}

static void set_rbio_range_error(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
		     rbio->bioc->raid_map[0];
	int total_nr_sector = offset >> fs_info->sectorsize_bits;

	ASSERT(total_nr_sector < rbio->nr_data * rbio->stripe_nsectors);

	bitmap_set(rbio->error_bitmap, total_nr_sector,
		   bio->bi_iter.bi_size >> fs_info->sectorsize_bits);

	/*
	 * Special handling for raid56_alloc_missing_rbio() used by
	 * scrub/replace. Unlike call path in raid56_parity_recover(), they
	 * pass an empty bio here. Thus we have to find out the missing device
	 * and mark the stripe error instead.
	 */
	if (bio->bi_iter.bi_size == 0) {
		bool found_missing = false;
		int stripe_nr;

		for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
			if (!rbio->bioc->stripes[stripe_nr].dev->bdev) {
				found_missing = true;
				bitmap_set(rbio->error_bitmap,
					   stripe_nr * rbio->stripe_nsectors,
					   rbio->stripe_nsectors);
			}
		}
		ASSERT(found_missing);
	}
}

/*
 * For the subpage case we can no longer set the page Uptodate directly for
 * stripe_pages[], so we need to locate the sector instead.
 */
static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
					     struct page *page,
					     unsigned int pgoff)
{
	int i;

	for (i = 0; i < rbio->nr_sectors; i++) {
		struct sector_ptr *sector = &rbio->stripe_sectors[i];

		if (sector->page == page && sector->pgoff == pgoff)
			return sector;
	}
	return NULL;
}

/*
 * this sets each page in the bio uptodate. It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	ASSERT(!bio_flagged(bio, BIO_CLONED));

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct sector_ptr *sector;
		int pgoff;

		for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len;
		     pgoff += sectorsize) {
			sector = find_stripe_sector(rbio, bvec->bv_page, pgoff);
			ASSERT(sector);
			if (sector)
				sector->uptodate = 1;
		}
	}
}

static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	struct bio_vec *bv = bio_first_bvec_all(bio);
	int i;

	for (i = 0; i < rbio->nr_sectors; i++) {
		struct sector_ptr *sector;

		sector = &rbio->stripe_sectors[i];
		if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
			break;
		sector = &rbio->bio_sectors[i];
		if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
			break;
	}
	ASSERT(i < rbio->nr_sectors);
	return i;
}

static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	int total_sector_nr = get_bio_sector_nr(rbio, bio);
	u32 bio_size = 0;
	struct bio_vec *bvec;
	int i;

	bio_for_each_bvec_all(bvec, bio, i)
		bio_size += bvec->bv_len;

	/*
	 * Since we can have multiple bios touching the error_bitmap, we cannot
	 * call bitmap_set() without protection.
	 *
	 * Instead use set_bit() for each bit, as set_bit() itself is atomic.
	 */
	for (i = total_sector_nr; i < total_sector_nr +
	     (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++)
		set_bit(i, rbio->error_bitmap);
}

/* Verify the data sectors at read time. */
static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
				    struct bio *bio)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	int total_sector_nr = get_bio_sector_nr(rbio, bio);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	/* No data csum for the whole stripe, no need to verify. */
	if (!rbio->csum_bitmap || !rbio->csum_buf)
		return;

	/* P/Q stripes, they have no data csum to verify against. */
	if (total_sector_nr >= rbio->nr_data * rbio->stripe_nsectors)
		return;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		int bv_offset;

		for (bv_offset = bvec->bv_offset;
		     bv_offset < bvec->bv_offset + bvec->bv_len;
		     bv_offset += fs_info->sectorsize, total_sector_nr++) {
			u8 csum_buf[BTRFS_CSUM_SIZE];
			u8 *expected_csum = rbio->csum_buf +
					    total_sector_nr * fs_info->csum_size;
			int ret;

			/* No csum for this sector, skip to the next sector. */
			if (!test_bit(total_sector_nr, rbio->csum_bitmap))
				continue;

			ret = btrfs_check_sector_csum(fs_info, bvec->bv_page,
					bv_offset, csum_buf, expected_csum);
			if (ret < 0)
				set_bit(total_sector_nr, rbio->error_bitmap);
		}
	}
}

static void raid_wait_read_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status) {
		rbio_update_error_bitmap(rbio, bio);
	} else {
		set_bio_pages_uptodate(rbio, bio);
		verify_bio_data_sectors(rbio, bio);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&rbio->stripes_pending))
		wake_up(&rbio->io_wait);
}

static void submit_read_wait_bio_list(struct btrfs_raid_bio *rbio,
				      struct bio_list *bio_list)
{
	struct bio *bio;

	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
	while ((bio = bio_list_pop(bio_list))) {
		bio->bi_end_io = raid_wait_read_end_io;

		if (trace_raid56_scrub_read_recover_enabled()) {
			struct raid56_bio_trace_info trace_info = { 0 };

			bio_get_trace_info(rbio, bio, &trace_info);
			trace_raid56_scrub_read_recover(rbio, bio, &trace_info);
		}
		submit_bio(bio);
	}

	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
}

static int alloc_rbio_data_pages(struct btrfs_raid_bio *rbio)
{
	const int data_pages = rbio->nr_data * rbio->stripe_npages;
	int ret;

	ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages);
	if (ret < 0)
		return ret;

	index_stripe_sectors(rbio);
	return 0;
}

/*
 * We use plugging callbacks to collect full stripes.  Any time we get a
 * partial stripe write while plugged we collect it into a list.  When the
 * unplug comes down, we sort the list by logical block number and merge
 * everything we can into the same rbios.
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct work_struct work;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, const struct list_head *a,
		    const struct list_head *b)
{
	const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						       plug_list);
	const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						       plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}

static void raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug = container_of(cb, struct btrfs_plug_cb, cb);
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	list_sort(NULL, &plug->rbio_list, plug_cmp);

	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* We have a full stripe, queue it down. */
			start_async_work(cur, rmw_rbio_work);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				free_raid_bio(cur);
				continue;
			}
			start_async_work(last, rmw_rbio_work);
		}
		last = cur;
	}
	if (last)
		start_async_work(last, rmw_rbio_work);
	kfree(plug);
}

/* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
{
	const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
	const u64 full_stripe_start = rbio->bioc->raid_map[0];
	const u32 orig_len = orig_bio->bi_iter.bi_size;
	const u32 sectorsize = fs_info->sectorsize;
	u64 cur_logical;

	ASSERT(orig_logical >= full_stripe_start &&
	       orig_logical + orig_len <= full_stripe_start +
	       rbio->nr_data * BTRFS_STRIPE_LEN);

	bio_list_add(&rbio->bio_list, orig_bio);
	rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;

	/* Update the dbitmap. */
	for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
	     cur_logical += sectorsize) {
		int bit = ((u32)(cur_logical - full_stripe_start) >>
			   fs_info->sectorsize_bits) % rbio->stripe_nsectors;

		set_bit(bit, &rbio->dbitmap);
	}
}
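
/*
 * Worked example for rbio_add_bio() above (illustrative): with 4KiB
 * sectors, stripe_nsectors == 16.  An 8KiB bio starting at
 * full_stripe_start + 68KiB covers the sectors at offsets 68KiB and
 * 72KiB, i.e. bits (17 % 16) == 1 and (18 % 16) == 2 of dbitmap, no
 * matter which data stripe actually holds them.
 */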

/*
 * our main entry point for writes from the rest of the FS.
 */
void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio)) {
		bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
		bio_endio(bio);
		return;
	}
	rbio->operation = BTRFS_RBIO_WRITE;
	rbio_add_bio(rbio, bio);

	/*
	 * Don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (!rbio_is_full(rbio)) {
		cb = blk_check_plugged(raid_unplug, fs_info, sizeof(*plug));
		if (cb) {
			plug = container_of(cb, struct btrfs_plug_cb, cb);
			if (!plug->info) {
				plug->info = fs_info;
				INIT_LIST_HEAD(&plug->rbio_list);
			}
			list_add_tail(&rbio->plug_list, &plug->rbio_list);
			return;
		}
	}

	/*
	 * Either we don't have any existing plug, or we're doing a full stripe,
	 * queue the rmw work now.
	 */
	start_async_work(rbio, rmw_rbio_work);
}

static int verify_one_sector(struct btrfs_raid_bio *rbio,
			     int stripe_nr, int sector_nr)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	struct sector_ptr *sector;
	u8 csum_buf[BTRFS_CSUM_SIZE];
	u8 *csum_expected;
	int ret;

	if (!rbio->csum_bitmap || !rbio->csum_buf)
		return 0;

	/* No way to verify P/Q as they are not covered by data csum. */
	if (stripe_nr >= rbio->nr_data)
		return 0;
	/*
	 * If we're rebuilding a read, we have to use pages from the
	 * bio list if possible.
	 */
	if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	     rbio->operation == BTRFS_RBIO_REBUILD_MISSING)) {
		sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
	} else {
		sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
	}

	ASSERT(sector->page);

	csum_expected = rbio->csum_buf +
			(stripe_nr * rbio->stripe_nsectors + sector_nr) *
			fs_info->csum_size;
	ret = btrfs_check_sector_csum(fs_info, sector->page, sector->pgoff,
				      csum_buf, csum_expected);
	return ret;
}

/*
 * Recover a vertical stripe specified by @sector_nr.
 * @*pointers are the pre-allocated pointers by the caller, so we don't
 * need to allocate/free the pointers again and again.
 */
static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
			    void **pointers, void **unmap_array)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	struct sector_ptr *sector;
	const u32 sectorsize = fs_info->sectorsize;
	int found_errors;
	int faila;
	int failb;
	int stripe_nr;
	int ret = 0;

	/*
	 * Now we just use bitmap to mark the horizontal stripes in
	 * which we have data when doing parity scrub.
	 */
	if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
	    !test_bit(sector_nr, &rbio->dbitmap))
		return 0;

	found_errors = get_rbio_veritical_errors(rbio, sector_nr, &faila,
						 &failb);
	/*
	 * No errors in the vertical stripe, skip it. Can happen for recovery
	 * where only part of a stripe failed the csum check.
	 */
	if (!found_errors)
		return 0;
1746
1747 if (found_errors > rbio->bioc->max_errors)
1748 return -EIO;
1749
	/*
	 * Setup our array of pointers with sectors from each stripe
	 *
	 * NOTE: store a duplicate array of pointers to preserve the
	 * pointer order.
	 */
	for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
		/*
		 * If we're rebuilding a read, we have to use pages from the
		 * bio list if possible.
		 */
		if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
		    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
			sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
		} else {
			sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
		}
		ASSERT(sector->page);
		pointers[stripe_nr] = kmap_local_page(sector->page) +
				      sector->pgoff;
		unmap_array[stripe_nr] = pointers[stripe_nr];
	}

	/* All raid6 handling here */
	if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
		/* Single failure, rebuild from parity raid5 style */
		if (failb < 0) {
			if (faila == rbio->nr_data)
				/*
				 * Just the P stripe has failed, without
				 * a bad data or Q stripe.
				 * We have nothing to do, just skip the
				 * recovery for this stripe.
				 */
				goto cleanup;
			/*
			 * A single data stripe failure in raid6 is rebuilt
			 * in the pstripe code below.
			 */
			goto pstripe;
		}

		/*
		 * If the Q stripe failed, do a pstripe reconstruction from
		 * the xors.
		 * If both the Q stripe and the P stripe failed, we're here
		 * due to a crc mismatch and we can't give them the data
		 * they want.
		 */
		if (rbio->bioc->raid_map[failb] == RAID6_Q_STRIPE) {
			if (rbio->bioc->raid_map[faila] == RAID5_P_STRIPE)
				/*
				 * Only P and Q are corrupted.
				 * We only care about data stripe recovery,
				 * so we can skip this vertical stripe.
				 */
				goto cleanup;
			/*
			 * Otherwise we have one bad data stripe and
			 * a good P stripe. raid5!
			 */
			goto pstripe;
		}

		if (rbio->bioc->raid_map[failb] == RAID5_P_STRIPE) {
			raid6_datap_recov(rbio->real_stripes, sectorsize,
					  faila, pointers);
		} else {
			raid6_2data_recov(rbio->real_stripes, sectorsize,
					  faila, failb, pointers);
		}
	} else {
		void *p;

		/* Rebuild from P stripe here (raid5 or raid6). */
		ASSERT(failb == -1);
pstripe:
		/* Copy parity block into failed block to start with */
		memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize);

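		/*
		 * A hedged example of the rebuild below, assuming 4 data
		 * stripes (nr_data == 4) and faila == 1:
		 *
		 *   pointers = [D0, D1, D2, D3, P]  (the D1 buffer now
		 *                                    holds a copy of P)
		 *   after the rotation:
		 *   pointers = [D0, D2, D3, D1]
		 *
		 * run_xor() then folds the first nr_data - 1 buffers into
		 * the last one, so the failed buffer ends up holding
		 * P ^ D0 ^ D2 ^ D3, which is exactly D1 for xor parity.
		 */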
		/* Rearrange the pointer array */
		p = pointers[faila];
		for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1;
		     stripe_nr++)
			pointers[stripe_nr] = pointers[stripe_nr + 1];
		pointers[rbio->nr_data - 1] = p;

		/* Xor in the rest */
		run_xor(pointers, rbio->nr_data - 1, sectorsize);
	}

	/*
	 * No matter if this is a RMW or recovery, we should have all failed
	 * sectors repaired in the vertical stripe, thus they are now
	 * uptodate.
	 * In particular, if we decide to cache the rbio, we need at least
	 * all data sectors uptodate.
	 *
	 * If possible, also check whether the repaired sector matches its
	 * data checksum.
	 */
	if (faila >= 0) {
		ret = verify_one_sector(rbio, faila, sector_nr);
		if (ret < 0)
			goto cleanup;

		sector = rbio_stripe_sector(rbio, faila, sector_nr);
		sector->uptodate = 1;
	}
	if (failb >= 0) {
		ret = verify_one_sector(rbio, failb, sector_nr);
		if (ret < 0)
			goto cleanup;

		sector = rbio_stripe_sector(rbio, failb, sector_nr);
		sector->uptodate = 1;
	}

cleanup:
	for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--)
		kunmap_local(unmap_array[stripe_nr]);
	return ret;
}

static int recover_sectors(struct btrfs_raid_bio *rbio)
{
	void **pointers = NULL;
	void **unmap_array = NULL;
	int sectornr;
	int ret = 0;

	/*
	 * @pointers array stores the pointer for each sector.
	 *
	 * @unmap_array stores a copy of the pointers that does not get
	 * reordered during reconstruction, so that kunmap_local works.
	 */
	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers || !unmap_array) {
		ret = -ENOMEM;
		goto out;
	}

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
		ret = recover_vertical(rbio, sectornr, pointers, unmap_array);
		if (ret < 0)
			break;
	}

out:
	kfree(pointers);
	kfree(unmap_array);
	return ret;
}

static void recover_rbio(struct btrfs_raid_bio *rbio)
{
	struct bio_list bio_list = BIO_EMPTY_LIST;
	int total_sector_nr;
	int ret = 0;

	/*
	 * Either we're doing recovery for a read failure or a degraded
	 * write, the caller should have set the error bitmap correctly.
	 */
	ASSERT(bitmap_weight(rbio->error_bitmap, rbio->nr_sectors));

	/* For recovery, we need to read all sectors including P/Q. */
	ret = alloc_rbio_pages(rbio);
	if (ret < 0)
		goto out;

	index_rbio_pages(rbio);

	/*
	 * Read everything that hasn't failed. However, this time we will
	 * not trust any cached sector.
	 * A cached sector can contain stale data that the higher layer is
	 * not reading, so always re-read everything in the recovery path.
	 */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		int stripe = total_sector_nr / rbio->stripe_nsectors;
		int sectornr = total_sector_nr % rbio->stripe_nsectors;
		struct sector_ptr *sector;

		/*
		 * Skip the range which has an error. It can be a range
		 * which is marked as error (for a csum mismatch), or it
		 * can be a missing device.
		 */
		if (!rbio->bioc->stripes[stripe].dev->bdev ||
		    test_bit(total_sector_nr, rbio->error_bitmap)) {
			/*
			 * Also set the error bit for a missing device,
			 * which may not yet have its error bit set.
			 */
			set_bit(total_sector_nr, rbio->error_bitmap);
			continue;
		}

		sector = rbio_stripe_sector(rbio, stripe, sectornr);
		ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
					 sectornr, REQ_OP_READ);
		if (ret < 0) {
			bio_list_put(&bio_list);
			goto out;
		}
	}

	submit_read_wait_bio_list(rbio, &bio_list);
	ret = recover_sectors(rbio);
out:
	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
}

static void recover_rbio_work(struct work_struct *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	if (!lock_stripe_add(rbio))
		recover_rbio(rbio);
}

static void recover_rbio_work_locked(struct work_struct *work)
{
	recover_rbio(container_of(work, struct btrfs_raid_bio, work));
}

static void set_rbio_raid6_extra_error(struct btrfs_raid_bio *rbio, int mirror_num)
{
	bool found = false;
	int sector_nr;

	/*
	 * This is for the extra RAID6 recovery tries, thus the mirror number
	 * should be larger than 2.
	 * Mirror 1 means read from data stripes. Mirror 2 means rebuild
	 * using RAID5 methods.
	 */
	ASSERT(mirror_num > 2);
	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
		int found_errors;
		int faila;
		int failb;

		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
							 &faila, &failb);
		/* This vertical stripe doesn't have errors. */
		if (!found_errors)
			continue;

		/*
		 * If we found errors, there should be only one error marked
		 * by the previous set_rbio_range_error().
		 */
		ASSERT(found_errors == 1);
		found = true;

		/* Now select another stripe to mark as error. */
		failb = rbio->real_stripes - (mirror_num - 1);
		if (failb <= faila)
			failb--;

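		/*
		 * A hedged example of the mapping above, assuming a
		 * 4 data + P + Q layout (real_stripes == 6, indexes 0-3
		 * are data, 4 is P, 5 is Q): mirror_num == 3 marks stripe
		 * 4 (P) as failed, forcing a rebuild from Q, mirror_num == 4
		 * marks stripe 3, and so on. The failb-- above skips over
		 * the stripe already marked through @faila.
		 */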
		/* Set the extra bit in the error bitmap. */
		if (failb >= 0)
			set_bit(failb * rbio->stripe_nsectors + sector_nr,
				rbio->error_bitmap);
	}

	/* We should have found at least one vertical stripe with an error. */
	ASSERT(found);
}

/*
 * This is the main entry point for reads from the higher layers. It is
 * really only called when the normal read path had a failure, so we assume
 * the bio they send down corresponds to a failed part of the drive.
 */
void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
			   int mirror_num)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio)) {
		bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
		bio_endio(bio);
		return;
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	rbio_add_bio(rbio, bio);

	set_rbio_range_error(rbio, bio);

	/*
	 * Loop retry:
	 * for 'mirror_num == 2', reconstruct from all other stripes.
	 * for 'mirror_num > 2', select a stripe to fail on every retry.
	 */
	if (mirror_num > 2)
		set_rbio_raid6_extra_error(rbio, mirror_num);

	start_async_work(rbio, recover_rbio_work);
}

static void fill_data_csums(struct btrfs_raid_bio *rbio)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info,
						       rbio->bioc->raid_map[0]);
	const u64 start = rbio->bioc->raid_map[0];
	const u32 len = (rbio->nr_data * rbio->stripe_nsectors) <<
			fs_info->sectorsize_bits;
	int ret;

	/* The rbio should not have its csum buffer initialized. */
	ASSERT(!rbio->csum_buf && !rbio->csum_bitmap);

	/*
	 * Skip the csum search if:
	 *
	 * - The rbio doesn't belong to data block groups
	 *   Then we are doing IO for tree blocks, no need to search csums.
	 *
	 * - The rbio belongs to mixed block groups
	 *   This is to avoid a deadlock: we're already holding the full
	 *   stripe lock, so if we trigger a metadata read and it needs to
	 *   do raid56 recovery, we would deadlock.
	 */
	if (!(rbio->bioc->map_type & BTRFS_BLOCK_GROUP_DATA) ||
	    rbio->bioc->map_type & BTRFS_BLOCK_GROUP_METADATA)
		return;

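	/*
	 * A hedged sizing example: with 2 data stripes, 64K stripe length,
	 * 4K sectorsize and crc32c csums (csum_size == 4), @len covers
	 * 2 * 16 * 4K == 128K of data, so @csum_buf below needs
	 * 2 * 16 * 4 == 128 bytes and @csum_bitmap needs 32 bits.
	 */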
	rbio->csum_buf = kzalloc(rbio->nr_data * rbio->stripe_nsectors *
				 fs_info->csum_size, GFP_NOFS);
	rbio->csum_bitmap = bitmap_zalloc(rbio->nr_data * rbio->stripe_nsectors,
					  GFP_NOFS);
	if (!rbio->csum_buf || !rbio->csum_bitmap) {
		ret = -ENOMEM;
		goto error;
	}

	ret = btrfs_lookup_csums_bitmap(csum_root, start, start + len - 1,
					rbio->csum_buf, rbio->csum_bitmap);
	if (ret < 0)
		goto error;
	if (bitmap_empty(rbio->csum_bitmap, len >> fs_info->sectorsize_bits))
		goto no_csum;
	return;

error:
	/*
	 * We failed to allocate memory or to grab the csums, but it's not
	 * fatal, we can still continue. It's just better to warn users
	 * that RMW is no longer safe for this particular sub-stripe write.
	 */
	btrfs_warn_rl(fs_info,
"sub-stripe write for full stripe %llu is not safe, failed to get csum: %d",
		      rbio->bioc->raid_map[0], ret);
no_csum:
	kfree(rbio->csum_buf);
	bitmap_free(rbio->csum_bitmap);
	rbio->csum_buf = NULL;
	rbio->csum_bitmap = NULL;
}

static int rmw_read_wait_recover(struct btrfs_raid_bio *rbio)
{
	struct bio_list bio_list = BIO_EMPTY_LIST;
	int total_sector_nr;
	int ret = 0;

	/*
	 * Fill the data csums we need for data verification. We need to
	 * fill csum_bitmap/csum_buf first, as our endio function will try
	 * to verify the data sectors.
	 */
	fill_data_csums(rbio);

	/*
	 * Build a list of bios to read all sectors (including data and
	 * P/Q), so that the later csum verification and recovery can work
	 * on complete stripes.
	 */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct sector_ptr *sector;
		int stripe = total_sector_nr / rbio->stripe_nsectors;
		int sectornr = total_sector_nr % rbio->stripe_nsectors;

		sector = rbio_stripe_sector(rbio, stripe, sectornr);
		ret = rbio_add_io_sector(rbio, &bio_list, sector,
					 stripe, sectornr, REQ_OP_READ);
		if (ret) {
			bio_list_put(&bio_list);
			return ret;
		}
	}

	/*
	 * We may or may not have any corrupted sectors (including missing
	 * devices and csum mismatches), just let recover_sectors() handle
	 * them all.
	 */
	submit_read_wait_bio_list(rbio, &bio_list);
	return recover_sectors(rbio);
}

static void raid_wait_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;

	if (err)
		rbio_update_error_bitmap(rbio, bio);
	bio_put(bio);
	if (atomic_dec_and_test(&rbio->stripes_pending))
		wake_up(&rbio->io_wait);
}

static void submit_write_bios(struct btrfs_raid_bio *rbio,
			      struct bio_list *bio_list)
{
	struct bio *bio;

	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
	while ((bio = bio_list_pop(bio_list))) {
		bio->bi_end_io = raid_wait_write_end_io;

		if (trace_raid56_write_stripe_enabled()) {
			struct raid56_bio_trace_info trace_info = { 0 };

			bio_get_trace_info(rbio, bio, &trace_info);
			trace_raid56_write_stripe(rbio, bio, &trace_info);
		}
		submit_bio(bio);
	}
}

/*
 * Determine if we need to read any sector from the disk.
 * Should only be used in the RMW path, to skip a cached rbio.
 */
static bool need_read_stripe_sectors(struct btrfs_raid_bio *rbio)
{
	int i;

	for (i = 0; i < rbio->nr_data * rbio->stripe_nsectors; i++) {
		struct sector_ptr *sector = &rbio->stripe_sectors[i];

		/*
		 * We have a sector which doesn't have a page nor is
		 * uptodate, thus this rbio cannot be a cached one, as a
		 * cached one must have all its data sectors present and
		 * uptodate.
		 */
		if (!sector->page || !sector->uptodate)
			return true;
	}
	return false;
}

static void rmw_rbio(struct btrfs_raid_bio *rbio)
{
	struct bio_list bio_list;
	int sectornr;
	int ret = 0;

	/*
	 * Allocate the pages for parity first, as P/Q pages will always be
	 * needed for both full-stripe and sub-stripe writes.
	 */
	ret = alloc_rbio_parity_pages(rbio);
	if (ret < 0)
		goto out;

	/*
	 * For a full stripe write, or when every data sector is already
	 * cached, we can go to the write path immediately.
	 */
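	/*
	 * A hedged example: with 2 data stripes of 64K each, a 128K write
	 * aligned to the full stripe needs no read at all, while a 4K
	 * write into the same full stripe must first read the missing
	 * data sectors before P/Q can be regenerated.
	 */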
	if (!rbio_is_full(rbio) && need_read_stripe_sectors(rbio)) {
		/*
		 * Now we're doing a sub-stripe write, and we also need all
		 * the data stripes to do the full RMW.
		 */
		ret = alloc_rbio_data_pages(rbio);
		if (ret < 0)
			goto out;

		index_rbio_pages(rbio);

		ret = rmw_read_wait_recover(rbio);
		if (ret < 0)
			goto out;
	}

	/*
	 * At this stage we're not allowed to add any new bios to the
	 * bio list any more, anyone else that wants to change this stripe
	 * needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	index_rbio_pages(rbio);

	/*
	 * We don't cache full rbios because we're assuming the higher
	 * layers are unlikely to use this area of the disk again soon. If
	 * they do use it again, hopefully they will send another full bio.
	 */
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++)
		generate_pq_vertical(rbio, sectornr);

	bio_list_init(&bio_list);
	ret = rmw_assemble_write_bios(rbio, &bio_list);
	if (ret < 0)
		goto out;

	/* We should have at least one bio assembled. */
	ASSERT(bio_list_size(&bio_list));
	submit_write_bios(rbio, &bio_list);
	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);

	/* We may have more errors than our tolerance during the read. */
	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
		int found_errors;

		found_errors = get_rbio_veritical_errors(rbio, sectornr, NULL, NULL);
		if (found_errors > rbio->bioc->max_errors) {
			ret = -EIO;
			break;
		}
	}
out:
	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
}

static void rmw_rbio_work(struct work_struct *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	if (lock_stripe_add(rbio) == 0)
		rmw_rbio(rbio);
}

static void rmw_rbio_work_locked(struct work_struct *work)
{
	rmw_rbio(container_of(work, struct btrfs_raid_bio, work));
}

/*
 * The following code is used to scrub/replace the parity stripe.
 *
 * Caller must have already increased bio_counter for getting @bioc.
 *
 * Note: We need to make sure that all the pages added into the
 * scrub/replace raid bio are correct and will not be changed during the
 * scrub/replace, i.e. those pages hold only metadata or file data
 * protected by checksums.
 */

struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
				struct btrfs_io_context *bioc,
				struct btrfs_device *scrub_dev,
				unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion
	 * handler and to make the scrub rbio similar to the other types.
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	/*
	 * After mapping the bioc with BTRFS_MAP_WRITE, parities have been
	 * sorted to the end position, so this search can start from the
	 * first parity stripe.
	 */
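	/*
	 * E.g. for a 4 data + P + Q bioc (real_stripes == 6), stripes[4]
	 * and stripes[5] are the P and Q devices, so @scrubp ends up as 4
	 * or 5 depending on which one matches @scrub_dev.
	 */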
	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
		if (bioc->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}
	ASSERT(i < rbio->real_stripes);

	bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors);
	return rbio;
}

/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    unsigned int pgoff, u64 logical)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bioc->raid_map[0]);
	ASSERT(logical + sectorsize <= rbio->bioc->raid_map[0] +
	       BTRFS_STRIPE_LEN * rbio->nr_data);
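	/*
	 * Data stripes cover a contiguous range of logical addresses
	 * inside a full stripe, so the sector index can be derived
	 * directly from the logical offset. A hedged example with 64K
	 * stripe length and 4K sectorsize: logical == raid_map[0] + 68K
	 * gives index 17, i.e. sector 1 of data stripe 1.
	 */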
	stripe_offset = (int)(logical - rbio->bioc->raid_map[0]);
	index = stripe_offset / sectorsize;
	rbio->bio_sectors[index].page = page;
	rbio->bio_sectors[index].pgoff = pgoff;
}

/*
 * We only scrub the parity for which we have correct data on the same
 * horizontal stripe, so we don't need to allocate pages for all the
 * stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	int total_sector_nr;

	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct page *page;
		int sectornr = total_sector_nr % rbio->stripe_nsectors;
		int index = (total_sector_nr * sectorsize) >> PAGE_SHIFT;

		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;
		if (rbio->stripe_pages[index])
			continue;
		page = alloc_page(GFP_NOFS);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[index] = page;
	}
	index_stripe_sectors(rbio);
	return 0;
}

static int finish_parity_scrub(struct btrfs_raid_bio *rbio, int need_check)
{
	struct btrfs_io_context *bioc = rbio->bioc;
	const u32 sectorsize = bioc->fs_info->sectorsize;
	void **pointers = rbio->finish_pointers;
	unsigned long *pbitmap = &rbio->finish_pbitmap;
	int nr_data = rbio->nr_data;
	int stripe;
	int sectornr;
	bool has_qstripe;
	struct sector_ptr p_sector = { 0 };
	struct sector_ptr q_sector = { 0 };
	struct bio_list bio_list;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1)
		has_qstripe = false;
	else if (rbio->real_stripes - rbio->nr_data == 2)
		has_qstripe = true;
	else
		BUG();

	if (bioc->num_tgtdevs && bioc->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors);
	}

	/*
	 * Because the higher layers (the scrubber) are unlikely to use
	 * this area of the disk again soon, don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_sector.page = alloc_page(GFP_NOFS);
	if (!p_sector.page)
		return -ENOMEM;
	p_sector.pgoff = 0;
	p_sector.uptodate = 1;

	if (has_qstripe) {
		/* RAID6, allocate and map temp space for the Q stripe */
		q_sector.page = alloc_page(GFP_NOFS);
		if (!q_sector.page) {
			__free_page(p_sector.page);
			p_sector.page = NULL;
			return -ENOMEM;
		}
		q_sector.pgoff = 0;
		q_sector.uptodate = 1;
		pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page);
	}

	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	/* Map the parity stripe just once */
	pointers[nr_data] = kmap_local_page(p_sector.page);

	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
		struct sector_ptr *sector;
		void *parity;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			sector = sector_in_rbio(rbio, stripe, sectornr, 0);
			pointers[stripe] = kmap_local_page(sector->page) +
					   sector->pgoff;
		}

		if (has_qstripe) {
			/* RAID6, call the library function to fill in our P/Q */
			raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], sectorsize);
			run_xor(pointers + 1, nr_data - 1, sectorsize);
		}

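		/*
		 * Either way pointers[nr_data] (and pointers[nr_data + 1]
		 * on RAID6) now hold the expected parity; for xor parity
		 * that is simply P == D0 ^ D1 ^ ... ^ Dn-1. The comparison
		 * below only rewrites the on-disk parity when it differs
		 * from the recomputed one.
		 */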
		/* Check the scrubbed parity and repair it */
		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
		parity = kmap_local_page(sector->page) + sector->pgoff;
		if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
			memcpy(parity, pointers[rbio->scrubp], sectorsize);
		else
			/* The parity is right, no need to write it back. */
			bitmap_clear(&rbio->dbitmap, sectornr, 1);
		kunmap_local(parity);

		for (stripe = nr_data - 1; stripe >= 0; stripe--)
			kunmap_local(pointers[stripe]);
	}

	kunmap_local(pointers[nr_data]);
	__free_page(p_sector.page);
	p_sector.page = NULL;
	if (q_sector.page) {
		kunmap_local(pointers[rbio->real_stripes - 1]);
		__free_page(q_sector.page);
		q_sector.page = NULL;
	}

writeback:
	/*
	 * Time to start writing. Make bios for everything from the higher
	 * layers (the bio_list in our rbio) and our P/Q. Ignore everything
	 * else.
	 */
	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
		struct sector_ptr *sector;

		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
		ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
		struct sector_ptr *sector;

		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
		ret = rbio_add_io_sector(rbio, &bio_list, sector,
					 bioc->tgtdev_map[rbio->scrubp],
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto cleanup;
	}

submit_write:
	submit_write_bios(rbio, &bio_list);
	return 0;

cleanup:
	bio_list_put(&bio_list);
	return ret;
}

static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}

static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	void **pointers = NULL;
	void **unmap_array = NULL;
	int sector_nr;
	int ret = 0;

	/*
	 * @pointers array stores the pointer for each sector.
	 *
	 * @unmap_array stores a copy of the pointers that does not get
	 * reordered during reconstruction, so that kunmap_local works.
	 */
	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers || !unmap_array) {
		ret = -ENOMEM;
		goto out;
	}

	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
		int dfail = 0, failp = -1;
		int faila;
		int failb;
		int found_errors;

		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
							 &faila, &failb);
		if (found_errors > rbio->bioc->max_errors) {
			ret = -EIO;
			goto out;
		}
		if (found_errors == 0)
			continue;

		/* We should have at least one error here. */
		ASSERT(faila >= 0 || failb >= 0);

		if (is_data_stripe(rbio, faila))
			dfail++;
		else if (is_parity_stripe(faila))
			failp = faila;

		if (is_data_stripe(rbio, failb))
			dfail++;
		else if (is_parity_stripe(failb))
			failp = failb;
		/*
		 * Because we can not use the parity being scrubbed to
		 * repair the data, our repair capability is reduced by
		 * one. (In the RAID5 case we can not repair anything.)
		 */
		if (dfail > rbio->bioc->max_errors - 1) {
			ret = -EIO;
			goto out;
		}
		/*
		 * If all the data stripes are good and only the parity is
		 * corrupted, just repair the parity, no need to recover
		 * any data stripes.
		 */
		if (dfail == 0)
			continue;

		/*
		 * Getting here means we have one corrupted data stripe and
		 * one corrupted parity on RAID6. If the corrupted parity
		 * is the one being scrubbed, we can luckily use the other
		 * parity to repair the data; otherwise we can not repair
		 * the data stripe.
		 */
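		/*
		 * A hedged example while scrubbing P on RAID6
		 * (scrubp == P): if D2 and P are the corrupted stripes
		 * (failp == scrubp), D2 can be rebuilt from Q and P is
		 * regenerated afterwards. But if D2 and Q are corrupted
		 * (failp != scrubp), rebuilding D2 would require trusting
		 * the very parity we are scrubbing, so we have to give up.
		 */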
		if (failp != rbio->scrubp) {
			ret = -EIO;
			goto out;
		}

		ret = recover_vertical(rbio, sector_nr, pointers, unmap_array);
		if (ret < 0)
			goto out;
	}
out:
	kfree(pointers);
	kfree(unmap_array);
	return ret;
}

static int scrub_assemble_read_bios(struct btrfs_raid_bio *rbio)
{
	struct bio_list bio_list = BIO_EMPTY_LIST;
	int total_sector_nr;
	int ret = 0;

	/* Build a list of bios to read all the missing parts. */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		int sectornr = total_sector_nr % rbio->stripe_nsectors;
		int stripe = total_sector_nr / rbio->stripe_nsectors;
		struct sector_ptr *sector;

		/* No data in the vertical stripe, no need to read. */
		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;

		/*
		 * We want to find all the sectors missing from the rbio
		 * and read them from the disk. If sector_in_rbio() finds
		 * a sector in the bio list we don't need to read it off
		 * the stripe.
		 */
		sector = sector_in_rbio(rbio, stripe, sectornr, 1);
		if (sector)
			continue;

		sector = rbio_stripe_sector(rbio, stripe, sectornr);
		/*
		 * The bio cache may have handed us an uptodate sector. If
		 * so, use it.
		 */
		if (sector->uptodate)
			continue;

		ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
					 sectornr, REQ_OP_READ);
		if (ret) {
			bio_list_put(&bio_list);
			return ret;
		}
	}

	submit_read_wait_bio_list(rbio, &bio_list);
	return 0;
}

static void scrub_rbio(struct btrfs_raid_bio *rbio)
{
	bool need_check = false;
	int sector_nr;
	int ret;

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto out;

	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	ret = scrub_assemble_read_bios(rbio);
	if (ret < 0)
		goto out;

	/* We may have some failures, recover the failed sectors first. */
	ret = recover_scrub_rbio(rbio);
	if (ret < 0)
		goto out;

	/*
	 * Now every sector is properly prepared, we can finish the scrub
	 * and write back the good content.
	 */
	ret = finish_parity_scrub(rbio, need_check);
	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
		int found_errors;

		found_errors = get_rbio_veritical_errors(rbio, sector_nr, NULL, NULL);
		if (found_errors > rbio->bioc->max_errors) {
			ret = -EIO;
			break;
		}
	}
out:
	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
}

static void scrub_rbio_work_locked(struct work_struct *work)
{
	scrub_rbio(container_of(work, struct btrfs_raid_bio, work));
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, scrub_rbio_work_locked);
}

/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion
	 * handler and to make this rbio similar to the other types.
	 */
	ASSERT(!bio->bi_iter.bi_size);

	set_rbio_range_error(rbio, bio);

	return rbio;
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	start_async_work(rbio, recover_rbio_work);
}