Lines matching references to s (a short illustrative container_of() sketch follows the listing)
503 struct search *s = container_of(cl, struct search, cl); in bch_cache_read_endio() local
513 s->iop.status = bio->bi_status; in bch_cache_read_endio()
515 ptr_stale(s->iop.c, &b->key, 0)) { in bch_cache_read_endio()
516 atomic_long_inc(&s->iop.c->cache_read_races); in bch_cache_read_endio()
517 s->iop.status = BLK_STS_IOERR; in bch_cache_read_endio()
520 bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache"); in bch_cache_read_endio()
529 struct search *s = container_of(op, struct search, op); in cache_lookup_fn() local
530 struct bio *n, *bio = &s->bio.bio; in cache_lookup_fn()
534 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0) in cache_lookup_fn()
537 if (KEY_INODE(k) != s->iop.inode || in cache_lookup_fn()
540 unsigned int sectors = KEY_INODE(k) == s->iop.inode in cache_lookup_fn()
544 int ret = s->d->cache_miss(b, s, bio, sectors); in cache_lookup_fn()
562 s->read_dirty_data = true; in cache_lookup_fn()
566 GFP_NOIO, &s->d->bio_split); in cache_lookup_fn()
571 bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key); in cache_lookup_fn()
572 bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key); in cache_lookup_fn()
575 n->bi_private = &s->cl; in cache_lookup_fn()
594 closure_type(s, struct search, iop.cl); in CLOSURE_CALLBACK()
595 struct bio *bio = &s->bio.bio; in CLOSURE_CALLBACK()
599 bch_btree_op_init(&s->op, -1); in CLOSURE_CALLBACK()
601 ret = bch_btree_map_keys(&s->op, s->iop.c, in CLOSURE_CALLBACK()
602 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0), in CLOSURE_CALLBACK()
620 if (s->d && s->d->c && in CLOSURE_CALLBACK()
621 !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) { in CLOSURE_CALLBACK()
622 dc = container_of(s->d, struct cached_dev, disk); in CLOSURE_CALLBACK()
624 s->recoverable = false; in CLOSURE_CALLBACK()
626 if (!s->iop.status) in CLOSURE_CALLBACK()
627 s->iop.status = BLK_STS_IOERR; in CLOSURE_CALLBACK()
640 struct search *s = container_of(cl, struct search, cl); in request_endio() local
642 s->iop.status = bio->bi_status; in request_endio()
644 s->recoverable = false; in request_endio()
656 struct search *s = container_of(cl, struct search, cl); in backing_request_endio() local
657 struct cached_dev *dc = container_of(s->d, in backing_request_endio()
666 if (unlikely(s->iop.writeback && in backing_request_endio()
672 s->iop.status = bio->bi_status; in backing_request_endio()
674 s->recoverable = false; in backing_request_endio()
683 static void bio_complete(struct search *s) in bio_complete() argument
685 if (s->orig_bio) { in bio_complete()
687 bio_end_io_acct_remapped(s->orig_bio, s->start_time, in bio_complete()
688 s->orig_bdev); in bio_complete()
689 trace_bcache_request_end(s->d, s->orig_bio); in bio_complete()
690 s->orig_bio->bi_status = s->iop.status; in bio_complete()
691 bio_endio(s->orig_bio); in bio_complete()
692 s->orig_bio = NULL; in bio_complete()
696 static void do_bio_hook(struct search *s, in do_bio_hook() argument
700 struct bio *bio = &s->bio.bio; in do_bio_hook()
710 bio->bi_private = &s->cl; in do_bio_hook()
717 closure_type(s, struct search, cl); in CLOSURE_CALLBACK()
719 atomic_dec(&s->iop.c->search_inflight); in CLOSURE_CALLBACK()
721 if (s->iop.bio) in CLOSURE_CALLBACK()
722 bio_put(s->iop.bio); in CLOSURE_CALLBACK()
724 bio_complete(s); in CLOSURE_CALLBACK()
726 mempool_free(s, &s->iop.c->search); in CLOSURE_CALLBACK()
733 struct search *s; in search_alloc() local
735 s = mempool_alloc(&d->c->search, GFP_NOIO); in search_alloc()
737 closure_init(&s->cl, NULL); in search_alloc()
738 do_bio_hook(s, bio, request_endio); in search_alloc()
741 s->orig_bio = bio; in search_alloc()
742 s->cache_miss = NULL; in search_alloc()
743 s->cache_missed = 0; in search_alloc()
744 s->d = d; in search_alloc()
745 s->recoverable = 1; in search_alloc()
746 s->write = op_is_write(bio_op(bio)); in search_alloc()
747 s->read_dirty_data = 0; in search_alloc()
749 s->orig_bdev = orig_bdev; in search_alloc()
750 s->start_time = start_time; in search_alloc()
751 s->iop.c = d->c; in search_alloc()
752 s->iop.bio = NULL; in search_alloc()
753 s->iop.inode = d->id; in search_alloc()
754 s->iop.write_point = hash_long((unsigned long) current, 16); in search_alloc()
755 s->iop.write_prio = 0; in search_alloc()
756 s->iop.status = 0; in search_alloc()
757 s->iop.flags = 0; in search_alloc()
758 s->iop.flush_journal = op_is_flush(bio->bi_opf); in search_alloc()
759 s->iop.wq = bcache_wq; in search_alloc()
761 return s; in search_alloc()
768 closure_type(s, struct search, cl); in CLOSURE_CALLBACK()
769 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); in CLOSURE_CALLBACK()
779 closure_type(s, struct search, cl); in CLOSURE_CALLBACK()
781 if (s->iop.replace_collision) in CLOSURE_CALLBACK()
782 bch_mark_cache_miss_collision(s->iop.c, s->d); in CLOSURE_CALLBACK()
784 if (s->iop.bio) in CLOSURE_CALLBACK()
785 bio_free_pages(s->iop.bio); in CLOSURE_CALLBACK()
792 closure_type(s, struct search, cl); in CLOSURE_CALLBACK()
793 struct bio *bio = &s->bio.bio; in CLOSURE_CALLBACK()
802 if (s->recoverable && !s->read_dirty_data) { in CLOSURE_CALLBACK()
804 trace_bcache_read_retry(s->orig_bio); in CLOSURE_CALLBACK()
806 s->iop.status = 0; in CLOSURE_CALLBACK()
807 do_bio_hook(s, s->orig_bio, backing_request_endio); in CLOSURE_CALLBACK()
812 closure_bio_submit(s->iop.c, bio, cl); in CLOSURE_CALLBACK()
820 closure_type(s, struct search, cl); in CLOSURE_CALLBACK()
821 struct bcache_device *d = s->d; in CLOSURE_CALLBACK()
823 if (s->iop.replace_collision) in CLOSURE_CALLBACK()
824 bch_mark_cache_miss_collision(s->iop.c, s->d); in CLOSURE_CALLBACK()
826 if (s->iop.bio) in CLOSURE_CALLBACK()
827 bio_free_pages(s->iop.bio); in CLOSURE_CALLBACK()
835 closure_type(s, struct search, cl); in CLOSURE_CALLBACK()
836 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); in CLOSURE_CALLBACK()
846 if (s->iop.bio) { in CLOSURE_CALLBACK()
847 bio_reset(s->iop.bio, s->cache_miss->bi_bdev, REQ_OP_READ); in CLOSURE_CALLBACK()
848 s->iop.bio->bi_iter.bi_sector = in CLOSURE_CALLBACK()
849 s->cache_miss->bi_iter.bi_sector; in CLOSURE_CALLBACK()
850 s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9; in CLOSURE_CALLBACK()
851 bio_clone_blkg_association(s->iop.bio, s->cache_miss); in CLOSURE_CALLBACK()
852 bch_bio_map(s->iop.bio, NULL); in CLOSURE_CALLBACK()
854 bio_copy_data(s->cache_miss, s->iop.bio); in CLOSURE_CALLBACK()
856 bio_put(s->cache_miss); in CLOSURE_CALLBACK()
857 s->cache_miss = NULL; in CLOSURE_CALLBACK()
860 if (verify(dc) && s->recoverable && !s->read_dirty_data) in CLOSURE_CALLBACK()
861 bch_data_verify(dc, s->orig_bio); in CLOSURE_CALLBACK()
864 bio_complete(s); in CLOSURE_CALLBACK()
866 if (s->iop.bio && in CLOSURE_CALLBACK()
867 !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) { in CLOSURE_CALLBACK()
868 BUG_ON(!s->iop.replace); in CLOSURE_CALLBACK()
869 closure_call(&s->iop.cl, bch_data_insert, NULL, cl); in CLOSURE_CALLBACK()
877 closure_type(s, struct search, cl); in CLOSURE_CALLBACK()
878 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); in CLOSURE_CALLBACK()
880 bch_mark_cache_accounting(s->iop.c, s->d, in CLOSURE_CALLBACK()
881 !s->cache_missed, s->iop.bypass); in CLOSURE_CALLBACK()
882 trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass); in CLOSURE_CALLBACK()
884 if (s->iop.status) in CLOSURE_CALLBACK()
886 else if (s->iop.bio || verify(dc)) in CLOSURE_CALLBACK()
892 static int cached_dev_cache_miss(struct btree *b, struct search *s, in cached_dev_cache_miss() argument
896 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); in cached_dev_cache_miss()
900 s->cache_missed = 1; in cached_dev_cache_miss()
902 if (s->cache_miss || s->iop.bypass) { in cached_dev_cache_miss()
903 miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split); in cached_dev_cache_miss()
911 s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio)); in cached_dev_cache_miss()
913 s->iop.replace_key = KEY(s->iop.inode, in cached_dev_cache_miss()
914 bio->bi_iter.bi_sector + s->insert_bio_sectors, in cached_dev_cache_miss()
915 s->insert_bio_sectors); in cached_dev_cache_miss()
917 ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key); in cached_dev_cache_miss()
921 s->iop.replace = true; in cached_dev_cache_miss()
923 miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO, in cached_dev_cache_miss()
924 &s->d->bio_split); in cached_dev_cache_miss()
930 DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS), in cached_dev_cache_miss()
936 cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9; in cached_dev_cache_miss()
939 cache_bio->bi_private = &s->cl; in cached_dev_cache_miss()
945 s->cache_miss = miss; in cached_dev_cache_miss()
946 s->iop.bio = cache_bio; in cached_dev_cache_miss()
949 closure_bio_submit(s->iop.c, cache_bio, &s->cl); in cached_dev_cache_miss()
956 miss->bi_private = &s->cl; in cached_dev_cache_miss()
958 closure_bio_submit(s->iop.c, miss, &s->cl); in cached_dev_cache_miss()
962 static void cached_dev_read(struct cached_dev *dc, struct search *s) in cached_dev_read() argument
964 struct closure *cl = &s->cl; in cached_dev_read()
966 closure_call(&s->iop.cl, cache_lookup, NULL, cl); in cached_dev_read()
974 closure_type(s, struct search, cl); in CLOSURE_CALLBACK()
975 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); in CLOSURE_CALLBACK()
981 static void cached_dev_write(struct cached_dev *dc, struct search *s) in cached_dev_write() argument
983 struct closure *cl = &s->cl; in cached_dev_write()
984 struct bio *bio = &s->bio.bio; in cached_dev_write()
988 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end); in cached_dev_write()
996 s->iop.bypass = false; in cached_dev_write()
997 s->iop.writeback = true; in cached_dev_write()
1008 s->iop.bypass = true; in cached_dev_write()
1010 if (should_writeback(dc, s->orig_bio, in cached_dev_write()
1012 s->iop.bypass)) { in cached_dev_write()
1013 s->iop.bypass = false; in cached_dev_write()
1014 s->iop.writeback = true; in cached_dev_write()
1017 if (s->iop.bypass) { in cached_dev_write()
1018 s->iop.bio = s->orig_bio; in cached_dev_write()
1019 bio_get(s->iop.bio); in cached_dev_write()
1027 closure_bio_submit(s->iop.c, bio, cl); in cached_dev_write()
1029 } else if (s->iop.writeback) { in cached_dev_write()
1031 s->iop.bio = bio; in cached_dev_write()
1044 s->iop.status = BLK_STS_RESOURCE; in cached_dev_write()
1050 closure_bio_submit(s->iop.c, flush, cl); in cached_dev_write()
1053 s->iop.bio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, in cached_dev_write()
1057 closure_bio_submit(s->iop.c, bio, cl); in cached_dev_write()
1061 closure_call(&s->iop.cl, bch_data_insert, NULL, cl); in cached_dev_write()
1067 closure_type(s, struct search, cl); in CLOSURE_CALLBACK()
1068 struct bio *bio = &s->bio.bio; in CLOSURE_CALLBACK()
1070 if (s->iop.flush_journal) in CLOSURE_CALLBACK()
1071 bch_journal_meta(s->iop.c, cl); in CLOSURE_CALLBACK()
1075 closure_bio_submit(s->iop.c, bio, cl); in CLOSURE_CALLBACK()
1186 struct search *s; in cached_dev_submit_bio() local
1221 s = search_alloc(bio, d, orig_bdev, start_time); in cached_dev_submit_bio()
1222 trace_bcache_request_start(s->d, bio); in cached_dev_submit_bio()
1229 continue_at_nobarrier(&s->cl, in cached_dev_submit_bio()
1233 s->iop.bypass = check_should_bypass(dc, bio); in cached_dev_submit_bio()
1236 cached_dev_write(dc, s); in cached_dev_submit_bio()
1238 cached_dev_read(dc, s); in cached_dev_submit_bio()
1265 static int flash_dev_cache_miss(struct btree *b, struct search *s, in flash_dev_cache_miss() argument
1284 closure_type(s, struct search, cl); in CLOSURE_CALLBACK()
1286 if (s->iop.flush_journal) in CLOSURE_CALLBACK()
1287 bch_journal_meta(s->iop.c, cl); in CLOSURE_CALLBACK()
1294 struct search *s; in flash_dev_submit_bio() local
1304 s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio)); in flash_dev_submit_bio()
1305 cl = &s->cl; in flash_dev_submit_bio()
1306 bio = &s->bio.bio; in flash_dev_submit_bio()
1308 trace_bcache_request_start(s->d, bio); in flash_dev_submit_bio()
1314 continue_at_nobarrier(&s->cl, in flash_dev_submit_bio()
1319 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, in flash_dev_submit_bio()
1323 s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0; in flash_dev_submit_bio()
1324 s->iop.writeback = true; in flash_dev_submit_bio()
1325 s->iop.bio = bio; in flash_dev_submit_bio()
1327 closure_call(&s->iop.cl, bch_data_insert, NULL, cl); in flash_dev_submit_bio()
1329 closure_call(&s->iop.cl, cache_lookup, NULL, cl); in flash_dev_submit_bio()
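
Nearly every reference above recovers the enclosing struct search from a pointer to an embedded member (cl, op, or iop.cl) via container_of() or the closure_type() wrapper. The sketch below is a minimal, userspace-only illustration of that pattern under stated assumptions: the demo_* names are hypothetical, the closure type is reduced to a bare function pointer, and container_of() is defined locally (in the kernel it comes from <linux/container_of.h>); only the shape of the code mirrors bcache's usage.

/*
 * Userspace sketch of the container_of() pattern used throughout the
 * listing (e.g. container_of(cl, struct search, cl)): a callback is
 * handed a pointer to an embedded member and recovers the enclosing
 * structure from it.
 */
#include <stddef.h>
#include <stdio.h>

/* Same arithmetic the kernel macro performs, minus its type checking. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_closure {
	void (*fn)(struct demo_closure *cl);
};

struct demo_search {
	struct demo_closure cl;	/* embedded member, like search->cl */
	int status;		/* stands in for s->iop.status      */
};

/* The callback only sees the embedded member, as request_endio() does. */
static void demo_endio(struct demo_closure *cl)
{
	struct demo_search *s = container_of(cl, struct demo_search, cl);

	printf("recovered search, status %d\n", s->status);
}

int main(void)
{
	struct demo_search s = { .cl = { .fn = demo_endio }, .status = 0 };

	s.cl.fn(&s.cl);	/* dispatch through the embedded closure */
	return 0;
}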