Lines matching refs: tc (drivers/md/dm-thin.c, the dm thin-provisioning target)
224 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
225 typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
380 struct thin_c *tc; member
386 static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent) in begin_discard() argument
390 op->tc = tc; in begin_discard()
398 struct thin_c *tc = op->tc; in issue_discard() local
399 sector_t s = block_to_sectors(tc->pool, data_b); in issue_discard()
400 sector_t len = block_to_sectors(tc->pool, data_e - data_b); in issue_discard()
402 return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOWAIT, in issue_discard()
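The begin_discard()/issue_discard() hits above stage a discard for a run of data blocks [data_b, data_e) by converting it to a 512-byte-sector start and length before calling __blkdev_issue_discard(). A minimal userspace sketch of that arithmetic (hypothetical names, not the kernel code; the kernel's block_to_sectors() shifts instead when the pool's block size is a power of two):

#include <stdint.h>

typedef uint64_t dm_block_t;
typedef uint64_t sector_t;

/* convert a block run [data_b, data_e) to a sector start and length */
static void discard_extent(uint32_t sectors_per_block,
                           dm_block_t data_b, dm_block_t data_e,
                           sector_t *start, sector_t *len)
{
        *start = (sector_t)data_b * sectors_per_block;
        *len   = (sector_t)(data_e - data_b) * sectors_per_block;
}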
585 struct thin_c *tc; member
609 static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, in error_thin_bio_list() argument
616 spin_lock_irq(&tc->lock); in error_thin_bio_list()
618 spin_unlock_irq(&tc->lock); in error_thin_bio_list()
623 static void requeue_deferred_cells(struct thin_c *tc) in requeue_deferred_cells() argument
625 struct pool *pool = tc->pool; in requeue_deferred_cells()
631 spin_lock_irq(&tc->lock); in requeue_deferred_cells()
632 list_splice_init(&tc->deferred_cells, &cells); in requeue_deferred_cells()
633 spin_unlock_irq(&tc->lock); in requeue_deferred_cells()
639 static void requeue_io(struct thin_c *tc) in requeue_io() argument
645 spin_lock_irq(&tc->lock); in requeue_io()
646 __merge_bio_list(&bios, &tc->deferred_bio_list); in requeue_io()
647 __merge_bio_list(&bios, &tc->retry_on_resume_list); in requeue_io()
648 spin_unlock_irq(&tc->lock); in requeue_io()
651 requeue_deferred_cells(tc); in requeue_io()
656 struct thin_c *tc; in error_retry_list_with_code() local
659 list_for_each_entry_rcu(tc, &pool->active_thins, list) in error_retry_list_with_code()
660 error_thin_bio_list(tc, &tc->retry_on_resume_list, error); in error_retry_list_with_code()
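requeue_io() and error_thin_bio_list() both follow the same locking discipline visible in the entries above: hold tc->lock only long enough to splice the whole bio list into a local one, then requeue or error the bios with the lock dropped. A userspace analogue of the pattern (pthread mutex standing in for the spinlock; illustrative only):

#include <pthread.h>
#include <stdio.h>

struct bio { struct bio *next; int id; };
struct bio_list { struct bio *head; };

struct thin { pthread_mutex_t lock; struct bio_list deferred; };

static void steal_and_error(struct thin *tc)
{
        struct bio_list stolen;

        pthread_mutex_lock(&tc->lock);
        stolen = tc->deferred;          /* splice the whole list out */
        tc->deferred.head = NULL;       /* leave it reinitialised    */
        pthread_mutex_unlock(&tc->lock);

        /* complete the bios outside the lock */
        for (struct bio *b = stolen.head; b; b = b->next)
                printf("erroring bio %d\n", b->id);
}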
676 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) in get_bio_block() argument
678 struct pool *pool = tc->pool; in get_bio_block()
692 static void get_bio_block_range(struct thin_c *tc, struct bio *bio, in get_bio_block_range() argument
695 struct pool *pool = tc->pool; in get_bio_block_range()
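get_bio_block() maps a bio's start sector to a virtual thin block by division, while get_bio_block_range() (used for discards) rounds the begin block up and the end block down so that only fully covered blocks are touched. A sketch with hypothetical names:

#include <stdint.h>

typedef uint64_t dm_block_t;
typedef uint64_t sector_t;

static dm_block_t bio_block(uint32_t spb, sector_t sector)
{
        return sector / spb;    /* power-of-two pools shift instead */
}

static void bio_block_range(uint32_t spb, sector_t start, sector_t len,
                            dm_block_t *begin, dm_block_t *end)
{
        *begin = (start + spb - 1) / spb;  /* round partial head block up */
        *end   = (start + len) / spb;      /* drop the partial tail block */
        /* *end <= *begin: the bio covers no whole block */
}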
717 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) in remap() argument
719 struct pool *pool = tc->pool; in remap()
722 bio_set_dev(bio, tc->pool_dev->bdev); in remap()
732 static void remap_to_origin(struct thin_c *tc, struct bio *bio) in remap_to_origin() argument
734 bio_set_dev(bio, tc->origin_dev->bdev); in remap_to_origin()
737 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio) in bio_triggers_commit() argument
740 dm_thin_changed_this_transaction(tc->td); in bio_triggers_commit()
754 static void issue(struct thin_c *tc, struct bio *bio) in issue() argument
756 struct pool *pool = tc->pool; in issue()
758 if (!bio_triggers_commit(tc, bio)) { in issue()
768 if (dm_thin_aborted_changes(tc->td)) { in issue()
782 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio) in remap_to_origin_and_issue() argument
784 remap_to_origin(tc, bio); in remap_to_origin_and_issue()
785 issue(tc, bio); in remap_to_origin_and_issue()
788 static void remap_and_issue(struct thin_c *tc, struct bio *bio, in remap_and_issue() argument
791 remap(tc, bio, block); in remap_and_issue()
792 issue(tc, bio); in remap_and_issue()
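remap() keeps the bio's offset within its virtual block but rebases it onto the allocated data block and points it at the pool device with bio_set_dev(); issue() then defers any flush/FUA bio that arrives while the transaction has uncommitted changes (bio_triggers_commit()) so a metadata commit lands first. The core remap arithmetic, sketched:

#include <stdint.h>

typedef uint64_t dm_block_t;
typedef uint64_t sector_t;

/* offset within the virtual block + base of the allocated data block */
static sector_t remap_sector(uint32_t spb, sector_t bio_sector,
                             dm_block_t data_block)
{
        return (sector_t)data_block * spb + bio_sector % spb;
}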
814 struct thin_c *tc; member
831 struct pool *pool = m->tc->pool; in __complete_mapping_preparation()
842 struct pool *pool = m->tc->pool; in complete_mapping_preparation()
882 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell) in cell_defer_no_holder() argument
884 struct pool *pool = tc->pool; in cell_defer_no_holder()
888 spin_lock_irqsave(&tc->lock, flags); in cell_defer_no_holder()
889 cell_release_no_holder(pool, cell, &tc->deferred_bio_list); in cell_defer_no_holder()
890 has_work = !bio_list_empty(&tc->deferred_bio_list); in cell_defer_no_holder()
891 spin_unlock_irqrestore(&tc->lock, flags); in cell_defer_no_holder()
897 static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
900 struct thin_c *tc; member
915 inc_all_io_entry(info->tc->pool, bio); in __inc_remap_and_issue_cell()
927 static void inc_remap_and_issue_cell(struct thin_c *tc, in inc_remap_and_issue_cell() argument
934 info.tc = tc; in inc_remap_and_issue_cell()
943 cell_visit_release(tc->pool, __inc_remap_and_issue_cell, in inc_remap_and_issue_cell()
947 thin_defer_bio(tc, bio); in inc_remap_and_issue_cell()
950 remap_and_issue(info.tc, bio, block); in inc_remap_and_issue_cell()
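cell_defer_no_holder() and inc_remap_and_issue_cell() operate on bio-prison cells: while a mapping operation is in flight for a block, later bios hitting the same block are detained in a cell keyed by that block and released together when the operation finishes. A toy single-cell model (the real prison keeps many cells in a shared structure under its own lock):

#include <stddef.h>

struct bio { struct bio *next; };

struct cell {
        unsigned long long key;        /* block the cell guards  */
        struct bio *holder;            /* bio that won the race  */
        struct bio *waiters;           /* detained bios          */
};

/* returns 1 if the bio was detained behind an existing holder */
static int detain(struct cell *c, unsigned long long block, struct bio *b)
{
        if (!c->holder) {
                c->key = block;
                c->holder = b;
                return 0;
        }
        b->next = c->waiters;
        c->waiters = b;
        return 1;
}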
955 cell_error(m->tc->pool, m->cell); in process_prepared_mapping_fail()
957 mempool_free(m, &m->tc->pool->mapping_pool); in process_prepared_mapping_fail()
960 static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio) in complete_overwrite_bio() argument
962 struct pool *pool = tc->pool; in complete_overwrite_bio()
968 if (!bio_triggers_commit(tc, bio)) { in complete_overwrite_bio()
978 if (dm_thin_aborted_changes(tc->td)) { in complete_overwrite_bio()
994 struct thin_c *tc = m->tc; in process_prepared_mapping() local
995 struct pool *pool = tc->pool; in process_prepared_mapping()
1009 r = dm_thin_insert_block(tc->td, m->virt_begin, m->data_block); in process_prepared_mapping()
1023 inc_remap_and_issue_cell(tc, m->cell, m->data_block); in process_prepared_mapping()
1024 complete_overwrite_bio(tc, bio); in process_prepared_mapping()
1026 inc_all_io_entry(tc->pool, m->cell->holder); in process_prepared_mapping()
1027 remap_and_issue(tc, m->cell->holder, m->data_block); in process_prepared_mapping()
1028 inc_remap_and_issue_cell(tc, m->cell, m->data_block); in process_prepared_mapping()
1040 struct thin_c *tc = m->tc; in free_discard_mapping() local
1043 cell_defer_no_holder(tc, m->cell); in free_discard_mapping()
1044 mempool_free(m, &tc->pool->mapping_pool); in free_discard_mapping()
1062 struct thin_c *tc = m->tc; in process_prepared_discard_no_passdown() local
1064 r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end); in process_prepared_discard_no_passdown()
1066 metadata_operation_failed(tc->pool, "dm_thin_remove_range", r); in process_prepared_discard_no_passdown()
1071 cell_defer_no_holder(tc, m->cell); in process_prepared_discard_no_passdown()
1072 mempool_free(m, &tc->pool->mapping_pool); in process_prepared_discard_no_passdown()
1086 struct thin_c *tc = m->tc; in passdown_double_checking_shared_status() local
1087 struct pool *pool = tc->pool; in passdown_double_checking_shared_status()
1091 begin_discard(&op, tc, discard_parent); in passdown_double_checking_shared_status()
1129 struct pool *pool = m->tc->pool; in queue_passdown_pt2()
1150 struct thin_c *tc = m->tc; in process_prepared_discard_passdown_pt1() local
1151 struct pool *pool = tc->pool; in process_prepared_discard_passdown_pt1()
1160 r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end); in process_prepared_discard_passdown_pt1()
1164 cell_defer_no_holder(tc, m->cell); in process_prepared_discard_passdown_pt1()
1177 cell_defer_no_holder(tc, m->cell); in process_prepared_discard_passdown_pt1()
1190 begin_discard(&op, tc, discard_parent); in process_prepared_discard_passdown_pt1()
1199 struct thin_c *tc = m->tc; in process_prepared_discard_passdown_pt2() local
1200 struct pool *pool = tc->pool; in process_prepared_discard_passdown_pt2()
1214 cell_defer_no_holder(tc, m->cell); in process_prepared_discard_passdown_pt2()
1280 static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m, in ll_zero() argument
1285 to.bdev = tc->pool_dev->bdev; in ll_zero()
1289 dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m); in ll_zero()
1292 static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio, in remap_and_issue_overwrite() argument
1296 struct pool *pool = tc->pool; in remap_and_issue_overwrite()
1303 remap_and_issue(tc, bio, data_begin); in remap_and_issue_overwrite()
1309 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, in schedule_copy() argument
1315 struct pool *pool = tc->pool; in schedule_copy()
1318 m->tc = tc; in schedule_copy()
1341 remap_and_issue_overwrite(tc, bio, data_dest, m); in schedule_copy()
1349 to.bdev = tc->pool_dev->bdev; in schedule_copy()
1361 ll_zero(tc, m, in schedule_copy()
1370 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block, in schedule_internal_copy() argument
1374 schedule_copy(tc, virt_block, tc->pool_dev, in schedule_internal_copy()
1376 tc->pool->sectors_per_block); in schedule_internal_copy()
1379 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, in schedule_zero() argument
1383 struct pool *pool = tc->pool; in schedule_zero()
1387 m->tc = tc; in schedule_zero()
1400 remap_and_issue_overwrite(tc, bio, data_block, m); in schedule_zero()
1402 ll_zero(tc, m, data_block * pool->sectors_per_block, in schedule_zero()
1408 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block, in schedule_external_copy() argument
1412 struct pool *pool = tc->pool; in schedule_external_copy()
1416 if (virt_block_end <= tc->origin_size) in schedule_external_copy()
1417 schedule_copy(tc, virt_block, tc->origin_dev, in schedule_external_copy()
1421 else if (virt_block_begin < tc->origin_size) in schedule_external_copy()
1422 schedule_copy(tc, virt_block, tc->origin_dev, in schedule_external_copy()
1424 tc->origin_size - virt_block_begin); in schedule_external_copy()
1427 schedule_zero(tc, virt_block, data_dest, cell, bio); in schedule_external_copy()
1514 static int alloc_data_block(struct thin_c *tc, dm_block_t *result) in alloc_data_block() argument
1518 struct pool *pool = tc->pool; in alloc_data_block()
1584 struct thin_c *tc = h->tc; in retry_on_resume() local
1586 spin_lock_irq(&tc->lock); in retry_on_resume()
1587 bio_list_add(&tc->retry_on_resume_list, bio); in retry_on_resume()
1588 spin_unlock_irq(&tc->lock); in retry_on_resume()
1645 static void process_discard_cell_no_passdown(struct thin_c *tc, in process_discard_cell_no_passdown() argument
1648 struct pool *pool = tc->pool; in process_discard_cell_no_passdown()
1655 m->tc = tc; in process_discard_cell_no_passdown()
1665 static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end, in break_up_discard_bio() argument
1668 struct pool *pool = tc->pool; in break_up_discard_bio()
1683 r = dm_thin_find_mapped_range(tc->td, begin, end, &virt_begin, &virt_end, in break_up_discard_bio()
1692 build_key(tc->td, PHYSICAL, data_begin, data_begin + (virt_end - virt_begin), &data_key); in break_up_discard_bio()
1693 if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) { in break_up_discard_bio()
1704 m->tc = tc; in break_up_discard_bio()
1728 static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_prison_cell *virt_cell) in process_discard_cell_passdown() argument
1739 break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio); in process_discard_cell_passdown()
1749 static void process_discard_bio(struct thin_c *tc, struct bio *bio) in process_discard_bio() argument
1755 get_bio_block_range(tc, bio, &begin, &end); in process_discard_bio()
1764 build_key(tc->td, VIRTUAL, begin, end, &virt_key); in process_discard_bio()
1765 if (bio_detain(tc->pool, &virt_key, bio, &virt_cell)) in process_discard_bio()
1775 tc->pool->process_discard_cell(tc, virt_cell); in process_discard_bio()
1778 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, in break_sharing() argument
1785 struct pool *pool = tc->pool; in break_sharing()
1787 r = alloc_data_block(tc, &data_block); in break_sharing()
1790 schedule_internal_copy(tc, block, lookup_result->block, in break_sharing()
1819 h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds); in __remap_and_issue_shared_cell()
1820 inc_all_io_entry(info->tc->pool, bio); in __remap_and_issue_shared_cell()
1826 static void remap_and_issue_shared_cell(struct thin_c *tc, in remap_and_issue_shared_cell() argument
1833 info.tc = tc; in remap_and_issue_shared_cell()
1837 cell_visit_release(tc->pool, __remap_and_issue_shared_cell, in remap_and_issue_shared_cell()
1841 thin_defer_bio(tc, bio); in remap_and_issue_shared_cell()
1844 remap_and_issue(tc, bio, block); in remap_and_issue_shared_cell()
1847 static void process_shared_bio(struct thin_c *tc, struct bio *bio, in process_shared_bio() argument
1853 struct pool *pool = tc->pool; in process_shared_bio()
1860 build_data_key(tc->td, lookup_result->block, &key); in process_shared_bio()
1862 cell_defer_no_holder(tc, virt_cell); in process_shared_bio()
1867 break_sharing(tc, bio, block, &key, lookup_result, data_cell); in process_shared_bio()
1868 cell_defer_no_holder(tc, virt_cell); in process_shared_bio()
1874 remap_and_issue(tc, bio, lookup_result->block); in process_shared_bio()
1876 remap_and_issue_shared_cell(tc, data_cell, lookup_result->block); in process_shared_bio()
1877 remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block); in process_shared_bio()
1881 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block, in provision_block() argument
1886 struct pool *pool = tc->pool; in provision_block()
1893 cell_defer_no_holder(tc, cell); in provision_block()
1895 remap_and_issue(tc, bio, 0); in provision_block()
1904 cell_defer_no_holder(tc, cell); in provision_block()
1909 r = alloc_data_block(tc, &data_block); in provision_block()
1912 if (tc->origin_dev) in provision_block()
1913 schedule_external_copy(tc, block, data_block, cell, bio); in provision_block()
1915 schedule_zero(tc, block, data_block, cell, bio); in provision_block()
1930 static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell) in process_cell() argument
1933 struct pool *pool = tc->pool; in process_cell()
1935 dm_block_t block = get_bio_block(tc, bio); in process_cell()
1938 if (tc->requeue_mode) { in process_cell()
1943 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); in process_cell()
1947 process_shared_bio(tc, bio, block, &lookup_result, cell); in process_cell()
1950 remap_and_issue(tc, bio, lookup_result.block); in process_cell()
1951 inc_remap_and_issue_cell(tc, cell, lookup_result.block); in process_cell()
1956 if (bio_data_dir(bio) == READ && tc->origin_dev) { in process_cell()
1958 cell_defer_no_holder(tc, cell); in process_cell()
1960 if (bio_end_sector(bio) <= tc->origin_size) in process_cell()
1961 remap_to_origin_and_issue(tc, bio); in process_cell()
1963 else if (bio->bi_iter.bi_sector < tc->origin_size) { in process_cell()
1965 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT; in process_cell()
1966 remap_to_origin_and_issue(tc, bio); in process_cell()
1973 provision_block(tc, bio, block, cell); in process_cell()
1979 cell_defer_no_holder(tc, cell); in process_cell()
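The process_cell() entries above also show the external-origin read path: a read wholly below tc->origin_size is remapped to the origin, while one that merely starts below it has bi_size truncated so only bytes that exist on the origin are read (the surrounding source zero-fills the bio first, so the tail past the origin reads back as zeros). The clamp, sketched:

#include <stdint.h>

typedef uint64_t sector_t;
#define SECTOR_SHIFT 9

/* caller guarantees bio_sector < origin_size */
static uint32_t bytes_from_origin(sector_t origin_size, sector_t bio_sector)
{
        return (uint32_t)((origin_size - bio_sector) << SECTOR_SHIFT);
}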
1985 static void process_bio(struct thin_c *tc, struct bio *bio) in process_bio() argument
1987 struct pool *pool = tc->pool; in process_bio()
1988 dm_block_t block = get_bio_block(tc, bio); in process_bio()
1996 build_virtual_key(tc->td, block, &key); in process_bio()
2000 process_cell(tc, cell); in process_bio()
2003 static void __process_bio_read_only(struct thin_c *tc, struct bio *bio, in __process_bio_read_only() argument
2008 dm_block_t block = get_bio_block(tc, bio); in __process_bio_read_only()
2011 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); in __process_bio_read_only()
2015 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
2017 cell_defer_no_holder(tc, cell); in __process_bio_read_only()
2019 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
2020 remap_and_issue(tc, bio, lookup_result.block); in __process_bio_read_only()
2022 inc_remap_and_issue_cell(tc, cell, lookup_result.block); in __process_bio_read_only()
2028 cell_defer_no_holder(tc, cell); in __process_bio_read_only()
2030 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
2034 if (tc->origin_dev) { in __process_bio_read_only()
2035 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
2036 remap_to_origin_and_issue(tc, bio); in __process_bio_read_only()
2048 cell_defer_no_holder(tc, cell); in __process_bio_read_only()
2054 static void process_bio_read_only(struct thin_c *tc, struct bio *bio) in process_bio_read_only() argument
2056 __process_bio_read_only(tc, bio, NULL); in process_bio_read_only()
2059 static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell) in process_cell_read_only() argument
2061 __process_bio_read_only(tc, cell->holder, cell); in process_cell_read_only()
2064 static void process_bio_success(struct thin_c *tc, struct bio *bio) in process_bio_success() argument
2069 static void process_bio_fail(struct thin_c *tc, struct bio *bio) in process_bio_fail() argument
2074 static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell) in process_cell_success() argument
2076 cell_success(tc->pool, cell); in process_cell_success()
2079 static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell) in process_cell_fail() argument
2081 cell_error(tc->pool, cell); in process_cell_fail()
2097 static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio) in __thin_bio_rb_add() argument
2103 rbp = &tc->sort_bio_list.rb_node; in __thin_bio_rb_add()
2117 rb_insert_color(&pbd->rb_node, &tc->sort_bio_list); in __thin_bio_rb_add()
2120 static void __extract_sorted_bios(struct thin_c *tc) in __extract_sorted_bios() argument
2126 for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) { in __extract_sorted_bios()
2130 bio_list_add(&tc->deferred_bio_list, bio); in __extract_sorted_bios()
2131 rb_erase(&pbd->rb_node, &tc->sort_bio_list); in __extract_sorted_bios()
2134 WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list)); in __extract_sorted_bios()
2137 static void __sort_thin_deferred_bios(struct thin_c *tc) in __sort_thin_deferred_bios() argument
2143 bio_list_merge(&bios, &tc->deferred_bio_list); in __sort_thin_deferred_bios()
2144 bio_list_init(&tc->deferred_bio_list); in __sort_thin_deferred_bios()
2148 __thin_bio_rb_add(tc, bio); in __sort_thin_deferred_bios()
2155 __extract_sorted_bios(tc); in __sort_thin_deferred_bios()
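__sort_thin_deferred_bios() pulls everything off tc->deferred_bio_list, inserts each bio into an rbtree keyed by start sector (__thin_bio_rb_add()), and walks the tree back onto the list so the pool sees mostly sequential I/O. qsort over an array shows the same idea in userspace:

#include <stdlib.h>

struct bio { unsigned long long sector; };

static int cmp_bio(const void *a, const void *b)
{
        const struct bio *x = a, *y = b;
        return (x->sector > y->sector) - (x->sector < y->sector);
}

static void sort_deferred(struct bio *bios, size_t n)
{
        qsort(bios, n, sizeof(*bios), cmp_bio);
}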
2158 static void process_thin_deferred_bios(struct thin_c *tc) in process_thin_deferred_bios() argument
2160 struct pool *pool = tc->pool; in process_thin_deferred_bios()
2166 if (tc->requeue_mode) { in process_thin_deferred_bios()
2167 error_thin_bio_list(tc, &tc->deferred_bio_list, in process_thin_deferred_bios()
2174 spin_lock_irq(&tc->lock); in process_thin_deferred_bios()
2176 if (bio_list_empty(&tc->deferred_bio_list)) { in process_thin_deferred_bios()
2177 spin_unlock_irq(&tc->lock); in process_thin_deferred_bios()
2181 __sort_thin_deferred_bios(tc); in process_thin_deferred_bios()
2183 bio_list_merge(&bios, &tc->deferred_bio_list); in process_thin_deferred_bios()
2184 bio_list_init(&tc->deferred_bio_list); in process_thin_deferred_bios()
2186 spin_unlock_irq(&tc->lock); in process_thin_deferred_bios()
2196 spin_lock_irq(&tc->lock); in process_thin_deferred_bios()
2197 bio_list_add(&tc->deferred_bio_list, bio); in process_thin_deferred_bios()
2198 bio_list_merge(&tc->deferred_bio_list, &bios); in process_thin_deferred_bios()
2199 spin_unlock_irq(&tc->lock); in process_thin_deferred_bios()
2204 pool->process_discard(tc, bio); in process_thin_deferred_bios()
2206 pool->process_bio(tc, bio); in process_thin_deferred_bios()
2252 static void process_thin_deferred_cells(struct thin_c *tc) in process_thin_deferred_cells() argument
2254 struct pool *pool = tc->pool; in process_thin_deferred_cells()
2261 spin_lock_irq(&tc->lock); in process_thin_deferred_cells()
2262 list_splice_init(&tc->deferred_cells, &cells); in process_thin_deferred_cells()
2263 spin_unlock_irq(&tc->lock); in process_thin_deferred_cells()
2269 count = sort_cells(tc->pool, &cells); in process_thin_deferred_cells()
2284 spin_lock_irq(&tc->lock); in process_thin_deferred_cells()
2285 list_splice(&cells, &tc->deferred_cells); in process_thin_deferred_cells()
2286 spin_unlock_irq(&tc->lock); in process_thin_deferred_cells()
2291 pool->process_discard_cell(tc, cell); in process_thin_deferred_cells()
2293 pool->process_cell(tc, cell); in process_thin_deferred_cells()
2299 static void thin_get(struct thin_c *tc);
2300 static void thin_put(struct thin_c *tc);
2309 struct thin_c *tc = NULL; in get_first_thin() local
2313 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list); in get_first_thin()
2314 thin_get(tc); in get_first_thin()
2318 return tc; in get_first_thin()
2321 static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc) in get_next_thin() argument
2323 struct thin_c *old_tc = tc; in get_next_thin()
2326 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) { in get_next_thin()
2327 thin_get(tc); in get_next_thin()
2330 return tc; in get_next_thin()
2342 struct thin_c *tc; in process_deferred_bios() local
2344 tc = get_first_thin(pool); in process_deferred_bios()
2345 while (tc) { in process_deferred_bios()
2346 process_thin_deferred_cells(tc); in process_deferred_bios()
2347 process_thin_deferred_bios(tc); in process_deferred_bios()
2348 tc = get_next_thin(pool, tc); in process_deferred_bios()
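get_first_thin()/get_next_thin() let process_deferred_bios() walk pool->active_thins safely: each step takes a reference on the next thin before the RCU read section ends, and the previous reference is dropped afterwards, so a thin device can be deleted mid-walk without leaving the iterator dangling. A mutex-based userspace analogue (RCU elided):

#include <pthread.h>
#include <stddef.h>

struct thin { struct thin *next; int refcount; };

struct pool { pthread_mutex_t lock; struct thin *active; };

static struct thin *next_thin(struct pool *p, struct thin *cur)
{
        struct thin *n;

        pthread_mutex_lock(&p->lock);
        n = cur ? cur->next : p->active;
        if (n)
                n->refcount++;          /* pin before the lock drops */
        pthread_mutex_unlock(&p->lock);

        /* caller drops its reference on cur (thin_put()) afterwards */
        return n;
}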
2470 struct thin_c *tc; member
2482 w->tc->requeue_mode = true; in do_noflush_start()
2483 requeue_io(w->tc); in do_noflush_start()
2491 w->tc->requeue_mode = false; in do_noflush_stop()
2495 static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *)) in noflush_work() argument
2499 w.tc = tc; in noflush_work()
2500 pool_work_wait(&w.pw, tc->pool, fn); in noflush_work()
2656 static void thin_defer_bio(struct thin_c *tc, struct bio *bio) in thin_defer_bio() argument
2658 struct pool *pool = tc->pool; in thin_defer_bio()
2660 spin_lock_irq(&tc->lock); in thin_defer_bio()
2661 bio_list_add(&tc->deferred_bio_list, bio); in thin_defer_bio()
2662 spin_unlock_irq(&tc->lock); in thin_defer_bio()
2667 static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio) in thin_defer_bio_with_throttle() argument
2669 struct pool *pool = tc->pool; in thin_defer_bio_with_throttle()
2672 thin_defer_bio(tc, bio); in thin_defer_bio_with_throttle()
2676 static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell) in thin_defer_cell() argument
2678 struct pool *pool = tc->pool; in thin_defer_cell()
2681 spin_lock_irq(&tc->lock); in thin_defer_cell()
2682 list_add_tail(&cell->user_list, &tc->deferred_cells); in thin_defer_cell()
2683 spin_unlock_irq(&tc->lock); in thin_defer_cell()
2689 static void thin_hook_bio(struct thin_c *tc, struct bio *bio) in thin_hook_bio() argument
2693 h->tc = tc; in thin_hook_bio()
2706 struct thin_c *tc = ti->private; in thin_bio_map() local
2707 dm_block_t block = get_bio_block(tc, bio); in thin_bio_map()
2708 struct dm_thin_device *td = tc->td; in thin_bio_map()
2713 thin_hook_bio(tc, bio); in thin_bio_map()
2715 if (tc->requeue_mode) { in thin_bio_map()
2721 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_bio_map()
2727 thin_defer_bio_with_throttle(tc, bio); in thin_bio_map()
2735 build_virtual_key(tc->td, block, &key); in thin_bio_map()
2736 if (bio_detain(tc->pool, &key, bio, &virt_cell)) in thin_bio_map()
2761 thin_defer_cell(tc, virt_cell); in thin_bio_map()
2765 build_data_key(tc->td, result.block, &key); in thin_bio_map()
2766 if (bio_detain(tc->pool, &key, bio, &data_cell)) { in thin_bio_map()
2767 cell_defer_no_holder(tc, virt_cell); in thin_bio_map()
2771 inc_all_io_entry(tc->pool, bio); in thin_bio_map()
2772 cell_defer_no_holder(tc, data_cell); in thin_bio_map()
2773 cell_defer_no_holder(tc, virt_cell); in thin_bio_map()
2775 remap(tc, bio, result.block); in thin_bio_map()
2780 thin_defer_cell(tc, virt_cell); in thin_bio_map()
2790 cell_defer_no_holder(tc, virt_cell); in thin_bio_map()
2797 struct thin_c *tc; in requeue_bios() local
2800 list_for_each_entry_rcu(tc, &pool->active_thins, list) { in requeue_bios()
2801 spin_lock_irq(&tc->lock); in requeue_bios()
2802 bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list); in requeue_bios()
2803 bio_list_init(&tc->retry_on_resume_list); in requeue_bios()
2804 spin_unlock_irq(&tc->lock); in requeue_bios()
3585 struct thin_c *tc; in pool_suspend_active_thins() local
3588 tc = get_first_thin(pool); in pool_suspend_active_thins()
3589 while (tc) { in pool_suspend_active_thins()
3590 dm_internal_suspend_noflush(tc->thin_md); in pool_suspend_active_thins()
3591 tc = get_next_thin(pool, tc); in pool_suspend_active_thins()
3597 struct thin_c *tc; in pool_resume_active_thins() local
3600 tc = get_first_thin(pool); in pool_resume_active_thins()
3601 while (tc) { in pool_resume_active_thins()
3602 dm_internal_resume(tc->thin_md); in pool_resume_active_thins()
3603 tc = get_next_thin(pool, tc); in pool_resume_active_thins()
4117 static void thin_get(struct thin_c *tc) in thin_get() argument
4119 refcount_inc(&tc->refcount); in thin_get()
4122 static void thin_put(struct thin_c *tc) in thin_put() argument
4124 if (refcount_dec_and_test(&tc->refcount)) in thin_put()
4125 complete(&tc->can_destroy); in thin_put()
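thin_get()/thin_put() pair a refcount with a completion: thin_dtr() (next entries) unlinks the device, drops its own reference, and blocks on tc->can_destroy until every in-flight user has dropped theirs. A userspace analogue with stdatomic and a semaphore standing in for refcount_t and struct completion:

#include <stdatomic.h>
#include <semaphore.h>

struct thin {
        atomic_int refcount;
        sem_t can_destroy;
};

static void thin_get(struct thin *tc)
{
        atomic_fetch_add(&tc->refcount, 1);
}

static void thin_put(struct thin *tc)
{
        /* the last put signals the waiting destructor, like complete() */
        if (atomic_fetch_sub(&tc->refcount, 1) == 1)
                sem_post(&tc->can_destroy);
}

static void dtr_wait(struct thin *tc)
{
        thin_put(tc);                   /* drop the constructor's ref */
        sem_wait(&tc->can_destroy);     /* wait_for_completion()      */
        /* now safe to tear down and free tc */
}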
4130 struct thin_c *tc = ti->private; in thin_dtr() local
4132 spin_lock_irq(&tc->pool->lock); in thin_dtr()
4133 list_del_rcu(&tc->list); in thin_dtr()
4134 spin_unlock_irq(&tc->pool->lock); in thin_dtr()
4137 thin_put(tc); in thin_dtr()
4138 wait_for_completion(&tc->can_destroy); in thin_dtr()
4142 __pool_dec(tc->pool); in thin_dtr()
4143 dm_pool_close_thin_device(tc->td); in thin_dtr()
4144 dm_put_device(ti, tc->pool_dev); in thin_dtr()
4145 if (tc->origin_dev) in thin_dtr()
4146 dm_put_device(ti, tc->origin_dev); in thin_dtr()
4147 kfree(tc); in thin_dtr()
4167 struct thin_c *tc; in thin_ctr() local
4179 tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL); in thin_ctr()
4180 if (!tc) { in thin_ctr()
4185 tc->thin_md = dm_table_get_md(ti->table); in thin_ctr()
4186 spin_lock_init(&tc->lock); in thin_ctr()
4187 INIT_LIST_HEAD(&tc->deferred_cells); in thin_ctr()
4188 bio_list_init(&tc->deferred_bio_list); in thin_ctr()
4189 bio_list_init(&tc->retry_on_resume_list); in thin_ctr()
4190 tc->sort_bio_list = RB_ROOT; in thin_ctr()
4204 tc->origin_dev = origin_dev; in thin_ctr()
4212 tc->pool_dev = pool_dev; in thin_ctr()
4214 if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) { in thin_ctr()
4220 pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev); in thin_ctr()
4227 tc->pool = __pool_table_lookup(pool_md); in thin_ctr()
4228 if (!tc->pool) { in thin_ctr()
4233 __pool_inc(tc->pool); in thin_ctr()
4235 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_ctr()
4241 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td); in thin_ctr()
4247 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block); in thin_ctr()
4257 if (tc->pool->pf.discard_enabled) { in thin_ctr()
4264 spin_lock_irq(&tc->pool->lock); in thin_ctr()
4265 if (tc->pool->suspended) { in thin_ctr()
4266 spin_unlock_irq(&tc->pool->lock); in thin_ctr()
4272 refcount_set(&tc->refcount, 1); in thin_ctr()
4273 init_completion(&tc->can_destroy); in thin_ctr()
4274 list_add_tail_rcu(&tc->list, &tc->pool->active_thins); in thin_ctr()
4275 spin_unlock_irq(&tc->pool->lock); in thin_ctr()
4289 dm_pool_close_thin_device(tc->td); in thin_ctr()
4291 __pool_dec(tc->pool); in thin_ctr()
4295 dm_put_device(ti, tc->pool_dev); in thin_ctr()
4297 if (tc->origin_dev) in thin_ctr()
4298 dm_put_device(ti, tc->origin_dev); in thin_ctr()
4300 kfree(tc); in thin_ctr()
4321 struct pool *pool = h->tc->pool; in thin_endio()
4348 cell_defer_no_holder(h->tc, h->cell); in thin_endio()
4355 struct thin_c *tc = ti->private; in thin_presuspend() local
4358 noflush_work(tc, do_noflush_start); in thin_presuspend()
4363 struct thin_c *tc = ti->private; in thin_postsuspend() local
4369 noflush_work(tc, do_noflush_stop); in thin_postsuspend()
4374 struct thin_c *tc = ti->private; in thin_preresume() local
4376 if (tc->origin_dev) in thin_preresume()
4377 tc->origin_size = get_dev_size(tc->origin_dev->bdev); in thin_preresume()
4392 struct thin_c *tc = ti->private; in thin_status() local
4394 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_status()
4399 if (!tc->td) in thin_status()
4404 r = dm_thin_get_mapped_count(tc->td, &mapped); in thin_status()
4410 r = dm_thin_get_highest_mapped_block(tc->td, &highest); in thin_status()
4416 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block); in thin_status()
4419 tc->pool->sectors_per_block) - 1); in thin_status()
4426 format_dev_t(buf, tc->pool_dev->bdev->bd_dev), in thin_status()
4427 (unsigned long) tc->dev_id); in thin_status()
4428 if (tc->origin_dev) in thin_status()
4429 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev)); in thin_status()
4448 struct thin_c *tc = ti->private; in thin_iterate_devices() local
4449 struct pool *pool = tc->pool; in thin_iterate_devices()
4461 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data); in thin_iterate_devices()
4468 struct thin_c *tc = ti->private; in thin_io_hints() local
4469 struct pool *pool = tc->pool; in thin_io_hints()