/fs/btrfs/
  extent-io-tree.h
      145  struct extent_state **cached);
      147  u32 bits, struct extent_state **cached);
      150  struct extent_state **cached)  in btrfs_lock_extent() argument
      156  u64 end, struct extent_state **cached)  in btrfs_try_lock_extent() argument
      183  struct extent_state **cached)  in btrfs_clear_extent_bit() argument
      189  struct extent_state **cached)  in btrfs_unlock_extent() argument
      192  cached, NULL);  in btrfs_unlock_extent()
      205  EXTENT_DO_ACCOUNTING, cached);  in btrfs_clear_extent_dirty()
      223  u64 end, struct extent_state **cached)  in btrfs_lock_dio_extent() argument
      235  u64 end, struct extent_state **cached)  in btrfs_unlock_dio_extent() argument
      [all …]
  extent-io-tree.c
      652  cached = *cached_state;  in btrfs_clear_extent_bit_changeset()
      659  if (cached && extent_state_in_tree(cached) &&  in btrfs_clear_extent_bit_changeset()
      660  cached->start <= start && cached->end > start) {  in btrfs_clear_extent_bit_changeset()
      663  state = cached;  in btrfs_clear_extent_bit_changeset()
      1685  cached = *cached_state;  in btrfs_count_range_bits()
      1690  if (cached->start <= cur_start && cur_start <= cached->end) {  in btrfs_count_range_bits()
      1691  state = cached;  in btrfs_count_range_bits()
      1704  state = cached;  in btrfs_count_range_bits()
      1826  if (cached && extent_state_in_tree(cached) && cached->start <= start &&  in btrfs_test_range_bit()
      1827  cached->end > start)  in btrfs_test_range_bit()
      [all …]
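Several of these call sites test whether a remembered extent_state still covers the start of the requested range before falling back to a tree search. A minimal user-space sketch of that fast-path check follows; the types and function name are illustrative, not the kernel API, and only the covering condition mirrors the matches above.

```c
#include <stdbool.h>

struct extent_state_model {
    unsigned long long start;
    unsigned long long end;   /* inclusive end of the range */
    bool in_tree;             /* stand-in for extent_state_in_tree() */
};

/*
 * Reuse the state remembered from a previous operation when it still covers
 * 'start'; otherwise return NULL and let the caller walk the tree.
 */
static struct extent_state_model *
pick_start_state(struct extent_state_model *cached, unsigned long long start)
{
    if (cached && cached->in_tree &&
        cached->start <= start && cached->end > start)
        return cached;   /* fast path: skip the tree search */
    return NULL;         /* slow path: caller searches the tree */
}
```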
  fiemap.c
      72  bool cached;  member
      117  if (!cache->cached)  in emit_fiemap_extent()
      252  cache->cached = false;  in emit_fiemap_extent()
      266  cache->cached = false;  in emit_fiemap_extent()
      270  cache->cached = true;  in emit_fiemap_extent()
      295  if (!cache->cached)  in emit_last_fiemap_cache()
      300  cache->cached = false;  in emit_last_fiemap_cache()
      822  if (cache.cached && cache.offset + cache.len >= last_extent_end) {  in extent_fiemap()
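The fiemap matches point at a one-slot cache: `cached` records whether the slot still holds an extent that has not been emitted, so adjacent results can be combined before they are reported, and a final flush handles the leftover entry. The sketch below is an assumed reconstruction of that pattern with made-up names and merge rules; only the role of the `cached` flag is taken from the listing.

```c
#include <stdbool.h>
#include <stdio.h>

struct fiemap_cache_model {
    unsigned long long offset;
    unsigned long long len;
    unsigned int flags;
    bool cached;        /* slot holds a not-yet-emitted extent */
};

static void emit(const struct fiemap_cache_model *c)
{
    printf("extent: offset=%llu len=%llu flags=%#x\n",
           c->offset, c->len, c->flags);
}

/* Buffer one extent, merging it into the cached slot when they are adjacent. */
static void cache_extent(struct fiemap_cache_model *c, unsigned long long offset,
                         unsigned long long len, unsigned int flags)
{
    if (c->cached && c->offset + c->len == offset && c->flags == flags) {
        c->len += len;  /* contiguous and compatible: extend in place */
        return;
    }
    if (c->cached)
        emit(c);        /* flush the previously buffered extent first */
    *c = (struct fiemap_cache_model){
        .offset = offset, .len = len, .flags = flags, .cached = true,
    };
}

/* Counterpart of emit_last_fiemap_cache(): flush whatever is still buffered. */
static void emit_last(struct fiemap_cache_model *c)
{
    if (!c->cached)
        return;
    emit(c);
    c->cached = false;
}
```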
  block-group.h
      167  int cached;  member
      383  return cache->cached == BTRFS_CACHE_FINISHED ||  in btrfs_block_group_done()
      384  cache->cached == BTRFS_CACHE_ERROR;  in btrfs_block_group_done()
  ordered-data.c
      571  struct btrfs_ordered_extent **cached,  in btrfs_dec_test_ordered_pending() argument
      580  if (cached && *cached) {  in btrfs_dec_test_ordered_pending()
      581  entry = *cached;  in btrfs_dec_test_ordered_pending()
      611  if (finished && cached && entry) {  in btrfs_dec_test_ordered_pending()
      612  *cached = entry;  in btrfs_dec_test_ordered_pending()
  block-group.c
      879  block_group->cached = BTRFS_CACHE_STARTED;  in caching_thread()
      949  if (cache->cached != BTRFS_CACHE_NO) {  in btrfs_cache_block_group()
      960  cache->cached = BTRFS_CACHE_STARTED;  in btrfs_cache_block_group()
      1185  if (block_group->cached == BTRFS_CACHE_STARTED)  in btrfs_remove_block_group()
      2444  cache->cached = BTRFS_CACHE_FINISHED;  in read_one_block_group()
      2447  cache->cached = BTRFS_CACHE_FINISHED;  in read_one_block_group()
      2502  bg->cached = BTRFS_CACHE_FINISHED;  in fill_dummy_bgs()
      2902  cache->cached = BTRFS_CACHE_FINISHED;  in btrfs_make_block_group()
      3279  if (block_group->cached != BTRFS_CACHE_FINISHED ||  in cache_save_setup()
      4550  if (block_group->cached == BTRFS_CACHE_NO ||  in btrfs_free_block_groups()
      [all …]
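Taken together, the block-group matches trace a small state machine in the `cached` field: a group starts at `BTRFS_CACHE_NO`, moves to `BTRFS_CACHE_STARTED` while the caching thread loads its free space, and ends at `BTRFS_CACHE_FINISHED` or `BTRFS_CACHE_ERROR`, which is exactly what `btrfs_block_group_done()` tests. A hedged sketch of that check, with local stand-in names:

```c
#include <stdbool.h>

/* Stand-ins for the BTRFS_CACHE_* values referenced in the matches above. */
enum cache_state_model {
    CACHE_NO,        /* free-space caching has not started            */
    CACHE_STARTED,   /* a caching thread is populating the free space */
    CACHE_FINISHED,  /* free space is fully loaded                    */
    CACHE_ERROR,     /* caching failed; nothing more will happen      */
};

struct block_group_model {
    enum cache_state_model cached;
};

/* Modeled on btrfs_block_group_done(): done means no further progress is coming. */
static bool block_group_done(const struct block_group_model *bg)
{
    return bg->cached == CACHE_FINISHED || bg->cached == CACHE_ERROR;
}
```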
  backref.c
      46  bool cached;  in check_extent_in_eb() local
      65  cached = ctx->cache_lookup(eb->start, ctx->user_ctx, &root_ids,  in check_extent_in_eb()
      67  if (!cached)  in check_extent_in_eb()
      1546  bool cached;  in find_parent_nodes() local
      1549  cached = lookup_backref_shared_cache(sc->ctx, sc->root,  in find_parent_nodes()
      1552  if (cached) {  in find_parent_nodes()
      1978  bool cached;  in btrfs_is_data_extent_shared() local
      1983  if (cached) {  in btrfs_is_data_extent_shared()
      2461  bool cached;  in iterate_extent_inodes() local
      2463  cached = ctx->cache_lookup(leaf_bytenr, ctx->user_ctx,  in iterate_extent_inodes()
      [all …]
  file.h
      40  size_t write_bytes, struct extent_state **cached, bool noreserve);
/fs/lockd/
  mon.c
      343  if (cached != NULL) {  in nsm_get_handle()
      344  refcount_inc(&cached->sm_count);  in nsm_get_handle()
      348  "cnt %d\n", cached->sm_name,  in nsm_get_handle()
      349  cached->sm_addrbuf,  in nsm_get_handle()
      351  return cached;  in nsm_get_handle()
      382  struct nsm_handle *cached;  in nsm_reboot_lookup() local
      388  if (unlikely(cached == NULL)) {  in nsm_reboot_lookup()
      392  return cached;  in nsm_reboot_lookup()
      395  refcount_inc(&cached->sm_count);  in nsm_reboot_lookup()
      399  cached->sm_name, cached->sm_addrbuf,  in nsm_reboot_lookup()
      [all …]
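In the lockd matches, a hit on the cached `nsm_handle` bumps its reference count before the pointer is handed out, so the handle cannot be freed while the caller still uses it. A small user-space model of that hand-out pattern, assuming C11 atomics in place of the kernel's `refcount_t` and simplified names throughout:

```c
#include <stdatomic.h>
#include <stddef.h>

struct handle_model {
    atomic_int refcount;
    /* host name, address, ... */
};

/* Return the cached handle with an extra reference, or NULL on a miss. */
static struct handle_model *get_cached_handle(struct handle_model *cached)
{
    if (cached != NULL) {
        atomic_fetch_add(&cached->refcount, 1);   /* like refcount_inc(&...->sm_count) */
        return cached;
    }
    return NULL;   /* caller would allocate and insert a fresh handle */
}
```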
/fs/proc/
  meminfo.c
      38  long cached;  in meminfo_proc_show() local
      48  cached = global_node_page_state(NR_FILE_PAGES) -  in meminfo_proc_show()
      50  if (cached < 0)  in meminfo_proc_show()
      51  cached = 0;  in meminfo_proc_show()
      64  show_val_kb(m, "Cached: ", cached);  in meminfo_proc_show()
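`meminfo_proc_show()` derives the "Cached:" value by subtraction and clamps it at zero, so a racy sample of the counters never prints a negative number. In the sketch below only the clamp is taken verbatim from the listing; the particular terms subtracted (swap-cache and buffer pages) are an assumption.

```c
/* Illustrative recomputation; the subtracted terms are assumptions, the clamp is not. */
static long cached_pages_model(long file_pages, long swapcache_pages, long buffer_pages)
{
    long cached = file_pages - swapcache_pages - buffer_pages;

    if (cached < 0)   /* counters are sampled without locking; never report < 0 */
        cached = 0;
    return cached;
}
```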
/fs/bcachefs/
  extents.c
      205  have_dirty_ptrs |= !p.ptr.cached;  in bch2_bkey_pick_read_device()
      768  if (!p.ptr.cached && crc_is_compressed(p.crc))  in bch2_bkey_sectors_compressed()
      794  if (p.ptr.cached)  in bch2_bkey_replicas()
      809  if (p->ptr.cached)  in __extent_ptr_durability()
      1019  (!ptr->cached ||  in bch2_bkey_has_target()
      1150  if (p.ptr.cached) {  in bch2_extent_ptr_set_cached()
      1169  ptr->cached = true;  in bch2_extent_ptr_set_cached()
      1189  ptr->cached &&  in bch2_extent_normalize()
      1215  if (ptr->cached) {  in bch2_extent_normalize_by_opts()
      1234  ptr->cached ? " cached" : "");  in bch2_extent_ptr_to_text()
      [all …]
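Across the bcachefs extent code, a pointer's `cached` bit marks a droppable cached copy as opposed to a dirty replica, which is why durability and replica counting skip cached pointers. A simplified model of that filtering; the structures are illustrative, not the bcachefs on-disk format.

```c
#include <stdbool.h>
#include <stddef.h>

struct extent_ptr_model {
    unsigned dev;
    bool cached;   /* cached copy: may be dropped at any time */
};

/*
 * Count replicas that actually contribute durability, in the spirit of the
 * matches that test !p.ptr.cached before accounting a pointer.
 */
static unsigned count_dirty_ptrs(const struct extent_ptr_model *ptrs, size_t n)
{
    unsigned dirty = 0;

    for (size_t i = 0; i < n; i++)
        if (!ptrs[i].cached)
            dirty++;
    return dirty;
}
```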
  btree_update.c
      23  cmp_int(l->cached, r->cached) ?:  in btree_insert_entry_cmp()
      385  .cached = path->cached,  in bch2_trans_update_by_path()
      414  i->cached = n.cached;  in bch2_trans_update_by_path()
      447  if (path->cached && !i->old_btree_u64s)  in bch2_trans_update_by_path()
      521  !path->cached &&  in bch2_trans_update_ip()
  btree_iter.c
      153  if (path->cached) {  in __bch2_btree_path_verify_level()
      223  BUG_ON(!path->cached &&  in __bch2_btree_path_verify()
      337  if (!path->cached) {  in __bch2_assert_pos_locked()
      631  BUG_ON(path->cached);  in bch2_btree_path_level_init()
      647  if (!i->cached &&  in bch2_trans_revalidate_updates_in_node()
      1178  if (path->cached) {  in bch2_btree_path_traverse_one()
      1490  i->cached,  in bch2_trans_updates_to_text()
      1759  cached,  in bch2_path_get()
      1768  trans->paths[path_pos].cached == cached &&  in bch2_path_get()
      1784  path->cached = cached;  in bch2_path_get()
      [all …]
  alloc_foreground.h
      260  bool cached)  in bch2_alloc_sectors_append_ptrs_inlined() argument
      273  ptr.cached = cached ||  in bch2_alloc_sectors_append_ptrs_inlined()
  btree_types.h
      69  bool cached;  member
      225  x(cached) \
      320  bool cached:1;  member
      415  return !b->cached  in btree_node_pos()
      426  bool cached:1;  member
  extents_format.h
      174  cached:1,  member
      186  cached:1,
  buckets.c
      155  if (!p.ptr.cached) {  in bch2_check_fix_ptr()
      176  if (!p.ptr.cached &&  in bch2_check_fix_ptr()
      199  if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,  in bch2_check_fix_ptr()
      222  if (!p.ptr.cached &&  in bch2_check_fix_ptr()
      329  if ((p.ptr.cached &&  in bch2_check_fix_ptrs()
      331  (!p.ptr.cached &&  in bch2_check_fix_ptrs()
      495  if (b_gen != ptr->gen && ptr->cached) {  in bch2_bucket_ref_update()
      603  !p->ptr.cached ? &a->dirty_sectors :  in __mark_pointer()
      803  if (p.ptr.cached && stale)  in __trigger_extent()
      806  if (p.ptr.cached) {  in __trigger_extent()
  extents.h
      581  if (!ptr->cached)  in bch2_bkey_dirty_devs()
      593  if (ptr->cached)  in bch2_bkey_cached_devs()
      700  return (ptr1.cached == ptr2.cached &&  in bch2_extent_ptr_eq()
  btree_trans_commit.c
      130  if (!i->cached)  in bch2_trans_lock_write()
      339  BUG_ON(i->cached != path->cached);  in btree_insert_entry_checks()
      619  ret = !i->cached  in bch2_trans_commit_write_locked()
      780  if (!i->cached)  in bch2_trans_commit_write_locked()
      832  if (i->cached)  in do_bch2_trans_commit()
  data_update.c
      163  !ptr->cached)  in trace_io_move_fail2()
      339  !ptr->cached) {  in __bch2_data_update_index_update()
      361  !ptr_c->cached) {  in __bch2_data_update_index_update()
      385  if (!p.ptr.cached &&  in __bch2_data_update_index_update()
      874  if (!p.ptr.cached) {  in bch2_data_update_init()
      1014  if ((opts->rewrite_ptrs & ptr_bit) && ptr->cached) {  in bch2_data_update_opts_normalize()
  buckets_types.h
      79  u64 cached;  member
  disk_accounting_format.h
      61  x(cached, 5) \
  rebalance.c
      64  if (!p.ptr.cached && p.crc.compression_type != compression_type)  in bch2_bkey_ptrs_need_compress()
      85  if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, opts->background_target))  in bch2_bkey_ptrs_need_move()
      131  if (!p.ptr.cached && p.crc.compression_type != compression_type)  in bch2_bkey_sectors_need_rebalance()
      139  if (!p.ptr.cached &&  in bch2_bkey_sectors_need_rebalance()
  io_write_types.h
      18  x(cached) \
/fs/fuse/
  readdir.c
      49  if (fi->rdc.cached || pos != fi->rdc.pos) {  in fuse_add_dirent_to_cache()
      106  fi->rdc.cached = true;  in fuse_readdir_cache_end()
      437  fi->rdc.cached = false;  in fuse_rdc_reset()
      477  if (!fi->rdc.cached) {  in fuse_readdir_cached()
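The fuse matches show `rdc.cached` acting as a validity flag for the in-kernel readdir cache: it is set once a full listing has been stored, checked before serving a cached readdir, and cleared on reset so the next readdir repopulates the cache. A compact sketch of that flag's life cycle, with simplified stand-in names:

```c
#include <stdbool.h>

struct readdir_cache_model {
    bool cached;      /* a complete listing is stored and usable */
    long long pos;    /* next expected position while filling    */
};

static void cache_mark_complete(struct readdir_cache_model *rdc)
{
    rdc->cached = true;    /* like fuse_readdir_cache_end() */
}

static void cache_reset(struct readdir_cache_model *rdc)
{
    rdc->cached = false;   /* like fuse_rdc_reset() */
    rdc->pos = 0;
}

/*
 * Serve from the cache only when it is marked complete; otherwise the caller
 * falls back to asking the server, as fuse_readdir_cached() does on a miss.
 */
static bool cache_usable(const struct readdir_cache_model *rdc)
{
    return rdc->cached;
}
```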