/fs/quota/
quota_tree.c
    36  depth = info->dqi_qtree_depth - depth - 1;  in __get_index()
    37  while (depth--)  in __get_index()
    346  if (!blks[depth]) {  in do_insert_tree()
    358  blks[depth] = ret;  in do_insert_tree()
    382  blks[depth],  in do_insert_tree()
    558  blks[depth]);  in remove_tree()
    571  blks[depth],  in remove_tree()
    578  blks[depth + 1] = 0;  in remove_tree()
    592  blks[depth] = 0;  in remove_tree()
    674  blks[depth]);  in find_tree_dqentry()
    [all …]
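
The __get_index() hits show how the quota tree turns a quota id into a per-level slot: depth is counted from the leaves, and the id is divided by the number of entries per block once per remaining level. A minimal userspace sketch of that indexing scheme (the helper name and constants here are illustrative, not the kernel code itself):

    #include <stdio.h>

    /* Sketch: pick the slot for `id` at a given level of a radix tree
     * with `tree_depth` levels and `epb` entries per block.  Level 0 is
     * the root; the least-significant "digit" of id selects the leaf. */
    static unsigned int qtree_index(unsigned int id, int level,
                                    int tree_depth, unsigned int epb)
    {
            int d = tree_depth - level - 1;   /* levels still below us */

            while (d--)
                    id /= epb;                /* strip the lower digits */
            return id % epb;                  /* this level's digit */
    }

    int main(void)
    {
            /* id 70000 in a 4-level tree with 256 entries per block */
            for (int level = 0; level < 4; level++)
                    printf("level %d -> slot %u\n",
                           level, qtree_index(70000, level, 4, 256));
            return 0;
    }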
|
/fs/ext4/
extents.c
    115  int depth, i;  in ext4_ext_drop_refs()  local
    901  if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {  in ext4_find_extent()
    903  depth);  in ext4_find_extent()
    926  i = depth;  in ext4_find_extent()
    1642  int depth;  in ext4_ext_next_allocated_block()  local
    1662  depth--;  in ext4_ext_next_allocated_block()
    1674  int depth;  in ext4_ext_next_leaf_block()  local
    1684  depth--;  in ext4_ext_next_leaf_block()
    1691  depth--;  in ext4_ext_next_leaf_block()
    5276  depth--;  in ext4_ext_shift_path_extents()
    [all …]
|
indirect.c
    159  while (--depth) {  in ext4_get_branch()
    541  int depth;  in ext4_ind_map_blocks()  local
    551  if (depth == 0)  in ext4_ind_map_blocks()
    586  for (i = partial - chain + 1; i < depth; i++)  in ext4_ind_map_blocks()
    806  for (k = depth; k > 1 && !offsets[k-1]; k--)  in ext4_find_shared()
    1010  if (depth--) {  in ext4_free_branches()
    1023  (unsigned long) nr, depth);  in ext4_free_branches()
    1045  depth);  in ext4_free_branches()
    1388  int depth = (chain+n-1) - partial;  in ext4_ind_remove_space()  local
    1411  if (partial > chain && depth <= depth2) {  in ext4_ind_remove_space()
    [all …]
|
/fs/minix/
itree_common.c
    31  int depth,  in get_branch()  argument
    45  while (--depth) {  in get_branch()
    160  int depth = block_to_path(inode, block, offsets);  in get_block()  local
    162  if (depth == 0)  in get_block()
    166  partial = get_branch(inode, depth, offsets, chain, &err);  in get_block()
    173  partial = chain+depth-1; /* the whole chain */  in get_block()
    196  left = (chain + depth) - partial;  in get_block()
    224  int depth,  in find_shared()  argument
    233  for (k = depth; k > 1 && !offsets[k-1]; k--)  in find_shared()
    281  if (depth--) {  in free_branches()
    [all …]
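
get_block() first calls block_to_path() to turn a logical block number into a chain of array offsets plus a depth, and get_branch() then follows that many levels of indirection. A self-contained sketch of the block_to_path() idea; the direct/indirect slot counts below are illustrative, not the exact minix layout:

    #include <stdio.h>

    #define DIRECT        7          /* illustrative: direct slots in the inode */
    #define PTRS_PER_BLK  256        /* block pointers per indirect block */

    /*
     * Sketch of a block_to_path()-style helper: fill offsets[] with the
     * index to take at each level and return the depth of the chain
     * (0 means the block number is out of range).
     */
    static int block_to_path(long block, int offsets[3])
    {
            if (block < 0)
                    return 0;
            if (block < DIRECT) {
                    offsets[0] = block;
                    return 1;                         /* direct block */
            }
            block -= DIRECT;
            if (block < PTRS_PER_BLK) {
                    offsets[0] = DIRECT;              /* the indirect slot */
                    offsets[1] = block;
                    return 2;                         /* single indirect */
            }
            block -= PTRS_PER_BLK;
            if (block < (long)PTRS_PER_BLK * PTRS_PER_BLK) {
                    offsets[0] = DIRECT + 1;          /* the double-indirect slot */
                    offsets[1] = block / PTRS_PER_BLK;
                    offsets[2] = block % PTRS_PER_BLK;
                    return 3;                         /* double indirect */
            }
            return 0;                                 /* too big for this layout */
    }

    int main(void)
    {
            int offsets[3] = { 0, 0, 0 };
            int depth = block_to_path(5000, offsets);

            printf("depth %d: %d/%d/%d\n", depth, offsets[0], offsets[1], offsets[2]);
            return 0;
    }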
|
/fs/configfs/
symlink.c
    25  int depth = 0;  in item_depth()  local
    26  do { depth++; } while ((p = p->ci_parent) && !configfs_is_root(p));  in item_depth()
    27  return depth;  in item_depth()
    59  int depth, size;  in configfs_get_target_path()  local
    62  depth = item_depth(item);  in configfs_get_target_path()
    63  size = item_path_length(target) + depth * 3 - 1;  in configfs_get_target_path()
    67  pr_debug("%s: depth = %d, size = %d\n", __func__, depth, size);  in configfs_get_target_path()
    69  for (s = path; depth--; s += 3)  in configfs_get_target_path()
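
configfs_get_target_path() sizes its buffer from the link's depth, three bytes per level for "../", then writes one "../" per level before appending the target's path. A userspace sketch of the same construction (function and variable names are made up for illustration):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Sketch: build a relative symlink target out of `depth` "../"
     * components followed by the target path (given without a leading
     * '/'), mirroring the depth * 3 sizing seen above.
     */
    static char *relative_target(int depth, const char *target_rel)
    {
            size_t size = (size_t)depth * 3 + strlen(target_rel) + 1;
            char *path = malloc(size);
            char *s;
            int d;

            if (!path)
                    return NULL;
            for (s = path, d = depth; d--; s += 3)
                    memcpy(s, "../", 3);          /* one "../" per level */
            strcpy(s, target_rel);                /* then the target's path */
            return path;
    }

    int main(void)
    {
            char *p = relative_target(2, "target/item");

            puts(p ? p : "(alloc failed)");       /* "../../target/item" */
            free(p);
            return 0;
    }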
|
inode.c
    130  int depth = sd->s_depth;  in configfs_set_inode_lock_class()  local
    132  if (depth > 0) {  in configfs_set_inode_lock_class()
    133  if (depth <= ARRAY_SIZE(default_group_class)) {  in configfs_set_inode_lock_class()
    135  &default_group_class[depth - 1]);  in configfs_set_inode_lock_class()
|
/fs/ufs/
inode.c
    141  if (depth == 0)  in ufs_frag_map()
    154  while (--depth) {  in ufs_frag_map()
    178  while (--depth) {  in ufs_frag_map()
    404  if (unlikely(!depth)) {  in ufs_getfrag_block()
    420  if (depth == 1) {  in ufs_getfrag_block()
    972  if (--depth) {  in free_full_branch()
    1001  if (--depth) {  in free_branch_tail()
    1108  int depth;  in ufs_truncate_blocks()  local
    1118  if (!depth)  in ufs_truncate_blocks()
    1121  depth = 1;  in ufs_truncate_blocks()
    [all …]
|
/fs/ext2/
inode.c
    235  int depth,  in ext2_get_branch()  argument
    249  while (--depth) {  in ext2_get_branch()
    636  int depth;  in ext2_get_blocks()  local
    645  if (depth == 0)  in ext2_get_blocks()
    666  partial = chain + depth - 1;  in ext2_get_blocks()
    748  le32_to_cpu(chain[depth-1].key),  in ext2_get_blocks()
    756  le32_to_cpu(chain[depth-1].key), count,  in ext2_get_blocks()
    779  *bno = le32_to_cpu(chain[depth-1].key);  in ext2_get_blocks()
    1038  int depth,  in ext2_find_shared()  argument
    1141  if (depth--) {  in ext2_free_branches()
    [all …]
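
ext2_get_branch() walks `depth` levels of block pointers, reading one indirect block per level and remembering each pointer in chain[], so chain[depth-1].key ends up being the data block. A flat userspace sketch of the same loop over an in-memory "disk" (the toy disk, names, and sizes are illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    #define PTRS_PER_BLK 4

    /* A toy "disk": block N is an array of PTRS_PER_BLK block pointers. */
    static uint32_t disk[8][PTRS_PER_BLK] = {
            [2] = { 5, 0, 0, 0 },     /* indirect block 2 points to block 5 */
            [5] = { 7, 0, 0, 0 },     /* indirect block 5 points to block 7 */
    };

    /*
     * Sketch of a get_branch()-style walk: start from the pointer held
     * in the inode, then follow depth - 1 indirect blocks, using
     * offsets[] to pick a slot at each level.  Returns the final block
     * number, or 0 on a hole (a zero pointer).
     */
    static uint32_t get_branch(uint32_t inode_ptr, int depth, const int *offsets)
    {
            uint32_t key = inode_ptr;

            while (--depth) {                     /* one read per level */
                    if (!key)
                            return 0;             /* hit a hole */
                    key = disk[key][*++offsets];  /* next level's pointer */
            }
            return key;
    }

    int main(void)
    {
            int offsets[3] = { 0, 0, 0 };         /* slot to take at each level */

            printf("mapped to block %u\n", get_branch(2, 3, offsets));
            return 0;
    }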
|
/fs/
eventpoll.c
    990  mutex_lock_nested(&ep->mtx, depth);  in __ep_eventpoll_poll()
    993  if (ep_item_poll(epi, &pt, depth + 1)) {  in __ep_eventpoll_poll()
    1045  int depth)  in ep_item_poll()  argument
    1061  res = __ep_eventpoll_poll(file, pt, depth);  in ep_item_poll()
    1534  if (depth > EP_MAX_NESTS) /* too deep nesting */  in reverse_path_check_proc()
    1541  error = path_count_inc(depth);  in reverse_path_check_proc()
    2162  mutex_lock_nested(&ep->mtx, depth + 1);  in ep_loop_check_proc()
    2221  int depth, upwards_depth;  in ep_loop_check()  local
    2228  depth = ep_loop_check_proc(to, 0);  in ep_loop_check()
    2229  if (depth > EP_MAX_NESTS)  in ep_loop_check()
    [all …]
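
Both the polling path and the loop-check path pass a depth down every recursive step and refuse to go past EP_MAX_NESTS, which is how epoll bounds chains of epoll file descriptors nested inside one another. A generic sketch of that depth-capped recursion over a small chain (the cap and the node type are illustrative, not epoll's):

    #include <stdio.h>

    #define MAX_NESTS 4                       /* illustrative cap, like EP_MAX_NESTS */

    struct node {
            struct node *child;               /* a chain of nested objects */
    };

    /*
     * Sketch: visit a chain of nested objects, passing depth + 1 to each
     * recursive call and bailing out once the cap is exceeded, instead
     * of recursing (and locking) without bound.
     */
    static int check_nesting(const struct node *n, int depth)
    {
            if (depth > MAX_NESTS)
                    return -1;                /* too deep: reject, don't recurse */
            if (!n->child)
                    return 0;
            return check_nesting(n->child, depth + 1);
    }

    int main(void)
    {
            struct node chain[6];

            for (int i = 0; i < 5; i++)
                    chain[i].child = &chain[i + 1];
            chain[5].child = NULL;

            printf("%s\n", check_nesting(&chain[0], 0) ? "too deep" : "ok");
            return 0;
    }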
|
namei.c
    640  unsigned depth;  member
    665  p->depth = 0;  in __set_nameidata()
    732  int i = nd->depth;  in drop_links()
    753  for (i = 0; i < nd->depth; i++)  in terminate_walk()
    762  nd->depth = 0;  in terminate_walk()
    795  nd->depth = 0;  in legitimize_links()
    802  nd->depth = i + 1;  in legitimize_links()
    1894  last = nd->stack + nd->depth++;  in pick_link()
    2486  if (!depth) {  in link_path_walk()
    3772  if (nd->depth)  in open_last_lookups()
    [all …]
|
/fs/bcachefs/
snapshot.h
    75  s->depth != snapshot_t(c, parent)->depth + 1)  in __bch2_snapshot_parent()
    77  id, snapshot_t(c, id)->depth,  in __bch2_snapshot_parent()
    78  parent, snapshot_t(c, parent)->depth);  in __bch2_snapshot_parent()
    145  return parent ? snapshot_t(c, parent)->depth + 1 : 0;  in bch2_snapshot_depth()
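
bch2_snapshot_depth() defines a node's depth as its parent's depth plus one (zero for a root), and __bch2_snapshot_parent() checks that invariant when walking up. A tiny sketch of maintaining and using that invariant on snapshot-like nodes (structure and names are illustrative, not the bcachefs types):

    #include <stdio.h>

    struct snap {
            struct snap *parent;
            unsigned depth;                   /* distance from the root */
    };

    /* Sketch: a new node's depth is parent->depth + 1, or 0 for a root. */
    static void snap_link(struct snap *s, struct snap *parent)
    {
            s->parent = parent;
            s->depth  = parent ? parent->depth + 1 : 0;
    }

    /* Walk up until the node sits at the requested depth: the usual first
     * step of an "is A an ancestor of B" check on depth-annotated trees. */
    static const struct snap *walk_up_to_depth(const struct snap *s, unsigned depth)
    {
            while (s && s->depth > depth)
                    s = s->parent;
            return s;
    }

    int main(void)
    {
            struct snap root, a, b;

            snap_link(&root, NULL);
            snap_link(&a, &root);
            snap_link(&b, &a);

            printf("b at depth %u, ancestor at depth 1 is %s\n",
                   b.depth, walk_up_to_depth(&b, 1) == &a ? "a" : "?");
            return 0;
    }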
|
btree_update_interior.h
    204  unsigned depth = btree_node_root(c, b)->c.level + 1;  in btree_update_reserve_required()  local
    211  if (depth < BTREE_MAX_DEPTH)  in btree_update_reserve_required()
    212  return (depth - b->c.level) * 2 + 1;  in btree_update_reserve_required()
    214  return (depth - b->c.level) * 2 - 1;  in btree_update_reserve_required()
|
snapshot.c
    224  le32_to_cpu(s.v->depth),  in bch2_snapshot_to_text()
    326  t->depth = le32_to_cpu(s.v->depth);  in __bch2_mark_snapshot()
    331  t->depth = 0;  in __bch2_mark_snapshot()
    816  u->v.depth = cpu_to_le32(real_depth);  in check_snapshot()
    1260  s->v.depth = 0;  in bch2_snapshot_node_delete()
    1286  u32 depth = bch2_snapshot_depth(c, parent);  in create_snapids()  local
    1316  n->v.depth = cpu_to_le32(depth);  in create_snapids()
    1775  if (!s->v.depth) {  in bch2_fix_child_of_deleted_snapshot()
    1780  u32 depth = le32_to_cpu(s->v.depth);  in bch2_fix_child_of_deleted_snapshot()  local
    1789  depth > 1  in bch2_fix_child_of_deleted_snapshot()
    [all …]
|
snapshot_format.h
    13  __le32 depth;  member
|
snapshot_types.h
    21  u32 depth;  member
|
/fs/ocfs2/
ocfs2_trace.h
    425  TP_ARGS(owner, depth),
    428  __field(int, depth)
    432  __entry->depth = depth;
    439  int depth),
    440  TP_ARGS(subtree_root, blkno, depth),
    444  __field(int, depth)
    449  __entry->depth = depth;
    452  __entry->blkno, __entry->depth)
    536  __field(unsigned int, depth)
    542  __entry->depth = depth;
    [all …]
|
/fs/hfs/
btree.c
    126  tree->depth = be16_to_cpu(head->depth);  in hfs_btree_open()
    219  head->depth = cpu_to_be16(tree->depth);  in hfs_btree_write()
|
btree.h
    41  unsigned int depth;  member
    148  __be16 depth; /* (V) The number of levels in this B-tree */  member
|
/fs/hfsplus/
btree.c
    177  tree->depth = be16_to_cpu(head->depth);  in hfs_btree_open()
    305  head->depth = cpu_to_be16(tree->depth);  in hfs_btree_write()
|
attributes.c
    221  err = hfs_bmap_reserve(fd.tree, fd.tree->depth + 1);  in hfsplus_create_attr()
    326  err = hfs_bmap_reserve(fd.tree, fd.tree->depth);  in hfsplus_delete_attr()
|
catalog.c
    272  err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth);  in hfsplus_create_cat()
    348  err = hfs_bmap_reserve(fd.tree, 2 * (int)fd.tree->depth - 2);  in hfsplus_delete_cat()
    456  err = hfs_bmap_reserve(src_fd.tree, 4 * (int)src_fd.tree->depth - 1);  in hfsplus_rename_cat()
|
/fs/overlayfs/
inode.c
    826  int depth = inode->i_sb->s_stack_depth - 1;  in ovl_lockdep_annotate_inode_mutex_key()  local
    828  if (WARN_ON_ONCE(depth < 0 || depth >= OVL_MAX_NESTING))  in ovl_lockdep_annotate_inode_mutex_key()
    829  depth = 0;  in ovl_lockdep_annotate_inode_mutex_key()
    832  lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_dir_key[depth]);  in ovl_lockdep_annotate_inode_mutex_key()
    834  lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_key[depth]);  in ovl_lockdep_annotate_inode_mutex_key()
    836  lockdep_set_class(&OVL_I(inode)->lock, &ovl_i_lock_key[depth]);  in ovl_lockdep_annotate_inode_mutex_key()
|
/fs/gfs2/
glops.c
    394  u16 height, depth;  in gfs2_dinode_in()  local
    449  depth = be16_to_cpu(str->di_depth);  in gfs2_dinode_in()
    450  if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) {  in gfs2_dinode_in()
    455  depth < ilog2(sdp->sd_hash_ptrs)) {  in gfs2_dinode_in()
    459  ip->i_depth = (u8)depth;  in gfs2_dinode_in()
|
dir.c
    864  static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh, u16 depth)  in new_leaf()  argument
    886  leaf->lf_depth = cpu_to_be16(depth);  in new_leaf()
    1368  int *copied, unsigned *depth,  in gfs2_dir_read_leaf()  argument
    1389  *depth = be16_to_cpu(lf->lf_depth);  in gfs2_dir_read_leaf()
    1396  if (*depth < GFS2_DIR_MAX_DEPTH || !sdp->sd_args.ar_loccookie) {  in gfs2_dir_read_leaf()
    1538  unsigned depth = 0;  in dir_e_read()  local
    1554  &copied, &depth,  in dir_e_read()
    1559  len = BIT(dip->i_depth - depth);  in dir_e_read()
|
/fs/qnx6/
inode.c
    124  int depth = ei->di_filelevels;  in qnx6_block_map()  local
    127  bitdelta = ptrbits * depth;  in qnx6_block_map()
    137  for (i = 0; i < depth; i++) {  in qnx6_block_map()
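
qnx6_block_map() starts with bitdelta = ptrbits * depth, so each level of indirection strips ptrbits bits off the logical block number, and shifting by the remaining bits yields the slot to take at that level. A small sketch of that bit-sliced lookup (the pointer width and the example block are illustrative):

    #include <stdio.h>

    #define PTRBITS 10                        /* illustrative: 1024 pointers per block */

    /*
     * Sketch: split a logical block number into per-level slots, top
     * level first, by shifting away PTRBITS bits per remaining level,
     * as the bitdelta arithmetic above does.
     */
    static void map_levels(unsigned long block, int depth)
    {
            int bitdelta = PTRBITS * depth;

            for (int i = 0; i < depth; i++) {
                    bitdelta -= PTRBITS;
                    unsigned long slot = (block >> bitdelta) &
                                         ((1UL << PTRBITS) - 1);
                    printf("level %d -> slot %lu\n", i, slot);
            }
    }

    int main(void)
    {
            map_levels(300000, 2);            /* a 2-level (double indirect) file */
            return 0;
    }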
|