/fs/xfs/libxfs/
xfs_btree.c
     274  switch (cur->bc_ops->type) {  in __xfs_btree_check_block()
     604  cur->bc_ops->buf_ops);  in xfs_btree_dup_cursor()
     741  (n - 1) * cur->bc_ops->key_len + (cur->bc_ops->key_len / 2);  in xfs_btree_high_key_offset()
     754  cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len +  in xfs_btree_ptr_offset()
     994  cur->bc_ops->buf_ops);  in xfs_btree_readahead_memblock()
    1000  cur->bc_ops->buf_ops);  in xfs_btree_readahead_memblock()
    1022  cur->bc_ops->buf_ops);  in xfs_btree_readahead_agblock()
    1029  cur->bc_ops->buf_ops);  in xfs_btree_readahead_agblock()
    1119  cur->bc_ops->buf_ops);  in xfs_btree_readahead_ptr()
    1404  cur->bc_ops->buf_ops);  in xfs_btree_read_buf_block()
    [all …]
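
Every one of these call sites goes through the cursor's ops table rather than hardcoding per-btree geometry: `key_len` and `get_maxrecs()` together determine where keys end and block pointers begin inside a node. A minimal user-space sketch of that offset arithmetic follows; the types, the fixed fanout of 16, and names like `demo_ops` and `ptr_offset` are illustrative stand-ins, not the kernel definitions.

```c
#include <stdio.h>

struct btcur;                           /* forward decl for the hook */

struct btree_ops {
        size_t  key_len;                /* bytes per key */
        int     (*get_maxrecs)(struct btcur *cur, int level);
};

struct btcur {
        const struct btree_ops  *bc_ops;        /* per-btree-type vtable */
};

/* Mirrors xfs_btree_ptr_offset(): pointers start after maxrecs keys. */
static size_t ptr_offset(struct btcur *cur, int level, size_t hdr_len)
{
        return hdr_len +
               cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len;
}

static int demo_maxrecs(struct btcur *cur, int level)
{
        (void)cur; (void)level;
        return 16;                      /* fixed fanout for the demo */
}

static const struct btree_ops demo_ops = {
        .key_len        = 8,
        .get_maxrecs    = demo_maxrecs,
};

int main(void)
{
        struct btcur cur = { .bc_ops = &demo_ops };

        /* 64-byte header + 16 keys * 8 bytes = offset 192 */
        printf("ptrs start at byte %zu\n", ptr_offset(&cur, 1, 64));
        return 0;
}
```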
|
xfs_btree_staging.c
      64  ASSERT(cur->bc_ops->type != XFS_BTREE_TYPE_INODE);  in xfs_btree_stage_afakeroot()
     131  ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);  in xfs_btree_stage_ifakeroot()
     444  return cur->bc_ops->get_dmaxrecs(cur, level);  in xfs_btree_bload_max_npb()
     446  ret = cur->bc_ops->get_maxrecs(cur, level);  in xfs_btree_bload_max_npb()
     499  if (cur->bc_ops->get_dmaxrecs)  in xfs_btree_bload_level_geometry()
     500  maxnr = cur->bc_ops->get_dmaxrecs(cur, level);  in xfs_btree_bload_level_geometry()
     502  maxnr = cur->bc_ops->get_maxrecs(cur, level);  in xfs_btree_bload_level_geometry()
     552  maxr = cur->bc_ops->get_maxrecs(cur, level);  in xfs_btree_bload_ensure_slack()
     553  minr = cur->bc_ops->get_minrecs(cur, level);  in xfs_btree_bload_ensure_slack()
     577  const struct xfs_btree_ops *ops = cur->bc_ops;  in xfs_btree_bload_compute_geometry()
    [all …]
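
The staging/bulk-load sites show two idioms: asserting the ops-declared btree `type` before faking a root, and treating `get_dmaxrecs` as an optional hook with `get_maxrecs` as the fallback. A compilable sketch of the fallback, with stand-in types and a hypothetical `level_maxrecs()` helper:

```c
#include <stdio.h>

struct btcur;

struct btree_ops {
        int     (*get_maxrecs)(struct btcur *cur, int level);  /* required */
        int     (*get_dmaxrecs)(struct btcur *cur, int level); /* optional */
};

struct btcur { const struct btree_ops *bc_ops; };

/* Mirrors xfs_btree_bload_level_geometry(): prefer the on-disk hook. */
static int level_maxrecs(struct btcur *cur, int level)
{
        if (cur->bc_ops->get_dmaxrecs)
                return cur->bc_ops->get_dmaxrecs(cur, level);
        return cur->bc_ops->get_maxrecs(cur, level);
}

static int incore_maxrecs(struct btcur *cur, int level)
{
        (void)cur; (void)level;
        return 251;
}

/* No get_dmaxrecs hook: records have the same shape on disk. */
static const struct btree_ops demo_ops = { .get_maxrecs = incore_maxrecs };

int main(void)
{
        struct btcur cur = { .bc_ops = &demo_ops };

        printf("maxrecs = %d\n", level_maxrecs(&cur, 0));       /* 251 */
        return 0;
}
```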
|
xfs_btree.h
      83  (cur)->bc_ops->statoff + __XBTS_ ## stat)
      86  (cur)->bc_ops->statoff + __XBTS_ ## stat, val)
     270  const struct xfs_btree_ops *bc_ops;  member
     552  return cur->bc_ops->cmp_two_keys(cur, key1, key2, NULL) < 0;  in xfs_btree_keycmp_lt()
     561  return cur->bc_ops->cmp_two_keys(cur, key1, key2, NULL) > 0;  in xfs_btree_keycmp_gt()
     570  return cur->bc_ops->cmp_two_keys(cur, key1, key2, NULL) == 0;  in xfs_btree_keycmp_eq()
     608  return cur->bc_ops->cmp_two_keys(cur, key1, key2, mask) < 0;  in xfs_btree_masked_keycmp_lt()
     618  return cur->bc_ops->cmp_two_keys(cur, key1, key2, mask) > 0;  in xfs_btree_masked_keycmp_gt()
     642  if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)  in xfs_btree_islastblock()
     685  cur->bc_ops = ops;  in xfs_btree_alloc_cursor()
    [all …]
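
The keycmp helpers are thin wrappers: `cmp_two_keys` returns a three-way result (negative, zero, positive), and the inline predicates just compare it against 0, passing NULL for an unmasked comparison. A self-contained sketch under the same assumptions; the key union and `demo_cmp` are stand-ins:

```c
#include <stdbool.h>
#include <stdio.h>

struct btcur;
union btkey { unsigned long long ino; };

struct btree_ops {
        /* three-way compare; a NULL mask means "compare all key fields" */
        int     (*cmp_two_keys)(struct btcur *cur, const union btkey *k1,
                                const union btkey *k2, const union btkey *mask);
};

struct btcur { const struct btree_ops *bc_ops; };

/* Thin predicate over the ops comparator, as in xfs_btree.h above. */
static bool keycmp_lt(struct btcur *cur, const union btkey *k1,
                      const union btkey *k2)
{
        return cur->bc_ops->cmp_two_keys(cur, k1, k2, NULL) < 0;
}

static int demo_cmp(struct btcur *cur, const union btkey *k1,
                    const union btkey *k2, const union btkey *mask)
{
        (void)cur; (void)mask;
        return (k1->ino > k2->ino) - (k1->ino < k2->ino);
}

static const struct btree_ops demo_ops = { .cmp_two_keys = demo_cmp };

int main(void)
{
        struct btcur cur = { .bc_ops = &demo_ops };
        union btkey a = { 5 }, b = { 9 };

        printf("a < b: %d\n", keycmp_lt(&cur, &a, &b));  /* prints 1 */
        return 0;
}
```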
|
xfs_btree_mem.c
      30  ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);  in xfbtree_set_root()
      42  ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);  in xfbtree_init_ptr_from_cur()
      54  ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);  in xfbtree_dup_cursor()
      56  ncur = xfs_btree_alloc_cursor(cur->bc_mp, cur->bc_tp, cur->bc_ops,  in xfbtree_dup_cursor()
     171  ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);  in xfbtree_alloc_block()
     197  ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);  in xfbtree_free_block()
|
xfs_ag.c
     324  xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 0, id->agno);  in xfs_btroot_init()
     390  xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 0, id->agno);  in xfs_bnoroot_init()
     406  xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 4, id->agno);  in xfs_rmaproot_init()
     618  const struct xfs_btree_ops *bc_ops;  member
     672  .bc_ops = &xfs_bnobt_ops,  in xfs_ag_init_headers()
     680  .bc_ops = &xfs_cntbt_ops,  in xfs_ag_init_headers()
     688  .bc_ops = &xfs_inobt_ops,  in xfs_ag_init_headers()
     696  .bc_ops = &xfs_finobt_ops,  in xfs_ag_init_headers()
     704  .bc_ops = &xfs_rmapbt_ops,  in xfs_ag_init_headers()
     712  .bc_ops = &xfs_refcountbt_ops,  in xfs_ag_init_headers()
    [all …]
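
AG header setup binds each new root block to its btree flavor by storing the ops pointer in a descriptor table, which the init helpers then stamp into the buffer. A stand-in sketch of that table-driven initialization; `aghdr_init`, the ops names, and the loop are illustrative, not the kernel structures:

```c
#include <stdio.h>

struct btree_ops { const char *name; };

static const struct btree_ops bnobt_ops = { "bnobt" };
static const struct btree_ops cntbt_ops = { "cntbt" };
static const struct btree_ops inobt_ops = { "inobt" };

struct aghdr_init {
        const struct btree_ops  *bc_ops;        /* which btree to stamp */
};

static const struct aghdr_init headers[] = {
        { .bc_ops = &bnobt_ops },
        { .bc_ops = &cntbt_ops },
        { .bc_ops = &inobt_ops },
};

int main(void)
{
        for (size_t i = 0; i < sizeof(headers) / sizeof(headers[0]); i++)
                printf("init root block for %s\n", headers[i].bc_ops->name);
        return 0;
}
```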
|
xfs_alloc_btree.c
      54  if (xfs_btree_is_bno(cur->bc_ops)) {  in xfs_allocbt_set_root()
     183  if (xfs_btree_is_bno(cur->bc_ops))  in xfs_allocbt_init_ptr_from_cur()
     538  if (xfs_btree_is_bno(cur->bc_ops)) {  in xfs_allocbt_commit_staged_btree()
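
`xfs_btree_is_bno()` works by pointer identity: each btree flavor has exactly one static ops table, so comparing `bc_ops` against that table's address is a cheap type test. Sketch with stand-in ops tables:

```c
#include <stdbool.h>

struct btree_ops { const char *name; };

static const struct btree_ops bnobt_ops = { "bnobt" };
static const struct btree_ops cntbt_ops = { "cntbt" };

/* One static ops table per flavor, so address comparison suffices. */
static inline bool btree_is_bno(const struct btree_ops *ops)
{
        return ops == &bnobt_ops;
}

int main(void)
{
        /* a cntbt cursor is not the by-block-number btree */
        return btree_is_bno(&cntbt_ops) ? 1 : 0;
}
```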
|
xfs_ialloc_btree.c
      93  if (xfs_btree_is_fino(cur->bc_ops))  in xfs_inobt_mod_blockcount()
     539  if (xfs_btree_is_ino(cur->bc_ops)) {  in xfs_inobt_commit_staged_btree()
|
xfs_ag.h
     328  const struct xfs_btree_ops *bc_ops;  /* btree ops */  member
|
xfs_refcount.c
     176  if (xfs_btree_is_rtrefcount(cur->bc_ops))  in xfs_refcount_check_btrec()
     189  if (xfs_btree_is_rtrefcount(cur->bc_ops)) {  in xfs_refcount_complain_bad_rec()
    1136  if (xfs_btree_is_rtrefcount(cur->bc_ops))  in xrefc_free_extent()
|
xfs_rmap.c
     337  if (xfs_btree_is_rtrmap(cur->bc_ops) ||  in xfs_rmap_check_btrec()
     338  xfs_btree_is_mem_rtrmap(cur->bc_ops))  in xfs_rmap_check_btrec()
     351  if (xfs_btree_is_mem_rmap(cur->bc_ops))  in xfs_rmap_complain_bad_rec()
     354  else if (xfs_btree_is_rtrmap(cur->bc_ops))  in xfs_rmap_complain_bad_rec()
|
xfs_alloc.c
     276  cur->bc_ops->name, cur->bc_group->xg_gno, fa);  in xfs_alloc_complain_bad_rec()
    1020  bool isbnobt = xfs_btree_is_bno(cur->bc_ops);  in xfs_alloc_cur_check()
    4099  ASSERT(xfs_btree_is_bno(cur->bc_ops));  in xfs_alloc_query_range()
    4113  ASSERT(xfs_btree_is_bno(cur->bc_ops));  in xfs_alloc_query_all()
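
Error reports are tagged with `bc_ops->name` and the owning group so the log pinpoints which btree instance holds the bad record. A stand-in of that reporting style; the `complain_bad_rec()` helper and field names are hypothetical:

```c
#include <stdio.h>

struct btree_ops { const char *name; };
struct group { unsigned int gno; };

struct btcur {
        const struct btree_ops  *bc_ops;
        struct group            *bc_group;
};

/* Tag the complaint with the btree name, group, and failure address. */
static void complain_bad_rec(struct btcur *cur, const void *fa)
{
        fprintf(stderr, "corrupt %s btree in group %u at %p\n",
                cur->bc_ops->name, cur->bc_group->gno, (void *)fa);
}

int main(void)
{
        const struct btree_ops bnobt_ops = { "bnobt" };
        struct group g = { 7 };
        struct btcur cur = { &bnobt_ops, &g };

        complain_bad_rec(&cur, &g);     /* stand-in failure address */
        return 0;
}
```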
|
/fs/xfs/scrub/
btree.c
      50  if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)  in __xchk_btree_process_error()
      94  if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)  in __xchk_btree_set_corrupt()
     158  memcpy(&bs->lastrec, rec, cur->bc_ops->rec_len);  in xchk_btree_rec()
     165  cur->bc_ops->init_key_from_rec(&key, rec);  in xchk_btree_rec()
     175  cur->bc_ops->init_high_key_from_rec(&hkey, rec);  in xchk_btree_rec()
     240  if (bs->cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&  in xchk_btree_ptr_ok()
     431  if (cur->bc_ops->type != XFS_BTREE_TYPE_INODE)  in xchk_btree_check_owner()
     444  if (xfs_btree_is_bno(cur->bc_ops) || xfs_btree_is_rmap(cur->bc_ops)) {  in xchk_btree_check_owner()
     477  if (xfs_btree_is_bmap(bs->cur->bc_ops) &&  in xchk_btree_check_iroot_minrecs()
     500  if (numrecs >= cur->bc_ops->get_minrecs(cur, level))  in xchk_btree_check_minrecs()
    [all …]
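
The scrubber stays btree-agnostic by leaning on ops-declared sizes and constructors: the previous record is copied with `rec_len`, and keys are derived through `init_key_from_rec()`. A self-contained sketch under simplified stand-in types:

```c
#include <stdio.h>
#include <string.h>

union btrec { struct { unsigned int start, len; } alloc; };
union btkey { unsigned int start; };

struct btcur;

struct btree_ops {
        size_t  rec_len;
        void    (*init_key_from_rec)(union btkey *key, const union btrec *rec);
};

struct btcur {
        const struct btree_ops  *bc_ops;
        union btrec             lastrec;        /* previous record seen */
};

static void alloc_key_from_rec(union btkey *key, const union btrec *rec)
{
        key->start = rec->alloc.start;
}

static const struct btree_ops demo_ops = {
        .rec_len                = sizeof(union btrec),
        .init_key_from_rec      = alloc_key_from_rec,
};

int main(void)
{
        struct btcur cur = { .bc_ops = &demo_ops };
        union btrec rec = { .alloc = { .start = 42, .len = 8 } };
        union btkey key;

        /* generic record copy sized by the ops table */
        memcpy(&cur.lastrec, &rec, cur.bc_ops->rec_len);
        cur.bc_ops->init_key_from_rec(&key, &rec);
        printf("low key starts at %u\n", key.start);    /* 42 */
        return 0;
}
```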
|
trace.c
      49  cur->bc_ops->type == XFS_BTREE_TYPE_INODE)  in xchk_btree_cur_fsbno()
|
ialloc.c
      79  ASSERT(xfs_btree_is_fino(cur->bc_ops));  in xchk_inobt_xref_finobt()
     182  ASSERT(xfs_btree_is_ino(cur->bc_ops));  in xchk_finobt_xref_inobt()
     515  if (xfs_btree_is_fino(bs->cur->bc_ops)) {  in xchk_iallocbt_rec_alignment()
|
health.c
     276  unsigned int mask = (*curp)->bc_ops->sick_mask;  in xchk_ag_btree_del_cursor_if_sick()
|
ialloc_repair.c
     541  cur->bc_ops->init_rec_from_cur(cur, block_rec);  in xrep_fibt_get_records()
     568  cur->bc_ops->init_rec_from_cur(cur, block_rec);  in xrep_ibt_get_records()
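
The repair paths here and in the files below all feed the bulk loader through the same hook: `init_rec_from_cur()` serializes whatever record the cursor has staged into the slot `block_rec` points at. A stand-in sketch; the staged fields and `ino_init_rec()` are illustrative:

```c
#include <stdio.h>

struct btcur;

union btrec { struct { unsigned int startino, count; } ino; };

struct btree_ops {
        void    (*init_rec_from_cur)(struct btcur *cur, union btrec *rec);
};

struct btcur {
        const struct btree_ops  *bc_ops;
        unsigned int            staged_startino;  /* record being loaded */
        unsigned int            staged_count;
};

/* Serialize the cursor's staged record into a new-block slot. */
static void ino_init_rec(struct btcur *cur, union btrec *rec)
{
        rec->ino.startino = cur->staged_startino;
        rec->ino.count = cur->staged_count;
}

static const struct btree_ops demo_ops = { .init_rec_from_cur = ino_init_rec };

int main(void)
{
        struct btcur cur = { &demo_ops, 128, 64 };
        union btrec block_rec;

        cur.bc_ops->init_rec_from_cur(&cur, &block_rec);
        printf("rec: start %u count %u\n", block_rec.ino.startino,
               block_rec.ino.count);
        return 0;
}
```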
|
refcount_repair.c
     538  cur->bc_ops->init_rec_from_cur(cur, block_rec);  in xrep_refc_get_records()
|
newbt.c
     584  if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)  in xrep_newbt_claim_block()
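
`ptr_len` selects which arm of the block-pointer union is live: long (64-bit, filesystem-wide) or short (32-bit, AG-relative). Sketch with stand-in constants and host-endian integers in place of XFS_BTREE_LONG_PTR_LEN and the kernel's big-endian fields:

```c
#include <stdint.h>
#include <stdio.h>

#define LONG_PTR_LEN    8       /* 64-bit fsblock pointers */
#define SHORT_PTR_LEN   4       /* 32-bit agblock pointers */

union btptr {
        uint64_t        l;      /* long form */
        uint32_t        s;      /* short form */
};

struct btree_ops { int ptr_len; };

/* Store a block address in whichever arm the ops table declares live. */
static void set_ptr(const struct btree_ops *ops, union btptr *ptr,
                    uint64_t block)
{
        if (ops->ptr_len == LONG_PTR_LEN)
                ptr->l = block;
        else
                ptr->s = (uint32_t)block;
}

int main(void)
{
        const struct btree_ops short_ops = { SHORT_PTR_LEN };
        union btptr p;

        set_ptr(&short_ops, &p, 123);
        printf("short ptr = %u\n", p.s);
        return 0;
}
```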
|
rtrefcount_repair.c
     579  cur->bc_ops->init_rec_from_cur(cur, block_rec);  in xrep_rtrefc_get_records()
|
alloc_repair.c
     625  cur->bc_ops->init_rec_from_cur(cur, block_rec);  in xrep_abt_get_records()
|
bmap_repair.c
     585  cur->bc_ops->init_rec_from_cur(cur, block_rec);  in xrep_bmap_get_records()
|
rtrmap_repair.c
     673  cur->bc_ops->init_rec_from_cur(cur, block_rec);  in xrep_rtrmap_get_records()
|
trace.h
     563  __string(name, cur->bc_ops->name)
     605  __string(name, cur->bc_ops->name)
     648  __string(name, cur->bc_ops->name)
     686  __string(name, cur->bc_ops->name)
     726  __string(name, cur->bc_ops->name)
|
/fs/xfs/
xfs_health.c
     545  if (xfs_btree_is_bmap(cur->bc_ops)) {  in xfs_btree_mark_sick()
     548  } else if (cur->bc_ops->type != XFS_BTREE_TYPE_MEM) {  in xfs_btree_mark_sick()
     550  ASSERT(cur->bc_ops->sick_mask);  in xfs_btree_mark_sick()
     551  xfs_group_mark_sick(cur->bc_group, cur->bc_ops->sick_mask);  in xfs_btree_mark_sick()
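
`xfs_btree_mark_sick()` shows the health plumbing: in-memory btrees have nothing persistent to mark, and everything else reports the ops-declared `sick_mask` against its owning group. A compilable stand-in; the enum values, `group` struct, and mask bit are illustrative:

```c
#include <assert.h>
#include <stdio.h>

enum btree_type { BTREE_TYPE_AG, BTREE_TYPE_INODE, BTREE_TYPE_MEM };

struct btree_ops {
        enum btree_type type;
        unsigned int    sick_mask;      /* health bit(s) for this btree */
};

struct group { unsigned int sick; };

struct btcur {
        const struct btree_ops  *bc_ops;
        struct group            *bc_group;
};

static void btree_mark_sick(struct btcur *cur)
{
        if (cur->bc_ops->type == BTREE_TYPE_MEM)
                return;                 /* ephemeral; nothing to record */
        assert(cur->bc_ops->sick_mask);
        cur->bc_group->sick |= cur->bc_ops->sick_mask;
}

int main(void)
{
        struct group g = { 0 };
        const struct btree_ops ops = { BTREE_TYPE_AG, 1u << 3 };
        struct btcur cur = { &ops, &g };

        btree_mark_sick(&cur);
        printf("group sick mask: 0x%x\n", g.sick);
        return 0;
}
```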
|
xfs_trace.h
    2281  __string(name, cur->bc_ops->name)
    2868  __string(name, cur->bc_ops->name)
    2906  __string(name, cur->bc_ops->name)
    2912  switch (cur->bc_ops->type) {
    2959  __string(name, cur->bc_ops->name)
    3219  switch (cur->bc_ops->type) {
    4702  __string(name, cur->bc_ops->name)
    4730  __string(name, cur->bc_ops->name)
    4767  __string(name, cur->bc_ops->name)
    4806  __string(name, cur->bc_ops->name)
    [all …]
|