| /fs/xfs/scrub/ |
| btree.c |
    246  xchk_btree_set_corrupt(bs->sc, bs->cur, level);    in xchk_btree_ptr_ok()
    390  if (!xchk_btree_xref_process_error(bs->sc, bs->cur,    in xchk_btree_check_block_owner()
    404  xchk_xref_is_only_owned_by(bs->sc, agbno, 1, bs->oinfo);    in xchk_btree_check_block_owner()
    410  xchk_ag_free(bs->sc, &bs->sc->sa);    in xchk_btree_check_block_owner()
    432  xchk_btree_set_corrupt(bs->sc, bs->cur, level);    in xchk_btree_check_owner()
    560  xchk_btree_set_corrupt(bs->sc, bs->cur, level);    in xchk_btree_block_check_keys()
    572  xchk_btree_set_corrupt(bs->sc, bs->cur, level);    in xchk_btree_block_check_keys()
    599  xchk_btree_set_corrupt(bs->sc, bs->cur, level);    in xchk_btree_get_block()
    707  if (!bs)    in xchk_btree()
    752  error = bs->scrub_rec(bs, recp);    in xchk_btree()
    [all …]
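The scrub/btree.c matches end at line 752, where the generic btree walker hands each record to a per-btree callback (`bs->scrub_rec(bs, recp)`) that flags problems through xchk_btree_set_corrupt(). The sketch below mirrors only that walker-plus-callback shape; the struct names and the trivial check are invented, not the xchk_btree API.

```c
#include <stdio.h>
#include <stdbool.h>

/* Invented miniature of a scrub context and per-record callback. */
struct scrub_ctx {
	bool corrupt;
};

struct walker {
	struct scrub_ctx *sc;
	int (*scrub_rec)(struct walker *bs, const int *rec);
};

static void set_corrupt(struct scrub_ctx *sc)
{
	sc->corrupt = true;	/* stand-in for xchk_btree_set_corrupt() */
}

/* Example callback: records must be strictly positive. */
static int check_rec(struct walker *bs, const int *rec)
{
	if (*rec <= 0)
		set_corrupt(bs->sc);
	return 0;
}

int main(void)
{
	struct scrub_ctx sc = { .corrupt = false };
	struct walker bs = { .sc = &sc, .scrub_rec = check_rec };
	const int records[] = { 3, 7, -1, 9 };
	int error = 0;

	/* The walker visits every record and defers judgement to the callback. */
	for (size_t i = 0; i < sizeof(records) / sizeof(records[0]) && !error; i++)
		error = bs.scrub_rec(&bs, &records[i]);

	printf("corrupt=%d error=%d\n", sc.corrupt, error);
	return 0;
}
```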
|
| ialloc.c |
    269  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_iallocbt_chunk()
    326  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_iallocbt_check_cluster_ifree()
    348  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_iallocbt_check_cluster_ifree()
    404  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_iallocbt_check_cluster()
    416  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_iallocbt_check_cluster()
    441  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_iallocbt_check_cluster()
    548  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_iallocbt_rec_alignment()
    553  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_iallocbt_rec_alignment()
    587  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_iallocbt_rec()
    616  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_iallocbt_rec()
    [all …]
|
| rtrmap.c |
    96   struct xchk_btree *bs,    in xchk_rtrmapbt_check_overlapping() argument
    116  !xchk_rtrmapbt_is_shareable(bs->sc, irec))    in xchk_rtrmapbt_check_overlapping()
    117  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_rtrmapbt_check_overlapping()
    155  struct xchk_btree *bs,    in xchk_rtrmapbt_check_mergeable() argument
    163  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_rtrmapbt_check_mergeable()
    221  struct xchk_btree *bs,    in xchk_rtrmapbt_rec() argument
    224  struct xchk_rtrmap *cr = bs->private;    in xchk_rtrmapbt_rec()
    229  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_rtrmapbt_rec()
    236  xchk_rtrmapbt_check_mergeable(bs, cr, &irec);    in xchk_rtrmapbt_rec()
    237  xchk_rtrmapbt_check_overlapping(bs, cr, &irec);    in xchk_rtrmapbt_rec()
    [all …]
|
| rmap.c |
    147  struct xchk_btree *bs)    in xchk_rmapbt_check_unwritten_in_keyflags() argument
    149  struct xfs_scrub *sc = bs->sc;    in xchk_rmapbt_check_unwritten_in_keyflags()
    203  struct xchk_btree *bs,    in xchk_rmapbt_check_overlapping() argument
    224  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_rmapbt_check_overlapping()
    267  struct xchk_btree *bs,    in xchk_rmapbt_check_mergeable() argument
    275  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_rmapbt_check_mergeable()
    283  struct xchk_btree *bs,    in xchk_rmapbt_mark_bitmap() argument
    336  bs->sc->sa.rmap_cur, 0);    in xchk_rmapbt_mark_bitmap()
    344  xchk_btree_xref_set_corrupt(bs->sc, bs->sc->sa.rmap_cur, 0);    in xchk_rmapbt_mark_bitmap()
    354  struct xchk_btree *bs,    in xchk_rmapbt_rec() argument
    [all …]
|
| alloc.c |
    117  struct xchk_btree *bs,    in xchk_allocbt_mergeable() argument
    121  if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)    in xchk_allocbt_mergeable()
    127  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_allocbt_mergeable()
    135  struct xchk_btree *bs,    in xchk_allocbt_rec() argument
    139  struct xchk_alloc *ca = bs->private;    in xchk_allocbt_rec()
    142  if (xfs_alloc_check_irec(to_perag(bs->cur->bc_group), &irec) != NULL) {    in xchk_allocbt_rec()
    143  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_allocbt_rec()
    147  xchk_allocbt_mergeable(bs, ca, &irec);    in xchk_allocbt_rec()
    148  xchk_allocbt_xref(bs->sc, &irec);    in xchk_allocbt_rec()
|
| rtrefcount.c |
    396  struct xchk_btree *bs,    in xchk_rtrefcountbt_check_mergeable() argument
    404  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_rtrefcountbt_check_mergeable()
    459  struct xchk_btree *bs,    in xchk_rtrefcountbt_rec() argument
    462  struct xfs_mount *mp = bs->cur->bc_mp;    in xchk_rtrefcountbt_rec()
    463  struct xchk_rtrefcbt_records *rrc = bs->private;    in xchk_rtrefcountbt_rec()
    470  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_rtrefcountbt_rec()
    477  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_rtrefcountbt_rec()
    480  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_rtrefcountbt_rec()
    488  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_rtrefcountbt_rec()
    491  xchk_rtrefcountbt_check_mergeable(bs, rrc, &irec);    in xchk_rtrefcountbt_rec()
    [all …]
|
| refcount.c |
    433  struct xchk_btree *bs,    in xchk_refcountbt_check_mergeable() argument
    437  if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)    in xchk_refcountbt_check_mergeable()
    441  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_refcountbt_check_mergeable()
    449  struct xchk_btree *bs,    in xchk_refcountbt_rec() argument
    453  struct xchk_refcbt_records *rrc = bs->private;    in xchk_refcountbt_rec()
    456  if (xfs_refcount_check_irec(to_perag(bs->cur->bc_group), &irec) !=    in xchk_refcountbt_rec()
    458  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_refcountbt_rec()
    468  xchk_btree_set_corrupt(bs->sc, bs->cur, 0);    in xchk_refcountbt_rec()
    471  xchk_refcountbt_check_mergeable(bs, rrc, &irec);    in xchk_refcountbt_rec()
    472  xchk_refcountbt_xref(bs->sc, &irec);    in xchk_refcountbt_rec()
    [all …]
|
| bmap.c |
    523  struct xchk_btree *bs,    in xchk_bmapbt_rec() argument
    529  struct xchk_bmap_info *info = bs->private;    in xchk_bmapbt_rec()
    530  struct xfs_inode *ip = bs->cur->bc_ino.ip;    in xchk_bmapbt_rec()
    541  if (xfs_has_crc(bs->cur->bc_mp) &&    in xchk_bmapbt_rec()
    542  bs->cur->bc_levels[0].ptr == 1) {    in xchk_bmapbt_rec()
    543  for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {    in xchk_bmapbt_rec()
    544  block = xfs_btree_get_block(bs->cur, i, &bp);    in xchk_bmapbt_rec()
    547  xchk_fblock_set_corrupt(bs->sc,    in xchk_bmapbt_rec()
    564  xchk_fblock_set_corrupt(bs->sc, info->whichfork,    in xchk_bmapbt_rec()
    575  xchk_fblock_set_corrupt(bs->sc, info->whichfork,    in xchk_bmapbt_rec()
|
| btree.h |
    31   struct xchk_btree *bs,
|
| /fs/afs/ |
| vl_list.c |
    206  bs.source = *b++;    in afs_extract_vlserver_list()
    207  bs.status = *b++;    in afs_extract_vlserver_list()
    208  bs.protocol = *b++;    in afs_extract_vlserver_list()
    209  bs.nr_addrs = *b++;    in afs_extract_vlserver_list()
    212  bs.name_len, bs.priority, bs.weight,    in afs_extract_vlserver_list()
    213  bs.port, bs.protocol, bs.nr_addrs,    in afs_extract_vlserver_list()
    214  bs.name_len, bs.name_len, b);    in afs_extract_vlserver_list()
    227  if (bs.port == 0)    in afs_extract_vlserver_list()
    249  server = afs_alloc_vlserver(b, bs.name_len, bs.port);    in afs_extract_vlserver_list()
    254  b += bs.name_len;    in afs_extract_vlserver_list()
    [all …]
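The afs_extract_vlserver_list() matches pull fixed per-server fields one byte at a time off a cursor and then use name_len to consume the embedded name. A minimal sketch of that cursor-style extraction follows; the record layout here is invented for illustration and is not the real VL-server list wire format.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented fixed header followed by a name of 'name_len' bytes. */
struct server_hdr {
	uint8_t source;
	uint8_t status;
	uint8_t protocol;
	uint8_t name_len;
};

int main(void)
{
	const uint8_t wire[] = { 1, 0, 2, 5, 'h', 'e', 'l', 'l', 'o' };
	const uint8_t *b = wire;
	struct server_hdr bs;
	char name[256];

	/* Consume the fixed fields one byte at a time, advancing the cursor. */
	bs.source   = *b++;
	bs.status   = *b++;
	bs.protocol = *b++;
	bs.name_len = *b++;

	/* The variable-length name follows; the cursor skips past it. */
	memcpy(name, b, bs.name_len);
	name[bs.name_len] = '\0';
	b += bs.name_len;

	printf("source=%u status=%u protocol=%u name=%s\n",
	       bs.source, bs.status, bs.protocol, name);
	return 0;
}
```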
|
| /fs/ext4/ |
| xattr.c |
    1873  bs->s.base = BHDR(bs->bh);    in ext4_xattr_block_find()
    1874  bs->s.first = BFIRST(bs->bh);    in ext4_xattr_block_find()
    1875  bs->s.end = bs->bh->b_data + bs->bh->b_size;    in ext4_xattr_block_find()
    1876  bs->s.here = bs->s.first;    in ext4_xattr_block_find()
    1877  error = xattr_find_entry(inode, &bs->s.here, bs->s.end,    in ext4_xattr_block_find()
    2098  } else if (bs->bh && s->base == bs->bh->b_data) {    in ext4_xattr_block_set()
    2172  if (bs->bh && bs->bh != new_bh) {    in ext4_xattr_block_set()
    2199  if (!(bs->bh && s->base == bs->bh->b_data))    in ext4_xattr_block_set()
    2427  if (!bs.s.not_found && ext4_xattr_value_same(&bs.s, &i))    in ext4_xattr_set_handle()
    2674  if (bs)    in ext4_xattr_move_to_block()
    [all …]
|
| /fs/adfs/ |
| dir_fplus.c |
    74   unsigned int end, bs, bi, i;    in adfs_fplus_checkbyte() local
    84   bs = dir->bhs[bi]->b_size;    in adfs_fplus_checkbyte()
    85   if (bs > end)    in adfs_fplus_checkbyte()
    86   bs = end;    in adfs_fplus_checkbyte()
    88   for (i = 0; i < bs; i += sizeof(u32))    in adfs_fplus_checkbyte()
    91   end -= bs;    in adfs_fplus_checkbyte()
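adfs_fplus_checkbyte() accumulates directory data as 32-bit words across several buffer heads, clamping each buffer's contribution (`bs`) to the bytes that remain before `end`. The sketch below shows only that clamped, chunked walk over a plain byte array, assuming word-aligned sizes; it is not the ADFS check-byte algorithm itself.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sum 'end' bytes of data as native-endian u32 words, walking it in
 * fixed-size chunks the way adfs_fplus_checkbyte() walks buffer heads. */
static uint32_t sum_words(const uint8_t *data, unsigned int end, unsigned int chunk)
{
	uint32_t sum = 0;

	while (end) {
		unsigned int bs = chunk;

		if (bs > end)		/* last chunk may be partial */
			bs = end;
		for (unsigned int i = 0; i < bs; i += sizeof(uint32_t)) {
			uint32_t w;

			memcpy(&w, data + i, sizeof(w));
			sum += w;
		}
		data += bs;
		end -= bs;
	}
	return sum;
}

int main(void)
{
	uint8_t buf[24] = { 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0,
			    4, 0, 0, 0, 5, 0, 0, 0, 6, 0, 0, 0 };

	/* prints sum=21 on a little-endian host */
	printf("sum=%u\n", sum_words(buf, sizeof(buf), 16));
	return 0;
}
```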
|
| /fs/btrfs/ |
| reflink.c |
    641  const u64 bs = fs_info->sectorsize;    in btrfs_extent_same_range() local
    652  ALIGN(len, bs), dst_loff, 1);    in btrfs_extent_same_range()
    713  u64 bs = fs_info->sectorsize;    in btrfs_clone_files() local
    723  len = ALIGN(src->i_size, bs) - off;    in btrfs_clone_files()
    726  const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);    in btrfs_clone_files()
    783  u64 bs = inode_out->root->fs_info->sectorsize;    in btrfs_remap_file_range_prep() local
    817  wb_len = ALIGN(inode_in->vfs_inode.i_size, bs) - ALIGN_DOWN(pos_in, bs);    in btrfs_remap_file_range_prep()
    819  wb_len = ALIGN(*len, bs);    in btrfs_remap_file_range_prep()
    842  ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs), wb_len);    in btrfs_remap_file_range_prep()
    845  ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs), wb_len);    in btrfs_remap_file_range_prep()
|
| send.c |
    6088  u64 bs = sctx->send_root->fs_info->sectorsize;    in send_write_or_clone() local
    6104  if (IS_ALIGNED(end, bs))    in send_write_or_clone()
|
| /fs/ |
| remap_range.c |
    40   loff_t bs = inode_out->i_sb->s_blocksize;    in generic_remap_checks() local
    44   if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs))    in generic_remap_checks()
    78   bcount = ALIGN(size_in, bs) - pos_in;    in generic_remap_checks()
    80   if (!IS_ALIGNED(count, bs))    in generic_remap_checks()
    81   count = ALIGN_DOWN(count, bs);    in generic_remap_checks()
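The generic_remap_checks() matches are all block-size alignment arithmetic: reject offsets that do not sit on a block boundary and trim an unaligned count down to whole blocks. A stand-alone sketch of that policy, with the kernel's alignment helpers restated for a power-of-two block size (the function name and simplified signature are mine):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Restatements of the kernel's alignment helpers for a power-of-two block size. */
#define IS_ALIGNED(x, a)   (((x) & ((a) - 1)) == 0)
#define ALIGN_DOWN(x, a)   ((x) & ~((uint64_t)(a) - 1))

/* Hypothetical, simplified stand-in for the policy in generic_remap_checks(). */
static bool remap_check(uint64_t pos_in, uint64_t pos_out, uint64_t *count, uint64_t bs)
{
	/* Both file offsets must sit on a block boundary. */
	if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs))
		return false;

	/* A partial trailing block is trimmed off rather than rejected. */
	if (!IS_ALIGNED(*count, bs))
		*count = ALIGN_DOWN(*count, bs);
	return true;
}

int main(void)
{
	uint64_t count = 10000;

	if (remap_check(4096, 8192, &count, 4096))
		printf("remapping %llu bytes\n", (unsigned long long)count); /* 8192 */
	return 0;
}
```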
|
| /fs/nfs/ |
| nfs4file.c |
    255  unsigned int bs = server->clone_blksize;    in nfs42_remap_file_range() local
    270  if (bs) {    in nfs42_remap_file_range()
    271  if (!IS_ALIGNED(src_off, bs) || !IS_ALIGNED(dst_off, bs))    in nfs42_remap_file_range()
    273  if (!IS_ALIGNED(count, bs) && i_size_read(src_inode) != (src_off + count))    in nfs42_remap_file_range()
|
| /fs/xfs/ |
| xfs_health.c |
    496  struct xfs_bulkstat *bs)    in xfs_bulkstat_health() argument
    502  bs->bs_sick = 0;    in xfs_bulkstat_health()
    503  bs->bs_checked = 0;    in xfs_bulkstat_health()
    508  bs->bs_checked |= m->ioctl_mask;    in xfs_bulkstat_health()
    510  bs->bs_sick |= m->ioctl_mask;    in xfs_bulkstat_health()
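xfs_bulkstat_health() translates in-kernel inode health state into the flag bits reported through bulkstat: bs_sick and bs_checked start at zero and then accumulate ioctl_mask bits from a mapping table. A hedged user-space sketch of that table-driven translation; the mask values and table entries below are invented.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical in-kernel health bits and their ioctl-visible counterparts. */
struct health_map {
	uint32_t sick_mask;   /* internal health bit           */
	uint32_t ioctl_mask;  /* bit reported through bulkstat */
};

static const struct health_map imap[] = {
	{ 1u << 0, 1u << 8 },
	{ 1u << 1, 1u << 9 },
	{ 1u << 2, 1u << 10 },
};

int main(void)
{
	uint32_t checked = (1u << 0) | (1u << 1) | (1u << 2); /* structures we examined */
	uint32_t sick    = (1u << 2);                         /* ...and one of them is bad */
	uint32_t bs_sick = 0, bs_checked = 0;

	for (size_t i = 0; i < sizeof(imap) / sizeof(imap[0]); i++) {
		if (checked & imap[i].sick_mask)
			bs_checked |= imap[i].ioctl_mask;
		if (sick & imap[i].sick_mask)
			bs_sick |= imap[i].ioctl_mask;
	}
	printf("bs_checked=%#x bs_sick=%#x\n", bs_checked, bs_sick);
	return 0;
}
```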
|
| /fs/hpfs/ |
| alloc.c |
    119  unsigned bs = near & ~0x3fff;    in alloc_in_bmp() local
    129  if (bs != ~0x3fff) {    in alloc_in_bmp()
    135  ret = bs + nr;    in alloc_in_bmp()
    150  ret = bs + q;    in alloc_in_bmp()
    173  ret = bs + q;    in alloc_in_bmp()
    181  …if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (le32_to_cpu(bmp[(ret & 0x3fff) >> 5]) | ~…    in alloc_in_bmp()
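In alloc_in_bmp() the hint block `near` is split into a bitmap-group base (`bs = near & ~0x3fff`; each HPFS bitmap covers 0x4000 blocks) and an in-group offset, and results are rebuilt as `bs + q`; the line 181 check compares the 14-bit group numbers of `ret` and `bs`. A small stand-alone illustration of that split and rebuild (variable names follow the snippet; the program itself is not HPFS code):

```c
#include <stdio.h>

#define BMP_BLOCKS 0x4000u	/* blocks covered by one HPFS bitmap */

int main(void)
{
	unsigned near = 0x12345;
	unsigned bs = near & ~(BMP_BLOCKS - 1);	/* group base, as in alloc_in_bmp() */
	unsigned nr = near & (BMP_BLOCKS - 1);	/* offset of the hint inside the group */

	/* Rebuilding the absolute block number from the two halves. */
	printf("near=%#x -> base=%#x offset=%#x rebuilt=%#x\n",
	       near, bs, nr, bs + nr);
	return 0;
}
```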
|
| /fs/erofs/ |
| decompressor.c |
    292  const unsigned int bs = rq->sb->s_blocksize;    in z_erofs_transform_plain() local
    299  cur = bs - (rq->pageofs_out & (bs - 1));    in z_erofs_transform_plain()
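The line 299 match computes how many bytes remain before the next block boundary from an in-page offset, relying on the block size being a power of two: `bs - (off & (bs - 1))`. A tiny stand-alone check of that identity (names are illustrative, not the erofs structures):

```c
#include <stdio.h>
#include <assert.h>

/* Bytes from 'off' up to the next multiple of the power-of-two block size 'bs'. */
static unsigned int bytes_to_next_block(unsigned int off, unsigned int bs)
{
	return bs - (off & (bs - 1));
}

int main(void)
{
	const unsigned int bs = 4096;

	assert(bytes_to_next_block(0, bs) == bs);	/* already aligned: a full block */
	assert(bytes_to_next_block(1, bs) == bs - 1);
	assert(bytes_to_next_block(4000, bs) == 96);
	printf("all boundary checks passed\n");
	return 0;
}
```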
|
| zdata.c |
    1014  const unsigned int bs = i_blocksize(inode);    in z_erofs_scan_folio() local
    1019  tight = (bs == PAGE_SIZE);    in z_erofs_scan_folio()
    1094  tight = (bs == PAGE_SIZE);    in z_erofs_scan_folio()
    1496  int bs = i_blocksize(f->inode);    in z_erofs_fill_bio_vec() local
    1525  bvec->bv_offset = round_up(-zbv.offset, bs);    in z_erofs_fill_bio_vec()
    1526  bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset;    in z_erofs_fill_bio_vec()
    1577  if (!tocache || bs != PAGE_SIZE ||    in z_erofs_fill_bio_vec()
|
| /fs/udf/ |
| inode.c |
    1356  int bs = inode->i_sb->s_blocksize;    in udf_read_inode() local
    1458  ret = udf_alloc_i_data(inode, bs -    in udf_read_inode()
    1464  bs - sizeof(struct extendedFileEntry));    in udf_read_inode()
    1468  ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));    in udf_read_inode()
    1473  bs - sizeof(struct fileEntry));    in udf_read_inode()
    1480  ret = udf_alloc_i_data(inode, bs -    in udf_read_inode()
    1486  bs - sizeof(struct unallocSpaceEntry));    in udf_read_inode()
    1582  if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)    in udf_read_inode()
    1585  if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)    in udf_read_inode()
    1596  if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))    in udf_read_inode()
|
| /fs/xfs/libxfs/ |
| xfs_fs.h |
    431  bstat_get_projid(const struct xfs_bstat *bs)    in bstat_get_projid() argument
    433  return (uint32_t)bs->bs_projid_hi << 16 | bs->bs_projid_lo;    in bstat_get_projid()
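bstat_get_projid() rebuilds a 32-bit project ID from the two 16-bit halves stored in struct xfs_bstat (bs_projid_hi and bs_projid_lo). A minimal user-space illustration of the same split-and-recombine arithmetic, using stand-in fields rather than the real structure:

```c
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Stand-in for the two 16-bit halves carried in struct xfs_bstat. */
struct split_id {
	uint16_t hi;
	uint16_t lo;
};

static uint32_t get_projid(const struct split_id *s)
{
	/* Same expression as bstat_get_projid(): hi lands in the top 16 bits. */
	return (uint32_t)s->hi << 16 | s->lo;
}

static struct split_id set_projid(uint32_t projid)
{
	return (struct split_id){ .hi = projid >> 16, .lo = projid & 0xffff };
}

int main(void)
{
	struct split_id s = set_projid(0x0012abcd);

	assert(get_projid(&s) == 0x0012abcd);
	printf("hi=%#x lo=%#x -> projid=%#x\n", s.hi, s.lo, get_projid(&s));
	return 0;
}
```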
|
| xfs_health.h |
    287  void xfs_bulkstat_health(struct xfs_inode *ip, struct xfs_bulkstat *bs);
|
| /fs/gfs2/ |
| meta_io.c |
    393  struct gfs2_bufdata *bd, *bs;    in gfs2_ail1_wipe() local
    400  list_for_each_entry_safe(bd, bs, &tr->tr_ail1_list,    in gfs2_ail1_wipe()
|
| /fs/nfsd/ |
| nfs3xdr.c |
    1187  u64 bs = s->f_bsize;    in svcxdr_encode_fsstat3resok() local
    1193  p = xdr_encode_hyper(p, bs * s->f_blocks); /* total bytes */    in svcxdr_encode_fsstat3resok()
    1194  p = xdr_encode_hyper(p, bs * s->f_bfree); /* free bytes */    in svcxdr_encode_fsstat3resok()
    1195  p = xdr_encode_hyper(p, bs * s->f_bavail); /* user available bytes */    in svcxdr_encode_fsstat3resok()
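svcxdr_encode_fsstat3resok() converts the statfs block counts into byte counts by multiplying each by f_bsize before encoding them as 64-bit XDR hypers. A hedged sketch of just that conversion (struct kstatfs and xdr_encode_hyper() are not reproduced; the stand-in struct below is mine):

```c
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for the struct kstatfs fields used above. */
struct fs_stats {
	uint64_t f_bsize;   /* fundamental block size   */
	uint64_t f_blocks;  /* total blocks             */
	uint64_t f_bfree;   /* free blocks              */
	uint64_t f_bavail;  /* blocks available to user */
};

int main(void)
{
	struct fs_stats s = { .f_bsize = 4096, .f_blocks = 1 << 20,
			      .f_bfree = 1 << 18, .f_bavail = 1 << 17 };
	uint64_t bs = s.f_bsize;

	/* The FSSTAT3 reply carries byte counts, not block counts. */
	printf("total  %llu bytes\n", (unsigned long long)(bs * s.f_blocks));
	printf("free   %llu bytes\n", (unsigned long long)(bs * s.f_bfree));
	printf("avail  %llu bytes\n", (unsigned long long)(bs * s.f_bavail));
	return 0;
}
```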
|