| /fs/btrfs/ |
| A D | raid56.c |
|     978  sector = NULL;  in sector_in_rbio()
|     980  return sector;  in sector_in_rbio()
|    1326  sector->uptodate = 1;  in generate_pq_vertical()
|    1387  if (!sector)  in rmw_assemble_write_bios()
|    1437  if (!sector)  in rmw_assemble_write_bios()
|    1502  if (sector->has_paddr && sector->paddr == paddr)  in find_stripe_sector()
|    1503  return sector;  in find_stripe_sector()
|    1526  ASSERT(sector);  in set_bio_pages_uptodate()
|    1527  if (sector)  in set_bio_pages_uptodate()
|    2337  if (!sector->has_paddr || !sector->uptodate)  in need_read_stripe_sectors()
|    [all …]
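The raid56.c hits outline a small per-sector bookkeeping pattern: each stripe sector records whether a physical address is attached, what it is, and whether its contents are up to date, and find_stripe_sector() scans for a matching paddr. A minimal standalone sketch of that lookup, with an illustrative struct rather than btrfs's real definition:

```c
/*
 * Sketch of the lookup visible in find_stripe_sector(): scan an array
 * of sector descriptors for one whose recorded physical address
 * matches.  struct mock_sector is illustrative, not btrfs's type.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mock_sector {
	uint64_t paddr;          /* physical address backing this sector */
	bool has_paddr;          /* paddr is valid */
	bool uptodate;           /* contents have been read or generated */
};

static struct mock_sector *find_sector(struct mock_sector *sectors,
				       size_t n, uint64_t paddr)
{
	for (size_t i = 0; i < n; i++) {
		struct mock_sector *sector = &sectors[i];

		if (sector->has_paddr && sector->paddr == paddr)
			return sector;
	}
	return NULL;             /* address not part of this stripe */
}

int main(void)
{
	struct mock_sector sectors[3] = {
		{ .paddr = 0x1000, .has_paddr = true,  .uptodate = true  },
		{ .paddr = 0,      .has_paddr = false, .uptodate = false },
		{ .paddr = 0x3000, .has_paddr = true,  .uptodate = false },
	};
	struct mock_sector *hit = find_sector(sectors, 3, 0x3000);

	printf("found: %s, uptodate: %d\n", hit ? "yes" : "no",
	       hit ? hit->uptodate : 0);
	return 0;
}
```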
|
| A D | zoned.c |
|      93  sector_t sector;  in sb_write_pointer() local
|     145  sector = zones[1].start;  in sb_write_pointer()
|     147  sector = zones[0].start;  in sb_write_pointer()
|     152  sector = zones[0].wp;  in sb_write_pointer()
|     154  sector = zones[1].wp;  in sb_write_pointer()
|     158  *wp_ret = sector << SECTOR_SHIFT;  in sb_write_pointer()
|     361  sector_t sector = 0;  in btrfs_get_dev_zone_info() local
|     480  while (sector < nr_sectors) {  in btrfs_get_dev_zone_info()
|     482  ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,  in btrfs_get_dev_zone_info()
|     503  sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;  in btrfs_get_dev_zone_info()
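The zoned.c hits revolve around two conversions: a write pointer kept as a 512-byte sector index is handed back to callers in bytes via SECTOR_SHIFT, and the zone-report loop advances its cursor by start + len of the last zone it saw. A minimal userspace sketch of that arithmetic, assuming the kernel's 512-byte sector (SECTOR_SHIFT == 9) and a mock zone struct in place of struct blk_zone:

```c
/*
 * Sector arithmetic mirrored from the zoned.c hits above: pick a write
 * pointer in sectors, report it in bytes, and advance a zone-report
 * cursor past the last zone returned.  Types are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9          /* 512-byte sectors, as in the kernel */

struct mock_zone {
	uint64_t start;         /* zone start, in sectors */
	uint64_t len;           /* zone length, in sectors */
	uint64_t wp;            /* write pointer, in sectors */
};

int main(void)
{
	struct mock_zone zones[2] = {
		{ .start = 0,    .len = 2048, .wp = 1536 },
		{ .start = 2048, .len = 2048, .wp = 2048 },
	};

	/* Write pointer is tracked in sectors, returned to callers in bytes. */
	uint64_t sector = zones[0].wp;
	uint64_t wp_bytes = sector << SECTOR_SHIFT;

	/* Next zone report starts right after the last zone we looked at. */
	uint64_t next = zones[1].start + zones[1].len;

	printf("wp at byte %llu, next report starts at sector %llu\n",
	       (unsigned long long)wp_bytes, (unsigned long long)next);
	return 0;
}
```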
|
| A D | scrub.c |
|     789  struct scrub_sector_verification *sector = &stripe->sectors[sector_nr];  in scrub_verify_one_sector() local
|     831  if (!sector->csum) {  in scrub_verify_one_sector()
|     836  ret = btrfs_check_sector_csum(fs_info, kaddr, csum_buf, sector->csum);  in scrub_verify_one_sector()
|    1615  struct scrub_sector_verification *sector =  in fill_one_extent_info() local
|    1621  sector->generation = extent_gen;  in fill_one_extent_info()
|
| /fs/fat/ |
| A D | cache.c |
|     310  int fat_get_mapped_cluster(struct inode *inode, sector_t sector,  in fat_get_mapped_cluster() argument
|     319  offset = sector & (sbi->sec_per_clus - 1);  in fat_get_mapped_cluster()
|     326  if (*mapped_blocks > last_block - sector)  in fat_get_mapped_cluster()
|     327  *mapped_blocks = last_block - sector;  in fat_get_mapped_cluster()
|     333  static int is_exceed_eof(struct inode *inode, sector_t sector,  in is_exceed_eof() argument
|     341  if (sector >= *last_block) {  in is_exceed_eof()
|     351  if (sector >= *last_block)  in is_exceed_eof()
|     367  if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {  in fat_bmap()
|     368  *phys = sector + sbi->dir_start;  in fat_bmap()
|     375  if (is_exceed_eof(inode, sector, &last_block, create))  in fat_bmap()
|    [all …]
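The cache.c hits show the core of fat_get_mapped_cluster(): the sector's offset within its cluster is taken with a mask, which is only valid because sec_per_clus is a power of two, and the run of mapped blocks is clamped so it never crosses last_block. A small sketch under those assumptions; how *mapped_blocks is first seeded is not shown in the hits and is assumed here to be the remainder of the cluster:

```c
/*
 * Mask-and-clamp arithmetic mirrored from fat_get_mapped_cluster():
 * the offset of a sector inside its cluster via a power-of-two mask,
 * and the contiguous run clamped at EOF.  Values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sec_per_clus = 8;       /* must be a power of two */
	uint64_t sector = 21;            /* sector being mapped */
	uint64_t last_block = 24;        /* first sector past EOF */

	uint64_t offset = sector & (sec_per_clus - 1);   /* 21 % 8 == 5 */
	uint64_t mapped = sec_per_clus - offset;         /* assumed: rest of cluster */

	if (mapped > last_block - sector)
		mapped = last_block - sector;            /* clamp at EOF */

	printf("offset in cluster: %llu, mapped blocks: %llu\n",
	       (unsigned long long)offset, (unsigned long long)mapped);
	return 0;
}
```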
|
| A D | fat.h |
|     321  extern int fat_get_mapped_cluster(struct inode *inode, sector_t sector,
|     324  extern int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
|
| /fs/zonefs/ |
| A D | trace.h |
|      30  __field(sector_t, sector)
|      38  __entry->sector = z->z_sector;
|      43  blk_op_str(__entry->op), __entry->sector,
|      54  __field(sector_t, sector)
|      62  __entry->sector = zonefs_inode_zone(inode)->z_sector;
|      70  __entry->sector, __entry->size, __entry->wpoffset,
|
| /fs/exfat/ |
| A D | nls.c |
|     651  sector_t sector, unsigned long long num_sectors,  in exfat_load_upcase_table() argument
|     666  num_sectors += sector;  in exfat_load_upcase_table()
|     668  while (sector < num_sectors) {  in exfat_load_upcase_table()
|     671  bh = sb_bread(sb, sector);  in exfat_load_upcase_table()
|     674  (unsigned long long)sector);  in exfat_load_upcase_table()
|     677  sector++;  in exfat_load_upcase_table()
|     745  sector_t sector;  in exfat_create_upcase_table() local
|     776  sector = exfat_cluster_to_sector(sbi, tbl_clu);  in exfat_create_upcase_table()
|     778  ret = exfat_load_upcase_table(sb, sector, num_sectors,  in exfat_create_upcase_table()
|
| A D | balloc.c |
|      35  sector_t sector;  in exfat_allocate_bitmap() local
|      58  sector = exfat_cluster_to_sector(sbi, sbi->map_clu);  in exfat_allocate_bitmap()
|      60  sbi->vol_amap[i] = sb_bread(sb, sector + i);  in exfat_allocate_bitmap()
|
| A D | dir.c |
|     595  int entry, sector_t *sector, int *offset)  in exfat_find_location() argument
|     614  *sector = EXFAT_B_TO_BLK(off, sb);  in exfat_find_location()
|     615  *sector += exfat_cluster_to_sector(sbi, clu);  in exfat_find_location()
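dir.c's exfat_find_location() turns a directory entry's byte offset within its cluster into a (sector, offset) pair: EXFAT_B_TO_BLK() converts bytes to blocks and the cluster's first sector is added on top. The helper below assumes the usual FAT-family layout (data clusters numbered from 2, a fixed data start sector); the real exfat_cluster_to_sector() may differ in detail:

```c
/*
 * Sketch of the location math in exfat_find_location().  The
 * cluster-to-sector helper and the mock superblock-info struct are
 * assumptions for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

struct mock_sbi {
	uint64_t data_start_sector;        /* first sector of the data area */
	unsigned int sect_per_clus_bits;   /* log2(sectors per cluster) */
	unsigned int blocksize_bits;       /* log2(block size in bytes) */
};

/* Assumed FAT-family convention: data clusters are numbered from 2. */
static uint64_t cluster_to_sector(const struct mock_sbi *sbi, uint32_t clu)
{
	return ((uint64_t)(clu - 2) << sbi->sect_per_clus_bits) +
	       sbi->data_start_sector;
}

int main(void)
{
	struct mock_sbi sbi = {
		.data_start_sector = 8192,
		.sect_per_clus_bits = 3,   /* 8 sectors per cluster */
		.blocksize_bits = 9,       /* 512-byte blocks */
	};
	uint32_t clu = 5;                  /* cluster holding the entry */
	uint64_t off = 1056;               /* byte offset within the cluster */

	uint64_t sector = off >> sbi.blocksize_bits;        /* bytes -> blocks */
	sector += cluster_to_sector(&sbi, clu);             /* + cluster base */
	int offset = off & ((1 << sbi.blocksize_bits) - 1); /* byte within block */

	printf("entry at sector %llu, offset %d\n",
	       (unsigned long long)sector, offset);
	return 0;
}
```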
|
| /fs/xfs/ |
| A D | xfs_bio_io.c |
|      15  sector_t sector,  in xfs_rw_bdev() argument
|      27  return bdev_rw_virt(bdev, sector, data, count, op);  in xfs_rw_bdev()
|      30  bio->bi_iter.bi_sector = sector;  in xfs_rw_bdev()
|
| A D | xfs_zone_alloc.c |
|     752  sector_t *sector,  in xfs_zone_alloc_blocks() argument
|     772  *sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0);  in xfs_zone_alloc_blocks()
|     773  *is_seq = bdev_zone_is_seq(mp->m_rtdev_targp->bt_bdev, *sector);  in xfs_zone_alloc_blocks()
|     775  *sector += XFS_FSB_TO_BB(mp, allocated);  in xfs_zone_alloc_blocks()
|     784  sector_t sector = ioend->io_bio.bi_iter.bi_sector;  in xfs_mark_rtg_boundary() local
|     786  if (xfs_rtb_to_rgbno(mp, xfs_daddr_to_rtb(mp, sector)) == 0)  in xfs_mark_rtg_boundary()
|
| A D | xfs_linux.h | 216 int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count,
|
| A D | xfs_mount.h | 427 __XFS_HAS_FEAT(sector, SECTOR) in __XFS_ADD_FEAT()
|
| /fs/hfsplus/ |
| A D | wrapper.c |
|      48  int hfsplus_submit_bio(struct super_block *sb, sector_t sector,  in hfsplus_submit_bio() argument
|      52  loff_t start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;  in hfsplus_submit_bio()
|      62  sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);  in hfsplus_submit_bio()
|      63  return bdev_rw_virt(sb->s_bdev, sector, buf, io_size, opf);  in hfsplus_submit_bio()
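wrapper.c's hfsplus_submit_bio() first computes the byte offset of the request and then rounds the starting sector down to the I/O unit, which only works because io_size is a power-of-two multiple of the sector size. A standalone sketch, assuming HFSPLUS_SECTOR_SHIFT is the usual 9 (512-byte sectors):

```c
/*
 * Byte-offset and round-down-alignment arithmetic mirrored from
 * hfsplus_submit_bio().  io_size must be a power-of-two multiple of
 * the sector size for the mask to be valid.
 */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9          /* assumed value of HFSPLUS_SECTOR_SHIFT */

int main(void)
{
	uint64_t sector = 0x1003;               /* unaligned request */
	uint64_t io_size = 4096;                /* bytes per I/O unit */

	uint64_t start = sector << SECTOR_SHIFT;            /* byte offset */
	sector &= ~((io_size >> SECTOR_SHIFT) - 1);          /* align down */

	printf("byte offset %llu, aligned sector %llu\n",
	       (unsigned long long)start, (unsigned long long)sector);
	return 0;
}
```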
|
| A D | extents.c |
|     233  sector_t sector;  in hfsplus_get_block() local
|     282  sector = ((sector_t)dblock << sbi->fs_shift) +  in hfsplus_get_block()
|     284  map_bh(bh_result, sb, sector);  in hfsplus_get_block()
|
| /fs/crypto/ |
| A D | bio.c |
|     123  sector_t sector = pblk << (inode->i_blkbits - SECTOR_SHIFT);  in fscrypt_zeroout_range() local
|     163  bio->bi_iter.bi_sector = sector;  in fscrypt_zeroout_range()
|     174  sector += 1U << (du_bits - SECTOR_SHIFT);  in fscrypt_zeroout_range()
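bio.c's fscrypt_zeroout_range() does two unit conversions: the filesystem block number becomes a 512-byte sector index (pblk << (i_blkbits - SECTOR_SHIFT)), and the loop then steps one encryption data unit at a time (1 << (du_bits - SECTOR_SHIFT) sectors). A sketch of just that arithmetic with illustrative sizes:

```c
/*
 * Unit conversions mirrored from the fscrypt_zeroout_range() hits:
 * filesystem block -> 512-byte sector, then advance one encryption
 * data unit per iteration.  The sizes below are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9          /* 512-byte sectors, as in the kernel */

int main(void)
{
	unsigned int i_blkbits = 12;     /* 4 KiB filesystem blocks */
	unsigned int du_bits = 12;       /* 4 KiB encryption data units */
	uint64_t pblk = 100;             /* physical block to zero */

	uint64_t sector = pblk << (i_blkbits - SECTOR_SHIFT);   /* 100 * 8 */

	for (int i = 0; i < 3; i++) {
		printf("data unit %d starts at sector %llu\n",
		       i, (unsigned long long)sector);
		sector += 1U << (du_bits - SECTOR_SHIFT);        /* next unit */
	}
	return 0;
}
```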
|
| /fs/bcachefs/ |
| A D | fs-io-buffered.c |
|     497  u64 sector,  in bch2_writepage_io_alloc() argument
|     516  op->pos = POS(inode->v.i_ino, sector);  in bch2_writepage_io_alloc()
|     519  op->wbio.bio.bi_iter.bi_sector = sector;  in bch2_writepage_io_alloc()
|     610  u64 sector;  in __bch2_writepage() local
|     627  sector = folio_sector(folio) + offset;  in __bch2_writepage()
|     632  bio_end_sector(&w->io->op.wbio.bio) != sector))  in __bch2_writepage()
|     636  bch2_writepage_io_alloc(c, wbc, w, inode, sector,  in __bch2_writepage()
|
| A D | journal_io.h | 16 u64 sector; member
|
| A D | journal_io.c |
|      57  p->dev, p->bucket, p->bucket_offset, p->sector);  in bch2_journal_ptr_to_text()
|     924  struct jset *jset, u64 sector,  in jset_validate() argument
|     943  sector, le64_to_cpu(jset->seq),  in jset_validate()
|     955  sector, le64_to_cpu(jset->seq),  in jset_validate()
|     978  struct jset *jset, u64 sector,  in jset_validate_early() argument
|     997  sector, le64_to_cpu(jset->seq),  in jset_validate_early()
|    1014  sector, le64_to_cpu(jset->seq), bytes))  in jset_validate_early()
|    1176  .sector = offset,  in journal_read_bucket()
|    1499  i->ptrs.data[0].sector,  in bch2_journal_read()
|
| A D | btree_node_scan.c |
|     259  u64 sector = bucket * ca->mi.bucket_size + bucket_offset;  in read_btree_nodes_worker() local
|     262  !bch2_dev_btree_bitmap_marked_sectors(ca, sector, btree_sectors(c)))  in read_btree_nodes_worker()
|     265  try_read_btree_node(w->f, ca, b, bio, sector);  in read_btree_nodes_worker()
|
| /fs/udf/ |
| A D | super.c |
|     741  loff_t sector = VSD_FIRST_SECTOR_OFFSET;  in udf_check_vsd() local
|     755  sector += session_offset;  in udf_check_vsd()
|     758  (unsigned int)(sector >> sb->s_blocksize_bits),  in udf_check_vsd()
|     770  for (; !nsr && sector < VSD_MAX_SECTOR_OFFSET; sector += sectorsize) {  in udf_check_vsd()
|     772  bh = sb_bread(sb, sector >> sb->s_blocksize_bits);  in udf_check_vsd()
|     777  (sector & (sb->s_blocksize - 1)));  in udf_check_vsd()
|     800  else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET)  in udf_check_vsd()
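In udf_check_vsd() the variable named sector is really a byte offset: each candidate volume structure descriptor offset is split into a block number for sb_bread() (offset >> s_blocksize_bits) and a byte offset inside that block (offset & (s_blocksize - 1)). A sketch of the scan arithmetic with the I/O mocked out; the upper bound and block size here are illustrative, and the 32 KiB starting offset follows the usual volume recognition sequence placement:

```c
/*
 * Offset-splitting arithmetic mirrored from the udf_check_vsd() hits:
 * a byte offset is turned into (block number, offset within block) for
 * each descriptor candidate.  No real I/O is done here.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vsd_first = 32768;          /* first descriptor, 32 KiB in */
	uint64_t vsd_max = 262144;           /* illustrative upper bound */
	unsigned int blocksize_bits = 11;    /* 2 KiB blocks */
	uint64_t sectorsize = 2048;          /* descriptor spacing in bytes */
	uint64_t session_offset = 0;

	for (uint64_t off = vsd_first + session_offset; off < vsd_max;
	     off += sectorsize) {
		uint64_t block = off >> blocksize_bits;          /* sb_bread() arg */
		uint64_t in_block = off & ((1 << blocksize_bits) - 1);

		printf("descriptor candidate: block %llu, offset %llu\n",
		       (unsigned long long)block, (unsigned long long)in_block);
		if (block > 18)              /* stand-in for finding the NSR descriptor */
			break;
	}
	return 0;
}
```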
|
| /fs/ |
| A D | direct-io.c |
|     666  sector_t sector;  in dio_new_bio() local
|     672  sector = start_sector << (sdio->blkbits - 9);  in dio_new_bio()
|     675  dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);  in dio_new_bio()
|
| /fs/f2fs/ |
| A D | data.c |
|     383  block_t blk_addr, sector_t *sector)  in f2fs_target_device() argument
|     399  if (sector)  in f2fs_target_device()
|     400  *sector = SECTOR_FROM_BLOCK(blk_addr);  in f2fs_target_device()
|     457  sector_t sector;  in __bio_alloc() local
|     460  bdev = f2fs_target_device(sbi, fio->new_blkaddr, &sector);  in __bio_alloc()
|     464  bio->bi_iter.bi_sector = sector;  in __bio_alloc()
|    1047  sector_t sector;  in f2fs_grab_read_bio() local
|    1048  struct block_device *bdev = f2fs_target_device(sbi, blkaddr, &sector);  in f2fs_grab_read_bio()
|    1053  bio->bi_iter.bi_sector = sector;  in f2fs_grab_read_bio()
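data.c resolves a block address to a device and a starting sector: f2fs_target_device() picks the block device and SECTOR_FROM_BLOCK() shifts the 4 KiB block address into 512-byte sectors, which then lands in bio->bi_iter.bi_sector. Only the block-to-sector shift is taken from the listed lines; the device table and the device-relative adjustment below are assumptions for illustration:

```c
/*
 * Sketch of block-to-(device, sector) targeting in the spirit of the
 * data.c hits.  The multi-device table and the relative adjustment are
 * assumptions; the shift by 3 reflects 4 KiB blocks over 512-byte
 * sectors.
 */
#include <stdint.h>
#include <stdio.h>

#define LOG_SECTORS_PER_BLOCK 3      /* 4096 / 512 */

struct mock_dev {
	const char *name;
	uint32_t start_blk;          /* first block address on this device */
	uint32_t end_blk;            /* last block address on this device */
};

int main(void)
{
	struct mock_dev devs[] = {
		{ "dev0", 0,      99999  },
		{ "dev1", 100000, 199999 },
	};
	uint32_t blk_addr = 123456;

	for (unsigned i = 0; i < sizeof(devs) / sizeof(devs[0]); i++) {
		if (blk_addr < devs[i].start_blk || blk_addr > devs[i].end_blk)
			continue;
		/* Assumed: the sector is relative to the chosen device. */
		uint64_t sector = (uint64_t)(blk_addr - devs[i].start_blk)
				  << LOG_SECTORS_PER_BLOCK;
		printf("%s: bi_sector = %llu\n",
		       devs[i].name, (unsigned long long)sector);
		break;
	}
	return 0;
}
```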
|
| /fs/iomap/ |
| A D | buffered-io.c |
|     369  sector_t sector;  in iomap_readpage_iter() local
|     398  sector = iomap_sector(iomap, pos);  in iomap_readpage_iter()
|     400  bio_end_sector(ctx->bio) != sector ||  in iomap_readpage_iter()
|     424  ctx->bio->bi_iter.bi_sector = sector;  in iomap_readpage_iter()
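buffered-io.c's iomap_readpage_iter() shows the usual bio-building contiguity test, which also appears in the bcachefs hits above and the gfs2 hits below: a block is added to the bio under construction only if it starts exactly where that bio ends (bio_end_sector()), otherwise a new bio is started at the new sector. A sketch with a mock bio standing in for struct bio:

```c
/*
 * Contiguity check mirrored from the iomap/gfs2/bcachefs hits: append
 * to the current bio only if the new sector continues it exactly.
 * struct mock_bio is illustrative, not the kernel's struct bio.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mock_bio {
	uint64_t start_sector;       /* bi_iter.bi_sector at allocation */
	uint64_t nr_sectors;         /* sectors added so far */
};

/* Equivalent of bio_end_sector(): first sector past the bio's data. */
static uint64_t bio_end(const struct mock_bio *bio)
{
	return bio->start_sector + bio->nr_sectors;
}

static bool can_append(const struct mock_bio *bio, uint64_t sector)
{
	return bio && bio_end(bio) == sector;
}

int main(void)
{
	struct mock_bio bio = { .start_sector = 1000, .nr_sectors = 8 };

	printf("sector 1008: %s\n", can_append(&bio, 1008) ? "append" : "new bio");
	printf("sector 1024: %s\n", can_append(&bio, 1024) ? "append" : "new bio");
	return 0;
}
```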
|
| /fs/gfs2/ |
| A D | lops.c |
|     542  sector_t sector = dblock << sdp->sd_fsb2bb_shift;  in gfs2_find_jhead() local
|     544  if (bio_end_sector(bio) == sector) {  in gfs2_find_jhead()
|