Lines matching refs: bdev
38 struct block_device bdev; member
47 static inline struct inode *BD_INODE(struct block_device *bdev) in BD_INODE() argument
49 return &container_of(bdev, struct bdev_inode, bdev)->vfs_inode; in BD_INODE()
54 return &BDEV_I(inode)->bdev; in I_BDEV()
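BD_INODE(), BDEV_I(), and I_BDEV() hop between a block_device and the inode of the bdev_inode that embeds them both, via container_of. A minimal userspace sketch of the same pattern, with stand-in types (all demo names hypothetical):

    #include <stddef.h>
    #include <stdio.h>

    /* simplified container_of: no type checking, unlike the kernel's */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct demo_bdev { int partno; };        /* stand-in for struct block_device */
    struct demo_bdev_inode {                 /* mirrors struct bdev_inode's layout */
            struct demo_bdev bdev;
            long vfs_inode;                  /* stand-in for struct inode */
    };

    int main(void)
    {
            struct demo_bdev_inode bi = { .bdev = { 7 }, .vfs_inode = 42 };
            /* recover the container from a pointer to its embedded member */
            struct demo_bdev_inode *outer =
                    container_of(&bi.bdev, struct demo_bdev_inode, bdev);
            printf("vfs_inode=%ld partno=%d\n", outer->vfs_inode, outer->bdev.partno);
            return 0;
    }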
64 static void bdev_write_inode(struct block_device *bdev) in bdev_write_inode() argument
66 struct inode *inode = BD_INODE(bdev); in bdev_write_inode()
76 bdev, ret); in bdev_write_inode()
83 static void kill_bdev(struct block_device *bdev) in kill_bdev() argument
85 struct address_space *mapping = bdev->bd_mapping; in kill_bdev()
95 void invalidate_bdev(struct block_device *bdev) in invalidate_bdev() argument
97 struct address_space *mapping = bdev->bd_mapping; in invalidate_bdev()
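kill_bdev() truncates every cached page of the device, while invalidate_bdev() only drops clean, unmapped pages. A hedged sketch of the flush-then-invalidate sequence using only the exported helpers (demo_flush_and_invalidate is hypothetical):

    #include <linux/blkdev.h>

    /* Sketch: write back dirty pages, then drop the clean cached ones,
     * e.g. after the device contents changed underneath the cache.
     * Assumes @bdev is a block device we already hold open. */
    static int demo_flush_and_invalidate(struct block_device *bdev)
    {
            int ret = sync_blockdev(bdev);  /* write back and wait */

            if (ret)
                    return ret;
            invalidate_bdev(bdev);          /* drop clean, unmapped pages */
            return 0;
    }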
111 int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode, in truncate_bdev_range() argument
120 int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL); in truncate_bdev_range()
125 truncate_inode_pages_range(bdev->bd_mapping, lstart, lend); in truncate_bdev_range()
127 bd_abort_claiming(bdev, truncate_bdev_range); in truncate_bdev_range()
135 return invalidate_inode_pages2_range(bdev->bd_mapping, in truncate_bdev_range()
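truncate_bdev_range() works on byte offsets and, when the caller did not open the device exclusively, takes a temporary claim so racing openers cannot interfere. A sketch modeled on the discard-ioctl pattern, assuming sector-aligned offsets (demo_discard is hypothetical, error paths trimmed):

    #include <linux/blkdev.h>

    /* Sketch: drop the page cache over [start, start+len-1] before
     * issuing a discard for the same range. @mode holds the blk_mode_t
     * flags the device was opened with. */
    static int demo_discard(struct block_device *bdev, blk_mode_t mode,
                            loff_t start, loff_t len)
    {
            int err = truncate_bdev_range(bdev, mode, start, start + len - 1);

            if (err)
                    return err;
            return blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
                                        len >> SECTOR_SHIFT, GFP_KERNEL);
    }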
140 static void set_init_blocksize(struct block_device *bdev) in set_init_blocksize() argument
142 unsigned int bsize = bdev_logical_block_size(bdev); in set_init_blocksize()
143 loff_t size = i_size_read(BD_INODE(bdev)); in set_init_blocksize()
150 BD_INODE(bdev)->i_blkbits = blksize_bits(bsize); in set_init_blocksize()
156 struct block_device *bdev = I_BDEV(inode); in set_blocksize() local
163 if (size < bdev_logical_block_size(bdev)) in set_blocksize()
171 sync_blockdev(bdev); in set_blocksize()
173 kill_bdev(bdev); in set_blocksize()
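set_blocksize() rejects sizes below the logical block size and, when the size actually changes, syncs and kills the cached pages first. A usage sketch, assuming the file-based set_blocksize() signature this tree uses (demo_set_blocksize is hypothetical):

    #include <linux/blkdev.h>
    #include <linux/fs.h>

    /* Sketch: switch the soft block size of an open block device file.
     * @size must be a power of two between the logical block size and
     * the page size. */
    static int demo_set_blocksize(struct file *bdev_file, int size)
    {
            struct block_device *bdev = file_bdev(bdev_file);

            if (size < bdev_logical_block_size(bdev))
                    return -EINVAL;
            return set_blocksize(bdev_file, size);
    }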
203 int sync_blockdev_nowait(struct block_device *bdev) in sync_blockdev_nowait() argument
205 if (!bdev) in sync_blockdev_nowait()
207 return filemap_flush(bdev->bd_mapping); in sync_blockdev_nowait()
215 int sync_blockdev(struct block_device *bdev) in sync_blockdev() argument
217 if (!bdev) in sync_blockdev()
219 return filemap_write_and_wait(bdev->bd_mapping); in sync_blockdev()
223 int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend) in sync_blockdev_range() argument
225 return filemap_write_and_wait_range(bdev->bd_mapping, in sync_blockdev_range()
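The three sync helpers differ only in waiting and scope: sync_blockdev_nowait() starts writeback without waiting, sync_blockdev() writes back and waits for the whole device, and sync_blockdev_range() does the same for a byte range. A small sketch contrasting them (demo_sync is hypothetical):

    #include <linux/blkdev.h>

    static int demo_sync(struct block_device *bdev, loff_t pos, loff_t len)
    {
            int ret;

            sync_blockdev_nowait(bdev);     /* kick off writeback, don't wait */
            ret = sync_blockdev_range(bdev, pos, pos + len - 1);
            if (ret)                        /* ranged write-and-wait */
                    return ret;
            return sync_blockdev(bdev);     /* full write-and-wait */
    }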
244 int bdev_freeze(struct block_device *bdev) in bdev_freeze() argument
248 mutex_lock(&bdev->bd_fsfreeze_mutex); in bdev_freeze()
250 if (atomic_inc_return(&bdev->bd_fsfreeze_count) > 1) { in bdev_freeze()
251 mutex_unlock(&bdev->bd_fsfreeze_mutex); in bdev_freeze()
255 mutex_lock(&bdev->bd_holder_lock); in bdev_freeze()
256 if (bdev->bd_holder_ops && bdev->bd_holder_ops->freeze) { in bdev_freeze()
257 error = bdev->bd_holder_ops->freeze(bdev); in bdev_freeze()
258 lockdep_assert_not_held(&bdev->bd_holder_lock); in bdev_freeze()
260 mutex_unlock(&bdev->bd_holder_lock); in bdev_freeze()
261 error = sync_blockdev(bdev); in bdev_freeze()
265 atomic_dec(&bdev->bd_fsfreeze_count); in bdev_freeze()
267 mutex_unlock(&bdev->bd_fsfreeze_mutex); in bdev_freeze()
280 int bdev_thaw(struct block_device *bdev) in bdev_thaw() argument
284 mutex_lock(&bdev->bd_fsfreeze_mutex); in bdev_thaw()
290 nr_freeze = atomic_dec_if_positive(&bdev->bd_fsfreeze_count); in bdev_thaw()
298 mutex_lock(&bdev->bd_holder_lock); in bdev_thaw()
299 if (bdev->bd_holder_ops && bdev->bd_holder_ops->thaw) { in bdev_thaw()
300 error = bdev->bd_holder_ops->thaw(bdev); in bdev_thaw()
301 lockdep_assert_not_held(&bdev->bd_holder_lock); in bdev_thaw()
303 mutex_unlock(&bdev->bd_holder_lock); in bdev_thaw()
307 atomic_inc(&bdev->bd_fsfreeze_count); in bdev_thaw()
309 mutex_unlock(&bdev->bd_fsfreeze_mutex); in bdev_thaw()
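bdev_freeze() and bdev_thaw() bracket a quiesced window: freeze defers to the holder's ->freeze() callback when one is registered (a mounted filesystem, say) and otherwise falls back to sync_blockdev(); nesting is tracked in bd_fsfreeze_count. A hedged usage sketch (demo_snapshot is hypothetical):

    #include <linux/blkdev.h>

    /* Sketch: quiesce a device around a snapshot-style operation. */
    static int demo_snapshot(struct block_device *bdev)
    {
            int error = bdev_freeze(bdev);

            if (error)
                    return error;
            /* ... take the snapshot while writes are frozen ... */
            return bdev_thaw(bdev);
    }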
327 memset(&ei->bdev, 0, sizeof(ei->bdev)); in bdev_alloc_inode()
329 if (security_bdev_alloc(&ei->bdev)) { in bdev_alloc_inode()
338 struct block_device *bdev = I_BDEV(inode); in bdev_free_inode() local
340 free_percpu(bdev->bd_stats); in bdev_free_inode()
341 kfree(bdev->bd_meta_info); in bdev_free_inode()
342 security_bdev_free(bdev); in bdev_free_inode()
344 if (!bdev_is_partition(bdev)) { in bdev_free_inode()
345 if (bdev->bd_disk && bdev->bd_disk->bdi) in bdev_free_inode()
346 bdi_put(bdev->bd_disk->bdi); in bdev_free_inode()
347 kfree(bdev->bd_disk); in bdev_free_inode()
350 if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR) in bdev_free_inode()
351 blk_free_ext_minor(MINOR(bdev->bd_dev)); in bdev_free_inode()
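bdev_free_inode() tears down what bdev_alloc() set up, including the per-CPU bd_stats. A sketch of that alloc_percpu()/free_percpu() lifecycle with a hypothetical stats struct:

    #include <linux/percpu.h>

    /* Sketch: one instance of the struct per CPU, lock-free per-CPU
     * updates, freed as a single unit, mirroring bd_stats. */
    struct demo_stats { unsigned long sectors; };

    static struct demo_stats __percpu *demo_stats_alloc(void)
    {
            return alloc_percpu(struct demo_stats);
    }

    static void demo_stats_account(struct demo_stats __percpu *stats,
                                   unsigned long sectors)
    {
            this_cpu_add(stats->sectors, sectors);
    }

    static void demo_stats_free(struct demo_stats __percpu *stats)
    {
            free_percpu(stats);
    }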
417 struct block_device *bdev; in bdev_alloc() local
428 bdev = I_BDEV(inode); in bdev_alloc()
429 mutex_init(&bdev->bd_fsfreeze_mutex); in bdev_alloc()
430 spin_lock_init(&bdev->bd_size_lock); in bdev_alloc()
431 mutex_init(&bdev->bd_holder_lock); in bdev_alloc()
432 atomic_set(&bdev->__bd_flags, partno); in bdev_alloc()
433 bdev->bd_mapping = &inode->i_data; in bdev_alloc()
434 bdev->bd_queue = disk->queue; in bdev_alloc()
436 bdev_set_flag(bdev, BD_HAS_SUBMIT_BIO); in bdev_alloc()
437 bdev->bd_stats = alloc_percpu(struct disk_stats); in bdev_alloc()
438 if (!bdev->bd_stats) { in bdev_alloc()
442 bdev->bd_disk = disk; in bdev_alloc()
443 return bdev; in bdev_alloc()
446 void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors) in bdev_set_nr_sectors() argument
448 spin_lock(&bdev->bd_size_lock); in bdev_set_nr_sectors()
449 i_size_write(BD_INODE(bdev), (loff_t)sectors << SECTOR_SHIFT); in bdev_set_nr_sectors()
450 bdev->bd_nr_sectors = sectors; in bdev_set_nr_sectors()
451 spin_unlock(&bdev->bd_size_lock); in bdev_set_nr_sectors()
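bdev_set_nr_sectors() records the capacity twice, as bytes in the inode and as sectors in bd_nr_sectors, under bd_size_lock. A sketch reading back both views through the exported accessors (demo_report_size is hypothetical; SECTOR_SHIFT is 9, so bytes = sectors << 9):

    #include <linux/blkdev.h>

    static void demo_report_size(struct block_device *bdev)
    {
            sector_t sectors = bdev_nr_sectors(bdev);
            loff_t bytes = bdev_nr_bytes(bdev);

            pr_info("capacity: %llu sectors (%lld bytes)\n",
                    (unsigned long long)sectors, (long long)bytes);
    }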
454 void bdev_add(struct block_device *bdev, dev_t dev) in bdev_add() argument
456 struct inode *inode = BD_INODE(bdev); in bdev_add()
457 if (bdev_stable_writes(bdev)) in bdev_add()
458 mapping_set_stable_writes(bdev->bd_mapping); in bdev_add()
459 bdev->bd_dev = dev; in bdev_add()
465 void bdev_unhash(struct block_device *bdev) in bdev_unhash() argument
467 remove_inode_hash(BD_INODE(bdev)); in bdev_unhash()
470 void bdev_drop(struct block_device *bdev) in bdev_drop() argument
472 iput(BD_INODE(bdev)); in bdev_drop()
499 static bool bd_may_claim(struct block_device *bdev, void *holder, in bd_may_claim() argument
502 struct block_device *whole = bdev_whole(bdev); in bd_may_claim()
506 if (bdev->bd_holder) { in bd_may_claim()
510 if (bdev->bd_holder == holder) { in bd_may_claim()
511 if (WARN_ON_ONCE(bdev->bd_holder_ops != hops)) in bd_may_claim()
522 if (whole != bdev && in bd_may_claim()
541 int bd_prepare_to_claim(struct block_device *bdev, void *holder, in bd_prepare_to_claim() argument
544 struct block_device *whole = bdev_whole(bdev); in bd_prepare_to_claim()
551 if (!bd_may_claim(bdev, holder, hops)) { in bd_prepare_to_claim()
593 static void bd_finish_claiming(struct block_device *bdev, void *holder, in bd_finish_claiming() argument
596 struct block_device *whole = bdev_whole(bdev); in bd_finish_claiming()
599 BUG_ON(!bd_may_claim(bdev, holder, hops)); in bd_finish_claiming()
606 bdev->bd_holders++; in bd_finish_claiming()
607 mutex_lock(&bdev->bd_holder_lock); in bd_finish_claiming()
608 bdev->bd_holder = holder; in bd_finish_claiming()
609 bdev->bd_holder_ops = hops; in bd_finish_claiming()
610 mutex_unlock(&bdev->bd_holder_lock); in bd_finish_claiming()
624 void bd_abort_claiming(struct block_device *bdev, void *holder) in bd_abort_claiming() argument
627 bd_clear_claiming(bdev_whole(bdev), holder); in bd_abort_claiming()
632 static void bd_end_claim(struct block_device *bdev, void *holder) in bd_end_claim() argument
634 struct block_device *whole = bdev_whole(bdev); in bd_end_claim()
642 WARN_ON_ONCE(bdev->bd_holder != holder); in bd_end_claim()
643 WARN_ON_ONCE(--bdev->bd_holders < 0); in bd_end_claim()
645 if (!bdev->bd_holders) { in bd_end_claim()
646 mutex_lock(&bdev->bd_holder_lock); in bd_end_claim()
647 bdev->bd_holder = NULL; in bd_end_claim()
648 bdev->bd_holder_ops = NULL; in bd_end_claim()
649 mutex_unlock(&bdev->bd_holder_lock); in bd_end_claim()
650 if (bdev_test_flag(bdev, BD_WRITE_HOLDER)) in bd_end_claim()
662 disk_unblock_events(bdev->bd_disk); in bd_end_claim()
663 bdev_clear_flag(bdev, BD_WRITE_HOLDER); in bd_end_claim()
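Claiming is a two-phase protocol: bd_prepare_to_claim() blocks other exclusive openers, then either bd_finish_claiming() installs the holder or bd_abort_claiming() backs out. A sketch of the temporary-claim pattern, using the function itself as the holder cookie the way truncate_bdev_range() does (demo_exclusive_op is hypothetical):

    #include <linux/blkdev.h>

    static int demo_exclusive_op(struct block_device *bdev)
    {
            int err = bd_prepare_to_claim(bdev, demo_exclusive_op, NULL);

            if (err)
                    return err;
            /* ... operate while no one else can claim the device ... */
            bd_abort_claiming(bdev, demo_exclusive_op);
            return 0;
    }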
667 static void blkdev_flush_mapping(struct block_device *bdev) in blkdev_flush_mapping() argument
669 WARN_ON_ONCE(bdev->bd_holders); in blkdev_flush_mapping()
670 sync_blockdev(bdev); in blkdev_flush_mapping()
671 kill_bdev(bdev); in blkdev_flush_mapping()
672 bdev_write_inode(bdev); in blkdev_flush_mapping()
675 static void blkdev_put_whole(struct block_device *bdev) in blkdev_put_whole() argument
677 if (atomic_dec_and_test(&bdev->bd_openers)) in blkdev_put_whole()
678 blkdev_flush_mapping(bdev); in blkdev_put_whole()
679 if (bdev->bd_disk->fops->release) in blkdev_put_whole()
680 bdev->bd_disk->fops->release(bdev->bd_disk); in blkdev_put_whole()
683 static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode) in blkdev_get_whole() argument
685 struct gendisk *disk = bdev->bd_disk; in blkdev_get_whole()
699 if (!atomic_read(&bdev->bd_openers)) in blkdev_get_whole()
700 set_init_blocksize(bdev); in blkdev_get_whole()
701 atomic_inc(&bdev->bd_openers); in blkdev_get_whole()
709 blkdev_put_whole(bdev); in blkdev_get_whole()
779 struct block_device *bdev; in blkdev_get_no_open() local
794 bdev = &BDEV_I(inode)->bdev; in blkdev_get_no_open()
795 if (!kobject_get_unless_zero(&bdev->bd_device.kobj)) in blkdev_get_no_open()
796 bdev = NULL; in blkdev_get_no_open()
798 return bdev; in blkdev_get_no_open()
801 void blkdev_put_no_open(struct block_device *bdev) in blkdev_put_no_open() argument
803 put_device(&bdev->bd_device); in blkdev_put_no_open()
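blkdev_get_no_open() pins a block_device found by inode lookup only if its embedded device refcount has not already dropped to zero, via kobject_get_unless_zero(). A sketch of the same conditional-get idiom using the plain kref API (demo types are hypothetical):

    #include <linux/kref.h>

    struct demo_obj {
            struct kref ref;
    };

    /* @candidate was found under a lookup lock; it may be dying. */
    static struct demo_obj *demo_tryget(struct demo_obj *candidate)
    {
            if (!kref_get_unless_zero(&candidate->ref))
                    return NULL;        /* refcount already hit zero */
            return candidate;
    }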
806 static bool bdev_writes_blocked(struct block_device *bdev) in bdev_writes_blocked() argument
808 return bdev->bd_writers < 0; in bdev_writes_blocked()
811 static void bdev_block_writes(struct block_device *bdev) in bdev_block_writes() argument
813 bdev->bd_writers--; in bdev_block_writes()
816 static void bdev_unblock_writes(struct block_device *bdev) in bdev_unblock_writes() argument
818 bdev->bd_writers++; in bdev_unblock_writes()
821 static bool bdev_may_open(struct block_device *bdev, blk_mode_t mode) in bdev_may_open() argument
826 if (mode & BLK_OPEN_WRITE && bdev_writes_blocked(bdev)) in bdev_may_open()
828 if (mode & BLK_OPEN_RESTRICT_WRITES && bdev->bd_writers > 0) in bdev_may_open()
833 static void bdev_claim_write_access(struct block_device *bdev, blk_mode_t mode) in bdev_claim_write_access() argument
840 bdev_block_writes(bdev); in bdev_claim_write_access()
842 bdev->bd_writers++; in bdev_claim_write_access()
852 struct block_device *bdev; in bdev_yield_write_access() local
860 bdev = file_bdev(bdev_file); in bdev_yield_write_access()
863 bdev_unblock_writes(bdev); in bdev_yield_write_access()
865 bdev->bd_writers--; in bdev_yield_write_access()
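The bd_writers field encodes two states in one counter: a negative value means a BLK_OPEN_RESTRICT_WRITES holder has blocked writes, a positive value counts ordinary write openers. A userspace model of the bdev_may_open() check (demo_may_open is hypothetical):

    #include <stdbool.h>

    static bool demo_may_open(int bd_writers, bool wants_write,
                              bool restrict_writes)
    {
            if (wants_write && bd_writers < 0)
                    return false;       /* writes are currently blocked */
            if (restrict_writes && bd_writers > 0)
                    return false;       /* can't block while writers exist */
            return true;
    }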
885 int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder, in bdev_open() argument
889 struct gendisk *disk = bdev->bd_disk; in bdev_open()
894 ret = bd_prepare_to_claim(bdev, holder, hops); in bdev_open()
911 if (!bdev_may_open(bdev, mode)) in bdev_open()
913 if (bdev_is_partition(bdev)) in bdev_open()
914 ret = blkdev_get_part(bdev, mode); in bdev_open()
916 ret = blkdev_get_whole(bdev, mode); in bdev_open()
919 bdev_claim_write_access(bdev, mode); in bdev_open()
921 bd_finish_claiming(bdev, holder, hops); in bdev_open()
931 !bdev_test_flag(bdev, BD_WRITE_HOLDER) && in bdev_open()
933 bdev_set_flag(bdev, BD_WRITE_HOLDER); in bdev_open()
944 if (bdev_nowait(bdev)) in bdev_open()
948 bdev_file->f_mapping = bdev->bd_mapping; in bdev_open()
957 bd_abort_claiming(bdev, holder); in bdev_open()
997 struct block_device *bdev; in bdev_file_open_by_dev() local
1005 bdev = blkdev_get_no_open(dev); in bdev_file_open_by_dev()
1006 if (!bdev) in bdev_file_open_by_dev()
1010 bdev_file = alloc_file_pseudo_noaccount(BD_INODE(bdev), in bdev_file_open_by_dev()
1013 blkdev_put_no_open(bdev); in bdev_file_open_by_dev()
1016 ihold(BD_INODE(bdev)); in bdev_file_open_by_dev()
1018 ret = bdev_open(bdev, mode, holder, hops, bdev_file); in bdev_file_open_by_dev()
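bdev_file_open_by_dev() is the file-based entry point for in-kernel users: it returns a struct file, file_bdev() yields the block_device behind it, and fput() releases both the open and any claim. A hedged sketch (demo_open is hypothetical; a non-NULL holder makes the open exclusive):

    #include <linux/blkdev.h>
    #include <linux/file.h>

    static int demo_open(dev_t dev, void *holder)
    {
            struct file *bdev_file;
            struct block_device *bdev;

            bdev_file = bdev_file_open_by_dev(dev,
                            BLK_OPEN_READ | BLK_OPEN_WRITE, holder, NULL);
            if (IS_ERR(bdev_file))
                    return PTR_ERR(bdev_file);

            bdev = file_bdev(bdev_file);
            pr_info("opened %pg\n", bdev);  /* %pg prints the bdev name */
            fput(bdev_file);                /* drops the open and the claim */
            return 0;
    }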
1055 struct block_device *bdev = file_bdev(bdev_file); in bd_yield_claim() local
1058 lockdep_assert_held(&bdev->bd_disk->open_mutex); in bd_yield_claim()
1064 bd_end_claim(bdev, holder); in bd_yield_claim()
1069 struct block_device *bdev = file_bdev(bdev_file); in bdev_release() local
1071 struct gendisk *disk = bdev->bd_disk; in bdev_release()
1084 if (atomic_read(&bdev->bd_openers) == 1) in bdev_release()
1085 sync_blockdev(bdev); in bdev_release()
1100 if (bdev_is_partition(bdev)) in bdev_release()
1101 blkdev_put_part(bdev); in bdev_release()
1103 blkdev_put_whole(bdev); in bdev_release()
1108 blkdev_put_no_open(bdev); in bdev_release()
1125 struct block_device *bdev = file_bdev(bdev_file); in bdev_fput() local
1126 struct gendisk *disk = bdev->bd_disk; in bdev_fput()
1197 void bdev_mark_dead(struct block_device *bdev, bool surprise) in bdev_mark_dead() argument
1199 mutex_lock(&bdev->bd_holder_lock); in bdev_mark_dead()
1200 if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead) in bdev_mark_dead()
1201 bdev->bd_holder_ops->mark_dead(bdev, surprise); in bdev_mark_dead()
1203 mutex_unlock(&bdev->bd_holder_lock); in bdev_mark_dead()
1204 sync_blockdev(bdev); in bdev_mark_dead()
1207 invalidate_bdev(bdev); in bdev_mark_dead()
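bdev_mark_dead() notifies the holder through blk_holder_ops->mark_dead() under bd_holder_lock, then syncs and invalidates the cache. A sketch of a holder wiring up that callback (demo names are hypothetical):

    #include <linux/blkdev.h>

    static void demo_mark_dead(struct block_device *bdev, bool surprise)
    {
            pr_warn("%pg is gone%s\n", bdev,
                    surprise ? " (surprise removal)" : "");
            /* ... start tearing down whatever sits on top of @bdev ... */
    }

    static const struct blk_holder_ops demo_holder_ops = {
            .mark_dead = demo_mark_dead,
    };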
1224 struct block_device *bdev; in sync_bdevs() local
1245 bdev = I_BDEV(inode); in sync_bdevs()
1247 mutex_lock(&bdev->bd_disk->open_mutex); in sync_bdevs()
1248 if (!atomic_read(&bdev->bd_openers)) { in sync_bdevs()
1261 mutex_unlock(&bdev->bd_disk->open_mutex); in sync_bdevs()
1276 struct block_device *bdev; in bdev_statx() local
1289 bdev = blkdev_get_no_open(backing_inode->i_rdev); in bdev_statx()
1290 if (!bdev) in bdev_statx()
1294 stat->dio_mem_align = bdev_dma_alignment(bdev) + 1; in bdev_statx()
1295 stat->dio_offset_align = bdev_logical_block_size(bdev); in bdev_statx()
1299 if (request_mask & STATX_WRITE_ATOMIC && bdev_can_atomic_write(bdev)) { in bdev_statx()
1300 struct request_queue *bd_queue = bdev->bd_queue; in bdev_statx()
1307 blkdev_put_no_open(bdev); in bdev_statx()
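bdev_statx() is what surfaces these limits to userspace via statx(). A userspace sketch querying the direct-I/O alignment fields, assuming a kernel and libc with STATX_DIOALIGN support (the device path is an example):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
            struct statx stx;

            if (statx(AT_FDCWD, "/dev/sda", 0, STATX_DIOALIGN, &stx) != 0) {
                    perror("statx");
                    return 1;
            }
            if (stx.stx_mask & STATX_DIOALIGN)
                    printf("dio mem align %u, offset align %u\n",
                           stx.stx_dio_mem_align, stx.stx_dio_offset_align);
            return 0;
    }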
1316 unsigned int block_size(struct block_device *bdev) in block_size() argument
1318 return 1 << BD_INODE(bdev)->i_blkbits; in block_size()
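block_size() returns the soft block size derived from the bdev inode's i_blkbits, distinct from the hardware limits. A sketch contrasting the three (demo_block_sizes is hypothetical):

    #include <linux/blkdev.h>

    static void demo_block_sizes(struct block_device *bdev)
    {
            pr_info("soft %u, logical %u, physical %u\n",
                    block_size(bdev),
                    bdev_logical_block_size(bdev),
                    bdev_physical_block_size(bdev));
    }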