Lines Matching refs:disk

140 state->disk = hd; in check_partition()
316 static struct block_device *add_partition(struct gendisk *disk, int partno, in add_partition() argument
321 struct device *ddev = disk_to_dev(disk); in add_partition()
327 lockdep_assert_held(&disk->open_mutex); in add_partition()
329 if (partno >= disk_max_parts(disk)) in add_partition()
336 switch (disk->queue->limits.zoned) { in add_partition()
339 disk->disk_name); in add_partition()
343 disk->disk_name); in add_partition()
344 blk_queue_set_zoned(disk, BLK_ZONED_NONE); in add_partition()
350 if (xa_load(&disk->part_tbl, partno)) in add_partition()
354 get_device(disk_to_dev(disk)); in add_partition()
357 bdev = bdev_alloc(disk, partno); in add_partition()
377 if (bdev->bd_partno < disk->minors) { in add_partition()
378 devt = MKDEV(disk->major, disk->first_minor + bdev->bd_partno); in add_partition()
413 err = xa_insert(&disk->part_tbl, partno, bdev, GFP_KERNEL); in add_partition()
430 put_disk(disk); in add_partition()
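
The add_partition() matches above outline the core of creating a partition block device. Below is a condensed sketch of that flow, paraphrased from these references plus memory of block/partitions/core.c; the device/sysfs registration, uevent suppression, and full error unwinding are elided, so treat it as illustrative rather than verbatim kernel code.

static struct block_device *add_partition(struct gendisk *disk, int partno,
                sector_t start, sector_t len, int flags,
                struct partition_meta_info *info)
{
        struct device *ddev = disk_to_dev(disk);
        struct block_device *bdev;
        dev_t devt = MKDEV(0, 0);
        int err;

        lockdep_assert_held(&disk->open_mutex);

        if (partno >= disk_max_parts(disk))
                return ERR_PTR(-EINVAL);

        /* Host-managed zoned disks cannot be partitioned; host-aware ones
         * are downgraded to non-zoned operation before partitioning. */
        switch (disk->queue->limits.zoned) {
        case BLK_ZONED_HM:
                return ERR_PTR(-ENXIO);
        case BLK_ZONED_HA:
                blk_queue_set_zoned(disk, BLK_ZONED_NONE);
                break;
        default:
                break;
        }

        if (xa_load(&disk->part_tbl, partno))
                return ERR_PTR(-EBUSY);

        /* The partition pins the whole-disk device until it is deleted. */
        get_device(disk_to_dev(disk));

        bdev = bdev_alloc(disk, partno);
        if (!bdev) {
                put_disk(disk);
                return ERR_PTR(-ENOMEM);
        }

        /* Low partition numbers fit in the disk's own minor range; higher
         * numbers need an extended devt (not visible in the matches). */
        if (bdev->bd_partno < disk->minors)
                devt = MKDEV(disk->major, disk->first_minor + bdev->bd_partno);

        /* ... registration of ddev with devt elided ... */

        err = xa_insert(&disk->part_tbl, partno, bdev, GFP_KERNEL);
        if (err)
                return ERR_PTR(err);    /* real code unwinds the device here,
                                           dropping the disk reference above */
        return bdev;
}
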
434 static bool partition_overlaps(struct gendisk *disk, sector_t start, in partition_overlaps() argument
442 xa_for_each_start(&disk->part_tbl, idx, part, 1) { in partition_overlaps()
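
partition_overlaps() walks the same disk->part_tbl xarray. A sketch of the overlap test follows; only the signature and the xa_for_each_start() walk appear in the matches, so the interval comparison and the skip_partno handling are filled in from the kernel source as an assumption.

static bool partition_overlaps(struct gendisk *disk, sector_t start,
                sector_t length, int skip_partno)
{
        struct block_device *part;
        bool overlap = false;
        unsigned long idx;

        /* Slot 0 is the whole-disk bdev, so start the walk at index 1. */
        xa_for_each_start(&disk->part_tbl, idx, part, 1) {
                if (part->bd_partno != skip_partno &&
                    start < part->bd_start_sect + bdev_nr_sectors(part) &&
                    start + length > part->bd_start_sect) {
                        overlap = true;
                        break;
                }
        }
        return overlap;
}
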
455 int bdev_add_partition(struct gendisk *disk, int partno, sector_t start, in bdev_add_partition() argument
461 mutex_lock(&disk->open_mutex); in bdev_add_partition()
462 if (!disk_live(disk)) { in bdev_add_partition()
467 if (partition_overlaps(disk, start, length, -1)) { in bdev_add_partition()
472 part = add_partition(disk, partno, start, length, in bdev_add_partition()
476 mutex_unlock(&disk->open_mutex); in bdev_add_partition()
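
Read in order, the bdev_add_partition() matches give the ioctl-facing flow: take disk->open_mutex, refuse a dead disk, reject overlapping ranges, then hand off to add_partition(). A sketch under those assumptions; the error codes and the ADDPART_FLAG_NONE argument are filled in from memory, not from the matches.

int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
                sector_t length)
{
        struct block_device *part;
        int ret;

        mutex_lock(&disk->open_mutex);
        if (!disk_live(disk)) {
                ret = -ENXIO;
                goto out;
        }
        if (partition_overlaps(disk, start, length, -1)) {
                ret = -EBUSY;
                goto out;
        }
        part = add_partition(disk, partno, start, length,
                        ADDPART_FLAG_NONE, NULL);
        ret = PTR_ERR_OR_ZERO(part);
out:
        mutex_unlock(&disk->open_mutex);
        return ret;
}
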
480 int bdev_del_partition(struct gendisk *disk, int partno) in bdev_del_partition() argument
485 mutex_lock(&disk->open_mutex); in bdev_del_partition()
486 part = xa_load(&disk->part_tbl, partno); in bdev_del_partition()
497 mutex_unlock(&disk->open_mutex); in bdev_del_partition()
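
For bdev_del_partition() only the locking and the xa_load() lookup appear above. A plausible sketch of the rest, assuming the open-count check and a delete_partition() helper as in contemporary kernels:

int bdev_del_partition(struct gendisk *disk, int partno)
{
        struct block_device *part;
        int ret = -ENXIO;

        mutex_lock(&disk->open_mutex);
        part = xa_load(&disk->part_tbl, partno);
        if (!part)
                goto out_unlock;

        /* Refuse to delete a partition somebody still has open. */
        ret = -EBUSY;
        if (part->bd_openers)
                goto out_unlock;

        delete_partition(part);
        ret = 0;
out_unlock:
        mutex_unlock(&disk->open_mutex);
        return ret;
}
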
501 int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start, in bdev_resize_partition() argument
507 mutex_lock(&disk->open_mutex); in bdev_resize_partition()
508 part = xa_load(&disk->part_tbl, partno); in bdev_resize_partition()
517 if (partition_overlaps(disk, start, length, partno)) in bdev_resize_partition()
524 mutex_unlock(&disk->open_mutex); in bdev_resize_partition()
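
bdev_resize_partition() repeats the lookup but passes its own partno to partition_overlaps() so the partition being resized does not collide with itself. A sketch, assuming the start-sector check and the bdev_set_nr_sectors() helper from the kernel source:

int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
                sector_t length)
{
        struct block_device *part;
        int ret = -ENXIO;

        mutex_lock(&disk->open_mutex);
        part = xa_load(&disk->part_tbl, partno);
        if (!part)
                goto out_unlock;

        /* Only the length may change; the start sector must stay put. */
        ret = -EINVAL;
        if (start != part->bd_start_sect)
                goto out_unlock;

        ret = -EBUSY;
        if (partition_overlaps(disk, start, length, partno))
                goto out_unlock;

        bdev_set_nr_sectors(part, length);
        ret = 0;
out_unlock:
        mutex_unlock(&disk->open_mutex);
        return ret;
}
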
528 static bool disk_unlock_native_capacity(struct gendisk *disk) in disk_unlock_native_capacity() argument
530 const struct block_device_operations *bdops = disk->fops; in disk_unlock_native_capacity()
533 !(disk->flags & GENHD_FL_NATIVE_CAPACITY)) { in disk_unlock_native_capacity()
535 bdops->unlock_native_capacity(disk); in disk_unlock_native_capacity()
536 disk->flags |= GENHD_FL_NATIVE_CAPACITY; in disk_unlock_native_capacity()
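
disk_unlock_native_capacity() is the retry hook used whenever a partition runs past end-of-device: if the driver provides unlock_native_capacity() and it has not been tried yet, call it once, mark the disk, and report success so the scan can be repeated. A sketch; the continuation printk text is paraphrased.

static bool disk_unlock_native_capacity(struct gendisk *disk)
{
        const struct block_device_operations *bdops = disk->fops;

        if (bdops->unlock_native_capacity &&
            !(disk->flags & GENHD_FL_NATIVE_CAPACITY)) {
                /* continues the caller's "beyond EOD" warning line */
                printk(KERN_CONT "enabling native capacity\n");
                bdops->unlock_native_capacity(disk);
                disk->flags |= GENHD_FL_NATIVE_CAPACITY;
                return true;    /* caller should rescan */
        }
        printk(KERN_CONT "truncated\n");
        return false;
}
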
544 void blk_drop_partitions(struct gendisk *disk) in blk_drop_partitions() argument
549 lockdep_assert_held(&disk->open_mutex); in blk_drop_partitions()
551 xa_for_each_start(&disk->part_tbl, idx, part, 1) in blk_drop_partitions()
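
blk_drop_partitions() is the teardown counterpart: with open_mutex held it walks the partition table and deletes every entry except slot 0, the whole-disk block device. A sketch, assuming the same delete_partition() helper:

void blk_drop_partitions(struct gendisk *disk)
{
        struct block_device *part;
        unsigned long idx;

        lockdep_assert_held(&disk->open_mutex);

        /* Index 0 is disk->part0; only the real partitions are dropped. */
        xa_for_each_start(&disk->part_tbl, idx, part, 1)
                delete_partition(part);
}
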
555 static bool blk_add_partition(struct gendisk *disk, in blk_add_partition() argument
565 if (from >= get_capacity(disk)) { in blk_add_partition()
568 disk->disk_name, p, (unsigned long long) from); in blk_add_partition()
569 if (disk_unlock_native_capacity(disk)) in blk_add_partition()
574 if (from + size > get_capacity(disk)) { in blk_add_partition()
577 disk->disk_name, p, (unsigned long long) size); in blk_add_partition()
579 if (disk_unlock_native_capacity(disk)) in blk_add_partition()
587 size = get_capacity(disk) - from; in blk_add_partition()
590 part = add_partition(disk, p, from, size, state->parts[p].flags, in blk_add_partition()
594 disk->disk_name, p, -PTR_ERR(part)); in blk_add_partition()
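
blk_add_partition() validates one parsed entry against the current capacity before creating it: a start beyond end-of-device skips the entry (or triggers a rescan via disk_unlock_native_capacity()), a size running past the end is clamped, and failures from add_partition() other than -ENXIO are logged. A condensed sketch with the warning text paraphrased:

static bool blk_add_partition(struct gendisk *disk,
                struct parsed_partitions *state, int p)
{
        sector_t size = state->parts[p].size;
        sector_t from = state->parts[p].from;
        struct block_device *part;

        if (!size)
                return true;

        if (from >= get_capacity(disk)) {
                /* "p%d start is beyond EOD" warning for disk->disk_name */
                if (disk_unlock_native_capacity(disk))
                        return false;   /* stop and rescan */
                return true;            /* skip just this entry */
        }

        if (from + size > get_capacity(disk)) {
                /* "p%d size extends beyond EOD" warning */
                if (disk_unlock_native_capacity(disk))
                        return false;
                /* Broken tables still get a partition, clamped to the disk. */
                size = get_capacity(disk) - from;
        }

        part = add_partition(disk, p, from, size, state->parts[p].flags,
                             &state->parts[p].info);
        if (IS_ERR(part) && PTR_ERR(part) != -ENXIO)
                pr_err("%s: p%d could not be added: %ld\n",
                       disk->disk_name, p, -PTR_ERR(part));
        return true;
}
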
605 static int blk_add_partitions(struct gendisk *disk) in blk_add_partitions() argument
610 if (!disk_part_scan_enabled(disk)) in blk_add_partitions()
613 state = check_partition(disk); in blk_add_partitions()
623 disk->disk_name); in blk_add_partitions()
624 if (disk_unlock_native_capacity(disk)) in blk_add_partitions()
633 if (disk->queue->limits.zoned == BLK_ZONED_HM) { in blk_add_partitions()
635 disk->disk_name); in blk_add_partitions()
648 disk->disk_name); in blk_add_partitions()
649 if (disk_unlock_native_capacity(disk)) in blk_add_partitions()
654 kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE); in blk_add_partitions()
657 if (!blk_add_partition(disk, state, p)) in blk_add_partitions()
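
blk_add_partitions() drives the scan: it bails out early if scanning is disabled or check_partition() finds nothing, refuses partition tables on host-managed zoned devices, retries with the native capacity unlocked when the table reads past end-of-device, emits a KOBJ_CHANGE uevent, and then adds each parsed entry. A condensed sketch, assuming the parsed_partitions fields (access_beyond_eod, limit) and free_partitions() from the kernel source:

static int blk_add_partitions(struct gendisk *disk)
{
        struct parsed_partitions *state;
        int ret = -EAGAIN, p;

        if (!disk_part_scan_enabled(disk))
                return 0;

        state = check_partition(disk);
        if (!state)
                return 0;
        if (IS_ERR(state)) {
                /* -ENOSPC means the table itself lies beyond EOD: unlock the
                 * native capacity and ask the caller to rescan. */
                if (PTR_ERR(state) == -ENOSPC &&
                    disk_unlock_native_capacity(disk))
                        return -EAGAIN;
                return -EIO;
        }

        /* Partitions are not supported on host-managed zoned devices. */
        if (disk->queue->limits.zoned == BLK_ZONED_HM) {
                ret = 0;
                goto out_free_state;
        }

        /* A partial read past EOD also gets one shot at a larger capacity. */
        if (state->access_beyond_eod && disk_unlock_native_capacity(disk))
                goto out_free_state;    /* returns -EAGAIN: rescan */

        /* Tell userspace the media / partition table may have changed. */
        kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);

        for (p = 1; p < state->limit; p++)
                if (!blk_add_partition(disk, state, p))
                        goto out_free_state;

        ret = 0;
out_free_state:
        free_partitions(state);
        return ret;
}
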
666 int bdev_disk_changed(struct gendisk *disk, bool invalidate) in bdev_disk_changed() argument
670 lockdep_assert_held(&disk->open_mutex); in bdev_disk_changed()
672 if (!disk_live(disk)) in bdev_disk_changed()
676 if (disk->open_partitions) in bdev_disk_changed()
678 sync_blockdev(disk->part0); in bdev_disk_changed()
679 invalidate_bdev(disk->part0); in bdev_disk_changed()
680 blk_drop_partitions(disk); in bdev_disk_changed()
682 clear_bit(GD_NEED_PART_SCAN, &disk->state); in bdev_disk_changed()
693 if (disk_part_scan_enabled(disk) || in bdev_disk_changed()
694 !(disk->flags & GENHD_FL_REMOVABLE)) in bdev_disk_changed()
695 set_capacity(disk, 0); in bdev_disk_changed()
698 if (get_capacity(disk)) { in bdev_disk_changed()
699 ret = blk_add_partitions(disk); in bdev_disk_changed()
707 kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE); in bdev_disk_changed()
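
bdev_disk_changed() ties the pieces together for the open/rescan path: with open_mutex held it refuses dead disks and disks with open partitions, flushes and invalidates the whole-disk bdev, drops the old partitions, clears GD_NEED_PART_SCAN, and rescans, retrying on -EAGAIN. A condensed sketch; the 'invalidate' handling of removable media is paraphrased from the kernel's comment about legacy udisks polling.

int bdev_disk_changed(struct gendisk *disk, bool invalidate)
{
        int ret = 0;

        lockdep_assert_held(&disk->open_mutex);

        if (!disk_live(disk))
                return -ENXIO;

rescan:
        if (disk->open_partitions)
                return -EBUSY;
        sync_blockdev(disk->part0);
        invalidate_bdev(disk->part0);
        blk_drop_partitions(disk);

        clear_bit(GD_NEED_PART_SCAN, &disk->state);

        /* Only zero the capacity where that does not break legacy
         * removable-media polling (e.g. ide-cdrom + udisks). */
        if (invalidate &&
            (disk_part_scan_enabled(disk) ||
             !(disk->flags & GENHD_FL_REMOVABLE)))
                set_capacity(disk, 0);

        if (get_capacity(disk)) {
                ret = blk_add_partitions(disk);
                if (ret == -EAGAIN)
                        goto rescan;
        } else if (invalidate) {
                /* Media may have gone away: notify userspace. */
                kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
        }

        return ret;
}
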
720 struct address_space *mapping = state->disk->part0->bd_inode->i_mapping; in read_part_sector()
723 if (n >= get_capacity(state->disk)) { in read_part_sector()
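
Finally, read_part_sector() is how the table parsers touch the disk: it reads through the whole-disk bdev's page-cache mapping and flags any access past the current capacity so blk_add_partitions() can retry with the native capacity unlocked. A sketch, assuming the Sector cookie and read_mapping_page() usage from block/partitions/core.c:

unsigned char *read_part_sector(struct parsed_partitions *state, sector_t n,
                Sector *p)
{
        struct address_space *mapping =
                state->disk->part0->bd_inode->i_mapping;
        struct page *page;

        if (n >= get_capacity(state->disk)) {
                /* Remember the overrun so the scan can be retried. */
                state->access_beyond_eod = true;
                return NULL;
        }

        /* PAGE_SHIFT - 9 gives the number of 512-byte sectors per page. */
        page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT - 9)),
                                 NULL);
        if (IS_ERR(page))
                goto out;
        if (PageError(page)) {
                put_page(page);
                goto out;
        }

        p->v = page;
        return (unsigned char *)page_address(page) +
                        ((n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9);
out:
        p->v = NULL;
        return NULL;
}
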