Lines Matching refs:bio
333 int __bio_queue_enter(struct request_queue *q, struct bio *bio) in __bio_queue_enter() argument
336 struct gendisk *disk = bio->bi_bdev->bd_disk; in __bio_queue_enter()
338 if (bio->bi_opf & REQ_NOWAIT) { in __bio_queue_enter()
341 bio_wouldblock_error(bio); in __bio_queue_enter()
365 bio_io_error(bio); in __bio_queue_enter()
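A bio carrying REQ_NOWAIT must not sleep waiting for queue entry, so __bio_queue_enter() fails it through bio_wouldblock_error(), which completes the bio with BLK_STS_AGAIN. A minimal sketch of a caller, assuming the function names and GFP choice below (they are illustrative, not from blk-core.c):

	#include <linux/blkdev.h>

	/* Hypothetical completion handler: a REQ_NOWAIT submission can fail fast. */
	static void nowait_end_io(struct bio *bio)
	{
		if (bio->bi_status == BLK_STS_AGAIN)
			pr_debug("queue busy, retry without REQ_NOWAIT\n");
		bio_put(bio);
	}

	/* Sketch: read one page without blocking on queue entry. */
	static void nowait_read_page(struct block_device *bdev, struct page *page,
				     sector_t sector)
	{
		struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ | REQ_NOWAIT,
					    GFP_NOIO);

		bio->bi_iter.bi_sector = sector;
		__bio_add_page(bio, page, PAGE_SIZE, 0);
		bio->bi_end_io = nowait_end_io;
		submit_bio(bio);
	}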
522 static inline void bio_check_ro(struct bio *bio) in bio_check_ro() argument
524 if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) { in bio_check_ro()
525 if (op_is_flush(bio->bi_opf) && !bio_sectors(bio)) in bio_check_ro()
528 if (bdev_test_flag(bio->bi_bdev, BD_RO_WARNED)) in bio_check_ro()
531 bdev_set_flag(bio->bi_bdev, BD_RO_WARNED); in bio_check_ro()
538 bio->bi_bdev); in bio_check_ro()
542 static noinline int should_fail_bio(struct bio *bio) in should_fail_bio() argument
544 if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size)) in should_fail_bio()
555 static inline int bio_check_eod(struct bio *bio) in bio_check_eod() argument
557 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev); in bio_check_eod()
558 unsigned int nr_sectors = bio_sectors(bio); in bio_check_eod()
562 bio->bi_iter.bi_sector > maxsector - nr_sectors)) { in bio_check_eod()
565 current->comm, bio->bi_bdev, bio->bi_opf, in bio_check_eod()
566 bio->bi_iter.bi_sector, nr_sectors, maxsector); in bio_check_eod()
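bio_check_eod() rejects I/O that would run past the end of the device, and the comparison is arranged so the sector arithmetic cannot overflow. The same check in isolation, as a hypothetical standalone predicate (plain C, not kernel code):

	#include <stdbool.h>
	#include <stdint.h>

	/* true if [sector, sector + nr_sectors) does not fit in a device of maxsector sectors */
	static bool past_eod(uint64_t sector, uint32_t nr_sectors, uint64_t maxsector)
	{
		if (!nr_sectors || !maxsector)
			return false;
		/* written as a subtraction so "sector + nr_sectors" never overflows */
		return nr_sectors > maxsector || sector > maxsector - nr_sectors;
	}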
575 static int blk_partition_remap(struct bio *bio) in blk_partition_remap() argument
577 struct block_device *p = bio->bi_bdev; in blk_partition_remap()
579 if (unlikely(should_fail_request(p, bio->bi_iter.bi_size))) in blk_partition_remap()
581 if (bio_sectors(bio)) { in blk_partition_remap()
582 bio->bi_iter.bi_sector += p->bd_start_sect; in blk_partition_remap()
583 trace_block_bio_remap(bio, p->bd_dev, in blk_partition_remap()
584 bio->bi_iter.bi_sector - in blk_partition_remap()
587 bio_set_flag(bio, BIO_REMAPPED); in blk_partition_remap()
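Worked example (illustrative numbers): a bio aimed at sector 100 of a partition whose bd_start_sect is 2048 is rewritten to sector 2148 on the whole disk; trace_block_bio_remap() still logs the original partition-relative sector, and BIO_REMAPPED is set so the offset is applied only once.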
595 struct bio *bio) in blk_check_zone_append() argument
597 int nr_sectors = bio_sectors(bio); in blk_check_zone_append()
600 if (!bdev_is_zoned(bio->bi_bdev)) in blk_check_zone_append()
604 if (!bdev_is_zone_start(bio->bi_bdev, bio->bi_iter.bi_sector)) in blk_check_zone_append()
619 bio->bi_opf |= REQ_NOMERGE; in blk_check_zone_append()
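blk_check_zone_append() requires the target device to be zoned and the bio to start exactly at a zone boundary, and it forces REQ_NOMERGE because the device itself chooses the final write position. A hedged sketch of issuing a zone-append bio (function names are illustrative):

	#include <linux/blkdev.h>

	/* On completion, bi_sector has been updated to the sector where the
	 * device actually placed the data. */
	static void zone_append_end_io(struct bio *bio)
	{
		pr_debug("zone append landed at sector %llu\n",
			 (unsigned long long)bio->bi_iter.bi_sector);
		bio_put(bio);
	}

	/* Sketch: append one page to the zone beginning at zone_start. */
	static void append_page_to_zone(struct block_device *bdev,
					struct page *page, sector_t zone_start)
	{
		struct bio *bio = bio_alloc(bdev, 1, REQ_OP_ZONE_APPEND, GFP_NOIO);

		bio->bi_iter.bi_sector = zone_start;	/* must be a zone start */
		__bio_add_page(bio, page, PAGE_SIZE, 0);
		bio->bi_end_io = zone_append_end_io;
		submit_bio(bio);
	}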
624 static void __submit_bio(struct bio *bio) in __submit_bio() argument
629 if (unlikely(!blk_crypto_bio_prep(&bio))) in __submit_bio()
634 if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) { in __submit_bio()
635 blk_mq_submit_bio(bio); in __submit_bio()
636 } else if (likely(bio_queue_enter(bio) == 0)) { in __submit_bio()
637 struct gendisk *disk = bio->bi_bdev->bd_disk; in __submit_bio()
639 if ((bio->bi_opf & REQ_POLLED) && in __submit_bio()
641 bio->bi_status = BLK_STS_NOTSUPP; in __submit_bio()
642 bio_endio(bio); in __submit_bio()
644 disk->fops->submit_bio(bio); in __submit_bio()
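__submit_bio() routes the bio either into blk_mq_submit_bio() or, for bio-based drivers (BD_HAS_SUBMIT_BIO), into the driver's ->submit_bio hook; a polled bio is failed with BLK_STS_NOTSUPP when the driver offers no poll support. A sketch of the bio-based driver side (the driver names are hypothetical):

	#include <linux/module.h>
	#include <linux/blkdev.h>

	static void mydrv_submit_bio(struct bio *bio)
	{
		/* remap, split, or queue the bio internally, then complete or resubmit */
		bio_endio(bio);
	}

	static const struct block_device_operations mydrv_fops = {
		.owner		= THIS_MODULE,
		.submit_bio	= mydrv_submit_bio,
	};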
671 static void __submit_bio_noacct(struct bio *bio) in __submit_bio_noacct() argument
675 BUG_ON(bio->bi_next); in __submit_bio_noacct()
681 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in __submit_bio_noacct()
690 __submit_bio(bio); in __submit_bio_noacct()
698 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL) in __submit_bio_noacct()
699 if (q == bdev_get_queue(bio->bi_bdev)) in __submit_bio_noacct()
700 bio_list_add(&same, bio); in __submit_bio_noacct()
702 bio_list_add(&lower, bio); in __submit_bio_noacct()
710 } while ((bio = bio_list_pop(&bio_list_on_stack[0]))); in __submit_bio_noacct()
715 static void __submit_bio_noacct_mq(struct bio *bio) in __submit_bio_noacct_mq() argument
722 __submit_bio(bio); in __submit_bio_noacct_mq()
723 } while ((bio = bio_list_pop(&bio_list[0]))); in __submit_bio_noacct_mq()
728 void submit_bio_noacct_nocheck(struct bio *bio) in submit_bio_noacct_nocheck() argument
730 blk_cgroup_bio_start(bio); in submit_bio_noacct_nocheck()
731 blkcg_bio_issue_init(bio); in submit_bio_noacct_nocheck()
733 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) { in submit_bio_noacct_nocheck()
734 trace_block_bio_queue(bio); in submit_bio_noacct_nocheck()
739 bio_set_flag(bio, BIO_TRACE_COMPLETION); in submit_bio_noacct_nocheck()
749 bio_list_add(&current->bio_list[0], bio); in submit_bio_noacct_nocheck()
750 else if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) in submit_bio_noacct_nocheck()
751 __submit_bio_noacct_mq(bio); in submit_bio_noacct_nocheck()
753 __submit_bio_noacct(bio); in submit_bio_noacct_nocheck()
757 struct bio *bio) in blk_validate_atomic_write_op_size() argument
759 if (bio->bi_iter.bi_size > queue_atomic_write_unit_max_bytes(q)) in blk_validate_atomic_write_op_size()
762 if (bio->bi_iter.bi_size % queue_atomic_write_unit_min_bytes(q)) in blk_validate_atomic_write_op_size()
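blk_validate_atomic_write_op_size() bounds REQ_ATOMIC writes: the bio size may not exceed the queue's atomic write unit maximum and must be a multiple of its minimum. The same constraint pulled out as a standalone predicate (hypothetical helper):

	static bool atomic_write_size_ok(struct request_queue *q, unsigned int bytes)
	{
		return bytes <= queue_atomic_write_unit_max_bytes(q) &&
		       bytes % queue_atomic_write_unit_min_bytes(q) == 0;
	}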
777 void submit_bio_noacct(struct bio *bio) in submit_bio_noacct() argument
779 struct block_device *bdev = bio->bi_bdev; in submit_bio_noacct()
789 if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev)) in submit_bio_noacct()
792 if (should_fail_bio(bio)) in submit_bio_noacct()
794 bio_check_ro(bio); in submit_bio_noacct()
795 if (!bio_flagged(bio, BIO_REMAPPED)) { in submit_bio_noacct()
796 if (unlikely(bio_check_eod(bio))) in submit_bio_noacct()
799 unlikely(blk_partition_remap(bio))) in submit_bio_noacct()
807 if (op_is_flush(bio->bi_opf)) { in submit_bio_noacct()
808 if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE && in submit_bio_noacct()
809 bio_op(bio) != REQ_OP_ZONE_APPEND)) in submit_bio_noacct()
812 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA); in submit_bio_noacct()
813 if (!bio_sectors(bio)) { in submit_bio_noacct()
820 switch (bio_op(bio)) { in submit_bio_noacct()
824 if (bio->bi_opf & REQ_ATOMIC) { in submit_bio_noacct()
825 status = blk_validate_atomic_write_op_size(q, bio); in submit_bio_noacct()
845 status = blk_check_zone_append(q, bio); in submit_bio_noacct()
858 if (!bdev_is_zoned(bio->bi_bdev)) in submit_bio_noacct()
872 if (blk_throtl_bio(bio)) in submit_bio_noacct()
874 submit_bio_noacct_nocheck(bio); in submit_bio_noacct()
880 bio->bi_status = status; in submit_bio_noacct()
881 bio_endio(bio); in submit_bio_noacct()
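Among the checks above, a flush bio must be a REQ_OP_WRITE or REQ_OP_ZONE_APPEND; on queues without a volatile write cache the flush flags are stripped and a data-less flush completes immediately with BLK_STS_OK. A minimal sketch of issuing an empty flush (blkdev_issue_flush() is the usual helper; this open-codes the same idea for illustration):

	#include <linux/blkdev.h>

	static int issue_empty_flush(struct block_device *bdev)
	{
		struct bio *bio = bio_alloc(bdev, 0, REQ_OP_WRITE | REQ_PREFLUSH,
					    GFP_KERNEL);
		int ret;

		ret = submit_bio_wait(bio);
		bio_put(bio);
		return ret;
	}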
885 static void bio_set_ioprio(struct bio *bio) in bio_set_ioprio() argument
888 if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE) in bio_set_ioprio()
889 bio->bi_ioprio = get_current_ioprio(); in bio_set_ioprio()
890 blkcg_set_ioprio(bio); in bio_set_ioprio()
906 void submit_bio(struct bio *bio) in submit_bio() argument
908 if (bio_op(bio) == REQ_OP_READ) { in submit_bio()
909 task_io_account_read(bio->bi_iter.bi_size); in submit_bio()
910 count_vm_events(PGPGIN, bio_sectors(bio)); in submit_bio()
911 } else if (bio_op(bio) == REQ_OP_WRITE) { in submit_bio()
912 count_vm_events(PGPGOUT, bio_sectors(bio)); in submit_bio()
915 bio_set_ioprio(bio); in submit_bio()
916 submit_bio_noacct(bio); in submit_bio()
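submit_bio() adds the read/write vm-event and task accounting shown above, assigns an I/O priority if none is set, and hands off to submit_bio_noacct(). A minimal synchronous caller might look like this (sketch; the function name and GFP choice are illustrative):

	#include <linux/blkdev.h>

	static int read_one_page(struct block_device *bdev, struct page *page,
				 sector_t sector)
	{
		struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
		int ret;

		bio->bi_iter.bi_sector = sector;
		__bio_add_page(bio, page, PAGE_SIZE, 0);
		ret = submit_bio_wait(bio);	/* submit_bio() + wait for completion */
		bio_put(bio);
		return ret;
	}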
932 int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags) in bio_poll() argument
934 blk_qc_t cookie = READ_ONCE(bio->bi_cookie); in bio_poll()
939 bdev = READ_ONCE(bio->bi_bdev); in bio_poll()
967 ret = disk->fops->poll_bio(bio, iob, flags); in bio_poll()
981 struct bio *bio; in iocb_bio_iopoll() local
1005 bio = READ_ONCE(kiocb->private); in iocb_bio_iopoll()
1006 if (bio) in iocb_bio_iopoll()
1007 ret = bio_poll(bio, iob, flags); in iocb_bio_iopoll()
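bio_poll() drives completion of a polled bio from the submitter's context, and iocb_bio_iopoll() recovers the bio from kiocb->private to do the same for async callers. The synchronous pattern looks roughly like this (simplified sketch; the flag and handler names are illustrative, and the caller still owns and frees the bio):

	#include <linux/blkdev.h>

	static void polled_end_io(struct bio *bio)
	{
		WRITE_ONCE(*(bool *)bio->bi_private, true);
	}

	static void submit_and_poll(struct bio *bio)
	{
		bool done = false;

		bio->bi_opf |= REQ_POLLED;
		bio->bi_private = &done;
		bio->bi_end_io = polled_end_io;
		submit_bio(bio);

		while (!READ_ONCE(done))
			bio_poll(bio, NULL, 0);
	}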
1048 unsigned long bio_start_io_acct(struct bio *bio) in bio_start_io_acct() argument
1050 return bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies); in bio_start_io_acct()
1071 void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time, in bio_end_io_acct_remapped() argument
1074 bdev_end_io_acct(orig_bdev, bio_op(bio), bio_sectors(bio), start_time); in bio_end_io_acct_remapped()
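bio_start_io_acct() and bio_end_io_acct_remapped() exist for bio-based stacking drivers that remap bi_bdev before resubmitting: the I/O is accounted against the device the bio was originally sent to. A dm-style sketch (lower_bdev, start_sector, and the saved state are hypothetical driver fields):

	#include <linux/blkdev.h>

	static void remap_and_resubmit(struct bio *bio,
				       struct block_device *lower_bdev,
				       sector_t start_sector,
				       unsigned long *start_time,
				       struct block_device **orig_bdev)
	{
		*orig_bdev = bio->bi_bdev;
		*start_time = bio_start_io_acct(bio);

		bio_set_dev(bio, lower_bdev);
		bio->bi_iter.bi_sector += start_sector;
		submit_bio_noacct(bio);
		/* completion path: bio_end_io_acct_remapped(bio, *start_time, *orig_bdev); */
	}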
1271 sizeof_field(struct bio, bi_opf)); in blk_dev_init()