/block/
bio-integrity.c
      82  size_t bytes = bounce_bvec->bv_len;  in bio_integrity_uncopy_user() local
      88  WARN_ON_ONCE(ret != bytes);  in bio_integrity_uncopy_user()
     233  int nr_vecs, ssize_t bytes, ssize_t offset)  in bvec_from_pages() argument
     239  size_t size = min_t(size_t, bytes, PAGE_SIZE - offset);  in bvec_from_pages()
     242  bytes -= size;  in bvec_from_pages()
     244  size_t next = min_t(size_t, PAGE_SIZE, bytes);  in bvec_from_pages()
     251  bytes -= next;  in bvec_from_pages()
     268  size_t offset, bytes = iter->count;  in bio_integrity_map_user() local
     275  if (bytes >> SECTOR_SHIFT > queue_max_hw_sectors(q))  in bio_integrity_map_user()
     380  unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);  in bio_integrity_advance() local
    [all …]
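A note on the bvec_from_pages() hits: they trace a standard page-chunking loop in which the first vector takes whatever is left of the first page (PAGE_SIZE - offset) and every later vector takes at most a whole page. A minimal user-space sketch of that arithmetic, assuming a 4 KiB page (split_into_pages is an invented name, not kernel API):

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096UL
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Model of bvec_from_pages(): walk a byte range that starts at
     * 'offset' within the first page, one "vector" per page piece. */
    static void split_into_pages(size_t bytes, size_t offset)
    {
        /* First chunk is capped by the space left in the first page. */
        size_t size = MIN(bytes, PAGE_SIZE - offset);
        int i = 0;

        printf("vec %d: offset=%zu len=%zu\n", i++, offset, size);
        bytes -= size;

        /* Later chunks start at offset 0 and take up to a page each. */
        while (bytes) {
            size_t next = MIN(PAGE_SIZE, bytes);

            printf("vec %d: offset=0 len=%zu\n", i++, next);
            bytes -= next;
        }
    }

    int main(void)
    {
        split_into_pages(10000, 300);   /* spans three pages */
        return 0;
    }

get_contig_folio_len() in the bio.c entry below follows the same first-chunk/next-chunk shape.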
|
blk-map.c
     167  unsigned int bytes = PAGE_SIZE;  in bio_copy_user_iov() local
     169  bytes -= offset;  in bio_copy_user_iov()
     171  if (bytes > len)  in bio_copy_user_iov()
     172  bytes = len;  in bio_copy_user_iov()
     192  if (bio_add_page(bio, page, bytes, offset) < bytes) {  in bio_copy_user_iov()
     198  len -= bytes;  in bio_copy_user_iov()
     401  if (bytes > len)  in bio_copy_kern()
     402  bytes = len;  in bio_copy_kern()
     411  if (bio_add_page(bio, page, bytes, 0) < bytes)  in bio_copy_kern()
     414  len -= bytes;  in bio_copy_kern()
    [all …]
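bio_copy_user_iov() and bio_copy_kern() apply the same clamp, bytes = min(PAGE_SIZE - offset, len), and treat a short return from bio_add_page() as "bio full, stop". A rough user-space analogue of that loop, assuming a toy fixed-capacity bio (toy_bio and toy_add_page are invented stand-ins, not kernel API):

    #include <stdio.h>
    #include <stdbool.h>

    #define PAGE_SIZE 4096u

    /* Toy stand-in for a bio with a byte budget. */
    struct toy_bio {
        unsigned int room;  /* bytes the bio can still accept */
    };

    /* Mirrors bio_add_page()'s contract: returns how many bytes were
     * actually added, possibly fewer than requested. */
    static unsigned int toy_add_page(struct toy_bio *bio, unsigned int bytes)
    {
        unsigned int added = bytes <= bio->room ? bytes : bio->room;

        bio->room -= added;
        return added;
    }

    /* One page per iteration; only the first page may start at a
     * non-zero offset, as in bio_copy_user_iov(). */
    static bool copy_loop(struct toy_bio *bio, unsigned int len,
                          unsigned int offset)
    {
        while (len) {
            unsigned int bytes = PAGE_SIZE - offset;

            if (bytes > len)
                bytes = len;
            if (toy_add_page(bio, bytes) < bytes)
                return false;   /* short add: the bio is full */
            len -= bytes;
            offset = 0;
        }
        return true;
    }

    int main(void)
    {
        struct toy_bio bio = { .room = 8192 };

        printf("fits: %d\n", copy_loop(&bio, 9000, 100)); /* prints 0 */
        return 0;
    }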
|
blk-merge.c
     248  const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,  in bvec_split_segs() argument
     251  unsigned max_len = max_bytes - *bytes;  in bvec_split_segs()
     267  *bytes += total_len;  in bvec_split_segs()
     298  unsigned nsegs = 0, bytes = 0;  in bio_split_rw_at() local
     309  bytes + bv.bv_len <= max_bytes &&  in bio_split_rw_at()
     312  bytes += bv.bv_len;  in bio_split_rw_at()
     314  if (bvec_split_segs(lim, &bv, &nsegs, &bytes,  in bio_split_rw_at()
     343  bytes = ALIGN_DOWN(bytes, bio_split_alignment(bio, lim));  in bio_split_rw_at()
     351  return bytes >> SECTOR_SHIFT;  in bio_split_rw_at()
     424  unsigned int bytes = 0;  in blk_recalc_rq_segments() local
    [all …]
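The bio_split_rw_at() lines show the split bookkeeping: bytes accumulate while they fit under max_bytes, the split point is rounded down to the required alignment, and the result is returned in sectors. The tail arithmetic on its own, as a small standalone model (align_down is a local helper; SECTOR_SHIFT is 9 as in the kernel):

    #include <stdio.h>

    #define SECTOR_SHIFT 9

    /* Round 'bytes' down to a multiple of 'align'; like the kernel's
     * ALIGN_DOWN(), this assumes align is a power of two. */
    static unsigned int align_down(unsigned int bytes, unsigned int align)
    {
        return bytes & ~(align - 1);
    }

    int main(void)
    {
        unsigned int bytes = 13000; /* bytes accepted before the split */
        unsigned int align = 4096;  /* e.g. a 4 KiB logical block size */

        bytes = align_down(bytes, align);
        printf("split at %u bytes = %u sectors\n",
               bytes, bytes >> SECTOR_SHIFT);
        /* prints: split at 12288 bytes = 24 sectors */
        return 0;
    }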
|
blk-crypto-internal.h
     144  void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
     145  static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)  in bio_crypt_advance() argument
     148  __bio_crypt_advance(bio, bytes);  in bio_crypt_advance()
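This header is the usual fast-path split: an inline wrapper that calls the out-of-line __bio_crypt_advance() only when the bio actually carries crypto state (the guard condition itself is elided from the listing). The shape of the pattern, sketched with invented names (ctx, __ctx_advance):

    #include <stdio.h>

    struct ctx {
        int active;         /* does this object need the slow path? */
        unsigned int pos;
    };

    /* Out-of-line worker; heavier logic would live in a .c file. */
    static void __ctx_advance(struct ctx *c, unsigned int bytes)
    {
        c->pos += bytes;
    }

    /* Inline wrapper: callers pay only a branch in the common case. */
    static inline void ctx_advance(struct ctx *c, unsigned int bytes)
    {
        if (c->active)
            __ctx_advance(c, bytes);
    }

    int main(void)
    {
        struct ctx c = { .active = 1 };

        ctx_advance(&c, 512);
        printf("pos = %u\n", c.pos); /* prints: pos = 512 */
        return 0;
    }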
|
bio.c
    1170  size_t bytes = left;  in get_contig_folio_len() local
    1179  bytes -= contig_sz;  in get_contig_folio_len()
    1181  size_t next = min_t(size_t, PAGE_SIZE, bytes);  in get_contig_folio_len()
    1188  bytes -= next;  in get_contig_folio_len()
    1424  void __bio_advance(struct bio *bio, unsigned bytes)  in __bio_advance() argument
    1427  bio_integrity_advance(bio, bytes);  in __bio_advance()
    1429  bio_crypt_advance(bio, bytes);  in __bio_advance()
    1430  bio_advance_iter(bio, &bio->bi_iter, bytes);  in __bio_advance()
    1444  memcpy(dst_buf, src_buf, bytes);  in bio_copy_data_iter()
    1449  bio_advance_iter_single(src, src_iter, bytes);  in bio_copy_data_iter()
    [all …]
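bio_copy_data_iter() (lines 1444 and 1449 above) pairs each memcpy() with an advance of both the source and destination iterators by the same byte count, while __bio_advance() moves every per-bio cursor (integrity, crypto, bi_iter) in lockstep. The two-cursor copy loop in plain user-space C, with flat buffers standing in for bvec iterators:

    #include <stdio.h>
    #include <string.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Copy in bounded chunks, advancing both cursors together, the way
     * bio_copy_data_iter() advances src and dst per memcpy(). */
    static void copy_in_chunks(char *dst, const char *src, size_t len,
                               size_t chunk)
    {
        size_t src_pos = 0, dst_pos = 0;

        while (len) {
            size_t bytes = MIN(chunk, len);

            memcpy(dst + dst_pos, src + src_pos, bytes);
            src_pos += bytes;   /* cf. bio_advance_iter_single(src) */
            dst_pos += bytes;   /* cf. bio_advance_iter_single(dst) */
            len -= bytes;
        }
    }

    int main(void)
    {
        char dst[16] = {0};

        copy_in_chunks(dst, "hello, block io", 16, 4);
        printf("%s\n", dst);
        return 0;
    }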
|
blk-crypto.c
     154  void __bio_crypt_advance(struct bio *bio, unsigned int bytes)  in __bio_crypt_advance() argument
     159  bytes >> bc->bc_key->data_unit_size_bits);  in __bio_crypt_advance()
     167  unsigned int bytes,  in bio_crypt_dun_is_contiguous() argument
     171  unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;  in bio_crypt_dun_is_contiguous()
     380  memcpy(blk_key->bytes, key_bytes, key_size);  in blk_crypto_init_key()
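The shift by data_unit_size_bits (lines 159 and 171) converts a byte count into whole crypto data units; the DUN then advances by that many units. The conversion on its own, assuming a 4096-byte data unit (12 bits) as an example value:

    #include <stdio.h>

    int main(void)
    {
        unsigned int data_unit_size_bits = 12;  /* 4096-byte units */
        unsigned long long dun = 100;           /* data unit number */
        unsigned int bytes = 3 * 4096;

        /* Advance by whole data units, as __bio_crypt_advance() does
         * via bytes >> bc->bc_key->data_unit_size_bits. */
        dun += bytes >> data_unit_size_bits;
        printf("dun = %llu\n", dun);            /* prints: dun = 103 */
        return 0;
    }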
|
blk-cgroup.c
     627  dst->bytes[i] = src->bytes[i];  in blkg_iostat_set()
    1031  dst->bytes[i] += src->bytes[i];  in blkg_iostat_add()
    1041  dst->bytes[i] -= src->bytes[i];  in blkg_iostat_sub()
    1181  tmp.bytes[BLKG_IOSTAT_READ] +=  in blkcg_fill_root_iostats()
    1183  tmp.bytes[BLKG_IOSTAT_WRITE] +=  in blkcg_fill_root_iostats()
    1185  tmp.bytes[BLKG_IOSTAT_DISCARD] +=  in blkcg_fill_root_iostats()
    1216  rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];  in blkcg_print_one_stat()
    1217  wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];  in blkcg_print_one_stat()
    1218  dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];  in blkcg_print_one_stat()
    2230  bis->cur.bytes[rwd] += bio->bi_iter.bi_size;  in blk_cgroup_bio_start()
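The blkg_iostat helpers treat bytes[] as a small fixed array indexed by I/O type and combine entries element-wise. A standalone rendering of that accumulation (the enum mirrors BLKG_IOSTAT_* in spirit; the structs and names here are local inventions):

    #include <stdio.h>
    #include <stdint.h>

    enum { IOSTAT_READ, IOSTAT_WRITE, IOSTAT_DISCARD, IOSTAT_NR };

    struct iostat {
        uint64_t bytes[IOSTAT_NR];
    };

    /* Element-wise accumulation, like blkg_iostat_add(). */
    static void iostat_add(struct iostat *dst, const struct iostat *src)
    {
        for (int i = 0; i < IOSTAT_NR; i++)
            dst->bytes[i] += src->bytes[i];
    }

    int main(void)
    {
        struct iostat total = {0};
        struct iostat cpu0 = { .bytes = { 4096, 8192, 0 } };
        struct iostat cpu1 = { .bytes = { 512, 0, 4096 } };

        /* Fold per-CPU deltas into one total, as the stat flush does. */
        iostat_add(&total, &cpu0);
        iostat_add(&total, &cpu1);
        printf("read=%llu write=%llu discard=%llu\n",
               (unsigned long long)total.bytes[IOSTAT_READ],
               (unsigned long long)total.bytes[IOSTAT_WRITE],
               (unsigned long long)total.bytes[IOSTAT_DISCARD]);
        return 0;
    }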
|
blk-crypto-fallback.c
     122  err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->bytes,  in blk_crypto_fallback_keyslot_program()
     244  u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];  member
     315  iv.bytes);  in blk_crypto_fallback_encrypt_bio()
     416  iv.bytes);  in blk_crypto_fallback_decrypt_bio()
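The bytes[BLK_CRYPTO_MAX_IV_SIZE] member at line 244 is the raw byte view of the IV that gets handed to the cipher (lines 315 and 416), sitting alongside an integer view used to fill in the DUN; mainline expresses this as a union. A compact model of that dual view (toy_iv and its sizes are illustrative, not the kernel's):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Two views of the same IV storage: integer lanes for writing the
     * data unit number, a flat byte array for the cipher call. */
    union toy_iv {
        uint64_t dun[2];
        uint8_t bytes[16];
    };

    int main(void)
    {
        union toy_iv iv;

        memset(&iv, 0, sizeof(iv));
        iv.dun[0] = 103;    /* data unit number for this block */

        /* iv.bytes now carries the same 16 bytes, ready to pass to a
         * cipher the way the fallback passes iv.bytes to skcipher. */
        for (size_t i = 0; i < sizeof(iv.bytes); i++)
            printf("%02x", iv.bytes[i]);
        printf("\n");
        return 0;
    }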
|
bfq-cgroup.c
     350  blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));  in bfqg_stats_update_legacy_io()
     438  blkg_rwstat_exit(&stats->bytes);  in bfqg_stats_exit()
     457  if (blkg_rwstat_init(&stats->bytes, gfp) ||  in bfqg_stats_init()
    1171  u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);  in bfqg_prfill_sectors()
    1189  offsetof(struct bfq_group, stats.bytes), &tmp);  in bfqg_prfill_sectors_recursive()
    1270  .private = offsetof(struct bfq_group, stats.bytes),
    1313  .private = offsetof(struct bfq_group, stats.bytes),
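The .private = offsetof(struct bfq_group, stats.bytes) entries show how one generic print callback serves many counters: the cftype stores only a byte offset, and the callback reads the member through it. The trick in miniature (struct names invented):

    #include <stdio.h>
    #include <stddef.h>

    struct stats { unsigned long long bytes, ios; };
    struct group { int weight; struct stats stats; };

    /* Generic reader: locate any counter inside a group from a byte
     * offset, the way bfqg_prfill_sectors() uses its .private value. */
    static unsigned long long read_stat(const struct group *g, size_t off)
    {
        return *(const unsigned long long *)((const char *)g + off);
    }

    int main(void)
    {
        struct group g = { .stats = { .bytes = 123456, .ios = 42 } };

        printf("%llu\n", read_stat(&g, offsetof(struct group, stats.bytes)));
        printf("%llu\n", read_stat(&g, offsetof(struct group, stats.ios)));
        return 0;
    }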
|
blk-throttle.c
     711  long long *bytes, int *ios)  in __tg_update_carryover() argument
     741  *bytes = bytes_allowed - tg->bytes_disp[rw];  in __tg_update_carryover()
     749  tg->bytes_disp[rw] = -*bytes;  in __tg_update_carryover()
     755  long long bytes[2] = {0};  in tg_update_carryover() local
     758  __tg_update_carryover(tg, READ, &bytes[READ], &ios[READ]);  in tg_update_carryover()
     759  __tg_update_carryover(tg, WRITE, &bytes[WRITE], &ios[WRITE]);  in tg_update_carryover()
     763  bytes[READ], bytes[WRITE], ios[READ], ios[WRITE]);  in tg_update_carryover()
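__tg_update_carryover() computes carryover = bytes_allowed - bytes_disp, then resets the dispatched counter to -carryover so the next window starts with the credit or debt already applied. The arithmetic as a standalone function (names shortened from the kernel's):

    #include <stdio.h>

    /* Positive carryover is unused budget (credit); negative is
     * overspend (debt) charged against the next window. */
    static long long update_carryover(long long bytes_allowed,
                                      long long *bytes_disp)
    {
        long long carryover = bytes_allowed - *bytes_disp;

        /* Begin the next window already offset by the carryover. */
        *bytes_disp = -carryover;
        return carryover;
    }

    int main(void)
    {
        long long disp = 6000;  /* dispatched 6000 of an 8000 budget */
        long long c = update_carryover(8000, &disp);

        printf("carryover=%lld next bytes_disp=%lld\n", c, disp);
        /* prints: carryover=2000 next bytes_disp=-2000 */
        return 0;
    }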
|
blk-integrity.c
     184  ssize_t bytes)  in blk_rq_integrity_map_user() argument
     189  iov_iter_ubuf(&iter, rq_data_dir(rq), ubuf, bytes);  in blk_rq_integrity_map_user()
|
blk.h
     619  bool should_fail_request(struct block_device *part, unsigned int bytes);
     622  unsigned int bytes)  in should_fail_request() argument
|
blk-cgroup.h
      42  u64 bytes[BLKG_IOSTAT_NR];  member
|
blk-core.c
     505  bool should_fail_request(struct block_device *part, unsigned int bytes)  in should_fail_request() argument
     508  should_fail(&fail_make_request, bytes);  in should_fail_request()
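Together with the blk.h lines above, this is the config-gated stub pattern: the real should_fail_request() is built only when fault injection is enabled, and an inline stub compiles away otherwise. A sketch under an invented CONFIG_TOY_FAULT switch (standing in for CONFIG_FAIL_MAKE_REQUEST; the every-4th failure policy is made up):

    #include <stdbool.h>
    #include <stdio.h>

    /* Set to 0 to compile fault injection out entirely. */
    #define CONFIG_TOY_FAULT 1

    #if CONFIG_TOY_FAULT
    /* Real implementation: fail every 4th request (toy policy). */
    static bool should_fail_request(unsigned int bytes)
    {
        static unsigned int count;

        (void)bytes;
        return ++count % 4 == 0;
    }
    #else
    /* Stub: the branch at every call site folds to nothing. */
    static inline bool should_fail_request(unsigned int bytes)
    {
        (void)bytes;
        return false;
    }
    #endif

    int main(void)
    {
        for (int i = 0; i < 6; i++)
            printf("req %d: %s\n", i,
                   should_fail_request(4096) ? "FAIL" : "ok");
        return 0;
    }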
|
bfq-iosched.h
     921  struct blkg_rwstat bytes;  member
|
blk-mq.c
     829  static void blk_account_io_completion(struct request *req, unsigned int bytes)  in blk_account_io_completion() argument
     835  part_stat_add(req->part, sectors[sgrp], bytes >> 9);  in blk_account_io_completion()
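blk_account_io_completion() charges completed I/O to the per-partition sectors[] statistics with bytes >> 9, the standard bytes-to-512-byte-sectors shift:

    #include <stdio.h>

    int main(void)
    {
        unsigned int bytes = 1048576;   /* a 1 MiB request */

        /* >> 9 divides by 512, the unit of the sectors[] counters. */
        printf("%u bytes = %u sectors\n", bytes, bytes >> 9);
        /* prints: 1048576 bytes = 2048 sectors */
        return 0;
    }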
|