// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe ZNS-ZBD command implementation.
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/nvme.h>
#include <linux/blkdev.h>
#include "nvmet.h"

/*
 * We set the Memory Page Size Minimum (MPSMIN) of the target controller to 0,
 * to which nvme_enable_ctrl() adds 12, giving a page_shift of 12 and thus a
 * 2^12 = 4K page size. Use the same shift of 12 when calculating the ZASL.
 */
#define NVMET_MPSMIN_SHIFT	12

static inline u8 nvmet_zasl(unsigned int zone_append_sects)
{
	/*
	 * Zone Append Size Limit (zasl) is expressed as a power of 2 value
	 * with the minimum memory page size (i.e. 12) as unit.
	 */
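	/*
	 * Example: a device limit of 1024 zone append sectors (512K) gives
	 * 1024 >> (12 - 9) = 128 minimum-page units, so zasl = ilog2(128) = 7,
	 * i.e. 2^7 * 4K = 512K.
	 */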
	return ilog2(zone_append_sects >> (NVMET_MPSMIN_SHIFT - 9));
}

static int validate_conv_zones_cb(struct blk_zone *z,
				  unsigned int i, void *data)
{
	if (z->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return -EOPNOTSUPP;
	return 0;
}

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
{
	u8 zasl = nvmet_zasl(bdev_max_zone_append_sectors(ns->bdev));
	struct gendisk *bd_disk = ns->bdev->bd_disk;
	int ret;

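	/*
	 * ZASL is reported per controller, so every zoned namespace in the
	 * subsystem must be able to honor the value already recorded for the
	 * subsystem. Refuse namespaces whose zone append limit is smaller
	 * than the current subsystem ZASL.
	 */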
	if (ns->subsys->zasl) {
		if (ns->subsys->zasl > zasl)
			return false;
	}
	ns->subsys->zasl = zasl;

	/*
	 * Generic zoned block devices may have a smaller last zone which is
	 * not supported by ZNS. Exclude zoned drives that have such smaller
	 * last zone.
	 */
	if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
		return false;
	/*
	 * ZNS does not define a conventional zone type. If the underlying
	 * device has a bitmap set indicating the existence of conventional
	 * zones, reject the device. Otherwise, use report zones to detect if
	 * the device has conventional zones.
	 */
	if (ns->bdev->bd_disk->conv_zones_bitmap)
		return false;

	ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev),
				  validate_conv_zones_cb, NULL);
	if (ret < 0)
		return false;

	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	return true;
}

void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req)
{
	u8 zasl = req->sq->ctrl->subsys->zasl;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl_zns *id;
	u16 status;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

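	/*
	 * Cap the reported ZASL at the transport MDTS so that a zone append
	 * command can never exceed the maximum data transfer size.
	 */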
	if (ctrl->ops->get_mdts)
		id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
	else
		id->zasl = zasl;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
{
	struct nvme_id_ns_zns *id_zns;
	u64 zsze;
	u16 status;
	u32 mar, mor;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id_zns = kzalloc(sizeof(*id_zns), GFP_KERNEL);
	if (!id_zns) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	status = nvmet_req_find_ns(req);
	if (status)
		goto done;

	if (!bdev_is_zoned(req->ns->bdev)) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		goto done;
	}

	if (nvmet_ns_revalidate(req->ns)) {
		mutex_lock(&req->ns->subsys->lock);
		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
		mutex_unlock(&req->ns->subsys->lock);
	}
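	/*
	 * Zone Size (ZSZE) is reported in namespace logical blocks: convert
	 * the zone size from 512B sectors to bytes (<< 9), then to LBAs.
	 */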
	zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
					req->ns->blksize_shift;
	id_zns->lbafe[0].zsze = cpu_to_le64(zsze);

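	/*
	 * MOR and MAR are 0's based values. A block layer limit of 0 means
	 * "no limit", which ZNS expresses as 0xFFFFFFFF.
	 */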
	mor = bdev_max_open_zones(req->ns->bdev);
	if (!mor)
		mor = U32_MAX;
	else
		mor--;
	id_zns->mor = cpu_to_le32(mor);

	mar = bdev_max_active_zones(req->ns->bdev);
	if (!mar)
		mar = U32_MAX;
	else
		mar--;
	id_zns->mar = cpu_to_le32(mar);

done:
	status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
	kfree(id_zns);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
		return NVME_SC_LBA_RANGE | NVME_SC_DNR;
	}

	if (out_bufsize < sizeof(struct nvme_zone_report)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

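	/* Partial Report (PR) is a single bit; only 0 and 1 are valid. */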
	switch (req->cmd->zmr.pr) {
	case 0:
	case 1:
		break;
	default:
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	switch (req->cmd->zmr.zrasf) {
	case NVME_ZRASF_ZONE_REPORT_ALL:
	case NVME_ZRASF_ZONE_STATE_EMPTY:
	case NVME_ZRASF_ZONE_STATE_IMP_OPEN:
	case NVME_ZRASF_ZONE_STATE_EXP_OPEN:
	case NVME_ZRASF_ZONE_STATE_CLOSED:
	case NVME_ZRASF_ZONE_STATE_FULL:
	case NVME_ZRASF_ZONE_STATE_READONLY:
	case NVME_ZRASF_ZONE_STATE_OFFLINE:
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	return NVME_SC_SUCCESS;
}

struct nvmet_report_zone_data {
	struct nvmet_req *req;
	u64 out_buf_offset;
	u64 out_nr_zones;
	u64 nr_zones;
	u8 zrasf;
};

static int nvmet_bdev_report_zone_cb(struct blk_zone *z, unsigned i, void *d)
{
	static const unsigned int nvme_zrasf_to_blk_zcond[] = {
		[NVME_ZRASF_ZONE_STATE_EMPTY]	 = BLK_ZONE_COND_EMPTY,
		[NVME_ZRASF_ZONE_STATE_IMP_OPEN] = BLK_ZONE_COND_IMP_OPEN,
		[NVME_ZRASF_ZONE_STATE_EXP_OPEN] = BLK_ZONE_COND_EXP_OPEN,
		[NVME_ZRASF_ZONE_STATE_CLOSED]	 = BLK_ZONE_COND_CLOSED,
		[NVME_ZRASF_ZONE_STATE_READONLY] = BLK_ZONE_COND_READONLY,
		[NVME_ZRASF_ZONE_STATE_FULL]	 = BLK_ZONE_COND_FULL,
		[NVME_ZRASF_ZONE_STATE_OFFLINE]	 = BLK_ZONE_COND_OFFLINE,
	};
	struct nvmet_report_zone_data *rz = d;

	if (rz->zrasf != NVME_ZRASF_ZONE_REPORT_ALL &&
	    z->cond != nvme_zrasf_to_blk_zcond[rz->zrasf])
		return 0;

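	/*
	 * Copy a descriptor only while it still fits in the host buffer, but
	 * keep counting matching zones so the report header stays accurate.
	 */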
	if (rz->nr_zones < rz->out_nr_zones) {
		struct nvme_zone_descriptor zdesc = { };
		u16 status;

		zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity);
		zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start);
		zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp);
		zdesc.za = z->reset ? 1 << 2 : 0;
		zdesc.zs = z->cond << 4;
		zdesc.zt = z->type;

		status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc,
					   sizeof(zdesc));
		if (status)
			return -EINVAL;

		rz->out_buf_offset += sizeof(zdesc);
	}

	rz->nr_zones++;

	return 0;
}

static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
{
	unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);

	return bdev_nr_zones(req->ns->bdev) - bdev_zone_no(req->ns->bdev, sect);
}

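/*
 * Number of zone descriptors that fit in the host buffer after the
 * nvme_zone_report header.
 */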
static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize)
{
	if (bufsize <= sizeof(struct nvme_zone_report))
		return 0;

	return (bufsize - sizeof(struct nvme_zone_report)) /
		sizeof(struct nvme_zone_descriptor);
}

static void nvmet_bdev_zone_zmgmt_recv_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
	__le64 nr_zones;
	u16 status;
	int ret;
	struct nvmet_report_zone_data rz_data = {
		.out_nr_zones = get_nr_zones_from_buf(req, out_bufsize),
		/* leave space for the report zone header */
		.out_buf_offset = sizeof(struct nvme_zone_report),
		.zrasf = req->cmd->zmr.zrasf,
		.nr_zones = 0,
		.req = req,
	};

	status = nvmet_bdev_validate_zone_mgmt_recv(req);
	if (status)
		goto out;

	if (!req_slba_nr_zones) {
		status = NVME_SC_SUCCESS;
		goto out;
	}

	ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones,
				  nvmet_bdev_report_zone_cb, &rz_data);
	if (ret < 0) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/*
	 * When partial bit is set nr_zones must indicate the number of zone
	 * descriptors actually transferred.
	 */
	if (req->cmd->zmr.pr)
		rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones);

	nr_zones = cpu_to_le64(rz_data.nr_zones);
	status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones));

out:
	nvmet_req_complete(req, status);
}

void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}

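/*
 * Map an NVMe Zone Send Action to the corresponding block layer zone
 * operation. REQ_OP_LAST is returned as an "invalid action" sentinel.
 */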
static inline enum req_op zsa_req_op(u8 zsa)
{
	switch (zsa) {
	case NVME_ZONE_OPEN:
		return REQ_OP_ZONE_OPEN;
	case NVME_ZONE_CLOSE:
		return REQ_OP_ZONE_CLOSE;
	case NVME_ZONE_FINISH:
		return REQ_OP_ZONE_FINISH;
	case NVME_ZONE_RESET:
		return REQ_OP_ZONE_RESET;
	default:
		return REQ_OP_LAST;
	}
}

static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret)
{
	switch (ret) {
	case 0:
		return NVME_SC_SUCCESS;
	case -EINVAL:
	case -EIO:
		return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
	default:
		return NVME_SC_INTERNAL;
	}
}

struct nvmet_zone_mgmt_send_all_data {
	unsigned long *zbitmap;
	struct nvmet_req *req;
};

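/*
 * Report zones callback for the emulated "select all" path: mark a zone in
 * the bitmap only if its current condition is one the requested action
 * applies to; zones in other conditions are silently skipped.
 */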
static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
{
	struct nvmet_zone_mgmt_send_all_data *data = d;

	switch (zsa_req_op(data->req->cmd->zms.zsa)) {
	case REQ_OP_ZONE_OPEN:
		switch (z->cond) {
		case BLK_ZONE_COND_CLOSED:
			break;
		default:
			return 0;
		}
		break;
	case REQ_OP_ZONE_CLOSE:
		switch (z->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
		case BLK_ZONE_COND_EXP_OPEN:
			break;
		default:
			return 0;
		}
		break;
	case REQ_OP_ZONE_FINISH:
		switch (z->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
		case BLK_ZONE_COND_EXP_OPEN:
		case BLK_ZONE_COND_CLOSED:
			break;
		default:
			return 0;
		}
		break;
	default:
		return -EINVAL;
	}

	set_bit(i, data->zbitmap);

	return 0;
}

static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
{
	struct block_device *bdev = req->ns->bdev;
	unsigned int nr_zones = bdev_nr_zones(bdev);
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;
	struct nvmet_zone_mgmt_send_all_data d = {
		.req = req,
	};

	d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
				 GFP_NOIO, bdev->bd_disk->node_id);
	if (!d.zbitmap) {
		ret = -ENOMEM;
		goto out;
	}

	/* Scan and build bitmap of the eligible zones */
	ret = blkdev_report_zones(bdev, 0, nr_zones, zmgmt_send_scan_cb, &d);
	if (ret != nr_zones) {
		if (ret > 0)
			ret = -EIO;
		goto out;
	} else {
		/* We scanned all the zones */
		ret = 0;
	}

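	/*
	 * Issue one chained zone management bio per eligible zone;
	 * submit_bio_wait() on the last bio in the chain waits for all of
	 * them to complete.
	 */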
	while (sector < bdev_nr_sectors(bdev)) {
		if (test_bit(disk_zone_no(bdev->bd_disk, sector), d.zbitmap)) {
			bio = blk_next_bio(bio, bdev, 0,
				zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
				GFP_KERNEL);
			bio->bi_iter.bi_sector = sector;
			/* This may take a while, so be nice to others */
			cond_resched();
		}
		sector += bdev_zone_sectors(bdev);
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out:
	kfree(d.zbitmap);

	return blkdev_zone_mgmt_errno_to_nvme_status(ret);
}

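/*
 * Handle the "select all" variant of Zone Management Send: resetting all
 * zones can be passed straight to blkdev_zone_mgmt(), while open, close and
 * finish must be emulated zone by zone.
 */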
static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
{
	int ret;

	switch (zsa_req_op(req->cmd->zms.zsa)) {
	case REQ_OP_ZONE_RESET:
		ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
				       get_capacity(req->ns->bdev->bd_disk),
				       GFP_KERNEL);
		if (ret < 0)
			return blkdev_zone_mgmt_errno_to_nvme_status(ret);
		break;
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return nvmet_bdev_zone_mgmt_emulate_all(req);
	default:
		/* this is needed to quiet compiler warning */
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	return NVME_SC_SUCCESS;
}

static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
	enum req_op op = zsa_req_op(req->cmd->zms.zsa);
	struct block_device *bdev = req->ns->bdev;
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	u16 status = NVME_SC_SUCCESS;
	int ret;

	if (op == REQ_OP_LAST) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
		status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
		goto out;
	}

	/* when select all bit is set slba field is ignored */
	if (req->cmd->zms.select_all) {
		status = nvmet_bdev_execute_zmgmt_send_all(req);
		goto out;
	}

	if (sect >= get_capacity(bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		goto out;
	}

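	/* The ZSLBA field must point at the first logical block of a zone. */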
	if (sect & (zone_sectors - 1)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL);
	if (ret < 0)
		status = blkdev_zone_mgmt_errno_to_nvme_status(ret);

out:
	nvmet_req_complete(req, status);
}

void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}

static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	if (bio->bi_status == BLK_STS_OK) {
		req->cqe->result.u64 =
			nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
	}

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
	nvmet_req_bio_put(req, bio);
}

void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
	const blk_opf_t opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
	u16 status = NVME_SC_SUCCESS;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	struct bio *bio;
	int sg_cnt;

	/* Request is completed on len mismatch in nvmet_check_transfer_len() */
	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
		return;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		goto out;
	}

	if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	if (nvmet_use_inline_bvec(req)) {
		bio = &req->z.inline_bio;
		bio_init(bio, req->ns->bdev, req->inline_bvec,
			 ARRAY_SIZE(req->inline_bvec), opf);
	} else {
		bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL);
	}

	bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
	bio->bi_iter.bi_sector = sect;
	bio->bi_private = req;
	if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
		bio->bi_opf |= REQ_FUA;

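	/*
	 * bio_add_zone_append_page() honors the device's zone append limits;
	 * a return shorter than the requested length means the segment could
	 * not be added to this bio.
	 */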
	for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
		struct page *p = sg_page(sg);
		unsigned int l = sg->length;
		unsigned int o = sg->offset;
		unsigned int ret;

		ret = bio_add_zone_append_page(bio, p, l, o);
		if (ret != sg->length) {
			status = NVME_SC_INTERNAL;
			goto out_put_bio;
		}
		total_len += sg->length;
	}

	if (total_len != nvmet_rw_data_len(req)) {
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
		goto out_put_bio;
	}

	submit_bio(bio);
	return;

out_put_bio:
	nvmet_req_bio_put(req, bio);
out:
	nvmet_req_complete(req, status);
}

u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_zone_append:
		req->execute = nvmet_bdev_execute_zone_append;
		return 0;
	case nvme_cmd_zone_mgmt_recv:
		req->execute = nvmet_bdev_execute_zone_mgmt_recv;
		return 0;
	case nvme_cmd_zone_mgmt_send:
		req->execute = nvmet_bdev_execute_zone_mgmt_send;
		return 0;
	default:
		return nvmet_bdev_parse_io_cmd(req);
	}
}