/* SPDX-License-Identifier: GPL-2.0 */

#include "kublk.h"

#ifndef IORING_NOP_INJECT_RESULT
#define IORING_NOP_INJECT_RESULT	(1U << 0)
#endif

#ifndef IORING_NOP_FIXED_BUFFER
#define IORING_NOP_FIXED_BUFFER	(1U << 3)
#endif

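/*
 * Build a 250GiB null target. Only basic/dma/segment parameters are
 * advertised; no data is ever stored. When zero-copy is supported, the
 * SQ/CQ depths are doubled, leaving ring space for the extra buffer
 * register/unregister SQEs each zero-copy I/O issues.
 */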
static int ublk_null_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
{
	const struct ublksrv_ctrl_dev_info *info = &dev->dev_info;
	unsigned long dev_size = 250UL << 30;

	dev->tgt.dev_size = dev_size;
	dev->tgt.params = (struct ublk_params) {
		.types = UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DMA_ALIGN |
			UBLK_PARAM_TYPE_SEGMENT,
		.basic = {
			.logical_bs_shift	= 9,
			.physical_bs_shift	= 12,
			.io_opt_shift		= 12,
			.io_min_shift		= 9,
			.max_sectors		= info->max_io_buf_bytes >> 9,
			.dev_sectors		= dev_size >> 9,
		},
		.dma = {
			.alignment		= 4095,
		},
		.seg = {
			.seg_boundary_mask	= 4095,
			.max_segment_size	= 32 << 10,
			.max_segments		= 32,
		},
	};

	if (info->flags & UBLK_F_SUPPORT_ZERO_COPY)
		dev->tgt.sq_depth = dev->tgt.cq_depth = 2 * info->queue_depth;
	return 0;
}

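/*
 * Prepare a NOP SQE that emulates the data transfer: it consumes the
 * fixed buffer slot for @tag and injects the I/O byte count as its CQE
 * result, without copying any data.
 */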
static void __setup_nop_io(int tag, const struct ublksrv_io_desc *iod,
			   struct io_uring_sqe *sqe, int q_id)
{
	unsigned ublk_op = ublksrv_get_op(iod);

	io_uring_prep_nop(sqe);
	sqe->buf_index = tag;
	sqe->flags |= IOSQE_FIXED_FILE;
	sqe->rw_flags = IORING_NOP_FIXED_BUFFER | IORING_NOP_INJECT_RESULT;
	sqe->len = iod->nr_sectors << 9; /* injected result */
	sqe->user_data = build_user_data(tag, ublk_op, 0, q_id, 1);
}

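/*
 * Zero-copy path: hardlink three SQEs so they run in order: register
 * the request buffer, issue the NOP "transfer", then unregister the
 * buffer.
 */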
static int null_queue_zc_io(struct ublk_thread *t, struct ublk_queue *q,
			    int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	struct io_uring_sqe *sqe[3];

	ublk_io_alloc_sqes(t, sqe, 3);

	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
	sqe[0]->user_data = build_user_data(tag,
			ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
	sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;

	__setup_nop_io(tag, iod, sqe[1], q->q_id);
	sqe[1]->flags |= IOSQE_IO_HARDLINK;

	io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
	sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);

	/* buf register is marked as IOSQE_CQE_SKIP_SUCCESS */
	return 2;
}

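/*
 * Auto buffer registration path: the kernel registers the buffer
 * itself, so only the NOP "transfer" SQE is needed here.
 */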
static int null_queue_auto_zc_io(struct ublk_thread *t, struct ublk_queue *q,
				 int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	struct io_uring_sqe *sqe[1];

	ublk_io_alloc_sqes(t, sqe, 1);
	__setup_nop_io(tag, iod, sqe[0], q->q_id);
	return 1;
}

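/*
 * Per-CQE completion. Record the first meaningful result (a successful
 * unregister must not overwrite the NOP's injected byte count), count
 * the register op's CQE when it does arrive (i.e. on failure, since
 * success is skipped), and complete the ublk I/O once every queued
 * target SQE has finished.
 */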
static void ublk_null_io_done(struct ublk_thread *t, struct ublk_queue *q,
			      const struct io_uring_cqe *cqe)
{
	unsigned tag = user_data_to_tag(cqe->user_data);
	unsigned op = user_data_to_op(cqe->user_data);
	struct ublk_io *io = ublk_get_io(q, tag);

	if (cqe->res < 0 || op != ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF)) {
		if (!io->result)
			io->result = cqe->res;
		if (cqe->res < 0)
			ublk_err("%s: io failed op %x user_data %lx\n",
					__func__, op, cqe->user_data);
	}

	/* buffer register op is IOSQE_CQE_SKIP_SUCCESS */
	if (op == ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF))
		io->tgt_ios += 1;

	if (ublk_completed_tgt_io(t, q, tag))
		ublk_complete_io(t, q, tag, io->result);
}

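/*
 * Queue one ublk I/O: take the auto-zc or zc path when enabled,
 * otherwise complete immediately with the full byte count since a null
 * target has nothing to transfer.
 */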
static int ublk_null_queue_io(struct ublk_thread *t, struct ublk_queue *q,
			      int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	unsigned auto_zc = ublk_queue_use_auto_zc(q);
	unsigned zc = ublk_queue_use_zc(q);
	int queued;

	if (auto_zc && !ublk_io_auto_zc_fallback(iod))
		queued = null_queue_auto_zc_io(t, q, tag);
	else if (zc)
		queued = null_queue_zc_io(t, q, tag);
	else {
		ublk_complete_io(t, q, tag, iod->nr_sectors << 9);
		return 0;
	}
	ublk_queued_tgt_io(t, q, tag, queued);
	return 0;
}

/*
 * Return an invalid buffer index to trigger auto buffer register
 * failure, so that the UBLK_IO_RES_NEED_REG_BUF handling path is
 * covered.
 */
static unsigned short ublk_null_buf_index(const struct ublk_queue *q, int tag)
{
	if (ublk_queue_auto_zc_fallback(q))
		return (unsigned short)-1;
	return q->ios[tag].buf_index;
}

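/* ops table for the "null" target */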
const struct ublk_tgt_ops null_tgt_ops = {
	.name = "null",
	.init_tgt = ublk_null_tgt_init,
	.queue_io = ublk_null_queue_io,
	.tgt_io_done = ublk_null_io_done,
	.buf_index = ublk_null_buf_index,
};