// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efct_driver.h"
#include "efct_unsol.h"

#define frame_printf(efct, hdr, fmt, ...) \
	do { \
		char s_id_text[16]; \
		efc_node_fcid_display(ntoh24((hdr)->fh_s_id), \
			s_id_text, sizeof(s_id_text)); \
		efc_log_debug(efct, "[%06x.%s] %02x/%04x/%04x: " fmt, \
			ntoh24((hdr)->fh_d_id), s_id_text, \
			(hdr)->fh_r_ctl, be16_to_cpu((hdr)->fh_ox_id), \
			be16_to_cpu((hdr)->fh_rx_id), ##__VA_ARGS__); \
	} while (0)
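
/*
 * Illustrative use of frame_printf() (hdr and len are hypothetical
 * locals): a call such as
 *
 *	frame_printf(efct, hdr, "dropping frame, len %zu\n", len);
 *
 * logs the frame's d_id/s_id pair plus its r_ctl/ox_id/rx_id ahead
 * of the message.
 */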

static struct efct_node *
efct_node_find(struct efct *efct, u32 port_id, u32 node_id)
{
	struct efct_node *node;
	u64 id = (u64)port_id << 32 | node_id;

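	/*
	 * The lookup key packs both FC IDs into one 64-bit value:
	 * bits 63:32 carry the local port_id (the frame's d_id) and
	 * bits 31:0 carry the remote node_id (the frame's s_id).
	 */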
	/*
	 * During node shutdown, the lookup entry is removed first,
	 * before the backend is notified, so no new I/Os can be
	 * started on a node that is going away.
	 */
	/* Find the target node for this s_id/d_id pair */
	node = xa_load(&efct->lookup, id);
	if (node)
		kref_get(&node->ref);

	return node;
}

static int
efct_dispatch_frame(struct efct *efct, struct efc_hw_sequence *seq)
{
	struct efct_node *node;
	struct fc_frame_header *hdr;
	u32 s_id, d_id;

	hdr = seq->header->dma.virt;

	/* extract the s_id and d_id */
	s_id = ntoh24(hdr->fh_s_id);
	d_id = ntoh24(hdr->fh_d_id);

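	/*
	 * Only FCP and BLS (ABTS) frames are handled here; anything
	 * else is rejected so that efct_unsolicited_cb() forwards the
	 * frame to the discovery library instead.
	 */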
	if (!(hdr->fh_type == FC_TYPE_FCP || hdr->fh_type == FC_TYPE_BLS))
		return -EIO;

	if (hdr->fh_type == FC_TYPE_FCP) {
		node = efct_node_find(efct, d_id, s_id);
		if (!node) {
			efc_log_err(efct,
				    "Node not found, drop cmd d_id:%x s_id:%x\n",
				    d_id, s_id);
			efct_hw_sequence_free(&efct->hw, seq);
			return 0;
		}

		efct_dispatch_fcp_cmd(node, seq);
	} else {
		node = efct_node_find(efct, d_id, s_id);
		if (!node) {
			efc_log_err(efct, "ABTS: Node not found, d_id:%x s_id:%x\n",
				    d_id, s_id);
			return -EIO;
		}

		efc_log_err(efct, "Received ABTS for Node:%p\n", node);
		efct_node_recv_abts_frame(node, seq);
	}

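	/* Drop the reference taken in efct_node_find() and free the frame */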
	kref_put(&node->ref, node->release);
	efct_hw_sequence_free(&efct->hw, seq);
	return 0;
}

int
efct_unsolicited_cb(void *arg, struct efc_hw_sequence *seq)
{
	struct efct *efct = arg;

	/* Process FCP commands and BLS (ABTS) frames locally */
	if (!efct_dispatch_frame(efct, seq))
		return 0;

	/* Forward everything else to the discovery lib */
	efc_dispatch_frame(efct->efcport, seq);
	return 0;
}

static int
efct_fc_tmf_rejected_cb(struct efct_io *io,
			enum efct_scsi_io_status scsi_status,
			u32 flags, void *arg)
{
	efct_scsi_io_free(io);
	return 0;
}

static void
efct_dispatch_unsol_tmf(struct efct_io *io, u8 tm_flags, u32 lun)
{
	u32 i;
	struct {
		u32 mask;
		enum efct_scsi_tmf_cmd cmd;
	} tmflist[] = {
	{FCP_TMF_ABT_TASK_SET, EFCT_SCSI_TMF_ABORT_TASK_SET},
	{FCP_TMF_CLR_TASK_SET, EFCT_SCSI_TMF_CLEAR_TASK_SET},
	{FCP_TMF_LUN_RESET, EFCT_SCSI_TMF_LOGICAL_UNIT_RESET},
	{FCP_TMF_TGT_RESET, EFCT_SCSI_TMF_TARGET_RESET},
	{FCP_TMF_CLR_ACA, EFCT_SCSI_TMF_CLEAR_ACA} };

	io->exp_xfer_len = 0;

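	/*
	 * FCP permits only one task management flag to be set per
	 * command, so dispatch the first flag that matches and stop.
	 */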
	for (i = 0; i < ARRAY_SIZE(tmflist); i++) {
		if (tmflist[i].mask & tm_flags) {
			io->tmf_cmd = tmflist[i].cmd;
			efct_scsi_recv_tmf(io, lun, tmflist[i].cmd, NULL, 0);
			break;
		}
	}
	if (i == ARRAY_SIZE(tmflist)) {
		/* Not handled */
		efc_log_err(io->node->efct, "TMF x%x rejected\n", tm_flags);
		efct_scsi_send_tmf_resp(io, EFCT_SCSI_TMF_FUNCTION_REJECTED,
					NULL, efct_fc_tmf_rejected_cb, NULL);
	}
}

static int
efct_validate_fcp_cmd(struct efct *efct, struct efc_hw_sequence *seq)
{
	/*
	 * If we received less than FCP_CMND_IU bytes, assume that the
	 * frame is corrupted in some way and drop it. This was seen
	 * when jamming the FCTL fill bytes field.
	 */
	if (seq->payload->dma.len < sizeof(struct fcp_cmnd)) {
		struct fc_frame_header *fchdr = seq->header->dma.virt;

		efc_log_debug(efct,
			      "drop ox_id %04x payload (%zd) less than (%zd)\n",
			      be16_to_cpu(fchdr->fh_ox_id),
			      seq->payload->dma.len, sizeof(struct fcp_cmnd));
		return -EIO;
	}
	return 0;
}

static void
efct_populate_io_fcp_cmd(struct efct_io *io, struct fcp_cmnd *cmnd,
			 struct fc_frame_header *fchdr, bool sit)
{
	io->init_task_tag = be16_to_cpu(fchdr->fh_ox_id);
	/* note: tgt_task_tag and hw_tag are set when the HW IO is allocated */
	io->exp_xfer_len = be32_to_cpu(cmnd->fc_dl);
	io->transferred = 0;

	/* The upper 7 bits of CS_CTL is the frame priority through the
	 * SAN. Our assertion here is that the priority given to a frame
	 * containing the FCP cmd should be the priority given to ALL
	 * frames contained in that IO. Thus we need to save the incoming
	 * CS_CTL here.
	 */
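	/* F_CTL bit 17 is the CS_CTL/Priority Enable bit in FC-FS */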
	if (ntoh24(fchdr->fh_f_ctl) & FC_FC_RES_B17)
		io->cs_ctl = fchdr->fh_cs_ctl;
	else
		io->cs_ctl = 0;

	io->seq_init = sit;
}

static u32
efct_get_flags_fcp_cmd(struct fcp_cmnd *cmnd)
{
	u32 flags = 0;

	switch (cmnd->fc_pri_ta & FCP_PTA_MASK) {
	case FCP_PTA_SIMPLE:
		flags |= EFCT_SCSI_CMD_SIMPLE;
		break;
	case FCP_PTA_HEADQ:
		flags |= EFCT_SCSI_CMD_HEAD_OF_QUEUE;
		break;
	case FCP_PTA_ORDERED:
		flags |= EFCT_SCSI_CMD_ORDERED;
		break;
	case FCP_PTA_ACA:
		flags |= EFCT_SCSI_CMD_ACA;
		break;
	}
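	/*
	 * Direction flags are from the target's point of view: an
	 * initiator write (WRDATA) means data flows in to us, and an
	 * initiator read (RDDATA) means data flows out to the wire.
	 */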
	if (cmnd->fc_flags & FCP_CFL_WRDATA)
		flags |= EFCT_SCSI_CMD_DIR_IN;
	if (cmnd->fc_flags & FCP_CFL_RDDATA)
		flags |= EFCT_SCSI_CMD_DIR_OUT;

	return flags;
}

static void
efct_sframe_common_send_cb(void *arg, u8 *cqe, int status)
{
	struct efct_hw_send_frame_context *ctx = arg;
	struct efct_hw *hw = ctx->hw;

	/* Free WQ completion callback */
	efct_hw_reqtag_free(hw, ctx->wqcb);

	/* Free sequence */
	efct_hw_sequence_free(hw, ctx->seq);
}

static int
efct_sframe_common_send(struct efct_node *node,
			struct efc_hw_sequence *seq,
			enum fc_rctl r_ctl, u32 f_ctl,
			u8 type, void *payload, u32 payload_len)
{
	struct efct *efct = node->efct;
	struct efct_hw *hw = &efct->hw;
	int rc = 0;
	struct fc_frame_header *req_hdr = seq->header->dma.virt;
	struct fc_frame_header hdr;
	struct efct_hw_send_frame_context *ctx;

	u32 heap_size = seq->payload->dma.size;
	uintptr_t heap_phys_base = seq->payload->dma.phys;
	u8 *heap_virt_base = seq->payload->dma.virt;
	u32 heap_offset = 0;
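
	/*
	 * The incoming RQ payload buffer is reused as a small bump
	 * allocator: the send-frame context is carved from the start,
	 * followed by the response payload.
	 *
	 *	+-----------------------------+------------------+----
	 *	| efct_hw_send_frame_context  | response payload | ...
	 *	+-----------------------------+------------------+----
	 */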

	/* Build the FC header, reusing the exchange IDs from the request */
	memset(&hdr, 0, sizeof(hdr));
	hdr.fh_r_ctl = r_ctl;
	/* send it back to whoever sent it to us */
	memcpy(hdr.fh_d_id, req_hdr->fh_s_id, sizeof(hdr.fh_d_id));
	memcpy(hdr.fh_s_id, req_hdr->fh_d_id, sizeof(hdr.fh_s_id));
	hdr.fh_type = type;
	hton24(hdr.fh_f_ctl, f_ctl);
	hdr.fh_ox_id = req_hdr->fh_ox_id;
	hdr.fh_rx_id = req_hdr->fh_rx_id;
	hdr.fh_cs_ctl = 0;
	hdr.fh_df_ctl = 0;
	hdr.fh_seq_cnt = 0;
	hdr.fh_parm_offset = 0;

	/*
	 * send_frame_seq_id is an atomic; let it increment freely and
	 * store only the low 8 bits in hdr->fh_seq_id.
	 * atomic_add_return() yields the post-increment value, so the
	 * decrement converts it back to this frame's sequence ID.
	 */
	hdr.fh_seq_id = (u8)atomic_add_return(1, &hw->send_frame_seq_id);
	hdr.fh_seq_id--;

	/* Allocate and fill in the send frame request context */
	ctx = (void *)(heap_virt_base + heap_offset);
	heap_offset += sizeof(*ctx);
	if (heap_offset > heap_size) {
		efc_log_err(efct, "Fill send frame failed offset %d size %d\n",
			    heap_offset, heap_size);
		return -EIO;
	}

	memset(ctx, 0, sizeof(*ctx));

	/* Save sequence */
	ctx->seq = seq;

	/* Allocate a response payload DMA buffer from the heap */
	ctx->payload.phys = heap_phys_base + heap_offset;
	ctx->payload.virt = heap_virt_base + heap_offset;
	ctx->payload.size = payload_len;
	ctx->payload.len = payload_len;
	heap_offset += payload_len;
	if (heap_offset > heap_size) {
		efc_log_err(efct, "Fill send frame failed offset %d size %d\n",
			    heap_offset, heap_size);
		return -EIO;
	}

	/* Copy the payload in */
	memcpy(ctx->payload.virt, payload, payload_len);

	/* Send */
	rc = efct_hw_send_frame(&efct->hw, (void *)&hdr, FC_SOF_N3,
				FC_EOF_T, &ctx->payload, ctx,
				efct_sframe_common_send_cb, ctx);
	if (rc)
		efc_log_debug(efct, "efct_hw_send_frame failed: %d\n", rc);

	return rc;
}

static int
efct_sframe_send_fcp_rsp(struct efct_node *node, struct efc_hw_sequence *seq,
			 void *rsp, u32 rsp_len)
{
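	/*
	 * F_CTL marks us as the exchange responder sending the last
	 * sequence of the exchange, with sequence initiative
	 * transferred back to the initiator.
	 */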
	return efct_sframe_common_send(node, seq, FC_RCTL_DD_CMD_STATUS,
				      FC_FC_EX_CTX |
				      FC_FC_LAST_SEQ |
				      FC_FC_END_SEQ |
				      FC_FC_SEQ_INIT,
				      FC_TYPE_FCP,
				      rsp, rsp_len);
}

static int
efct_sframe_send_task_set_full_or_busy(struct efct_node *node,
				       struct efc_hw_sequence *seq)
{
	struct fcp_resp_with_ext fcprsp;
	struct fcp_cmnd *fcpcmd = seq->payload->dma.virt;
	int rc = 0;
	unsigned long flags = 0;
	struct efct *efct = node->efct;

	/* construct task set full or busy response */
	memset(&fcprsp, 0, sizeof(fcprsp));
	spin_lock_irqsave(&node->active_ios_lock, flags);
	fcprsp.resp.fr_status = list_empty(&node->active_ios) ?
				SAM_STAT_BUSY : SAM_STAT_TASK_SET_FULL;
	spin_unlock_irqrestore(&node->active_ios_lock, flags);
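	/* No data was moved for this command, so the residual is the full fc_dl */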
	*((u32 *)&fcprsp.ext.fr_resid) = be32_to_cpu(fcpcmd->fc_dl);

	/* send it using send_frame */
	rc = efct_sframe_send_fcp_rsp(node, seq, &fcprsp, sizeof(fcprsp));
	if (rc)
		efc_log_debug(efct, "efct_sframe_send_fcp_rsp failed %d\n", rc);

	return rc;
}

int
efct_dispatch_fcp_cmd(struct efct_node *node, struct efc_hw_sequence *seq)
{
	struct efct *efct = node->efct;
	struct fc_frame_header *fchdr = seq->header->dma.virt;
	struct fcp_cmnd *cmnd = NULL;
	struct efct_io *io = NULL;
	u32 lun;

	if (!seq->payload) {
		efc_log_err(efct, "Sequence payload is NULL.\n");
		return -EIO;
	}

	cmnd = seq->payload->dma.virt;

	/* perform FCP_CMND validation check(s) */
	if (efct_validate_fcp_cmd(efct, seq))
		return -EIO;

	lun = scsilun_to_int(&cmnd->fc_lun);
	if (lun == U32_MAX)
		return -EIO;

	io = efct_scsi_io_alloc(node);
	if (!io) {
		int rc;

		/* Use SEND_FRAME to send task set full or busy */
		rc = efct_sframe_send_task_set_full_or_busy(node, seq);
		if (rc)
			efc_log_err(efct, "Failed to send busy task: %d\n", rc);

		return rc;
	}

	io->hw_priv = seq->hw_priv;

	io->app_id = 0;

	/* RQ pair, if we got here, SIT=1 */
	efct_populate_io_fcp_cmd(io, cmnd, fchdr, true);

	if (cmnd->fc_tm_flags) {
		efct_dispatch_unsol_tmf(io, cmnd->fc_tm_flags, lun);
	} else {
		u32 flags = efct_get_flags_fcp_cmd(cmnd);

		if (cmnd->fc_flags & FCP_CFL_LEN_MASK) {
			efc_log_err(efct, "Additional CDB not supported\n");
			/* release the IO allocated above before dropping the frame */
			efct_scsi_io_free(io);
			return -EIO;
		}
		/*
		 * Can return failure for things like task set full and UAs;
		 * no need to treat as a dropped frame if rc != 0
		 */
		efct_scsi_recv_cmd(io, lun, cmnd->fc_cdb,
				   sizeof(cmnd->fc_cdb), flags);
	}

	return 0;
}

static int
efct_process_abts(struct efct_io *io, struct fc_frame_header *hdr)
{
	struct efct_node *node = io->node;
	struct efct *efct = io->efct;
	u16 ox_id = be16_to_cpu(hdr->fh_ox_id);
	u16 rx_id = be16_to_cpu(hdr->fh_rx_id);
	struct efct_io *abortio;

	/* Find IO and attempt to take a reference on it */
	abortio = efct_io_find_tgt_io(efct, node, ox_id, rx_id);

	if (abortio) {
		/* Got a reference on the IO. Hold it until the backend
		 * is notified below
		 */
		efc_log_info(node->efct, "Abort ox_id [%04x] rx_id [%04x]\n",
			     ox_id, rx_id);

		/*
		 * Save the ox_id from the ABTS as the init_task_tag in
		 * our manufactured TMF IO object
		 */
		io->display_name = "abts";
		io->init_task_tag = ox_id;
		/* don't set tgt_task_tag, don't want to confuse with XRI */

		/*
		 * Save the rx_id from the ABTS as it is needed for the
		 * BLS response, regardless of the IO context's rx_id
		 */
		io->abort_rx_id = rx_id;

		/* Call target server command abort */
		io->tmf_cmd = EFCT_SCSI_TMF_ABORT_TASK;
		efct_scsi_recv_tmf(io, abortio->tgt_io.lun,
				   EFCT_SCSI_TMF_ABORT_TASK, abortio, 0);

		/*
		 * The backend will have taken an additional reference
		 * on the IO if needed; we are done with the current
		 * reference.
		 */
		kref_put(&abortio->ref, abortio->release);
	} else {
		/*
		 * Either the IO was not found or it was freed between
		 * finding it and attempting to take the reference.
		 */
		efc_log_info(node->efct, "Abort: ox_id [%04x], IO not found\n",
			     ox_id);

		/* Send a BA_RJT */
		efct_bls_send_rjt(io, hdr);
	}
	return 0;
}

int
efct_node_recv_abts_frame(struct efct_node *node, struct efc_hw_sequence *seq)
{
	struct efct *efct = node->efct;
	struct fc_frame_header *hdr = seq->header->dma.virt;
	struct efct_io *io = NULL;

	node->abort_cnt++;
	io = efct_scsi_io_alloc(node);
	if (io) {
		io->hw_priv = seq->hw_priv;
		/* If we got this far, SIT=1 */
		io->seq_init = 1;

		/* fill out generic fields */
		io->efct = efct;
		io->node = node;
		io->cmd_tgt = true;

		efct_process_abts(io, seq->header->dma.virt);
	} else {
		efc_log_err(efct,
			    "SCSI IO allocation failed for ABTS: s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
			    ntoh24(hdr->fh_s_id), ntoh24(hdr->fh_d_id),
			    be16_to_cpu(hdr->fh_ox_id),
			    be16_to_cpu(hdr->fh_rx_id));
	}

	return 0;
}