// SPDX-License-Identifier: GPL-2.0-only
/*
 *  QLogic FCoE Offload Driver
 *  Copyright (c) 2016-2018 QLogic Corporation
 */
#ifdef CONFIG_DEBUG_FS

#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/module.h>

#include "qedf.h"
#include "qedf_dbg.h"

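/*
 * All qedf debugfs nodes hang off a single driver-level directory created
 * in qedf_dbg_init() (typically /sys/kernel/debug/qedf), with one
 * "host<N>" subdirectory per PF created in qedf_dbg_host_init().
 */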
static struct dentry *qedf_dbg_root;

/*
 * qedf_dbg_host_init - setup the debugfs file for the pf
 */
void
qedf_dbg_host_init(struct qedf_dbg_ctx *qedf,
		    const struct qedf_debugfs_ops *dops,
		    const struct file_operations *fops)
{
	char host_dirname[32];

	QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Creating debugfs host node\n");
	/* create pf dir */
	sprintf(host_dirname, "host%u", qedf->host_no);
	qedf->bdf_dentry = debugfs_create_dir(host_dirname, qedf_dbg_root);

	/* create debugfs files */
	while (dops) {
		if (!(dops->name))
			break;

		debugfs_create_file(dops->name, 0600, qedf->bdf_dentry, qedf,
				    fops);
		dops++;
		fops++;
	}
}

/*
 * qedf_dbg_host_exit - clear out the pf's debugfs entries
 */
void
qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf_dbg)
{
	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS,
		  "Destroying debugfs host entry\n");
	/* remove debugfs entries of this PF */
	debugfs_remove_recursive(qedf_dbg->bdf_dentry);
	qedf_dbg->bdf_dentry = NULL;
}

/*
 * qedf_dbg_init - start up debugfs for the driver
 */
void
qedf_dbg_init(char *drv_name)
{
	QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Creating debugfs root node\n");

	/* create qed dir in root of debugfs. NULL means debugfs root */
	qedf_dbg_root = debugfs_create_dir(drv_name, NULL);
}

/*
 * qedf_dbg_exit - clean out the driver's debugfs entries
 */
void
qedf_dbg_exit(void)
{
	QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Destroying debugfs root entry\n");

	/* remove qed dir in root of debugfs */
	debugfs_remove_recursive(qedf_dbg_root);
	qedf_dbg_root = NULL;
}

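/*
 * Per-host debugfs nodes. qedf_dbg_host_init() walks this table and
 * qedf_dbg_fops[] (defined at the bottom of this file) in lockstep, so the
 * two arrays must list the nodes in the same order and both end with a
 * NULL sentinel entry.
 */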
const struct qedf_debugfs_ops qedf_debugfs_ops[] = {
	{ "fp_int", NULL },
	{ "io_trace", NULL },
	{ "debug", NULL },
	{ "stop_io_on_error", NULL },
	{ "driver_stats", NULL },
	{ "clear_stats", NULL },
	{ "offload_stats", NULL },
	/* This must be last */
	{ NULL, NULL }
};

DECLARE_PER_CPU(struct qedf_percpu_iothread_s, qedf_percpu_iothreads);

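/*
 * "fp_int" node: reading dumps the number of fastpath I/O completions
 * handled per queue; writes are accepted but ignored. Example (path assumes
 * debugfs is mounted at /sys/kernel/debug and the root dir is named "qedf"):
 *
 *   # cat /sys/kernel/debug/qedf/host0/fp_int
 */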
static ssize_t
qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count,
			 loff_t *ppos)
{
	ssize_t ret;
	size_t cnt = 0;
	int id;
	char *cbuf;
	struct qedf_fastpath *fp = NULL;
	struct qedf_dbg_ctx *qedf_dbg =
				(struct qedf_dbg_ctx *)filp->private_data;
	struct qedf_ctx *qedf = container_of(qedf_dbg,
	    struct qedf_ctx, dbg_ctx);

	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");

	/* Format into a kernel buffer; 'buffer' is a userspace pointer. */
	cbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cbuf)
		return -ENOMEM;

	cnt += scnprintf(cbuf + cnt, PAGE_SIZE - cnt,
			 "\nFastpath I/O completions\n\n");

	for (id = 0; id < qedf->num_queues; id++) {
		fp = &(qedf->fp_array[id]);
		if (fp->sb_id == QEDF_SB_ID_NULL)
			continue;
		cnt += scnprintf(cbuf + cnt, PAGE_SIZE - cnt, "#%d: %lu\n",
				 id, fp->completions);
	}

	ret = simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
	kfree(cbuf);
	return ret;
}

static ssize_t
qedf_dbg_fp_int_cmd_write(struct file *filp, const char __user *buffer,
			  size_t count, loff_t *ppos)
{
	if (!count || *ppos)
		return 0;

	return count;
}

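/*
 * "debug" node: reading shows the current qedf_debug message mask; writing
 * a decimal value replaces it, with the special value 1 selecting
 * QEDF_DEFAULT_LOG_MASK.
 */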
static ssize_t
qedf_dbg_debug_cmd_read(struct file *filp, char __user *buffer, size_t count,
			loff_t *ppos)
{
	int cnt;
	char cbuf[32];
	struct qedf_dbg_ctx *qedf_dbg =
				(struct qedf_dbg_ctx *)filp->private_data;

	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "debug mask=0x%x\n", qedf_debug);
	cnt = scnprintf(cbuf, sizeof(cbuf), "debug mask = 0x%x\n", qedf_debug);

	return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
}

static ssize_t
qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer,
			 size_t count, loff_t *ppos)
{
	uint32_t val;
	void *kern_buf;
	int rval;
	struct qedf_dbg_ctx *qedf_dbg =
	    (struct qedf_dbg_ctx *)filp->private_data;

	if (!count || *ppos)
		return 0;

	/* NUL-terminate the copy so kstrtouint() sees a proper string */
	kern_buf = memdup_user_nul(buffer, count);
	if (IS_ERR(kern_buf))
		return PTR_ERR(kern_buf);

	rval = kstrtouint(kern_buf, 10, &val);
	kfree(kern_buf);
	if (rval)
		return rval;

	if (val == 1)
		qedf_debug = QEDF_DEFAULT_LOG_MASK;
	else
		qedf_debug = val;

	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Setting debug=0x%x.\n", val);
	return count;
}

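/*
 * "stop_io_on_error" node: reading reports the current value of
 * qedf->stop_io_on_error; writing "true" or "false" sets it, and writing
 * "now" sets the QEDF_DBG_STOP_IO flag to stop all I/O on this host
 * immediately.
 */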
static ssize_t
qedf_dbg_stop_io_on_error_cmd_read(struct file *filp, char __user *buffer,
				   size_t count, loff_t *ppos)
{
	int cnt;
	char cbuf[8];
	struct qedf_dbg_ctx *qedf_dbg =
				(struct qedf_dbg_ctx *)filp->private_data;
	struct qedf_ctx *qedf = container_of(qedf_dbg,
	    struct qedf_ctx, dbg_ctx);

	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
	cnt = scnprintf(cbuf, sizeof(cbuf), "%s\n",
	    qedf->stop_io_on_error ? "true" : "false");

	return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
}

static ssize_t
qedf_dbg_stop_io_on_error_cmd_write(struct file *filp,
				    const char __user *buffer, size_t count,
				    loff_t *ppos)
{
	void *kern_buf;
	struct qedf_dbg_ctx *qedf_dbg =
				(struct qedf_dbg_ctx *)filp->private_data;
	struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx,
	    dbg_ctx);

	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");

	if (!count || *ppos)
		return 0;

	/* Copy no more than the user actually wrote and NUL-terminate it */
	kern_buf = memdup_user_nul(buffer, min_t(size_t, count, 5));
	if (IS_ERR(kern_buf))
		return PTR_ERR(kern_buf);

	if (strncmp(kern_buf, "false", 5) == 0)
		qedf->stop_io_on_error = false;
	else if (strncmp(kern_buf, "true", 4) == 0)
		qedf->stop_io_on_error = true;
	else if (strncmp(kern_buf, "now", 3) == 0)
		/* Trigger from user to stop all I/O on this host */
		set_bit(QEDF_DBG_STOP_IO, &qedf->flags);

	kfree(kern_buf);
	return count;
}

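/*
 * "io_trace" node: dumps the circular I/O trace buffer (QEDF_IO_TRACE_SIZE
 * entries) as one colon-separated record per line: direction, task id,
 * port id, lun, opcode, lba, buffer length, sg count, result, jiffies,
 * refcount, the request/interrupt/response CPUs and the SGE type. Tracing
 * must be enabled via the qedf_io_tracing control.
 */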
static int
qedf_io_trace_show(struct seq_file *s, void *unused)
{
	int i, idx = 0;
	struct qedf_ctx *qedf = s->private;
	struct qedf_dbg_ctx *qedf_dbg = &qedf->dbg_ctx;
	struct qedf_io_log *io_log;
	unsigned long flags;

	if (!qedf_io_tracing) {
		seq_puts(s, "I/O tracing not enabled.\n");
		goto out;
	}

	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");

	spin_lock_irqsave(&qedf->io_trace_lock, flags);
	idx = qedf->io_trace_idx;
	for (i = 0; i < QEDF_IO_TRACE_SIZE; i++) {
		io_log = &qedf->io_trace_buf[idx];
		seq_printf(s, "%d:", io_log->direction);
		seq_printf(s, "0x%x:", io_log->task_id);
		seq_printf(s, "0x%06x:", io_log->port_id);
		seq_printf(s, "%d:", io_log->lun);
		seq_printf(s, "0x%02x:", io_log->op);
		seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0],
		    io_log->lba[1], io_log->lba[2], io_log->lba[3]);
		seq_printf(s, "%d:", io_log->bufflen);
		seq_printf(s, "%d:", io_log->sg_count);
		seq_printf(s, "0x%08x:", io_log->result);
		seq_printf(s, "%lu:", io_log->jiffies);
		seq_printf(s, "%d:", io_log->refcount);
		seq_printf(s, "%d:", io_log->req_cpu);
		seq_printf(s, "%d:", io_log->int_cpu);
		seq_printf(s, "%d:", io_log->rsp_cpu);
		seq_printf(s, "%d\n", io_log->sge_type);

		idx++;
		if (idx == QEDF_IO_TRACE_SIZE)
			idx = 0;
	}
	spin_unlock_irqrestore(&qedf->io_trace_lock, flags);

out:
	return 0;
}

static int
qedf_dbg_io_trace_open(struct inode *inode, struct file *file)
{
	struct qedf_dbg_ctx *qedf_dbg = inode->i_private;
	struct qedf_ctx *qedf = container_of(qedf_dbg,
	    struct qedf_ctx, dbg_ctx);

	return single_open(file, qedf_io_trace_show, qedf);
}

/* Based on fip_state enum from libfcoe.h */
static char *fip_state_names[] = {
	"FIP_ST_DISABLED",
	"FIP_ST_LINK_WAIT",
	"FIP_ST_AUTO",
	"FIP_ST_NON_FIP",
	"FIP_ST_ENABLED",
	"FIP_ST_VNMP_START",
	"FIP_ST_VNMP_PROBE1",
	"FIP_ST_VNMP_PROBE2",
	"FIP_ST_VNMP_CLAIM",
	"FIP_ST_VNMP_UP",
};

/* Based on fc_rport_state enum from libfc.h */
static char *fc_rport_state_names[] = {
	"RPORT_ST_INIT",
	"RPORT_ST_FLOGI",
	"RPORT_ST_PLOGI_WAIT",
	"RPORT_ST_PLOGI",
	"RPORT_ST_PRLI",
	"RPORT_ST_RTV",
	"RPORT_ST_READY",
	"RPORT_ST_ADISC",
	"RPORT_ST_DELETE",
};

static int
qedf_driver_stats_show(struct seq_file *s, void *unused)
{
	struct qedf_ctx *qedf = s->private;
	struct qedf_rport *fcport;
	struct fc_rport_priv *rdata;

	seq_printf(s, "Host WWNN/WWPN: %016llx/%016llx\n",
		   qedf->wwnn, qedf->wwpn);
	seq_printf(s, "Host NPortID: %06x\n", qedf->lport->port_id);
	seq_printf(s, "Link State: %s\n", atomic_read(&qedf->link_state) ?
	    "Up" : "Down");
	seq_printf(s, "Logical Link State: %s\n", qedf->lport->link_up ?
	    "Up" : "Down");
	seq_printf(s, "FIP state: %s\n", fip_state_names[qedf->ctlr.state]);
	seq_printf(s, "FIP VLAN ID: %d\n", qedf->vlan_id & 0xfff);
	seq_printf(s, "FIP 802.1Q Priority: %d\n", qedf->prio);
	if (qedf->ctlr.sel_fcf) {
		seq_printf(s, "FCF WWPN: %016llx\n",
			   qedf->ctlr.sel_fcf->switch_name);
		seq_printf(s, "FCF MAC: %pM\n", qedf->ctlr.sel_fcf->fcf_mac);
	} else {
		seq_puts(s, "FCF not selected\n");
	}

	seq_puts(s, "\nSGE stats:\n\n");
	seq_printf(s, "cmd_mgr free io_reqs: %d\n",
	    atomic_read(&qedf->cmd_mgr->free_list_cnt));
	seq_printf(s, "slow SGEs: %d\n", qedf->slow_sge_ios);
	seq_printf(s, "fast SGEs: %d\n\n", qedf->fast_sge_ios);

	seq_puts(s, "Offloaded ports:\n\n");

	rcu_read_lock();
	list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
		rdata = fcport->rdata;
		if (rdata == NULL)
			continue;
		seq_printf(s, "%016llx/%016llx/%06x: state=%s, free_sqes=%d, num_active_ios=%d\n",
			   rdata->rport->node_name, rdata->rport->port_name,
			   rdata->ids.port_id,
			   fc_rport_state_names[rdata->rp_state],
			   atomic_read(&fcport->free_sqes),
			   atomic_read(&fcport->num_active_ios));
	}
	rcu_read_unlock();

	return 0;
}

static int
qedf_dbg_driver_stats_open(struct inode *inode, struct file *file)
{
	struct qedf_dbg_ctx *qedf_dbg = inode->i_private;
	struct qedf_ctx *qedf = container_of(qedf_dbg,
	    struct qedf_ctx, dbg_ctx);

	return single_open(file, qedf_driver_stats_show, qedf);
}

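/*
 * "clear_stats" node: reading returns nothing; writing any value resets the
 * slow/fast SGE counters reported by the "driver_stats" node.
 */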
static ssize_t
qedf_dbg_clear_stats_cmd_read(struct file *filp, char __user *buffer,
				   size_t count, loff_t *ppos)
{
	int cnt = 0;

	/* Essentially a read stub */
	cnt = min_t(int, count, cnt - *ppos);
	*ppos += cnt;
	return cnt;
}

static ssize_t
qedf_dbg_clear_stats_cmd_write(struct file *filp,
				    const char __user *buffer, size_t count,
				    loff_t *ppos)
{
	struct qedf_dbg_ctx *qedf_dbg =
				(struct qedf_dbg_ctx *)filp->private_data;
	struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx,
	    dbg_ctx);

	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Clearing stat counters.\n");

	if (!count || *ppos)
		return 0;

	/* Clear stat counters exposed by the 'driver_stats' node */
	qedf->slow_sge_ios = 0;
	qedf->fast_sge_ios = 0;

	return count;
}

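/*
 * "offload_stats" node: queries the firmware (via qed_ops->get_stats()) for
 * the current FCoE offload counters and prints them one per line.
 */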
static int
qedf_offload_stats_show(struct seq_file *s, void *unused)
{
	struct qedf_ctx *qedf = s->private;
	struct qed_fcoe_stats *fw_fcoe_stats;

	fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL);
	if (!fw_fcoe_stats) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "Could not allocate memory for fw_fcoe_stats.\n");
		goto out;
	}

	/* Query firmware for offload stats */
	qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);

	seq_printf(s, "fcoe_rx_byte_cnt=%llu\n"
	    "fcoe_rx_data_pkt_cnt=%llu\n"
	    "fcoe_rx_xfer_pkt_cnt=%llu\n"
	    "fcoe_rx_other_pkt_cnt=%llu\n"
	    "fcoe_silent_drop_pkt_cmdq_full_cnt=%u\n"
	    "fcoe_silent_drop_pkt_crc_error_cnt=%u\n"
	    "fcoe_silent_drop_pkt_task_invalid_cnt=%u\n"
	    "fcoe_silent_drop_total_pkt_cnt=%u\n"
	    "fcoe_silent_drop_pkt_rq_full_cnt=%u\n"
	    "fcoe_tx_byte_cnt=%llu\n"
	    "fcoe_tx_data_pkt_cnt=%llu\n"
	    "fcoe_tx_xfer_pkt_cnt=%llu\n"
	    "fcoe_tx_other_pkt_cnt=%llu\n",
	    fw_fcoe_stats->fcoe_rx_byte_cnt,
	    fw_fcoe_stats->fcoe_rx_data_pkt_cnt,
	    fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt,
	    fw_fcoe_stats->fcoe_rx_other_pkt_cnt,
	    fw_fcoe_stats->fcoe_silent_drop_pkt_cmdq_full_cnt,
	    fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt,
	    fw_fcoe_stats->fcoe_silent_drop_pkt_task_invalid_cnt,
	    fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt,
	    fw_fcoe_stats->fcoe_silent_drop_pkt_rq_full_cnt,
	    fw_fcoe_stats->fcoe_tx_byte_cnt,
	    fw_fcoe_stats->fcoe_tx_data_pkt_cnt,
	    fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt,
	    fw_fcoe_stats->fcoe_tx_other_pkt_cnt);

	kfree(fw_fcoe_stats);
out:
	return 0;
}

static int
qedf_dbg_offload_stats_open(struct inode *inode, struct file *file)
{
	struct qedf_dbg_ctx *qedf_dbg = inode->i_private;
	struct qedf_ctx *qedf = container_of(qedf_dbg,
	    struct qedf_ctx, dbg_ctx);

	return single_open(file, qedf_offload_stats_show, qedf);
}

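/*
 * File operations for the per-host nodes, kept in the same order as
 * qedf_debugfs_ops[] above because qedf_dbg_host_init() pairs the two
 * arrays by index. The qedf_dbg_fileops()/qedf_dbg_fileops_seq() helpers
 * (from qedf_dbg.h) build each entry from the qedf_dbg_<name>_cmd_read/write
 * and qedf_dbg_<name>_open handlers defined above.
 */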
const struct file_operations qedf_dbg_fops[] = {
	qedf_dbg_fileops(qedf, fp_int),
	qedf_dbg_fileops_seq(qedf, io_trace),
	qedf_dbg_fileops(qedf, debug),
	qedf_dbg_fileops(qedf, stop_io_on_error),
	qedf_dbg_fileops_seq(qedf, driver_stats),
	qedf_dbg_fileops(qedf, clear_stats),
	qedf_dbg_fileops_seq(qedf, offload_stats),
	/* This must be last */
	{ },
};

#else /* CONFIG_DEBUG_FS */
void qedf_dbg_host_init(struct qedf_dbg_ctx *);
void qedf_dbg_host_exit(struct qedf_dbg_ctx *);
void qedf_dbg_init(char *);
void qedf_dbg_exit(void);
#endif /* CONFIG_DEBUG_FS */