// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include <linux/debugfs.h>
#include "eswitch.h"

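/* Diagnostic counters reported by the device per vNIC via the
 * QUERY_VNIC_ENV command.
 */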
enum vnic_diag_counter {
	MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE,
	MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW,
	MLX5_VNIC_DIAG_COMP_EQ_OVERRUN,
	MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN,
	MLX5_VNIC_DIAG_CQ_OVERRUN,
	MLX5_VNIC_DIAG_INVALID_COMMAND,
	MLX5_VNIC_DIAG_QUOTA_EXCEEDED_COMMAND,
	MLX5_VNIC_DIAG_RX_STEERING_DISCARD,
};

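/* Query a single diagnostic counter for @vport by executing
 * QUERY_VNIC_ENV and extracting the requested field from the
 * vnic_diagnostic_statistics section of the output.
 */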
static int mlx5_esw_query_vnic_diag(struct mlx5_vport *vport, enum vnic_diag_counter counter,
				    u64 *val)
{
	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *dev = vport->dev;
	u16 vport_num = vport->vport;
	void *vnic_diag_out;
	int err;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, vport_number, vport_num);
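	/* The eswitch manager queries its own vNIC env directly; for any
	 * other vport, other_vport must be set so the command applies to
	 * vport_number rather than the issuing function.
	 */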
	if (!mlx5_esw_is_manager_vport(dev->priv.eswitch, vport_num))
		MLX5_SET(query_vnic_env_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	vnic_diag_out = MLX5_ADDR_OF(query_vnic_env_out, out, vport_env);
	switch (counter) {
	case MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, total_error_queues);
		break;
	case MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out,
				send_queue_priority_update_flow);
		break;
	case MLX5_VNIC_DIAG_COMP_EQ_OVERRUN:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, comp_eq_overrun);
		break;
	case MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, async_eq_overrun);
		break;
	case MLX5_VNIC_DIAG_CQ_OVERRUN:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, cq_overrun);
		break;
	case MLX5_VNIC_DIAG_INVALID_COMMAND:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, invalid_command);
		break;
	case MLX5_VNIC_DIAG_QUOTA_EXCEEDED_COMMAND:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, quota_exceeded_command);
		break;
	case MLX5_VNIC_DIAG_RX_STEERING_DISCARD:
		/* this is the only 64-bit counter field, hence MLX5_GET64 */
		*val = MLX5_GET64(vnic_diagnostic_statistics, vnic_diag_out,
				  nic_receive_steering_discard);
		break;
	}

	return 0;
}

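/* Read one counter and print it as a single decimal line. */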
static int __show_vnic_diag(struct seq_file *file, struct mlx5_vport *vport,
			    enum vnic_diag_counter type)
{
	u64 val = 0;
	int ret;

	ret = mlx5_esw_query_vnic_diag(vport, type, &val);
	if (ret)
		return ret;

	seq_printf(file, "%llu\n", val);
	return 0;
}

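/* One seq_file show handler per counter. file->private is the
 * struct mlx5_vport * passed to debugfs_create_file() below.
 */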
static int total_q_under_processor_handle_show(struct seq_file *file, void *priv)
{
	return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE);
}

static int send_queue_priority_update_flow_show(struct seq_file *file, void *priv)
{
	return __show_vnic_diag(file, file->private,
				MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW);
}

static int comp_eq_overrun_show(struct seq_file *file, void *priv)
{
	return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_COMP_EQ_OVERRUN);
}

static int async_eq_overrun_show(struct seq_file *file, void *priv)
{
	return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN);
}

static int cq_overrun_show(struct seq_file *file, void *priv)
{
	return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_CQ_OVERRUN);
}

static int invalid_command_show(struct seq_file *file, void *priv)
{
	return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_INVALID_COMMAND);
}

static int quota_exceeded_command_show(struct seq_file *file, void *priv)
{
	return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_QUOTA_EXCEEDED_COMMAND);
}

static int rx_steering_discard_show(struct seq_file *file, void *priv)
{
	return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_RX_STEERING_DISCARD);
}

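/* DEFINE_SHOW_ATTRIBUTE(name) generates name##_open() and the
 * name##_fops used when creating the debugfs files below.
 */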
DEFINE_SHOW_ATTRIBUTE(total_q_under_processor_handle);
DEFINE_SHOW_ATTRIBUTE(send_queue_priority_update_flow);
DEFINE_SHOW_ATTRIBUTE(comp_eq_overrun);
DEFINE_SHOW_ATTRIBUTE(async_eq_overrun);
DEFINE_SHOW_ATTRIBUTE(cq_overrun);
DEFINE_SHOW_ATTRIBUTE(invalid_command);
DEFINE_SHOW_ATTRIBUTE(quota_exceeded_command);
DEFINE_SHOW_ATTRIBUTE(rx_steering_discard);

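/* Remove the vport's debugfs tree, including the vnic_diag subtree. */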
void mlx5_esw_vport_debugfs_destroy(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	debugfs_remove_recursive(vport->dbgfs);
	vport->dbgfs = NULL;
}

/* vnic diag dir name is "pf", "ecpf" or "{vf/sf}_xxxx" */
#define VNIC_DIAG_DIR_NAME_MAX_LEN 8

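/* Create a per-vport directory under esw->dbgfs with a vnic_diag/
 * subdirectory, populated with one read-only (0444) file per counter,
 * e.g. <esw->dbgfs>/vf_0/vnic_diag/cq_overrun. Each file is created
 * only when the firmware reports the matching capability, so the set
 * of files can differ between devices.
 */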
void mlx5_esw_vport_debugfs_create(struct mlx5_eswitch *esw, u16 vport_num, bool is_sf, u16 sf_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	struct dentry *vnic_diag;
	char dir_name[VNIC_DIAG_DIR_NAME_MAX_LEN];
	int err;

	if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
		return;

	if (vport_num == MLX5_VPORT_PF) {
		strcpy(dir_name, "pf");
	} else if (vport_num == MLX5_VPORT_ECPF) {
		strcpy(dir_name, "ecpf");
	} else {
		err = snprintf(dir_name, VNIC_DIAG_DIR_NAME_MAX_LEN, "%s_%d", is_sf ? "sf" : "vf",
			       is_sf ? sf_num : vport_num - MLX5_VPORT_FIRST_VF);
		/* snprintf() returns the untruncated length, so also catch
		 * names that did not fit in dir_name.
		 */
		if (WARN_ON(err < 0 || err >= VNIC_DIAG_DIR_NAME_MAX_LEN))
			return;
	}

	vport->dbgfs = debugfs_create_dir(dir_name, esw->dbgfs);
	vnic_diag = debugfs_create_dir("vnic_diag", vport->dbgfs);

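	/* Expose each counter only when the firmware reports the matching
	 * general capability bit.
	 */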
	if (MLX5_CAP_GEN(esw->dev, vnic_env_queue_counters)) {
		debugfs_create_file("total_q_under_processor_handle", 0444, vnic_diag, vport,
				    &total_q_under_processor_handle_fops);
		debugfs_create_file("send_queue_priority_update_flow", 0444, vnic_diag, vport,
				    &send_queue_priority_update_flow_fops);
	}

	if (MLX5_CAP_GEN(esw->dev, eq_overrun_count)) {
		debugfs_create_file("comp_eq_overrun", 0444, vnic_diag, vport,
				    &comp_eq_overrun_fops);
		debugfs_create_file("async_eq_overrun", 0444, vnic_diag, vport,
				    &async_eq_overrun_fops);
	}

	if (MLX5_CAP_GEN(esw->dev, vnic_env_cq_overrun))
		debugfs_create_file("cq_overrun", 0444, vnic_diag, vport, &cq_overrun_fops);

	if (MLX5_CAP_GEN(esw->dev, invalid_command_count))
		debugfs_create_file("invalid_command", 0444, vnic_diag, vport,
				    &invalid_command_fops);

	if (MLX5_CAP_GEN(esw->dev, quota_exceeded_count))
		debugfs_create_file("quota_exceeded_command", 0444, vnic_diag, vport,
				    &quota_exceeded_command_fops);

	if (MLX5_CAP_GEN(esw->dev, nic_receive_steering_discard))
		debugfs_create_file("rx_steering_discard", 0444, vnic_diag, vport,
				    &rx_steering_discard_fops);
}