/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/devlink.h>
#include <net/ipv6_stubs.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en/params.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en/rep/bridge.h"
#include "en/devlink.h"
#include "fs_core.h"
#include "lib/mlx5.h"
#include "lib/devcom.h"
#include "lib/vxlan.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"
#include "en_accel/ipsec.h"
#include "en/tc/int_port.h"
#include "en/ptp.h"
#include "en/fs_ethtool.h"

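/* Representor defaults: a single channel and small SQs (at least 2^7
 * entries). Representor netdevs mostly carry slow-path traffic that missed
 * the e-switch offload rules, so they need far fewer resources than the
 * uplink.
 */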
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
	max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	strscpy(drvinfo->driver, mlx5e_rep_driver_name,
		sizeof(drvinfo->driver));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%04d (%.16s)",
		 fw_rev_maj(mdev), fw_rev_min(mdev),
		 fw_rev_sub(mdev), mdev->board_id);
}

static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

static const struct counter_desc vport_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     rx_vport_rdma_unicast_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, rx_vport_rdma_unicast_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     tx_vport_rdma_unicast_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, tx_vport_rdma_unicast_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     rx_vport_rdma_multicast_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     rx_vport_rdma_multicast_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     tx_vport_rdma_multicast_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     tx_vport_rdma_multicast_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
	return NUM_VPORT_REP_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       sw_rep_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
						   sw_rep_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct rtnl_link_stats64 stats64 = {};

	memset(s, 0, sizeof(*s));
	mlx5e_fold_sw_stats64(priv, &stats64);

	s->rx_packets = stats64.rx_packets;
	s->rx_bytes   = stats64.rx_bytes;
	s->tx_packets = stats64.tx_packets;
	s->tx_bytes   = stats64.tx_bytes;
	s->tx_queue_dropped = stats64.tx_dropped;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
{
	return NUM_VPORT_REP_HW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
						   vport_rep_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
{
	struct mlx5e_rep_stats *rep_stats = &priv->stats.rep_stats;
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return;

	err = mlx5_core_query_vport_counter(esw->dev, 1, rep->vport - 1, 0, out);
	if (err) {
		netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
			    rep->vport, err);
		goto out;
	}

	#define MLX5_GET_CTR(p, x) \
		MLX5_GET64(query_vport_counter_out, p, x)
	/* flip tx/rx as we are reporting the counters for the switch vport */
	rep_stats->vport_rx_packets =
		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);

	rep_stats->vport_tx_packets =
		MLX5_GET_CTR(out, received_ib_unicast.packets) +
		MLX5_GET_CTR(out, received_eth_unicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets) +
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	rep_stats->vport_rx_bytes =
		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	rep_stats->vport_tx_bytes =
		MLX5_GET_CTR(out, received_ib_unicast.octets) +
		MLX5_GET_CTR(out, received_eth_unicast.octets) +
		MLX5_GET_CTR(out, received_ib_multicast.octets) +
		MLX5_GET_CTR(out, received_eth_multicast.octets) +
		MLX5_GET_CTR(out, received_eth_broadcast.octets);

	rep_stats->rx_vport_rdma_unicast_packets =
		MLX5_GET_CTR(out, transmitted_ib_unicast.packets);
	rep_stats->tx_vport_rdma_unicast_packets =
		MLX5_GET_CTR(out, received_ib_unicast.packets);
	rep_stats->rx_vport_rdma_unicast_bytes =
		MLX5_GET_CTR(out, transmitted_ib_unicast.octets);
	rep_stats->tx_vport_rdma_unicast_bytes =
		MLX5_GET_CTR(out, received_ib_unicast.octets);
	rep_stats->rx_vport_rdma_multicast_packets =
		MLX5_GET_CTR(out, transmitted_ib_multicast.packets);
	rep_stats->tx_vport_rdma_multicast_packets =
		MLX5_GET_CTR(out, received_ib_multicast.packets);
	rep_stats->rx_vport_rdma_multicast_bytes =
		MLX5_GET_CTR(out, transmitted_ib_multicast.octets);
	rep_stats->tx_vport_rdma_multicast_bytes =
		MLX5_GET_CTR(out, received_ib_multicast.octets);

out:
	kvfree(out);
}

static void mlx5e_rep_get_strings(struct net_device *dev,
				  u32 stringset, uint8_t *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (stringset) {
	case ETH_SS_STATS:
		mlx5e_stats_fill_strings(priv, data);
		break;
	}
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return mlx5e_stats_total_num(priv);
	default:
		return -EOPNOTSUPP;
	}
}

static void
mlx5e_rep_get_ringparam(struct net_device *dev,
			struct ethtool_ringparam *param,
			struct kernel_ethtool_ringparam *kernel_param,
			struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}

static int
mlx5e_rep_set_ringparam(struct net_device *dev,
			struct ethtool_ringparam *param,
			struct kernel_ethtool_ringparam *kernel_param,
			struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_ringparam(priv, param);
}

static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_channels(priv, ch);
}

static int mlx5e_rep_set_channels(struct net_device *dev,
				  struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_channels(priv, ch);
}

static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
}

static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
}

static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_key_size(priv);
}

static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_indir_size(priv);
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings       = mlx5e_rep_get_strings,
	.get_sset_count    = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam     = mlx5e_rep_get_ringparam,
	.set_ringparam     = mlx5e_rep_set_ringparam,
	.get_channels      = mlx5e_rep_get_channels,
	.set_channels      = mlx5e_rep_set_channels,
	.get_coalesce      = mlx5e_rep_get_coalesce,
	.set_coalesce      = mlx5e_rep_set_coalesce,
	.get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};

static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;
	struct mlx5e_rep_priv *rpriv;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		if (rep_sq->send_to_vport_rule_peer)
			mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}

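/* Install "send to vport" steering rules so that traffic transmitted on the
 * representor's SQs is forwarded to the vport it represents. When the
 * e-switch is devcom-paired with a peer e-switch (dual-port setups), a
 * matching rule is installed on the peer e-switch as well.
 */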
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
{
	struct mlx5_eswitch *peer_esw = NULL;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	int err;
	int i;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	if (mlx5_devcom_is_paired(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS))
		peer_esw = mlx5_devcom_get_peer_data(esw->dev->priv.devcom,
						     MLX5_DEVCOM_ESW_OFFLOADS);

	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
		if (!rep_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(rep_sq);
			goto out_err;
		}
		rep_sq->send_to_vport_rule = flow_rule;
		rep_sq->sqn = sqns_array[i];

		if (peer_esw) {
			flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
									rep, sqns_array[i]);
			if (IS_ERR(flow_rule)) {
				err = PTR_ERR(flow_rule);
				mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
				kfree(rep_sq);
				goto out_err;
			}
			rep_sq->send_to_vport_rule_peer = flow_rule;
		}

		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}

	if (peer_esw)
		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	return 0;

out_err:
	mlx5e_sqs2vport_stop(esw, rep);

	if (peer_esw)
		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	return err;
}

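/* Enumerate the SQs of all active channels (the per-TC SQs, the uplink's
 * XDP SQs and the PTP SQs when port timestamping is enabled) and install
 * forwarding rules for each of them.
 */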
static int
mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	int sqs_per_channel = mlx5e_get_dcb_num_tc(&priv->channels.params);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	bool is_uplink_rep = mlx5e_is_uplink_rep(priv);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int n, tc, nch, num_sqs = 0;
	struct mlx5e_channel *c;
	int err = -ENOMEM;
	bool ptp_sq;
	u32 *sqs;

	ptp_sq = !!(priv->channels.ptp &&
		    MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS));
	nch = priv->channels.num + ptp_sq;
	/* +2 for the XDP SQs. They don't exist on the PTP channel, so the
	 * array is slightly over-allocated there, but num_sqs only counts
	 * SQs that actually exist.
	 */
	if (is_uplink_rep)
		sqs_per_channel += 2;

	sqs = kvcalloc(nch * sqs_per_channel, sizeof(*sqs), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;

		if (is_uplink_rep) {
			if (c->xdp)
				sqs[num_sqs++] = c->rq_xdpsq.sqn;

			sqs[num_sqs++] = c->xdpsq.sqn;
		}
	}
	if (ptp_sq) {
		struct mlx5e_ptp *ptp_ch = priv->channels.ptp;

		for (tc = 0; tc < ptp_ch->num_tc; tc++)
			sqs[num_sqs++] = ptp_ch->ptpsq[tc].txqsq.sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kvfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}

static void
mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_sqs2vport_stop(esw, rep);
}

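/* If the FDB exposes a send-to-vport metadata group, add a rule matching
 * this vport's metadata so that packets tagged with it are forwarded to
 * the vport.
 */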
static int
mlx5e_rep_add_meta_tunnel_rule(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_group *g;

	g = esw->fdb_table.offloads.send_to_vport_meta_grp;
	if (!g)
		return 0;

	flow_rule = mlx5_eswitch_add_send_to_vport_meta_rule(esw, rep->vport);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);

	rpriv->send_to_vport_meta_rule = flow_rule;

	return 0;
}

static void
mlx5e_rep_del_meta_tunnel_rule(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	if (rpriv->send_to_vport_meta_rule)
		mlx5_eswitch_del_send_to_vport_meta_rule(rpriv->send_to_vport_meta_rule);
}

void mlx5e_rep_activate_channels(struct mlx5e_priv *priv)
{
	mlx5e_add_sqs_fwd_rules(priv);
	mlx5e_rep_add_meta_tunnel_rule(priv);
}

void mlx5e_rep_deactivate_channels(struct mlx5e_priv *priv)
{
	mlx5e_rep_del_meta_tunnel_rule(priv);
	mlx5e_remove_sqs_fwd_rules(priv);
}

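/* Opening a representor also moves the represented vport's admin state to
 * UP; the representor's carrier tracks the vport state.
 */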
static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	if (!mlx5_modify_vport_admin_state(priv->mdev,
					   MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					   rep->vport, 1,
					   MLX5_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	mutex_lock(&priv->state_lock);
	mlx5_modify_vport_admin_state(priv->mdev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      rep->vport, 1,
				      MLX5_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;

	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
		return false;

	if (!rpriv) /* non vport rep mlx5e instances don't use this field */
		return false;

	rep = rpriv->rep;
	return (rep->vport == MLX5_VPORT_UPLINK);
}

bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_fold_sw_stats64(priv, stats);
	return 0;
}

int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
				void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlx5e_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	/* update HW stats in background for next time */
	mlx5e_queue_update_stats(priv);
	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, NULL);
}

static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	if (new_carrier) {
		err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
						    rep->vport, 1, MLX5_VPORT_ADMIN_STATE_UP);
		if (err)
			return err;
		netif_carrier_on(dev);
	} else {
		err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
						    rep->vport, 1, MLX5_VPORT_ADMIN_STATE_DOWN);
		if (err)
			return err;
		netif_carrier_off(dev);
	}
	return 0;
}

static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open                = mlx5e_rep_open,
	.ndo_stop                = mlx5e_rep_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_stats64         = mlx5e_rep_get_stats,
	.ndo_has_offload_stats	 = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats	 = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu          = mlx5e_rep_change_mtu,
	.ndo_change_carrier      = mlx5e_rep_change_carrier,
};

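/* The uplink representor reuses the NIC netdev, so it keeps the NIC's
 * netdev_ops (mlx5e_netdev_ops); only the other vport representors use
 * mlx5e_netdev_ops_rep.
 */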
bool mlx5e_eswitch_uplink_rep(const struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops &&
	       mlx5e_is_uplink_rep(netdev_priv(netdev));
}

bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}

/* One indirect TIR set for outer. Inner not supported in reps. */
#define REP_NUM_INDIR_TIRS MLX5E_NUM_INDIR_TIRS

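/* Bound the number of channels per representor by the TIR budget: take the
 * device's total TIR capacity, subtract the TIRs reserved for the PF netdev
 * and one indirect TIR set per vport, and split the remainder (used for the
 * per-channel direct TIRs) evenly across all vports.
 */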
static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev)
{
	int max_tir_num = 1 << MLX5_CAP_GEN(mdev, log_max_tir);
	int num_vports = mlx5_eswitch_get_total_vports(mdev);

	return (max_tir_num - mlx5e_get_pf_num_tirs(mdev)
		- (num_vports * REP_NUM_INDIR_TIRS)) / num_vports;
}

static void mlx5e_build_rep_params(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_params *params;

	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params = &priv->channels.params;

	params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
	params->hard_mtu    = MLX5E_ETH_HARD_MTU;
	params->sw_mtu      = netdev->mtu;

	/* SQ */
	if (rep->vport == MLX5_VPORT_UPLINK)
		params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	else
		params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* CQ moderation params */
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->mqprio.num_tc       = 1;
	params->tunneled_offload_en = false;
	if (rep->vport != MLX5_VPORT_UPLINK)
		params->vlan_strip_disable = true;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev,
				   struct mlx5_core_dev *mdev)
{
	SET_NETDEV_DEV(netdev, mdev->device);
	netdev->netdev_ops = &mlx5e_netdev_ops_rep;
	eth_hw_addr_random(netdev);
	netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;

	netdev->watchdog_timeo    = 15 * HZ;

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	netdev->hw_features    |= NETIF_F_HW_TC;
#endif
	netdev->hw_features    |= NETIF_F_SG;
	netdev->hw_features    |= NETIF_F_IP_CSUM;
	netdev->hw_features    |= NETIF_F_IPV6_CSUM;
	netdev->hw_features    |= NETIF_F_GRO;
	netdev->hw_features    |= NETIF_F_TSO;
	netdev->hw_features    |= NETIF_F_TSO6;
	netdev->hw_features    |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;
	netdev->features |= NETIF_F_NETNS_LOCAL;
}

static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
			  struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->fs =
		mlx5e_fs_init(priv->profile, mdev,
			      !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
			      priv->dfs_root);
	if (!priv->fs) {
		netdev_err(priv->netdev, "FS allocation failed\n");
		return -ENOMEM;
	}

	mlx5e_build_rep_params(netdev);
	mlx5e_timestamp_init(priv);

	return 0;
}

static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
			     struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->fs = mlx5e_fs_init(priv->profile, mdev,
				 !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
				 priv->dfs_root);
	if (!priv->fs) {
		netdev_err(priv->netdev, "FS allocation failed\n");
		return -ENOMEM;
	}

	mlx5e_vxlan_set_netdev_info(priv);
	mlx5e_build_rep_params(netdev);
	mlx5e_timestamp_init(priv);
	return 0;
}

static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
	mlx5e_fs_cleanup(priv->fs);
}

static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct ttc_params ttc_params = {};
	int err;

	mlx5e_fs_set_ns(priv->fs,
			mlx5_get_flow_namespace(priv->mdev,
						MLX5_FLOW_NAMESPACE_KERNEL), false);

	/* The inner_ttc in the ttc params is intentionally not set */
	mlx5e_set_ttc_params(priv->fs, priv->rx_res, &ttc_params, false);

	if (rep->vport != MLX5_VPORT_UPLINK)
		/* To give the uplink rep TTC a lower level for chaining from root ft */
		ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;

	mlx5e_fs_set_ttc(priv->fs, mlx5_create_ttc_table(priv->mdev, &ttc_params), false);
	if (IS_ERR(mlx5e_fs_get_ttc(priv->fs, false))) {
		err = PTR_ERR(mlx5e_fs_get_ttc(priv->fs, false));
		netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
			   err);
		return err;
	}
	return 0;
}

static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err = 0;

	if (rep->vport != MLX5_VPORT_UPLINK) {
		/* non-uplink reps will skip any bypass tables and go directly to
		 * their own ttc
		 */
		rpriv->root_ft = mlx5_get_ttc_flow_table(mlx5e_fs_get_ttc(priv->fs, false));
		return 0;
	}

	/* uplink root ft will be used to auto chain, to ethtool or ttc tables */
	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		netdev_err(priv->netdev, "Failed to get reps offloads namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = 0; /* Empty table, miss rule will always point to next table */
	ft_attr.prio = 1;
	ft_attr.level = 1;

	rpriv->root_ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(rpriv->root_ft)) {
		err = PTR_ERR(rpriv->root_ft);
		rpriv->root_ft = NULL;
	}

	return err;
}

static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	if (rep->vport != MLX5_VPORT_UPLINK)
		return;
	mlx5_destroy_flow_table(rpriv->root_ft);
}

static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rpriv->root_ft;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);
	rpriv->vport_rx_rule = flow_rule;
	return 0;
}

static void rep_vport_rx_rule_destroy(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	if (!rpriv->vport_rx_rule)
		return;

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	rpriv->vport_rx_rule = NULL;
}

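/* Called on representor bond (LAG) updates: drop the current vport RX rule
 * and, unless we are tearing down, re-create it against the current root
 * flow table.
 */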
int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
{
	rep_vport_rx_rule_destroy(priv);

	return cleanup ? 0 : mlx5e_create_rep_vport_rx_rule(priv);
}

static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	priv->rx_res = mlx5e_rx_res_alloc();
	if (!priv->rx_res) {
		err = -ENOMEM;
		goto err_free_fs;
	}

	mlx5e_fs_init_l2_addr(priv->fs, priv->netdev);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_rx_res_free;
	}

	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
				priv->max_nch, priv->drop_rq.rqn,
				&priv->channels.params.packet_merge,
				priv->channels.params.num_channels);
	if (err)
		goto err_close_drop_rq;

	err = mlx5e_create_rep_ttc_table(priv);
	if (err)
		goto err_destroy_rx_res;

	err = mlx5e_create_rep_root_ft(priv);
	if (err)
		goto err_destroy_ttc_table;

	err = mlx5e_create_rep_vport_rx_rule(priv);
	if (err)
		goto err_destroy_root_ft;

	mlx5e_ethtool_init_steering(priv->fs);

	return 0;

err_destroy_root_ft:
	mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
	mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
err_destroy_rx_res:
	mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
err_rx_res_free:
	mlx5e_rx_res_free(priv->rx_res);
	priv->rx_res = NULL;
err_free_fs:
	mlx5e_fs_cleanup(priv->fs);
	return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_ethtool_cleanup_steering(priv->fs);
	rep_vport_rx_rule_destroy(priv);
	mlx5e_destroy_rep_root_ft(priv);
	mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
	mlx5e_rx_res_destroy(priv->rx_res);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_rx_res_free(priv->rx_res);
	priv->rx_res = NULL;
}

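/* Multi-port e-switch (MPESW) work: re-create the uplink's vport RX rule so
 * it points at the current root flow table after an MPESW mode change.
 */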
static void mlx5e_rep_mpesw_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *uplink_priv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     mpesw_work);
	struct mlx5e_rep_priv *rpriv =
		container_of(uplink_priv, struct mlx5e_rep_priv,
			     uplink_priv);
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

	rep_vport_rx_rule_destroy(priv);
	mlx5e_create_rep_vport_rx_rule(priv);
}

static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	mlx5e_create_q_counters(priv);
	err = mlx5e_init_rep_rx(priv);
	if (err)
		goto out;

	mlx5e_tc_int_port_init_rep_rx(priv);

	INIT_WORK(&rpriv->uplink_priv.mpesw_work, mlx5e_rep_mpesw_work);

out:
	return err;
}

static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	cancel_work_sync(&rpriv->uplink_priv.mpesw_work);
	mlx5e_tc_int_port_cleanup_rep_rx(priv);
	mlx5e_cleanup_rep_rx(priv);
	mlx5e_destroy_q_counters(priv);
}

static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int err;

	netdev = rpriv->netdev;
	priv = netdev_priv(netdev);
	uplink_priv = &rpriv->uplink_priv;

	err = mlx5e_rep_tc_init(rpriv);
	if (err)
		return err;

	mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

	mlx5e_rep_bond_init(rpriv);
	err = mlx5e_rep_tc_netdevice_event_register(rpriv);
	if (err) {
		mlx5_core_err(priv->mdev, "Failed to register netdev notifier, err: %d\n",
			      err);
		goto err_event_reg;
	}

	return 0;

err_event_reg:
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
	return err;
}

static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	mlx5e_rep_tc_netdevice_event_unregister(rpriv);
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
}

static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
		err = mlx5e_init_uplink_rep_tx(rpriv);
		if (err)
			goto err_init_tx;
	}

	err = mlx5e_tc_ht_init(&rpriv->tc_ht);
	if (err)
		goto err_ht_init;

	return 0;

err_ht_init:
	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_cleanup_uplink_rep_tx(rpriv);
err_init_tx:
	mlx5e_destroy_tises(priv);
	return err;
}

static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_tc_ht_cleanup(&rpriv->tc_ht);

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_cleanup_uplink_rep_tx(rpriv);

	mlx5e_destroy_tises(priv);
}

static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_set_netdev_mtu_boundaries(priv);
	mlx5e_rep_neigh_init(rpriv);
}

static void mlx5e_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_rep_neigh_cleanup(rpriv);
}

static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
	return 0;
}

static int mlx5e_rep_event_mpesw(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	if (rep->vport != MLX5_VPORT_UPLINK)
		return NOTIFY_DONE;

	queue_work(priv->wq, &rpriv->uplink_priv.mpesw_work);

	return NOTIFY_OK;
}

static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);

	if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
		struct mlx5_eqe *eqe = data;

		switch (eqe->sub_type) {
		case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			queue_work(priv->wq, &priv->update_carrier_work);
			break;
		default:
			return NOTIFY_DONE;
		}

		return NOTIFY_OK;
	}

	if (event == MLX5_DEV_EVENT_PORT_AFFINITY)
		return mlx5e_rep_tc_event_port_affinity(priv);
	else if (event == MLX5_DEV_EVENT_MULTIPORT_ESW)
		return mlx5e_rep_event_mpesw(priv);

	return NOTIFY_DONE;
}

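/* Switch the uplink netdev into representor mode: set the MTU boundaries,
 * enable TC offloads, join the LAG, register for device events and bring
 * the interface back up.
 */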
static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	mlx5e_ipsec_init(priv);

	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	mlx5e_rep_tc_enable(priv);

	if (MLX5_CAP_GEN(mdev, uplink_follow))
		mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
					      0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
	mlx5_lag_add_netdev(mdev, netdev);
	priv->events_nb.notifier_call = uplink_rep_async_event;
	mlx5_notifier_register(mdev, &priv->events_nb);
	mlx5e_dcbnl_initialize(priv);
	mlx5e_dcbnl_init_app(priv);
	mlx5e_rep_neigh_init(rpriv);
	mlx5e_rep_bridge_init(priv);

	netdev->wanted_features |= NETIF_F_HW_TC;

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	udp_tunnel_nic_reset_ntf(priv->netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}

static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_core_dev *mdev = priv->mdev;

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	mlx5e_rep_bridge_cleanup(priv);
	mlx5e_rep_neigh_cleanup(rpriv);
	mlx5e_dcbnl_delete_app(priv);
	mlx5_notifier_unregister(mdev, &priv->events_nb);
	mlx5e_rep_tc_disable(priv);
	mlx5_lag_remove_netdev(mdev, priv->netdev);
	mlx5_vxlan_reset_to_default(mdev->vxlan);

	mlx5e_ipsec_cleanup(priv);
}

static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS);

/* The stats groups are listed in reverse order of their update_stats() calls */
static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = {
	&MLX5E_STATS_GRP(sw_rep),
	&MLX5E_STATS_GRP(vport_rep),
};

static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_rep_stats_grps);
}

/* The stats groups are listed in reverse order of their update_stats() calls */
static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_sw),
#endif
	&MLX5E_STATS_GRP(ptp),
};

static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_ul_rep_stats_grps);
}

static const struct mlx5e_profile mlx5e_rep_profile = {
	.init			= mlx5e_init_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable		        = mlx5e_rep_enable,
	.disable	        = mlx5e_rep_disable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats           = mlx5e_stats_update_ndo_stats,
	.rx_handlers            = &mlx5e_rx_handlers_rep,
	.max_tc			= 1,
	.stats_grps		= mlx5e_rep_stats_grps,
	.stats_grps_num		= mlx5e_rep_stats_grps_num,
	.max_nch_limit		= mlx5e_rep_max_nch_limit,
};

static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
	.init			= mlx5e_init_ul_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_ul_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_ul_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable		        = mlx5e_uplink_rep_enable,
	.disable	        = mlx5e_uplink_rep_disable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats           = mlx5e_stats_update_ndo_stats,
	.update_carrier	        = mlx5e_update_carrier,
	.rx_handlers            = &mlx5e_rx_handlers_rep,
	.max_tc			= MLX5E_MAX_NUM_TC,
	.stats_grps		= mlx5e_ul_rep_stats_grps,
	.stats_grps_num		= mlx5e_ul_rep_stats_grps_num,
};

/* e-Switch vport representors */
static int
mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);

	rpriv->netdev = priv->netdev;
	return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
					   rpriv);
}

static void
mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
{
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);

	mlx5e_netdev_attach_nic_profile(priv);
}

static int
mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	const struct mlx5e_profile *profile;
	struct devlink_port *dl_port;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int err;

	profile = &mlx5e_rep_profile;
	netdev = mlx5e_create_netdev(dev, profile);
	if (!netdev) {
		mlx5_core_warn(dev,
			       "Failed to create representor netdev for vport %d\n",
			       rep->vport);
		return -EINVAL;
	}

	mlx5e_build_rep_netdev(netdev, dev);
	rpriv->netdev = netdev;

	priv = netdev_priv(netdev);
	priv->profile = profile;
	priv->ppriv = rpriv;
	err = profile->init(dev, netdev);
	if (err) {
		netdev_warn(netdev, "rep profile init failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		netdev_warn(netdev,
			    "Failed to attach representor netdev for vport %d\n",
			    rep->vport);
		goto err_cleanup_profile;
	}

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch,
						 rpriv->rep->vport);
	if (dl_port)
		SET_NETDEV_DEVLINK_PORT(netdev, dl_port);

	err = register_netdev(netdev);
	if (err) {
		netdev_warn(netdev,
			    "Failed to register representor netdev for vport %d\n",
			    rep->vport);
		goto err_detach_netdev;
	}

	return 0;

err_detach_netdev:
	mlx5e_detach_netdev(netdev_priv(netdev));

err_cleanup_profile:
	priv->profile->cleanup(priv);

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	return err;
}

static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;
	int err;

	rpriv = kvzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return -ENOMEM;

	/* rpriv->rep to be looked up when profile->init() is called */
	rpriv->rep = rep;
	rep->rep_data[REP_ETH].priv = rpriv;
	INIT_LIST_HEAD(&rpriv->vport_sqs_list);

	if (rep->vport == MLX5_VPORT_UPLINK)
		err = mlx5e_vport_uplink_rep_load(dev, rep);
	else
		err = mlx5e_vport_vf_rep_load(dev, rep);

	if (err)
		kvfree(rpriv);

	return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	void *ppriv = priv->ppriv;

	if (rep->vport == MLX5_VPORT_UPLINK) {
		mlx5e_vport_uplink_rep_unload(rpriv);
		goto free_ppriv;
	}

	unregister_netdev(netdev);
	mlx5e_detach_netdev(priv);
	priv->profile->cleanup(priv);
	mlx5e_destroy_netdev(priv);
free_ppriv:
	kvfree(ppriv); /* mlx5e_rep_priv */
}

static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;

	rpriv = mlx5e_rep_to_rep_priv(rep);

	return rpriv->netdev;
}

static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
		if (!rep_sq->send_to_vport_rule_peer)
			continue;
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
		rep_sq->send_to_vport_rule_peer = NULL;
	}
}

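/* On e-switch pairing, install a peer send-to-vport rule for every SQ
 * already tracked on this representor; unpairing removes them again.
 */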
static int mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep,
				      struct mlx5_eswitch *peer_esw)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
		if (rep_sq->send_to_vport_rule_peer)
			continue;
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep, rep_sq->sqn);
		if (IS_ERR(flow_rule))
			goto err_out;
		rep_sq->send_to_vport_rule_peer = flow_rule;
	}

	return 0;
err_out:
	mlx5e_vport_rep_event_unpair(rep);
	return PTR_ERR(flow_rule);
}

static int mlx5e_vport_rep_event(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 enum mlx5_switchdev_event event,
				 void *data)
{
	int err = 0;

	if (event == MLX5_SWITCHDEV_EVENT_PAIR)
		err = mlx5e_vport_rep_event_pair(esw, rep, data);
	else if (event == MLX5_SWITCHDEV_EVENT_UNPAIR)
		mlx5e_vport_rep_event_unpair(rep);

	return err;
}

static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5e_vport_rep_load,
	.unload = mlx5e_vport_rep_unload,
	.get_proto_dev = mlx5e_vport_rep_get_proto_dev,
	.event = mlx5e_vport_rep_event,
};

static int mlx5e_rep_probe(struct auxiliary_device *adev,
			   const struct auxiliary_device_id *id)
{
	struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5_core_dev *mdev = edev->mdev;
	struct mlx5_eswitch *esw;

	esw = mdev->priv.eswitch;
	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
	return 0;
}

static void mlx5e_rep_remove(struct auxiliary_device *adev)
{
	struct mlx5_adev *vdev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5_core_dev *mdev = vdev->mdev;
	struct mlx5_eswitch *esw;

	esw = mdev->priv.eswitch;
	mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
}

static const struct auxiliary_device_id mlx5e_rep_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".eth-rep", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5e_rep_id_table);

static struct auxiliary_driver mlx5e_rep_driver = {
	.name = "eth-rep",
	.probe = mlx5e_rep_probe,
	.remove = mlx5e_rep_remove,
	.id_table = mlx5e_rep_id_table,
};

int mlx5e_rep_init(void)
{
	return auxiliary_driver_register(&mlx5e_rep_driver);
}

void mlx5e_rep_cleanup(void)
{
	auxiliary_driver_unregister(&mlx5e_rep_driver);
}