// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies

#include "en/ptp.h"
#include "en/txrx.h"
#include "en/params.h"
#include "en/fs_tt_redirect.h"

struct mlx5e_ptp_fs {
	struct mlx5_flow_handle *l2_rule;
	struct mlx5_flow_handle *udp_v4_rule;
	struct mlx5_flow_handle *udp_v6_rule;
	bool valid;
};

struct mlx5e_ptp_params {
	struct mlx5e_params params;
	struct mlx5e_sq_param txq_sq_param;
	struct mlx5e_rq_param rq_param;
};

struct mlx5e_skb_cb_hwtstamp {
	ktime_t cqe_hwtstamp;
	ktime_t port_hwtstamp;
};

void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb)
{
	memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}

static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mlx5e_skb_cb_hwtstamp) > sizeof(skb->cb));
	return (struct mlx5e_skb_cb_hwtstamp *)skb->cb;
}

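/* Validate the port timestamp against the CQE (DMA completion) timestamp and,
 * if the two are close enough, report it to the socket's error queue. A large
 * diff indicates a bogus port timestamp, which is counted and dropped.
 */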
static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
				     struct mlx5e_ptp_cq_stats *cq_stats)
{
	struct skb_shared_hwtstamps hwts = {};
	ktime_t diff;

	diff = abs(mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp -
		   mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp);

	/* Maximal allowed diff is 1 / 128 second */
	if (diff > (NSEC_PER_SEC >> 7)) {
		cq_stats->abort++;
		cq_stats->abort_abs_diff_ns += diff;
		return;
	}

	hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp;
	skb_tstamp_tx(skb, &hwts);
}

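/* Stash the timestamp carried by either the DMA completion CQE or the port
 * timestamping CQE; whichever arrives second triggers the report.
 */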
void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
				   ktime_t hwtstamp,
				   struct mlx5e_ptp_cq_stats *cq_stats)
{
	switch (hwtstamp_type) {
	case (MLX5E_SKB_CB_CQE_HWTSTAMP):
		mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp = hwtstamp;
		break;
	case (MLX5E_SKB_CB_PORT_HWTSTAMP):
		mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp = hwtstamp;
		break;
	}

	/* If both CQEs have arrived, check and report the port timestamp, then
	 * clear the skb cb, as the skb is about to be released.
	 */
	if (!mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp ||
	    !mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp)
		return;

	mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats);
	memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}

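/* Map a WQE counter to an index in the skb FIFO. ts_cqe_ctr_mask is non-zero
 * only when the device echoes the WQE counter back in the port timestamping
 * CQE (see mlx5e_ptp_alloc_traffic_db()); otherwise every value maps to 0 and
 * the drop/resync logic below is effectively disabled.
 */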
#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)

static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
{
	return (ptpsq->ts_cqe_ctr_mask && (skb_cc != skb_id));
}

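/* A CQE is out of order if its skb_id falls outside the [skb_cc, skb_pc)
 * window of skbs still awaiting a port timestamp; such a CQE was already
 * accounted for by a previous resync and must not pop another skb.
 */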
static bool mlx5e_ptp_ts_cqe_ooo(struct mlx5e_ptpsq *ptpsq, u16 skb_id)
{
	u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
	u16 skb_pc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_pc);

	if (PTP_WQE_CTR2IDX(skb_id - skb_cc) >= PTP_WQE_CTR2IDX(skb_pc - skb_cc))
		return true;

	return false;
}

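/* One or more port timestamping CQEs were lost: pop every skb older than the
 * one matching the current CQE, reporting the DMA completion timestamp as a
 * best-effort substitute for the missing port timestamp.
 */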
static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc,
					     u16 skb_id, int budget)
{
	struct skb_shared_hwtstamps hwts = {};
	struct sk_buff *skb;

	ptpsq->cq_stats->resync_event++;

	while (skb_cc != skb_id) {
		skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
		hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
		skb_tstamp_tx(skb, &hwts);
		ptpsq->cq_stats->resync_cqe++;
		napi_consume_skb(skb, budget);
		skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
	}
}

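/* Match a port timestamping CQE to its pending skb, resynchronizing the skb
 * FIFO first if CQEs were lost or reordered, then translate the CQE timestamp
 * and report it via mlx5e_skb_cb_hwtstamp_handler().
 */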
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
				    struct mlx5_cqe64 *cqe,
				    int budget)
{
	u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
	u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct sk_buff *skb;
	ktime_t hwtstamp;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
		ptpsq->cq_stats->err_cqe++;
		goto out;
	}

	if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id)) {
		if (mlx5e_ptp_ts_cqe_ooo(ptpsq, skb_id)) {
			/* already handled by a previous resync */
			ptpsq->cq_stats->ooo_cqe_drop++;
			return;
		}
		mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id, budget);
	}

	skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
	hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
	mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
				      hwtstamp, ptpsq->cq_stats);
	ptpsq->cq_stats->cqe++;

out:
	napi_consume_skb(skb, budget);
}

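/* Poll the port timestamping CQ; returns true if the budget was exhausted and
 * more CQEs may be pending.
 */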
static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
	struct mlx5_cqwq *cqwq = &cq->wq;
	struct mlx5_cqe64 *cqe;
	int work_done = 0;

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(cqwq);
	if (!cqe)
		return false;

	do {
		mlx5_cqwq_pop(cqwq);

		mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));

	mlx5_cqwq_update_db_record(cqwq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	return work_done == budget;
}

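/* NAPI handler of the dedicated PTP channel: polls the per-TC TXQ and port
 * timestamping CQs and, when PTP RX steering is active, the RQ's CQ, then
 * rearms whichever CQs were serviced.
 */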
static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_ptp *c = container_of(napi, struct mlx5e_ptp, napi);
	struct mlx5e_ch_stats *ch_stats = c->stats;
	struct mlx5e_rq *rq = &c->rq;
	bool busy = false;
	int work_done = 0;
	int i;

	rcu_read_lock();

	ch_stats->poll++;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget);
			busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget);
		}
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state) && likely(budget)) {
		work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
		busy |= work_done == budget;
		busy |= INDIRECT_CALL_2(rq->post_wqes,
					mlx5e_post_rx_mpwqes,
					mlx5e_post_rx_wqes,
					rq);
	}

	if (busy) {
		work_done = budget;
		goto out;
	}

	if (unlikely(!napi_complete_done(napi, work_done)))
		goto out;

	ch_stats->arm++;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq);
			mlx5e_cq_arm(&c->ptpsq[i].ts_cq);
		}
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_cq_arm(&rq->cq);

out:
	rcu_read_unlock();

	return work_done;
}

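/* Allocate the software context and cyclic work queue of a PTP SQ; the PTP
 * counterpart of the regular channel TXQ SQ allocation.
 */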
static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param,
				 struct mlx5e_txqsq *sq, int tc,
				 struct mlx5e_ptpsq *ptpsq)
{
	void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;
	int node;

	sq->pdev      = c->pdev;
	sq->clock     = &mdev->clock;
	sq->mkey_be   = c->mkey_be;
	sq->netdev    = c->netdev;
	sq->priv      = c->priv;
	sq->mdev      = mdev;
	sq->ch_ix     = MLX5E_PTP_CHANNEL_IX;
	sq->txq_ix    = txq_ix;
	sq->uar_map   = mdev->mlx5e_res.hw_objs.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->stats     = &c->priv->ptp_stats.sq[tc];
	sq->ptpsq     = ptpsq;
	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
	sq->stop_room = param->stop_room;
	sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);

	node = dev_to_node(mlx5_core_dma_dev(mdev));

	param->wq.db_numa_node = node;
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db    = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, node);
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}

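/* The skb FIFO holds each skb between its DMA completion on the SQ's CQ and
 * the matching CQE on the port timestamping CQ; its depth equals the SQ size.
 */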
static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
	struct mlx5_core_dev *mdev = ptpsq->txqsq.mdev;

	ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
					     GFP_KERNEL, numa);
	if (!ptpsq->skb_fifo.fifo)
		return -ENOMEM;

	ptpsq->skb_fifo.pc   = &ptpsq->skb_fifo_pc;
	ptpsq->skb_fifo.cc   = &ptpsq->skb_fifo_cc;
	ptpsq->skb_fifo.mask = wq_sz - 1;
	if (MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
		ptpsq->ts_cqe_ctr_mask =
			(1 << MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter)) - 1;
	return 0;
}

static void mlx5e_ptp_drain_skb_fifo(struct mlx5e_skb_fifo *skb_fifo)
{
	while (*skb_fifo->pc != *skb_fifo->cc) {
		struct sk_buff *skb = mlx5e_skb_fifo_pop(skb_fifo);

		dev_kfree_skb_any(skb);
	}
}

static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo)
{
	mlx5e_ptp_drain_skb_fifo(skb_fifo);
	kvfree(skb_fifo->fifo);
}

static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
				int txq_ix, struct mlx5e_ptp_params *cparams,
				int tc, struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_sq_param *sqp = &cparams->txq_sq_param;
	struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_ptp_alloc_txqsq(c, txq_ix, &cparams->params, sqp,
				    txqsq, tc, ptpsq);
	if (err)
		return err;

	csp.tisn            = tisn;
	csp.tis_lst_sz      = 1;
	csp.cqn             = txqsq->cq.mcq.cqn;
	csp.wq_ctrl         = &txqsq->wq_ctrl;
	csp.min_inline_mode = txqsq->min_inline_mode;
	csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;

	err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);
	if (err)
		goto err_free_txqsq;

	err = mlx5e_ptp_alloc_traffic_db(ptpsq,
					 dev_to_node(mlx5_core_dma_dev(c->mdev)));
	if (err)
		goto err_free_txqsq;

	return 0;

err_free_txqsq:
	mlx5e_free_txqsq(txqsq);

	return err;
}

static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct mlx5_core_dev *mdev = sq->mdev;

	mlx5e_ptp_free_traffic_db(&ptpsq->skb_fifo);
	cancel_work_sync(&sq->recover_work);
	mlx5e_ptp_destroy_sq(mdev, sq->sqn);
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}

static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	u8 num_tc = mlx5e_get_dcb_num_tc(params);
	int ix_base;
	int err;
	int tc;

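	/* PTP txqs are mapped after the txqs of all regular channels */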
	ix_base = num_tc * params->num_channels;

	for (tc = 0; tc < num_tc; tc++) {
		int txq_ix = ix_base + tc;

		err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
					   cparams, tc, &c->ptpsq[tc]);
		if (err)
			goto close_txqsq;
	}

	return 0;

close_txqsq:
	for (--tc; tc >= 0; tc--)
		mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);

	return err;
}

static void mlx5e_ptp_close_txqsqs(struct mlx5e_ptp *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);
}

static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder ptp_moder = {};
	struct mlx5e_cq_param *cq_param;
	u8 num_tc;
	int err;
	int tc;

	num_tc = mlx5e_get_dcb_num_tc(params);

	ccp.node     = dev_to_node(mlx5_core_dma_dev(c->mdev));
	ccp.ch_stats = c->stats;
	ccp.napi     = &c->napi;
	ccp.ix       = MLX5E_PTP_CHANNEL_IX;

	cq_param = &cparams->txq_sq_param.cqp;

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;

		err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_txqsq_cq;
	}

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
		struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];

		err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_ts_cq;

		ptpsq->cq_stats = &c->priv->ptp_stats.cq[tc];
	}

	return 0;

out_err_ts_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
	tc = num_tc;
out_err_txqsq_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);

	return err;
}

static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
				struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder ptp_moder = {};
	struct mlx5e_cq_param *cq_param;
	struct mlx5e_cq *cq = &c->rq.cq;

	ccp.node     = dev_to_node(mlx5_core_dma_dev(c->mdev));
	ccp.ch_stats = c->stats;
	ccp.napi     = &c->napi;
	ccp.ix       = MLX5E_PTP_CHANNEL_IX;

	cq_param = &cparams->rq_param.cqp;

	return mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
}

static void mlx5e_ptp_close_tx_cqs(struct mlx5e_ptp *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
}

static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params,
				     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq;

	mlx5e_build_sq_param_common(mdev, param);

	wq = MLX5_ADDR_OF(sqc, sqc, wq);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->stop_room = mlx5e_stop_room_for_max_wqe(mdev);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
				     struct net_device *netdev,
				     u16 q_counter,
				     struct mlx5e_ptp_params *ptp_params)
{
	struct mlx5e_rq_param *rq_params = &ptp_params->rq_param;
	struct mlx5e_params *params = &ptp_params->params;

	params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
	mlx5e_init_rq_type_params(mdev, params);
	params->sw_mtu = netdev->max_mtu;
	mlx5e_build_rq_param(mdev, params, NULL, q_counter, rq_params);
}

static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
				   struct mlx5e_ptp_params *cparams,
				   struct mlx5e_params *orig)
{
	struct mlx5e_params *params = &cparams->params;

	params->tx_min_inline_mode = orig->tx_min_inline_mode;
	params->num_channels = orig->num_channels;
	params->hard_mtu = orig->hard_mtu;
	params->sw_mtu = orig->sw_mtu;
	params->mqprio = orig->mqprio;

	/* SQ */
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		params->log_sq_size = orig->log_sq_size;
		mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
	}
	/* RQ */
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		params->vlan_strip_disable = orig->vlan_strip_disable;
		mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
	}
}

static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
			     struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5e_priv *priv = c->priv;
	int err;

	rq->wq_type      = params->rq_wq_type;
	rq->pdev         = c->pdev;
	rq->netdev       = priv->netdev;
	rq->priv         = priv;
	rq->clock        = &mdev->clock;
	rq->tstamp       = &priv->tstamp;
	rq->mdev         = mdev;
	rq->hw_mtu       = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->stats        = &c->priv->ptp_stats.rq;
	rq->ix           = MLX5E_PTP_CHANNEL_IX;
	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
	err = mlx5e_rq_set_handlers(rq, params, false);
	if (err)
		return err;

	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
}

static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
			     struct mlx5e_rq_param *rq_param)
{
	int node = dev_to_node(c->mdev->device);
	int err;

	err = mlx5e_init_ptp_rq(c, params, &c->rq);
	if (err)
		return err;

	return mlx5e_open_rq(params, rq_param, NULL, node, &c->rq);
}

static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	int err;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		err = mlx5e_ptp_open_tx_cqs(c, cparams);
		if (err)
			return err;

		err = mlx5e_ptp_open_txqsqs(c, cparams);
		if (err)
			goto close_tx_cqs;
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		err = mlx5e_ptp_open_rx_cq(c, cparams);
		if (err)
			goto close_txqsq;

		err = mlx5e_ptp_open_rq(c, &cparams->params, &cparams->rq_param);
		if (err)
			goto close_rx_cq;
	}
	return 0;

close_rx_cq:
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_close_cq(&c->rq.cq);
close_txqsq:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_txqsqs(c);
close_tx_cqs:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_tx_cqs(c);

	return err;
}

static void mlx5e_ptp_close_queues(struct mlx5e_ptp *c)
{
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		mlx5e_close_rq(&c->rq);
		mlx5e_close_cq(&c->rq.cq);
	}
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		mlx5e_ptp_close_txqsqs(c);
		mlx5e_ptp_close_tx_cqs(c);
	}
}

static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
{
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS))
		__set_bit(MLX5E_PTP_STATE_TX, c->state);

	if (params->ptp_rx)
		__set_bit(MLX5E_PTP_STATE_RX, c->state);

	return bitmap_empty(c->state, MLX5E_PTP_STATE_NUM_STATES) ? -EINVAL : 0;
}

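/* Tear down the PTP RX steering rules and redirect tables created by
 * mlx5e_ptp_rx_set_fs().
 */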
static void mlx5e_ptp_rx_unset_fs(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);

	if (!ptp_fs->valid)
		return;

	mlx5e_fs_tt_redirect_del_rule(ptp_fs->l2_rule);
	mlx5e_fs_tt_redirect_any_destroy(fs);

	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
	mlx5e_fs_tt_redirect_udp_destroy(fs);
	ptp_fs->valid = false;
}

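/* Steer PTP traffic to the PTP RQ's TIR: UDP packets destined to PTP_EV_PORT
 * (IPv4 and IPv6) and L2 packets with ethertype ETH_P_1588.
 */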
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
	u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
	struct mlx5e_flow_steering *fs = priv->fs;
	struct mlx5_flow_handle *rule;
	struct mlx5e_ptp_fs *ptp_fs;
	int err;

	ptp_fs = mlx5e_fs_get_ptp(fs);
	if (ptp_fs->valid)
		return 0;

	err = mlx5e_fs_tt_redirect_udp_create(fs);
	if (err)
		goto out_free;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV4_UDP,
						 tirn, PTP_EV_PORT);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_fs_udp;
	}
	ptp_fs->udp_v4_rule = rule;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV6_UDP,
						 tirn, PTP_EV_PORT);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_udp_v4_rule;
	}
	ptp_fs->udp_v6_rule = rule;

	err = mlx5e_fs_tt_redirect_any_create(fs);
	if (err)
		goto out_destroy_udp_v6_rule;

	rule = mlx5e_fs_tt_redirect_any_add_rule(fs, tirn, ETH_P_1588);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_fs_any;
	}
	ptp_fs->l2_rule = rule;
	ptp_fs->valid = true;

	return 0;

out_destroy_fs_any:
	mlx5e_fs_tt_redirect_any_destroy(fs);
out_destroy_udp_v6_rule:
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
out_destroy_udp_v4_rule:
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
out_destroy_fs_udp:
	mlx5e_fs_tt_redirect_udp_destroy(fs);
out_free:
	return err;
}

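/* Create the auxiliary PTP channel. Depending on the state selected by
 * mlx5e_ptp_set_state(), it carries per-TC SQs whose packets are timestamped
 * at the port, an RQ to which PTP traffic is steered, or both.
 */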
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   u8 lag_port, struct mlx5e_ptp **cp)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ptp_params *cparams;
	struct mlx5e_ptp *c;
	int err;

	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
	cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
	if (!c || !cparams) {
		err = -ENOMEM;
		goto err_free;
	}

	c->priv     = priv;
	c->mdev     = priv->mdev;
	c->tstamp   = &priv->tstamp;
	c->pdev     = mlx5_core_dma_dev(priv->mdev);
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
	c->num_tc   = mlx5e_get_dcb_num_tc(params);
	c->stats    = &priv->ptp_stats.ch;
	c->lag_port = lag_port;

	err = mlx5e_ptp_set_state(c, params);
	if (err)
		goto err_free;

	netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll);

	mlx5e_ptp_build_params(c, cparams, params);

	err = mlx5e_ptp_open_queues(c, cparams);
	if (unlikely(err))
		goto err_napi_del;

	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		priv->rx_ptp_opened = true;

	*cp = c;

	kvfree(cparams);

	return 0;

err_napi_del:
	netif_napi_del(&c->napi);
err_free:
	kvfree(cparams);
	kvfree(c);
	return err;
}

void mlx5e_ptp_close(struct mlx5e_ptp *c)
{
	mlx5e_ptp_close_queues(c);
	netif_napi_del(&c->napi);

	kvfree(c);
}

void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
{
	int tc;

	napi_enable(&c->napi);

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (tc = 0; tc < c->num_tc; tc++)
			mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		mlx5e_ptp_rx_set_fs(c->priv);
		mlx5e_activate_rq(&c->rq);
	}
	mlx5e_trigger_napi_sched(&c->napi);
}

void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
{
	int tc;

	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_deactivate_rq(&c->rq);

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (tc = 0; tc < c->num_tc; tc++)
			mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
	}

	napi_disable(&c->napi);
}

int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
{
	if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state))
		return -EINVAL;

	*rqn = c->rq.rqn;
	return 0;
}

int mlx5e_ptp_alloc_rx_fs(struct mlx5e_flow_steering *fs,
			  const struct mlx5e_profile *profile)
{
	struct mlx5e_ptp_fs *ptp_fs;

	if (!mlx5e_profile_feature_cap(profile, PTP_RX))
		return 0;

	ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
	if (!ptp_fs)
		return -ENOMEM;
	mlx5e_fs_set_ptp(fs, ptp_fs);

	return 0;
}

void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs,
			  const struct mlx5e_profile *profile)
{
	struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);

	if (!mlx5e_profile_feature_cap(profile, PTP_RX))
		return;

	mlx5e_ptp_rx_unset_fs(fs);
	kfree(ptp_fs);
}

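/* Install (set == true) or remove the PTP RX steering rules; called when the
 * ptp_rx setting changes while the netdev is open.
 */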
int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
{
	struct mlx5e_ptp *c = priv->channels.ptp;

	if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
		return 0;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	if (set) {
		if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) {
			netdev_WARN_ONCE(priv->netdev, "Don't try to add PTP RX-FS rules");
			return -EINVAL;
		}
		return mlx5e_ptp_rx_set_fs(priv);
	}
	/* set == false */
	if (c && test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		netdev_WARN_ONCE(priv->netdev, "Don't try to remove PTP RX-FS rules");
		return -EINVAL;
	}
	mlx5e_ptp_rx_unset_fs(priv->fs);
	return 0;
}