/*
 * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_XDP_H__
#define __MLX5_EN_XDP_H__

#include <linux/indirect_call_wrapper.h>

#include "en.h"
#include "en/txrx.h"

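/* Minimum number of packet header bytes inlined into the TX descriptor:
 * an Ethernet header plus one VLAN tag.
 */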
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

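/* A frame is copied inline into the WQE only if, together with the inline
 * segment header, it fits in MLX5E_XDP_INLINE_WQE_MAX_DS_CNT data segments
 * of MLX5_SEND_WQE_DS bytes each.
 */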
#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT 16
#define MLX5E_XDP_INLINE_WQE_SZ_THRSD \
	(MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
	 sizeof(struct mlx5_wqe_inline_seg))

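/* RX context handed to the XDP program: the generic xdp_buff plus the
 * originating CQE and RQ, which the metadata operations below can use to
 * resolve e.g. RX timestamps and hashes.
 */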
struct mlx5e_xdp_buff {
	struct xdp_buff xdp;
	struct mlx5_cqe64 *cqe;
	struct mlx5e_rq *rq;
};

struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
		      struct bpf_prog *prog, struct mlx5e_xdp_buff *mlctx);
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw);
void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		   u32 flags);

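/* Implementations of the XDP RX metadata kfuncs (e.g. RX timestamp and
 * hash), hooked up via the netdev's xdp_metadata_ops.
 */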
extern const struct xdp_metadata_ops mlx5e_xdp_metadata_ops;

INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
							  struct mlx5e_xmit_data *xdptxd,
							  struct skb_shared_info *sinfo,
							  int check_result));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
						    struct mlx5e_xmit_data *xdptxd,
						    struct skb_shared_info *sinfo,
						    int check_result));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));

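/* MLX5E_STATE_XDP_TX_ENABLED gates XDP_TX/XDP_REDIRECT transmission;
 * MLX5E_STATE_XDP_ACTIVE is set only while an XDP program is attached.
 */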
static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);

	if (priv->channels.params.xdp_prog)
		set_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
}

static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
{
	if (priv->channels.params.xdp_prog)
		clear_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);

	clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
	/* Make sure in-flight NAPI polls and XSK wakeups observe the new
	 * state before we return.
	 */
	synchronize_net();
}

static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
{
	return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
}

static inline bool mlx5e_xdp_is_active(struct mlx5e_priv *priv)
{
	return test_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
}

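/* WQEs are posted with their doorbell deferred in sq->doorbell_cseg, so HW
 * is notified only once per batch; this flushes any pending doorbell.
 */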
static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
{
	if (sq->doorbell_cseg) {
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
		sq->doorbell_cseg = NULL;
	}
}

/* Enable inline WQEs to shift some load from a congested HCA (HW) to
 * a less congested CPU (SW).
 */
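/* The low/high watermarks below add hysteresis, so the mode does not flap
 * when the number of outstanding descriptors hovers around one threshold.
 * A minimal usage sketch (assuming the caller re-evaluates the state when
 * opening a new MPWQE session):
 *
 *	sq->mpwqe.inline_on = mlx5e_xdp_get_inline_state(sq, sq->mpwqe.inline_on);
 */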
static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
{
	u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc;

#define MLX5E_XDP_INLINE_WATERMARK_LOW	10
#define MLX5E_XDP_INLINE_WATERMARK_HIGH 128

	if (cur && outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW)
		return false;

	if (!cur && outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH)
		return true;

	return cur;
}

static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
{
	if (session->inline_on)
		return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
		       max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;

	return mlx5e_tx_mpwqe_is_full(session, max_sq_mpw_wqebbs);
}

struct mlx5e_xdp_wqe_info {
	u8 num_wqebbs;
	u8 num_pkts;
};

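/* Append one segment to the open MPWQE session: short frames are copied
 * inline when inlining is enabled, larger ones are referenced by their DMA
 * address in a regular data segment.
 */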
static inline void
mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
			 struct mlx5e_xmit_data *xdptxd,
			 struct mlx5e_xdpsq_stats *stats)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5_wqe_data_seg *dseg =
		(struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;
	u32 dma_len = xdptxd->len;

	session->pkt_count++;
	session->bytes_count += dma_len;

	if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) {
		struct mlx5_wqe_inline_seg *inline_dseg =
			(struct mlx5_wqe_inline_seg *)dseg;
		u16 ds_len = sizeof(*inline_dseg) + dma_len;
		u16 ds_cnt = DIV_ROUND_UP(ds_len, MLX5_SEND_WQE_DS);

		inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG);
		memcpy(inline_dseg->data, xdptxd->data, dma_len);

		session->ds_count += ds_cnt;
		stats->inlnw++;
		return;
	}

	dseg->addr = cpu_to_be64(xdptxd->dma_addr);
	dseg->byte_count = cpu_to_be32(dma_len);
	dseg->lkey = sq->mkey_be;
	session->ds_count++;
}

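/* The xdpi FIFO is a power-of-two ring indexed by free-running producer and
 * consumer counters. A typical pairing (a sketch, assuming the SQ keeps its
 * FIFO at sq->db.xdpi_fifo and xdpi is a local struct mlx5e_xdp_info):
 * push one entry per transmitted frame, pop it when the completion arrives:
 *
 *	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
 *	...
 *	xdpi = mlx5e_xdpi_fifo_pop(&sq->db.xdpi_fifo);
 */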
static inline void
mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo,
		     struct mlx5e_xdp_info *xi)
{
	u32 i = (*fifo->pc)++ & fifo->mask;

	fifo->xi[i] = *xi;
}

static inline struct mlx5e_xdp_info
mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo)
{
	return fifo->xi[(*fifo->cc)++ & fifo->mask];
}
#endif /* __MLX5_EN_XDP_H__ */