/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.

#ifndef __HNS3_ENET_H
#define __HNS3_ENET_H

#include <linux/dim.h>
#include <linux/if_vlan.h>
#include <net/page_pool.h>

#include "hnae3.h"

enum hns3_nic_state {
	HNS3_NIC_STATE_TESTING,
	HNS3_NIC_STATE_RESETTING,
	HNS3_NIC_STATE_INITED,
	HNS3_NIC_STATE_DOWN,
	HNS3_NIC_STATE_DISABLED,
	HNS3_NIC_STATE_REMOVING,
	HNS3_NIC_STATE_SERVICE_INITED,
	HNS3_NIC_STATE_SERVICE_SCHED,
	HNS3_NIC_STATE2_RESET_REQUESTED,
	HNS3_NIC_STATE_HW_TX_CSUM_ENABLE,
	HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE,
	HNS3_NIC_STATE_MAX
};

#define HNS3_RING_RX_RING_BASEADDR_L_REG 0x00000
#define HNS3_RING_RX_RING_BASEADDR_H_REG 0x00004
#define HNS3_RING_RX_RING_BD_NUM_REG 0x00008
#define HNS3_RING_RX_RING_BD_LEN_REG 0x0000C
#define HNS3_RING_RX_RING_TAIL_REG 0x00018
#define HNS3_RING_RX_RING_HEAD_REG 0x0001C
#define HNS3_RING_RX_RING_FBDNUM_REG 0x00020
#define HNS3_RING_RX_RING_PKTNUM_RECORD_REG 0x0002C

#define HNS3_RING_TX_RING_BASEADDR_L_REG 0x00040
#define HNS3_RING_TX_RING_BASEADDR_H_REG 0x00044
#define HNS3_RING_TX_RING_BD_NUM_REG 0x00048
#define HNS3_RING_TX_RING_TC_REG 0x00050
#define HNS3_RING_TX_RING_TAIL_REG 0x00058
#define HNS3_RING_TX_RING_HEAD_REG 0x0005C
#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060
#define HNS3_RING_TX_RING_OFFSET_REG 0x00064
#define HNS3_RING_TX_RING_EBDNUM_REG 0x00068
#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C
#define HNS3_RING_TX_RING_EBD_OFFSET_REG 0x00070
#define HNS3_RING_TX_RING_BD_ERR_REG 0x00074
#define HNS3_RING_EN_REG 0x00090
#define HNS3_RING_RX_EN_REG 0x00098
#define HNS3_RING_TX_EN_REG 0x000D4

#define HNS3_RX_HEAD_SIZE 256

#define HNS3_TX_TIMEOUT (5 * HZ)
#define HNS3_RING_NAME_LEN 16
#define HNS3_BUFFER_SIZE_2048 2048
#define HNS3_RING_MAX_PENDING 32760
#define HNS3_RING_MIN_PENDING 72
#define HNS3_RING_BD_MULTIPLE 8
/* max MTU derived from the MAC's max frame size */
#define HNS3_MAX_MTU(max_frm_size) \
	((max_frm_size) - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
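/* Worked example (illustrative only, not from the hardware spec): with
 * ETH_HLEN = 14, ETH_FCS_LEN = 4 and VLAN_HLEN = 4, a MAC max frame size of
 * 9728 bytes (HNS3_MAX_NON_TSO_SIZE below) gives
 * HNS3_MAX_MTU(9728) = 9728 - (14 + 4 + 2 * 4) = 9702.
 */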

#define HNS3_BD_SIZE_512_TYPE 0
#define HNS3_BD_SIZE_1024_TYPE 1
#define HNS3_BD_SIZE_2048_TYPE 2
#define HNS3_BD_SIZE_4096_TYPE 3

#define HNS3_RX_FLAG_VLAN_PRESENT 0x1
#define HNS3_RX_FLAG_L3ID_IPV4 0x0
#define HNS3_RX_FLAG_L3ID_IPV6 0x1
#define HNS3_RX_FLAG_L4ID_UDP 0x0
#define HNS3_RX_FLAG_L4ID_TCP 0x1

#define HNS3_RXD_DMAC_S 0
#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S)
#define HNS3_RXD_VLAN_S 2
#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S)
#define HNS3_RXD_L3ID_S 4
#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S)
#define HNS3_RXD_L4ID_S 8
#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S)
#define HNS3_RXD_FRAG_B 12
#define HNS3_RXD_STRP_TAGP_S 13
#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S)

#define HNS3_RXD_L2E_B 16
#define HNS3_RXD_L3E_B 17
#define HNS3_RXD_L4E_B 18
#define HNS3_RXD_TRUNCAT_B 19
#define HNS3_RXD_HOI_B 20
#define HNS3_RXD_DOI_B 21
#define HNS3_RXD_OL3E_B 22
#define HNS3_RXD_OL4E_B 23
#define HNS3_RXD_GRO_COUNT_S 24
#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S)
#define HNS3_RXD_GRO_FIXID_B 30
#define HNS3_RXD_GRO_ECN_B 31

#define HNS3_RXD_ODMAC_S 0
#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S)
#define HNS3_RXD_OVLAN_S 2
#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S)
#define HNS3_RXD_OL3ID_S 4
#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S)
#define HNS3_RXD_OL4ID_S 8
#define HNS3_RXD_OL4ID_M (0xf << HNS3_RXD_OL4ID_S)
#define HNS3_RXD_FBHI_S 12
#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S)
#define HNS3_RXD_FBLI_S 14
#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S)

#define HNS3_RXD_PTYPE_S 4
#define HNS3_RXD_PTYPE_M GENMASK(11, 4)

#define HNS3_RXD_BDTYPE_S 0
#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S)
#define HNS3_RXD_VLD_B 4
#define HNS3_RXD_UDP0_B 5
#define HNS3_RXD_EXTEND_B 7
#define HNS3_RXD_FE_B 8
#define HNS3_RXD_LUM_B 9
#define HNS3_RXD_CRCP_B 10
#define HNS3_RXD_L3L4P_B 11
#define HNS3_RXD_TSIDX_S 12
#define HNS3_RXD_TSIDX_M (0x3 << HNS3_RXD_TSIDX_S)
#define HNS3_RXD_TS_VLD_B 14
#define HNS3_RXD_LKBK_B 15
#define HNS3_RXD_GRO_SIZE_S 16
#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)

#define HNS3_TXD_L3T_S 0
#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
#define HNS3_TXD_L4T_S 2
#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S)
#define HNS3_TXD_L3CS_B 4
#define HNS3_TXD_L4CS_B 5
#define HNS3_TXD_VLAN_B 6
#define HNS3_TXD_TSO_B 7

#define HNS3_TXD_L2LEN_S 8
#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S)
#define HNS3_TXD_L3LEN_S 16
#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S)
#define HNS3_TXD_L4LEN_S 24
#define HNS3_TXD_L4LEN_M (0xff << HNS3_TXD_L4LEN_S)

#define HNS3_TXD_CSUM_START_S 8
#define HNS3_TXD_CSUM_START_M (0xffff << HNS3_TXD_CSUM_START_S)

#define HNS3_TXD_OL3T_S 0
#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S)
#define HNS3_TXD_OVLAN_B 2
#define HNS3_TXD_MACSEC_B 3
#define HNS3_TXD_TUNTYPE_S 4
#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S)

#define HNS3_TXD_CSUM_OFFSET_S 8
#define HNS3_TXD_CSUM_OFFSET_M (0xffff << HNS3_TXD_CSUM_OFFSET_S)

#define HNS3_TXD_BDTYPE_S 0
#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S)
#define HNS3_TXD_FE_B 4
#define HNS3_TXD_SC_S 5
#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S)
#define HNS3_TXD_EXTEND_B 7
#define HNS3_TXD_VLD_B 8
#define HNS3_TXD_RI_B 9
#define HNS3_TXD_RA_B 10
#define HNS3_TXD_TSYN_B 11
#define HNS3_TXD_DECTTL_S 12
#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S)

#define HNS3_TXD_OL4CS_B 22

#define HNS3_TXD_MSS_S 0
#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)
#define HNS3_TXD_HW_CS_B 14

#define HNS3_VECTOR_TX_IRQ BIT_ULL(0)
#define HNS3_VECTOR_RX_IRQ BIT_ULL(1)

#define HNS3_VECTOR_NOT_INITED 0
#define HNS3_VECTOR_INITED 1

#define HNS3_MAX_BD_SIZE 65535
#define HNS3_MAX_TSO_BD_NUM 63U
#define HNS3_MAX_TSO_SIZE 1048576U
#define HNS3_MAX_NON_TSO_SIZE 9728U

#define HNS3_VECTOR_GL_MASK GENMASK(11, 0)
#define HNS3_VECTOR_GL0_OFFSET 0x100
#define HNS3_VECTOR_GL1_OFFSET 0x200
#define HNS3_VECTOR_GL2_OFFSET 0x300
#define HNS3_VECTOR_RL_OFFSET 0x900
#define HNS3_VECTOR_RL_EN_B 6
#define HNS3_VECTOR_QL_MASK GENMASK(9, 0)
#define HNS3_VECTOR_TX_QL_OFFSET 0xe00
#define HNS3_VECTOR_RX_QL_OFFSET 0xf00

#define HNS3_RING_EN_B 0

#define HNS3_GL0_CQ_MODE_REG 0x20d00
#define HNS3_GL1_CQ_MODE_REG 0x20d04
#define HNS3_GL2_CQ_MODE_REG 0x20d08
#define HNS3_CQ_MODE_EQE 1U
#define HNS3_CQ_MODE_CQE 0U

enum hns3_pkt_l2t_type {
	HNS3_L2_TYPE_UNICAST,
	HNS3_L2_TYPE_MULTICAST,
	HNS3_L2_TYPE_BROADCAST,
	HNS3_L2_TYPE_INVALID,
};

enum hns3_pkt_l3t_type {
	HNS3_L3T_NONE,
	HNS3_L3T_IPV6,
	HNS3_L3T_IPV4,
	HNS3_L3T_RESERVED
};

enum hns3_pkt_l4t_type {
	HNS3_L4T_UNKNOWN,
	HNS3_L4T_TCP,
	HNS3_L4T_UDP,
	HNS3_L4T_SCTP
};

enum hns3_pkt_ol3t_type {
	HNS3_OL3T_NONE,
	HNS3_OL3T_IPV6,
	HNS3_OL3T_IPV4_NO_CSUM,
	HNS3_OL3T_IPV4_CSUM
};

enum hns3_pkt_tun_type {
	HNS3_TUN_NONE,
	HNS3_TUN_MAC_IN_UDP,
	HNS3_TUN_NVGRE,
	HNS3_TUN_OTHER
};

/* hardware spec ring buffer format */
struct __packed hns3_desc {
	union {
		__le64 addr;
		__le16 csum;
		struct {
			__le32 ts_nsec;
			__le32 ts_sec;
		};
	};
	union {
		struct {
			__le16 vlan_tag;
			__le16 send_size;
			union {
				__le32 type_cs_vlan_tso_len;
				struct {
					__u8 type_cs_vlan_tso;
					__u8 l2_len;
					__u8 l3_len;
					__u8 l4_len;
				};
			};
			__le16 outer_vlan_tag;
			__le16 tv;

			union {
				__le32 ol_type_vlan_len_msec;
				struct {
					__u8 ol_type_vlan_msec;
					__u8 ol2_len;
					__u8 ol3_len;
					__u8 ol4_len;
				};
			};

			__le32 paylen_ol4cs;
			__le16 bdtp_fe_sc_vld_ra_ri;
			__le16 mss_hw_csum;
		} tx;

		struct {
			__le32 l234_info;
			__le16 pkt_len;
			__le16 size;

			__le32 rss_hash;
			__le16 fd_id;
			__le16 vlan_tag;

			union {
				__le32 ol_info;
				struct {
					__le16 o_dm_vlan_id_fb;
					__le16 ot_vlan_tag;
				};
			};

			__le32 bd_base_info;
		} rx;
	};
};

enum hns3_desc_type {
	DESC_TYPE_UNKNOWN = 0,
	DESC_TYPE_SKB = 1 << 0,
	DESC_TYPE_FRAGLIST_SKB = 1 << 1,
	DESC_TYPE_PAGE = 1 << 2,
	DESC_TYPE_BOUNCE_ALL = 1 << 3,
	DESC_TYPE_BOUNCE_HEAD = 1 << 4,
	DESC_TYPE_SGL_SKB = 1 << 5,
	DESC_TYPE_PP_FRAG = 1 << 6,
};

struct hns3_desc_cb {
	dma_addr_t dma; /* dma address of this desc */
	void *buf; /* cpu addr for a desc */

	/* priv data for the desc, e.g. skb when used with the ip stack */
	void *priv;

	union {
		u32 page_offset; /* for rx */
		u32 send_bytes; /* for tx */
	};

	u32 length; /* length of the buffer */

	u16 reuse_flag;
	u16 refill;

	/* desc type, used by the ring user to mark the type of the priv data */
	u16 type;
	u16 pagecnt_bias;
};

enum hns3_pkt_l3type {
	HNS3_L3_TYPE_IPV4,
	HNS3_L3_TYPE_IPV6,
	HNS3_L3_TYPE_ARP,
	HNS3_L3_TYPE_RARP,
	HNS3_L3_TYPE_IPV4_OPT,
	HNS3_L3_TYPE_IPV6_EXT,
	HNS3_L3_TYPE_LLDP,
	HNS3_L3_TYPE_BPDU,
	HNS3_L3_TYPE_MAC_PAUSE,
	HNS3_L3_TYPE_PFC_PAUSE, /* 0x9 */

	/* reserved for 0xA~0xB */

	HNS3_L3_TYPE_CNM = 0xc,

	/* reserved for 0xD~0xE */

	HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */
};

enum hns3_pkt_l4type {
	HNS3_L4_TYPE_UDP,
	HNS3_L4_TYPE_TCP,
	HNS3_L4_TYPE_GRE,
	HNS3_L4_TYPE_SCTP,
	HNS3_L4_TYPE_IGMP,
	HNS3_L4_TYPE_ICMP,

	/* reserved for 0x6~0xE */

	HNS3_L4_TYPE_PARSE_FAIL = 0xf /* must be last */
};

enum hns3_pkt_ol3type {
	HNS3_OL3_TYPE_IPV4 = 0,
	HNS3_OL3_TYPE_IPV6,
	/* reserved for 0x2~0x3 */
	HNS3_OL3_TYPE_IPV4_OPT = 4,
	HNS3_OL3_TYPE_IPV6_EXT,

	/* reserved for 0x6~0xE */

	HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */
};

enum hns3_pkt_ol4type {
	HNS3_OL4_TYPE_NO_TUN,
	HNS3_OL4_TYPE_MAC_IN_UDP,
	HNS3_OL4_TYPE_NVGRE,
	HNS3_OL4_TYPE_UNKNOWN
};

struct hns3_rx_ptype {
	u32 ptype : 8;
	u32 csum_level : 2;
	u32 ip_summed : 2;
	u32 l3_type : 4;
	u32 valid : 1;
};

struct ring_stats {
	u64 sw_err_cnt;
	u64 seg_pkt_cnt;
	union {
		struct {
			u64 tx_pkts;
			u64 tx_bytes;
			u64 tx_more;
			u64 restart_queue;
			u64 tx_busy;
			u64 tx_copy;
			u64 tx_vlan_err;
			u64 tx_l4_proto_err;
			u64 tx_l2l3l4_err;
			u64 tx_tso_err;
			u64 over_max_recursion;
			u64 hw_limitation;
			u64 tx_bounce;
			u64 tx_spare_full;
			u64 copy_bits_err;
			u64 tx_sgl;
			u64 skb2sgl_err;
			u64 map_sg_err;
		};
		struct {
			u64 rx_pkts;
			u64 rx_bytes;
			u64 rx_err_cnt;
			u64 reuse_pg_cnt;
			u64 err_pkt_len;
			u64 err_bd_num;
			u64 l2_err;
			u64 l3l4_csum_err;
			u64 csum_complete;
			u64 rx_multicast;
			u64 non_reuse_pg;
			u64 frag_alloc_err;
			u64 frag_alloc;
		};
		__le16 csum;
	};
};

struct hns3_tx_spare {
	dma_addr_t dma;
	void *buf;
	u32 next_to_use;
	u32 next_to_clean;
	u32 last_to_clean;
	u32 len;
};

struct hns3_enet_ring {
	struct hns3_desc *desc; /* dma map address space */
	struct hns3_desc_cb *desc_cb;
	struct hns3_enet_ring *next;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_queue *tqp;
	int queue_index;
	struct device *dev; /* will be used for DMA mapping of descriptors */
	struct page_pool *page_pool;

	/* statistic */
	struct ring_stats stats;
	struct u64_stats_sync syncp;

	dma_addr_t desc_dma_addr;
	u32 buf_size; /* size for hnae_desc->addr, preset by AE */
	u16 desc_num; /* total number of desc */
	int next_to_use; /* idx of next spare desc */

	/* idx of the latest sent desc, the ring is empty when equal to
	 * next_to_use
	 */
	int next_to_clean;
	u32 flag; /* ring attribute */

	int pending_buf;
	union {
		/* for Tx ring */
		struct {
			u32 fd_qb_tx_sample;
			int last_to_use; /* last idx used by xmit */
			u32 tx_copybreak;
			struct hns3_tx_spare *tx_spare;
		};

		/* for Rx ring */
		struct {
			u32 pull_len; /* memcpy len for current rx packet */
			u32 rx_copybreak;
			u32 frag_num;
			/* first buffer address for current packet */
			unsigned char *va;
			struct sk_buff *skb;
			struct sk_buff *tail_skb;
		};
	};
} ____cacheline_internodealigned_in_smp;

enum hns3_flow_level_range {
	HNS3_FLOW_LOW = 0,
	HNS3_FLOW_MID = 1,
	HNS3_FLOW_HIGH = 2,
	HNS3_FLOW_ULTRA = 3,
};

#define HNS3_INT_GL_50K 0x0014
#define HNS3_INT_GL_20K 0x0032
#define HNS3_INT_GL_18K 0x0036
#define HNS3_INT_GL_8K 0x007C

#define HNS3_INT_GL_1US BIT(31)

#define HNS3_INT_RL_MAX 0x00EC
#define HNS3_INT_RL_ENABLE_MASK 0x40

#define HNS3_INT_QL_DEFAULT_CFG 0x20

struct hns3_enet_coalesce {
	u16 int_gl;
	u16 int_ql;
	u16 int_ql_max;
	u8 adapt_enable : 1;
	u8 ql_enable : 1;
	u8 unit_1us : 1;
	enum hns3_flow_level_range flow_level;
};

struct hns3_enet_ring_group {
	/* array of pointers to rings */
	struct hns3_enet_ring *ring;
	u64 total_bytes; /* total bytes processed this group */
	u64 total_packets; /* total packets processed this group */
	u16 count;
	struct hns3_enet_coalesce coal;
	struct dim dim;
};

struct hns3_enet_tqp_vector {
	struct hnae3_handle *handle;
	u8 __iomem *mask_addr;
	int vector_irq;
	int irq_init_flag;

	u16 idx; /* index in the TQP vector array per handle. */

	struct napi_struct napi;

	struct hns3_enet_ring_group rx_group;
	struct hns3_enet_ring_group tx_group;

	cpumask_t affinity_mask;
	u16 num_tqps; /* total number of tqps in TQP vector */
	struct irq_affinity_notify affinity_notify;

	char name[HNAE3_INT_NAME_LEN];

	u64 event_cnt;
} ____cacheline_internodealigned_in_smp;

struct hns3_nic_priv {
	struct hnae3_handle *ae_handle;
	struct net_device *netdev;
	struct device *dev;

	/* the cb for nic to manage the ring buffer; the first half of the
	 * array is for the tx rings and the second half is for the rx rings
	 */
	struct hns3_enet_ring *ring;
	struct hns3_enet_tqp_vector *tqp_vector;
	u16 vector_num;
	u8 max_non_tso_bd_num;

	u64 tx_timeout_count;

	unsigned long state;

	enum dim_cq_period_mode tx_cqe_mode;
	enum dim_cq_period_mode rx_cqe_mode;
	struct hns3_enet_coalesce tx_coal;
	struct hns3_enet_coalesce rx_coal;
	u32 tx_copybreak;
	u32 rx_copybreak;
};

union l3_hdr_info {
	struct iphdr *v4;
	struct ipv6hdr *v6;
	unsigned char *hdr;
};

union l4_hdr_info {
	struct tcphdr *tcp;
	struct udphdr *udp;
	struct gre_base_hdr *gre;
	unsigned char *hdr;
};

struct hns3_hw_error_info {
	enum hnae3_hw_error_type type;
	const char *msg;
};

struct hns3_reset_type_map {
	enum ethtool_reset_flags rst_flags;
	enum hnae3_reset_type rst_type;
};

static inline int ring_space(struct hns3_enet_ring *ring)
{
	/* This smp_load_acquire() pairs with smp_store_release() in
	 * hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring.
	 */
	int begin = smp_load_acquire(&ring->next_to_clean);
	int end = READ_ONCE(ring->next_to_use);

	return ((end >= begin) ? (ring->desc_num - end + begin) :
			(begin - end)) - 1;
}
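
/* A minimal usage sketch (hypothetical caller, not part of this header): a
 * transmit path would typically check the free BD count returned by
 * ring_space() before queuing a packet, e.g.
 *
 *	if (unlikely(ring_space(ring) < bd_num))
 *		return -EBUSY;
 *
 * where bd_num is the number of descriptors the skb is assumed to need.
 */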

static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
{
	return readl(base + reg);
}

static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
{
	u8 __iomem *reg_addr = READ_ONCE(base);

	writel(value, reg_addr + reg);
}

#define hns3_read_dev(a, reg) \
	hns3_read_reg((a)->io_base, reg)

static inline bool hns3_nic_resetting(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
}

#define hns3_write_dev(a, reg, value) \
	hns3_write_reg((a)->io_base, reg, value)

#define ring_to_dev(ring) ((ring)->dev)

#define ring_to_netdev(ring) ((ring)->tqp_vector->napi.dev)

#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
	DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define hns3_buf_size(_ring) ((_ring)->buf_size)

static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->buf_size > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring))
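
/* Worked example (illustrative only): with 4K pages, a buf_size of 2048 is
 * not larger than PAGE_SIZE / 2, so hns3_page_order() returns 0 and
 * hns3_page_size() is 4096; a buf_size of 4096 gives order 1, i.e. a
 * hns3_page_size() of 8192.
 */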

/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
	for (pos = (head).ring; (pos); pos = (pos)->next)

#define hns3_get_handle(ndev) \
	(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)

#define hns3_gl_usec_to_reg(int_gl) ((int_gl) >> 1)
#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)

#define hns3_rl_usec_to_reg(int_rl) ((int_rl) >> 2)
#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)
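
/* Worked example (assumption: as the shifts suggest, the GL register counts
 * in 2 us units and the RL register in 4 us units): a 20 us GL interval maps
 * to hns3_gl_usec_to_reg(20) = 10, and a 100 us RL interval maps to
 * hns3_rl_usec_to_reg(100) = 25.
 */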

void hns3_ethtool_set_ops(struct net_device *netdev);
int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch);

void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
void hns3_fini_ring(struct hns3_enet_ring *ring);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
bool hns3_is_phys_func(struct pci_dev *pdev);
int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
		       void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value);
void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value);
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value);
void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value);
void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value);

void hns3_request_update_promisc_mode(struct hnae3_handle *handle);

#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
#else
static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {}
#endif

int hns3_dbg_init(struct hnae3_handle *handle);
void hns3_dbg_uninit(struct hnae3_handle *handle);
void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
void hns3_dbg_unregister_debugfs(void);
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
u16 hns3_get_max_available_channels(struct hnae3_handle *h);
void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
			      enum dim_cq_period_mode tx_mode,
			      enum dim_cq_period_mode rx_mode);
#endif