/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_XSK_H_
#define _ICE_XSK_H_
#include "ice_txrx.h"

#define PKTS_PER_BATCH 8

#ifdef __clang__
#define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for
#elif __GNUC__ >= 8
#define loop_unrolled_for _Pragma("GCC unroll 8") for
#else
#define loop_unrolled_for for
#endif
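
/*
 * Illustrative usage sketch (not driver code): loop_unrolled_for is used
 * exactly like a plain for statement; on supported compilers the pragma
 * merely hints that the loop body should be unrolled eight times, matching
 * PKTS_PER_BATCH.  "frames" and "total" below are hypothetical names.
 *
 *	loop_unrolled_for (i = 0; i < PKTS_PER_BATCH; i++)
 *		total += frames[i].len;
 */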

struct ice_vsi;

#ifdef CONFIG_XDP_SOCKETS
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
		       u16 qid);
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
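
/*
 * Consumption sketch (illustrative, hedged): the zero-copy entry points
 * above are typically reached from the driver's ndo_bpf handler, NAPI
 * poll loop and netdev ops; the exact call sites live in ice_main.c and
 * ice_txrx.c and may differ from this rough outline.
 *
 *	// XDP_SETUP_XSK_POOL command of ndo_bpf
 *	err = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id);
 *
 *	// NAPI poll, when the ring has an AF_XDP buffer pool attached
 *	cleaned = ice_clean_rx_irq_zc(rx_ring, budget);
 *	wd = ice_xmit_zc(xdp_ring);
 *
 *	// net_device_ops
 *	.ndo_xsk_wakeup = ice_xsk_wakeup,
 */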
#else
static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)
{
	return false;
}

static inline int
ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
		   struct xsk_buff_pool __always_unused *pool,
		   u16 __always_unused qid)
{
	return -EOPNOTSUPP;
}

static inline int
ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
		    int __always_unused budget)
{
	return 0;
}

static inline bool
ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
		     u16 __always_unused count)
{
	return false;
}

static inline bool ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused *vsi)
{
	return false;
}

static inline int
ice_xsk_wakeup(struct net_device __always_unused *netdev,
	       u32 __always_unused queue_id, u32 __always_unused flags)
{
	return -EOPNOTSUPP;
}

static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }

static inline int
ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
		   bool __always_unused zc)
{
	return 0;
}
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */