/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef __MLX5_EN_ACCEL_MACSEC_H__
#define __MLX5_EN_ACCEL_MACSEC_H__

#ifdef CONFIG_MLX5_EN_MACSEC

#include <linux/mlx5/driver.h>
#include <net/macsec.h>
#include <net/dst_metadata.h>

/* Bits 31-30: MACsec marker, bits 15-0: MACsec fs id */
#define MLX5_MACEC_RX_FS_ID_MAX USHRT_MAX /* Must be power of two */
#define MLX5_MACSEC_RX_FS_ID_MASK MLX5_MACEC_RX_FS_ID_MAX
#define MLX5_MACSEC_METADATA_MARKER(metadata)  ((((metadata) >> 30) & 0x3) == 0x1)
#define MLX5_MACSEC_RX_METADAT_HANDLE(metadata)  ((metadata) & MLX5_MACSEC_RX_FS_ID_MASK)
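/* Example (illustrative only): for a CQE metadata value of 0x4000002a,
 * MLX5_MACSEC_METADATA_MARKER() is true (bits 31:30 == 0x1) and
 * MLX5_MACSEC_RX_METADAT_HANDLE() extracts fs id 0x2a (bits 15:0).
 */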

struct mlx5e_priv;
struct mlx5e_macsec;

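/* Aggregated MACsec offload counters: RX/TX packets and bytes, plus drops. */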
struct mlx5e_macsec_stats {
	u64 macsec_rx_pkts;
	u64 macsec_rx_bytes;
	u64 macsec_rx_pkts_drop;
	u64 macsec_rx_bytes_drop;
	u64 macsec_tx_pkts;
	u64 macsec_tx_bytes;
	u64 macsec_tx_pkts_drop;
	u64 macsec_tx_bytes_drop;
};

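/* Device setup and TX datapath hooks. mlx5e_macsec_handle_tx_skb() checks an
 * offloaded skb before transmission and mlx5e_macsec_tx_build_eseg() fills the
 * WQE Ethernet segment for it (descriptions inferred from the names; see the
 * implementation for details).
 */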
void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv);
int mlx5e_macsec_init(struct mlx5e_priv *priv);
void mlx5e_macsec_cleanup(struct mlx5e_priv *priv);
bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb);
void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
				struct sk_buff *skb,
				struct mlx5_wqe_eth_seg *eseg);

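/* True when the skb carries a MACsec metadata dst, i.e. the stack marked it
 * for MACsec offload.
 */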
static inline bool mlx5e_macsec_skb_is_offload(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	return md_dst && (md_dst->type == METADATA_MACSEC);
}

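/* True when the CQE flow-table metadata carries the MACsec marker in
 * bits 31:30, i.e. the packet was tagged by the MACsec RX steering rules.
 */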
static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe)
{
	return MLX5_MACSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
}

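/* RX datapath and statistics helpers. mlx5e_macsec_offload_handle_rx_skb()
 * restores the MACsec metadata dst on a received skb from the CQE metadata
 * (description inferred from the name; see the implementation for details).
 */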
void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
					struct mlx5_cqe64 *cqe);
bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev);
void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats);
struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec);

#else

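/* Stubs for builds without CONFIG_MLX5_EN_MACSEC. */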
static inline void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv) {}
static inline int mlx5e_macsec_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_macsec_cleanup(struct mlx5e_priv *priv) {}
static inline bool mlx5e_macsec_skb_is_offload(struct sk_buff *skb) { return false; }
static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
static inline void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
						      struct sk_buff *skb,
						      struct mlx5_cqe64 *cqe)
{}
static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) { return false; }
#endif  /* CONFIG_MLX5_EN_MACSEC */

#endif	/* __MLX5_EN_ACCEL_MACSEC_H__ */