| /drivers/net/ethernet/mellanox/mlx5/core/en_accel/ |
| ktls.c |
    155  if (!priv->tls->rx_wq)  in mlx5e_ktls_init_rx()
    177  destroy_workqueue(priv->tls->rx_wq);  in mlx5e_ktls_cleanup_rx()
    191  struct mlx5e_tls *tls;  in mlx5e_ktls_init()  (local)
    196  tls = kzalloc(sizeof(*tls), GFP_KERNEL);  in mlx5e_ktls_init()
    197  if (!tls)  in mlx5e_ktls_init()
    199  tls->mdev = priv->mdev;  in mlx5e_ktls_init()
    201  priv->tls = tls;  in mlx5e_ktls_init()
    210  struct mlx5e_tls *tls = priv->tls;  in mlx5e_ktls_cleanup()  (local)
    216  tls->debugfs.dfs = NULL;  in mlx5e_ktls_cleanup()
    218  kfree(priv->tls);  in mlx5e_ktls_cleanup()
    [all …]
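
Pieced together, the ktls.c hits outline the lifetime of the per-netdev kTLS context: it is allocated and attached to priv->tls in mlx5e_ktls_init() and torn down (debugfs first, then the struct) in mlx5e_ktls_cleanup(). Below is a sketch reconstructed from those fragments; the capability guard, the debugfs call and the NULL checks are assumptions about the code hidden behind "[all …]".

```c
/* Sketch reconstructed from the ktls.c fragments above; lines marked
 * "assumed" are guesses at code elided by the index.
 */
int mlx5e_ktls_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *tls;

	if (!mlx5e_is_ktls_device(priv->mdev))		/* assumed capability guard */
		return 0;

	tls = kzalloc(sizeof(*tls), GFP_KERNEL);	/* line 196 */
	if (!tls)					/* line 197 */
		return -ENOMEM;
	tls->mdev = priv->mdev;				/* line 199 */

	priv->tls = tls;				/* line 201 */
	return 0;
}

void mlx5e_ktls_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *tls = priv->tls;		/* line 210 */

	if (!tls)					/* assumed: kTLS was never set up */
		return;

	debugfs_remove_recursive(tls->debugfs.dfs);	/* assumed debugfs teardown */
	tls->debugfs.dfs = NULL;			/* line 216 */

	kfree(priv->tls);				/* line 218 */
	priv->tls = NULL;				/* assumed */
}
```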
|
| ktls_tx.c |
    466  pool = priv->tls->tx_pool;  in mlx5e_ktls_add_tx()
    518  pool = priv->tls->tx_pool;  in mlx5e_ktls_del_tx()
    906  &tls->tx_pool->size);  in mlx5e_tls_tx_debugfs_init()
    912  struct mlx5e_tls *tls = priv->tls;  in mlx5e_ktls_init_tx()  (local)
    924  tls->dek_pool = dek_pool;  in mlx5e_ktls_init_tx()
    929  priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats);  in mlx5e_ktls_init_tx()
    930  if (!priv->tls->tx_pool) {  in mlx5e_ktls_init_tx()
    935  mlx5e_tls_tx_debugfs_init(tls, tls->debugfs.dfs);  in mlx5e_ktls_init_tx()
    950  priv->tls->debugfs.dfs_tx = NULL;  in mlx5e_ktls_cleanup_tx()
    952  mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool);  in mlx5e_ktls_cleanup_tx()
    [all …]
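
The ktls_tx.c hits show the TX-side bring-up: a DEK (key) pool and a TX TIS pool are created once per netdev and then exposed through debugfs. A sketch of mlx5e_ktls_init_tx() built around the fragments; the guard, the DEK-pool constructor/destructor names and the unwind path are assumptions about the elided code.

```c
int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *tls = priv->tls;		/* line 912 */
	struct mlx5_crypto_dek_pool *dek_pool;

	if (!mlx5e_is_ktls_tx(priv->mdev))		/* assumed guard */
		return 0;

	/* assumed constructor for the pool referenced at line 924 */
	dek_pool = mlx5_crypto_dek_pool_create(priv->mdev, MLX5_ACCEL_OBJ_TLS_KEY);
	if (IS_ERR(dek_pool))
		return PTR_ERR(dek_pool);
	tls->dek_pool = dek_pool;			/* line 924 */

	priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats); /* line 929 */
	if (!priv->tls->tx_pool) {			/* line 930 */
		mlx5_crypto_dek_pool_destroy(dek_pool);	/* assumed unwind */
		return -ENOMEM;
	}

	mlx5e_tls_tx_debugfs_init(tls, tls->debugfs.dfs);	/* line 935 */
	return 0;
}
```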
|
| ktls_stats.c |
    55  if (!priv->tls)  in mlx5e_ktls_get_count()
    65  if (!priv->tls)  in mlx5e_ktls_get_strings()
    78  if (!priv->tls)  in mlx5e_ktls_get_stats()
    86  MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,  in mlx5e_ktls_get_stats()
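
All three ethtool callbacks in ktls_stats.c start with the same guard: when the device never initialized kTLS, priv->tls is NULL and the group reports nothing. A sketch of the stats getter; the descriptor table (name and entry) is an assumption, only the guard and the MLX5E_READ_CTR_ATOMIC64() read come from the fragments.

```c
static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {		/* assumed table */
	{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },	/* assumed entry */
};

int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
{
	int i, n = 0;

	if (!priv->tls)						/* line 78: kTLS never initialized */
		return 0;

	for (i = 0; i < ARRAY_SIZE(mlx5e_ktls_sw_stats_desc); i++)
		data[n++] = MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,	/* line 86 */
						    mlx5e_ktls_sw_stats_desc, i);
	return n;
}
```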
|
| en_accel.h |
    117  struct mlx5e_accel_tx_tls_state tls;  (member)
    136  &state->tls)))  in mlx5e_accel_tx_begin()
    199  mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls);  in mlx5e_accel_tx_finish()
|
| ktls_rx.c |
    475  if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))  in resync_queue_get_psv()
    592  queue_work(rule->priv->tls->rx_wq, &rule->work);  in mlx5e_ktls_handle_ctx_completion()
    638  dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);  in mlx5e_ktls_add_rx()
    653  priv_rx->sw_stats = &priv->tls->sw_stats;  in mlx5e_ktls_add_rx()
    679  mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek);  in mlx5e_ktls_add_rx()
    711  mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek);  in mlx5e_ktls_del_rx()
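
Two patterns are visible in the ktls_rx.c hits: each offloaded RX socket borrows a hardware DEK from the shared dek_pool in mlx5e_ktls_add_rx() and returns it on the error path or in mlx5e_ktls_del_rx(), and resync/completion work is deferred to the rx_wq created in mlx5e_ktls_init_rx(). A condensed sketch of the key handling; the two helper names are hypothetical wrappers around the calls shown.

```c
/* Hypothetical helpers wrapping the calls at lines 638/653 and 679/711. */
static int mlx5e_ktls_rx_set_key(struct mlx5e_priv *priv,
				 struct mlx5e_ktls_offload_context_rx *priv_rx,
				 struct tls_crypto_info *crypto_info)
{
	struct mlx5_crypto_dek *dek;

	dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);	/* line 638 */
	if (IS_ERR(dek))
		return PTR_ERR(dek);

	priv_rx->dek = dek;
	priv_rx->sw_stats = &priv->tls->sw_stats;			/* line 653 */
	return 0;
}

static void mlx5e_ktls_rx_put_key(struct mlx5e_priv *priv,
				  struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	/* shared by the add_rx error path (line 679) and del_rx (line 711) */
	mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek);
}
```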
|
| /drivers/thunderbolt/ |
| xdomain.c |
    505  u8 *sls, u8 *tls)  in tb_xdp_link_state_status_request()  (argument)
    532  *tls = res.tls;  in tb_xdp_link_state_status_request()
    558  res.tls = val[1] & LANE_ADP_CS_1_TARGET_SPEED_MASK;  in tb_xdp_link_state_status_response()
    567  u8 sequence, u8 tlw, u8 tls)  in tb_xdp_link_state_change_request()  (argument)
    577  req.tls = tls;  in tb_xdp_link_state_change_request()
    1251  u8 slw, tlw, sls, tls;  in tb_xdomain_get_link_status()  (local)
    1259  &tls);  in tb_xdomain_get_link_status()
    1285  u8 tlw, tls;  in tb_xdomain_link_state_change()  (local)
    1300  tls = val & LANE_ADP_CS_1_TARGET_SPEED_MASK;  in tb_xdomain_link_state_change()
    1303  tlw, tls);  in tb_xdomain_link_state_change()
    [all …]
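
In the Thunderbolt XDomain code the identifier tls is unrelated to crypto: it is the Target Link Speed field of the lane adapter config space (tlw/slw/sls are the companion width and status fields). A sketch of the extraction pattern seen at lines 558 and 1300; tb_xd_read_lane_cs1() is a hypothetical stand-in for the register read, and the width mask/shift names are assumptions.

```c
static int tb_xd_target_link(struct tb_xdomain *xd, u8 *tlw, u8 *tls)
{
	u32 val;
	int ret;

	ret = tb_xd_read_lane_cs1(xd, &val);		/* hypothetical register read */
	if (ret)
		return ret;

	*tlw = (val & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >>
	       LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;	/* assumed width field */
	*tls = val & LANE_ADP_CS_1_TARGET_SPEED_MASK;	/* speed field, as at line 1300 */
	return 0;
}
```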
|
| tb_msgs.h |
    565  u8 tls;  (member)
    573  u8 tls;  (member)
|
| /drivers/net/ethernet/chelsio/inline_crypto/chtls/ |
| chtls_cm.h |
    101  #define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)
    102  #define skb_ulp_tls_iv_imm(skb) (ULP_SKB_CB(skb)->ulp.tls.iv)
|
| chtls_io.c |
    57  ULP_SKB_CB(skb)->ulp.tls.iv = 1;  in set_ivs_imm()
    60  ULP_SKB_CB(skb)->ulp.tls.iv = 0;  in set_ivs_imm()
    420  data_type = tls_content_type(ULP_SKB_CB(skb)->ulp.tls.type);  in tls_tx_data_wr()
    862  ULP_SKB_CB(skb)->ulp.tls.ofld = 1;  in get_record_skb()
    863  ULP_SKB_CB(skb)->ulp.tls.type = csk->tlshws.type;  in get_record_skb()
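
chtls keeps its per-record TLS state in the skb control block; the two macros in chtls_cm.h are just readers for bits set in chtls_io.c. A sketch of the tagging done in get_record_skb(); chtls_mark_record() is a hypothetical wrapper around the two assignments at lines 862-863.

```c
/* Hypothetical wrapper around the assignments at lines 862-863. */
static void chtls_mark_record(struct sk_buff *skb, struct chtls_sock *csk)
{
	ULP_SKB_CB(skb)->ulp.tls.ofld = 1;			/* record takes the inline-TLS path */
	ULP_SKB_CB(skb)->ulp.tls.type = csk->tlshws.type;	/* TLS content type for the work request */
}

/* The TX path later keys off the same bits through the chtls_cm.h macros:
 *   skb_ulp_tls_inline(skb) -> ULP_SKB_CB(skb)->ulp.tls.ofld
 *   skb_ulp_tls_iv_imm(skb) -> ULP_SKB_CB(skb)->ulp.tls.iv (set in set_ivs_imm())
 */
```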
|
| chtls.h |
    433  } tls;  (member)
|
| /drivers/net/ethernet/chelsio/inline_crypto/ |
| Kconfig |
    47  This flag enables support for kernel tls offload over Chelsio T6
|
| /drivers/net/ethernet/netronome/nfp/ |
| Makefile |
    49  crypto/tls.o
|
| /drivers/nvme/host/ |
| fabrics.h |
    134  bool tls;  (member)
|
| fabrics.c |
    737  opts->tls = false;  in nvmf_parse_options()
    1057  opts->tls = true;  in nvmf_parse_options()
    1094  if (opts->tls) {  in nvmf_parse_options()
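
The fabrics.c hits follow the usual option-parsing pattern: opts->tls defaults to false near the top of nvmf_parse_options(), is flipped to true when the "tls" token is seen, and is validated after the parse loop. A condensed sketch of the enabling step; nvmf_set_tls_opt() is a hypothetical refactoring of the inline case label, and the build-time guard is an assumption.

```c
/* Hypothetical helper standing in for the "tls" case label at line 1057. */
static int nvmf_set_tls_opt(struct nvmf_ctrl_options *opts)
{
	if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) {		/* assumed guard */
		pr_err("TLS is not supported\n");
		return -EINVAL;
	}
	opts->tls = true;				/* line 1057 */
	return 0;
}
```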
|
| sysfs.c |
    838  !ctrl->opts->tls && !ctrl->opts->concat)  in nvme_tls_attrs_are_visible()
|
| tcp.c |
    251  return ctrl->opts->tls || ctrl->opts->concat;  in nvme_tcp_tls_configured()
    2071  else if (ctrl->opts->tls) {  in nvme_tcp_alloc_admin_queue()
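
nvme-tcp treats either an explicit tls option or secure channel concatenation (concat) as a request for a TLS-wrapped queue, which is what the predicate at line 251 expresses. A minimal sketch; the build-time guard is an assumption.

```c
static bool nvme_tcp_tls_configured(struct nvme_ctrl *ctrl)
{
	if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))		/* assumed guard */
		return false;

	return ctrl->opts->tls || ctrl->opts->concat;	/* line 251 */
}
```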
|
| /drivers/net/ethernet/fungible/funeth/ |
| funeth_tx.c |
    280  struct fun_eth_tls *tls = (struct fun_eth_tls *)gle;  in write_pkt_desc()  (local)
    287  tls->tlsid = tls_ctx->tlsid;  in write_pkt_desc()
|
| /drivers/net/ethernet/mellanox/mlx5/core/ |
| en_stats.c |
    2066  static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)  in MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS()  (argument)
    2071  static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)  in MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS()  (argument)
    2076  static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)  in MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS()  (argument)
    2081  static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }  in MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS()  (argument)
    2585  static MLX5E_DEFINE_STATS_GRP(tls, 0);
    2607  &MLX5E_STATS_GRP(tls),
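
The en_stats.c group declared at lines 2066-2081 is thin glue: each MLX5E_DECLARE_STATS_GRP_OP_* body presumably delegates to the matching mlx5e_ktls_get_* helper from ktls_stats.c, and the UPDATE_STATS op is an empty stub because the counters are atomics updated inline. A sketch of two of the ops; the delegation body is an assumption.

```c
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
	return mlx5e_ktls_get_count(priv);		/* assumed delegation to ktls_stats.c */
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }	/* line 2081: nothing to refresh */
```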
|
| en_tx.c |
    137  } else if (unlikely(accel && accel->tls.tls_tisn)) {  in mlx5e_txwqe_build_eseg_csum()
    253  if (accel && accel->tls.tls_tisn)  in mlx5e_tx_wqe_inline_mode()
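
Both en_tx.c hits test the same signal: a nonzero accel->tls.tls_tisn marks the skb as riding a kTLS TIS, which changes how the checksum flags and the WQE inline mode are chosen. A sketch of the inline-mode decision at line 253; the return values and the elided VLAN handling are assumptions.

```c
static u8 mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
				   struct mlx5e_accel_tx_state *accel)
{
	if (accel && accel->tls.tls_tisn)		/* line 253: packet belongs to a kTLS TIS */
		return MLX5_INLINE_MODE_TCP_UDP;	/* assumed: keep headers up to L4 inline */

	return sq->min_inline_mode;			/* assumed default; VLAN special case elided */
}
```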
|
| en.h |
    940  struct mlx5e_tls *tls;  (member)
|