/drivers/net/ethernet/mellanox/mlx5/core/fpga/ |
D | tls.c |
     72  struct mlx5_fpga_conn *conn = fdev->tls->conn;  in mlx5_fpga_tls_cmd_complete()
     74  struct mlx5_fpga_tls *tls = fdev->tls;  in mlx5_fpga_tls_cmd_complete() local
     77  spin_lock_irqsave(&tls->pending_cmds_lock, flags);  in mlx5_fpga_tls_cmd_complete()
     78  ctx = list_first_entry(&tls->pending_cmds,  in mlx5_fpga_tls_cmd_complete()
     81  spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);  in mlx5_fpga_tls_cmd_complete()
    103  struct mlx5_fpga_tls *tls = fdev->tls;  in mlx5_fpga_tls_cmd_send() local
    111  spin_lock_irqsave(&tls->pending_cmds_lock, flags);  in mlx5_fpga_tls_cmd_send()
    116  ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf);  in mlx5_fpga_tls_cmd_send()
    118  list_add_tail(&cmd->list, &tls->pending_cmds);  in mlx5_fpga_tls_cmd_send()
    120  complete(tls->conn, fdev, cmd, NULL);  in mlx5_fpga_tls_cmd_send()
    [all …]
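The excerpts above outline the FPGA command path: commands sent toward the device are tracked on tls->pending_cmds under pending_cmds_lock, and completions pull the first pending entry back off that list. Below is a condensed sketch of the send-side pattern only; apart from the calls quoted above (spin_lock_irqsave(), list_add_tail(), the failure-path complete()), every type and helper name is a simplified stand-in, not the driver's real definition.

/* Illustrative sketch: spinlock-protected pending-command list, mirroring
 * the mlx5_fpga_tls_cmd_send() call sites listed above. Not the real
 * mlx5 structures.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct fake_tls_cmd {
	struct list_head list;
	/* ... command buffer, completion callback, ... */
};

struct fake_fpga_tls {
	struct list_head pending_cmds;
	spinlock_t pending_cmds_lock;
};

static void fake_fpga_tls_init(struct fake_fpga_tls *tls)
{
	INIT_LIST_HEAD(&tls->pending_cmds);
	spin_lock_init(&tls->pending_cmds_lock);
}

static void fake_tls_cmd_send(struct fake_fpga_tls *tls,
			      struct fake_tls_cmd *cmd,
			      int (*send)(struct fake_tls_cmd *cmd),
			      void (*complete)(struct fake_tls_cmd *cmd))
{
	unsigned long flags;
	int ret;

	/* Send under the lock so the pending-list order matches the order
	 * commands were handed to the hardware queue; the completion side
	 * can then pop list_first_entry() under the same lock.
	 */
	spin_lock_irqsave(&tls->pending_cmds_lock, flags);
	ret = send(cmd);
	if (!ret)
		list_add_tail(&cmd->list, &tls->pending_cmds);
	spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);

	if (ret)
		complete(cmd);	/* send failed: complete the command immediately */
}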
|
D | tls.h | 68 return mdev->fpga->tls->caps; in mlx5_fpga_tls_device_caps()
|
D | core.h | 62 struct mlx5_fpga_tls *tls; member
|
/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ |
D | tls.c |
    179  atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);  in mlx5e_tls_resync()
    225  struct mlx5e_tls *tls;  in mlx5e_tls_init() local
    230  tls = kzalloc(sizeof(*tls), GFP_KERNEL);  in mlx5e_tls_init()
    231  if (!tls)  in mlx5e_tls_init()
    234  priv->tls = tls;  in mlx5e_tls_init()
    240  struct mlx5e_tls *tls = priv->tls;  in mlx5e_tls_cleanup() local
    242  if (!tls)  in mlx5e_tls_cleanup()
    245  kfree(tls);  in mlx5e_tls_cleanup()
    246  priv->tls = NULL;  in mlx5e_tls_cleanup()
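These hits show the lifetime of the priv->tls pointer: zero-allocated in mlx5e_tls_init(), freed and cleared in mlx5e_tls_cleanup(), with the other accel paths testing it for NULL. A minimal sketch of that pairing follows; "fake_priv" and "fake_tls" are placeholder types, not the driver's mlx5e structures.

/* Sketch of the init/cleanup pairing above: zero-allocate the TLS state,
 * hang it off the private struct, free it and clear the pointer on cleanup.
 */
#include <linux/errno.h>
#include <linux/slab.h>

struct fake_tls {
	/* sw stats, rx workqueue, ... */
	int placeholder;
};

struct fake_priv {
	struct fake_tls *tls;
};

static int fake_tls_init(struct fake_priv *priv)
{
	struct fake_tls *tls;

	tls = kzalloc(sizeof(*tls), GFP_KERNEL);
	if (!tls)
		return -ENOMEM;

	priv->tls = tls;
	return 0;
}

static void fake_tls_cleanup(struct fake_priv *priv)
{
	struct fake_tls *tls = priv->tls;

	if (!tls)
		return;		/* TLS offload was never enabled */

	kfree(tls);
	priv->tls = NULL;	/* later paths test priv->tls before use */
}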
|
D | tls_rxtx.c |
    189  struct mlx5e_tls *tls)  in mlx5e_tls_handle_ooo() argument
    205  atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data);  in mlx5e_tls_handle_ooo()
    219  atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);  in mlx5e_tls_handle_ooo()
    224  atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata);  in mlx5e_tls_handle_ooo()
    232  atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc);  in mlx5e_tls_handle_ooo()
    285  return mlx5e_tls_handle_ooo(context, sq, skb, priv->tls);  in mlx5e_tls_handle_tx_skb()
    288  atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);  in mlx5e_tls_handle_tx_skb()
    334  atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request);  in tls_update_resync_sn()
    363  atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request);  in mlx5e_tls_handle_rx_skb_metadata()
    368  atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail);  in mlx5e_tls_handle_rx_skb_metadata()
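Nearly all of the tls_rxtx.c references are counter bumps in drop and resync paths, which these call sites suggest are lockless atomic64 software stats later read out by tls_stats.c (further down). A minimal sketch of that pattern, assuming such a layout; the field names below are illustrative, not the exact mlx5e_tls_sw_stats definition.

/* Sketch of the software-stats pattern implied above: per-event atomic64
 * counters incremented on the data path, snapshotted on the stats path.
 */
#include <linux/atomic.h>
#include <linux/types.h>

struct fake_tls_sw_stats {
	atomic64_t tx_tls_drop_metadata;
	atomic64_t rx_tls_resync_request;
};

static void fake_tx_drop_metadata(struct fake_tls_sw_stats *stats)
{
	/* hot path: no lock, just an atomic increment */
	atomic64_inc(&stats->tx_tls_drop_metadata);
}

static u64 fake_read_resync_requests(struct fake_tls_sw_stats *stats)
{
	/* stats path: snapshot the counter for ethtool-style reporting */
	return atomic64_read(&stats->rx_tls_resync_request);
}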
|
D | ktls.c |
     98  priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");  in mlx5e_ktls_init_rx()
     99  if (!priv->tls->rx_wq)  in mlx5e_ktls_init_rx()
    105  destroy_workqueue(priv->tls->rx_wq);  in mlx5e_ktls_init_rx()
    121  destroy_workqueue(priv->tls->rx_wq);  in mlx5e_ktls_cleanup_rx()
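Here the kTLS RX side creates a dedicated single-threaded workqueue at init and destroys it on the error and cleanup paths; ktls_rx.c (below) queues resync work onto the same rx_wq. A brief sketch of that lifetime, with everything other than the quoted create_singlethread_workqueue()/destroy_workqueue() calls being a placeholder.

/* Sketch of the rx workqueue lifetime shown above: created once, torn
 * down on error/cleanup. The surrounding type is a stand-in.
 */
#include <linux/errno.h>
#include <linux/workqueue.h>

struct fake_tls_state {
	struct workqueue_struct *rx_wq;
};

static int fake_ktls_init_rx(struct fake_tls_state *tls)
{
	tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
	if (!tls->rx_wq)
		return -ENOMEM;
	return 0;
}

static void fake_ktls_cleanup_rx(struct fake_tls_state *tls)
{
	/* destroy_workqueue() drains any pending resync work first */
	destroy_workqueue(tls->rx_wq);
}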
|
D | tls_stats.c |
     60  if (!priv->tls)  in get_tls_atomic_stats()
     69  if (!priv->tls)  in mlx5e_tls_get_count()
    101  MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,  in mlx5e_tls_get_stats()
|
D | en_accel.h |
    110  struct mlx5e_accel_tx_tls_state tls;  member
    128  if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))  in mlx5e_accel_tx_begin()
    186  mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls);  in mlx5e_accel_tx_finish()
|
D | tls.h | 96 return priv->tls; in mlx5e_is_tls_on()
|
D | ktls_rx.c |
    453  if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))  in resync_queue_get_psv()
    569  queue_work(rule->priv->tls->rx_wq, &rule->work);  in mlx5e_ktls_handle_ctx_completion()
    614  priv_rx->sw_stats = &priv->tls->sw_stats;  in mlx5e_ktls_add_rx()
|
D | ktls_tx.c | 106 priv_tx->sw_stats = &priv->tls->sw_stats; in mlx5e_ktls_add_tx()
|
/drivers/net/ethernet/mellanox/mlx5/core/ |
D | Makefile |
     80  mlx5_core-$(CONFIG_MLX5_FPGA_TLS) += fpga/tls.o
     81  mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o
     88  mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \
|
D | en_stats.c |
   1733  static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)  in MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS() argument
   1738  static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)  in MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS() argument
   1743  static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)  in MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS() argument
   1748  static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }  in MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS() argument
   2240  static MLX5E_DEFINE_STATS_GRP(tls, 0);
   2262  &MLX5E_STATS_GRP(tls),
|
D | en_tx.c |
    236  } else if (unlikely(accel && accel->tls.tls_tisn)) {  in mlx5e_txwqe_build_eseg_csum()
    338  if (accel && accel->tls.tls_tisn)  in mlx5e_tx_wqe_inline_mode()
|
D | en.h | 884 struct mlx5e_tls *tls; member
|
/drivers/net/ethernet/chelsio/inline_crypto/chtls/ |
D | chtls_cm.h |
    101  #define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)
    102  #define skb_ulp_tls_iv_imm(skb) (ULP_SKB_CB(skb)->ulp.tls.iv)
|
D | chtls_io.c |
     57  ULP_SKB_CB(skb)->ulp.tls.iv = 1;  in set_ivs_imm()
     60  ULP_SKB_CB(skb)->ulp.tls.iv = 0;  in set_ivs_imm()
    420  data_type = tls_content_type(ULP_SKB_CB(skb)->ulp.tls.type);  in tls_tx_data_wr()
    862  ULP_SKB_CB(skb)->ulp.tls.ofld = 1;  in get_record_skb()
    863  ULP_SKB_CB(skb)->ulp.tls.type = csk->tlshws.type;  in get_record_skb()
|
D | chtls.h | 433 } tls; member
|
/drivers/net/ethernet/netronome/nfp/ |
D | Makefile | 41 crypto/tls.o
|
/drivers/net/ethernet/chelsio/inline_crypto/ |
D | Kconfig | 47 This flag enables support for kernel tls offload over Chelsio T6
|