
Searched refs:flow (Results 1 – 25 of 212) sorted by relevance


/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
flowring.c
43 brcmf_flowring_is_tdls_mac(struct brcmf_flowring *flow, u8 mac[ETH_ALEN]) in brcmf_flowring_is_tdls_mac() argument
47 search = flow->tdls_entry; in brcmf_flowring_is_tdls_mac()
59 u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN], in brcmf_flowring_lookup() argument
71 sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT); in brcmf_flowring_lookup()
77 if ((sta) && (flow->tdls_active) && in brcmf_flowring_lookup()
78 (brcmf_flowring_is_tdls_mac(flow, da))) { in brcmf_flowring_lookup()
85 hash = flow->hash; in brcmf_flowring_lookup()
103 u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN], in brcmf_flowring_create() argument
116 sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT); in brcmf_flowring_create()
122 if ((sta) && (flow->tdls_active) && in brcmf_flowring_create()
[all …]
flowring.h
50 u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
52 u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
54 void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid);
55 void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid);
56 u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid);
57 u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
59 struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid);
60 void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
62 u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid);
63 u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid);
[all …]
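
The flowring.h declarations above sketch the flowring lifecycle: look up the ring that serves a destination, create one if the lookup misses, then enqueue/dequeue sk_buffs on the returned id. A minimal usage sketch; the prio/ifidx parameters and the BRCMF_FLOWRING_INVALID_ID miss sentinel are assumptions, since the prototypes shown above are truncated:

/* Hypothetical TX-path helper, not driver code. */
static u32 tx_resolve_flowring(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
			       u8 prio, u8 ifidx)
{
	u32 flowid = brcmf_flowring_lookup(flow, da, prio, ifidx);

	if (flowid == BRCMF_FLOWRING_INVALID_ID)	/* assumed sentinel */
		flowid = brcmf_flowring_create(flow, da, prio, ifidx);
	return flowid;
}
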
/drivers/gpu/ipu-v3/
ipu-dp.c
46 u32 flow; member
64 struct ipu_flow flow[IPUV3_NUM_FLOWS]; member
82 struct ipu_flow *flow = to_flow(dp); in ipu_dp_set_global_alpha() local
83 struct ipu_dp_priv *priv = flow->priv; in ipu_dp_set_global_alpha()
88 reg = readl(flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha()
93 writel(reg, flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha()
96 reg = readl(flow->base + DP_GRAPH_WIND_CTRL) & 0x00FFFFFFL; in ipu_dp_set_global_alpha()
98 flow->base + DP_GRAPH_WIND_CTRL); in ipu_dp_set_global_alpha()
100 reg = readl(flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha()
101 writel(reg | DP_COM_CONF_GWAM, flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha()
[all …]
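
The ipu_dp_set_global_alpha() fragment above is a classic read-modify-write sequence on memory-mapped registers: readl() the current value, mask and merge bits, writel() it back. A self-contained model of the pattern (nothing here is the IPU's actual register layout):

#include <stdint.h>

/* readl()/writel() analogue: load, clear some bits, set others, store */
static uint32_t reg_update(volatile uint32_t *reg, uint32_t clear, uint32_t set)
{
	uint32_t val = *reg;	/* readl(flow->base + offset) */

	val &= ~clear;
	val |= set;
	*reg = val;		/* writel(val, flow->base + offset) */
	return val;
}

The GWAM enable at lines 100-101 of the snippet is this shape with clear == 0 and set == DP_COM_CONF_GWAM.
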
/drivers/net/phy/mscc/
mscc_macsec.c
371 struct macsec_flow *flow) in vsc8584_macsec_flow() argument
374 enum macsec_bank bank = flow->bank; in vsc8584_macsec_flow()
375 u32 val, match = 0, mask = 0, action = 0, idx = flow->index; in vsc8584_macsec_flow()
377 if (flow->match.tagged) in vsc8584_macsec_flow()
379 if (flow->match.untagged) in vsc8584_macsec_flow()
382 if (bank == MACSEC_INGR && flow->assoc_num >= 0) { in vsc8584_macsec_flow()
383 match |= MSCC_MS_SAM_MISC_MATCH_AN(flow->assoc_num); in vsc8584_macsec_flow()
387 if (bank == MACSEC_INGR && flow->match.sci && flow->rx_sa->sc->sci) { in vsc8584_macsec_flow()
388 u64 sci = (__force u64)flow->rx_sa->sc->sci; in vsc8584_macsec_flow()
400 if (flow->match.etype) { in vsc8584_macsec_flow()
[all …]
/drivers/net/ethernet/mellanox/mlx5/core/en/
tc_priv.h
130 struct mlx5e_tc_flow *flow,
135 mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow);
137 void mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow);
138 int mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow);
140 bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
141 bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
142 bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
143 int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow);
146 static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag) in __flow_flag_set() argument
150 set_bit(flag, &flow->flags); in __flow_flag_set()
[all …]
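
__flow_flag_set() above relies on set_bit(), an atomic OR into the flow's flags word, so concurrent setters of different flags cannot lose each other's updates. The same idea in portable C11 atomics:

#include <stdatomic.h>

struct flow_model {
	atomic_ulong flags;
};

/* set_bit() analogue: an atomic OR makes concurrent setters safe */
static void flow_flag_set_model(struct flow_model *f, unsigned int flag)
{
	atomic_fetch_or(&f->flags, 1UL << flag);
}
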
tc_tun_encap.c
74 int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow, in mlx5e_tc_set_attr_rx_tun() argument
77 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr; in mlx5e_tc_set_attr_rx_tun()
117 flow_flag_set(flow, TUN_RX); in mlx5e_tc_set_attr_rx_tun()
118 flow->attr->tun_ip_version = ip_version; in mlx5e_tc_set_attr_rx_tun()
153 struct mlx5e_tc_flow *flow; in mlx5e_tc_encap_flows_add() local
174 list_for_each_entry(flow, flow_list, tmp_list) { in mlx5e_tc_encap_flows_add()
175 if (!mlx5e_is_offloaded_flow(flow) || !flow_flag_test(flow, SLOW)) in mlx5e_tc_encap_flows_add()
178 spec = &flow->attr->parse_attr->spec; in mlx5e_tc_encap_flows_add()
180 attr = mlx5e_tc_get_encap_attr(flow); in mlx5e_tc_encap_flows_add()
182 esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat; in mlx5e_tc_encap_flows_add()
[all …]
tc_tun_encap.h
10 struct mlx5e_tc_flow *flow,
15 struct mlx5e_tc_flow *flow,
23 struct mlx5e_tc_flow *flow,
26 struct mlx5e_tc_flow *flow);
29 struct mlx5e_tc_flow *flow);
31 struct mlx5e_tc_flow *flow);
34 struct mlx5e_tc_flow *flow,
39 struct mlx5e_tc_flow *flow,
44 int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
/drivers/net/ethernet/broadcom/bnxt/
bnxt_tc.c
370 struct bnxt_tc_flow *flow) in bnxt_tc_parse_flow() argument
387 flow->l2_key.ether_type = match.key->n_proto; in bnxt_tc_parse_flow()
388 flow->l2_mask.ether_type = match.mask->n_proto; in bnxt_tc_parse_flow()
392 flow->l4_key.ip_proto = match.key->ip_proto; in bnxt_tc_parse_flow()
393 flow->l4_mask.ip_proto = match.mask->ip_proto; in bnxt_tc_parse_flow()
401 flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS; in bnxt_tc_parse_flow()
402 ether_addr_copy(flow->l2_key.dmac, match.key->dst); in bnxt_tc_parse_flow()
403 ether_addr_copy(flow->l2_mask.dmac, match.mask->dst); in bnxt_tc_parse_flow()
404 ether_addr_copy(flow->l2_key.smac, match.key->src); in bnxt_tc_parse_flow()
405 ether_addr_copy(flow->l2_mask.smac, match.mask->src); in bnxt_tc_parse_flow()
[all …]
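
bnxt_tc_parse_flow() above stores every matched field twice, once as a key and once as a mask; a packet then matches when its bits agree with the key wherever the mask is set. That test, self-contained:

#include <stdint.h>

/* A field matches when (pkt & mask) == (key & mask) */
static int dmac_matches(const uint8_t key[6], const uint8_t mask[6],
			const uint8_t pkt[6])
{
	int i;

	for (i = 0; i < 6; i++)
		if ((pkt[i] & mask[i]) != (key[i] & mask[i]))
			return 0;
	return 1;
}
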
/drivers/net/ethernet/mellanox/mlx5/core/
en_tc.c
178 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
179 static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
180 static void mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow,
603 struct mlx5e_tc_flow *flow);
605 struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow) in mlx5e_flow_get() argument
607 if (!flow || !refcount_inc_not_zero(&flow->refcnt)) in mlx5e_flow_get()
609 return flow; in mlx5e_flow_get()
612 void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) in mlx5e_flow_put() argument
614 if (refcount_dec_and_test(&flow->refcnt)) { in mlx5e_flow_put()
615 mlx5e_tc_del_flow(priv, flow); in mlx5e_flow_put()
[all …]
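
mlx5e_flow_get()/mlx5e_flow_put() above are the standard inc-not-zero / dec-and-test refcount pair: a lookup may take a reference only while the count is still nonzero, and whoever drops the count to zero deletes the flow. Modeled with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

/* refcount_inc_not_zero(): take a ref only if the object is still live */
static bool ref_get(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	while (old != 0)
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return true;
	return false;	/* already on its way to being freed */
}

/* refcount_dec_and_test(): true when the caller dropped the last ref */
static bool ref_put(atomic_int *refcnt)
{
	return atomic_fetch_sub(refcnt, 1) == 1;
}
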
/drivers/infiniband/hw/hfi1/
tid_rdma.c
134 struct tid_rdma_flow *flow,
881 static u32 tid_rdma_find_phys_blocks_4k(struct tid_rdma_flow *flow, in tid_rdma_find_phys_blocks_4k() argument
898 trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr); in tid_rdma_find_phys_blocks_4k()
901 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0, in tid_rdma_find_phys_blocks_4k()
935 trace_hfi1_tid_pageset(flow->req->qp, setcount, in tid_rdma_find_phys_blocks_4k()
1020 static u32 tid_rdma_find_phys_blocks_8k(struct tid_rdma_flow *flow, in tid_rdma_find_phys_blocks_8k() argument
1034 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0); in tid_rdma_find_phys_blocks_8k()
1037 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1); in tid_rdma_find_phys_blocks_8k()
1087 static u32 kern_find_pages(struct tid_rdma_flow *flow, in kern_find_pages() argument
1091 struct tid_rdma_request *req = flow->req; in kern_find_pages()
[all …]
trace_tid.h
416 TP_PROTO(struct rvt_qp *qp, struct tid_rdma_flow *flow, u32 index,
418 TP_ARGS(qp, flow, index, mtu8k, v1, vaddr),
450 TP_PROTO(struct rvt_qp *qp, struct tid_rdma_flow *flow, u32 index,
452 TP_ARGS(qp, flow, index, mtu8k, v1, vaddr)
491 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
492 TP_ARGS(qp, index, flow),
517 __entry->idx = flow->idx;
518 __entry->resp_ib_psn = flow->flow_state.resp_ib_psn;
519 __entry->generation = flow->flow_state.generation;
520 __entry->fspsn = full_flow_psn(flow,
[all …]
/drivers/infiniband/hw/usnic/
usnic_fwd.c
203 struct usnic_fwd_flow *flow; in usnic_fwd_alloc_flow() local
213 flow = kzalloc(sizeof(*flow), GFP_ATOMIC); in usnic_fwd_alloc_flow()
214 if (!flow) in usnic_fwd_alloc_flow()
255 flow->flow_id = (uint32_t) a0; in usnic_fwd_alloc_flow()
256 flow->vnic_idx = uaction->vnic_idx; in usnic_fwd_alloc_flow()
257 flow->ufdev = ufdev; in usnic_fwd_alloc_flow()
263 return flow; in usnic_fwd_alloc_flow()
265 kfree(flow); in usnic_fwd_alloc_flow()
269 int usnic_fwd_dealloc_flow(struct usnic_fwd_flow *flow) in usnic_fwd_dealloc_flow() argument
274 a0 = flow->flow_id; in usnic_fwd_dealloc_flow()
[all …]
usnic_ib_qp_grp.c
89 default_flow->flow->flow_id); in usnic_ib_qp_grp_dump_rows()
211 struct usnic_fwd_flow *flow; in create_roce_custom_flow() local
228 flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction); in create_roce_custom_flow()
229 if (IS_ERR_OR_NULL(flow)) { in create_roce_custom_flow()
230 err = flow ? PTR_ERR(flow) : -EFAULT; in create_roce_custom_flow()
240 qp_flow->flow = flow; in create_roce_custom_flow()
247 usnic_fwd_dealloc_flow(flow); in create_roce_custom_flow()
255 usnic_fwd_dealloc_flow(qp_flow->flow); in release_roce_custom_flow()
271 struct usnic_fwd_flow *flow; in create_udp_flow() local
301 flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction); in create_udp_flow()
[all …]
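
The "err = flow ? PTR_ERR(flow) : -EFAULT" line above works because the kernel encodes small negative errnos in the pointer value itself (the top 4095 addresses are never valid). A standalone model of that convention:

#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long err)		{ return (void *)err; }
static inline long ptr_err(const void *p)	{ return (long)p; }

/* IS_ERR_OR_NULL() analogue */
static inline int is_err_or_null(const void *p)
{
	return !p || (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}
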
/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/
Dgoto.c10 struct mlx5e_tc_flow *flow, in validate_goto_chain() argument
16 bool is_esw = mlx5e_is_eswitch_flow(flow); in validate_goto_chain()
17 bool ft_flow = mlx5e_is_ft_flow(flow); in validate_goto_chain()
66 struct mlx5e_tc_flow *flow = parse_state->flow; in tc_act_can_offload_goto() local
68 if (validate_goto_chain(flow->priv, flow, attr, act, extack)) in tc_act_can_offload_goto()
93 struct mlx5e_tc_flow *flow = parse_state->flow; in tc_act_post_parse_goto() local
112 if (!mlx5e_is_eswitch_flow(flow) && parse_attr->mirred_ifindex[0]) { in tc_act_post_parse_goto()
act.c
75 struct mlx5e_tc_flow *flow, in mlx5e_tc_act_init_parse_state() argument
80 parse_state->flow = flow; in mlx5e_tc_act_init_parse_state()
96 priv = parse_state->flow->priv; in mlx5e_tc_act_post_parse()
118 mlx5e_tc_act_set_next_post_act(struct mlx5e_tc_flow *flow, in mlx5e_tc_act_set_next_post_act() argument
122 struct mlx5_core_dev *mdev = flow->priv->mdev; in mlx5e_tc_act_set_next_post_act()
/drivers/dma/ti/
k3-udma-glue.c
645 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_release_rx_flow() local
647 if (IS_ERR_OR_NULL(flow->udma_rflow)) in k3_udma_glue_release_rx_flow()
650 if (flow->ringrxfdq) in k3_udma_glue_release_rx_flow()
651 k3_ringacc_ring_free(flow->ringrxfdq); in k3_udma_glue_release_rx_flow()
653 if (flow->ringrx) in k3_udma_glue_release_rx_flow()
654 k3_ringacc_ring_free(flow->ringrx); in k3_udma_glue_release_rx_flow()
656 xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); in k3_udma_glue_release_rx_flow()
657 flow->udma_rflow = NULL; in k3_udma_glue_release_rx_flow()
665 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_cfg_rx_flow() local
673 flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax, in k3_udma_glue_cfg_rx_flow()
[all …]
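
k3_udma_glue_release_rx_flow() above frees each ring only if it was actually set up, then clears the rflow pointer so a second release is harmless. The defensive shape, with placeholder types standing in for the ringacc API:

#include <stdlib.h>

struct rx_flow_model {
	void *ringrx;		/* stand-ins for k3_ringacc rings */
	void *ringrxfdq;
};

/* Free only what was set up, then clear, so release is idempotent */
static void release_rx_flow(struct rx_flow_model *flow)
{
	if (flow->ringrxfdq) {
		free(flow->ringrxfdq);	/* k3_ringacc_ring_free() in the driver */
		flow->ringrxfdq = NULL;
	}
	if (flow->ringrx) {
		free(flow->ringrx);
		flow->ringrx = NULL;
	}
}
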
/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_flows.c
461 static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow) in otx2_add_flow_to_list() argument
467 if (iter->location > flow->location) in otx2_add_flow_to_list()
472 list_add(&flow->list, head); in otx2_add_flow_to_list()
966 static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow) in otx2_add_flow_msg() argument
968 u64 ring_cookie = flow->flow_spec.ring_cookie; in otx2_add_flow_msg()
982 err = otx2_prepare_flow_request(&flow->flow_spec, req); in otx2_add_flow_msg()
990 req->entry = flow->entry; in otx2_add_flow_msg()
1000 if (flow->flow_spec.flow_type & FLOW_RSS) { in otx2_add_flow_msg()
1002 req->index = flow->rss_ctx_id; in otx2_add_flow_msg()
1023 flow->rule_type |= PFC_FLOWCTRL_RULE; in otx2_add_flow_msg()
[all …]
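
otx2_add_flow_to_list() above keeps the flow list sorted by location: walk until the first entry with a larger location, then insert in front of it. The same insertion on a plain singly-linked list:

struct flow_node {
	unsigned int location;
	struct flow_node *next;
};

/* Insert keeping ascending order by location */
static void add_flow_sorted(struct flow_node **head, struct flow_node *flow)
{
	struct flow_node **p = head;

	while (*p && (*p)->location <= flow->location)
		p = &(*p)->next;
	flow->next = *p;
	*p = flow;
}
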
/drivers/net/ethernet/marvell/mvpp2/
mvpp2_cls.c
472 const struct mvpp2_cls_flow *flow) in mvpp2_cls_flow_prs_init() argument
474 mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri, in mvpp2_cls_flow_prs_init()
475 flow->prs_ri.ri_mask); in mvpp2_cls_flow_prs_init()
480 const struct mvpp2_cls_flow *flow) in mvpp2_cls_flow_lkp_init() argument
485 le.lkpid = flow->flow_id; in mvpp2_cls_flow_lkp_init()
493 le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_CLS_FLT_FIRST(flow->flow_id)); in mvpp2_cls_flow_lkp_init()
583 const struct mvpp2_cls_flow *flow) in mvpp2_cls_flow_init() argument
589 for (i = MVPP2_CLS_FLT_FIRST(flow->flow_id); in mvpp2_cls_flow_init()
590 i <= MVPP2_CLS_FLT_LAST(flow->flow_id); i++) { in mvpp2_cls_flow_init()
595 if (i == MVPP2_CLS_FLT_LAST(flow->flow_id)) in mvpp2_cls_flow_init()
[all …]
/drivers/net/ethernet/netronome/nfp/flower/
offload.c
585 nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow, in nfp_flower_update_merge_with_actions() argument
604 while (act_off < flow->meta.act_len) { in nfp_flower_update_merge_with_actions()
605 a = (struct nfp_fl_act_head *)&flow->action_data[act_off]; in nfp_flower_update_merge_with_actions()
701 nfp_flower_populate_merge_match(struct nfp_fl_payload *flow, in nfp_flower_populate_merge_match() argument
706 u8 *mask = flow->mask_data; in nfp_flower_populate_merge_match()
967 if (link->sub_flow.flow == sub_flow) { in nfp_flower_unlink_flows()
982 link->merge_flow.flow = merge_flow; in nfp_flower_link_flows()
984 link->sub_flow.flow = sub_flow; in nfp_flower_link_flows()
1124 struct nfp_fl_payload *flow, in nfp_flower_validate_pre_tun_rule() argument
1131 u8 *ext = flow->unmasked_data; in nfp_flower_validate_pre_tun_rule()
[all …]
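
nfp_flower_update_merge_with_actions() above walks a packed buffer of variable-length actions; each action starts with a header carrying its own length, and act_off advances by that length until act_len is consumed. A sketch of the walk (the header layout here is illustrative, not the firmware ABI):

#include <stddef.h>
#include <stdint.h>

struct act_head_model {
	uint8_t jump_id;
	uint8_t len_lw;		/* action length in 4-byte words (assumed) */
};

static void walk_actions(uint8_t *action_data, size_t act_len)
{
	size_t act_off = 0;

	while (act_off < act_len) {
		struct act_head_model *a =
			(struct act_head_model *)&action_data[act_off];

		/* ...inspect or rewrite this action... */
		act_off += (size_t)a->len_lw * 4;
	}
}
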
tunnel_conf.c
336 if (neigh->flow) in nfp_tun_mutual_link()
353 neigh->flow = predt; in nfp_tun_mutual_link()
430 if (neigh->flow) in nfp_tun_cleanup_nn_entries()
450 neigh->flow = NULL; in nfp_tun_unlink_and_update_nn_entries()
467 void *flow, struct neighbour *neigh, bool is_ipv6, in nfp_tun_write_neigh() argument
505 nn_entry->flow = NULL; in nfp_tun_write_neigh()
507 struct flowi6 *flowi6 = (struct flowi6 *)flow; in nfp_tun_write_neigh()
518 struct flowi4 *flowi4 = (struct flowi4 *)flow; in nfp_tun_write_neigh()
550 struct flowi6 *flowi6 = (struct flowi6 *)flow; in nfp_tun_write_neigh()
558 struct flowi4 *flowi4 = (struct flowi4 *)flow; in nfp_tun_write_neigh()
[all …]
/drivers/net/ethernet/intel/ice/
ice_ethtool_fdir.c
41 static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow) in ice_fltr_to_ethtool_flow() argument
43 switch (flow) { in ice_fltr_to_ethtool_flow()
292 int status, flow; in ice_fdir_rem_adq_chnl() local
297 for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) { in ice_fdir_rem_adq_chnl()
298 struct ice_fd_hw_prof *prof = hw->fdir_prof[flow]; in ice_fdir_rem_adq_chnl()
307 prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; in ice_fdir_rem_adq_chnl()
341 ice_fdir_get_hw_prof(struct ice_hw *hw, enum ice_block blk, int flow) in ice_fdir_get_hw_prof() argument
344 return hw->fdir_prof[flow]; in ice_fdir_get_hw_prof()
356 ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow) in ice_fdir_erase_flow_from_hw() argument
358 struct ice_fd_hw_prof *prof = ice_fdir_get_hw_prof(hw, blk, flow); in ice_fdir_erase_flow_from_hw()
[all …]
ice_virtchnl_fdir.c
22 #define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \ argument
23 ((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
171 ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow) in ice_vc_fdir_alloc_prof() argument
184 if (!fdir->fdir_prof[flow]) { in ice_vc_fdir_alloc_prof()
185 fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf), in ice_vc_fdir_alloc_prof()
188 if (!fdir->fdir_prof[flow]) in ice_vc_fdir_alloc_prof()
201 ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow) in ice_vc_fdir_free_prof() argument
208 if (!fdir->fdir_prof[flow]) in ice_vc_fdir_free_prof()
211 devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]); in ice_vc_fdir_free_prof()
212 fdir->fdir_prof[flow] = NULL; in ice_vc_fdir_free_prof()
[all …]
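
ice_vc_fdir_alloc_prof() above allocates the per-flow-type profile only on first use, and ice_vc_fdir_free_prof() frees it and clears the table slot so no stale pointer survives. The shape in plain C:

#include <stdlib.h>

struct fd_prof { int nentries; };	/* placeholder contents */

/* Allocate the slot lazily; NULL means allocation failure */
static struct fd_prof *prof_get_or_alloc(struct fd_prof **table, int flow)
{
	if (!table[flow])
		table[flow] = calloc(1, sizeof(**table));
	return table[flow];
}

static void prof_free(struct fd_prof **table, int flow)
{
	free(table[flow]);
	table[flow] = NULL;	/* as the driver does after devm_kfree() */
}
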
/drivers/net/xen-netback/
hash.c
153 struct flow_keys flow; in xenvif_set_skb_hash() local
182 memset(&flow, 0, sizeof(flow)); in xenvif_set_skb_hash()
183 if (!skb_flow_dissect_flow_keys(skb, &flow, 0)) in xenvif_set_skb_hash()
186 has_tcp_hdr = (flow.basic.ip_proto == IPPROTO_TCP) && in xenvif_set_skb_hash()
187 !(flow.control.flags & FLOW_DIS_IS_FRAGMENT); in xenvif_set_skb_hash()
195 memcpy(&data[0], &flow.addrs.v4addrs.src, 4); in xenvif_set_skb_hash()
196 memcpy(&data[4], &flow.addrs.v4addrs.dst, 4); in xenvif_set_skb_hash()
197 memcpy(&data[8], &flow.ports.src, 2); in xenvif_set_skb_hash()
198 memcpy(&data[10], &flow.ports.dst, 2); in xenvif_set_skb_hash()
205 memcpy(&data[0], &flow.addrs.v4addrs.src, 4); in xenvif_set_skb_hash()
[all …]
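
xenvif_set_skb_hash() above dissects the skb and then packs the fields feeding the Toeplitz hash into a flat buffer: addresses plus ports (12 bytes) when there is a TCP header, addresses alone otherwise. The packing step on its own:

#include <stdint.h>
#include <string.h>

/* TCP/IPv4 hash input: saddr | daddr | sport | dport, 12 bytes */
static size_t pack_v4_tcp_hash_input(uint8_t data[12],
				     uint32_t saddr, uint32_t daddr,
				     uint16_t sport, uint16_t dport)
{
	memcpy(&data[0], &saddr, 4);
	memcpy(&data[4], &daddr, 4);
	memcpy(&data[8], &sport, 2);
	memcpy(&data[10], &dport, 2);
	return 12;
}
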
/drivers/crypto/amlogic/
amlogic-gxl-cipher.c
22 return atomic_inc_return(&mc->flow) % MAXFLOW; in get_engine_number()
92 int flow = rctx->flow; in meson_cipher() local
110 op->keylen, flow); in meson_cipher()
114 mc->chanlist[flow].stat_req++; in meson_cipher()
164 desc = &mc->chanlist[flow].tl[tloffset]; in meson_cipher()
207 desc = &mc->chanlist[flow].tl[tloffset]; in meson_cipher()
226 reinit_completion(&mc->chanlist[flow].complete); in meson_cipher()
227 mc->chanlist[flow].status = 0; in meson_cipher()
228 writel(mc->chanlist[flow].t_phy | 2, mc->base + (flow << 2)); in meson_cipher()
229 wait_for_completion_interruptible_timeout(&mc->chanlist[flow].complete, in meson_cipher()
[all …]
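
get_engine_number() above spreads requests across the engine's flows with an atomic round-robin: increment a shared counter and take it modulo the flow count. The equivalent in C11 atomics (MAXFLOW's value is assumed here):

#include <stdatomic.h>

#define MAXFLOW 2	/* assumed; the driver defines its own value */

/* atomic_inc_return(&mc->flow) % MAXFLOW */
static int pick_flow(atomic_int *counter)
{
	return (atomic_fetch_add(counter, 1) + 1) % MAXFLOW;
}
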
/drivers/net/ethernet/mellanox/mlx5/core/esw/
ipsec_fs.c
124 struct mlx5e_tc_flow *flow) in mlx5_esw_ipsec_modify_flow_dests() argument
130 attr = flow->attr; in mlx5_esw_ipsec_modify_flow_dests()
135 err = mlx5_eswitch_restore_ipsec_rule(esw, flow->rule[0], esw_attr, in mlx5_esw_ipsec_modify_flow_dests()
149 struct mlx5e_tc_flow *flow; in mlx5_esw_ipsec_restore_dest_uplink() local
160 while ((flow = rhashtable_walk_next(&iter)) != NULL) { in mlx5_esw_ipsec_restore_dest_uplink()
161 if (IS_ERR(flow)) in mlx5_esw_ipsec_restore_dest_uplink()
164 err = mlx5_esw_ipsec_modify_flow_dests(esw, flow); in mlx5_esw_ipsec_restore_dest_uplink()
