/drivers/crypto/allwinner/sun8i-ss/

sun8i-ss-core.c
       74  ss->flows[flow].stat_req++;  in sun8i_ss_run_task()
      130  reinit_completion(&ss->flows[flow].complete);  in sun8i_ss_run_task()
      131  ss->flows[flow].status = 0;  in sun8i_ss_run_task()
      136  wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,  in sun8i_ss_run_task()
      138  if (ss->flows[flow].status == 0) {  in sun8i_ss_run_task()
      157  ss->flows[flow].status = 1;  in ss_irq_handler()
      158  complete(&ss->flows[flow].complete);  in ss_irq_handler()
      422  seq_printf(seq, "Channel %d: nreq %lu\n", i, ss->flows[i].stat_req);  in sun8i_ss_debugfs_show()
      457  crypto_engine_exit(ss->flows[i].engine);  in sun8i_ss_free_flows()
      469  ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow),  in allocate_flows()
      [all …]

sun8i-ss-prng.c
      131  reinit_completion(&ss->flows[flow].complete);  in sun8i_ss_prng_generate()
      132  ss->flows[flow].status = 0;  in sun8i_ss_prng_generate()
      138  wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,  in sun8i_ss_prng_generate()
      140  if (ss->flows[flow].status == 0) {  in sun8i_ss_prng_generate()

sun8i-ss-hash.c
      208  ss->flows[flow].stat_req++;  in sun8i_ss_run_hash_task()
      241  reinit_completion(&ss->flows[flow].complete);  in sun8i_ss_run_hash_task()
      242  ss->flows[flow].status = 0;  in sun8i_ss_run_hash_task()
      247  wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,  in sun8i_ss_run_hash_task()
      249  if (ss->flows[flow].status == 0) {  in sun8i_ss_run_hash_task()
      310  engine = ss->flows[e].engine;  in sun8i_ss_hash_digest()
      344  result = ss->flows[rctx->flow].result;  in sun8i_ss_hash_run()
      345  pad = ss->flows[rctx->flow].pad;  in sun8i_ss_hash_run()

sun8i-ss-cipher.c
      106  struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];  in sun8i_ss_setup_ivs()
      167  struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];  in sun8i_ss_cipher()
      337  engine = op->ss->flows[e].engine;  in sun8i_ss_skdecrypt()
      358  engine = op->ss->flows[e].engine;  in sun8i_ss_skencrypt()

sun8i-ss.h
      160  struct sun8i_ss_flow *flows;  member
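
The sun8i-ss hits all follow one pattern: the driver keeps an array of per-channel flow state, each entry carrying a completion that the submit path arms and waits on and the IRQ handler signals. A minimal sketch of that pattern is below; struct my_flow, struct my_dev, the timeout and the error code are illustrative stand-ins, while the completion calls are the ones that appear in the listing.

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/interrupt.h>
    #include <linux/jiffies.h>

    /* Hypothetical per-channel state, modelled on struct sun8i_ss_flow. */
    struct my_flow {
            struct completion complete;     /* signalled from the IRQ handler */
            int status;                     /* 0 = still pending, 1 = done */
            unsigned long stat_req;         /* debugfs-style request counter */
    };

    struct my_dev {
            struct my_flow *flows;          /* one entry per hardware channel */
    };

    /* Submit path: arm the completion, start the job, then sleep on it. */
    static int my_run_task(struct my_dev *dev, int flow)
    {
            dev->flows[flow].stat_req++;
            reinit_completion(&dev->flows[flow].complete);
            dev->flows[flow].status = 0;

            /* ... program the hardware channel here ... */

            wait_for_completion_interruptible_timeout(&dev->flows[flow].complete,
                                                      msecs_to_jiffies(2000));
            if (dev->flows[flow].status == 0)
                    return -EFAULT; /* IRQ never marked the flow as finished */
            return 0;
    }

    /* IRQ path: mark the flow done and wake the waiter. */
    static irqreturn_t my_irq_handler(int irq, void *data)
    {
            struct my_dev *dev = data;
            int flow = 0;   /* illustrative: a real handler reads this from HW */

            dev->flows[flow].status = 1;
            complete(&dev->flows[flow].complete);
            return IRQ_HANDLED;
    }
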
/drivers/dma/ti/

k3-udma-glue.c
       79  struct k3_udma_glue_rx_flow *flows;  member
      533  struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];  in k3_udma_glue_release_rx_flow()
      553  struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];  in k3_udma_glue_cfg_rx_flow()
      786  rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,  in k3_udma_glue_request_rx_chn_priv()
      787  sizeof(*rx_chn->flows), GFP_KERNEL);  in k3_udma_glue_request_rx_chn_priv()
      788  if (!rx_chn->flows) {  in k3_udma_glue_request_rx_chn_priv()
      798  rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;  in k3_udma_glue_request_rx_chn_priv()
      880  rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,  in k3_udma_glue_request_remote_rx_chn()
      881  sizeof(*rx_chn->flows), GFP_KERNEL);  in k3_udma_glue_request_remote_rx_chn()
      882  if (!rx_chn->flows) {  in k3_udma_glue_request_remote_rx_chn()
      [all …]
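
The k3-udma-glue hits show the allocation side of the same idea: the RX channel holds a devm-managed array of flow descriptors sized at request time, with each entry seeded from a base flow id. A rough sketch under assumed names follows; struct my_rx_chn and struct my_rx_flow are placeholders, while devm_kcalloc() and the flow_id_base + i seeding are taken from the listing.

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    /* Hypothetical per-flow descriptor, stand-in for k3_udma_glue_rx_flow. */
    struct my_rx_flow {
            u32 udma_rflow_id;
    };

    struct my_rx_chn {
            struct device *dev;
            u32 flow_num;
            u32 flow_id_base;
            struct my_rx_flow *flows;
    };

    static int my_request_rx_chn(struct my_rx_chn *rx_chn)
    {
            u32 i;

            /* One descriptor per flow, freed automatically with the device. */
            rx_chn->flows = devm_kcalloc(rx_chn->dev, rx_chn->flow_num,
                                         sizeof(*rx_chn->flows), GFP_KERNEL);
            if (!rx_chn->flows)
                    return -ENOMEM;

            /* Flow ids are contiguous, starting at the channel's base id. */
            for (i = 0; i < rx_chn->flow_num; i++)
                    rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

            return 0;
    }
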
/drivers/infiniband/hw/hfi1/

tid_rdma.c
      752  u32 generation = rcd->flows[flow_idx].generation;  in kern_setup_hw_flow()
      770  rcd->flows[flow_idx].generation =  in kern_clear_hw_flow()
      771  kern_flow_generation_next(rcd->flows[flow_idx].generation);  in kern_clear_hw_flow()
      799  rcd->flows[fs->index].generation = fs->generation;  in hfi1_kern_setup_hw_flow()
      848  rcd->flows[i].generation = mask_generation(prandom_u32());  in hfi1_kern_init_ctxt_generations()
     1458  struct tid_rdma_flow *flow = &req->flows[req->setup_head];  in hfi1_kern_exp_rcv_setup()
     1549  struct tid_rdma_flow *flow = &req->flows[req->clear_tail];  in hfi1_kern_exp_rcv_clear()
     1605  kfree(req->flows);  in hfi1_kern_exp_rcv_free_flows()
     1606  req->flows = NULL;  in hfi1_kern_exp_rcv_free_flows()
     1627  struct tid_rdma_flow *flows;  in hfi1_kern_exp_rcv_alloc_flows()  local
      [all …]

tid_rdma.h
      105  struct tid_rdma_flow *flows;  /* array of tid flows */  member

rc.c
      824  &req->flows[req->setup_head];  in hfi1_make_rc_req()
     1113  flow = &req->flows[req->flow_idx];  in hfi1_make_rc_req()

hfi.h
      351  struct tid_flow_state flows[RXE_NUM_TID_FLOWS];  member
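
In the hfi1 TID RDMA code, flows is a heap-allocated array attached to each request, indexed through setup_head/clear_tail cursors and torn down by freeing the array and clearing the pointer. A simplified sketch with made-up names is below; struct my_tid_req and the allocator call are illustrative (the listing only shows a local flows pointer in the real alloc path), while the kfree()/NULL teardown and the cursor indexing follow the hits above.

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct my_tid_flow {
            u32 generation;
    };

    /* Hypothetical request: a small ring of flow descriptors plus cursors. */
    struct my_tid_req {
            struct my_tid_flow *flows;
            u16 n_flows;
            u16 setup_head;         /* next slot to program */
            u16 clear_tail;         /* oldest slot still in flight */
    };

    static int my_alloc_flows(struct my_tid_req *req, u16 n_flows, gfp_t gfp)
    {
            struct my_tid_flow *flows;

            flows = kcalloc(n_flows, sizeof(*flows), gfp);
            if (!flows)
                    return -ENOMEM;

            req->flows = flows;
            req->n_flows = n_flows;
            req->setup_head = 0;
            req->clear_tail = 0;
            return 0;
    }

    static void my_free_flows(struct my_tid_req *req)
    {
            kfree(req->flows);
            req->flows = NULL;      /* guard against stale use after free */
    }

    /* Typical access pattern seen in the listing: index through a cursor. */
    static struct my_tid_flow *my_next_setup_flow(struct my_tid_req *req)
    {
            return &req->flows[req->setup_head];
    }
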
/drivers/net/ethernet/mellanox/mlx5/core/

en_rep.h
      164  struct list_head flows;  member
      183  struct list_head flows;  member

eswitch_offloads.c
      925  struct mlx5_flow_handle **flows;  in esw_add_fdb_peer_miss_rules()  local
      939  flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);  in esw_add_fdb_peer_miss_rules()
      940  if (!flows) {  in esw_add_fdb_peer_miss_rules()
      959  flows[MLX5_VPORT_PF] = flow;  in esw_add_fdb_peer_miss_rules()
      970  flows[mlx5_eswitch_ecpf_idx(esw)] = flow;  in esw_add_fdb_peer_miss_rules()
      984  flows[i] = flow;  in esw_add_fdb_peer_miss_rules()
      987  esw->fdb_table.offloads.peer_miss_rules = flows;  in esw_add_fdb_peer_miss_rules()
      995  mlx5_del_flow_rules(flows[i]);  in esw_add_fdb_peer_miss_rules()
      998  mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);  in esw_add_fdb_peer_miss_rules()
     1001  mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);  in esw_add_fdb_peer_miss_rules()
      [all …]

en_tc.c
      344  struct list_head flows;  member
      791  WARN_ON(!list_empty(&hpe->flows));  in mlx5e_hairpin_put()
      886  INIT_LIST_HEAD(&hpe->flows);  in mlx5e_hairpin_flow_add()
      939  list_add(&flow->hairpin, &hpe->flows);  in mlx5e_hairpin_flow_add()
     1636  list_for_each_entry(efi, &e->flows, list) {  in mlx5e_take_all_encap_flows()
     1732  list_for_each_entry_safe(efi, tmp, &e->flows, list) {  in mlx5e_tc_update_neigh_used_value()
     1777  WARN_ON(!list_empty(&e->flows));  in mlx5e_encap_dealloc()
     1794  WARN_ON(!list_empty(&d->flows));  in mlx5e_decap_dealloc()
     3865  INIT_LIST_HEAD(&e->flows);  in mlx5e_attach_encap()
     3885  list_add(&flow->encaps[out_index].list, &e->flows);  in mlx5e_attach_encap()
      [all …]
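
The mlx5 hits come in two shapes: a temporary array of flow handles in eswitch_offloads.c, and, in en_rep.h/en_tc.c, a parent object that anchors its dependent flows on a struct list_head named flows. The list variant is the more general pattern; the sketch below uses invented types (struct my_encap_entry and struct my_flow), while the list primitives and the empty-list check at teardown mirror the en_tc.c lines above.

    #include <linux/bug.h>
    #include <linux/list.h>

    /* Hypothetical shared object that several flows can attach to. */
    struct my_encap_entry {
            struct list_head flows;         /* list of attached my_flow::encap */
    };

    struct my_flow {
            struct list_head encap;         /* linkage on my_encap_entry::flows */
    };

    static void my_encap_init(struct my_encap_entry *e)
    {
            INIT_LIST_HEAD(&e->flows);
    }

    static void my_encap_attach(struct my_encap_entry *e, struct my_flow *flow)
    {
            list_add(&flow->encap, &e->flows);
    }

    static void my_encap_detach(struct my_flow *flow)
    {
            list_del(&flow->encap);
    }

    /*
     * Teardown mirrors the WARN_ON(!list_empty(...)) checks in en_tc.c:
     * by the time the parent goes away, every flow must be detached.
     */
    static void my_encap_dealloc(struct my_encap_entry *e)
    {
            WARN_ON(!list_empty(&e->flows));
    }
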
/drivers/net/

tun.c
      210  struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];  member
      430  hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)  in tun_flow_flush()
      445  hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {  in tun_flow_delete_by_queue()
      466  hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {  in tun_flow_cleanup()
      493  head = &tun->flows[tun_hashfn(rxhash)];  in tun_flow_update()
      544  e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);  in tun_automq_select_queue()
     1063  e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);  in tun_automq_xmit()
     1375  INIT_HLIST_HEAD(&tun->flows[i]);  in tun_flow_init()
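
tun.c uses flows as a fixed-size open-hashing table: an array of hlist_head buckets indexed by a hash of the packet's rx hash and walked with hlist_for_each_entry(_safe). A condensed sketch under assumed names follows; struct my_tun, struct my_flow_entry, MY_NUM_FLOW_ENTRIES and my_bucket() are illustrative, while the bucket array, the INIT_HLIST_HEAD() loop and the lookup walk mirror the listing.

    #include <linux/list.h>
    #include <linux/types.h>

    #define MY_NUM_FLOW_ENTRIES 1024        /* power of two, like TUN_NUM_FLOW_ENTRIES */

    struct my_flow_entry {
            struct hlist_node hash_link;    /* linkage within one bucket */
            u32 rxhash;                     /* key */
            u16 queue_index;                /* value: queue this flow was seen on */
    };

    struct my_tun {
            struct hlist_head flows[MY_NUM_FLOW_ENTRIES];
    };

    /* Illustrative hash: fold the rx hash into a bucket index. */
    static inline struct hlist_head *my_bucket(struct my_tun *tun, u32 rxhash)
    {
            return &tun->flows[rxhash & (MY_NUM_FLOW_ENTRIES - 1)];
    }

    static void my_flow_init(struct my_tun *tun)
    {
            int i;

            for (i = 0; i < MY_NUM_FLOW_ENTRIES; i++)
                    INIT_HLIST_HEAD(&tun->flows[i]);
    }

    static struct my_flow_entry *my_flow_find(struct my_tun *tun, u32 rxhash)
    {
            struct my_flow_entry *e;

            hlist_for_each_entry(e, my_bucket(tun, rxhash), hash_link) {
                    if (e->rxhash == rxhash)
                            return e;
            }
            return NULL;
    }
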
/drivers/infiniband/core/

uverbs_cmd.c
     3253  flow_attr = kzalloc(struct_size(flow_attr, flows,  in ib_uverbs_ex_create_flow()
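
The last hit shows flows as a flexible array member sized with struct_size() at allocation time. A minimal sketch, assuming a hypothetical struct my_flow_attr with a my_flow_spec flexible array (struct_size() and kzalloc() are the real kernel helpers; the type and field names are invented):

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct my_flow_spec {
            u32 type;
            u32 size;
    };

    /* Hypothetical header followed by a variable number of specs. */
    struct my_flow_attr {
            u32 num_of_specs;
            struct my_flow_spec flows[];    /* flexible array member */
    };

    static struct my_flow_attr *my_alloc_flow_attr(u32 num_of_specs, gfp_t gfp)
    {
            struct my_flow_attr *attr;

            /*
             * struct_size() computes sizeof(*attr) plus
             * num_of_specs * sizeof(attr->flows[0]) with overflow checking,
             * which is why the listing uses it for this allocation.
             */
            attr = kzalloc(struct_size(attr, flows, num_of_specs), gfp);
            if (!attr)
                    return NULL;

            attr->num_of_specs = num_of_specs;
            return attr;
    }
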