/kernel/linux/linux-5.10/include/net/

fq_impl.h
    125  flow = &fq->flows[idx];                                   in fq_flow_classify()
    133  tin->flows++;                                             in fq_flow_classify()
    315  fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);   in fq_init()
    316  if (!fq->flows)                                           in fq_init()
    320  fq_flow_init(&fq->flows[i]);                              in fq_init()
    331  fq_flow_reset(fq, &fq->flows[i], free_func);              in fq_reset()
    333  kvfree(fq->flows);                                        in fq_reset()
    334  fq->flows = NULL;                                         in fq_reset()
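The matches above cover the whole life cycle of the generic fq flow table: kvcalloc() allocation in fq_init(), per-flow initialization, then per-flow reset plus kvfree() in fq_reset(). A minimal sketch of that shape, assuming the three-argument fq_flow_reset() seen above and the fq.h callback type for free_func (treat the type name as an assumption):

    /* Sketch only: mirrors the alloc/init/teardown pattern in the matches.
     * kvcalloc() tries kmalloc first and falls back to vmalloc, so a large
     * flow table does not need physically contiguous pages. */
    static int fq_init_sketch(struct fq *fq)
    {
            u32 i;

            fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]),
                                 GFP_KERNEL);
            if (!fq->flows)
                    return -ENOMEM;               /* assumed error path */

            for (i = 0; i < fq->flows_cnt; i++)
                    fq_flow_init(&fq->flows[i]);
            return 0;
    }

    static void fq_reset_sketch(struct fq *fq, fq_skb_free_t free_func)
    {
            u32 i;

            for (i = 0; i < fq->flows_cnt; i++)
                    fq_flow_reset(fq, &fq->flows[i], free_func);

            kvfree(fq->flows);
            fq->flows = NULL;                     /* guard against reuse */
    }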
fq.h
    54   u32 flows;                      member
    68   struct fq_flow *flows;          member

/kernel/linux/linux-5.10/samples/bpf/

do_hbm_test.sh
    78   flows=1
    140  -f=*|--flows=*)
    141  flows="${i#*=}"
    267  while [ $flow_cnt -le $flows ] ; do
    309  while [ $flow_cnt -le $flows ] ; do
    335  iperf3 -c $host -p $port -i 0 -P $flows -f m -t $dur > iperf.$id
    355  while [ $flow_cnt -le $flows ] ; do
    375  while [ $flow_cnt -le $flows ] ; do

/kernel/linux/linux-5.10/net/sched/

sch_fq_codel.c
    53   struct fq_codel_flow *flows;   /* Flows table [flows_cnt] */   member
    164  flow = &q->flows[idx];                                in fq_codel_drop()
    204  flow = &q->flows[idx];                                in fq_codel_enqueue()
    265  q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);   in dequeue_func()
    343  struct fq_codel_flow *flow = q->flows + i;            in fq_codel_reset()
    383  if (q->flows)                                         in fq_codel_change()
    454  kvfree(q->flows);                                     in fq_codel_destroy()
    486  if (!q->flows) {                                      in fq_codel_init()
    487  q->flows = kvcalloc(q->flows_cnt,                     in fq_codel_init()
    490  if (!q->flows) {                                      in fq_codel_init()
    [all …]
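fq_codel keeps a flat table of flows_cnt buckets plus a parallel backlogs[] array; the `flow - q->flows` pointer arithmetic in dequeue_func() recovers the bucket index to update that array, and fq_codel_init() only allocates when the table does not already exist (the `if (!q->flows)` guard). A sketch of the bucket selection, assuming reciprocal_scale() as the hash-to-index mapping (an assumption here, not confirmed by the matches):

    /* Map an arbitrary 32-bit packet hash onto [0, flows_cnt) without a
     * division; the selected bucket is then &q->flows[idx]. */
    static unsigned int fq_codel_hash_sketch(struct fq_codel_sched_data *q,
                                             struct sk_buff *skb)
    {
            return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
    }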
sch_fq_pie.c
    57   struct fq_pie_flow *flows;                                       member
    149  sel_flow = &q->flows[idx];                                       in fq_pie_qdisc_enqueue()
    301  if (q->flows) {                                                  in fq_pie_change()
    383  pie_calculate_probability(&q->p_params, &q->flows[idx].vars,     in fq_pie_timer()
    384  q->flows[idx].backlog);                                          in fq_pie_timer()
    424  q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_pie_flow),    in fq_pie_init()
    426  if (!q->flows) {                                                 in fq_pie_init()
    431  struct fq_pie_flow *flow = q->flows + idx;                       in fq_pie_init()
    515  struct fq_pie_flow *flow = q->flows + idx;                       in fq_pie_reset()
    536  kvfree(q->flows);                                                in fq_pie_destroy()
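The two fq_pie_timer() matches show the per-flow PIE model: every flow carries its own pie vars and backlog, and a periodic timer recomputes each flow's drop probability. A sketch of that loop, assuming flows_cnt bounds the table as in the init matches (the real timer also re-arms itself, omitted here):

    /* Refresh each flow's drop probability from its current backlog. */
    static void fq_pie_timer_sketch(struct fq_pie_sched_data *q)
    {
            u32 idx;

            for (idx = 0; idx < q->flows_cnt; idx++)
                    pie_calculate_probability(&q->p_params,
                                              &q->flows[idx].vars,
                                              q->flows[idx].backlog);
    }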
sch_atm.c
    66   struct list_head flows;   /* NB: "link" is also on this          member
    78   list_for_each_entry(flow, &p->flows, list) {              in lookup_flow()
    355  list_for_each_entry(flow, &p->flows, list) {              in atm_tc_walk()
    393  list_for_each_entry(flow, &p->flows, list) {              in atm_tc_enqueue()
    480  list_for_each_entry(flow, &p->flows, list) {              in sch_atm_dequeue()
    551  INIT_LIST_HEAD(&p->flows);                                in atm_tc_init()
    553  list_add(&p->link.list, &p->flows);                       in atm_tc_init()
    579  list_for_each_entry(flow, &p->flows, list)                in atm_tc_reset()
    590  list_for_each_entry(flow, &p->flows, list) {              in atm_tc_destroy()
    595  list_for_each_entry_safe(flow, tmp, &p->flows, list) {    in atm_tc_destroy()
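Unlike the hash-table schedulers above, sch_atm keeps its flows on a plain linked list (with the built-in `link` flow on the same list), so classification, walk, reset, and destroy are all linear traversals. A sketch of the lookup implied by the lookup_flow() match; the classid comparison is an assumption about the loop body:

    static struct atm_flow_data *lookup_flow_sketch(struct atm_qdisc_data *p,
                                                    u32 classid)
    {
            struct atm_flow_data *flow;

            list_for_each_entry(flow, &p->flows, list) {
                    if (flow->common.classid == classid)
                            return flow;
            }
            return NULL;
    }

Note that the destroy path switches to list_for_each_entry_safe(), since it unlinks entries while walking.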
sch_fq.c
    120   u32 flows;                                       member
    254   q->flows -= fcnt;                                in fq_gc()
    304   if (q->flows >= (2U << q->fq_trees_log) &&       in fq_classify()
    305   q->inactive_flows > q->flows/2)                  in fq_classify()
    359   q->flows++;                                      in fq_classify()
    692   q->flows = 0;                                    in fq_reset()
    737   q->flows -= fcnt;                                in fq_rehash()
    1023  st.flows = q->flows;                             in fq_dump_stats()
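In sch_fq, q->flows counts flows held in the rbtrees and q->inactive_flows those with nothing queued; the fq_classify() condition above triggers garbage collection (fq_gc(), which decrements q->flows by the count it frees) once the population passes 2^(fq_trees_log+1) and more than half of it is inactive. The predicate, copied from the matched condition:

    /* Sketch: when true, fq_classify() garbage-collects inactive flows
     * before inserting a new one. */
    static bool fq_needs_gc_sketch(const struct fq_sched_data *q)
    {
            return q->flows >= (2U << q->fq_trees_log) &&
                   q->inactive_flows > q->flows / 2;
    }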
sch_cake.c
    150  struct cake_flow flows[CAKE_QUEUES];                                  member
    743  q->flows[reduced_hash].set)) {                                        in cake_hash()
    761  if (!q->flows[outer_hash + k].set) {                                  in cake_hash()
    776  if (!q->flows[outer_hash + k].set) {                                  in cake_hash()
    788  if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {                  in cake_hash()
    789  q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;   in cake_hash()
    790  q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;   in cake_hash()
    817  if (q->flows[reduced_hash].set == CAKE_SET_BULK)                      in cake_hash()
    819  q->flows[reduced_hash].srchost = srchost_idx;                         in cake_hash()
    840  if (q->flows[reduced_hash].set == CAKE_SET_BULK)                      in cake_hash()
    [all …]
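CAKE's hash is set-associative: reduced_hash picks a direct slot, and on collision cake_hash() probes the other ways of the same set (the `outer_hash + k` accesses) for a free or matching entry, while the srchost/dsthost bulk-flow counters maintain per-host fairness state. A simplified probe sketch; the way count and the real code's eviction/ordering heuristics are assumptions here:

    #define WAYS_SKETCH 8                /* stand-in for CAKE_SET_WAYS */

    /* Return a free slot within reduced_hash's set, or the direct slot if
     * the whole set is occupied (the real code then evicts or merges). */
    static u32 cake_probe_sketch(const struct cake_tin_data *q,
                                 u32 reduced_hash)
    {
            u32 outer_hash = reduced_hash - (reduced_hash % WAYS_SKETCH);
            u32 k;

            for (k = 0; k < WAYS_SKETCH; k++) {
                    if (!q->flows[outer_hash + k].set)
                            return outer_hash + k;
            }
            return reduced_hash;
    }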
/kernel/linux/linux-5.10/drivers/crypto/allwinner/sun8i-ss/

sun8i-ss-core.c
    71   ss->flows[flow].stat_req++;                                            in sun8i_ss_run_task()
    128  reinit_completion(&ss->flows[flow].complete);                          in sun8i_ss_run_task()
    129  ss->flows[flow].status = 0;                                            in sun8i_ss_run_task()
    134  wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,   in sun8i_ss_run_task()
    136  if (ss->flows[flow].status == 0) {                                     in sun8i_ss_run_task()
    155  ss->flows[flow].status = 1;                                            in ss_irq_handler()
    156  complete(&ss->flows[flow].complete);                                   in ss_irq_handler()
    420  seq_printf(seq, "Channel %d: nreq %lu\n", i, ss->flows[i].stat_req);   in sun8i_ss_debugfs_show()
    455  crypto_engine_exit(ss->flows[i].engine);                               in sun8i_ss_free_flows()
    467  ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow),   in allocate_flows()
    [all …]
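The core driver matches show the submit/IRQ handshake used on every hardware flow: the submitter re-arms the flow's completion and clears its status word, starts the engine, and waits with a timeout; ss_irq_handler() sets status and signals the completion. A sketch of that handshake, assuming a 2-second timeout and -EFAULT on timeout (both assumptions, not taken from the matches):

    static int ss_run_sketch(struct sun8i_ss_dev *ss, int flow)
    {
            long ret;

            reinit_completion(&ss->flows[flow].complete);
            ss->flows[flow].status = 0;

            /* ... program the task descriptor and start the engine ... */

            ret = wait_for_completion_interruptible_timeout(
                            &ss->flows[flow].complete,
                            msecs_to_jiffies(2000));  /* assumed timeout */
            if (ret <= 0 || ss->flows[flow].status == 0) {
                    dev_err(ss->dev, "flow %d timed out\n", flow);
                    return -EFAULT;                   /* assumed errno */
            }
            return 0;
    }

The prng and hash paths in the next two files repeat the same pattern around their own task setup.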
sun8i-ss-prng.c
    131  reinit_completion(&ss->flows[flow].complete);                          in sun8i_ss_prng_generate()
    132  ss->flows[flow].status = 0;                                            in sun8i_ss_prng_generate()
    138  wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,   in sun8i_ss_prng_generate()
    140  if (ss->flows[flow].status == 0) {                                     in sun8i_ss_prng_generate()

sun8i-ss-hash.c
    207  ss->flows[flow].stat_req++;                                            in sun8i_ss_run_hash_task()
    240  reinit_completion(&ss->flows[flow].complete);                          in sun8i_ss_run_hash_task()
    241  ss->flows[flow].status = 0;                                            in sun8i_ss_run_hash_task()
    246  wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,   in sun8i_ss_run_hash_task()
    248  if (ss->flows[flow].status == 0) {                                     in sun8i_ss_run_hash_task()
    309  engine = ss->flows[e].engine;                                          in sun8i_ss_hash_digest()

sun8i-ss.h
    152  struct sun8i_ss_flow *flows;    member

/kernel/linux/linux-5.10/drivers/dma/ti/

k3-udma-glue.c
    79   struct k3_udma_glue_rx_flow *flows;                              member
    533  struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];    in k3_udma_glue_release_rx_flow()
    553  struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];    in k3_udma_glue_cfg_rx_flow()
    786  rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,              in k3_udma_glue_request_rx_chn_priv()
    787  sizeof(*rx_chn->flows), GFP_KERNEL);                             in k3_udma_glue_request_rx_chn_priv()
    788  if (!rx_chn->flows) {                                            in k3_udma_glue_request_rx_chn_priv()
    798  rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;       in k3_udma_glue_request_rx_chn_priv()
    880  rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,              in k3_udma_glue_request_remote_rx_chn()
    881  sizeof(*rx_chn->flows), GFP_KERNEL);                             in k3_udma_glue_request_remote_rx_chn()
    882  if (!rx_chn->flows) {                                            in k3_udma_glue_request_remote_rx_chn()
    [all …]
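Both the private and the remote RX channel setup paths allocate one descriptor per hardware flow with devm_kcalloc() and hand each slot a consecutive hardware flow id starting at flow_id_base. A sketch of that setup, with names taken from the matches:

    static int rx_flows_setup_sketch(struct k3_udma_glue_rx_channel *rx_chn,
                                     struct device *dev)
    {
            int i;

            rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
                                         sizeof(*rx_chn->flows), GFP_KERNEL);
            if (!rx_chn->flows)
                    return -ENOMEM;

            for (i = 0; i < rx_chn->flow_num; i++)
                    rx_chn->flows[i].udma_rflow_id =
                                    rx_chn->flow_id_base + i;
            return 0;
    }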
/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/

tid_rdma.c
    752   u32 generation = rcd->flows[flow_idx].generation;              in kern_setup_hw_flow()
    770   rcd->flows[flow_idx].generation =                              in kern_clear_hw_flow()
    771   kern_flow_generation_next(rcd->flows[flow_idx].generation);    in kern_clear_hw_flow()
    799   rcd->flows[fs->index].generation = fs->generation;             in hfi1_kern_setup_hw_flow()
    848   rcd->flows[i].generation = mask_generation(prandom_u32());     in hfi1_kern_init_ctxt_generations()
    1458  struct tid_rdma_flow *flow = &req->flows[req->setup_head];     in hfi1_kern_exp_rcv_setup()
    1549  struct tid_rdma_flow *flow = &req->flows[req->clear_tail];     in hfi1_kern_exp_rcv_clear()
    1605  kfree(req->flows);                                             in hfi1_kern_exp_rcv_free_flows()
    1606  req->flows = NULL;                                             in hfi1_kern_exp_rcv_free_flows()
    1627  struct tid_rdma_flow *flows;   in hfi1_kern_exp_rcv_alloc_flows()   local
    [all …]
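The hfi1 matches mix two distinct uses of `flows`: per-context hardware flows (rcd->flows[...]) whose generation numbers are randomized at context init and advanced on every teardown so stale packets from an old incarnation can be rejected, and a per-request flows array driven as a ring via setup_head/clear_tail. A sketch of the generation step; the mask width and the reserved value to skip are assumptions about mask_generation() and its reserved constant:

    #define GEN_MASK_SKETCH      0xff    /* assumed generation width */
    #define GEN_RESERVED_SKETCH  0xff    /* assumed reserved value */

    /* Advance a flow generation, skipping the reserved value. */
    static u32 generation_next_sketch(u32 gen)
    {
            u32 next = (gen + 1) & GEN_MASK_SKETCH;

            if (next == GEN_RESERVED_SKETCH)
                    next = (next + 1) & GEN_MASK_SKETCH;
            return next;
    }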
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/

en_rep.h
    164  struct list_head flows;         member
    183  struct list_head flows;         member

eswitch_offloads.c
    925   struct mlx5_flow_handle **flows;   in esw_add_fdb_peer_miss_rules()   local
    939   flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);    in esw_add_fdb_peer_miss_rules()
    940   if (!flows) {                                              in esw_add_fdb_peer_miss_rules()
    959   flows[MLX5_VPORT_PF] = flow;                               in esw_add_fdb_peer_miss_rules()
    970   flows[mlx5_eswitch_ecpf_idx(esw)] = flow;                  in esw_add_fdb_peer_miss_rules()
    984   flows[i] = flow;                                           in esw_add_fdb_peer_miss_rules()
    987   esw->fdb_table.offloads.peer_miss_rules = flows;           in esw_add_fdb_peer_miss_rules()
    995   mlx5_del_flow_rules(flows[i]);                             in esw_add_fdb_peer_miss_rules()
    998   mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);    in esw_add_fdb_peer_miss_rules()
    1001  mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);                 in esw_add_fdb_peer_miss_rules()
    [all …]
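esw_add_fdb_peer_miss_rules() allocates an array of flow handles with kvzalloc(), fills it one vport at a time (PF, ECPF, then the VF range), publishes it in esw->fdb_table.offloads.peer_miss_rules, and on any failure deletes everything inserted so far in reverse order. A sketch of that populate/unwind shape; add_one_peer_rule() is a hypothetical helper standing in for the per-vport insertion:

    static int peer_miss_rules_sketch(struct mlx5_eswitch *esw, int nvports)
    {
            struct mlx5_flow_handle **flows;
            int i, err;

            flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
            if (!flows)
                    return -ENOMEM;

            for (i = 0; i < nvports; i++) {
                    flows[i] = add_one_peer_rule(esw, i); /* hypothetical */
                    if (IS_ERR(flows[i])) {
                            err = PTR_ERR(flows[i]);
                            goto unwind;
                    }
            }
            esw->fdb_table.offloads.peer_miss_rules = flows;
            return 0;

    unwind:
            while (--i >= 0)
                    mlx5_del_flow_rules(flows[i]);
            kvfree(flows);
            return err;
    }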
/kernel/linux/linux-5.10/Documentation/userspace-api/media/mediactl/

media-controller-model.rst
    26  by an entity flows from the entity's output to one or more entity
    31  pads, either on the same entity or on different entities. Data flows

/kernel/linux/linux-5.10/Documentation/admin-guide/pm/

system-wide.rst
    11  suspend-flows

suspend-flows.rst
    25  The kernel code flows associated with the suspend and resume transitions for
    27  significant differences between the :ref:`suspend-to-idle <s2idle>` code flows
    28  and the code flows related to the :ref:`suspend-to-RAM <s2ram>` and
    35  available. Apart from that, the suspend and resume code flows for these sleep

/kernel/linux/linux-5.10/net/core/

pktgen.c
    399   struct flow_state *flows;                             member
    2202  return !!(pkt_dev->flows[flow].flags & F_INIT);       in f_seen()
    2210  if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {   in f_pick()
    2212  pkt_dev->flows[flow].count = 0;                       in f_pick()
    2213  pkt_dev->flows[flow].flags = 0;                       in f_pick()
    2222  if (pkt_dev->flows[flow].count > pkt_dev->lflow) {    in f_pick()
    2223  pkt_dev->flows[flow].count = 0;                       in f_pick()
    2224  pkt_dev->flows[flow].flags = 0;                       in f_pick()
    2239  struct xfrm_state *x = pkt_dev->flows[flow].x;        in get_ipsec_sa()
    2258  pkt_dev->flows[flow].x = x;                           in get_ipsec_sa()
    [all …]
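pktgen models each concurrent flow as a flow_state slot carrying a packet count and an F_INIT flag; f_pick() retires a flow once it has sent lflow packets by zeroing count and flags, which forces fresh randomization (and, with IPsec, a new SA lookup via get_ipsec_sa()) the next time the flow is picked. A sketch of that retirement check, copied from the matched f_pick() lines:

    /* Reset a flow that has reached its per-flow packet budget so it is
     * re-initialized the next time it is picked. */
    static void f_retire_sketch(struct pktgen_dev *pkt_dev, int flow)
    {
            if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {
                    pkt_dev->flows[flow].count = 0;
                    pkt_dev->flows[flow].flags = 0;   /* clears F_INIT */
            }
    }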
/kernel/linux/linux-5.10/Documentation/networking/

scaling.rst
    31   of logical flows. Packets for each flow are steered to a separate receive
    188  to the same CPU is CPU load imbalance if flows vary in packet rate.
    194  Flow Limit is an optional RPS feature that prioritizes small flows
    195  during CPU contention by dropping packets from large flows slightly
    196  ahead of those from small flows. It is active only when an RPS or RFS
    202  new packet is dropped. Packets from other flows are still only
    206  even large flows maintain connectivity.
    224  identification of large flows and fewer false positives. The default
    261  flows to the CPUs where those flows are being processed. The flow hash
    266  same CPU. Indeed, with many flows and few CPUs, it is very likely that
    [all …]
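The Flow Limit excerpt describes a drop policy rather than an API: when a backlog queue is contended, a new packet is dropped early if its flow accounts for a disproportionate share of recently enqueued packets, so small flows keep getting through while large flows still retain connectivity. A toy model of the policy as described; the bucket scheme, thresholds, and the missing history decay are illustrative assumptions, not the kernel's exact flow-limit logic:

    /* Toy model: history[] counts recent packets per hashed flow bucket
     * (decay of the window is omitted). Drop when the queue is past half
     * of its maximum AND this flow dominates the recent history. */
    static bool flow_limit_drop_sketch(unsigned int *history,
                                       unsigned int nbuckets,
                                       unsigned int flow_hash,
                                       unsigned int qlen,
                                       unsigned int max_backlog)
    {
            unsigned int bucket = flow_hash % nbuckets;

            history[bucket]++;                  /* account this packet */
            return qlen > max_backlog / 2 &&
                   history[bucket] > max_backlog / 4;   /* assumed ratio */
    }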
openvswitch.rst
    16   table" that userspace populates with "flows" that map from keys based
    104  A wildcarded flow can represent a group of exact match flows. Each '1' bit
    108  by reduce the number of new flows need to be processed by the user space program.
    120  two possible approaches: reactively install flows as they miss the kernel
    130  The behavior when using overlapping wildcarded flows is undefined. It is the
    133  performs best-effort detection of overlapping wildcarded flows and may reject
    146  future operations. The kernel is not required to index flows by the original
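The wildcard excerpt is the key datapath idea: each '1' bit in a flow mask demands an exact match on that bit, so one wildcarded flow stands in for many exact-match flows and fewer misses reach userspace. The comparison reduces to a masked XOR, sketched here with a single integer standing in for the full OVS flow key:

    #include <stdbool.h>
    #include <stdint.h>

    /* A packet matches a wildcarded flow iff the keys agree on every bit
     * set in the mask; '0' mask bits are "don't care". */
    static bool masked_match_sketch(uint64_t pkt_key, uint64_t flow_key,
                                    uint64_t mask)
    {
            return ((pkt_key ^ flow_key) & mask) == 0;
    }

The undefined behavior warned about above arises when two installed flows with different masks both match the same packet.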
pktgen.rst
    97   flows: 0 flowlen: 0
    112  flows: 0
    269  pktgen.conf-1-1-flows   # 1 CPU 1 dev multiple flows.
    287  pgset "flows 1"
    381  flows

/kernel/linux/linux-5.10/Documentation/admin-guide/blockdev/drbd/

figures.rst
    5  Data flows that Relate some functions, and write packets

/kernel/linux/linux-5.10/net/dccp/ccids/

Kconfig
    20  be reasonably fair when competing for bandwidth with TCP-like flows,