/net/sched/
sch_fq_codel.c
     53  struct fq_codel_flow *flows;  /* Flows table [flows_cnt] */    member
    164  flow = &q->flows[idx];    in fq_codel_drop()
    204  flow = &q->flows[idx];    in fq_codel_enqueue()
    265  q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);    in dequeue_func()
    343  struct fq_codel_flow *flow = q->flows + i;    in fq_codel_reset()
    381  if (q->flows)    in fq_codel_change()
    452  kvfree(q->flows);    in fq_codel_destroy()
    484  if (!q->flows) {    in fq_codel_init()
    485  q->flows = kvcalloc(q->flows_cnt,    in fq_codel_init()
    488  if (!q->flows) {    in fq_codel_init()
    [all …]
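Taken together, the fq_codel hits trace one lifecycle: a flow table sized by flows_cnt is allocated once in fq_codel_init() with kvcalloc(), indexed by hash in enqueue/drop, and released with kvfree() in fq_codel_destroy(). A minimal userspace sketch of that pattern, with calloc() standing in for kvcalloc() and all names (fq_state, fq_alloc, fq_lookup, fq_free) hypothetical:

    #include <stdlib.h>

    struct flow {                        /* stand-in for struct fq_codel_flow */
            unsigned int backlog;
    };

    struct fq_state {                    /* hypothetical qdisc private data */
            struct flow *flows;          /* flows table [flows_cnt] */
            unsigned int flows_cnt;
    };

    /* Allocate the table once, as fq_codel_init() does with kvcalloc(). */
    static int fq_alloc(struct fq_state *q, unsigned int flows_cnt)
    {
            q->flows_cnt = flows_cnt;
            q->flows = calloc(flows_cnt, sizeof(*q->flows));
            return q->flows ? 0 : -1;
    }

    /* Hash to slot, as in fq_codel_enqueue(): flow = &q->flows[idx]. */
    static struct flow *fq_lookup(struct fq_state *q, unsigned int hash)
    {
            return &q->flows[hash % q->flows_cnt];
    }

    /* Release, as kvfree(q->flows) does in fq_codel_destroy(). */
    static void fq_free(struct fq_state *q)
    {
            free(q->flows);
            q->flows = NULL;
    }

The same allocate/index/free shape recurs in sch_fq_pie.c below.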
sch_fq_pie.c
     57  struct fq_pie_flow *flows;    member
    150  sel_flow = &q->flows[idx];    in fq_pie_qdisc_enqueue()
    308  if (q->flows) {    in fq_pie_change()
    394  &q->flows[q->flows_cursor].vars,    in fq_pie_timer()
    395  q->flows[q->flows_cursor].backlog);    in fq_pie_timer()
    441  q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_pie_flow),    in fq_pie_init()
    443  if (!q->flows) {    in fq_pie_init()
    448  struct fq_pie_flow *flow = q->flows + idx;    in fq_pie_init()
    532  struct fq_pie_flow *flow = q->flows + idx;    in fq_pie_reset()
    550  kvfree(q->flows);    in fq_pie_destroy()
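The fq_pie_timer() hits at lines 394-395 point at a detail worth noting: per-flow PIE state is refreshed by a timer that walks the table with a saved cursor (flows_cursor) rather than scanning every flow per tick. A hedged sketch of that cursor walk; the batch size and the names fq_pie_state/fq_pie_timer_step are illustrative, not the kernel's:

    struct pie_flow { int backlog; };    /* stand-in for struct fq_pie_flow */

    struct fq_pie_state {
            struct pie_flow *flows;
            unsigned int flows_cnt;
            unsigned int flows_cursor;   /* sweep position kept across ticks */
    };

    static void update_flow(struct pie_flow *f) { (void)f; /* PIE update */ }

    /* One timer tick: update up to `budget` flows, wrap at the table end,
     * and resume from flows_cursor on the next tick.
     */
    static void fq_pie_timer_step(struct fq_pie_state *q, unsigned int budget)
    {
            while (budget--) {
                    update_flow(&q->flows[q->flows_cursor]);
                    if (++q->flows_cursor >= q->flows_cnt)
                            q->flows_cursor = 0;
            }
    }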
sch_atm.c
     66  struct list_head flows;  /* NB: "link" is also on this    member
     78  list_for_each_entry(flow, &p->flows, list) {    in lookup_flow()
    356  list_for_each_entry(flow, &p->flows, list) {    in atm_tc_walk()
    394  list_for_each_entry(flow, &p->flows, list) {    in atm_tc_enqueue()
    481  list_for_each_entry(flow, &p->flows, list) {    in sch_atm_dequeue()
    552  INIT_LIST_HEAD(&p->flows);    in atm_tc_init()
    554  list_add(&p->link.list, &p->flows);    in atm_tc_init()
    580  list_for_each_entry(flow, &p->flows, list)    in atm_tc_reset()
    590  list_for_each_entry(flow, &p->flows, list) {    in atm_tc_destroy()
    595  list_for_each_entry_safe(flow, tmp, &p->flows, list) {    in atm_tc_destroy()
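sch_atm is the odd one out in this directory: its flows live on a linked list rather than an array, and the member comment notes that the default "link" flow is itself on that list (see the list_add() at line 554), so every lookup and walk is a linear list_for_each_entry() scan. A much simplified userspace analogue using a singly linked list in place of struct list_head; atm_state, atm_init, and find_flow are invented names:

    #include <stddef.h>

    struct atm_flow {
            int classid;
            struct atm_flow *next;       /* stand-in for struct list_head */
    };

    struct atm_state {
            struct atm_flow *flows;      /* list head; "link" sits on it too */
            struct atm_flow link;        /* default flow, member of the list */
    };

    static void atm_init(struct atm_state *p)
    {
            p->link.classid = 0;
            p->link.next = NULL;
            p->flows = &p->link;         /* list_add(&p->link.list, &p->flows) */
    }

    /* Linear scan, as lookup_flow() does with list_for_each_entry(). */
    static struct atm_flow *find_flow(struct atm_state *p, int classid)
    {
            for (struct atm_flow *f = p->flows; f; f = f->next)
                    if (f->classid == classid)
                            return f;
            return NULL;
    }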
sch_fq.c
    120  u32 flows;    member
    254  q->flows -= fcnt;    in fq_gc()
    304  if (q->flows >= (2U << q->fq_trees_log) &&    in fq_classify()
    305  q->inactive_flows > q->flows/2)    in fq_classify()
    359  q->flows++;    in fq_classify()
    692  q->flows = 0;    in fq_reset()
    737  q->flows -= fcnt;    in fq_rehash()
   1027  st.flows = q->flows;    in fq_dump_stats()
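sch_fq counts flows dynamically instead of preallocating a table: fq_classify() increments q->flows on creation (line 359), and fq_gc()/fq_rehash() subtract what they reclaim. Lines 304-305 show the garbage-collection trigger: the flow count has reached twice the rb-tree capacity and more than half of the flows are inactive. A condensed restatement of that check; fq_should_gc is a hypothetical wrapper name:

    struct fq_state {                    /* mirrors the matched fields */
            unsigned int flows;          /* flows currently tracked    */
            unsigned int inactive_flows; /* flows with nothing queued  */
            unsigned int fq_trees_log;   /* log2 of the rb-tree count  */
    };

    /* True when fq_classify() would garbage-collect: the structure is
     * oversubscribed and most of the tracked flows are idle anyway.
     */
    static int fq_should_gc(const struct fq_state *q)
    {
            return q->flows >= (2U << q->fq_trees_log) &&
                   q->inactive_flows > q->flows / 2;
    }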
sch_cake.c
    150  struct cake_flow flows[CAKE_QUEUES];    member
    743  q->flows[reduced_hash].set)) {    in cake_hash()
    761  if (!q->flows[outer_hash + k].set) {    in cake_hash()
    776  if (!q->flows[outer_hash + k].set) {    in cake_hash()
    788  if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {    in cake_hash()
    789  q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;    in cake_hash()
    790  q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;    in cake_hash()
    817  if (q->flows[reduced_hash].set == CAKE_SET_BULK)    in cake_hash()
    819  q->flows[reduced_hash].srchost = srchost_idx;    in cake_hash()
    840  if (q->flows[reduced_hash].set == CAKE_SET_BULK)    in cake_hash()
    [all …]
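The cake_hash() hits sketch a set-associative lookup: the direct slot (reduced_hash) is tried first, then the other ways of the same set (outer_hash + k) are probed for a free entry, with per-host bulk-flow counters adjusted when a slot changes owner (lines 788-790). A sketch of the probing loop only; the table size, way count, and two-state enum are assumptions of this sketch, not necessarily the kernel's values:

    #define CAKE_QUEUES   1024           /* assumed table size */
    #define CAKE_SET_WAYS 8              /* assumed ways per set */

    enum cake_set { CAKE_SET_NONE = 0, CAKE_SET_BULK };

    struct cake_flow { enum cake_set set; };

    struct cake_state { struct cake_flow flows[CAKE_QUEUES]; };

    /* If the direct slot is occupied, probe the remaining ways of the
     * same set for a free one, as cake_hash() does with outer_hash + k.
     */
    static unsigned int cake_find_slot(struct cake_state *q, unsigned int hash)
    {
            unsigned int reduced_hash = hash % CAKE_QUEUES;
            unsigned int outer_hash = reduced_hash & ~(CAKE_SET_WAYS - 1);
            unsigned int k;

            if (!q->flows[reduced_hash].set)
                    return reduced_hash;

            for (k = 0; k < CAKE_SET_WAYS; k++)
                    if (!q->flows[outer_hash + k].set)
                            return outer_hash + k;

            return reduced_hash;         /* all ways busy: reuse direct slot */
    }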
sch_sfq.c
    663  if (ctl->flows)    in sfq_change()
    664  q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);    in sfq_change()
    800  opt.v0.flows = q->maxflows;    in sfq_dump()
Kconfig
    283  and Keep for responsive flows, CHOose and Kill for unresponsive
    284  flows). This is a variation of RED which tries to penalize flows
/net/core/
pktgen.c
    414  struct flow_state *flows;    member
   2308  return !!(pkt_dev->flows[flow].flags & F_INIT);    in f_seen()
   2316  if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {    in f_pick()
   2318  pkt_dev->flows[flow].count = 0;    in f_pick()
   2319  pkt_dev->flows[flow].flags = 0;    in f_pick()
   2328  if (pkt_dev->flows[flow].count > pkt_dev->lflow) {    in f_pick()
   2329  pkt_dev->flows[flow].count = 0;    in f_pick()
   2330  pkt_dev->flows[flow].flags = 0;    in f_pick()
   2345  struct xfrm_state *x = pkt_dev->flows[flow].x;    in get_ipsec_sa()
   2364  pkt_dev->flows[flow].x = x;    in get_ipsec_sa()
   [all …]
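The f_pick() hits show pktgen's flow rotation: each entry in the flows array carries a packet count, and once it reaches the per-flow budget lflow, the entry's count and flags are zeroed so that the next pick re-initialises it (F_INIT, tested in f_seen() at line 2308, is dropped). A trimmed sketch; the F_INIT value and the name pick_flow are illustrative:

    #define F_INIT 0x1                   /* assumed "flow initialised" bit */

    struct flow_state {
            unsigned int count;          /* packets sent on this flow */
            unsigned int flags;
    };

    struct pktgen_dev {
            struct flow_state *flows;
            unsigned int lflow;          /* packets per flow before reset */
    };

    /* Retire a flow that used up its budget, mirroring the count/flags
     * reset at lines 2318-2319, then hand it back for (re)use.
     */
    static struct flow_state *pick_flow(struct pktgen_dev *pkt_dev,
                                        unsigned int flow)
    {
            if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {
                    pkt_dev->flows[flow].count = 0;
                    pkt_dev->flows[flow].flags = 0;   /* clears F_INIT */
            }
            return &pkt_dev->flows[flow];
    }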
net-sysfs.c
    961  table->flows[count].cpu = RPS_NO_CPU;    in store_rps_dev_flow_table_cnt()
dev.c
   4430  rflow = &flow_table->flows[flow_id];    in set_rps_cpu()
   4503  rflow = &flow_table->flows[hash & flow_table->mask];    in get_rps_cpu()
   4571  rflow = &flow_table->flows[flow_id];    in rps_may_expire_flow()
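All three dev.c sites index the RPS flow table the same way: the table length is a power of two and mask is length - 1, so hash & mask (line 4503) picks a slot without a modulo. net-sysfs.c above initialises each slot's cpu to RPS_NO_CPU when the table is resized. A minimal restatement; rps_flow_lookup is a hypothetical helper and the RPS_NO_CPU value is assumed:

    #define RPS_NO_CPU 0xffff            /* assumed "no CPU recorded" mark */

    struct rps_dev_flow { unsigned short cpu; };

    struct rps_dev_flow_table {
            unsigned int mask;           /* table length - 1, power of two */
            struct rps_dev_flow flows[]; /* flexible array, sized at alloc */
    };

    /* hash & mask equals hash % length, valid for power-of-two lengths. */
    static struct rps_dev_flow *
    rps_flow_lookup(struct rps_dev_flow_table *t, unsigned int hash)
    {
            return &t->flows[hash & t->mask];
    }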
/net/dccp/ccids/
Kconfig
     20  be reasonably fair when competing for bandwidth with TCP-like flows,
/net/ipv4/
Kconfig
    528  other Reno and H-TCP flows.
    571  can coexist safely is when the CA flows have RTTs << CC flows RTTs.
    638  - Low latency (short flows, queries),
    660  o Coexist with flows that use loss-based congestion control.
    677  coexist with flows that use loss-based congestion control, and can
/net/
Kconfig
    320  backlog reaches netdev_max_backlog. If a few out of many active flows
    322  maintain capacity for the other flows. This feature provides servers
/net/mac80211/
debugfs_netdev.c
    500  txqi->tin.flows,    in ieee80211_if_fmt_aqm()
debugfs_sta.c
    175  txqi->tin.flows,    in sta_aqm_read()
cfg.c
   3982  txqstats->flows = txqi->tin.flows;    in ieee80211_fill_txq_stats()
tx.c
   1347  flow = &fq->flows[cvars - local->cvars];    in codel_dequeue_func()
   1391  cvars = &local->cvars[flow - fq->flows];    in fq_tin_dequeue_func()
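The two tx.c hits are mirror images: mac80211 keeps the fq flows and their CoDel state in parallel arrays, and translates a pointer into one array into the matching element of the other by pointer subtraction (element minus base gives the shared index). A standalone illustration of the idiom with simplified types:

    struct fq_flow    { int backlog; };
    struct codel_vars { int count; };

    /* Given an element of the vars array, return the flow at the same
     * index: exactly the &fq->flows[cvars - local->cvars] pattern.
     */
    static struct fq_flow *flow_of(struct fq_flow *flows,
                                   struct codel_vars *vars_base,
                                   struct codel_vars *cvars)
    {
            return &flows[cvars - vars_base];
    }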
/net/netfilter/
Kconfig
    386  policies to flows, instead of using the global timeout policy.
    505  choose what flows are placed into the hardware.
/net/wireless/
nl80211.c
   1232  PUT_TXQVAL_U32(FLOWS, flows);    in nl80211_put_txq_stats()