Searched refs:flows (Results 1 – 15 of 15) sorted by relevance
/net/sched/ |
D | sch_fq_codel.c | 59 struct fq_codel_flow *flows; /* Flows table [flows_cnt] */ member 169 flow = &q->flows[idx]; in fq_codel_drop() 208 flow = &q->flows[idx]; in fq_codel_enqueue() 270 q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb); in dequeue_func() 355 struct fq_codel_flow *flow = q->flows + i; in fq_codel_reset() 393 if (q->flows) in fq_codel_change() 457 kvfree(q->flows); in fq_codel_destroy() 488 if (!q->flows) { in fq_codel_init() 489 q->flows = kvzalloc(q->flows_cnt * in fq_codel_init() 491 if (!q->flows) in fq_codel_init() [all …]
|
D | sch_atm.c | 65 struct list_head flows; /* NB: "link" is also on this member 77 list_for_each_entry(flow, &p->flows, list) { in lookup_flow() 349 list_for_each_entry(flow, &p->flows, list) { in atm_tc_walk() 386 list_for_each_entry(flow, &p->flows, list) { in atm_tc_enqueue() 470 list_for_each_entry(flow, &p->flows, list) { in sch_atm_dequeue() 540 INIT_LIST_HEAD(&p->flows); in atm_tc_init() 542 list_add(&p->link.list, &p->flows); in atm_tc_init() 567 list_for_each_entry(flow, &p->flows, list) in atm_tc_reset() 578 list_for_each_entry(flow, &p->flows, list) { in atm_tc_destroy() 583 list_for_each_entry_safe(flow, tmp, &p->flows, list) { in atm_tc_destroy()
|
D | sch_fq.c | 103 u32 flows; member 220 q->flows -= fcnt; in fq_gc() 263 if (q->flows >= (2U << q->fq_trees_log) && in fq_classify() 264 q->inactive_flows > q->flows/2) in fq_classify() 309 q->flows++; in fq_classify() 598 q->flows = 0; in fq_reset() 643 q->flows -= fcnt; in fq_rehash() 881 st.flows = q->flows; in fq_dump_stats()
|
D | sch_sfq.c | 656 if (ctl->flows) in sfq_change() 657 q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS); in sfq_change() 792 opt.v0.flows = q->maxflows; in sfq_dump()
|
D | Kconfig | 236 and Keep for responsive flows, CHOose and Kill for unresponsive 237 flows). This is a variation of RED which tries to penalize flows
|
/net/core/ |
D | pktgen.c | 386 struct flow_state *flows; member 2300 return !!(pkt_dev->flows[flow].flags & F_INIT); in f_seen() 2308 if (pkt_dev->flows[flow].count >= pkt_dev->lflow) { in f_pick() 2310 pkt_dev->flows[flow].count = 0; in f_pick() 2311 pkt_dev->flows[flow].flags = 0; in f_pick() 2320 if (pkt_dev->flows[flow].count > pkt_dev->lflow) { in f_pick() 2321 pkt_dev->flows[flow].count = 0; in f_pick() 2322 pkt_dev->flows[flow].flags = 0; in f_pick() 2337 struct xfrm_state *x = pkt_dev->flows[flow].x; in get_ipsec_sa() 2356 pkt_dev->flows[flow].x = x; in get_ipsec_sa() [all …]
|
D | net-sysfs.c | 832 table->flows[count].cpu = RPS_NO_CPU; in store_rps_dev_flow_table_cnt()
|
D | dev.c | 3637 rflow = &flow_table->flows[flow_id]; in set_rps_cpu() 3708 rflow = &flow_table->flows[hash & flow_table->mask]; in get_rps_cpu() 3776 rflow = &flow_table->flows[flow_id]; in rps_may_expire_flow()
|
/net/dccp/ccids/ |
D | Kconfig | 19 be reasonably fair when competing for bandwidth with TCP-like flows,
|
/net/ipv4/ |
D | Kconfig | 525 other Reno and H-TCP flows. 568 can coexist safely is when the CA flows have RTTs << CC flows RTTs. 635 - Low latency (short flows, queries), 657 o Coexist with flows that use loss-based congestion control. 674 modem links. It can coexist with flows that use loss-based congestion
|
/net/ |
D | Kconfig | 323 backlog reaches netdev_max_backlog. If a few out of many active flows 325 maintain capacity for the other flows. This feature provides servers
|
/net/mac80211/ |
D | debugfs_sta.c | 173 txqi->tin.flows, in sta_aqm_read()
|
D | debugfs_netdev.c | 511 txqi->tin.flows, in ieee80211_if_fmt_aqm()
|
D | tx.c | 1308 flow = &fq->flows[cvars - local->cvars]; in codel_dequeue_func() 1352 cvars = &local->cvars[flow - fq->flows]; in fq_tin_dequeue_func()
|
/net/netfilter/ |
D | Kconfig | 353 policies to flows, instead of using the global timeout policy.
|