Searched refs:flow (Results 1 – 25 of 27) sorted by relevance

/net/sched/
sch_atm.c
75 struct atm_flow_data *flow; in lookup_flow() local
77 list_for_each_entry(flow, &p->flows, list) { in lookup_flow()
78 if (flow->common.classid == classid) in lookup_flow()
79 return flow; in lookup_flow()
88 struct atm_flow_data *flow = (struct atm_flow_data *)arg; in atm_tc_graft() local
91 sch, p, flow, new, old); in atm_tc_graft()
92 if (list_empty(&flow->list)) in atm_tc_graft()
96 *old = flow->q; in atm_tc_graft()
97 flow->q = new; in atm_tc_graft()
105 struct atm_flow_data *flow = (struct atm_flow_data *)cl; in atm_tc_leaf() local
[all …]
sch_fq_codel.c
121 static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow) in dequeue_head() argument
123 struct sk_buff *skb = flow->head; in dequeue_head()
125 flow->head = skb->next; in dequeue_head()
131 static inline void flow_queue_add(struct fq_codel_flow *flow, in flow_queue_add() argument
134 if (flow->head == NULL) in flow_queue_add()
135 flow->head = skb; in flow_queue_add()
137 flow->tail->next = skb; in flow_queue_add()
138 flow->tail = skb; in flow_queue_add()
148 struct fq_codel_flow *flow; in fq_codel_drop() local
169 flow = &q->flows[idx]; in fq_codel_drop()
[all …]
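The dequeue_head() and flow_queue_add() lines above show sch_fq_codel's per-flow queue: a singly linked packet list with head and tail pointers, appended at the tail and popped from the head in constant time. A minimal userspace sketch of that pattern follows; struct pkt and struct flow here are hypothetical stand-ins, not the kernel's sk_buff and fq_codel_flow.

```c
/*
 * Minimal userspace sketch (not the kernel code) of the per-flow FIFO
 * seen in dequeue_head()/flow_queue_add() above: singly linked list,
 * head/tail pointers, O(1) enqueue at the tail and dequeue at the head.
 */
#include <stdio.h>
#include <stdlib.h>

struct pkt {
	int id;
	struct pkt *next;
};

struct flow {
	struct pkt *head;	/* oldest packet, dequeued first */
	struct pkt *tail;	/* newest packet, enqueue point */
};

/* Same shape as flow_queue_add(): append at the tail. */
static void flow_enqueue(struct flow *f, struct pkt *p)
{
	p->next = NULL;
	if (!f->head)
		f->head = p;
	else
		f->tail->next = p;
	f->tail = p;
}

/* Same shape as dequeue_head(): pop the oldest packet. */
static struct pkt *flow_dequeue(struct flow *f)
{
	struct pkt *p = f->head;

	if (p)
		f->head = p->next;
	return p;
}

int main(void)
{
	struct flow f = { NULL, NULL };
	struct pkt *p;
	int i;

	for (i = 0; i < 3; i++) {
		p = malloc(sizeof(*p));
		if (!p)
			return 1;
		p->id = i;
		flow_enqueue(&f, p);
	}
	while ((p = flow_dequeue(&f))) {
		printf("dequeued packet %d\n", p->id);
		free(p);
	}
	return 0;
}
```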
sch_hhf.c
183 struct hh_flow_state *flow, *next; in seek_list() local
189 list_for_each_entry_safe(flow, next, head, flowchain) { in seek_list()
190 u32 prev = flow->hit_timestamp + q->hhf_evict_timeout; in seek_list()
196 if (list_is_last(&flow->flowchain, head)) in seek_list()
198 list_del(&flow->flowchain); in seek_list()
199 kfree(flow); in seek_list()
201 } else if (flow->hash_id == hash) { in seek_list()
202 return flow; in seek_list()
214 struct hh_flow_state *flow; in alloc_new_hh() local
219 list_for_each_entry(flow, head, flowchain) { in alloc_new_hh()
[all …]
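seek_list() above scans the heavy-hitter list with list_for_each_entry_safe(), freeing entries whose hit_timestamp has aged past hhf_evict_timeout and returning the entry whose hash_id matches. The sketch below keeps only that scan-and-evict idea; it uses a plain singly linked list and hypothetical names instead of the kernel list API, and leaves out the kernel's extra handling around list_is_last().

```c
/*
 * Userspace sketch of the scan-and-evict pattern in seek_list() above.
 * All names are hypothetical; the kernel walks a struct list_head with
 * list_for_each_entry_safe() instead of this hand-rolled list.
 */
#include <stdlib.h>

struct hh_flow {
	unsigned int hash_id;		/* flow hash this entry tracks */
	unsigned long hit_timestamp;	/* last time the flow was seen */
	struct hh_flow *next;
};

static struct hh_flow *seek_flow(struct hh_flow **head, unsigned int hash,
				 unsigned long now, unsigned long timeout)
{
	struct hh_flow **pp = head, *f;

	while ((f = *pp) != NULL) {
		if (now > f->hit_timestamp + timeout) {
			/* Expired: unlink, free, keep scanning. */
			*pp = f->next;
			free(f);
			continue;
		}
		if (f->hash_id == hash)
			return f;	/* still-live entry for this hash */
		pp = &f->next;
	}
	return NULL;
}

int main(void)
{
	struct hh_flow *head = calloc(1, sizeof(*head));

	if (!head)
		return 1;
	head->hash_id = 7;
	head->hit_timestamp = 100;

	/* At time 1000 with a 500-tick timeout the entry is evicted. */
	return seek_flow(&head, 7, 1000, 500) == NULL ? 0 : 1;
}
```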
cls_flow.c
73 static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow) in flow_get_src() argument
75 __be32 src = flow_get_u32_src(flow); in flow_get_src()
83 static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow) in flow_get_dst() argument
85 __be32 dst = flow_get_u32_dst(flow); in flow_get_dst()
94 const struct flow_keys *flow) in flow_get_proto() argument
96 return flow->basic.ip_proto; in flow_get_proto()
100 const struct flow_keys *flow) in flow_get_proto_src() argument
102 if (flow->ports.ports) in flow_get_proto_src()
103 return ntohs(flow->ports.src); in flow_get_proto_src()
109 const struct flow_keys *flow) in flow_get_proto_dst() argument
[all …]
sch_fq.c
136 static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow) in fq_flow_add_tail() argument
139 head->last->next = flow; in fq_flow_add_tail()
141 head->first = flow; in fq_flow_add_tail()
142 head->last = flow; in fq_flow_add_tail()
143 flow->next = NULL; in fq_flow_add_tail()
316 static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow) in fq_dequeue_head() argument
318 struct sk_buff *skb = flow->head; in fq_dequeue_head()
321 flow->head = skb->next; in fq_dequeue_head()
323 flow->qlen--; in fq_dequeue_head()
350 static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb) in flow_queue_add() argument
[all …]
Kconfig
20 need a certain minimum data flow rate, or if you need to limit the
21 maximum data flow rate for traffic which matches specified criteria.
94 the flow(s) it is handling to a given virtual circuit.
233 tristate "CHOose and Keep responsive flow scheduler (CHOKE)"
281 FQ does flow separation, and is able to respect pacing requirements
338 packet flow.
476 request a minimum and maximum data flow rate for a connection; this
490 request a minimum and maximum data flow rate for a connection; this
/net/openvswitch/
flow_table.c
81 struct sw_flow *flow; in ovs_flow_alloc() local
84 flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL); in ovs_flow_alloc()
85 if (!flow) in ovs_flow_alloc()
88 flow->stats_last_writer = -1; in ovs_flow_alloc()
99 RCU_INIT_POINTER(flow->stats[0], stats); in ovs_flow_alloc()
101 cpumask_set_cpu(0, &flow->cpu_used_mask); in ovs_flow_alloc()
103 return flow; in ovs_flow_alloc()
105 kmem_cache_free(flow_cache, flow); in ovs_flow_alloc()
137 static void flow_free(struct sw_flow *flow) in flow_free() argument
141 if (ovs_identifier_is_key(&flow->id)) in flow_free()
[all …]
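ovs_flow_alloc() above follows a common allocation shape: zero-allocate the flow object, initialise stats_last_writer to -1, pre-allocate the CPU-0 stats block, and free the flow again if that second allocation fails. Below is a userspace sketch of the same shape with simplified stand-in types, not the openvswitch structures; the RCU pointer and cpumask steps are omitted.

```c
/*
 * Userspace sketch of the two-step allocate-and-unwind shape seen in
 * ovs_flow_alloc() above. Types are simplified stand-ins.
 */
#include <stdlib.h>

struct flow_stats {
	unsigned long packets;
	unsigned long bytes;
};

struct sw_flow_sketch {
	int stats_last_writer;		/* -1 means "no CPU has written yet" */
	struct flow_stats *stats0;	/* pre-allocated stats for CPU 0 */
};

static struct sw_flow_sketch *flow_alloc(void)
{
	struct sw_flow_sketch *flow;
	struct flow_stats *stats;

	flow = calloc(1, sizeof(*flow));	/* kmem_cache_zalloc() analogue */
	if (!flow)
		return NULL;

	flow->stats_last_writer = -1;

	stats = calloc(1, sizeof(*stats));
	if (!stats)
		goto err_free_flow;		/* unwind the partial object */

	flow->stats0 = stats;
	return flow;

err_free_flow:
	free(flow);
	return NULL;
}

int main(void)
{
	struct sw_flow_sketch *flow = flow_alloc();

	if (!flow)
		return 1;
	free(flow->stats0);
	free(flow);
	return 0;
}
```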
datapath.c
260 struct sw_flow *flow; in ovs_dp_process_packet() local
269 flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit); in ovs_dp_process_packet()
270 if (unlikely(!flow)) { in ovs_dp_process_packet()
287 ovs_flow_stats_update(flow, key->tp.flags, skb); in ovs_dp_process_packet()
288 sf_acts = rcu_dereference(flow->sf_acts); in ovs_dp_process_packet()
561 struct sw_flow *flow; in ovs_packet_cmd_execute() local
592 flow = ovs_flow_alloc(); in ovs_packet_cmd_execute()
593 err = PTR_ERR(flow); in ovs_packet_cmd_execute()
594 if (IS_ERR(flow)) in ovs_packet_cmd_execute()
598 packet, &flow->key, log); in ovs_packet_cmd_execute()
[all …]
flow.c
71 void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, in ovs_flow_stats_update() argument
78 stats = rcu_dereference(flow->stats[cpu]); in ovs_flow_stats_update()
84 if (cpu == 0 && unlikely(flow->stats_last_writer != cpu)) in ovs_flow_stats_update()
85 flow->stats_last_writer = cpu; in ovs_flow_stats_update()
87 stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */ in ovs_flow_stats_update()
93 if (unlikely(flow->stats_last_writer != cpu)) { in ovs_flow_stats_update()
99 if (likely(flow->stats_last_writer != -1) && in ovs_flow_stats_update()
100 likely(!rcu_access_pointer(flow->stats[cpu]))) { in ovs_flow_stats_update()
118 rcu_assign_pointer(flow->stats[cpu], in ovs_flow_stats_update()
120 cpumask_set_cpu(cpu, &flow->cpu_used_mask); in ovs_flow_stats_update()
[all …]
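ovs_flow_stats_update() above keeps stats per CPU: a writer prefers its own lazily allocated slot, falls back to the pre-allocated slot 0, and tracks stats_last_writer as the hint for when a flow is busy enough to deserve per-CPU copies. The sketch below keeps only that slot-selection idea; the RCU annotations, locking and cpu_used_mask bookkeeping are deliberately left out, and the names plus the fixed NCPUS are hypothetical.

```c
/*
 * Simplified, single-threaded sketch of the per-CPU stats selection in
 * ovs_flow_stats_update() above. No RCU, no locking; slot 0 is assumed
 * to be pre-allocated (as in ovs_flow_alloc()).
 */
#include <stdlib.h>

#define NCPUS 4

struct flow_stats {
	unsigned long packets;
	unsigned long bytes;
};

struct flow_sketch {
	int stats_last_writer;
	struct flow_stats *stats[NCPUS];	/* stats[0] pre-allocated */
};

static void stats_update(struct flow_sketch *flow, int cpu, unsigned long bytes)
{
	struct flow_stats *s = flow->stats[cpu];

	if (!s) {
		/*
		 * First time this CPU writes: give it its own slot. (The
		 * kernel is more careful and only splits the stats once
		 * another CPU has already written, under a lock.)
		 */
		s = calloc(1, sizeof(*s));
		if (s)
			flow->stats[cpu] = s;
		else
			s = flow->stats[0];	/* fall back to shared slot */
	}

	flow->stats_last_writer = cpu;
	s->packets++;
	s->bytes += bytes;
}

int main(void)
{
	struct flow_sketch flow = { .stats_last_writer = -1 };

	flow.stats[0] = calloc(1, sizeof(*flow.stats[0]));
	if (!flow.stats[0])
		return 1;
	stats_update(&flow, 2, 1500);
	return (flow.stats[2] && flow.stats[2]->packets == 1) ? 0 : 1;
}
```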
flow_netlink.h
55 int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb);
56 int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb);
57 int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb);
flow_table.h
70 int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
72 void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
Makefile
12 flow.o \
flow_netlink.c
1877 int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb) in ovs_nla_put_identifier() argument
1879 if (ovs_identifier_is_ufid(&flow->id)) in ovs_nla_put_identifier()
1880 return nla_put(skb, OVS_FLOW_ATTR_UFID, flow->id.ufid_len, in ovs_nla_put_identifier()
1881 flow->id.ufid); in ovs_nla_put_identifier()
1883 return ovs_nla_put_key(flow->id.unmasked_key, flow->id.unmasked_key, in ovs_nla_put_identifier()
1888 int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb) in ovs_nla_put_masked_key() argument
1890 return ovs_nla_put_key(&flow->key, &flow->key, in ovs_nla_put_masked_key()
1895 int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb) in ovs_nla_put_mask() argument
1897 return ovs_nla_put_key(&flow->key, &flow->mask->key, in ovs_nla_put_mask()
Kconfig
21 programmatic extension and flow-based control of the network. This
/net/ipv4/netfilter/
ipt_rpfilter.c
76 struct flowi4 flow; in rpfilter_mt() local
92 flow.flowi4_iif = LOOPBACK_IFINDEX; in rpfilter_mt()
93 flow.daddr = iph->saddr; in rpfilter_mt()
94 flow.saddr = rpfilter_get_saddr(iph->daddr); in rpfilter_mt()
95 flow.flowi4_oif = 0; in rpfilter_mt()
96 flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; in rpfilter_mt()
97 flow.flowi4_tos = RT_TOS(iph->tos); in rpfilter_mt()
98 flow.flowi4_scope = RT_SCOPE_UNIVERSE; in rpfilter_mt()
99 flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par)); in rpfilter_mt()
101 return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert; in rpfilter_mt()
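rpfilter_mt() above builds its flowi4 with the packet's addresses swapped: the packet's source becomes the lookup destination (flow.daddr = iph->saddr), so the routing lookup asks whether there is a route back to the sender consistent with the arriving interface. A short sketch of that key-reversal step with simplified stand-in fields (not struct flowi4); build_reverse_key() and its parameters are hypothetical.

```c
/*
 * Sketch of the reverse-path key fill in rpfilter_mt() above; the
 * fields are simplified stand-ins for struct flowi4.
 */
#include <stdint.h>

struct route_key {
	uint32_t daddr;		/* lookup destination */
	uint32_t saddr;		/* lookup source (0 if not pinned) */
	int oif;		/* restrict to this output interface, 0 = any */
	uint32_t mark;		/* firewall mark, only if configured to match */
};

static void build_reverse_key(struct route_key *key, uint32_t pkt_saddr,
			      uint32_t pkt_daddr, uint32_t skb_mark,
			      int match_mark)
{
	key->daddr = pkt_saddr;	/* route back towards the packet's sender */
	key->saddr = pkt_daddr;	/* simplified; the kernel derives this via rpfilter_get_saddr() */
	key->oif = 0;
	key->mark = match_mark ? skb_mark : 0;
}

int main(void)
{
	struct route_key key;

	build_reverse_key(&key, 0x0a000001 /* 10.0.0.1 */,
			  0x0a000002 /* 10.0.0.2 */, 0, 0);
	return key.daddr == 0x0a000001 ? 0 : 1;
}
```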
/net/core/
flow_dissector.c
899 static const void *flow_keys_hash_start(const struct flow_keys *flow) in flow_keys_hash_start() argument
902 return &flow->FLOW_KEYS_HASH_START_FIELD; in flow_keys_hash_start()
905 static inline size_t flow_keys_hash_length(const struct flow_keys *flow) in flow_keys_hash_length() argument
907 size_t len = offsetof(typeof(*flow), addrs) - FLOW_KEYS_HASH_OFFSET; in flow_keys_hash_length()
909 switch (flow->control.addr_type) { in flow_keys_hash_length()
911 len += sizeof(flow->addrs.v4addrs); in flow_keys_hash_length()
914 len += sizeof(flow->addrs.v6addrs); in flow_keys_hash_length()
917 len += sizeof(flow->addrs.tipcaddrs); in flow_keys_hash_length()
923 __be32 flow_get_u32_src(const struct flow_keys *flow) in flow_get_u32_src() argument
925 switch (flow->control.addr_type) { in flow_get_u32_src()
[all …]
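flow_keys_hash_length() above sizes the region that gets hashed: a fixed span up to the address union plus only the address variant that control.addr_type says is populated (v4addrs, v6addrs or tipcaddrs). The sketch below shows that offsetof-plus-variant idea with a simplified stand-in for struct flow_keys; the kernel additionally starts the span at FLOW_KEYS_HASH_START_FIELD rather than at offset zero, and the TIPC case is omitted here.

```c
/*
 * Userspace sketch of the variable hash-length computation in
 * flow_keys_hash_length() above. struct keys_sketch is a simplified
 * stand-in for struct flow_keys.
 */
#include <stddef.h>
#include <stdint.h>

enum addr_type { ADDR_IPV4, ADDR_IPV6 };

struct keys_sketch {
	struct { enum addr_type addr_type; } control;
	struct { uint8_t ip_proto; } basic;
	struct { uint16_t src, dst; } ports;
	union {
		struct { uint32_t src, dst; } v4addrs;
		struct { uint32_t src[4], dst[4]; } v6addrs;
	} addrs;			/* must stay the last hashed member */
};

static size_t keys_hash_length(const struct keys_sketch *k)
{
	/* Fixed part: everything before the address union. */
	size_t len = offsetof(struct keys_sketch, addrs);

	/* Variable part: only the variant that is actually populated. */
	switch (k->control.addr_type) {
	case ADDR_IPV4:
		len += sizeof(k->addrs.v4addrs);
		break;
	case ADDR_IPV6:
		len += sizeof(k->addrs.v6addrs);
		break;
	}
	return len;
}

int main(void)
{
	struct keys_sketch k4 = { .control = { ADDR_IPV4 } };
	struct keys_sketch k6 = { .control = { ADDR_IPV6 } };

	/* The IPv6 variant hashes a longer span than the IPv4 one. */
	return keys_hash_length(&k6) > keys_hash_length(&k4) ? 0 : 1;
}
```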
pktgen.c
2298 static inline int f_seen(const struct pktgen_dev *pkt_dev, int flow) in f_seen() argument
2300 return !!(pkt_dev->flows[flow].flags & F_INIT); in f_seen()
2305 int flow = pkt_dev->curfl; in f_pick() local
2308 if (pkt_dev->flows[flow].count >= pkt_dev->lflow) { in f_pick()
2310 pkt_dev->flows[flow].count = 0; in f_pick()
2311 pkt_dev->flows[flow].flags = 0; in f_pick()
2317 flow = prandom_u32() % pkt_dev->cflows; in f_pick()
2318 pkt_dev->curfl = flow; in f_pick()
2320 if (pkt_dev->flows[flow].count > pkt_dev->lflow) { in f_pick()
2321 pkt_dev->flows[flow].count = 0; in f_pick()
[all …]
/net/netfilter/
xt_addrtype.c
40 struct flowi6 flow; in match_lookup_rt6() local
45 memset(&flow, 0, sizeof(flow)); in match_lookup_rt6()
46 flow.daddr = *addr; in match_lookup_rt6()
48 flow.flowi6_oif = dev->ifindex; in match_lookup_rt6()
60 flowi6_to_flowi(&flow), false); in match_lookup_rt6()
/net/caif/
chnl_net.c
167 static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow, in chnl_flowctrl_cb() argument
172 flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" : in chnl_flowctrl_cb()
173 flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" : in chnl_flowctrl_cb()
174 flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" : in chnl_flowctrl_cb()
175 flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" : in chnl_flowctrl_cb()
176 flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" : in chnl_flowctrl_cb()
177 flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? in chnl_flowctrl_cb()
182 switch (flow) { in chnl_flowctrl_cb()
caif_socket.c
195 enum caif_ctrlcmd flow, in caif_ctrl_cb() argument
199 switch (flow) { in caif_ctrl_cb()
250 pr_debug("Unexpected flow command %d\n", flow); in caif_ctrl_cb()
/net/decnet/
TODO
34 o Add session control message flow control
36 o Add NSP message flow control
/net/dccp/ccids/
Kconfig
20 where a flow is "reasonably fair" if its sending rate is generally
21 within a factor of two of the sending rate of a TCP flow under the
/net/switchdev/
Kconfig
12 also various flow offloading chips, including switches embedded into
/net/mac80211/
tx.c
1299 struct fq_flow *flow; in codel_dequeue_func() local
1306 flow = &txqi->def_flow; in codel_dequeue_func()
1308 flow = &fq->flows[cvars - local->cvars]; in codel_dequeue_func()
1310 return fq_flow_dequeue(fq, flow); in codel_dequeue_func()
1329 struct fq_flow *flow) in fq_tin_dequeue_func() argument
1349 if (flow == &txqi->def_flow) in fq_tin_dequeue_func()
1352 cvars = &local->cvars[flow - fq->flows]; in fq_tin_dequeue_func()
1355 &flow->backlog, in fq_tin_dequeue_func()
1367 struct fq_flow *flow, in fq_skb_free_func() argument
3119 struct fq_flow *flow; in ieee80211_amsdu_aggregate() local
[all …]
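The fq_tin_dequeue_func() hit above (cvars = &local->cvars[flow - fq->flows]) recovers a flow's CoDel state from a parallel array: the flow's index within fq->flows, obtained by pointer subtraction, indexes local->cvars, which is kept in lock-step. A minimal sketch of that indexing trick with hypothetical types:

```c
/*
 * Sketch of the parallel-array lookup used above: per-flow CoDel state
 * lives in a second array, indexed by the flow's position in the flow
 * array. Types and names are hypothetical.
 */
#include <stddef.h>

#define NFLOWS 16

struct flow_sketch { int backlog; };
struct codel_vars_sketch { int count; };

static struct flow_sketch flows[NFLOWS];
static struct codel_vars_sketch cvars[NFLOWS];	/* kept in lock-step with flows[] */

static struct codel_vars_sketch *vars_for_flow(const struct flow_sketch *flow)
{
	/* flow - flows is the flow's index; the same index selects its state. */
	return &cvars[flow - flows];
}

int main(void)
{
	return vars_for_flow(&flows[5]) == &cvars[5] ? 0 : 1;
}
```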
/net/wireless/
Kconfig
120 capabilities in their registration flow.
