
Searched refs:tp (results 1–25 of 100), sorted by relevance


/net/ipv4/
tcp_input.c
326 static void tcp_ecn_queue_cwr(struct tcp_sock *tp) in tcp_ecn_queue_cwr() argument
328 if (tp->ecn_flags & TCP_ECN_OK) in tcp_ecn_queue_cwr()
329 tp->ecn_flags |= TCP_ECN_QUEUE_CWR; in tcp_ecn_queue_cwr()
346 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) in tcp_ecn_withdraw_cwr() argument
348 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; in tcp_ecn_withdraw_cwr()
353 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ecn_check_ce() local
361 if (tp->ecn_flags & TCP_ECN_SEEN) in __tcp_ecn_check_ce()
368 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { in __tcp_ecn_check_ce()
371 tp->ecn_flags |= TCP_ECN_DEMAND_CWR; in __tcp_ecn_check_ce()
373 tp->ecn_flags |= TCP_ECN_SEEN; in __tcp_ecn_check_ce()
[all …]
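
The excerpt is the receiver-side ECN bookkeeping: TCP_ECN_OK gates whether a CWR gets queued, and an incoming CE mark raises TCP_ECN_DEMAND_CWR and TCP_ECN_SEEN. A minimal standalone sketch of that flag logic; the flag names mirror the kernel's, everything else (helper names, the driver in main) is illustrative.

#include <stdint.h>
#include <stdio.h>

#define TCP_ECN_OK          1u
#define TCP_ECN_QUEUE_CWR   2u
#define TCP_ECN_DEMAND_CWR  4u
#define TCP_ECN_SEEN        8u

/* Queue a CWR only if ECN was negotiated on this connection. */
static void ecn_queue_cwr(uint32_t *flags)
{
        if (*flags & TCP_ECN_OK)
                *flags |= TCP_ECN_QUEUE_CWR;
}

/* React to a CE-marked segment: demand a CWR from the peer and
 * remember that ECN marks have been seen on this connection. */
static void ecn_check_ce(uint32_t *flags)
{
        if (!(*flags & TCP_ECN_DEMAND_CWR))
                *flags |= TCP_ECN_DEMAND_CWR;
        *flags |= TCP_ECN_SEEN;
}

int main(void)
{
        uint32_t flags = TCP_ECN_OK;

        ecn_check_ce(&flags);
        ecn_queue_cwr(&flags);
        printf("ecn_flags = 0x%x\n", (unsigned)flags);
        return 0;
}
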
tcp_recovery.c
12 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_reo_wnd() local
14 if (!tp->reord_seen) { in tcp_rack_reo_wnd()
21 if (tp->sacked_out >= tp->reordering && in tcp_rack_reo_wnd()
33 return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps, in tcp_rack_reo_wnd()
34 tp->srtt_us >> 3); in tcp_rack_reo_wnd()
37 s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd) in tcp_rack_skb_timeout() argument
39 return tp->rack.rtt_us + reo_wnd - in tcp_rack_skb_timeout()
40 tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb)); in tcp_rack_skb_timeout()
65 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_detect_loss() local
71 list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue, in tcp_rack_detect_loss()
[all …]
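
tcp_rack_reo_wnd() sizes RACK's reordering window: a quarter of the minimum RTT per backoff step, but never more than the smoothed RTT (the >> 2 is /4, and >> 3 undoes the kernel's 8x srtt scaling). A standalone sketch of that arithmetic, with illustrative names:

#include <stdint.h>
#include <stdio.h>

static uint32_t min_u32(uint32_t a, uint32_t b)
{
        return a < b ? a : b;
}

/* reo_wnd = min((min_rtt / 4) * steps, srtt), all in microseconds;
 * srtt_8x_us is stored as 8 * RTT, as in the kernel. */
static uint32_t rack_reo_wnd(uint32_t min_rtt_us, uint32_t srtt_8x_us,
                             uint32_t reo_wnd_steps)
{
        return min_u32((min_rtt_us >> 2) * reo_wnd_steps,
                       srtt_8x_us >> 3);
}

int main(void)
{
        /* min RTT 40 ms, srtt 50 ms, one step -> 10 ms window */
        printf("reo_wnd = %u us\n",
               (unsigned)rack_reo_wnd(40000, 8 * 50000, 1));
        return 0;
}
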
tcp_output.c
53 void tcp_mstamp_refresh(struct tcp_sock *tp) in tcp_mstamp_refresh() argument
57 tp->tcp_clock_cache = val; in tcp_mstamp_refresh()
58 tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC); in tcp_mstamp_refresh()
68 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_new_data_sent() local
69 unsigned int prior_packets = tp->packets_out; in tcp_event_new_data_sent()
71 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq); in tcp_event_new_data_sent()
76 if (tp->highest_sack == NULL) in tcp_event_new_data_sent()
77 tp->highest_sack = skb; in tcp_event_new_data_sent()
79 tp->packets_out += tcp_skb_pcount(skb); in tcp_event_new_data_sent()
97 const struct tcp_sock *tp = tcp_sk(sk); in tcp_acceptable_seq() local
[all …]
tcp_rate.c
42 struct tcp_sock *tp = tcp_sk(sk); in tcp_rate_skb_sent() local
58 if (!tp->packets_out) { in tcp_rate_skb_sent()
61 tp->first_tx_mstamp = tstamp_us; in tcp_rate_skb_sent()
62 tp->delivered_mstamp = tstamp_us; in tcp_rate_skb_sent()
65 TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp; in tcp_rate_skb_sent()
66 TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp; in tcp_rate_skb_sent()
67 TCP_SKB_CB(skb)->tx.delivered = tp->delivered; in tcp_rate_skb_sent()
68 TCP_SKB_CB(skb)->tx.is_app_limited = tp->app_limited ? 1 : 0; in tcp_rate_skb_sent()
82 struct tcp_sock *tp = tcp_sk(sk); in tcp_rate_skb_delivered() local
91 tcp_skb_sent_after(tx_tstamp, tp->first_tx_mstamp, in tcp_rate_skb_delivered()
[all …]
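
tcp_rate_skb_sent() stamps each outgoing skb with the connection's current delivery state so that, when the skb is later (S)ACKed, tcp_rate_skb_delivered() can form a rate sample as the delivered delta over the elapsed interval. A userspace sketch of that bookkeeping; struct and field names are illustrative:

#include <stdint.h>
#include <stdio.h>

struct tx_snapshot {
        uint64_t delivered_mstamp;  /* time of last delivery update, us */
        uint32_t delivered;         /* cumulative packets delivered */
};

/* Taken when a packet is sent: remember the delivery state. */
static struct tx_snapshot snapshot_on_send(uint64_t delivered_mstamp,
                                           uint32_t delivered)
{
        struct tx_snapshot s = { delivered_mstamp, delivered };

        return s;
}

/* Taken when that packet is (S)ACKed: packets per second. */
static double rate_sample(const struct tx_snapshot *s,
                          uint64_t now_us, uint32_t delivered_now)
{
        uint64_t interval_us = now_us - s->delivered_mstamp;

        if (!interval_us)
                return 0.0;
        return (delivered_now - s->delivered) * 1e6 / interval_us;
}

int main(void)
{
        struct tx_snapshot s = snapshot_on_send(1000000, 100);

        /* 20 packets delivered over 10 ms -> 2000 pkt/s */
        printf("%.0f pkt/s\n", rate_sample(&s, 1010000, 120));
        return 0;
}
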
tcp.c
398 static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp) in tcp_compute_delivery_rate() argument
400 u32 rate = READ_ONCE(tp->rate_delivered); in tcp_compute_delivery_rate()
401 u32 intv = READ_ONCE(tp->rate_interval_us); in tcp_compute_delivery_rate()
405 rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC; in tcp_compute_delivery_rate()
419 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_sock() local
421 tp->out_of_order_queue = RB_ROOT; in tcp_init_sock()
424 INIT_LIST_HEAD(&tp->tsq_node); in tcp_init_sock()
425 INIT_LIST_HEAD(&tp->tsorted_sent_queue); in tcp_init_sock()
430 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); in tcp_init_sock()
431 minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U); in tcp_init_sock()
[all …]
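
tcp_compute_delivery_rate() converts the latest rate sample into bytes per second: the excerpt shows the numerator rate * mss_cache * USEC_PER_SEC, and the elided tail presumably divides by rate_interval_us. A worked standalone sketch of that conversion:

#include <stdint.h>
#include <stdio.h>

/* bytes/sec = delivered_pkts * mss * 1e6 / interval_us */
static uint64_t delivery_rate(uint32_t delivered, uint32_t mss,
                              uint32_t interval_us)
{
        if (!interval_us)
                return 0;
        return (uint64_t)delivered * mss * 1000000ull / interval_us;
}

int main(void)
{
        /* 100 packets of 1448 bytes in 10 ms -> ~14.5 MB/s */
        printf("%llu B/s\n",
               (unsigned long long)delivery_rate(100, 1448, 10000));
        return 0;
}
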
tcp_vegas.c
73 const struct tcp_sock *tp = tcp_sk(sk); in vegas_enable() local
80 vegas->beg_snd_nxt = tp->snd_nxt; in vegas_enable()
160 static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp) in tcp_vegas_ssthresh() argument
162 return min(tp->snd_ssthresh, tcp_snd_cwnd(tp)); in tcp_vegas_ssthresh()
167 struct tcp_sock *tp = tcp_sk(sk); in tcp_vegas_cong_avoid() local
181 vegas->beg_snd_nxt = tp->snd_nxt; in tcp_vegas_cong_avoid()
220 target_cwnd = (u64)tcp_snd_cwnd(tp) * vegas->baseRTT; in tcp_vegas_cong_avoid()
227 diff = tcp_snd_cwnd(tp) * (rtt-vegas->baseRTT) / vegas->baseRTT; in tcp_vegas_cong_avoid()
229 if (diff > gamma && tcp_in_slow_start(tp)) { in tcp_vegas_cong_avoid()
241 tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), in tcp_vegas_cong_avoid()
[all …]
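
Vegas estimates queueing from RTT inflation: with target_cwnd = cwnd * baseRTT / rtt, the quantity diff = cwnd * (rtt - baseRTT) / baseRTT in the excerpt approximates how many packets the flow has sitting in router queues, which is then compared against the alpha/beta/gamma thresholds. A worked sketch of the arithmetic, illustrative names:

#include <stdint.h>
#include <stdio.h>

/* Packets the sender believes are queued in the network. */
static uint32_t vegas_diff(uint32_t cwnd, uint32_t rtt_us,
                           uint32_t base_rtt_us)
{
        return cwnd * (rtt_us - base_rtt_us) / base_rtt_us;
}

int main(void)
{
        /* cwnd 40, base RTT 100 ms, measured 125 ms -> 10 queued pkts */
        printf("diff = %u packets\n",
               (unsigned)vegas_diff(40, 125000, 100000));
        return 0;
}
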
tcp_hybla.c
48 struct tcp_sock *tp = tcp_sk(sk); in hybla_init() local
57 tcp_snd_cwnd_set(tp, 2); in hybla_init()
58 tp->snd_cwnd_clamp = 65535; in hybla_init()
64 ca->minrtt_us = tp->srtt_us; in hybla_init()
65 tcp_snd_cwnd_set(tp, ca->rho); in hybla_init()
92 struct tcp_sock *tp = tcp_sk(sk); in hybla_cong_avoid() local
98 if (tp->srtt_us < ca->minrtt_us) { in hybla_cong_avoid()
100 ca->minrtt_us = tp->srtt_us; in hybla_cong_avoid()
116 if (tcp_in_slow_start(tp)) { in hybla_cong_avoid()
140 increment = ca->rho2_7ls / tcp_snd_cwnd(tp); in hybla_cong_avoid()
[all …]
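
Hybla normalizes growth by rho = RTT / RTT0 (RTT0 defaults to 25 ms in the module) so long-RTT flows keep pace with a reference connection; the excerpt's rho2_7ls holds rho squared in 7-bit fixed point, and the congestion-avoidance increment is rho²/cwnd. A fixed-point sketch under those assumptions, illustrative names:

#include <stdint.h>
#include <stdio.h>

/* rho in 3-bit fixed point (rho << 3), clamped so rho >= 1. */
static uint32_t hybla_rho_3ls(uint32_t rtt_us, uint32_t rtt0_us)
{
        uint32_t rho_3ls = (rtt_us << 3) / rtt0_us;

        return rho_3ls > (1 << 3) ? rho_3ls : (1 << 3);
}

/* Congestion-avoidance increment in 7-bit fixed point: rho^2 / cwnd. */
static uint32_t hybla_ca_increment_7ls(uint32_t rho_3ls, uint32_t cwnd)
{
        uint32_t rho2_7ls = (rho_3ls * rho_3ls) << 1; /* rho^2 << 7 */

        return rho2_7ls / cwnd;
}

int main(void)
{
        /* RTT 200 ms vs 25 ms reference: rho = 8 */
        uint32_t rho_3ls = hybla_rho_3ls(200000, 25000);
        uint32_t inc = hybla_ca_increment_7ls(rho_3ls, 64);

        /* 128/128 = exactly one extra segment per ACK at cwnd 64 */
        printf("increment = %u/128 segments per ACK\n", (unsigned)inc);
        return 0;
}
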
tcp_highspeed.c
102 struct tcp_sock *tp = tcp_sk(sk); in hstcp_init() local
109 tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128); in hstcp_init()
114 struct tcp_sock *tp = tcp_sk(sk); in hstcp_cong_avoid() local
120 if (tcp_in_slow_start(tp)) in hstcp_cong_avoid()
121 tcp_slow_start(tp, acked); in hstcp_cong_avoid()
130 if (tcp_snd_cwnd(tp) > hstcp_aimd_vals[ca->ai].cwnd) { in hstcp_cong_avoid()
131 while (tcp_snd_cwnd(tp) > hstcp_aimd_vals[ca->ai].cwnd && in hstcp_cong_avoid()
134 } else if (ca->ai && tcp_snd_cwnd(tp) <= hstcp_aimd_vals[ca->ai-1].cwnd) { in hstcp_cong_avoid()
135 while (ca->ai && tcp_snd_cwnd(tp) <= hstcp_aimd_vals[ca->ai-1].cwnd) in hstcp_cong_avoid()
140 if (tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp) { in hstcp_cong_avoid()
[all …]
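
HSTCP replaces Reno's fixed additive increase with a lookup table indexed by cwnd: the two loops in the excerpt walk the index ai up while cwnd exceeds the current row's threshold and back down when it drops below the previous one. A compressed sketch with a toy three-row table (the cwnd thresholds and increase values echo the first rows of RFC 3649's a(w) table; the kernel's hstcp_aimd_vals[] is much larger):

#include <stdint.h>
#include <stdio.h>

struct aimd_val {
        uint32_t cwnd;  /* threshold */
        uint32_t ai;    /* segments added per RTT in this band */
};

static const struct aimd_val aimd_vals[] = {
        {  38, 1 }, { 118, 2 }, { 221, 3 },
};
#define AIMD_ROWS (sizeof(aimd_vals) / sizeof(aimd_vals[0]))

/* Keep ai pointing at the table row matching the current cwnd. */
static uint32_t update_ai(uint32_t ai, uint32_t cwnd)
{
        while (ai < AIMD_ROWS - 1 && cwnd > aimd_vals[ai].cwnd)
                ai++;
        while (ai && cwnd <= aimd_vals[ai - 1].cwnd)
                ai--;
        return ai;
}

int main(void)
{
        uint32_t ai = update_ai(0, 150);

        printf("cwnd 150 -> ai %u (add %u per RTT)\n",
               (unsigned)ai, (unsigned)aimd_vals[ai].ai);
        return 0;
}
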
tcp_yeah.c
43 struct tcp_sock *tp = tcp_sk(sk); in tcp_yeah_init() local
55 tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128); in tcp_yeah_init()
60 struct tcp_sock *tp = tcp_sk(sk); in tcp_yeah_cong_avoid() local
66 if (tcp_in_slow_start(tp)) { in tcp_yeah_cong_avoid()
67 acked = tcp_slow_start(tp, acked); in tcp_yeah_cong_avoid()
74 tcp_cong_avoid_ai(tp, min(tcp_snd_cwnd(tp), TCP_SCALABLE_AI_CNT), in tcp_yeah_cong_avoid()
78 tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked); in tcp_yeah_cong_avoid()
133 bw = tcp_snd_cwnd(tp); in tcp_yeah_cong_avoid()
141 tcp_snd_cwnd(tp) > yeah->reno_count) { in tcp_yeah_cong_avoid()
143 tcp_snd_cwnd(tp) >> TCP_YEAH_EPSILON); in tcp_yeah_cong_avoid()
[all …]
tcp_dctcp.c
67 static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca) in dctcp_reset() argument
69 ca->next_seq = tp->snd_nxt; in dctcp_reset()
71 ca->old_delivered = tp->delivered; in dctcp_reset()
72 ca->old_delivered_ce = tp->delivered_ce; in dctcp_reset()
77 const struct tcp_sock *tp = tcp_sk(sk); in dctcp_init() local
79 if ((tp->ecn_flags & TCP_ECN_OK) || in dctcp_init()
84 ca->prior_rcv_nxt = tp->rcv_nxt; in dctcp_init()
91 dctcp_reset(tp, ca); in dctcp_init()
105 struct tcp_sock *tp = tcp_sk(sk); in dctcp_ssthresh() local
107 ca->loss_cwnd = tcp_snd_cwnd(tp); in dctcp_ssthresh()
[all …]
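
dctcp_reset() snapshots snd_nxt, delivered and delivered_ce at the start of each observation window; when the window ends, DCTCP folds the CE-marked fraction of deliveries into its running estimate, alpha <- (1 - g) * alpha + g * F with g = 1/16 by default (per the DCTCP paper; the update itself sits in an elided part of the file). A fixed-point sketch of that update, illustrative names:

#include <stdint.h>
#include <stdio.h>

#define DCTCP_MAX_ALPHA 1024u   /* alpha scaled to [0, 1024] */
#define DCTCP_SHIFT_G   4u      /* g = 1/16 */

/* alpha = alpha - alpha/16 + F/16, with F on the same 0..1024 scale. */
static uint32_t dctcp_update_alpha(uint32_t alpha,
                                   uint32_t delivered,
                                   uint32_t delivered_ce)
{
        uint32_t frac = 0;

        if (delivered)
                frac = DCTCP_MAX_ALPHA * delivered_ce / delivered;
        alpha = alpha - (alpha >> DCTCP_SHIFT_G) + (frac >> DCTCP_SHIFT_G);
        return alpha > DCTCP_MAX_ALPHA ? DCTCP_MAX_ALPHA : alpha;
}

int main(void)
{
        /* A window where half the deliveries carried CE marks. */
        uint32_t alpha = dctcp_update_alpha(0, 100, 50);

        printf("alpha = %u/1024 after one window\n", (unsigned)alpha);
        return 0;
}
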
tcp_veno.c
121 struct tcp_sock *tp = tcp_sk(sk); in tcp_veno_cong_avoid() local
149 target_cwnd = (u64)tcp_snd_cwnd(tp) * veno->basertt; in tcp_veno_cong_avoid()
153 veno->diff = (tcp_snd_cwnd(tp) << V_PARAM_SHIFT) - target_cwnd; in tcp_veno_cong_avoid()
155 if (tcp_in_slow_start(tp)) { in tcp_veno_cong_avoid()
157 acked = tcp_slow_start(tp, acked); in tcp_veno_cong_avoid()
167 tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked); in tcp_veno_cong_avoid()
172 if (tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) { in tcp_veno_cong_avoid()
174 tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp) { in tcp_veno_cong_avoid()
175 tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1); in tcp_veno_cong_avoid()
179 tp->snd_cwnd_cnt = 0; in tcp_veno_cong_avoid()
[all …]
tcp_nv.c
126 struct tcp_sock *tp = tcp_sk(sk); in tcpnv_reset() local
133 ca->nv_rtt_start_seq = tp->snd_una; in tcpnv_reset()
135 ca->nv_last_snd_una = tp->snd_una; in tcpnv_reset()
183 struct tcp_sock *tp = tcp_sk(sk); in tcpnv_cong_avoid() local
194 if (tcp_in_slow_start(tp)) { in tcpnv_cong_avoid()
195 acked = tcp_slow_start(tp, acked); in tcpnv_cong_avoid()
201 cnt = tcp_snd_cwnd(tp) << -ca->cwnd_growth_factor; in tcpnv_cong_avoid()
202 tcp_cong_avoid_ai(tp, cnt, acked); in tcpnv_cong_avoid()
204 cnt = max(4U, tcp_snd_cwnd(tp) >> ca->cwnd_growth_factor); in tcpnv_cong_avoid()
205 tcp_cong_avoid_ai(tp, cnt, acked); in tcpnv_cong_avoid()
[all …]
tcp_timer.c
104 struct tcp_sock *tp = tcp_sk(sk); in tcp_out_of_resources() local
109 if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset) in tcp_out_of_resources()
119 if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN || in tcp_out_of_resources()
121 (!tp->snd_wnd && !tp->packets_out)) in tcp_out_of_resources()
234 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_timeout() local
270 if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG)) in tcp_write_timeout()
282 tp->timeout_rehash++; in tcp_write_timeout()
360 struct tcp_sock *tp = tcp_sk(sk); in tcp_probe_timer() local
363 if (tp->packets_out || !skb) { in tcp_probe_timer()
410 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastopen_synack_timer() local
[all …]
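
The (s32)(tcp_jiffies32 - tp->lsndtime) casts in tcp_out_of_resources() are the standard wrap-safe timestamp comparison: subtract the 32-bit counters, then read the result as signed, so the test stays correct across counter wraparound (valid while the true distance is under 2^31 ticks). A minimal sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if 'a' is later than 'b', tolerant of u32 wraparound. */
static bool time_after32(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) > 0;
}

int main(void)
{
        uint32_t before_wrap = 0xfffffff0u;
        uint32_t after_wrap  = 0x00000010u;

        printf("%d\n", time_after32(after_wrap, before_wrap)); /* 1 */
        printf("%d\n", time_after32(before_wrap, after_wrap)); /* 0 */
        return 0;
}
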
tcp_bbr.c
266 struct tcp_sock *tp = tcp_sk(sk); in bbr_init_pacing_rate_from_rtt() local
271 if (tp->srtt_us) { /* any RTT sample yet? */ in bbr_init_pacing_rate_from_rtt()
272 rtt_us = max(tp->srtt_us >> 3, 1U); in bbr_init_pacing_rate_from_rtt()
277 bw = (u64)tcp_snd_cwnd(tp) * BW_UNIT; in bbr_init_pacing_rate_from_rtt()
285 struct tcp_sock *tp = tcp_sk(sk); in bbr_set_pacing_rate() local
289 if (unlikely(!bbr->has_seen_rtt && tp->srtt_us)) in bbr_set_pacing_rate()
303 struct tcp_sock *tp = tcp_sk(sk); in bbr_tso_segs_goal() local
312 segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk)); in bbr_tso_segs_goal()
320 struct tcp_sock *tp = tcp_sk(sk); in bbr_save_cwnd() local
324 bbr->prior_cwnd = tcp_snd_cwnd(tp); /* this cwnd is good enough */ in bbr_save_cwnd()
[all …]
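
bbr_init_pacing_rate_from_rtt() seeds the pacing rate before any bandwidth samples exist: it treats cwnd worth of data per smoothed RTT as the bandwidth estimate (srtt_us >> 3 undoes the 8x srtt scaling; BW_UNIT is a fixed-point scale). A plain-arithmetic sketch of that bootstrap, illustrative names; the 2.885 startup gain is BBR's 2/ln(2):

#include <stdint.h>
#include <stdio.h>

/* Bytes per second implied by sending cwnd segments per RTT. */
static uint64_t init_pacing_rate(uint32_t cwnd, uint32_t mss,
                                 uint32_t srtt_8x_us, double gain)
{
        uint32_t rtt_us = srtt_8x_us >> 3;

        if (!rtt_us)
                rtt_us = 1;     /* no RTT sample yet */
        return (uint64_t)((double)cwnd * mss * 1e6 / rtt_us * gain);
}

int main(void)
{
        /* 10 segments of 1448 B per 100 ms RTT, high-gain startup */
        printf("%llu B/s\n", (unsigned long long)
               init_pacing_rate(10, 1448, 8 * 100000, 2.885));
        return 0;
}
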
tcp_cdg.c
143 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_hystart_update() local
150 u32 now_us = tp->tcp_mstamp; in tcp_cdg_hystart_update()
164 tcp_snd_cwnd(tp)); in tcp_cdg_hystart_update()
165 tp->snd_ssthresh = tcp_snd_cwnd(tp); in tcp_cdg_hystart_update()
183 tcp_snd_cwnd(tp)); in tcp_cdg_hystart_update()
184 tp->snd_ssthresh = tcp_snd_cwnd(tp); in tcp_cdg_hystart_update()
244 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_backoff() local
255 ca->shadow_wnd = max(ca->shadow_wnd, tcp_snd_cwnd(tp)); in tcp_cdg_backoff()
265 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_cong_avoid() local
269 if (tcp_in_slow_start(tp) && hystart_detect) in tcp_cdg_cong_avoid()
[all …]
tcp_cong.c
396 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked) in tcp_slow_start() argument
398 u32 cwnd = min(tcp_snd_cwnd(tp) + acked, tp->snd_ssthresh); in tcp_slow_start()
400 acked -= cwnd - tcp_snd_cwnd(tp); in tcp_slow_start()
401 tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp)); in tcp_slow_start()
410 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked) in tcp_cong_avoid_ai() argument
413 if (tp->snd_cwnd_cnt >= w) { in tcp_cong_avoid_ai()
414 tp->snd_cwnd_cnt = 0; in tcp_cong_avoid_ai()
415 tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1); in tcp_cong_avoid_ai()
418 tp->snd_cwnd_cnt += acked; in tcp_cong_avoid_ai()
419 if (tp->snd_cwnd_cnt >= w) { in tcp_cong_avoid_ai()
[all …]
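
tcp_slow_start() grows cwnd one segment per segment acked up to ssthresh and returns the leftover ACK credit so the caller can continue with additive increase; tcp_cong_avoid_ai() then adds one segment per w segments acked, counting via snd_cwnd_cnt. A deliberately simplified userspace sketch of both (the handling past the excerpt's cutoff is condensed):

#include <stdint.h>
#include <stdio.h>

struct cc {
        uint32_t cwnd, cwnd_clamp, ssthresh, cwnd_cnt;
};

/* Exponential growth: cwnd += acked, capped at ssthresh;
 * returns the ACKs left over for additive increase. */
static uint32_t slow_start(struct cc *cc, uint32_t acked)
{
        uint32_t cwnd = cc->cwnd + acked;

        if (cwnd > cc->ssthresh)
                cwnd = cc->ssthresh;
        acked -= cwnd - cc->cwnd;
        cc->cwnd = cwnd < cc->cwnd_clamp ? cwnd : cc->cwnd_clamp;
        return acked;
}

/* Additive increase: one extra segment per w segments acked. */
static void cong_avoid_ai(struct cc *cc, uint32_t w, uint32_t acked)
{
        cc->cwnd_cnt += acked;
        if (cc->cwnd_cnt >= w) {
                cc->cwnd_cnt -= w;
                if (cc->cwnd < cc->cwnd_clamp)
                        cc->cwnd++;
        }
}

int main(void)
{
        struct cc cc = { .cwnd = 10, .cwnd_clamp = 1000, .ssthresh = 12 };
        uint32_t left = slow_start(&cc, 5);     /* 10 -> 12, 3 left */

        cong_avoid_ai(&cc, cc.cwnd, left);
        printf("cwnd = %u, leftover = %u\n",
               (unsigned)cc.cwnd, (unsigned)left);
        return 0;
}
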
tcp_fastopen.c
177 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastopen_add_skb() local
179 if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt) in tcp_fastopen_add_skb()
193 tp->segs_in = 0; in tcp_fastopen_add_skb()
194 tcp_segs_in(tp, skb); in tcp_fastopen_add_skb()
202 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; in tcp_fastopen_add_skb()
204 tp->syn_data_acked = 1; in tcp_fastopen_add_skb()
209 tp->bytes_received = skb->len; in tcp_fastopen_add_skb()
248 struct tcp_sock *tp; in tcp_fastopen_create_child() local
266 tp = tcp_sk(child); in tcp_fastopen_create_child()
268 rcu_assign_pointer(tp->fastopen_rsk, req); in tcp_fastopen_create_child()
[all …]
tcp_cubic.c
118 struct tcp_sock *tp = tcp_sk(sk); in bictcp_hystart_reset() local
122 ca->end_seq = tp->snd_nxt; in bictcp_hystart_reset()
324 struct tcp_sock *tp = tcp_sk(sk); in cubictcp_cong_avoid() local
330 if (tcp_in_slow_start(tp)) { in cubictcp_cong_avoid()
331 acked = tcp_slow_start(tp, acked); in cubictcp_cong_avoid()
335 bictcp_update(ca, tcp_snd_cwnd(tp), acked); in cubictcp_cong_avoid()
336 tcp_cong_avoid_ai(tp, ca->cnt, acked); in cubictcp_cong_avoid()
341 const struct tcp_sock *tp = tcp_sk(sk); in cubictcp_recalc_ssthresh() local
347 if (tcp_snd_cwnd(tp) < ca->last_max_cwnd && fast_convergence) in cubictcp_recalc_ssthresh()
348 ca->last_max_cwnd = (tcp_snd_cwnd(tp) * (BICTCP_BETA_SCALE + beta)) in cubictcp_recalc_ssthresh()
[all …]
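
cubictcp_recalc_ssthresh()'s fast-convergence branch records a slightly deflated last_max_cwnd when a loss happens below the previous maximum, so a flow that is losing ground releases bandwidth sooner: last_max = cwnd * (BETA_SCALE + beta) / (2 * BETA_SCALE), with the kernel defaults beta = 717 and BICTCP_BETA_SCALE = 1024. A worked sketch:

#include <stdint.h>
#include <stdio.h>

#define BETA_SCALE 1024u
#define BETA       717u         /* ~0.7, the kernel default */

/* Deflated window maximum remembered on loss (fast convergence). */
static uint32_t fast_convergence_max(uint32_t cwnd)
{
        return cwnd * (BETA_SCALE + BETA) / (2 * BETA_SCALE);
}

int main(void)
{
        /* a loss at cwnd 100 records last_max_cwnd = 85 */
        printf("last_max_cwnd = %u\n",
               (unsigned)fast_convergence_max(100));
        return 0;
}
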
/net/sched/
cls_api.c
50 static u32 destroy_obj_hashfn(const struct tcf_proto *tp) in destroy_obj_hashfn() argument
52 return jhash_3words(tp->chain->index, tp->prio, in destroy_obj_hashfn()
53 (__force __u32)tp->protocol, 0); in destroy_obj_hashfn()
57 struct tcf_proto *tp) in tcf_proto_signal_destroying() argument
62 hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node, in tcf_proto_signal_destroying()
63 destroy_obj_hashfn(tp)); in tcf_proto_signal_destroying()
76 struct tcf_proto *tp) in tcf_proto_exists_destroying() argument
78 u32 hash = destroy_obj_hashfn(tp); in tcf_proto_exists_destroying()
85 if (tcf_proto_cmp(tp, iter)) { in tcf_proto_exists_destroying()
96 tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp) in tcf_proto_signal_destroyed() argument
[all …]
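
destroy_obj_hashfn() keys the block's proto_destroy_ht on the triple (chain index, prio, protocol) so a tcf_proto being torn down concurrently can be found again during teardown. The kernel uses jhash_3words(); the sketch below substitutes a simple FNV-style mix purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Illustrative 3-word hash; the kernel uses jhash_3words() instead. */
static uint32_t hash_3words(uint32_t a, uint32_t b, uint32_t c)
{
        uint32_t h = 2166136261u;       /* FNV offset basis */

        h = (h ^ a) * 16777619u;
        h = (h ^ b) * 16777619u;
        h = (h ^ c) * 16777619u;
        return h;
}

int main(void)
{
        /* chain index 0, prio 49152, protocol ETH_P_ALL (0x0003) */
        printf("bucket = %u\n",
               (unsigned)(hash_3words(0, 49152, 3) % 256));
        return 0;
}
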
cls_matchall.c
27 static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp, in mall_classify() argument
30 struct cls_mall_head *head = rcu_dereference_bh(tp->root); in mall_classify()
43 static int mall_init(struct tcf_proto *tp) in mall_init() argument
66 static void mall_destroy_hw_filter(struct tcf_proto *tp, in mall_destroy_hw_filter() argument
72 struct tcf_block *block = tp->chain->block; in mall_destroy_hw_filter()
74 tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack); in mall_destroy_hw_filter()
78 tc_setup_cb_destroy(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall, false, in mall_destroy_hw_filter()
82 static int mall_replace_hw_filter(struct tcf_proto *tp, in mall_replace_hw_filter() argument
88 struct tcf_block *block = tp->chain->block; in mall_replace_hw_filter()
96 tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack); in mall_replace_hw_filter()
[all …]
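
mall_classify() runs in the softirq fast path and fetches its state with rcu_dereference_bh(tp->root), while the control path publishes a fully built replacement head; readers never block, and a retired head is freed only after an RCU grace period. A loose userspace analogue using C11 atomics in place of RCU (illustrative only; real RCU also defers reclamation, which this sketch simply leaks):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct mall_head {
        int classid;    /* result every matching packet is mapped to */
};

static _Atomic(struct mall_head *) root;

/* Fast path: one acquire load, no locks (cf. rcu_dereference_bh). */
static int classify(void)
{
        struct mall_head *head =
                atomic_load_explicit(&root, memory_order_acquire);

        return head ? head->classid : -1;
}

/* Control path: publish a fully built head (cf. rcu_assign_pointer). */
static void replace(int classid)
{
        struct mall_head *head = malloc(sizeof(*head));

        head->classid = classid;
        atomic_store_explicit(&root, head, memory_order_release);
        /* Real RCU would free the old head after a grace period. */
}

int main(void)
{
        replace(42);
        printf("classid = %d\n", classify());
        return 0;
}
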
cls_bpf.c
49 struct tcf_proto *tp; member
80 static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, in cls_bpf_classify() argument
83 struct cls_bpf_head *head = rcu_dereference_bh(tp->root); in cls_bpf_classify()
141 static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, in cls_bpf_offload_cmd() argument
145 struct tcf_block *block = tp->chain->block; in cls_bpf_offload_cmd()
154 tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack); in cls_bpf_offload_cmd()
163 err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf, in cls_bpf_offload_cmd()
169 err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf, in cls_bpf_offload_cmd()
173 err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf, in cls_bpf_offload_cmd()
178 cls_bpf_offload_cmd(tp, oldprog, prog, extack); in cls_bpf_offload_cmd()
[all …]
cls_u32.c
103 static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, in u32_classify() argument
111 struct tc_u_hnode *ht = rcu_dereference_bh(tp->root); in u32_classify()
286 static void *u32_get(struct tcf_proto *tp, u32 handle) in u32_get() argument
289 struct tc_u_common *tp_c = tp->data; in u32_get()
292 ht = rtnl_dereference(tp->root); in u32_get()
319 static void *tc_u_common_ptr(const struct tcf_proto *tp) in tc_u_common_ptr() argument
321 struct tcf_block *block = tp->chain->block; in tc_u_common_ptr()
350 static int u32_init(struct tcf_proto *tp) in u32_init() argument
353 void *key = tc_u_common_ptr(tp); in u32_init()
362 root_ht->prio = tp->prio; in u32_init()
[all …]
cls_cgroup.c
21 struct tcf_proto *tp; member
25 static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp, in cls_cgroup_classify() argument
28 struct cls_cgroup_head *head = rcu_dereference_bh(tp->root); in cls_cgroup_classify()
44 static void *cls_cgroup_get(struct tcf_proto *tp, u32 handle) in cls_cgroup_get() argument
49 static int cls_cgroup_init(struct tcf_proto *tp) in cls_cgroup_init() argument
77 struct tcf_proto *tp, unsigned long base, in cls_cgroup_change() argument
83 struct cls_cgroup_head *head = rtnl_dereference(tp->root); in cls_cgroup_change()
104 new->tp = tp; in cls_cgroup_change()
111 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &new->exts, flags, in cls_cgroup_change()
116 err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &new->ematches); in cls_cgroup_change()
[all …]
cls_basic.c
33 struct tcf_proto *tp; member
39 static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp, in basic_classify() argument
43 struct basic_head *head = rcu_dereference_bh(tp->root); in basic_classify()
60 static void *basic_get(struct tcf_proto *tp, u32 handle) in basic_get() argument
62 struct basic_head *head = rtnl_dereference(tp->root); in basic_get()
74 static int basic_init(struct tcf_proto *tp) in basic_init() argument
83 rcu_assign_pointer(tp->root, head); in basic_init()
106 static void basic_destroy(struct tcf_proto *tp, bool rtnl_held, in basic_destroy() argument
109 struct basic_head *head = rtnl_dereference(tp->root); in basic_destroy()
114 tcf_unbind_filter(tp, &f->res); in basic_destroy()
[all …]
cls_fw.c
39 struct tcf_proto *tp; member
50 static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp, in fw_classify() argument
53 struct fw_head *head = rcu_dereference_bh(tp->root); in fw_classify()
75 struct Qdisc *q = tcf_block_q(tp->chain->block); in fw_classify()
89 static void *fw_get(struct tcf_proto *tp, u32 handle) in fw_get() argument
91 struct fw_head *head = rtnl_dereference(tp->root); in fw_get()
105 static int fw_init(struct tcf_proto *tp) in fw_init() argument
130 static void fw_destroy(struct tcf_proto *tp, bool rtnl_held, in fw_destroy() argument
133 struct fw_head *head = rtnl_dereference(tp->root); in fw_destroy()
144 tcf_unbind_filter(tp, &f->res); in fw_destroy()
[all …]
