Searched refs:tp (Results 1 – 25 of 102) sorted by relevance


/net/ipv4/
tcp_input.c
341 static void tcp_ecn_queue_cwr(struct tcp_sock *tp) in tcp_ecn_queue_cwr() argument
343 if (tp->ecn_flags & TCP_ECN_OK) in tcp_ecn_queue_cwr()
344 tp->ecn_flags |= TCP_ECN_QUEUE_CWR; in tcp_ecn_queue_cwr()
361 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) in tcp_ecn_withdraw_cwr() argument
363 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; in tcp_ecn_withdraw_cwr()
368 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ecn_check_ce() local
376 if (tp->ecn_flags & TCP_ECN_SEEN) in __tcp_ecn_check_ce()
383 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { in __tcp_ecn_check_ce()
386 tp->ecn_flags |= TCP_ECN_DEMAND_CWR; in __tcp_ecn_check_ce()
388 tp->ecn_flags |= TCP_ECN_SEEN; in __tcp_ecn_check_ce()
[all …]
tcp_recovery.c
7 const struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_reo_wnd() local
9 if (!tp->reord_seen) { in tcp_rack_reo_wnd()
16 if (tp->sacked_out >= tp->reordering && in tcp_rack_reo_wnd()
28 return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps, in tcp_rack_reo_wnd()
29 tp->srtt_us >> 3); in tcp_rack_reo_wnd()
32 s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd) in tcp_rack_skb_timeout() argument
34 return tp->rack.rtt_us + reo_wnd - in tcp_rack_skb_timeout()
35 tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb)); in tcp_rack_skb_timeout()
60 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_detect_loss() local
66 list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue, in tcp_rack_detect_loss()
[all …]
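
The two formulas visible in this tcp_recovery.c excerpt are compact enough to model outside the kernel: the reordering window is the smaller of (min_rtt / 4) * reo_wnd_steps and srtt / 8, and a packet is declared lost once its RACK timeout goes non-positive. A minimal userspace C sketch; the struct and its fields are stand-ins mirroring the snippet, not the kernel's tcp_sock:

#include <stdio.h>
#include <stdint.h>

struct rack_state {
	uint32_t min_rtt_us;       /* lowest RTT seen (tcp_min_rtt(tp)) */
	uint32_t srtt_us_shifted;  /* srtt stored <<3, as in tp->srtt_us */
	uint32_t reo_wnd_steps;    /* tp->rack.reo_wnd_steps */
	uint32_t rack_rtt_us;      /* tp->rack.rtt_us */
};

static uint32_t rack_reo_wnd(const struct rack_state *r)
{
	uint32_t w = (r->min_rtt_us >> 2) * r->reo_wnd_steps;
	uint32_t cap = r->srtt_us_shifted >> 3;
	return w < cap ? w : cap;
}

/* Positive result: wait that long before marking the skb lost;
 * non-positive: the reordering window has already elapsed. */
static int32_t rack_skb_timeout(const struct rack_state *r,
				uint64_t now_us, uint64_t tx_stamp_us)
{
	return (int32_t)(r->rack_rtt_us + rack_reo_wnd(r)
			 - (uint32_t)(now_us - tx_stamp_us));
}

int main(void)
{
	struct rack_state r = { 20000, 25000 << 3, 1, 21000 };
	printf("reo_wnd=%uus remaining=%dus\n",
	       rack_reo_wnd(&r), rack_skb_timeout(&r, 1030000, 1000000));
	return 0;
}
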
tcp_rate.c
42 struct tcp_sock *tp = tcp_sk(sk); in tcp_rate_skb_sent() local
58 if (!tp->packets_out) { in tcp_rate_skb_sent()
61 tp->first_tx_mstamp = tstamp_us; in tcp_rate_skb_sent()
62 tp->delivered_mstamp = tstamp_us; in tcp_rate_skb_sent()
65 TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp; in tcp_rate_skb_sent()
66 TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp; in tcp_rate_skb_sent()
67 TCP_SKB_CB(skb)->tx.delivered = tp->delivered; in tcp_rate_skb_sent()
68 TCP_SKB_CB(skb)->tx.delivered_ce = tp->delivered_ce; in tcp_rate_skb_sent()
69 TCP_SKB_CB(skb)->tx.is_app_limited = tp->app_limited ? 1 : 0; in tcp_rate_skb_sent()
83 struct tcp_sock *tp = tcp_sk(sk); in tcp_rate_skb_delivered() local
[all …]
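
tcp_rate_skb_sent() above snapshots the connection's delivery state into the outgoing skb, so the ACK for that skb can later yield a rate sample over exactly its flight interval; when nothing is in flight, the sampling clock restarts at the send timestamp. A self-contained sketch of that bookkeeping, with stand-in structs in place of tcp_sock and TCP_SKB_CB:

#include <stdio.h>
#include <stdint.h>

struct conn {
	uint64_t first_tx_mstamp, delivered_mstamp;
	uint32_t delivered, delivered_ce, app_limited, packets_out;
};

struct skb_tx {
	uint64_t first_tx_mstamp, delivered_mstamp;
	uint32_t delivered, delivered_ce;
	uint8_t is_app_limited;
};

static void rate_skb_sent(struct conn *tp, struct skb_tx *tx, uint64_t now_us)
{
	if (!tp->packets_out) {
		/* first packet of a flight starts a fresh sampling interval */
		tp->first_tx_mstamp = now_us;
		tp->delivered_mstamp = now_us;
	}
	/* freeze the current delivery state into the skb */
	tx->first_tx_mstamp = tp->first_tx_mstamp;
	tx->delivered_mstamp = tp->delivered_mstamp;
	tx->delivered = tp->delivered;
	tx->delivered_ce = tp->delivered_ce;
	tx->is_app_limited = tp->app_limited ? 1 : 0;
}

int main(void)
{
	struct conn tp = { 0 };
	struct skb_tx tx;
	rate_skb_sent(&tp, &tx, 1000000);
	printf("snapshot delivered=%u at %llu us\n",
	       tx.delivered, (unsigned long long)tx.delivered_mstamp);
	return 0;
}
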
tcp_output.c
54 void tcp_mstamp_refresh(struct tcp_sock *tp) in tcp_mstamp_refresh() argument
58 tp->tcp_clock_cache = val; in tcp_mstamp_refresh()
59 tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC); in tcp_mstamp_refresh()
69 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_new_data_sent() local
70 unsigned int prior_packets = tp->packets_out; in tcp_event_new_data_sent()
72 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq); in tcp_event_new_data_sent()
77 if (tp->highest_sack == NULL) in tcp_event_new_data_sent()
78 tp->highest_sack = skb; in tcp_event_new_data_sent()
80 tp->packets_out += tcp_skb_pcount(skb); in tcp_event_new_data_sent()
98 const struct tcp_sock *tp = tcp_sk(sk); in tcp_acceptable_seq() local
[all …]
tcp.c
395 static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp) in tcp_compute_delivery_rate() argument
397 u32 rate = READ_ONCE(tp->rate_delivered); in tcp_compute_delivery_rate()
398 u32 intv = READ_ONCE(tp->rate_interval_us); in tcp_compute_delivery_rate()
402 rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC; in tcp_compute_delivery_rate()
416 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_sock() local
418 tp->out_of_order_queue = RB_ROOT; in tcp_init_sock()
421 INIT_LIST_HEAD(&tp->tsq_node); in tcp_init_sock()
422 INIT_LIST_HEAD(&tp->tsorted_sent_queue); in tcp_init_sock()
427 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); in tcp_init_sock()
428 minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U); in tcp_init_sock()
[all …]
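
tcp_compute_delivery_rate() above turns the (packets delivered, interval) pair into bytes per second, doing the 64-bit multiply before the divide so the intermediate neither overflows nor truncates. A stand-alone version of the arithmetic with invented inputs:

#include <stdio.h>
#include <stdint.h>

#define USEC_PER_SEC 1000000ULL

static uint64_t delivery_rate(uint32_t rate_delivered,
			      uint32_t rate_interval_us,
			      uint32_t mss_cache)
{
	if (!rate_delivered || !rate_interval_us)
		return 0;
	/* 64-bit math first, divide last, as in the snippet */
	return (uint64_t)rate_delivered * mss_cache * USEC_PER_SEC
	       / rate_interval_us;
}

int main(void)
{
	/* 10 packets of 1448 bytes delivered over 12 ms -> ~1.2 MB/s */
	printf("%llu bytes/sec\n",
	       (unsigned long long)delivery_rate(10, 12000, 1448));
	return 0;
}
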
tcp_vegas.c
73 const struct tcp_sock *tp = tcp_sk(sk); in vegas_enable() local
80 vegas->beg_snd_nxt = tp->snd_nxt; in vegas_enable()
160 static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp) in tcp_vegas_ssthresh() argument
162 return min(tp->snd_ssthresh, tcp_snd_cwnd(tp)); in tcp_vegas_ssthresh()
167 struct tcp_sock *tp = tcp_sk(sk); in tcp_vegas_cong_avoid() local
181 vegas->beg_snd_nxt = tp->snd_nxt; in tcp_vegas_cong_avoid()
220 target_cwnd = (u64)tcp_snd_cwnd(tp) * vegas->baseRTT; in tcp_vegas_cong_avoid()
227 diff = tcp_snd_cwnd(tp) * (rtt-vegas->baseRTT) / vegas->baseRTT; in tcp_vegas_cong_avoid()
229 if (diff > gamma && tcp_in_slow_start(tp)) { in tcp_vegas_cong_avoid()
241 tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), in tcp_vegas_cong_avoid()
[all …]
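
The Vegas excerpt estimates how many extra segments the connection keeps queued in the network: with baseRTT the minimum RTT observed, target_cwnd = cwnd * baseRTT / rtt and diff = cwnd * (rtt - baseRTT) / baseRTT. A worked example with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cwnd = 40, base_rtt_us = 50000, rtt_us = 60000;

	/* cwnd scaled down to what the base RTT could carry */
	uint64_t target_cwnd = (uint64_t)cwnd * base_rtt_us / rtt_us;
	/* extra segments sitting in queues along the path */
	uint32_t diff = cwnd * (rtt_us - base_rtt_us) / base_rtt_us;

	printf("target_cwnd=%llu diff=%u segments queued\n",
	       (unsigned long long)target_cwnd, diff);
	return 0;
}
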
tcp_hybla.c
48 struct tcp_sock *tp = tcp_sk(sk); in hybla_init() local
57 tcp_snd_cwnd_set(tp, 2); in hybla_init()
58 tp->snd_cwnd_clamp = 65535; in hybla_init()
64 ca->minrtt_us = tp->srtt_us; in hybla_init()
65 tcp_snd_cwnd_set(tp, ca->rho); in hybla_init()
92 struct tcp_sock *tp = tcp_sk(sk); in hybla_cong_avoid() local
98 if (tp->srtt_us < ca->minrtt_us) { in hybla_cong_avoid()
100 ca->minrtt_us = tp->srtt_us; in hybla_cong_avoid()
116 if (tcp_in_slow_start(tp)) { in hybla_cong_avoid()
140 increment = ca->rho2_7ls / tcp_snd_cwnd(tp); in hybla_cong_avoid()
[all …]
tcp_highspeed.c
102 struct tcp_sock *tp = tcp_sk(sk); in hstcp_init() local
109 tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128); in hstcp_init()
114 struct tcp_sock *tp = tcp_sk(sk); in hstcp_cong_avoid() local
120 if (tcp_in_slow_start(tp)) in hstcp_cong_avoid()
121 tcp_slow_start(tp, acked); in hstcp_cong_avoid()
130 if (tcp_snd_cwnd(tp) > hstcp_aimd_vals[ca->ai].cwnd) { in hstcp_cong_avoid()
131 while (tcp_snd_cwnd(tp) > hstcp_aimd_vals[ca->ai].cwnd && in hstcp_cong_avoid()
134 } else if (ca->ai && tcp_snd_cwnd(tp) <= hstcp_aimd_vals[ca->ai-1].cwnd) { in hstcp_cong_avoid()
135 while (ca->ai && tcp_snd_cwnd(tp) <= hstcp_aimd_vals[ca->ai-1].cwnd) in hstcp_cong_avoid()
140 if (tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp) { in hstcp_cong_avoid()
[all …]
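
hstcp_cong_avoid() above keeps ca->ai pointing at the AIMD table row matching the current cwnd, walking the index up while cwnd exceeds the current row's threshold and down while it falls to or below the previous row's. A sketch of that walk; the threshold values below are illustrative, not the kernel's hstcp_aimd_vals table:

#include <stdio.h>
#include <stdint.h>

static const uint32_t aimd_cwnd[] = { 38, 118, 221, 347, 495 };
#define AIMD_ROWS (sizeof(aimd_cwnd) / sizeof(aimd_cwnd[0]))

static unsigned int update_ai(unsigned int ai, uint32_t cwnd)
{
	if (cwnd > aimd_cwnd[ai]) {
		while (cwnd > aimd_cwnd[ai] && ai < AIMD_ROWS - 1)
			ai++;
	} else if (ai && cwnd <= aimd_cwnd[ai - 1]) {
		while (ai && cwnd <= aimd_cwnd[ai - 1])
			ai--;
	}
	return ai;
}

int main(void)
{
	unsigned int ai = 0;
	ai = update_ai(ai, 150);	/* climbs to the 221 row */
	printf("ai=%u threshold=%u\n", ai, aimd_cwnd[ai]);
	ai = update_ai(ai, 40);		/* falls back to the 118 row */
	printf("ai=%u threshold=%u\n", ai, aimd_cwnd[ai]);
	return 0;
}
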
tcp_yeah.c
43 struct tcp_sock *tp = tcp_sk(sk); in tcp_yeah_init() local
55 tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128); in tcp_yeah_init()
60 struct tcp_sock *tp = tcp_sk(sk); in tcp_yeah_cong_avoid() local
66 if (tcp_in_slow_start(tp)) { in tcp_yeah_cong_avoid()
67 acked = tcp_slow_start(tp, acked); in tcp_yeah_cong_avoid()
74 tcp_cong_avoid_ai(tp, min(tcp_snd_cwnd(tp), TCP_SCALABLE_AI_CNT), in tcp_yeah_cong_avoid()
78 tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked); in tcp_yeah_cong_avoid()
133 bw = tcp_snd_cwnd(tp); in tcp_yeah_cong_avoid()
141 tcp_snd_cwnd(tp) > yeah->reno_count) { in tcp_yeah_cong_avoid()
143 tcp_snd_cwnd(tp) >> TCP_YEAH_EPSILON); in tcp_yeah_cong_avoid()
[all …]
tcp_veno.c
121 struct tcp_sock *tp = tcp_sk(sk); in tcp_veno_cong_avoid() local
149 target_cwnd = (u64)tcp_snd_cwnd(tp) * veno->basertt; in tcp_veno_cong_avoid()
153 veno->diff = (tcp_snd_cwnd(tp) << V_PARAM_SHIFT) - target_cwnd; in tcp_veno_cong_avoid()
155 if (tcp_in_slow_start(tp)) { in tcp_veno_cong_avoid()
157 acked = tcp_slow_start(tp, acked); in tcp_veno_cong_avoid()
167 tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked); in tcp_veno_cong_avoid()
172 if (tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) { in tcp_veno_cong_avoid()
174 tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp) { in tcp_veno_cong_avoid()
175 tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1); in tcp_veno_cong_avoid()
179 tp->snd_cwnd_cnt = 0; in tcp_veno_cong_avoid()
[all …]
tcp_dctcp.c
81 static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca) in dctcp_reset() argument
83 ca->next_seq = tp->snd_nxt; in dctcp_reset()
85 ca->old_delivered = tp->delivered; in dctcp_reset()
86 ca->old_delivered_ce = tp->delivered_ce; in dctcp_reset()
91 const struct tcp_sock *tp = tcp_sk(sk); in dctcp_init() local
93 if ((tp->ecn_flags & TCP_ECN_OK) || in dctcp_init()
98 ca->prior_rcv_nxt = tp->rcv_nxt; in dctcp_init()
105 dctcp_reset(tp, ca); in dctcp_init()
121 struct tcp_sock *tp = tcp_sk(sk); in dctcp_ssthresh() local
123 ca->loss_cwnd = tcp_snd_cwnd(tp); in dctcp_ssthresh()
[all …]
tcp_timer.c
103 struct tcp_sock *tp = tcp_sk(sk); in tcp_out_of_resources() local
108 if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset) in tcp_out_of_resources()
118 if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN || in tcp_out_of_resources()
120 (!tp->snd_wnd && !tp->packets_out)) in tcp_out_of_resources()
233 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_timeout() local
276 if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG)) in tcp_write_timeout()
293 tp->timeout_rehash++; in tcp_write_timeout()
304 struct tcp_sock *tp = tcp_sk(sk); in tcp_delack_timer_handler() local
310 if (tp->compressed_ack) { in tcp_delack_timer_handler()
311 tcp_mstamp_refresh(tp); in tcp_delack_timer_handler()
[all …]
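
The elapsed-time tests in tcp_out_of_resources() use the wraparound-safe jiffies idiom: subtract the two 32-bit timestamps in unsigned arithmetic, then compare the difference as signed, so the test stays correct across counter wrap. A minimal demonstration with made-up values:

#include <stdio.h>
#include <stdint.h>

static int elapsed_exceeds(uint32_t now, uint32_t then, uint32_t limit)
{
	/* mirrors: (s32)(tcp_jiffies32 - tp->lsndtime) > limit */
	return (int32_t)(now - then) > (int32_t)limit;
}

int main(void)
{
	uint32_t then = 0xfffffff0u;	/* just before wraparound */
	uint32_t now  = 0x00000100u;	/* shortly after it */
	/* true elapsed time is 0x110 ticks despite now < then */
	printf("elapsed > 0x100? %d\n", elapsed_exceeds(now, then, 0x100));
	printf("elapsed > 0x200? %d\n", elapsed_exceeds(now, then, 0x200));
	return 0;
}
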
tcp_nv.c
125 struct tcp_sock *tp = tcp_sk(sk); in tcpnv_reset() local
132 ca->nv_rtt_start_seq = tp->snd_una; in tcpnv_reset()
134 ca->nv_last_snd_una = tp->snd_una; in tcpnv_reset()
182 struct tcp_sock *tp = tcp_sk(sk); in tcpnv_cong_avoid() local
193 if (tcp_in_slow_start(tp)) { in tcpnv_cong_avoid()
194 acked = tcp_slow_start(tp, acked); in tcpnv_cong_avoid()
200 cnt = tcp_snd_cwnd(tp) << -ca->cwnd_growth_factor; in tcpnv_cong_avoid()
201 tcp_cong_avoid_ai(tp, cnt, acked); in tcpnv_cong_avoid()
203 cnt = max(4U, tcp_snd_cwnd(tp) >> ca->cwnd_growth_factor); in tcpnv_cong_avoid()
204 tcp_cong_avoid_ai(tp, cnt, acked); in tcpnv_cong_avoid()
[all …]
tcp_bbr.c
268 struct tcp_sock *tp = tcp_sk(sk); in bbr_init_pacing_rate_from_rtt() local
273 if (tp->srtt_us) { /* any RTT sample yet? */ in bbr_init_pacing_rate_from_rtt()
274 rtt_us = max(tp->srtt_us >> 3, 1U); in bbr_init_pacing_rate_from_rtt()
279 bw = (u64)tcp_snd_cwnd(tp) * BW_UNIT; in bbr_init_pacing_rate_from_rtt()
287 struct tcp_sock *tp = tcp_sk(sk); in bbr_set_pacing_rate() local
291 if (unlikely(!bbr->has_seen_rtt && tp->srtt_us)) in bbr_set_pacing_rate()
305 struct tcp_sock *tp = tcp_sk(sk); in bbr_tso_segs_goal() local
314 segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk)); in bbr_tso_segs_goal()
322 struct tcp_sock *tp = tcp_sk(sk); in bbr_save_cwnd() local
326 bbr->prior_cwnd = tcp_snd_cwnd(tp); /* this cwnd is good enough */ in bbr_save_cwnd()
[all …]
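
bbr_init_pacing_rate_from_rtt() above seeds the pacing rate before any bandwidth samples exist: one cwnd of data per smoothed RTT, with srtt stored left-shifted by 3. The BW_UNIT fixed-point scaling and pacing gain are elided in this rough sketch with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cwnd = 10, mss = 1448;
	uint32_t srtt_us_shifted = 100000 << 3;	/* srtt = 100 ms */

	uint32_t rtt_us = srtt_us_shifted >> 3;
	if (rtt_us < 1)
		rtt_us = 1;			/* max(srtt >> 3, 1U) */

	/* bytes per second: one cwnd of data per RTT */
	uint64_t rate = (uint64_t)cwnd * mss * 1000000ULL / rtt_us;
	printf("initial pacing rate ~ %llu bytes/sec\n",
	       (unsigned long long)rate);
	return 0;
}
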
tcp_cdg.c
143 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_hystart_update() local
150 u32 now_us = tp->tcp_mstamp; in tcp_cdg_hystart_update()
164 tcp_snd_cwnd(tp)); in tcp_cdg_hystart_update()
165 tp->snd_ssthresh = tcp_snd_cwnd(tp); in tcp_cdg_hystart_update()
183 tcp_snd_cwnd(tp)); in tcp_cdg_hystart_update()
184 tp->snd_ssthresh = tcp_snd_cwnd(tp); in tcp_cdg_hystart_update()
244 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_backoff() local
255 ca->shadow_wnd = max(ca->shadow_wnd, tcp_snd_cwnd(tp)); in tcp_cdg_backoff()
265 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_cong_avoid() local
269 if (tcp_in_slow_start(tp) && hystart_detect) in tcp_cdg_cong_avoid()
[all …]
tcp_cong.c
458 __bpf_kfunc u32 tcp_slow_start(struct tcp_sock *tp, u32 acked) in tcp_slow_start() argument
460 u32 cwnd = min(tcp_snd_cwnd(tp) + acked, tp->snd_ssthresh); in tcp_slow_start()
462 acked -= cwnd - tcp_snd_cwnd(tp); in tcp_slow_start()
463 tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp)); in tcp_slow_start()
472 __bpf_kfunc void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked) in tcp_cong_avoid_ai() argument
475 if (tp->snd_cwnd_cnt >= w) { in tcp_cong_avoid_ai()
476 tp->snd_cwnd_cnt = 0; in tcp_cong_avoid_ai()
477 tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1); in tcp_cong_avoid_ai()
480 tp->snd_cwnd_cnt += acked; in tcp_cong_avoid_ai()
481 if (tp->snd_cwnd_cnt >= w) { in tcp_cong_avoid_ai()
[all …]
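
tcp_slow_start() and tcp_cong_avoid_ai() above are the Reno building blocks most of the congestion modules in these results call into: slow start adds one segment per segment acked up to ssthresh and returns the unconsumed acks, then additive increase adds one segment per window w of acks. A userspace model; the tail of tcp_cong_avoid_ai() is truncated in the snippet and modeled here as whole-window steps:

#include <stdio.h>
#include <stdint.h>

struct sock_model {
	uint32_t cwnd, ssthresh, cwnd_cnt, cwnd_clamp;
};

/* Grow by one segment per acked segment, capped at ssthresh; return
 * the acks left over for additive increase. */
static uint32_t slow_start(struct sock_model *tp, uint32_t acked)
{
	uint32_t cwnd = tp->cwnd + acked;
	if (cwnd > tp->ssthresh)
		cwnd = tp->ssthresh;
	acked -= cwnd - tp->cwnd;
	tp->cwnd = cwnd < tp->cwnd_clamp ? cwnd : tp->cwnd_clamp;
	return acked;
}

/* Add one segment per window w of acked segments. */
static void cong_avoid_ai(struct sock_model *tp, uint32_t w, uint32_t acked)
{
	if (tp->cwnd_cnt >= w) {	/* consume an earlier full window */
		tp->cwnd_cnt = 0;
		tp->cwnd += 1;
	}
	tp->cwnd_cnt += acked;
	if (tp->cwnd_cnt >= w) {	/* whole-window step(s) */
		uint32_t delta = tp->cwnd_cnt / w;
		tp->cwnd_cnt -= delta * w;
		tp->cwnd += delta;
	}
}

int main(void)
{
	struct sock_model tp = { .cwnd = 8, .ssthresh = 10,
				 .cwnd_cnt = 0, .cwnd_clamp = 0xffffffffu };
	uint32_t acked = slow_start(&tp, 5);	/* 8 -> 10, 3 acks left */
	cong_avoid_ai(&tp, tp.cwnd, acked);
	printf("cwnd=%u leftover=%u cnt=%u\n", tp.cwnd, acked, tp.cwnd_cnt);
	return 0;
}
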
tcp_fastopen.c
171 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastopen_add_skb() local
173 if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt) in tcp_fastopen_add_skb()
187 tp->segs_in = 0; in tcp_fastopen_add_skb()
188 tcp_segs_in(tp, skb); in tcp_fastopen_add_skb()
196 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; in tcp_fastopen_add_skb()
198 tp->syn_data_acked = 1; in tcp_fastopen_add_skb()
203 tp->bytes_received = skb->len; in tcp_fastopen_add_skb()
242 struct tcp_sock *tp; in tcp_fastopen_create_child() local
260 tp = tcp_sk(child); in tcp_fastopen_create_child()
262 rcu_assign_pointer(tp->fastopen_rsk, req); in tcp_fastopen_create_child()
[all …]
tcp_westwood.c
165 const struct tcp_sock *tp = tcp_sk(sk); in westwood_fast_bw() local
170 w->bk += tp->snd_una - w->snd_una; in westwood_fast_bw()
171 w->snd_una = tp->snd_una; in westwood_fast_bw()
182 const struct tcp_sock *tp = tcp_sk(sk); in westwood_acked_count() local
185 w->cumul_ack = tp->snd_una - w->snd_una; in westwood_acked_count()
191 w->accounted += tp->mss_cache; in westwood_acked_count()
192 w->cumul_ack = tp->mss_cache; in westwood_acked_count()
195 if (w->cumul_ack > tp->mss_cache) { in westwood_acked_count()
199 w->cumul_ack = tp->mss_cache; in westwood_acked_count()
206 w->snd_una = tp->snd_una; in westwood_acked_count()
[all …]
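
westwood_acked_count() above sizes each bandwidth sample in bytes: a zero advance of snd_una is a duplicate ACK, counted as one MSS and remembered in `accounted`, while a larger-than-MSS advance is first charged against `accounted` so earlier dupacks are not counted twice. A stand-alone model; the accounted-subtraction branch is only partly visible in the snippet and reconstructed here:

#include <stdio.h>
#include <stdint.h>

struct westwood { uint32_t snd_una, cumul_ack, accounted; };

static uint32_t acked_count(struct westwood *w, uint32_t snd_una,
			    uint32_t mss)
{
	w->cumul_ack = snd_una - w->snd_una;
	if (!w->cumul_ack) {			/* duplicate ACK */
		w->accounted += mss;
		w->cumul_ack = mss;
	}
	if (w->cumul_ack > mss) {		/* delayed/cumulative ACK */
		if (w->accounted >= w->cumul_ack) {
			w->accounted -= w->cumul_ack;
			w->cumul_ack = mss;
		} else {
			w->cumul_ack -= w->accounted;
			w->accounted = 0;
		}
	}
	w->snd_una = snd_una;
	return w->cumul_ack;
}

int main(void)
{
	struct westwood w = { 1000, 0, 0 };
	printf("dupack counts %u bytes\n", acked_count(&w, 1000, 1448));
	printf("cumulative ack counts %u bytes\n",
	       acked_count(&w, 5344, 1448));
	return 0;
}
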
/net/sched/
cls_api.c
55 const struct tcf_proto *tp; member
77 tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp, in tcf_exts_miss_cookie_base_alloc() argument
84 if (WARN_ON(!handle || !tp->ops->get_exts)) in tcf_exts_miss_cookie_base_alloc()
91 n->chain_index = tp->chain->index; in tcf_exts_miss_cookie_base_alloc()
92 n->chain = tp->chain; in tcf_exts_miss_cookie_base_alloc()
93 n->tp_prio = tp->prio; in tcf_exts_miss_cookie_base_alloc()
94 n->tp = tp; in tcf_exts_miss_cookie_base_alloc()
133 tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp, in tcf_exts_miss_cookie_base_alloc() argument
172 static u32 destroy_obj_hashfn(const struct tcf_proto *tp) in destroy_obj_hashfn() argument
174 return jhash_3words(tp->chain->index, tp->prio, in destroy_obj_hashfn()
[all …]
cls_matchall.c
29 const struct tcf_proto *tp, in mall_classify() argument
32 struct cls_mall_head *head = rcu_dereference_bh(tp->root); in mall_classify()
45 static int mall_init(struct tcf_proto *tp) in mall_init() argument
68 static void mall_destroy_hw_filter(struct tcf_proto *tp, in mall_destroy_hw_filter() argument
74 struct tcf_block *block = tp->chain->block; in mall_destroy_hw_filter()
76 tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack); in mall_destroy_hw_filter()
80 tc_setup_cb_destroy(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall, false, in mall_destroy_hw_filter()
84 static int mall_replace_hw_filter(struct tcf_proto *tp, in mall_replace_hw_filter() argument
90 struct tcf_block *block = tp->chain->block; in mall_replace_hw_filter()
98 tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack); in mall_replace_hw_filter()
[all …]
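
Across the cls_*.c results, `tp` is a struct tcf_proto rather than a tcp_sock, and every classifier follows the same shape: filter state hangs off tp->root (read with rcu_dereference_bh() in the kernel), and classify() walks it to map a packet to a class id. A toy, RCU-free model of that shape; all names are stand-ins, not the real tc API:

#include <stdio.h>
#include <stdint.h>

struct filter { uint32_t match_mark, classid; struct filter *next; };
struct tcf_proto_model { struct filter *root; };

static int classify(const struct tcf_proto_model *tp, uint32_t skb_mark,
		    uint32_t *classid)
{
	/* kernel: head = rcu_dereference_bh(tp->root); */
	for (struct filter *f = tp->root; f; f = f->next) {
		if (f->match_mark == skb_mark) {
			*classid = f->classid;
			return 0;	/* matched */
		}
	}
	return -1;			/* no match; try next tp in chain */
}

int main(void)
{
	struct filter f2 = { 2, 0x10002, NULL }, f1 = { 1, 0x10001, &f2 };
	struct tcf_proto_model tp = { &f1 };
	uint32_t cls;
	if (!classify(&tp, 2, &cls))
		printf("classified to 0x%x\n", cls);
	return 0;
}
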
cls_bpf.c
50 struct tcf_proto *tp; member
82 const struct tcf_proto *tp, in cls_bpf_classify() argument
85 struct cls_bpf_head *head = rcu_dereference_bh(tp->root); in cls_bpf_classify()
145 static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, in cls_bpf_offload_cmd() argument
149 struct tcf_block *block = tp->chain->block; in cls_bpf_offload_cmd()
158 tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack); in cls_bpf_offload_cmd()
167 err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf, in cls_bpf_offload_cmd()
173 err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf, in cls_bpf_offload_cmd()
177 err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf, in cls_bpf_offload_cmd()
182 cls_bpf_offload_cmd(tp, oldprog, prog, extack); in cls_bpf_offload_cmd()
[all …]
cls_u32.c
105 const struct tcf_proto *tp, in u32_classify() argument
113 struct tc_u_hnode *ht = rcu_dereference_bh(tp->root); in u32_classify()
288 static void *u32_get(struct tcf_proto *tp, u32 handle) in u32_get() argument
291 struct tc_u_common *tp_c = tp->data; in u32_get()
294 ht = rtnl_dereference(tp->root); in u32_get()
321 static void *tc_u_common_ptr(const struct tcf_proto *tp) in tc_u_common_ptr() argument
323 struct tcf_block *block = tp->chain->block; in tc_u_common_ptr()
352 static int u32_init(struct tcf_proto *tp) in u32_init() argument
355 void *key = tc_u_common_ptr(tp); in u32_init()
364 root_ht->prio = tp->prio; in u32_init()
[all …]
cls_cgroup.c
22 struct tcf_proto *tp; member
27 const struct tcf_proto *tp, in cls_cgroup_classify() argument
30 struct cls_cgroup_head *head = rcu_dereference_bh(tp->root); in cls_cgroup_classify()
46 static void *cls_cgroup_get(struct tcf_proto *tp, u32 handle) in cls_cgroup_get() argument
51 static int cls_cgroup_init(struct tcf_proto *tp) in cls_cgroup_init() argument
79 struct tcf_proto *tp, unsigned long base, in cls_cgroup_change() argument
85 struct cls_cgroup_head *head = rtnl_dereference(tp->root); in cls_cgroup_change()
106 new->tp = tp; in cls_cgroup_change()
113 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &new->exts, flags, in cls_cgroup_change()
118 err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &new->ematches); in cls_cgroup_change()
[all …]
cls_basic.c
34 struct tcf_proto *tp; member
41 const struct tcf_proto *tp, in basic_classify() argument
45 struct basic_head *head = rcu_dereference_bh(tp->root); in basic_classify()
62 static void *basic_get(struct tcf_proto *tp, u32 handle) in basic_get() argument
64 struct basic_head *head = rtnl_dereference(tp->root); in basic_get()
76 static int basic_init(struct tcf_proto *tp) in basic_init() argument
85 rcu_assign_pointer(tp->root, head); in basic_init()
108 static void basic_destroy(struct tcf_proto *tp, bool rtnl_held, in basic_destroy() argument
111 struct basic_head *head = rtnl_dereference(tp->root); in basic_destroy()
116 tcf_unbind_filter(tp, &f->res); in basic_destroy()
[all …]
cls_fw.c
40 struct tcf_proto *tp; member
52 const struct tcf_proto *tp, in fw_classify() argument
55 struct fw_head *head = rcu_dereference_bh(tp->root); in fw_classify()
77 struct Qdisc *q = tcf_block_q(tp->chain->block); in fw_classify()
91 static void *fw_get(struct tcf_proto *tp, u32 handle) in fw_get() argument
93 struct fw_head *head = rtnl_dereference(tp->root); in fw_get()
107 static int fw_init(struct tcf_proto *tp) in fw_init() argument
132 static void fw_destroy(struct tcf_proto *tp, bool rtnl_held, in fw_destroy() argument
135 struct fw_head *head = rtnl_dereference(tp->root); in fw_destroy()
146 tcf_unbind_filter(tp, &f->res); in fw_destroy()
[all …]
