Home
last modified time | relevance | path

Searched refs:sched (Results 1 – 21 of 21) sorted by relevance

/net/mptcp/
Dsched.c 42 struct mptcp_sched_ops *sched, *ret = NULL; in mptcp_sched_find() local
44 list_for_each_entry_rcu(sched, &mptcp_sched_list, list) { in mptcp_sched_find()
45 if (!strcmp(sched->name, name)) { in mptcp_sched_find()
46 ret = sched; in mptcp_sched_find()
54 int mptcp_register_scheduler(struct mptcp_sched_ops *sched) in mptcp_register_scheduler() argument
56 if (!sched->get_subflow) in mptcp_register_scheduler()
60 if (mptcp_sched_find(sched->name)) { in mptcp_register_scheduler()
64 list_add_tail_rcu(&sched->list, &mptcp_sched_list); in mptcp_register_scheduler()
67 pr_debug("%s registered", sched->name); in mptcp_register_scheduler()
71 void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched) in mptcp_unregister_scheduler() argument
[all …]
Dctrl.c 93 struct mptcp_sched_ops *sched; in mptcp_set_scheduler() local
97 sched = mptcp_sched_find(name); in mptcp_set_scheduler()
98 if (sched) in mptcp_set_scheduler()
DMakefile 5 mib.o pm_netlink.o sockopt.o pm_userspace.o fastopen.o sched.o
Dprotocol.h 323 struct mptcp_sched_ops *sched; member
666 int mptcp_register_scheduler(struct mptcp_sched_ops *sched);
667 void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched);
670 struct mptcp_sched_ops *sched);
Dprotocol.c 3263 mptcp_init_sched(msk, mptcp_sk(sk)->sched); in mptcp_sk_clone_init()
/net/netfilter/ipvs/
Dip_vs_sched.c 61 struct ip_vs_scheduler *sched) in ip_vs_unbind_scheduler() argument
70 if (sched->done_service) in ip_vs_unbind_scheduler()
71 sched->done_service(svc); in ip_vs_unbind_scheduler()
81 struct ip_vs_scheduler *sched; in ip_vs_sched_getbyname() local
87 list_for_each_entry(sched, &ip_vs_schedulers, n_list) { in ip_vs_sched_getbyname()
91 if (sched->module && !try_module_get(sched->module)) { in ip_vs_sched_getbyname()
97 if (strcmp(sched_name, sched->name)==0) { in ip_vs_sched_getbyname()
100 return sched; in ip_vs_sched_getbyname()
102 module_put(sched->module); in ip_vs_sched_getbyname()
115 struct ip_vs_scheduler *sched; in ip_vs_scheduler_get() local
[all …]
Dip_vs_ctl.c 972 struct ip_vs_scheduler *sched; in __ip_vs_update_dest() local
1043 sched = rcu_dereference_protected(svc->scheduler, 1); in __ip_vs_update_dest()
1044 if (sched && sched->add_dest) in __ip_vs_update_dest()
1045 sched->add_dest(svc, dest); in __ip_vs_update_dest()
1047 sched = rcu_dereference_protected(svc->scheduler, 1); in __ip_vs_update_dest()
1048 if (sched && sched->upd_dest) in __ip_vs_update_dest()
1049 sched->upd_dest(svc, dest); in __ip_vs_update_dest()
1291 struct ip_vs_scheduler *sched; in __ip_vs_unlink_dest() local
1293 sched = rcu_dereference_protected(svc->scheduler, 1); in __ip_vs_unlink_dest()
1294 if (sched && sched->del_dest) in __ip_vs_unlink_dest()
[all …]
Dip_vs_core.c 342 struct ip_vs_scheduler *sched; in ip_vs_sched_persist() local
349 sched = rcu_dereference(svc->scheduler); in ip_vs_sched_persist()
350 if (sched) { in ip_vs_sched_persist()
353 dest = sched->schedule(svc, skb, iph); in ip_vs_sched_persist()
447 struct ip_vs_scheduler *sched; in ip_vs_schedule() local
524 sched = rcu_dereference(svc->scheduler); in ip_vs_schedule()
525 if (sched) { in ip_vs_schedule()
528 dest = sched->schedule(svc, skb, iph); in ip_vs_schedule()
/net/sctp/
Dstream_sched.c 116 void sctp_sched_ops_register(enum sctp_sched_type sched, in sctp_sched_ops_register() argument
119 sctp_sched_ops[sched] = sched_ops; in sctp_sched_ops_register()
133 struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); in sctp_sched_free_sched() local
137 sched->unsched_all(stream); in sctp_sched_free_sched()
142 sched->free_sid(stream, i); in sctp_sched_free_sched()
149 enum sctp_sched_type sched) in sctp_sched_set_sched() argument
151 struct sctp_sched_ops *old = asoc->outqueue.sched; in sctp_sched_set_sched()
157 if (sched > SCTP_SS_MAX) in sctp_sched_set_sched()
160 n = sctp_sched_ops[sched]; in sctp_sched_set_sched()
167 asoc->outqueue.sched = n; in sctp_sched_set_sched()
[all …]
Dstream.c 57 struct sctp_sched_ops *sched; in sctp_stream_free_ext() local
62 sched = sctp_sched_ops_from_stream(stream); in sctp_stream_free_ext()
63 sched->free_sid(stream, sid); in sctp_stream_free_ext()
133 struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); in sctp_stream_init() local
145 sched->unsched_all(stream); in sctp_stream_init()
147 sched->sched_all(stream); in sctp_stream_init()
185 struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); in sctp_stream_free() local
188 sched->unsched_all(stream); in sctp_stream_free()
210 struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); in sctp_stream_update() local
212 sched->unsched_all(stream); in sctp_stream_update()
[all …]
Doutqueue.c 76 return q->sched->dequeue(q); in sctp_outq_dequeue_data()
389 q->sched->unsched_all(&asoc->stream); in sctp_prsctp_prune_unsent()
417 q->sched->sched_all(&asoc->stream); in sctp_prsctp_prune_unsent()
Dsm_sideeffect.c 1115 asoc->outqueue.sched->enqueue(&asoc->outqueue, msg); in sctp_cmd_send_msg()
/net/sched/
Dsch_taprio.c 113 struct sched_gate_list *sched) in taprio_calculate_gate_durations() argument
120 list_for_each_entry(entry, &sched->entries, list) { in taprio_calculate_gate_durations()
143 cur = list_next_entry_circular(cur, &sched->entries, list); in taprio_calculate_gate_durations()
152 sched->max_open_gate_duration[tc] < entry->gate_duration[tc]) in taprio_calculate_gate_durations()
153 sched->max_open_gate_duration[tc] = entry->gate_duration[tc]; in taprio_calculate_gate_durations()
163 static ktime_t sched_base_time(const struct sched_gate_list *sched) in sched_base_time() argument
165 if (!sched) in sched_base_time()
168 return ns_to_ktime(sched->base_time); in sched_base_time()
191 struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu); in taprio_free_sched_cb() local
194 list_for_each_entry_safe(entry, n, &sched->entries, list) { in taprio_free_sched_cb()
[all …]
Dact_gate.c 234 struct tcf_gate_params *sched, in parse_gate_list() argument
264 list_add_tail(&entry->list, &sched->entries); in parse_gate_list()
268 sched->num_entries = i; in parse_gate_list()
273 release_entry_list(&sched->entries); in parse_gate_list()
Dsch_hfsc.c 121 struct hfsc_sched *sched; /* scheduler data */ member
187 struct rb_node **p = &cl->sched->eligible.rb_node; in eltree_insert()
200 rb_insert_color(&cl->el_node, &cl->sched->eligible); in eltree_insert()
206 rb_erase(&cl->el_node, &cl->sched->eligible); in eltree_remove()
1059 cl->sched = q; in hfsc_change_class()
1418 q->root.sched = q; in hfsc_init_qdisc()
DKconfig 95 See the top of <file:net/sched/sch_red.c> for more details.
106 See the top of <file:net/sched/sch_sfb.c> for more details.
117 See the top of <file:net/sched/sch_sfq.c> for more details.
129 See the top of <file:net/sched/sch_teql.c> for more details.
140 See the top of <file:net/sched/sch_tbf.c> for more details.
151 See the top of <file:net/sched/sch_cbs.c> for more details.
162 See the top of <file:net/sched/sch_etf.c> for more details.
179 See the top of <file:net/sched/sch_taprio.c> for more details.
189 (see the top of <file:net/sched/sch_red.c> for details and
/net/ipv6/
Dip6_flowlabel.c 152 unsigned long sched = 0; in ip6_fl_gc() local
174 if (!sched || time_before(ttd, sched)) in ip6_fl_gc()
175 sched = ttd; in ip6_fl_gc()
180 if (!sched && atomic_read(&fl_size)) in ip6_fl_gc()
181 sched = now + FL_MAX_LINGER; in ip6_fl_gc()
182 if (sched) { in ip6_fl_gc()
183 mod_timer(&ip6_fl_gc_timer, sched); in ip6_fl_gc()
/net/sunrpc/
DMakefile 11 sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
/net/
DMakefile 15 obj-y += ethernet/ 802/ sched/ netlink/ bpf/ ethtool/
DKconfig 256 source "net/sched/Kconfig"
/net/unix/
Daf_unix.c 1460 int sched; in unix_wait_for_peer() local
1465 sched = !sock_flag(other, SOCK_DEAD) && in unix_wait_for_peer()
1471 if (sched) in unix_wait_for_peer()