
Searched refs:delay (Results 1 – 25 of 37) sorted by relevance


/net/netfilter/
xt_LED.c
66 if ((ledinfo->delay > 0) && ledinfo->always_blink && in led_tg()
74 if (ledinfo->delay > 0) { in led_tg()
76 jiffies + msecs_to_jiffies(ledinfo->delay)); in led_tg()
79 } else if (ledinfo->delay == 0) { in led_tg()
145 if (ledinfo->delay > 0) in led_tg_check()
184 if (ledinfo->delay > 0) in led_tg_destroy()
nf_conntrack_ecache.c
84 int cpu, delay = -1; in ecache_work() local
98 delay = ECACHE_RETRY_WAIT; in ecache_work()
101 delay = 0; in ecache_work()
111 ctnet->ecache_dwork_pending = delay > 0; in ecache_work()
112 if (delay >= 0) in ecache_work()
113 schedule_delayed_work(&ctnet->ecache_dwork, delay); in ecache_work()
/net/ipv4/
tcp_cubic.c
382 static void hystart_update(struct sock *sk, u32 delay) in hystart_update() argument
399 if (ca->curr_rtt == 0 || ca->curr_rtt > delay) in hystart_update()
400 ca->curr_rtt = delay; in hystart_update()
425 u32 delay; in bictcp_acked() local
444 delay = (rtt_us << 3) / USEC_PER_MSEC; in bictcp_acked()
445 if (delay == 0) in bictcp_acked()
446 delay = 1; in bictcp_acked()
449 if (ca->delay_min == 0 || ca->delay_min > delay) in bictcp_acked()
450 ca->delay_min = delay; in bictcp_acked()
455 hystart_update(sk, delay); in bictcp_acked()
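
The tcp_cubic.c hits above convert each RTT sample from microseconds into units of 1/8 millisecond, clamp it to at least 1, and keep a running minimum (ca->delay_min) that HyStart later compares against. A minimal userspace sketch of that bookkeeping follows; the bictcp_sample() helper and the sample values are illustrative, not kernel code.

#include <stdio.h>
#include <stdint.h>

#define USEC_PER_MSEC 1000UL

static uint32_t delay_min;	/* stands in for ca->delay_min */

/* Convert an RTT sample (in microseconds) to 1/8 ms units, clamp it to a
 * minimum of 1, and track the smallest value seen, as in bictcp_acked(). */
static void bictcp_sample(uint32_t rtt_us)
{
	uint32_t delay = (rtt_us << 3) / USEC_PER_MSEC;

	if (delay == 0)
		delay = 1;
	if (delay_min == 0 || delay_min > delay)
		delay_min = delay;
}

int main(void)
{
	bictcp_sample(250);	/* 0.25 ms RTT -> delay = 2 */
	bictcp_sample(12500);	/* 12.5 ms RTT -> delay = 100 */
	printf("delay_min = %u (1/8 ms units)\n", (unsigned)delay_min);
	return 0;
}
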
/net/core/
link_watch.c
117 unsigned long delay = linkwatch_nextevent - jiffies; in linkwatch_schedule_work() local
126 delay = 0; in linkwatch_schedule_work()
130 if (delay > HZ) in linkwatch_schedule_work()
131 delay = 0; in linkwatch_schedule_work()
140 schedule_delayed_work(&linkwatch_work, delay); in linkwatch_schedule_work()
pktgen.c
262 u64 delay; /* nano-seconds */ member
542 pkt_dev->nfrags, (unsigned long long) pkt_dev->delay, in pktgen_if_show()
989 pkt_dev->delay = ULLONG_MAX; in pktgen_if_write()
991 pkt_dev->delay = (u64)value; in pktgen_if_write()
994 (unsigned long long) pkt_dev->delay); in pktgen_if_write()
1005 pkt_dev->delay = pkt_dev->min_pkt_size*8*NSEC_PER_USEC/value; in pktgen_if_write()
1007 pr_info("Delay set at: %llu ns\n", pkt_dev->delay); in pktgen_if_write()
1020 pkt_dev->delay = NSEC_PER_SEC/value; in pktgen_if_write()
1022 pr_info("Delay set at: %llu ns\n", pkt_dev->delay); in pktgen_if_write()
2202 pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); in spin()
[all …]
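
The pktgen.c hits derive the per-packet delay, in nanoseconds, from either a bit rate or a packet rate. Below is a small userspace sketch of the two conversions, assuming "value" is a rate in Mb/s in the first case and packets per second in the second (pktgen's "rate" and "ratep" parameters); the variable names are illustrative.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000ULL
#define NSEC_PER_SEC  1000000000ULL

int main(void)
{
	uint64_t min_pkt_size = 64;	/* bytes */
	uint64_t rate_mbps = 100;	/* assumed Mb/s, as for pktgen "rate" */
	uint64_t pps = 10000;		/* packets per second, as for "ratep" */

	/* bit rate: bits * 1000 / (Mb/s) gives the inter-packet gap in ns */
	uint64_t delay_rate = min_pkt_size * 8 * NSEC_PER_USEC / rate_mbps;

	/* packet rate: one second divided by packets per second */
	uint64_t delay_pps = NSEC_PER_SEC / pps;

	printf("Delay set at: %llu ns\n", (unsigned long long)delay_rate);
	printf("Delay set at: %llu ns\n", (unsigned long long)delay_pps);
	return 0;
}
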
/net/sched/
sch_cbq.c
412 psched_tdiff_t delay = cl->undertime - q->now; in cbq_ovl_classic() local
415 delay += cl->offtime; in cbq_ovl_classic()
425 delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log); in cbq_ovl_classic()
428 if (delay <= 0) in cbq_ovl_classic()
429 delay = 1; in cbq_ovl_classic()
430 cl->undertime = q->now + delay; in cbq_ovl_classic()
435 if (q->wd_expires == 0 || q->wd_expires > delay) in cbq_ovl_classic()
436 q->wd_expires = delay; in cbq_ovl_classic()
447 delay = b->undertime - q->now; in cbq_ovl_classic()
448 if (delay < base_delay) { in cbq_ovl_classic()
[all …]
sch_netem.c
524 psched_tdiff_t delay; in netem_enqueue() local
526 delay = tabledist(q->latency, q->jitter, in netem_enqueue()
544 delay -= netem_skb_cb(last)->time_to_send - now; in netem_enqueue()
545 delay = max_t(psched_tdiff_t, 0, delay); in netem_enqueue()
549 delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q); in netem_enqueue()
552 cb->time_to_send = now + delay; in netem_enqueue()
sch_pie.c
495 .delay = ((u32) PSCHED_TICKS2NS(q->vars.qdelay)) / in pie_dump_stats()
/net/dccp/
output.c
220 static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay) in dccp_wait_for_ccid() argument
229 remaining = schedule_timeout(delay); in dccp_wait_for_ccid()
311 long delay, rc; in dccp_flush_write_queue() local
326 delay = msecs_to_jiffies(rc); in dccp_flush_write_queue()
327 if (delay > *time_budget) in dccp_flush_write_queue()
329 rc = dccp_wait_for_ccid(sk, delay); in dccp_flush_write_queue()
332 *time_budget -= (delay - rc); in dccp_flush_write_queue()
/net/rfkill/
input.c
145 const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY); in rfkill_ratelimit() local
146 return time_after(jiffies, last + delay) ? 0 : delay; in rfkill_ratelimit()
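
The rfkill_ratelimit() hit is a small jiffies-based rate limiter: once last + delay has passed it returns 0 (no wait), otherwise it returns the full delay again. A userspace sketch follows, with time_after() written out as the usual wrap-safe comparison; the numbers are illustrative.

#include <stdio.h>

/* Wrap-safe "is a after b" comparison in the style of the kernel macro. */
#define time_after(a, b) ((long)((b) - (a)) < 0)

static unsigned long ratelimit(unsigned long now, unsigned long last,
			       unsigned long delay)
{
	return time_after(now, last + delay) ? 0 : delay;
}

int main(void)
{
	unsigned long last = 1000, delay = 20;

	printf("%lu\n", ratelimit(1010, last, delay));	/* still limited: 20 */
	printf("%lu\n", ratelimit(1030, last, delay));	/* window passed: 0 */
	return 0;
}
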
/net/dccp/ccids/
ccid3.c
285 s64 delay; in ccid3_hc_tx_send_packet() local
332 delay = ktime_us_delta(hc->tx_t_nom, now); in ccid3_hc_tx_send_packet()
333 ccid3_pr_debug("delay=%ld\n", (long)delay); in ccid3_hc_tx_send_packet()
342 if (delay >= TFRC_T_DELTA) in ccid3_hc_tx_send_packet()
343 return (u32)delay / USEC_PER_MSEC; in ccid3_hc_tx_send_packet()
/net/nfc/nci/
spi.c
134 u8 acknowledge_mode, unsigned int delay, in nci_spi_allocate_spi() argument
144 nspi->xfer_udelay = delay; in nci_spi_allocate_spi()
/net/ipv6/
mcast.c
1013 static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay) in mld_ifc_start_timer() argument
1015 unsigned long tv = prandom_u32() % delay; in mld_ifc_start_timer()
1028 static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay) in mld_dad_start_timer() argument
1030 unsigned long tv = prandom_u32() % delay; in mld_dad_start_timer()
1048 unsigned long delay = resptime; in igmp6_group_queried() local
1057 delay = ma->mca_timer.expires - jiffies; in igmp6_group_queried()
1060 if (delay >= resptime) in igmp6_group_queried()
1061 delay = prandom_u32() % resptime; in igmp6_group_queried()
1063 ma->mca_timer.expires = jiffies + delay; in igmp6_group_queried()
1064 if (!mod_timer(&ma->mca_timer, jiffies + delay)) in igmp6_group_queried()
[all …]
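
The igmp6_group_queried() hits show the usual MLD response randomization: if a report timer is already pending and would fire sooner than the queried maximum response time, that earlier expiry is kept; otherwise a uniform random delay below resptime is chosen. A userspace sketch of that decision, with rand() standing in for prandom_u32() and pick_delay() as an illustrative helper.

#include <stdio.h>
#include <stdlib.h>

static unsigned long pick_delay(unsigned long resptime,
				int timer_pending,
				unsigned long remaining)
{
	unsigned long delay = resptime;

	if (timer_pending)
		delay = remaining;		/* time left on the running timer */
	if (delay >= resptime)
		delay = rand() % resptime;	/* rand() stands in for prandom_u32() */
	return delay;
}

int main(void)
{
	printf("%lu\n", pick_delay(100, 1, 30));	/* keep the earlier timer: 30 */
	printf("%lu\n", pick_delay(100, 0, 0));		/* random value below 100 */
	return 0;
}
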
/net/netfilter/ipvs/
Kconfig
218 tristate "shortest expected delay scheduling"
220 The shortest expected delay scheduling algorithm assigns network
221 connections to the server with the shortest expected delay. The
222 expected delay that the job will experience is (Ci + 1) / Ui if
237 that minimize its expected delay (The Shortest Expected Delay
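
The Kconfig text above defines the Shortest Expected Delay metric as (Ci + 1) / Ui, where Ci is the number of connections on server i and Ui its service rate. A minimal userspace sketch that picks the server minimizing this ratio, using cross-multiplication to avoid floating point; the struct and function names are illustrative, not the kernel's ip_vs code.

#include <stdio.h>

struct server {
	const char *name;
	unsigned int conns;	/* Ci */
	unsigned int weight;	/* Ui */
};

static const struct server *sed_pick(const struct server *s, int n)
{
	const struct server *best = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (s[i].weight == 0)
			continue;
		/* (Ci+1)/Ui < (Cb+1)/Ub  <=>  (Ci+1)*Ub < (Cb+1)*Ui */
		if (!best ||
		    (unsigned long long)(s[i].conns + 1) * best->weight <
		    (unsigned long long)(best->conns + 1) * s[i].weight)
			best = &s[i];
	}
	return best;
}

int main(void)
{
	struct server pool[] = {
		{ "a", 10, 2 },
		{ "b",  3, 1 },
		{ "c",  8, 4 },
	};
	printf("chosen: %s\n", sed_pick(pool, 3)->name);	/* picks "c" */
	return 0;
}
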
/net/batman-adv/
send.h
30 unsigned long delay);
send.c
427 unsigned long delay) in batadv_add_bcast_packet_to_list() argument
469 _batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay); in batadv_add_bcast_packet_to_list()
/net/decnet/
dn_route.c
388 void dn_rt_cache_flush(int delay) in dn_rt_cache_flush() argument
393 if (delay < 0) in dn_rt_cache_flush()
394 delay = dn_rt_min_delay; in dn_rt_cache_flush()
398 if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) { in dn_rt_cache_flush()
404 if (delay > tmo) in dn_rt_cache_flush()
405 delay = tmo; in dn_rt_cache_flush()
408 if (delay <= 0) { in dn_rt_cache_flush()
417 dn_rt_flush_timer.expires = now + delay; in dn_rt_cache_flush()
/net/ceph/
mon_client.c
173 unsigned int delay; in __schedule_delayed() local
176 delay = 10 * HZ; in __schedule_delayed()
178 delay = 20 * HZ; in __schedule_delayed()
179 dout("__schedule_delayed after %u\n", delay); in __schedule_delayed()
180 schedule_delayed_work(&monc->delayed_work, delay); in __schedule_delayed()
messenger.c
712 con->delay = 0; /* reset backoff memory */ in ceph_con_open()
2124 con->delay = 0; /* reset backoff memory */ in process_connect()
2685 static int queue_con_delay(struct ceph_connection *con, unsigned long delay) in queue_con_delay() argument
2692 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) { in queue_con_delay()
2698 dout("%s %p %lu\n", __func__, con, delay); in queue_con_delay()
2751 ret = queue_con_delay(con, round_jiffies_relative(con->delay)); in con_backoff()
2754 con, con->delay); in con_backoff()
2887 if (con->delay == 0) in con_fault()
2888 con->delay = BASE_DELAY_INTERVAL; in con_fault()
2889 else if (con->delay < MAX_DELAY_INTERVAL) in con_fault()
[all …]
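
The messenger.c hits sketch a connection backoff: con->delay is reset to 0 on a successful open, seeded with BASE_DELAY_INTERVAL on the first fault, and grown up to MAX_DELAY_INTERVAL on later faults (the growth step itself is cut off in the output above). A userspace sketch of that pattern, assuming a doubling step and illustrative constants.

#include <stdio.h>

#define BASE_DELAY_INTERVAL 1	/* illustrative, in arbitrary ticks */
#define MAX_DELAY_INTERVAL  64

static unsigned long delay;	/* stands in for con->delay */

static void con_fault(void)
{
	if (delay == 0)
		delay = BASE_DELAY_INTERVAL;
	else if (delay < MAX_DELAY_INTERVAL)
		delay *= 2;	/* assumed growth step */
}

static void con_open(void)
{
	delay = 0;	/* reset backoff memory, as in ceph_con_open() */
}

int main(void)
{
	for (int i = 0; i < 8; i++) {
		con_fault();
		printf("retry in %lu ticks\n", delay);
	}
	con_open();
	printf("after reconnect: %lu\n", delay);
	return 0;
}
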
/net/802/
garp.c
398 unsigned long delay; in garp_join_timer_arm() local
400 delay = (u64)msecs_to_jiffies(garp_join_time) * prandom_u32() >> 32; in garp_join_timer_arm()
401 mod_timer(&app->join_timer, jiffies + delay); in garp_join_timer_arm()
mrp.c
584 unsigned long delay; in mrp_join_timer_arm() local
586 delay = (u64)msecs_to_jiffies(mrp_join_time) * prandom_u32() >> 32; in mrp_join_timer_arm()
587 mod_timer(&app->join_timer, jiffies + delay); in mrp_join_timer_arm()
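
Both join-timer hits use the same trick to draw a uniform random delay in [0, join_time) jiffies: multiply the jiffies range by a 32-bit random value in 64-bit arithmetic and shift right by 32, avoiding a modulo. A userspace sketch, with rand32() as an illustrative stand-in for prandom_u32().

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Illustrative stand-in for prandom_u32(). */
static uint32_t rand32(void)
{
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

int main(void)
{
	uint64_t range = 200;	/* e.g. msecs_to_jiffies(garp_join_time) */

	for (int i = 0; i < 5; i++) {
		/* range * rand32() fits in 64 bits; >> 32 maps it to [0, range) */
		unsigned long delay = range * rand32() >> 32;
		printf("delay = %lu jiffies\n", delay);
	}
	return 0;
}
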
/net/sunrpc/
cache.c
466 int delay = 5; in do_cache_clean() local
468 delay = round_jiffies_relative(30*HZ); in do_cache_clean()
471 delay = 0; in do_cache_clean()
473 if (delay) in do_cache_clean()
474 schedule_delayed_work(&cache_cleaner, delay); in do_cache_clean()
sched.c
646 void rpc_delay(struct rpc_task *task, unsigned long delay) in rpc_delay() argument
648 task->tk_timeout = delay; in rpc_delay()
svc_xprt.c
1021 int delay = 0; in svc_close_net() local
1027 msleep(delay++); in svc_close_net()
/net/bridge/
br_fdb.c
272 unsigned long delay = hold_time(br); in br_fdb_cleanup() local
285 this_timer = f->updated + delay; in br_fdb_cleanup()
