/net/ipv4/ |
D | tcp_cubic.c |
    384   static void hystart_update(struct sock *sk, u32 delay)  in hystart_update() argument
    427   if (ca->curr_rtt > delay)  in hystart_update()
    428   ca->curr_rtt = delay;  in hystart_update()
    450   u32 delay;  in cubictcp_acked() local
    460   delay = sample->rtt_us;  in cubictcp_acked()
    461   if (delay == 0)  in cubictcp_acked()
    462   delay = 1;  in cubictcp_acked()
    465   if (ca->delay_min == 0 || ca->delay_min > delay)  in cubictcp_acked()
    466   ca->delay_min = delay;  in cubictcp_acked()
    471   hystart_update(sk, delay);  in cubictcp_acked()
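The cubictcp_acked() hits show the min-filter idiom CUBIC feeds into HyStart: a zero RTT sample is clamped to 1 usec (0 is reserved to mean "unset"), then the smallest value seen so far is kept in delay_min. A minimal standalone sketch of just that idiom (the struct and field names are simplified stand-ins for bictcp's state):

    #include <stdint.h>

    struct min_rtt {
            uint32_t delay_min;     /* smallest RTT seen, usec; 0 = unset */
            uint32_t curr_rtt;      /* smallest RTT this round; 0 = unset */
    };

    /* Feed one RTT sample, mirroring lines 460-466 and 427-428. */
    static void rtt_sample(struct min_rtt *s, uint32_t rtt_us)
    {
            uint32_t delay = rtt_us;

            if (delay == 0)                 /* 0 means "no sample yet" */
                    delay = 1;

            if (s->delay_min == 0 || s->delay_min > delay)
                    s->delay_min = delay;   /* lifetime minimum */

            if (s->curr_rtt == 0 || s->curr_rtt > delay)
                    s->curr_rtt = delay;    /* per-round minimum */
    }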
|
D | Kconfig |
    610   algorithm, which uses a mixed loss/delay approach to compute the
    623   high speed long delay links. It uses round-trip-time to
    658   o Use the delay gradient as a congestion signal.
    665   delay gradients." In Networking 2011. Preprint: http://goo.gl/No3vdg
    675   delay. It tolerates packet loss and delay unrelated to congestion. It
    679   AQM schemes that do not provide a delay signal. It requires the fq
|
/net/batman-adv/ |
D | send.c |
    753   unsigned long delay,  in batadv_forw_bcast_packet_to_list() argument
    777   send_time += delay ? delay : msecs_to_jiffies(5);  in batadv_forw_bcast_packet_to_list()
    809   unsigned long delay,  in batadv_forw_bcast_packet_if() argument
    818   if (!delay) {  in batadv_forw_bcast_packet_if()
    831   ret = batadv_forw_bcast_packet_to_list(bat_priv, skb, delay,  in batadv_forw_bcast_packet_if()
    920   unsigned long delay,  in __batadv_forw_bcast_packet() argument
    945   ret = batadv_forw_bcast_packet_if(bat_priv, skb, delay,  in __batadv_forw_bcast_packet()
    974   unsigned long delay,  in batadv_forw_bcast_packet() argument
    977   return __batadv_forw_bcast_packet(bat_priv, skb, delay, own_packet);  in batadv_forw_bcast_packet()
    995   unsigned long delay,  in batadv_send_bcast_packet()
    [all …]
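Line 777 shows a defensive default: callers may pass delay == 0, and the broadcast is then spaced out by a small fixed interval rather than sent back-to-back. A hedged kernel-module-style sketch of the same pattern (my_work, my_handler() and MY_DEFAULT_MS are illustrative names, not batman-adv's):

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    #define MY_DEFAULT_MS 5         /* fallback spacing, as on line 777 */

    static void my_handler(struct work_struct *work) { /* forward one packet */ }
    static DECLARE_DELAYED_WORK(my_work, my_handler);

    /* Queue the work "delay" jiffies out, substituting a small default
     * when the caller asked for no delay at all. */
    static void queue_with_default(unsigned long delay)
    {
            schedule_delayed_work(&my_work,
                                  delay ? delay : msecs_to_jiffies(MY_DEFAULT_MS));
    }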
|
D | send.h |
    44    unsigned long delay,
    48    unsigned long delay,
|
/net/core/ |
D | link_watch.c |
    120   unsigned long delay = linkwatch_nextevent - jiffies;  in linkwatch_schedule_work() local
    129   delay = 0;  in linkwatch_schedule_work()
    133   if (delay > HZ)  in linkwatch_schedule_work()
    134   delay = 0;  in linkwatch_schedule_work()
    143   schedule_delayed_work(&linkwatch_work, delay);  in linkwatch_schedule_work()
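linkwatch computes its delay by unsigned subtraction, so a linkwatch_nextevent already in the past wraps around to a huge value; because a legitimate linkwatch delay is at most one second, line 133's delay > HZ test catches the wrap and falls back to running immediately. A standalone sketch of that guard (deadline, now and max_delay stand in for linkwatch_nextevent, jiffies and HZ):

    /* Relative delay until "deadline", where both arguments are
     * free-running unsigned tick counters (like jiffies). If the
     * deadline already passed, the subtraction wraps to a huge
     * value; anything above max_delay is collapsed to "run now".
     */
    static unsigned long delay_until(unsigned long deadline, unsigned long now,
                                     unsigned long max_delay)
    {
            unsigned long delay = deadline - now;

            if (delay > max_delay)
                    delay = 0;
            return delay;
    }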
|
D | pktgen.c |
    290   u64 delay; /* nano-seconds */  member
    583   pkt_dev->nfrags, (unsigned long long) pkt_dev->delay,  in pktgen_if_show()
    1099  pkt_dev->delay = ULLONG_MAX;  in pktgen_if_write()
    1101  pkt_dev->delay = (u64)value;  in pktgen_if_write()
    1104  (unsigned long long) pkt_dev->delay);  in pktgen_if_write()
    1115  pkt_dev->delay = pkt_dev->min_pkt_size*8*NSEC_PER_USEC/value;  in pktgen_if_write()
    1117  pr_info("Delay set at: %llu ns\n", pkt_dev->delay);  in pktgen_if_write()
    1130  pkt_dev->delay = NSEC_PER_SEC/value;  in pktgen_if_write()
    1132  pr_info("Delay set at: %llu ns\n", pkt_dev->delay);  in pktgen_if_write()
    2294  pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);  in spin()
    [all …]
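Lines 1115 and 1130 are the two rate-to-gap conversions behind pktgen's "rate" (Mb/s) and "ratep" (packets/s) parameters: a packet of S bytes at R Mb/s occupies S*8/R microseconds, i.e. S*8*NSEC_PER_USEC/R nanoseconds, and R packets per second means one packet every NSEC_PER_SEC/R nanoseconds. A standalone demo of the arithmetic (the 64-byte/100 Mb/s/10 kpps inputs are examples, not pktgen defaults):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_USEC 1000ULL
    #define NSEC_PER_SEC  1000000000ULL

    int main(void)
    {
            uint64_t pkt_size  = 64;        /* bytes; plays min_pkt_size */
            uint64_t rate_mbps = 100;       /* "rate" parameter */
            uint64_t rate_pps  = 10000;     /* "ratep" parameter */

            /* line 1115: S*8 bits at R Mb/s -> S*8*1000/R ns per packet */
            uint64_t gap_rate = pkt_size * 8 * NSEC_PER_USEC / rate_mbps;

            /* line 1130: one packet every 1/R seconds */
            uint64_t gap_pps = NSEC_PER_SEC / rate_pps;

            printf("gap at %llu Mb/s: %llu ns\n",
                   (unsigned long long)rate_mbps, (unsigned long long)gap_rate);
            printf("gap at %llu pps: %llu ns\n",
                   (unsigned long long)rate_pps, (unsigned long long)gap_pps);
            return 0;   /* prints 5120 ns and 100000 ns */
    }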
|
/net/netfilter/ |
D | nf_conntrack_ecache.c |
    101   int cpu, delay = -1;  in ecache_work() local
    115   delay = ECACHE_RETRY_WAIT;  in ecache_work()
    118   delay = 0;  in ecache_work()
    128   ctnet->ecache_dwork_pending = delay > 0;  in ecache_work()
    129   if (delay >= 0)  in ecache_work()
    130   schedule_delayed_work(&cnet->ecache_dwork, delay);  in ecache_work()
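Here delay doubles as a tri-state verdict: -1 means "done, do not reschedule", 0 means "requeue immediately", and a positive value backs off before retrying; line 129's delay >= 0 test implements all three at once. A hedged sketch of that pattern (the two predicates are hypothetical stubs, not nf_conntrack APIs):

    #include <linux/jiffies.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    static bool need_retry(void)   { return false; }  /* hypothetical stub */
    static bool more_pending(void) { return false; }  /* hypothetical stub */

    static void my_work_fn(struct work_struct *work);
    static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);

    static void my_work_fn(struct work_struct *work)
    {
            int delay = -1;                 /* -1: finished, stop */

            if (need_retry())
                    delay = HZ / 10;        /* >0: back off, retry later */
            else if (more_pending())
                    delay = 0;              /* 0: run again at once */

            if (delay >= 0)
                    schedule_delayed_work(&my_dwork, delay);
    }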
|
D | xt_LED.c |
    52    if ((ledinfo->delay > 0) && ledinfo->always_blink &&  in led_tg()
    60    if (ledinfo->delay > 0) {  in led_tg()
    62    jiffies + msecs_to_jiffies(ledinfo->delay));  in led_tg()
    65    } else if (ledinfo->delay == 0) {  in led_tg()
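xt_LED keeps the LED lit for delay milliseconds after the most recent matching packet: every hit re-arms a one-shot "turn it off" timer with mod_timer(), so the deadline keeps sliding forward while traffic flows. A kernel-style sketch (led_off_fn() is a stand-in for the target's trigger handling):

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    static void led_off_fn(struct timer_list *t) { /* switch the LED off */ }
    static DEFINE_TIMER(led_off_timer, led_off_fn);

    /* Called per matching packet: the LED goes dark only once
     * delay_ms elapses with no further hits. */
    static void led_hit(unsigned int delay_ms)
    {
            if (delay_ms > 0)
                    mod_timer(&led_off_timer,
                              jiffies + msecs_to_jiffies(delay_ms));
    }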
|
/net/nfc/nci/ |
D | spi.c |
    47    t.delay.value = nspi->xfer_udelay;  in __nci_spi_send()
    48    t.delay.unit = SPI_DELAY_UNIT_USECS;  in __nci_spi_send()
    125   u8 acknowledge_mode, unsigned int delay,  in nci_spi_allocate_spi() argument
    135   nspi->xfer_udelay = delay;  in nci_spi_allocate_spi()
    222   rx.delay.value = nspi->xfer_udelay;  in __nci_spi_read()
    223   rx.delay.unit = SPI_DELAY_UNIT_USECS;  in __nci_spi_read()
|
/net/dccp/ |
D | output.c |
    219   static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)  in dccp_wait_for_ccid() argument
    228   remaining = schedule_timeout(delay);  in dccp_wait_for_ccid()
    315   long delay, rc;  in dccp_flush_write_queue() local
    330   delay = msecs_to_jiffies(rc);  in dccp_flush_write_queue()
    331   if (delay > *time_budget)  in dccp_flush_write_queue()
    333   rc = dccp_wait_for_ccid(sk, delay);  in dccp_flush_write_queue()
    336   *time_budget -= (delay - rc);  in dccp_flush_write_queue()
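schedule_timeout() returns the jiffies that were left if the task woke early, so the flush loop on lines 330-336 can charge only the time actually slept, delay - rc, against a caller-supplied budget. A hedged sketch of that accounting (wait_one_step() condenses dccp_wait_for_ccid(), which also handles signals and socket state):

    #include <linux/jiffies.h>
    #include <linux/sched.h>

    /* Sleep up to "delay" jiffies; returns jiffies remaining if woken
     * early (the schedule_timeout() contract). */
    static long wait_one_step(long delay)
    {
            set_current_state(TASK_INTERRUPTIBLE);
            return schedule_timeout(delay);
    }

    /* Wait in steps without exceeding *time_budget jiffies in total. */
    static void wait_within_budget(long delay, long *time_budget)
    {
            while (*time_budget > 0) {
                    long rc;

                    if (delay > *time_budget)
                            break;                  /* step would overspend */
                    rc = wait_one_step(delay);
                    *time_budget -= delay - rc;     /* charge time slept */
                    if (rc == 0)
                            break;                  /* slept the full step */
            }
    }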
|
/net/rxrpc/ |
D | peer_event.c |
    580   time64_t base, now, delay;  in rxrpc_peer_keepalive_worker() local
    628   delay = base - now;  in rxrpc_peer_keepalive_worker()
    629   if (delay < 1)  in rxrpc_peer_keepalive_worker()
    630   delay = 1;  in rxrpc_peer_keepalive_worker()
    631   delay *= HZ;  in rxrpc_peer_keepalive_worker()
    633   timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);  in rxrpc_peer_keepalive_worker()
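The keepalive worker computes its next deadline in whole seconds, clamps it to at least one second, scales by HZ, and arms it with timer_reduce(), which only ever moves a pending timer earlier; concurrent proposers therefore converge on the soonest deadline. A kernel-style sketch (keepalive_fn() and the timer name are illustrative):

    #include <linux/jiffies.h>
    #include <linux/time64.h>
    #include <linux/timer.h>

    static void keepalive_fn(struct timer_list *t) { /* ping peers */ }
    static DEFINE_TIMER(keepalive_timer, keepalive_fn);

    /* Arm the keepalive for time "base" (seconds), at least 1s out. */
    static void arm_keepalive(time64_t base, time64_t now)
    {
            time64_t delay = base - now;

            if (delay < 1)
                    delay = 1;
            delay *= HZ;            /* whole seconds -> jiffies */
            timer_reduce(&keepalive_timer, jiffies + delay);
    }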
|
/net/rfkill/ |
D | input.c |
    142   const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);  in rfkill_ratelimit() local
    143   return time_after(jiffies, last + delay) ? 0 : delay;  in rfkill_ratelimit()
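rfkill_ratelimit() is a two-line rate limiter: if the last event is older than the window, act now (return 0); otherwise defer by a full window. Note that it returns the whole window rather than just the remainder. A sketch with the rfkill constant swapped for an illustrative one:

    #include <linux/jiffies.h>

    #define MY_OPS_DELAY_MS 200     /* stand-in for RFKILL_OPS_DELAY */

    /* 0 = enough time has passed since "last", act immediately;
     * otherwise a delay in jiffies to wait before acting. */
    static unsigned long my_ratelimit(unsigned long last)
    {
            const unsigned long delay = msecs_to_jiffies(MY_OPS_DELAY_MS);

            return time_after(jiffies, last + delay) ? 0 : delay;
    }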
|
/net/ceph/ |
D | messenger.c |
    602   con->delay = 0; /* reset backoff memory */  in ceph_con_open()
    1388  static int queue_con_delay(struct ceph_connection *con, unsigned long delay)  in queue_con_delay() argument
    1395  if (delay >= HZ)  in queue_con_delay()
    1396  delay = round_jiffies_relative(delay);  in queue_con_delay()
    1398  dout("%s %p %lu\n", __func__, con, delay);  in queue_con_delay()
    1399  if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {  in queue_con_delay()
    1460  ret = queue_con_delay(con, con->delay);  in con_backoff()
    1463  con, con->delay);  in con_backoff()
    1601  if (!con->delay) {  in con_fault()
    1602  con->delay = BASE_DELAY_INTERVAL;  in con_fault()
    [all …]
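con->delay is classic fault backoff: reset to 0 on a successful open (line 602), seeded with BASE_DELAY_INTERVAL on the first fault, and, in the elided lines, grown on each further fault; queue_con_delay() rounds delays of a second or more with round_jiffies_relative() so many timers expire together and save wakeups. A hedged sketch of the growth step (the doubling, base and cap here are illustrative, not ceph's exact values):

    #include <linux/jiffies.h>

    #define MY_BASE_DELAY   (HZ / 4)        /* illustrative, not ceph's */
    #define MY_MAX_DELAY    (15 * HZ)       /* illustrative cap */

    struct my_conn {
            unsigned long delay;            /* backoff memory; 0 = healthy */
    };

    /* Called on each connection fault: grow the retry delay. */
    static unsigned long my_next_backoff(struct my_conn *con)
    {
            if (!con->delay)
                    con->delay = MY_BASE_DELAY;
            else if (con->delay < MY_MAX_DELAY)
                    con->delay *= 2;
            return con->delay;
    }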
|
D | mon_client.c |
    309   unsigned long delay;  in __schedule_delayed() local
    312   delay = CEPH_MONC_HUNT_INTERVAL * monc->hunt_mult;  in __schedule_delayed()
    314   delay = CEPH_MONC_PING_INTERVAL;  in __schedule_delayed()
    316   dout("__schedule_delayed after %lu\n", delay);  in __schedule_delayed()
    318   round_jiffies_relative(delay));  in __schedule_delayed()
|
/net/sunrpc/xprtrdma/ |
D | transport.c |
    487   unsigned long delay;  in xprt_rdma_connect() local
    491   delay = 0;  in xprt_rdma_connect()
    493   delay = xprt_reconnect_delay(xprt);  in xprt_rdma_connect()
    496   trace_xprtrdma_op_connect(r_xprt, delay);  in xprt_rdma_connect()
    498   delay);  in xprt_rdma_connect()
|
/net/dccp/ccids/ |
D | ccid3.c |
    277   s64 delay;  in ccid3_hc_tx_send_packet() local
    324   delay = ktime_us_delta(hc->tx_t_nom, now);  in ccid3_hc_tx_send_packet()
    325   ccid3_pr_debug("delay=%ld\n", (long)delay);  in ccid3_hc_tx_send_packet()
    334   if (delay >= TFRC_T_DELTA)  in ccid3_hc_tx_send_packet()
    335   return (u32)delay / USEC_PER_MSEC;  in ccid3_hc_tx_send_packet()
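The TFRC sender asks how far in the future its nominal send time t_nom lies: ktime_us_delta() yields microseconds, and if the wait exceeds the scheduler granularity the packet is held, with the wait reported in whole milliseconds; otherwise it goes out now. A kernel-style sketch (MY_T_DELTA is an illustrative granularity, not TFRC_T_DELTA's actual value):

    #include <linux/ktime.h>

    #define MY_T_DELTA (USEC_PER_MSEC / 2)  /* illustrative granularity, usec */

    /* 0 = send now; otherwise milliseconds to wait until t_nom. */
    static u32 ms_until_send(ktime_t t_nom, ktime_t now)
    {
            s64 delay = ktime_us_delta(t_nom, now); /* usec until nominal */

            if (delay >= MY_T_DELTA)
                    return (u32)delay / USEC_PER_MSEC;
            return 0;                       /* within granularity */
    }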
|
/net/sched/ |
D | sch_netem.c |
    534   s64 delay;  in netem_enqueue() local
    536   delay = tabledist(q->latency, q->jitter,  in netem_enqueue()
    571   delay -= last->time_to_send - now;  in netem_enqueue()
    572   delay = max_t(s64, 0, delay);  in netem_enqueue()
    576   delay += packet_time_ns(qdisc_pkt_len(skb), q);  in netem_enqueue()
    579   cb->time_to_send = now + delay;  in netem_enqueue()
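netem stamps every packet with an absolute time_to_send: a randomized latency+jitter sample, minus however long the previous packet still has to wait (so FIFO order survives when reordering is off), clamped at zero, plus the time to serialize the bits at the configured rate. A standalone sketch of the arithmetic (jittered_latency plays tabledist()'s output, serialize_ns plays packet_time_ns()):

    #include <stdint.h>

    /* Absolute departure time for one packet, all values in ns. */
    static int64_t departure_time(int64_t now, int64_t jittered_latency,
                                  int64_t last_time_to_send,
                                  int64_t serialize_ns)
    {
            int64_t delay = jittered_latency;

            /* lines 571-572: absorb the wait the previous packet
             * already imposes, keeping FIFO order */
            delay -= last_time_to_send - now;
            if (delay < 0)
                    delay = 0;

            /* line 576: add per-byte transmission time */
            delay += serialize_ns;

            return now + delay;             /* line 579 */
    }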
|
D | sch_cbs.c |
    197   s64 delay;  in cbs_dequeue_soft() local
    199   delay = delay_from_credits(q->credits, q->idleslope);  in cbs_dequeue_soft()
    200   qdisc_watchdog_schedule_ns(&q->watchdog, now + delay);  in cbs_dequeue_soft()
|
D | sch_cake.c |
    1956  u64 delay;  in cake_dequeue() local
    2187  delay = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));  in cake_dequeue()
    2188  b->avge_delay = cake_ewma(b->avge_delay, delay, 8);  in cake_dequeue()
    2189  b->peak_delay = cake_ewma(b->peak_delay, delay,  in cake_dequeue()
    2190  delay > b->peak_delay ? 2 : 8);  in cake_dequeue()
    2191  b->base_delay = cake_ewma(b->base_delay, delay,  in cake_dequeue()
    2192  delay < b->base_delay ? 2 : 8);  in cake_dequeue()
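cake folds each packet's measured sojourn time into three shift-based EWMAs with asymmetric gains: the peak tracker reacts fast (shift 2) to samples above it and decays slowly (shift 8), the base tracker is the mirror image, and the average uses shift 8 both ways. A standalone sketch, assuming cake_ewma() has the common "avg -= avg>>s; avg += sample>>s" form:

    #include <stdint.h>

    /* Shift-based EWMA: small shift tracks fast, large shift smooths. */
    static uint64_t ewma(uint64_t avg, uint64_t sample, unsigned int shift)
    {
            avg -= avg >> shift;            /* decay the old average */
            avg += sample >> shift;         /* blend in the new sample */
            return avg;
    }

    /* Asymmetric trackers as on lines 2188-2192. */
    static void track_delay(uint64_t *avg, uint64_t *peak, uint64_t *base,
                            uint64_t delay)
    {
            *avg  = ewma(*avg, delay, 8);
            *peak = ewma(*peak, delay, delay > *peak ? 2 : 8);
            *base = ewma(*base, delay, delay < *base ? 2 : 8);
    }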
|
/net/tipc/ |
D | crypto.c |
    1399  unsigned long delay;  in tipc_crypto_key_synch() local
    1419  get_random_bytes(&delay, 2);  in tipc_crypto_key_synch()
    1420  delay %= 5;  in tipc_crypto_key_synch()
    1421  delay = msecs_to_jiffies(500 * ++delay);  in tipc_crypto_key_synch()
    1422  if (queue_delayed_work(tx->wq, &rx->work, delay))  in tipc_crypto_key_synch()
    2362  unsigned long delay = msecs_to_jiffies(5000);  in tipc_crypto_work_rx() local
    2405  if (resched && queue_delayed_work(tx->wq, &rx->work, delay))  in tipc_crypto_work_rx()
    2420  unsigned long delay;  in tipc_crypto_rekeying_sched() local
    2432  delay = (now) ? 0 : tx->rekeying_intv * 60 * 1000;  in tipc_crypto_rekeying_sched()
    2433  queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay));  in tipc_crypto_rekeying_sched()
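Lines 1419-1421 stagger key-exchange work by picking a delay uniformly from {500, 1000, 1500, 2000, 2500} ms: two random bytes, reduced mod 5, then (n+1)*500 ms. A standalone sketch of the pick (rand() stands in for get_random_bytes(); seed it before real use):

    #include <stdint.h>
    #include <stdlib.h>

    /* One of 500/1000/1500/2000/2500 ms, as on lines 1419-1421. */
    static unsigned int staggered_delay_ms(void)
    {
            uint16_t r = (uint16_t)rand(); /* plays get_random_bytes(&r, 2) */

            return 500 * (r % 5 + 1);
    }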
|
/net/ipv6/ |
D | mcast.c |
    1069  static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay)  in mld_ifc_start_work() argument
    1071  unsigned long tv = prandom_u32() % delay;  in mld_ifc_start_work()
    1086  static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay)  in mld_dad_start_work() argument
    1088  unsigned long tv = prandom_u32() % delay;  in mld_dad_start_work()
    1120  unsigned long delay = resptime;  in igmp6_group_queried() local
    1129  delay = ma->mca_work.timer.expires - jiffies;  in igmp6_group_queried()
    1132  if (delay >= resptime)  in igmp6_group_queried()
    1133  delay = prandom_u32() % resptime;  in igmp6_group_queried()
    1135  if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))  in igmp6_group_queried()
    2570  unsigned long delay;  in igmp6_join_group() local
    [all …]
|
/net/netfilter/ipvs/ |
D | Kconfig |
    250   tristate "shortest expected delay scheduling"
    252   The shortest expected delay scheduling algorithm assigns network
    253   connections to the server with the shortest expected delay. The
    254   expected delay that the job will experience is (Ci + 1) / Ui if
    269   that minimize its expected delay (The Shortest Expected Delay
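SED assigns each new connection to the server minimizing (Ci + 1) / Ui, where Ci is the server's active connection count and Ui its weight. Comparing two candidates as (Ci+1)*Uj < (Cj+1)*Ui avoids the division, the same trick the IPVS schedulers use. A standalone sketch of the selection loop (the struct is simplified; real IPVS also skips overloaded and disabled destinations):

    #include <stddef.h>
    #include <stdint.h>

    struct server {
            uint32_t conns;         /* Ci: active connections */
            uint32_t weight;        /* Ui: configured capacity */
    };

    /* Pick the server minimizing (Ci + 1) / Ui, by cross-multiplying. */
    static struct server *pick_sed(struct server *srv, size_t n)
    {
            struct server *best = NULL;
            size_t i;

            for (i = 0; i < n; i++) {
                    if (srv[i].weight == 0)
                            continue;       /* weight 0 = quiesced */
                    if (!best ||
                        (uint64_t)(srv[i].conns + 1) * best->weight <
                        (uint64_t)(best->conns + 1) * srv[i].weight)
                            best = &srv[i];
            }
            return best;
    }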
|
/net/802/ |
D | garp.c |
    408   unsigned long delay;  in garp_join_timer_arm() local
    410   delay = (u64)msecs_to_jiffies(garp_join_time) * prandom_u32() >> 32;  in garp_join_timer_arm()
    411   mod_timer(&app->join_timer, jiffies + delay);  in garp_join_timer_arm()
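Line 410 picks a random join delay in [0, garp_join_time ms) by multiplying a 32-bit random value by the range and keeping the top 32 bits of the 64-bit product: no division, and the rounding is spread evenly across the range rather than piling onto the low outputs as a bare "r % range" does (mrp.c below uses the identical line). A standalone sketch of the scaling:

    #include <stdint.h>

    /* Map a uniform 32-bit random r into [0, range): (range * r) >> 32. */
    static uint32_t scale_random(uint32_t range, uint32_t r)
    {
            return (uint32_t)(((uint64_t)range * r) >> 32);
    }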
|
D | mrp.c |
    593   unsigned long delay;  in mrp_join_timer_arm() local
    595   delay = (u64)msecs_to_jiffies(mrp_join_time) * prandom_u32() >> 32;  in mrp_join_timer_arm()
    596   mod_timer(&app->join_timer, jiffies + delay);  in mrp_join_timer_arm()
|
/net/sunrpc/ |
D | cache.c |
    501   int delay;  in do_cache_clean() local
    507   delay = round_jiffies_relative(30*HZ);  in do_cache_clean()
    509   delay = 5;  in do_cache_clean()
    511   queue_delayed_work(system_power_efficient_wq, &cache_cleaner, delay);  in do_cache_clean()
|