Lines Matching +full:peak +full:- +full:to +full:- +full:peak

1 // SPDX-License-Identifier: GPL-2.0-or-later
6 * Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
26 -------
31 ------------
35 does not exceed B + R*(t_f-t_i).
41 s_i + ... + s_k <= B + R*(t_k - t_i)
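
As a quick sanity check of this inequality, here is a small userspace sketch (an editor's illustration, not part of sch_tbf.c; the function name obeys_tbf and every number below are invented) that tests whether a packet trace conforms to a given R and B:

#include <stdio.h>

/* Return 1 if every window i..k of the trace satisfies
 * s_i + ... + s_k <= B + R * (t_k - t_i), else 0.
 * t[] in seconds, s[] in bytes, R in bytes/sec, B in bytes. */
static int obeys_tbf(const double *t, const double *s, int n, double R, double B)
{
        for (int i = 0; i < n; i++) {
                double sum = 0.0;

                for (int k = i; k < n; k++) {
                        sum += s[k];
                        if (sum > B + R * (t[k] - t[i]))
                                return 0;
                }
        }
        return 1;
}

int main(void)
{
        double t[] = { 0.000, 0.001, 0.002, 0.010 };    /* arrival times, s */
        double s[] = { 1500, 1500, 1500, 1500 };        /* packet sizes, bytes */

        /* Illustrative parameters: R = 1 Mbyte/s, burst B = 3000 bytes. */
        printf("conforms: %d\n", obeys_tbf(t, s, 4, 1e6, 3000.0));
        return 0;
}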
44 ----------
54 N(t_* + 0) = N(t_* - 0) - S/R.
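
The same bookkeeping can be written as a tiny standalone simulation (again an editorial sketch with invented names, not kernel code): N is tracked in seconds of credit, capped at B/R, and sending S bytes is allowed only once S/R <= N:

#include <stdio.h>

struct bucket {
        double N;       /* current credit, seconds    */
        double cap;     /* B/R, seconds               */
        double R;       /* token rate, bytes/sec      */
        double last;    /* time of the last update, s */
};

/* Advance the clock: N(t+delta) = min(B/R, N(t) + delta). */
static void advance(struct bucket *b, double now)
{
        b->N += now - b->last;
        if (b->N > b->cap)
                b->N = b->cap;
        b->last = now;
}

/* Try to send S bytes at time 'now': return 1 and charge the bucket if
 * S/R <= N, else 0 (the packet has to wait another S/R - N seconds). */
static int try_send(struct bucket *b, double now, double S)
{
        advance(b, now);
        if (S / b->R > b->N)
                return 0;
        b->N -= S / b->R;       /* N(t_* + 0) = N(t_* - 0) - S/R */
        return 1;
}

int main(void)
{
        /* R = 1 Mbyte/s, B = 3000 bytes, so the cap is B/R = 3 ms. */
        struct bucket b = { .N = 0.003, .cap = 0.003, .R = 1e6, .last = 0.0 };

        printf("%d %d %d\n",
               try_send(&b, 0.0, 1500),         /* fits in the burst      */
               try_send(&b, 0.0, 1500),         /* exactly drains it      */
               try_send(&b, 0.0, 1500));        /* refused: ~1.5 ms short */
        return 0;
}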
58 Actually, QoS requires two TBFs to be applied to a data stream.
60 one with rate P (peak rate) and depth M (equal to link MTU)
63 It is easy to see that P>R, and B>M. If P is infinity, this double
64 TBF is equivalent to a single one.
68 lat = max ((L-B)/R, (L-M)/P)
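
For a feel of the numbers, a minimal program evaluating this bound with purely illustrative parameters (nothing here is taken from a real configuration):

#include <stdio.h>

/* lat = max((L-B)/R, (L-M)/P): whichever bucket drains the backlog L
 * more slowly dominates the reshaping latency. */
static double tbf_latency(double L, double B, double R, double M, double P)
{
        double main_bucket = (L - B) / R;
        double peak_bucket = (L - M) / P;

        return main_bucket > peak_bucket ? main_bucket : peak_bucket;
}

int main(void)
{
        /* L = 100 KB of backlog, B = 10 KB burst at R = 1 Mbyte/s,
         * M = 1500 bytes at peak rate P = 10 Mbyte/s. */
        printf("lat = %.4f s\n", tbf_latency(100e3, 10e3, 1e6, 1500, 10e6));
        return 0;
}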
72 ------
75 when it is ready to transmit.
88 Note that the peak rate TBF is much tougher: with MTU 1500,
89 P_crit = 150 Kbytes/sec. So, if you need greater peak
90 rates, use an Alpha with HZ=1000 :-)
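
The 150 Kbytes/sec figure follows from the 1/HZ timer resolution: the bucket can be refilled at most HZ times per second, so a bucket of depth D bytes sustains at most roughly D*HZ bytes/sec. A trivial check, assuming HZ=100 and the peak-bucket depth equal to a 1500-byte MTU:

#include <stdio.h>

int main(void)
{
        unsigned int hz = 100;          /* assumed legacy timer frequency */
        unsigned int mtu = 1500;        /* peak-bucket depth, bytes       */

        /* At most one refill per 1/HZ tick, so the sustainable peak rate
         * is bounded by depth * HZ. */
        printf("P_crit = %u bytes/sec\n", mtu * hz);    /* 150000 */
        return 0;
}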
93 It is passed to the default bfifo qdisc - if the inner qdisc is
104 struct psched_ratecfg peak; member
109 s64 t_c; /* Time check-point */
110 struct Qdisc *qdisc; /* Inner qdisc, default - bfifo queue */
115 /* Time to Length: convert time in ns to length in bytes
116 * to determine how many bytes can be sent in a given time.
122 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC in psched_ns_t2l()
124 u64 len = time_in_ns * r->rate_bytes_ps; in psched_ns_t2l()
128 if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) { in psched_ns_t2l()
133 if (len > r->overhead) in psched_ns_t2l()
134 len -= r->overhead; in psched_ns_t2l()
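
The conversion sketched in this helper is simply len = time_in_ns * rate_bytes_ps / NSEC_PER_SEC. The snippet below is a userspace re-derivation of that arithmetic (not the kernel's psched_ns_t2l(), and it omits the ATM cell and overhead adjustments visible above):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Bytes transmittable in time_in_ns at rate_bytes_ps.  Note that the
 * 64-bit product can overflow for very large rates and intervals. */
static uint64_t ns_to_len(uint64_t time_in_ns, uint64_t rate_bytes_ps)
{
        return time_in_ns * rate_bytes_ps / NSEC_PER_SEC;
}

int main(void)
{
        /* 1 ms at 125 Mbyte/s (a 1 Gbit/s link) is 125000 bytes. */
        printf("%llu bytes\n",
               (unsigned long long)ns_to_len(1000000, 125000000));
        return 0;
}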
147 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) in tbf_offload_change()
151 qopt.handle = sch->handle; in tbf_offload_change()
152 qopt.parent = sch->parent; in tbf_offload_change()
153 qopt.replace_params.rate = q->rate; in tbf_offload_change()
154 qopt.replace_params.max_size = q->max_size; in tbf_offload_change()
155 qopt.replace_params.qstats = &sch->qstats; in tbf_offload_change()
157 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TBF, &qopt); in tbf_offload_change()
165 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) in tbf_offload_destroy()
169 qopt.handle = sch->handle; in tbf_offload_destroy()
170 qopt.parent = sch->parent; in tbf_offload_destroy()
171 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TBF, &qopt); in tbf_offload_destroy()
179 qopt.handle = sch->handle; in tbf_offload_dump()
180 qopt.parent = sch->parent; in tbf_offload_dump()
181 qopt.stats.bstats = &sch->bstats; in tbf_offload_dump()
182 qopt.stats.qstats = &sch->qstats; in tbf_offload_dump()
207 qdisc_skb_cb(segs)->pkt_len = segs->len; in tbf_segment()
208 len += segs->len; in tbf_segment()
209 ret = qdisc_enqueue(segs, q->qdisc, to_free); in tbf_segment()
217 sch->q.qlen += nb; in tbf_segment()
219 qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len); in tbf_segment()
231 if (qdisc_pkt_len(skb) > q->max_size) { in tbf_enqueue()
233 skb_gso_validate_mac_len(skb, q->max_size)) in tbf_enqueue()
237 ret = qdisc_enqueue(skb, q->qdisc, to_free); in tbf_enqueue()
244 sch->qstats.backlog += len; in tbf_enqueue()
245 sch->q.qlen++; in tbf_enqueue()
251 return q->peak.rate_bytes_ps; in tbf_peak_present()
259 skb = q->qdisc->ops->peek(q->qdisc); in tbf_dequeue()
268 toks = min_t(s64, now - q->t_c, q->buffer); in tbf_dequeue()
271 ptoks = toks + q->ptokens; in tbf_dequeue()
272 if (ptoks > q->mtu) in tbf_dequeue()
273 ptoks = q->mtu; in tbf_dequeue()
274 ptoks -= (s64) psched_l2t_ns(&q->peak, len); in tbf_dequeue()
276 toks += q->tokens; in tbf_dequeue()
277 if (toks > q->buffer) in tbf_dequeue()
278 toks = q->buffer; in tbf_dequeue()
279 toks -= (s64) psched_l2t_ns(&q->rate, len); in tbf_dequeue()
282 skb = qdisc_dequeue_peeked(q->qdisc); in tbf_dequeue()
286 q->t_c = now; in tbf_dequeue()
287 q->tokens = toks; in tbf_dequeue()
288 q->ptokens = ptoks; in tbf_dequeue()
290 sch->q.qlen--; in tbf_dequeue()
295 qdisc_watchdog_schedule_ns(&q->watchdog, in tbf_dequeue()
296 now + max_t(long, -toks, -ptoks)); in tbf_dequeue()
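
Pieced together, the dequeue path above keeps both buckets in nanoseconds of credit and releases a packet only when neither bucket would go negative, otherwise arming the watchdog for the larger deficit. Below is a simplified userspace model of that accounting (an editorial sketch with invented names and parameters, assuming a peak bucket is configured; it is not the kernel code path):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

struct tbf_sim {
        int64_t buffer, mtu;            /* bucket depths, ns of credit */
        int64_t tokens, ptokens;        /* current credit, ns          */
        int64_t t_c;                    /* last checkpoint, ns         */
        uint64_t rate, peak;            /* bytes per second            */
};

/* Nanoseconds needed to send len bytes at rate_bps (cf. psched_l2t_ns). */
static int64_t l2t_ns(uint64_t rate_bps, uint32_t len)
{
        return (int64_t)len * NSEC_PER_SEC / (int64_t)rate_bps;
}

/* Return 0 and commit the new state if len bytes may go out at 'now',
 * otherwise return how many ns to wait (the watchdog delay). */
static int64_t try_dequeue(struct tbf_sim *q, int64_t now, uint32_t len)
{
        int64_t toks = now - q->t_c;

        if (toks > q->buffer)
                toks = q->buffer;

        int64_t ptoks = toks + q->ptokens;

        if (ptoks > q->mtu)
                ptoks = q->mtu;
        ptoks -= l2t_ns(q->peak, len);

        toks += q->tokens;
        if (toks > q->buffer)
                toks = q->buffer;
        toks -= l2t_ns(q->rate, len);

        if (toks < 0 || ptoks < 0)
                return -toks > -ptoks ? -toks : -ptoks;

        q->t_c = now;
        q->tokens = toks;
        q->ptokens = ptoks;
        return 0;
}

int main(void)
{
        struct tbf_sim q = {
                .buffer  = 2 * NSEC_PER_SEC / 1000,     /* 2 ms of credit */
                .mtu     = NSEC_PER_SEC / 1000,         /* 1 ms of credit */
                .tokens  = 2 * NSEC_PER_SEC / 1000,
                .ptokens = NSEC_PER_SEC / 1000,
                .t_c     = 0,
                .rate    = 1000000,                     /* 1 Mbyte/s      */
                .peak    = 10000000,                    /* 10 Mbyte/s     */
        };

        printf("wait %lld ns\n", (long long)try_dequeue(&q, 0, 1500)); /* sent now  */
        printf("wait %lld ns\n", (long long)try_dequeue(&q, 0, 1500)); /* throttled */
        return 0;
}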
318 qdisc_reset(q->qdisc); in tbf_reset()
319 q->t_c = ktime_get_ns(); in tbf_reset()
320 q->tokens = q->buffer; in tbf_reset()
321 q->ptokens = q->mtu; in tbf_reset()
322 qdisc_watchdog_cancel(&q->watchdog); in tbf_reset()
345 struct psched_ratecfg peak; in tbf_change() local
355 err = -EINVAL; in tbf_change()
360 if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE) in tbf_change()
361 qdisc_put_rtab(qdisc_get_rtab(&qopt->rate, in tbf_change()
365 if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE) in tbf_change()
366 qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate, in tbf_change()
370 buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U); in tbf_change()
371 mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U); in tbf_change()
375 psched_ratecfg_precompute(&rate, &qopt->rate, rate64); in tbf_change()
384 if (qopt->peakrate.rate) { in tbf_change()
387 psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64); in tbf_change()
388 if (peak.rate_bytes_ps <= rate.rate_bytes_ps) { in tbf_change()
389 pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu!\n", in tbf_change()
390 peak.rate_bytes_ps, rate.rate_bytes_ps); in tbf_change()
391 err = -EINVAL; in tbf_change()
398 mtu = psched_l2t_ns(&peak, pburst); in tbf_change()
400 max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu)); in tbf_change()
403 memset(&peak, 0, sizeof(peak)); in tbf_change()
408 max_size, qdisc_dev(sch)->name, in tbf_change()
412 err = -EINVAL; in tbf_change()
416 if (q->qdisc != &noop_qdisc) { in tbf_change()
417 err = fifo_set_limit(q->qdisc, qopt->limit); in tbf_change()
420 } else if (qopt->limit > 0) { in tbf_change()
421 child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit, in tbf_change()
428 /* child is fifo, no need to check for noop_qdisc */ in tbf_change()
434 qdisc_tree_flush_backlog(q->qdisc); in tbf_change()
435 old = q->qdisc; in tbf_change()
436 q->qdisc = child; in tbf_change()
438 q->limit = qopt->limit; in tbf_change()
440 q->mtu = mtu; in tbf_change()
442 q->mtu = PSCHED_TICKS2NS(qopt->mtu); in tbf_change()
443 q->max_size = max_size; in tbf_change()
445 q->buffer = buffer; in tbf_change()
447 q->buffer = PSCHED_TICKS2NS(qopt->buffer); in tbf_change()
448 q->tokens = q->buffer; in tbf_change()
449 q->ptokens = q->mtu; in tbf_change()
451 memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg)); in tbf_change()
452 memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg)); in tbf_change()
468 qdisc_watchdog_init(&q->watchdog, sch); in tbf_init()
469 q->qdisc = &noop_qdisc; in tbf_init()
472 return -EINVAL; in tbf_init()
474 q->t_c = ktime_get_ns(); in tbf_init()
483 qdisc_watchdog_cancel(&q->watchdog); in tbf_destroy()
485 qdisc_put(q->qdisc); in tbf_destroy()
503 opt.limit = q->limit; in tbf_dump()
504 psched_ratecfg_getrate(&opt.rate, &q->rate); in tbf_dump()
506 psched_ratecfg_getrate(&opt.peakrate, &q->peak); in tbf_dump()
509 opt.mtu = PSCHED_NS2TICKS(q->mtu); in tbf_dump()
510 opt.buffer = PSCHED_NS2TICKS(q->buffer); in tbf_dump()
513 if (q->rate.rate_bytes_ps >= (1ULL << 32) && in tbf_dump()
514 nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps, in tbf_dump()
518 q->peak.rate_bytes_ps >= (1ULL << 32) && in tbf_dump()
519 nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps, in tbf_dump()
527 return -1; in tbf_dump()
535 tcm->tcm_handle |= TC_H_MIN(1); in tbf_dump_class()
536 tcm->tcm_info = q->qdisc->handle; in tbf_dump_class()
549 *old = qdisc_replace(sch, new, &q->qdisc); in tbf_graft()
556 return q->qdisc; in tbf_leaf()
566 if (!walker->stop) { in tbf_walk()
567 if (walker->count >= walker->skip) in tbf_walk()
568 if (walker->fn(sch, 1, walker) < 0) { in tbf_walk()
569 walker->stop = 1; in tbf_walk()
572 walker->count++; in tbf_walk()