Lines Matching +full:1 +full:q
(Search hits from net/sched/sch_netem.c, the netem network-emulation queueing discipline. Each entry shows the file line number, the matched source line, and the enclosing function; declaration hits are additionally tagged "argument" or "local".)
33 Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
58 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
61 in the Netem module in the Linux kernel", available in [1]
118 TX_IN_GAP_PERIOD = 1,
125 GOOD_STATE = 1,
138 u32 a4; /* p14 for 4-states or 1-k for GE */
192 rho = (u64)state->rho + 1; in get_crandom()
193 answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32; in get_crandom()
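
Lines 192-193 are the core of get_crandom(), netem's correlated pseudo-random generator: the output is a fixed-point weighted average of a fresh uniform draw and the previous output, with rho as the weight, so rho = 0 degenerates to a plain prandom_u32(). A minimal user-space sketch of the same blend (a small xorshift stands in for prandom_u32(); all names here are illustrative, not the kernel's):

    #include <stdint.h>

    struct crand_state {
        uint32_t rho;    /* correlation weight, 32-bit fixed point */
        uint32_t last;   /* previous output */
    };

    static uint32_t rnd32(void)              /* stand-in for prandom_u32() */
    {
        static uint32_t x = 2463534242u;     /* fixed seed keeps the sketch deterministic */
        x ^= x << 13; x ^= x >> 17; x ^= x << 5;
        return x;
    }

    /* Correlated draw: answer = value*(1-rho) + last*rho, in 32-bit fixed point. */
    static uint32_t crandom(struct crand_state *s)
    {
        if (s->rho == 0)                     /* no correlation configured */
            return rnd32();

        uint64_t value = rnd32();
        uint64_t rho   = (uint64_t)s->rho + 1;
        uint64_t ans   = (value * ((1ull << 32) - rho) + (uint64_t)s->last * rho) >> 32;

        s->last = (uint32_t)ans;
        return (uint32_t)ans;
    }
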
202 static bool loss_4state(struct netem_sched_data *q) in loss_4state() argument
204 struct clgstate *clg = &q->clg; in loss_4state()
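
loss_4state() drives the 4-state Markov loss model from the NetemCLG work cited at the top of the file: good reception within a gap period, good reception within a burst, loss within a burst, and isolated loss within a gap. Its a1..a5 fields are filled from the netlink parameters p13, p31, p32, p14 and p23 (see lines 888-892 below). The listing does not show the transition logic itself, so the following is a simplified user-space step of such a chain, a sketch only and not the kernel's exact branch layout:

    #include <stdbool.h>
    #include <stdint.h>

    enum { TX_IN_GAP = 1, TX_IN_BURST, LOST_IN_BURST, LOST_IN_GAP };

    struct gi_state {
        int state;
        uint32_t p13, p31, p32, p14, p23;    /* transition probs, 32-bit fixed point */
    };

    static uint32_t rnd32(void)              /* stand-in for prandom_u32() */
    {
        static uint32_t x = 88172645u;
        x ^= x << 13; x ^= x >> 17; x ^= x << 5;
        return x;
    }

    /* One packet through the chain; returns true if that packet is lost. */
    static bool gi_step(struct gi_state *m)
    {
        uint64_t rnd = rnd32();

        switch (m->state) {
        case TX_IN_GAP:                              /* delivered, gap period */
            if (rnd < m->p13) {
                m->state = LOST_IN_BURST;            /* a loss burst starts */
                return true;
            }
            if (rnd < (uint64_t)m->p13 + m->p14) {
                m->state = LOST_IN_GAP;              /* isolated loss */
                return true;
            }
            return false;                            /* stay in the gap */
        case TX_IN_BURST:                            /* delivered, burst period */
            if (rnd < m->p23) {
                m->state = LOST_IN_BURST;
                return true;
            }
            return false;
        case LOST_IN_BURST:                          /* lost, burst period */
            if (rnd < m->p31) {
                m->state = TX_IN_GAP;                /* burst ends */
                return false;
            }
            if (rnd < (uint64_t)m->p31 + m->p32) {
                m->state = TX_IN_BURST;              /* burst continues, this one delivered */
                return false;
            }
            return true;                             /* another loss in the burst */
        default: /* LOST_IN_GAP */
            m->state = TX_IN_GAP;                    /* isolated loss is over */
            return false;
        }
    }
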
267 static bool loss_gilb_ell(struct netem_sched_data *q) in loss_gilb_ell() argument
269 struct clgstate *clg = &q->clg; in loss_gilb_ell()
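
loss_gilb_ell() is the Gilbert-Elliott alternative: two states (GOOD_STATE / BAD_STATE) with transition probabilities p and r, losing packets with probability 1-k while good and 1-h while bad. The a1..a4 fields are loaded with p, r, h and k1 at lines 906-909, and the comment at line 138 notes that a4 effectively carries 1-k. A minimal illustrative step, with the parameters spelled out instead of the kernel's a1..a4:

    #include <stdbool.h>
    #include <stdint.h>

    enum { GE_GOOD = 1, GE_BAD };

    struct ge_state {
        int state;
        uint32_t p;              /* P(good -> bad), 32-bit fixed point */
        uint32_t r;              /* P(bad -> good) */
        uint32_t loss_in_good;   /* 1-k: loss probability while good */
        uint32_t loss_in_bad;    /* 1-h: loss probability while bad  */
    };

    static uint32_t rnd32(void)  /* stand-in for prandom_u32() */
    {
        static uint32_t x = 123456789u;
        x ^= x << 13; x ^= x >> 17; x ^= x << 5;
        return x;
    }

    /* One packet through the Gilbert-Elliott chain; true means "lost". */
    static bool ge_step(struct ge_state *m)
    {
        bool lost;

        if (m->state == GE_GOOD) {
            lost = rnd32() < m->loss_in_good;
            if (rnd32() < m->p)
                m->state = GE_BAD;
        } else {
            lost = rnd32() < m->loss_in_bad;
            if (rnd32() < m->r)
                m->state = GE_GOOD;
        }
        return lost;
    }
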
288 static bool loss_event(struct netem_sched_data *q) in loss_event() argument
290 switch (q->loss_model) { in loss_event()
293 return q->loss && q->loss >= get_crandom(&q->loss_cor); in loss_event()
298 * if it is 1 drops a packet and if needed writes the event in in loss_event()
301 return loss_4state(q); in loss_event()
306 * if it is 1 drops a packet and if needed writes the event in in loss_event()
309 return loss_gilb_ell(q); in loss_event()
347 static u64 packet_time_ns(u64 len, const struct netem_sched_data *q) in packet_time_ns() argument
349 len += q->packet_overhead; in packet_time_ns()
351 if (q->cell_size) { in packet_time_ns()
352 u32 cells = reciprocal_divide(len, q->cell_size_reciprocal); in packet_time_ns()
354 if (len > cells * q->cell_size) /* extra cell needed for remainder */ in packet_time_ns()
356 len = cells * (q->cell_size + q->cell_overhead); in packet_time_ns()
359 return div64_u64(len * NSEC_PER_SEC, q->rate); in packet_time_ns()
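
packet_time_ns() (lines 347-359) is the rate-shaping arithmetic: add the configured per-packet overhead, optionally round the length up to a whole number of cells (each cell carrying cell_overhead extra bytes, e.g. for ATM-style framing), then convert bytes to nanoseconds at `rate` bytes per second. A self-contained sketch of the same calculation, using plain division where the kernel uses the precomputed reciprocal and div64_u64():

    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ull

    /* Transmission time in ns of one packet at `rate` bytes/second.
     * Mirrors the shape of netem's packet_time_ns(); illustrative only. */
    static uint64_t pkt_time_ns(uint64_t len, uint64_t rate,
                                uint32_t packet_overhead,
                                uint32_t cell_size, uint32_t cell_overhead)
    {
        len += packet_overhead;

        if (cell_size) {
            uint64_t cells = len / cell_size;    /* kernel: reciprocal_divide() */

            if (len > cells * cell_size)         /* a partial cell still costs a full one */
                cells++;
            len = cells * (cell_size + cell_overhead);
        }

        return len * NSEC_PER_SEC / rate;        /* kernel: div64_u64() */
    }
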
364 struct netem_sched_data *q = qdisc_priv(sch); in tfifo_reset() local
365 struct rb_node *p = rb_first(&q->t_root); in tfifo_reset()
371 rb_erase(&skb->rbnode, &q->t_root); in tfifo_reset()
375 rtnl_kfree_skbs(q->t_head, q->t_tail); in tfifo_reset()
376 q->t_head = NULL; in tfifo_reset()
377 q->t_tail = NULL; in tfifo_reset()
378 q->t_len = 0; in tfifo_reset()
383 struct netem_sched_data *q = qdisc_priv(sch); in tfifo_enqueue() local
386 if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) { in tfifo_enqueue()
387 if (q->t_tail) in tfifo_enqueue()
388 q->t_tail->next = nskb; in tfifo_enqueue()
390 q->t_head = nskb; in tfifo_enqueue()
391 q->t_tail = nskb; in tfifo_enqueue()
393 struct rb_node **p = &q->t_root.rb_node, *parent = NULL; in tfifo_enqueue()
406 rb_insert_color(&nskb->rbnode, &q->t_root); in tfifo_enqueue()
408 q->t_len++; in tfifo_enqueue()
409 sch->q.qlen++; in tfifo_enqueue()
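
tfifo_enqueue() (lines 386-409) keeps delayed packets ordered by time_to_send using two containers: a packet whose release time is not earlier than the current tail is appended to a plain linked list (t_head/t_tail) in O(1), and only a packet that must overtake already-queued traffic goes into the rbtree t_root. The sketch below collapses both cases into one singly linked list just to show the ordering rule; in the kernel the out-of-order path is an rbtree insert, so reordered packets cost O(log n):

    #include <stddef.h>
    #include <stdint.h>

    struct pkt {
        uint64_t time_to_send;        /* absolute release time, ns */
        struct pkt *next;
    };

    struct tfifo {
        struct pkt *head, *tail;
    };

    static void tfifo_enqueue(struct tfifo *q, struct pkt *p)
    {
        /* Fast path: arrivals that are already in order are appended. */
        if (!q->tail || p->time_to_send >= q->tail->time_to_send) {
            p->next = NULL;
            if (q->tail)
                q->tail->next = p;
            else
                q->head = p;
            q->tail = p;
            return;
        }

        /* Out of order (a reordered packet): sorted insert.  The kernel
         * keeps these in the rbtree q->t_root instead of walking a list. */
        struct pkt **pp = &q->head;

        while (*pp && (*pp)->time_to_send <= p->time_to_send)
            pp = &(*pp)->next;
        p->next = *pp;
        *pp = p;
    }
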
441 struct netem_sched_data *q = qdisc_priv(sch); in netem_enqueue() local
447 int count = 1; in netem_enqueue()
455 if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) in netem_enqueue()
459 if (loss_event(q)) { in netem_enqueue()
460 if (q->ecn && INET_ECN_set_ce(skb)) in netem_enqueue()
474 if (q->latency || q->jitter || q->rate) in netem_enqueue()
482 if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { in netem_enqueue()
484 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ in netem_enqueue()
486 q->duplicate = 0; in netem_enqueue()
488 q->duplicate = dupsave; in netem_enqueue()
498 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { in netem_enqueue()
521 1<<(prandom_u32() % 8); in netem_enqueue()
524 if (unlikely(q->t_len >= sch->limit)) { in netem_enqueue()
534 if (q->gap == 0 || /* not doing reordering */ in netem_enqueue()
535 q->counter < q->gap - 1 || /* inside last reordering gap */ in netem_enqueue()
536 q->reorder < get_crandom(&q->reorder_cor)) { in netem_enqueue()
540 delay = tabledist(q->latency, q->jitter, in netem_enqueue()
541 &q->delay_cor, q->delay_dist); in netem_enqueue()
545 if (q->rate) { in netem_enqueue()
548 if (sch->q.tail) in netem_enqueue()
549 last = netem_skb_cb(sch->q.tail); in netem_enqueue()
550 if (q->t_root.rb_node) { in netem_enqueue()
554 t_skb = skb_rb_last(&q->t_root); in netem_enqueue()
560 if (q->t_tail) { in netem_enqueue()
562 netem_skb_cb(q->t_tail); in netem_enqueue()
580 delay += packet_time_ns(qdisc_pkt_len(skb), q); in netem_enqueue()
584 ++q->counter; in netem_enqueue()
592 q->counter = 0; in netem_enqueue()
594 __qdisc_enqueue_head(skb, &sch->q); in netem_enqueue()
604 nb = skb ? 1 : 0; in netem_enqueue()
621 /* Parent qdiscs accounted for 1 skb of size @prev_len */ in netem_enqueue()
622 qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len)); in netem_enqueue()
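
netem_enqueue() strings the impairments together: optional duplication (count > 1), the loss models above (or ECN marking instead of dropping when ecn is set), payload corruption, and finally the delay decision. Lines 534-541 are the reordering rule: every gap-th packet that also beats the reorder probability skips the tabledist() delay and is queued at the head for immediate dispatch, which is how it overtakes the delayed traffic. A compact sketch of just that decision (the helper name and struct are assumptions, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>

    struct reorder_cfg {
        uint32_t gap;       /* send every gap-th packet un-delayed */
        uint32_t reorder;   /* reorder probability, 32-bit fixed point */
        uint32_t counter;   /* packets since the last reorder event */
    };

    /* Returns true if this packet should be delayed normally,
     * false if it should jump the queue and be sent immediately. */
    static bool delay_this_packet(struct reorder_cfg *c, uint32_t crnd /* correlated random */)
    {
        if (c->gap == 0 ||                 /* reordering disabled */
            c->counter < c->gap - 1 ||     /* still inside the current gap */
            c->reorder < crnd) {           /* probability check failed */
            c->counter++;
            return true;                   /* tabledist() delay applies */
        }

        c->counter = 0;                    /* reorder event: reset the gap counter */
        return false;                      /* enqueue at head, no delay */
    }
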
633 static void get_slot_next(struct netem_sched_data *q, u64 now) in get_slot_next() argument
637 if (!q->slot_dist) in get_slot_next()
638 next_delay = q->slot_config.min_delay + in get_slot_next()
640 (q->slot_config.max_delay - in get_slot_next()
641 q->slot_config.min_delay) >> 32); in get_slot_next()
643 next_delay = tabledist(q->slot_config.dist_delay, in get_slot_next()
644 (s32)(q->slot_config.dist_jitter), in get_slot_next()
645 NULL, q->slot_dist); in get_slot_next()
647 q->slot.slot_next = now + next_delay; in get_slot_next()
648 q->slot.packets_left = q->slot_config.max_packets; in get_slot_next()
649 q->slot.bytes_left = q->slot_config.max_bytes; in get_slot_next()
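
get_slot_next() (lines 637-649) implements the slot feature, which models media that deliver traffic in scheduled bursts: the next slot opens either after a random delay scaled between min_delay and max_delay, or after a tabledist() draw when a slot distribution table is loaded, and each slot carries at most max_packets / max_bytes before a new slot must be chosen. A sketch of the uniform branch's fixed-point scaling (names are illustrative):

    #include <stdint.h>

    struct slot_cfg {
        int64_t min_delay, max_delay;    /* ns */
        int32_t max_packets;
        int32_t max_bytes;
    };

    struct slot_state {
        uint64_t slot_next;              /* absolute time the next slot opens, ns */
        int32_t  packets_left;
        int32_t  bytes_left;
    };

    /* Pick when the next slot opens and refill its budgets.  rnd is a uniform
     * 32-bit value; (rnd * span) >> 32 maps it onto [0, span), as in the
     * listing above.  Assumes span < 2^32 ns so the multiply does not overflow. */
    static void slot_next(struct slot_state *s, const struct slot_cfg *c,
                          uint64_t now_ns, uint32_t rnd)
    {
        uint64_t span = (uint64_t)(c->max_delay - c->min_delay);
        uint64_t next_delay = (uint64_t)c->min_delay + (((uint64_t)rnd * span) >> 32);

        s->slot_next    = now_ns + next_delay;
        s->packets_left = c->max_packets;
        s->bytes_left   = c->max_bytes;
    }
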
652 static struct sk_buff *netem_peek(struct netem_sched_data *q) in netem_peek() argument
654 struct sk_buff *skb = skb_rb_first(&q->t_root); in netem_peek()
658 return q->t_head; in netem_peek()
659 if (!q->t_head) in netem_peek()
663 t2 = netem_skb_cb(q->t_head)->time_to_send; in netem_peek()
666 return q->t_head; in netem_peek()
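
Because delayed packets live in two places (the in-order list and the rbtree), netem_peek() (lines 654-666) has to return whichever head is due first, comparing the rbtree's first entry with t_head by time_to_send. A trivial sketch of that comparison:

    #include <stddef.h>
    #include <stdint.h>

    struct pkt { uint64_t time_to_send; };

    /* Return the earlier-due of the two candidate heads; either may be NULL. */
    static struct pkt *peek_earlier(struct pkt *rb_first, struct pkt *list_head)
    {
        if (!rb_first)
            return list_head;
        if (!list_head)
            return rb_first;
        return rb_first->time_to_send < list_head->time_to_send ? rb_first : list_head;
    }
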
669 static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb) in netem_erase_head() argument
671 if (skb == q->t_head) { in netem_erase_head()
672 q->t_head = skb->next; in netem_erase_head()
673 if (!q->t_head) in netem_erase_head()
674 q->t_tail = NULL; in netem_erase_head()
676 rb_erase(&skb->rbnode, &q->t_root); in netem_erase_head()
682 struct netem_sched_data *q = qdisc_priv(sch); in netem_dequeue() local
686 skb = __qdisc_dequeue_head(&sch->q); in netem_dequeue()
693 skb = netem_peek(q); in netem_dequeue()
700 if (q->slot.slot_next && q->slot.slot_next < time_to_send) in netem_dequeue()
701 get_slot_next(q, now); in netem_dequeue()
703 if (time_to_send <= now && q->slot.slot_next <= now) { in netem_dequeue()
704 netem_erase_head(q, skb); in netem_dequeue()
705 q->t_len--; in netem_dequeue()
713 if (q->slot.slot_next) { in netem_dequeue()
714 q->slot.packets_left--; in netem_dequeue()
715 q->slot.bytes_left -= qdisc_pkt_len(skb); in netem_dequeue()
716 if (q->slot.packets_left <= 0 || in netem_dequeue()
717 q->slot.bytes_left <= 0) in netem_dequeue()
718 get_slot_next(q, now); in netem_dequeue()
721 if (q->qdisc) { in netem_dequeue()
726 err = qdisc_enqueue(skb, q->qdisc, &to_free); in netem_dequeue()
731 qdisc_tree_reduce_backlog(sch, 1, pkt_len); in netem_dequeue()
733 sch->q.qlen--; in netem_dequeue()
737 sch->q.qlen--; in netem_dequeue()
741 if (q->qdisc) { in netem_dequeue()
742 skb = q->qdisc->ops->dequeue(q->qdisc); in netem_dequeue()
744 sch->q.qlen--; in netem_dequeue()
749 qdisc_watchdog_schedule_ns(&q->watchdog, in netem_dequeue()
751 q->slot.slot_next)); in netem_dequeue()
754 if (q->qdisc) { in netem_dequeue()
755 skb = q->qdisc->ops->dequeue(q->qdisc); in netem_dequeue()
757 sch->q.qlen--; in netem_dequeue()
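
netem_dequeue() first drains anything already queued at sch->q (the un-delayed, reordered packets), then looks at the tfifo head: a packet is released only once both its time_to_send and the current slot allow it (line 703); when a slot's budgets run out, get_slot_next() is called again, and when nothing is ready the qdisc watchdog is armed for the later of the packet's release time and the next slot opening (lines 749-751). If an inner qdisc is configured (line 721 onward), the released packet is re-enqueued there instead of being handed straight to the caller. A small sketch of the release/watchdog decision (names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    struct release_decision {
        bool send_now;        /* dequeue and hand the packet on */
        uint64_t wake_at;     /* otherwise, when to try again */
    };

    static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

    /* time_to_send: the head packet's release time; slot_next: 0 when slotting
     * is unused, else when the current/next slot opens. */
    static struct release_decision decide(uint64_t now, uint64_t time_to_send,
                                          uint64_t slot_next)
    {
        struct release_decision d;

        if (time_to_send <= now && slot_next <= now) {
            d.send_now = true;
            d.wake_at = 0;
        } else {
            d.send_now = false;
            d.wake_at = max_u64(time_to_send, slot_next);   /* watchdog target */
        }
        return d;
    }
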
766 struct netem_sched_data *q = qdisc_priv(sch); in netem_reset() local
770 if (q->qdisc) in netem_reset()
771 qdisc_reset(q->qdisc); in netem_reset()
772 qdisc_watchdog_cancel(&q->watchdog); in netem_reset()
807 static void get_slot(struct netem_sched_data *q, const struct nlattr *attr) in get_slot() argument
811 q->slot_config = *c; in get_slot()
812 if (q->slot_config.max_packets == 0) in get_slot()
813 q->slot_config.max_packets = INT_MAX; in get_slot()
814 if (q->slot_config.max_bytes == 0) in get_slot()
815 q->slot_config.max_bytes = INT_MAX; in get_slot()
818 q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter)); in get_slot()
820 q->slot.packets_left = q->slot_config.max_packets; in get_slot()
821 q->slot.bytes_left = q->slot_config.max_bytes; in get_slot()
822 if (q->slot_config.min_delay | q->slot_config.max_delay | in get_slot()
823 q->slot_config.dist_jitter) in get_slot()
824 q->slot.slot_next = ktime_get_ns(); in get_slot()
826 q->slot.slot_next = 0; in get_slot()
829 static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr) in get_correlation() argument
833 init_crandom(&q->delay_cor, c->delay_corr); in get_correlation()
834 init_crandom(&q->loss_cor, c->loss_corr); in get_correlation()
835 init_crandom(&q->dup_cor, c->dup_corr); in get_correlation()
838 static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr) in get_reorder() argument
842 q->reorder = r->probability; in get_reorder()
843 init_crandom(&q->reorder_cor, r->correlation); in get_reorder()
846 static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr) in get_corrupt() argument
850 q->corrupt = r->probability; in get_corrupt()
851 init_crandom(&q->corrupt_cor, r->correlation); in get_corrupt()
854 static void get_rate(struct netem_sched_data *q, const struct nlattr *attr) in get_rate() argument
858 q->rate = r->rate; in get_rate()
859 q->packet_overhead = r->packet_overhead; in get_rate()
860 q->cell_size = r->cell_size; in get_rate()
861 q->cell_overhead = r->cell_overhead; in get_rate()
862 if (q->cell_size) in get_rate()
863 q->cell_size_reciprocal = reciprocal_value(q->cell_size); in get_rate()
865 q->cell_size_reciprocal = (struct reciprocal_value) { 0 }; in get_rate()
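
get_rate() (lines 858-865) copies the TCA_NETEM_RATE attribute and, when cell_size is non-zero, precomputes cell_size_reciprocal so packet_time_ns() can count cells with reciprocal_divide() (multiplies and shifts) instead of dividing on every packet. The sketch below shows the same precompute-once, divide-by-multiplication idea with an estimate-and-correct step; it is not the kernel's reciprocal_div algorithm, the names are made up, and it relies on the GCC/Clang unsigned __int128 extension:

    #include <stdint.h>

    struct recip {
        uint32_t d;     /* the fixed divisor (e.g. cell_size); must be non-zero */
        uint64_t m;     /* roughly 2^64 / d, computed once at configuration time */
    };

    static struct recip recip_value(uint32_t d)
    {
        struct recip r = { d, ~0ull / d };
        return r;
    }

    /* Quotient a / r.d without a runtime division: multiply by the cached
     * reciprocal, then fix up the estimate, which is at most one too low. */
    static uint32_t recip_divide(uint32_t a, struct recip r)
    {
        uint32_t q = (uint32_t)(((unsigned __int128)a * r.m) >> 64);

        if ((uint64_t)(q + 1) * r.d <= a)
            q++;
        return q;
    }
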
868 static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr) in get_loss_clg() argument
885 q->loss_model = CLG_4_STATES; in get_loss_clg()
887 q->clg.state = TX_IN_GAP_PERIOD; in get_loss_clg()
888 q->clg.a1 = gi->p13; in get_loss_clg()
889 q->clg.a2 = gi->p31; in get_loss_clg()
890 q->clg.a3 = gi->p32; in get_loss_clg()
891 q->clg.a4 = gi->p14; in get_loss_clg()
892 q->clg.a5 = gi->p23; in get_loss_clg()
904 q->loss_model = CLG_GILB_ELL; in get_loss_clg()
905 q->clg.state = GOOD_STATE; in get_loss_clg()
906 q->clg.a1 = ge->p; in get_loss_clg()
907 q->clg.a2 = ge->r; in get_loss_clg()
908 q->clg.a3 = ge->h; in get_loss_clg()
909 q->clg.a4 = ge->k1; in get_loss_clg()
922 static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
950 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); in parse_attr()
958 struct netem_sched_data *q = qdisc_priv(sch); in netem_change() local
959 struct nlattr *tb[TCA_NETEM_MAX + 1]; in netem_change()
988 /* backup q->clg and q->loss_model */ in netem_change()
989 old_clg = q->clg; in netem_change()
990 old_loss_model = q->loss_model; in netem_change()
993 ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]); in netem_change()
995 q->loss_model = old_loss_model; in netem_change()
996 q->clg = old_clg; in netem_change()
1000 q->loss_model = CLG_RANDOM; in netem_change()
1004 swap(q->delay_dist, delay_dist); in netem_change()
1006 swap(q->slot_dist, slot_dist); in netem_change()
1009 q->latency = PSCHED_TICKS2NS(qopt->latency); in netem_change()
1010 q->jitter = PSCHED_TICKS2NS(qopt->jitter); in netem_change()
1011 q->limit = qopt->limit; in netem_change()
1012 q->gap = qopt->gap; in netem_change()
1013 q->counter = 0; in netem_change()
1014 q->loss = qopt->loss; in netem_change()
1015 q->duplicate = qopt->duplicate; in netem_change()
1020 if (q->gap) in netem_change()
1021 q->reorder = ~0; in netem_change()
1024 get_correlation(q, tb[TCA_NETEM_CORR]); in netem_change()
1027 get_reorder(q, tb[TCA_NETEM_REORDER]); in netem_change()
1030 get_corrupt(q, tb[TCA_NETEM_CORRUPT]); in netem_change()
1033 get_rate(q, tb[TCA_NETEM_RATE]); in netem_change()
1036 q->rate = max_t(u64, q->rate, in netem_change()
1040 q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]); in netem_change()
1043 q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]); in netem_change()
1046 q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]); in netem_change()
1049 get_slot(q, tb[TCA_NETEM_SLOT]); in netem_change()
1052 q->jitter = min_t(s64, abs(q->jitter), INT_MAX); in netem_change()
1066 struct netem_sched_data *q = qdisc_priv(sch); in netem_init() local
1069 qdisc_watchdog_init(&q->watchdog, sch); in netem_init()
1074 q->loss_model = CLG_RANDOM; in netem_init()
1083 struct netem_sched_data *q = qdisc_priv(sch); in netem_destroy() local
1085 qdisc_watchdog_cancel(&q->watchdog); in netem_destroy()
1086 if (q->qdisc) in netem_destroy()
1087 qdisc_put(q->qdisc); in netem_destroy()
1088 dist_free(q->delay_dist); in netem_destroy()
1089 dist_free(q->slot_dist); in netem_destroy()
1092 static int dump_loss_model(const struct netem_sched_data *q, in dump_loss_model() argument
1101 switch (q->loss_model) { in dump_loss_model()
1109 .p13 = q->clg.a1, in dump_loss_model()
1110 .p31 = q->clg.a2, in dump_loss_model()
1111 .p32 = q->clg.a3, in dump_loss_model()
1112 .p14 = q->clg.a4, in dump_loss_model()
1113 .p23 = q->clg.a5, in dump_loss_model()
1122 .p = q->clg.a1, in dump_loss_model()
1123 .r = q->clg.a2, in dump_loss_model()
1124 .h = q->clg.a3, in dump_loss_model()
1125 .k1 = q->clg.a4, in dump_loss_model()
1139 return -1; in dump_loss_model()
1144 const struct netem_sched_data *q = qdisc_priv(sch); in netem_dump() local
1153 qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency), in netem_dump()
1155 qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter), in netem_dump()
1157 qopt.limit = q->limit; in netem_dump()
1158 qopt.loss = q->loss; in netem_dump()
1159 qopt.gap = q->gap; in netem_dump()
1160 qopt.duplicate = q->duplicate; in netem_dump()
1164 if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency)) in netem_dump()
1167 if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter)) in netem_dump()
1170 cor.delay_corr = q->delay_cor.rho; in netem_dump()
1171 cor.loss_corr = q->loss_cor.rho; in netem_dump()
1172 cor.dup_corr = q->dup_cor.rho; in netem_dump()
1176 reorder.probability = q->reorder; in netem_dump()
1177 reorder.correlation = q->reorder_cor.rho; in netem_dump()
1181 corrupt.probability = q->corrupt; in netem_dump()
1182 corrupt.correlation = q->corrupt_cor.rho; in netem_dump()
1186 if (q->rate >= (1ULL << 32)) { in netem_dump()
1187 if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate, in netem_dump()
1192 rate.rate = q->rate; in netem_dump()
1194 rate.packet_overhead = q->packet_overhead; in netem_dump()
1195 rate.cell_size = q->cell_size; in netem_dump()
1196 rate.cell_overhead = q->cell_overhead; in netem_dump()
1200 if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn)) in netem_dump()
1203 if (dump_loss_model(q, skb) != 0) in netem_dump()
1206 if (q->slot_config.min_delay | q->slot_config.max_delay | in netem_dump()
1207 q->slot_config.dist_jitter) { in netem_dump()
1208 slot = q->slot_config; in netem_dump()
1221 return -1; in netem_dump()
1227 struct netem_sched_data *q = qdisc_priv(sch); in netem_dump_class() local
1229 if (cl != 1 || !q->qdisc) /* only one class */ in netem_dump_class()
1232 tcm->tcm_handle |= TC_H_MIN(1); in netem_dump_class()
1233 tcm->tcm_info = q->qdisc->handle; in netem_dump_class()
1241 struct netem_sched_data *q = qdisc_priv(sch); in netem_graft() local
1243 *old = qdisc_replace(sch, new, &q->qdisc); in netem_graft()
1249 struct netem_sched_data *q = qdisc_priv(sch); in netem_leaf() local
1250 return q->qdisc; in netem_leaf()
1255 return 1; in netem_find()
1262 if (walker->fn(sch, 1, walker) < 0) { in netem_walk()
1263 walker->stop = 1; in netem_walk()