Lines matching refs: q (identifier cross-reference, net/sched/sch_netem.c)

197 static bool loss_4state(struct netem_sched_data *q)  in loss_4state()  argument
199 struct clgstate *clg = &q->clg; in loss_4state()
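loss_4state() steps a four-state Markov chain over the transition probabilities parked in clg->a1..a5 (a1=p13, a2=p31, a3=p32, a4=p14, a5=p23): state 1 is good reception in a gap period, state 2 good reception in a burst, state 3 a burst loss, state 4 an isolated loss. A minimal userspace sketch of the same chain, assuming double probabilities and drand48() where the kernel compares prandom_u32() against pre-scaled thresholds; the names here are illustrative:

#include <stdbool.h>
#include <stdlib.h>

/* 1 = TX in gap, 2 = TX in burst, 3 = lost in burst, 4 = isolated loss */
enum m4_state { TX_GAP, TX_BURST, LOST_BURST, ISOLATED };

struct markov4 {
	enum m4_state state;
	double a1, a2, a3, a4, a5;       /* p13, p31, p32, p14, p23 */
};

/* Returns true when the current packet is to be dropped. */
static bool loss_4state_model(struct markov4 *m)
{
	double rnd = drand48();

	switch (m->state) {
	case TX_GAP:                     /* good packet in a gap period */
		if (rnd < m->a4) { m->state = ISOLATED;   return true; }
		if (rnd < m->a4 + m->a1) { m->state = LOST_BURST; return true; }
		return false;            /* stay in the gap period */
	case TX_BURST:                   /* good packet inside a burst */
		if (rnd < m->a5) { m->state = LOST_BURST; return true; }
		return false;
	case LOST_BURST:                 /* burst loss in progress */
		if (rnd < m->a3) { m->state = TX_BURST; return false; }
		if (rnd < m->a3 + m->a2) { m->state = TX_GAP; return false; }
		return true;             /* burst continues */
	case ISOLATED:                   /* isolated loss: always recover */
		m->state = TX_GAP;
		return false;
	}
	return false;
}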
262 static bool loss_gilb_ell(struct netem_sched_data *q) in loss_gilb_ell() argument
264 struct clgstate *clg = &q->clg; in loss_gilb_ell()
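loss_gilb_ell() is the two-state Gilbert-Elliott variant: a good state with rare losses and a bad state with frequent ones, a1..a4 holding the netlink parameters p (good-to-bad), r (bad-to-good), h (losses in the bad state occur with probability 1 - h) and k1 (loss probability in the good state). A sketch under the same assumptions as above:

#include <stdbool.h>
#include <stdlib.h>

enum ge_state { GOOD_STATE, BAD_STATE };

struct gilb_ell {
	enum ge_state state;
	double p, r, h, k1;              /* clg->a1 .. a4 */
};

static bool loss_gilb_ell_model(struct gilb_ell *g)
{
	switch (g->state) {
	case GOOD_STATE:
		if (drand48() < g->p)    /* slip into the bad state */
			g->state = BAD_STATE;
		return drand48() < g->k1;    /* occasional loss while good */
	case BAD_STATE:
		if (drand48() < g->r)    /* recover to the good state */
			g->state = GOOD_STATE;
		return drand48() > g->h;     /* lose with probability 1 - h */
	}
	return false;
}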
283 static bool loss_event(struct netem_sched_data *q) in loss_event() argument
285 switch (q->loss_model) { in loss_event()
288 return q->loss && q->loss >= get_crandom(&q->loss_cor); in loss_event()
296 return loss_4state(q); in loss_event()
304 return loss_gilb_ell(q); in loss_event()
342 static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q) in packet_len_2_sched_time() argument
346 len += q->packet_overhead; in packet_len_2_sched_time()
348 if (q->cell_size) { in packet_len_2_sched_time()
349 u32 cells = reciprocal_divide(len, q->cell_size_reciprocal); in packet_len_2_sched_time()
351 if (len > cells * q->cell_size) /* extra cell needed for remainder */ in packet_len_2_sched_time()
353 len = cells * (q->cell_size + q->cell_overhead); in packet_len_2_sched_time()
358 do_div(ticks, q->rate); in packet_len_2_sched_time()
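packet_len_2_sched_time() converts a packet length into time on the wire: add a fixed per-packet overhead, optionally round up to whole cells that each carry their own overhead (modelling ATM-style framing), then divide by the byte rate. The kernel divides by cell_size with the reciprocal precomputed in get_rate() (sketched further down); here a plain division in nanoseconds, with illustrative names:

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

struct rate_cfg {
	uint64_t rate;           /* bytes per second, non-zero */
	int packet_overhead;     /* bytes added to every packet */
	uint32_t cell_size;      /* 0 = no cell framing */
	int cell_overhead;       /* bytes added to every cell */
};

static uint64_t packet_time_ns(unsigned int len, const struct rate_cfg *r)
{
	len += r->packet_overhead;

	if (r->cell_size) {
		uint32_t cells = len / r->cell_size;

		if (len > cells * r->cell_size)  /* extra cell for remainder */
			cells++;
		len = cells * (r->cell_size + r->cell_overhead);
	}

	return (uint64_t)len * NSEC_PER_SEC / r->rate;
}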
364 struct netem_sched_data *q = qdisc_priv(sch); in tfifo_reset() local
367 while ((p = rb_first(&q->t_root))) { in tfifo_reset()
370 rb_erase(p, &q->t_root); in tfifo_reset()
379 struct netem_sched_data *q = qdisc_priv(sch); in tfifo_enqueue() local
381 struct rb_node **p = &q->t_root.rb_node, *parent = NULL; in tfifo_enqueue()
394 rb_insert_color(&nskb->rbnode, &q->t_root); in tfifo_enqueue()
395 sch->q.qlen++; in tfifo_enqueue()
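tfifo_enqueue() keeps delayed packets in an rbtree ordered by time_to_send, walking right on ties so packets with equal deadlines stay in arrival order. A sketch of the same invariant over a sorted singly linked list (the kernel's rbtree gives O(log n) insertion; this list is O(n) but shows the comparison):

#include <stdint.h>
#include <stddef.h>

struct dpkt {
	uint64_t time_to_send;
	struct dpkt *next;
};

static void tfifo_insert(struct dpkt **head, struct dpkt *nskb)
{
	struct dpkt **p = head;

	/* skip every packet due at or before ours, so equal
	 * deadlines keep their FIFO order */
	while (*p && nskb->time_to_send >= (*p)->time_to_send)
		p = &(*p)->next;

	nskb->next = *p;
	*p = nskb;
}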
425 struct netem_sched_data *q = qdisc_priv(sch); in netem_enqueue() local
436 if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) in netem_enqueue()
440 if (loss_event(q)) { in netem_enqueue()
441 if (q->ecn && INET_ECN_set_ce(skb)) in netem_enqueue()
455 if (q->latency || q->jitter) in netem_enqueue()
465 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ in netem_enqueue()
467 q->duplicate = 0; in netem_enqueue()
469 q->duplicate = dupsave; in netem_enqueue()
478 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { in netem_enqueue()
501 if (unlikely(skb_queue_len(&sch->q) >= sch->limit)) in netem_enqueue()
507 if (q->gap == 0 || /* not doing reordering */ in netem_enqueue()
508 q->counter < q->gap - 1 || /* inside last reordering gap */ in netem_enqueue()
509 q->reorder < get_crandom(&q->reorder_cor)) { in netem_enqueue()
513 delay = tabledist(q->latency, q->jitter, in netem_enqueue()
514 &q->delay_cor, q->delay_dist); in netem_enqueue()
518 if (q->rate) { in netem_enqueue()
521 if (!skb_queue_empty(&sch->q)) in netem_enqueue()
522 last = skb_peek_tail(&sch->q); in netem_enqueue()
524 last = netem_rb_to_skb(rb_last(&q->t_root)); in netem_enqueue()
536 delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q); in netem_enqueue()
541 ++q->counter; in netem_enqueue()
549 q->counter = 0; in netem_enqueue()
551 __skb_queue_head(&sch->q, skb); in netem_enqueue()
572 sch->q.qlen += nb; in netem_enqueue()
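The test at lines 507-509 is the whole reordering policy: with gap set, every gap-th packet becomes a reorder candidate, and q->reorder is the probability that the candidate actually skips the delay queue (it is then queued at the head with time_to_send = now, lines 549-551). A sketch of just that decision, assuming a plain random source where the kernel uses the correlated get_crandom() (sketched below):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct reorder_cfg {
	uint32_t gap;        /* 0 disables reordering */
	uint32_t counter;    /* packets since the last reorder */
	double reorder;      /* probability a candidate is reordered */
};

/* true => delay the packet normally; false => send it immediately,
 * ahead of everything already waiting in the delay queue */
static bool should_delay(struct reorder_cfg *c)
{
	if (c->gap == 0 ||                /* not doing reordering */
	    c->counter < c->gap - 1 ||    /* still inside the gap */
	    drand48() > c->reorder) {     /* candidate, but not picked */
		c->counter++;
		return true;
	}
	c->counter = 0;                   /* reordered: restart the gap */
	return false;
}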
581 struct netem_sched_data *q = qdisc_priv(sch); in netem_drop() local
587 struct rb_node *p = rb_first(&q->t_root); in netem_drop()
592 rb_erase(p, &q->t_root); in netem_drop()
593 sch->q.qlen--; in netem_drop()
600 if (!len && q->qdisc && q->qdisc->ops->drop) in netem_drop()
601 len = q->qdisc->ops->drop(q->qdisc); in netem_drop()
610 struct netem_sched_data *q = qdisc_priv(sch); in netem_dequeue() local
618 skb = __skb_dequeue(&sch->q); in netem_dequeue()
626 p = rb_first(&q->t_root); in netem_dequeue()
635 rb_erase(p, &q->t_root); in netem_dequeue()
637 sch->q.qlen--; in netem_dequeue()
652 if (q->qdisc) { in netem_dequeue()
653 int err = qdisc_enqueue(skb, q->qdisc); in netem_dequeue()
667 if (q->qdisc) { in netem_dequeue()
668 skb = q->qdisc->ops->dequeue(q->qdisc); in netem_dequeue()
672 qdisc_watchdog_schedule(&q->watchdog, time_to_send); in netem_dequeue()
675 if (q->qdisc) { in netem_dequeue()
676 skb = q->qdisc->ops->dequeue(q->qdisc); in netem_dequeue()
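netem_dequeue() first drains packets queued directly on sch->q (the reordered ones), then peeks the earliest rbtree entry: if its time_to_send has arrived it is released (or fed to the child qdisc, lines 652-653), otherwise qdisc_watchdog_schedule() arms a timer for that deadline and nothing is returned. A sketch of the core deadline check, restating the sorted-list stand-in from the tfifo sketch and leaving out the child-qdisc plumbing:

#include <stdint.h>
#include <stddef.h>

struct dpkt {
	uint64_t time_to_send;
	struct dpkt *next;
};

/* head is sorted by time_to_send, so *head is the earliest packet.
 * Returns a packet once it is due; otherwise reports the deadline to
 * sleep until via *wake_at, mirroring qdisc_watchdog_schedule(). */
static struct dpkt *dequeue_sketch(struct dpkt **head, uint64_t now,
				   uint64_t *wake_at)
{
	struct dpkt *p = *head;

	if (!p)
		return NULL;             /* nothing queued */

	if (p->time_to_send <= now) {
		*head = p->next;         /* due: hand the packet out */
		return p;
	}

	*wake_at = p->time_to_send;      /* not yet: arm the watchdog */
	return NULL;
}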
685 struct netem_sched_data *q = qdisc_priv(sch); in netem_reset() local
689 if (q->qdisc) in netem_reset()
690 qdisc_reset(q->qdisc); in netem_reset()
691 qdisc_watchdog_cancel(&q->watchdog); in netem_reset()
705 struct netem_sched_data *q = qdisc_priv(sch); in get_dist_table() local
730 swap(q->delay_dist, d); in get_dist_table()
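get_dist_table() copies a user-supplied table of signed 16-bit samples of an inverse CDF, scaled by NETEM_DIST_SCALE (8192), into q->delay_dist; tabledist() (called at lines 513-514) then draws a delay as roughly mu + sigma * t / SCALE for a random table entry t, falling back to a uniform spread when no table is loaded. A simplified sketch that skips the kernel's correlated randomness and its careful split-and-round arithmetic:

#include <stdint.h>
#include <stdlib.h>

#define DIST_SCALE 8192               /* NETEM_DIST_SCALE in the kernel */

struct disttable {
	uint32_t size;
	int16_t *table;               /* inverse-CDF samples, x DIST_SCALE */
};

static int64_t tabledist_sketch(int64_t mu, int64_t sigma,
				const struct disttable *dist)
{
	if (sigma == 0)
		return mu;

	if (!dist)   /* default: uniform in [mu - sigma, mu + sigma) */
		return mu - sigma + (int64_t)(drand48() * 2 * sigma);

	/* map a uniform draw through the stored distribution,
	 * scaling the sample by sigma */
	int16_t t = dist->table[rand() % dist->size];
	return mu + sigma * t / DIST_SCALE;
}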
737 static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr) in get_correlation() argument
741 init_crandom(&q->delay_cor, c->delay_corr); in get_correlation()
742 init_crandom(&q->loss_cor, c->loss_corr); in get_correlation()
743 init_crandom(&q->dup_cor, c->dup_corr); in get_correlation()
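The correlation values loaded here feed init_crandom()/get_crandom(): each stream keeps its previous output and blends it with a fresh draw, weighted by rho, giving a first-order autoregressive sequence so that, for example, losses arrive in clumps rather than independently. A sketch of the kernel's fixed-point scheme, with mrand48() standing in for prandom_u32():

#include <stdint.h>
#include <stdlib.h>

struct crndstate {
	uint32_t last;
	uint32_t rho;                 /* correlation, 0 .. UINT32_MAX */
};

static uint32_t rnd32(void)
{
	return (uint32_t)mrand48();   /* 32 random bits */
}

static uint32_t get_crandom_sketch(struct crndstate *state)
{
	uint64_t fresh, rho;

	if (state->rho == 0)          /* no correlation configured */
		return rnd32();

	fresh = rnd32();
	rho = (uint64_t)state->rho + 1;
	/* fixed-point weighted average of the previous output
	 * and the fresh draw */
	state->last = (fresh * ((1ULL << 32) - rho) +
		       (uint64_t)state->last * rho) >> 32;
	return state->last;
}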
746 static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr) in get_reorder() argument
750 q->reorder = r->probability; in get_reorder()
751 init_crandom(&q->reorder_cor, r->correlation); in get_reorder()
754 static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr) in get_corrupt() argument
758 q->corrupt = r->probability; in get_corrupt()
759 init_crandom(&q->corrupt_cor, r->correlation); in get_corrupt()
762 static void get_rate(struct netem_sched_data *q, const struct nlattr *attr) in get_rate() argument
766 q->rate = r->rate; in get_rate()
767 q->packet_overhead = r->packet_overhead; in get_rate()
768 q->cell_size = r->cell_size; in get_rate()
769 q->cell_overhead = r->cell_overhead; in get_rate()
770 if (q->cell_size) in get_rate()
771 q->cell_size_reciprocal = reciprocal_value(q->cell_size); in get_rate()
773 q->cell_size_reciprocal = (struct reciprocal_value) { 0 }; in get_rate()
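The reciprocal precomputed here lets packet_len_2_sched_time() (line 349) divide by cell_size on the hot path with a multiply and two shifts instead of a hardware divide. A sketch of the multiply-shift scheme along the lines of the kernel's lib/reciprocal_div.c, assuming a divisor greater than 1:

#include <stdint.h>

struct recip {
	uint32_t m;                   /* magic multiplier */
	uint8_t sh1, sh2;             /* post-shift amounts */
};

/* Precompute once per divisor d (d > 1). */
static struct recip recip_value(uint32_t d)
{
	struct recip R;
	int l = 32 - __builtin_clz(d - 1);                 /* ceil(log2(d)) */
	uint64_t m = ((1ULL << 32) * ((1ULL << l) - d)) / d + 1;

	R.m = (uint32_t)m;
	R.sh1 = l < 1 ? l : 1;        /* min(l, 1) */
	R.sh2 = l > 1 ? l - 1 : 0;    /* max(l - 1, 0) */
	return R;
}

/* a / d using only a multiply and shifts. */
static uint32_t recip_divide(uint32_t a, struct recip R)
{
	uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

	return (t + ((a - t) >> R.sh1)) >> R.sh2;
}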
776 static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr) in get_loss_clg() argument
793 q->loss_model = CLG_4_STATES; in get_loss_clg()
795 q->clg.state = TX_IN_GAP_PERIOD; in get_loss_clg()
796 q->clg.a1 = gi->p13; in get_loss_clg()
797 q->clg.a2 = gi->p31; in get_loss_clg()
798 q->clg.a3 = gi->p32; in get_loss_clg()
799 q->clg.a4 = gi->p14; in get_loss_clg()
800 q->clg.a5 = gi->p23; in get_loss_clg()
812 q->loss_model = CLG_GILB_ELL; in get_loss_clg()
813 q->clg.state = GOOD_STATE; in get_loss_clg()
814 q->clg.a1 = ge->p; in get_loss_clg()
815 q->clg.a2 = ge->r; in get_loss_clg()
816 q->clg.a3 = ge->h; in get_loss_clg()
817 q->clg.a4 = ge->k1; in get_loss_clg()
861 struct netem_sched_data *q = qdisc_priv(sch); in netem_change() local
877 old_clg = q->clg; in netem_change()
878 old_loss_model = q->loss_model; in netem_change()
881 ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]); in netem_change()
883 q->loss_model = old_loss_model; in netem_change()
887 q->loss_model = CLG_RANDOM; in netem_change()
897 q->clg = old_clg; in netem_change()
898 q->loss_model = old_loss_model; in netem_change()
905 q->latency = qopt->latency; in netem_change()
906 q->jitter = qopt->jitter; in netem_change()
907 q->limit = qopt->limit; in netem_change()
908 q->gap = qopt->gap; in netem_change()
909 q->counter = 0; in netem_change()
910 q->loss = qopt->loss; in netem_change()
911 q->duplicate = qopt->duplicate; in netem_change()
916 if (q->gap) in netem_change()
917 q->reorder = ~0; in netem_change()
920 get_correlation(q, tb[TCA_NETEM_CORR]); in netem_change()
923 get_reorder(q, tb[TCA_NETEM_REORDER]); in netem_change()
926 get_corrupt(q, tb[TCA_NETEM_CORRUPT]); in netem_change()
929 get_rate(q, tb[TCA_NETEM_RATE]); in netem_change()
932 q->rate = max_t(u64, q->rate, in netem_change()
936 q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]); in netem_change()
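Note the snapshot-and-restore pattern at lines 877-898: netem_change() saves q->clg and q->loss_model before parsing TCA_NETEM_LOSS, and restores both if that or a later attribute fails, so a rejected netlink message leaves the qdisc exactly as it was. The shape of the pattern, with a hypothetical parse_loss() standing in for get_loss_clg():

struct clg_cfg {
	int loss_model;
	double a1, a2, a3, a4, a5;
};

/* hypothetical stand-in for get_loss_clg(): may update q, may fail */
static int parse_loss(struct clg_cfg *q, const void *attr)
{
	if (!attr)
		return -1;            /* pretend the payload was malformed */
	q->loss_model = 1;            /* ... install the new model ... */
	return 0;
}

static int change_sketch(struct clg_cfg *q, const void *attr)
{
	struct clg_cfg old = *q;      /* snapshot before mutating */

	if (parse_loss(q, attr)) {
		*q = old;             /* parse failed: roll back */
		return -1;
	}
	return 0;
}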
943 struct netem_sched_data *q = qdisc_priv(sch); in netem_init() local
949 qdisc_watchdog_init(&q->watchdog, sch); in netem_init()
951 q->loss_model = CLG_RANDOM; in netem_init()
960 struct netem_sched_data *q = qdisc_priv(sch); in netem_destroy() local
962 qdisc_watchdog_cancel(&q->watchdog); in netem_destroy()
963 if (q->qdisc) in netem_destroy()
964 qdisc_destroy(q->qdisc); in netem_destroy()
965 dist_free(q->delay_dist); in netem_destroy()
968 static int dump_loss_model(const struct netem_sched_data *q, in dump_loss_model() argument
977 switch (q->loss_model) { in dump_loss_model()
985 .p13 = q->clg.a1, in dump_loss_model()
986 .p31 = q->clg.a2, in dump_loss_model()
987 .p32 = q->clg.a3, in dump_loss_model()
988 .p14 = q->clg.a4, in dump_loss_model()
989 .p23 = q->clg.a5, in dump_loss_model()
998 .p = q->clg.a1, in dump_loss_model()
999 .r = q->clg.a2, in dump_loss_model()
1000 .h = q->clg.a3, in dump_loss_model()
1001 .k1 = q->clg.a4, in dump_loss_model()
1020 const struct netem_sched_data *q = qdisc_priv(sch); in netem_dump() local
1028 qopt.latency = q->latency; in netem_dump()
1029 qopt.jitter = q->jitter; in netem_dump()
1030 qopt.limit = q->limit; in netem_dump()
1031 qopt.loss = q->loss; in netem_dump()
1032 qopt.gap = q->gap; in netem_dump()
1033 qopt.duplicate = q->duplicate; in netem_dump()
1037 cor.delay_corr = q->delay_cor.rho; in netem_dump()
1038 cor.loss_corr = q->loss_cor.rho; in netem_dump()
1039 cor.dup_corr = q->dup_cor.rho; in netem_dump()
1043 reorder.probability = q->reorder; in netem_dump()
1044 reorder.correlation = q->reorder_cor.rho; in netem_dump()
1048 corrupt.probability = q->corrupt; in netem_dump()
1049 corrupt.correlation = q->corrupt_cor.rho; in netem_dump()
1053 if (q->rate >= (1ULL << 32)) { in netem_dump()
1054 if (nla_put_u64(skb, TCA_NETEM_RATE64, q->rate)) in netem_dump()
1058 rate.rate = q->rate; in netem_dump()
1060 rate.packet_overhead = q->packet_overhead; in netem_dump()
1061 rate.cell_size = q->cell_size; in netem_dump()
1062 rate.cell_overhead = q->cell_overhead; in netem_dump()
1066 if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn)) in netem_dump()
1069 if (dump_loss_model(q, skb) != 0) in netem_dump()
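One detail worth noting in the dump path: the legacy struct tc_netem_rate carries the rate in a 32-bit field, so when the configured rate no longer fits (line 1053), netem_dump() emits a separate 64-bit TCA_NETEM_RATE64 attribute and saturates the legacy field. A sketch of that pattern with hypothetical put_u32()/put_u64() emitters standing in for nla_put_u32()/nla_put_u64():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical attribute emitters; the real code appends netlink TLVs */
static bool put_u32(const char *attr, uint32_t v)
{
	return printf("%s=%u\n", attr, v) > 0;
}

static bool put_u64(const char *attr, uint64_t v)
{
	return printf("%s=%llu\n", attr, (unsigned long long)v) > 0;
}

static bool dump_rate(uint64_t rate)
{
	if (rate >= (1ULL << 32)) {
		if (!put_u64("TCA_NETEM_RATE64", rate))
			return false;        /* message buffer full */
		rate = UINT32_MAX;           /* saturate the 32-bit field */
	}
	return put_u32("TCA_NETEM_RATE", (uint32_t)rate);
}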
1082 struct netem_sched_data *q = qdisc_priv(sch); in netem_dump_class() local
1084 if (cl != 1 || !q->qdisc) /* only one class */ in netem_dump_class()
1088 tcm->tcm_info = q->qdisc->handle; in netem_dump_class()
1096 struct netem_sched_data *q = qdisc_priv(sch); in netem_graft() local
1098 *old = qdisc_replace(sch, new, &q->qdisc); in netem_graft()
1104 struct netem_sched_data *q = qdisc_priv(sch); in netem_leaf() local
1105 return q->qdisc; in netem_leaf()