Lines matching references to q

129 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)  in fq_flow_set_throttled()  argument
131 struct rb_node **p = &q->delayed.rb_node, *parent = NULL; in fq_flow_set_throttled()
144 rb_insert_color(&f->rate_node, &q->delayed); in fq_flow_set_throttled()
145 q->throttled_flows++; in fq_flow_set_throttled()
146 q->stat_throttled++; in fq_flow_set_throttled()
149 if (q->time_next_delayed_flow > f->time_next_packet) in fq_flow_set_throttled()
150 q->time_next_delayed_flow = f->time_next_packet; in fq_flow_set_throttled()
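
The lines above insert a rate-limited flow into q->delayed (kept ordered by time_next_packet) and remember the earliest deadline in q->time_next_delayed_flow. A minimal user-space sketch of that bookkeeping, using a sorted singly linked list where the kernel uses an rbtree (all model_* names are invented for illustration):

#include <stdint.h>

struct model_flow {
        uint64_t time_next_packet;      /* when this flow may send again */
        struct model_flow *next;        /* delayed list, sorted by deadline */
};

struct model_sched {
        struct model_flow *delayed;             /* flows waiting for their deadline */
        uint64_t time_next_delayed_flow;        /* min deadline over delayed flows */
        unsigned long throttled_flows;
};

static void model_flow_set_throttled(struct model_sched *q, struct model_flow *f)
{
        struct model_flow **p = &q->delayed;

        /* keep the container ordered by deadline (the kernel uses an rbtree) */
        while (*p && (*p)->time_next_packet <= f->time_next_packet)
                p = &(*p)->next;
        f->next = *p;
        *p = f;

        q->throttled_flows++;
        /* remember the earliest deadline so the watchdog fires no later than it */
        if (q->time_next_delayed_flow > f->time_next_packet)
                q->time_next_delayed_flow = f->time_next_packet;
}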
176 static void fq_gc(struct fq_sched_data *q, in fq_gc() argument
205 q->flows -= fcnt; in fq_gc()
206 q->inactive_flows -= fcnt; in fq_gc()
207 q->stat_gc_flows += fcnt; in fq_gc()
216 static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) in fq_classify() argument
225 return &q->internal; in fq_classify()
237 unsigned long hash = skb_get_hash(skb) & q->orphan_mask; in fq_classify()
246 root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)]; in fq_classify()
248 if (q->flows >= (2U << q->fq_trees_log) && in fq_classify()
249 q->inactive_flows > q->flows/2) in fq_classify()
250 fq_gc(q, root, sk); in fq_classify()
266 f->credit = q->initial_quantum; in fq_classify()
280 q->stat_allocation_errors++; in fq_classify()
281 return &q->internal; in fq_classify()
287 f->credit = q->initial_quantum; in fq_classify()
292 q->flows++; in fq_classify()
293 q->inactive_flows++; in fq_classify()
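
fq_classify() maps each packet to a flow: packets without an owning socket are folded onto a small pseudo-socket space with q->orphan_mask, the (pseudo) socket value is hashed into one of 2^fq_trees_log rbtree buckets, and fq_gc() is run on that bucket once the flow count exceeds twice the bucket count with more than half of the flows inactive. A rough sketch of just the bucket selection, assuming a hash_32()-style multiplicative hash (the model_* names are invented):

#include <stdint.h>

/* multiplicative constant in the style of the kernel's hash_32() */
#define MODEL_HASH_MULT 0x61C88647u

static uint32_t model_hash_32(uint32_t val, unsigned int bits)
{
        return (val * MODEL_HASH_MULT) >> (32 - bits);
}

/*
 * Pick one of 2^trees_log buckets for a packet. Orphan packets (no owning
 * socket) first get a fake odd "socket" value derived from the skb hash and
 * orphan_mask, roughly as the kernel does, so they share a bounded number of
 * pseudo flows.
 */
static unsigned int model_pick_bucket(uintptr_t sk, uint32_t skb_hash,
                                      uint32_t orphan_mask, unsigned int trees_log)
{
        if (!sk)
                sk = ((uintptr_t)(skb_hash & orphan_mask) << 1) | 1;

        return model_hash_32((uint32_t)sk, trees_log);
}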
308 sch->q.qlen--; in fq_dequeue_head()
373 struct fq_sched_data *q = qdisc_priv(sch); in fq_enqueue() local
376 if (unlikely(sch->q.qlen >= sch->limit)) in fq_enqueue()
379 f = fq_classify(skb, q); in fq_enqueue()
380 if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) { in fq_enqueue()
381 q->stat_flows_plimit++; in fq_enqueue()
387 q->stat_tcp_retrans++; in fq_enqueue()
390 fq_flow_add_tail(&q->new_flows, f); in fq_enqueue()
391 if (time_after(jiffies, f->age + q->flow_refill_delay)) in fq_enqueue()
392 f->credit = max_t(u32, f->credit, q->quantum); in fq_enqueue()
393 q->inactive_flows--; in fq_enqueue()
399 if (unlikely(f == &q->internal)) { in fq_enqueue()
400 q->stat_internal_packets++; in fq_enqueue()
402 sch->q.qlen++; in fq_enqueue()
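
The enqueue path drops when the qdisc already holds sch->limit packets, or when the target flow (other than the high-priority q->internal flow) already holds flow_plimit packets; a flow that was detached and idle for longer than flow_refill_delay gets its credit topped back up to one quantum when it rejoins the new-flows list. A simplified, socket-free sketch of that decision logic, ignoring the internal flow (names invented):

#include <stdbool.h>

struct model_enqueue_ctx {
        unsigned int qdisc_len, qdisc_limit;    /* packets queued in the qdisc / cap */
        unsigned int flow_len, flow_plimit;     /* packets queued in this flow / cap */
        bool flow_detached;                     /* flow currently on no scheduling list */
        bool idle_past_refill_delay;            /* idle longer than flow_refill_delay */
        int credit, quantum;
};

/*
 * Returns true if the packet is accepted. A flow that was idle long enough
 * has its credit restored to at least one quantum, so it restarts like a
 * fresh flow instead of carrying an old deficit.
 */
static bool model_enqueue(struct model_enqueue_ctx *c)
{
        if (c->qdisc_len >= c->qdisc_limit)
                return false;                   /* qdisc-wide drop */
        if (c->flow_len >= c->flow_plimit)
                return false;                   /* per-flow drop (stat_flows_plimit) */

        if (c->flow_detached && c->idle_past_refill_delay && c->credit < c->quantum)
                c->credit = c->quantum;         /* credit refill after idle period */

        c->flow_len++;
        c->qdisc_len++;
        return true;
}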
407 static void fq_check_throttled(struct fq_sched_data *q, u64 now) in fq_check_throttled() argument
411 if (q->time_next_delayed_flow > now) in fq_check_throttled()
414 q->time_next_delayed_flow = ~0ULL; in fq_check_throttled()
415 while ((p = rb_first(&q->delayed)) != NULL) { in fq_check_throttled()
419 q->time_next_delayed_flow = f->time_next_packet; in fq_check_throttled()
422 rb_erase(p, &q->delayed); in fq_check_throttled()
423 q->throttled_flows--; in fq_check_throttled()
424 fq_flow_add_tail(&q->old_flows, f); in fq_check_throttled()
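
fq_check_throttled() releases every delayed flow whose deadline has passed back onto the old-flows list and records the earliest deadline that is still pending. The same toy types as in the throttling sketch above, extended with an old-flows list (names invented; the kernel appends to the tail of old_flows, the toy simply pushes):

#include <stdint.h>
#include <stddef.h>

struct model_flow {
        uint64_t time_next_packet;
        struct model_flow *next;        /* delayed list, sorted by deadline */
};

struct model_sched {
        struct model_flow *delayed;
        struct model_flow *old_flows;   /* flows ready to be served again */
        uint64_t time_next_delayed_flow;
        unsigned long throttled_flows;
};

/*
 * Move every flow whose pacing deadline has passed back onto the old-flows
 * list; the first still-pending deadline becomes the next watchdog target.
 */
static void model_check_throttled(struct model_sched *q, uint64_t now)
{
        struct model_flow *f;

        if (q->time_next_delayed_flow > now)
                return;                 /* nothing can be due yet */

        q->time_next_delayed_flow = ~0ULL;
        while ((f = q->delayed) != NULL) {
                if (f->time_next_packet > now) {
                        q->time_next_delayed_flow = f->time_next_packet;
                        break;
                }
                q->delayed = f->next;
                q->throttled_flows--;
                f->next = q->old_flows;
                q->old_flows = f;
        }
}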
430 struct fq_sched_data *q = qdisc_priv(sch); in fq_dequeue() local
437 skb = fq_dequeue_head(sch, &q->internal); in fq_dequeue()
440 fq_check_throttled(q, now); in fq_dequeue()
442 head = &q->new_flows; in fq_dequeue()
444 head = &q->old_flows; in fq_dequeue()
446 if (q->time_next_delayed_flow != ~0ULL) in fq_dequeue()
447 qdisc_watchdog_schedule_ns(&q->watchdog, in fq_dequeue()
448 q->time_next_delayed_flow, in fq_dequeue()
456 f->credit += q->quantum; in fq_dequeue()
458 fq_flow_add_tail(&q->old_flows, f); in fq_dequeue()
466 fq_flow_set_throttled(q, f); in fq_dequeue()
474 if ((head == &q->new_flows) && q->old_flows.first) { in fq_dequeue()
475 fq_flow_add_tail(&q->old_flows, f); in fq_dequeue()
478 q->inactive_flows++; in fq_dequeue()
485 if (f->credit > 0 || !q->rate_enable) in fq_dequeue()
492 rate = q->flow_max_rate; in fq_dequeue()
497 u32 plen = max(qdisc_pkt_len(skb), q->quantum); in fq_dequeue()
508 q->stat_pkts_too_long++; in fq_dequeue()
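
On dequeue, a flow whose credit has run out is recharged with one quantum and rotated onto the old-flows list; otherwise, when pacing is enabled, the flow's rate is bounded by q->flow_max_rate, each transmitted packet is charged at least one quantum of bytes, and the resulting delay sets the flow's next allowed transmit time, with overly long delays clamped and counted via stat_pkts_too_long. A sketch of just the delay arithmetic, with an assumed one-second clamp (names invented):

#include <stdint.h>

#define MODEL_NSEC_PER_SEC 1000000000ull

/*
 * Pacing delay charged to a flow after sending one packet, in nanoseconds.
 * Packets shorter than one quantum are charged a full quantum so that tiny
 * segments cannot inflate the flow's packet rate.
 */
static uint64_t model_pacing_delay_ns(uint32_t pkt_len, uint32_t quantum,
                                      uint64_t rate_bytes_per_sec,
                                      unsigned long *pkts_too_long)
{
        uint64_t plen = pkt_len > quantum ? pkt_len : quantum;
        uint64_t delay;

        if (!rate_bytes_per_sec)
                return 0;

        delay = plen * MODEL_NSEC_PER_SEC / rate_bytes_per_sec;
        if (delay > MODEL_NSEC_PER_SEC) {
                /* absurdly low rate: clamp the delay and count the event */
                delay = MODEL_NSEC_PER_SEC;
                (*pkts_too_long)++;
        }
        return delay;
}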
520 struct fq_sched_data *q = qdisc_priv(sch); in fq_reset() local
527 while ((skb = fq_dequeue_head(sch, &q->internal)) != NULL) in fq_reset()
530 if (!q->fq_root) in fq_reset()
533 for (idx = 0; idx < (1U << q->fq_trees_log); idx++) { in fq_reset()
534 root = &q->fq_root[idx]; in fq_reset()
545 q->new_flows.first = NULL; in fq_reset()
546 q->old_flows.first = NULL; in fq_reset()
547 q->delayed = RB_ROOT; in fq_reset()
548 q->flows = 0; in fq_reset()
549 q->inactive_flows = 0; in fq_reset()
550 q->throttled_flows = 0; in fq_reset()
553 static void fq_rehash(struct fq_sched_data *q, in fq_rehash() argument
593 q->flows -= fcnt; in fq_rehash()
594 q->inactive_flows -= fcnt; in fq_rehash()
595 q->stat_gc_flows += fcnt; in fq_rehash()
615 struct fq_sched_data *q = qdisc_priv(sch); in fq_resize() local
620 if (q->fq_root && log == q->fq_trees_log) in fq_resize()
634 old_fq_root = q->fq_root; in fq_resize()
636 fq_rehash(q, old_fq_root, q->fq_trees_log, array, log); in fq_resize()
638 q->fq_root = array; in fq_resize()
639 q->fq_trees_log = log; in fq_resize()
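
fq_resize() is a no-op when the requested buckets log matches the current one; otherwise it allocates a new array of 2^log tree roots, fq_rehash() moves every flow into the new array, and the old array is freed. A user-space analogue using linked-list buckets instead of rbtrees (model_* names invented, hash constant as in the earlier sketch):

#include <stdint.h>
#include <stdlib.h>

struct model_hflow {
        uintptr_t sk;                   /* socket the flow is keyed on */
        struct model_hflow *next;
};

struct model_htable {
        struct model_hflow **buckets;   /* 2^log singly linked lists */
        unsigned int log;
};

static unsigned int model_bucket_hash(uintptr_t sk, unsigned int log)
{
        return ((uint32_t)sk * 0x61C88647u) >> (32 - log);
}

/*
 * Grow (or shrink) the bucket array to 2^new_log and rehash every flow,
 * mirroring what fq_resize()/fq_rehash() do with arrays of rbtree roots.
 */
static int model_resize(struct model_htable *t, unsigned int new_log)
{
        struct model_hflow **nb, *f, *next;
        unsigned int i, b;

        if (t->buckets && new_log == t->log)
                return 0;               /* nothing to do */

        nb = calloc(1u << new_log, sizeof(*nb));
        if (!nb)
                return -1;

        for (i = 0; t->buckets && i < (1u << t->log); i++) {
                for (f = t->buckets[i]; f; f = next) {
                        next = f->next;
                        b = model_bucket_hash(f->sk, new_log);
                        f->next = nb[b];
                        nb[b] = f;
                }
        }
        free(t->buckets);
        t->buckets = nb;
        t->log = new_log;
        return 0;
}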
662 struct fq_sched_data *q = qdisc_priv(sch); in fq_change() local
677 fq_log = q->fq_trees_log; in fq_change()
691 q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]); in fq_change()
697 q->quantum = quantum; in fq_change()
703 q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); in fq_change()
710 q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]); in fq_change()
716 q->rate_enable = enable; in fq_change()
724 q->flow_refill_delay = usecs_to_jiffies(usecs_delay); in fq_change()
728 q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]); in fq_change()
735 while (sch->q.qlen > sch->limit) { in fq_change()
752 struct fq_sched_data *q = qdisc_priv(sch); in fq_destroy() local
755 fq_free(q->fq_root); in fq_destroy()
756 qdisc_watchdog_cancel(&q->watchdog); in fq_destroy()
761 struct fq_sched_data *q = qdisc_priv(sch); in fq_init() local
765 q->flow_plimit = 100; in fq_init()
766 q->quantum = 2 * psched_mtu(qdisc_dev(sch)); in fq_init()
767 q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch)); in fq_init()
768 q->flow_refill_delay = msecs_to_jiffies(40); in fq_init()
769 q->flow_max_rate = ~0U; in fq_init()
770 q->rate_enable = 1; in fq_init()
771 q->new_flows.first = NULL; in fq_init()
772 q->old_flows.first = NULL; in fq_init()
773 q->delayed = RB_ROOT; in fq_init()
774 q->fq_root = NULL; in fq_init()
775 q->fq_trees_log = ilog2(1024); in fq_init()
776 q->orphan_mask = 1024 - 1; in fq_init()
777 qdisc_watchdog_init(&q->watchdog, sch); in fq_init()
782 err = fq_resize(sch, q->fq_trees_log); in fq_init()
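
For scale, with the defaults above on a typical Ethernet device (psched_mtu() around 1514 bytes including the link-layer header), quantum comes out to roughly 3028 bytes and initial_quantum to roughly 15140 bytes, while the hash table starts with 2^10 = 1024 buckets and an orphan_mask of 1023; per-flow packet limit is 100, refill delay 40 ms, pacing is enabled, and flow_max_rate is effectively unlimited (~0U).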
789 struct fq_sched_data *q = qdisc_priv(sch); in fq_dump() local
799 nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) || in fq_dump()
800 nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) || in fq_dump()
801 nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) || in fq_dump()
802 nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) || in fq_dump()
803 nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) || in fq_dump()
805 jiffies_to_usecs(q->flow_refill_delay)) || in fq_dump()
806 nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) || in fq_dump()
807 nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log)) in fq_dump()
818 struct fq_sched_data *q = qdisc_priv(sch); in fq_dump_stats() local
821 .gc_flows = q->stat_gc_flows, in fq_dump_stats()
822 .highprio_packets = q->stat_internal_packets, in fq_dump_stats()
823 .tcp_retrans = q->stat_tcp_retrans, in fq_dump_stats()
824 .throttled = q->stat_throttled, in fq_dump_stats()
825 .flows_plimit = q->stat_flows_plimit, in fq_dump_stats()
826 .pkts_too_long = q->stat_pkts_too_long, in fq_dump_stats()
827 .allocation_errors = q->stat_allocation_errors, in fq_dump_stats()
828 .flows = q->flows, in fq_dump_stats()
829 .inactive_flows = q->inactive_flows, in fq_dump_stats()
830 .throttled_flows = q->throttled_flows, in fq_dump_stats()
831 .time_next_delayed_flow = q->time_next_delayed_flow - now, in fq_dump_stats()