Lines matching refs: q (identifier cross-reference, net/sched/sch_fq_codel.c)
69 static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q, in fq_codel_hash() argument
72 u32 hash = skb_get_hash_perturb(skb, q->perturbation); in fq_codel_hash()
74 return reciprocal_scale(hash, q->flows_cnt); in fq_codel_hash()
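fq_codel_hash() maps a perturbed skb hash into [0, flows_cnt) with reciprocal_scale(), which replaces a modulo with a multiply-and-shift. A standalone userspace sketch of the same scaling trick; the hash constant below is a stand-in for the value skb_get_hash_perturb() would return:

#include <stdint.h>
#include <stdio.h>

/* Same trick as the kernel's reciprocal_scale(): map a uniformly
 * distributed 32-bit hash into [0, n) with a multiply and a shift,
 * avoiding a division/modulo on the fast path. */
static uint32_t reciprocal_scale(uint32_t hash, uint32_t n)
{
    return (uint32_t)(((uint64_t)hash * n) >> 32);
}

int main(void)
{
    uint32_t flows_cnt = 1024;      /* fq_codel's default flow count */
    uint32_t hash = 0xdeadbeefu;    /* stand-in for the perturbed skb hash */

    printf("flow index: %u\n", reciprocal_scale(hash, flows_cnt));
    return 0;
}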
80 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_classify() local
87 TC_H_MIN(skb->priority) <= q->flows_cnt) in fq_codel_classify()
90 filter = rcu_dereference_bh(q->filter_list); in fq_codel_classify()
92 return fq_codel_hash(q, skb) + 1; in fq_codel_classify()
106 if (TC_H_MIN(res.classid) <= q->flows_cnt) in fq_codel_classify()
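fq_codel_classify() first honors skb->priority as a direct 1-based flow id when its major number matches the qdisc handle, then falls back to the tc filters and finally to the hash; flow ids stay 1-based so that 0 can signal "no flow". A minimal sketch of that fast path using the kernel's TC_H_MAJ/TC_H_MIN bit layout; classify() and its parameter list here are hypothetical:

#include <stdint.h>

#define TC_H_MAJ_MASK 0xFFFF0000U
#define TC_H_MIN_MASK 0x0000FFFFU
#define TC_H_MAJ(h) ((h) & TC_H_MAJ_MASK)
#define TC_H_MIN(h) ((h) & TC_H_MIN_MASK)

/* Fast path of fq_codel_classify(): a priority whose major number is
 * the qdisc handle and whose minor number is a valid 1-based flow id
 * selects that flow directly, bypassing the tc filters. */
static uint32_t classify(uint32_t priority, uint32_t sch_handle,
                         uint32_t flows_cnt, uint32_t hash)
{
    if (TC_H_MAJ(priority) == sch_handle &&
        TC_H_MIN(priority) > 0 &&
        TC_H_MIN(priority) <= flows_cnt)
        return TC_H_MIN(priority);

    return hash % flows_cnt + 1;    /* the kernel uses reciprocal_scale() */
}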
138 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_drop() local
148 for (i = 0; i < q->flows_cnt; i++) { in fq_codel_drop()
149 if (q->backlogs[i] > maxbacklog) { in fq_codel_drop()
150 maxbacklog = q->backlogs[i]; in fq_codel_drop()
154 flow = &q->flows[idx]; in fq_codel_drop()
157 q->backlogs[idx] -= len; in fq_codel_drop()
158 sch->q.qlen--; in fq_codel_drop()
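When the qdisc is over limit, fq_codel_drop() evicts from the flow with the largest byte backlog, found by a linear scan of the parallel backlogs[] array; O(flows_cnt), but it only runs on overload. A userspace sketch of the selection step (fattest_flow() is a hypothetical name):

#include <stddef.h>
#include <stdint.h>

/* Victim selection as in fq_codel_drop(): scan the per-flow byte
 * backlogs and return the index of the flow holding the most bytes;
 * that flow's head packet is then dropped. */
static size_t fattest_flow(const uint32_t *backlogs, size_t flows_cnt)
{
    uint32_t maxbacklog = 0;
    size_t idx = 0;

    for (size_t i = 0; i < flows_cnt; i++) {
        if (backlogs[i] > maxbacklog) {
            maxbacklog = backlogs[i];
            idx = i;
        }
    }
    return idx;
}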
177 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_enqueue() local
192 flow = &q->flows[idx]; in fq_codel_enqueue()
194 q->backlogs[idx] += qdisc_pkt_len(skb); in fq_codel_enqueue()
198 list_add_tail(&flow->flowchain, &q->new_flows); in fq_codel_enqueue()
199 q->new_flow_count++; in fq_codel_enqueue()
200 flow->deficit = q->quantum; in fq_codel_enqueue()
203 if (++sch->q.qlen <= sch->limit) in fq_codel_enqueue()
207 q->drop_overlimit++; in fq_codel_enqueue()
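On enqueue, a flow that was idle is appended to the tail of new_flows and granted one quantum of DRR deficit, which is what gives new flows brief priority over established ones. A self-contained sketch of that activation step; struct flow and activate_flow() are illustrative, with a minimal doubly linked list standing in for the kernel's list_head:

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_add_tail(struct list_head *n, struct list_head *head)
{
    n->prev = head->prev;
    n->next = head;
    head->prev->next = n;
    head->prev = n;
}

struct flow {
    struct list_head flowchain;
    int deficit;                     /* DRR byte credit */
};

/* Enqueue-side activation: an idle flow that just received a packet
 * is appended to new_flows and seeded with a full quantum of deficit,
 * earning it one round of priority service on the next dequeue. */
static void activate_flow(struct flow *f, struct list_head *new_flows,
                          int quantum)
{
    list_add_tail(&f->flowchain, new_flows);
    f->deficit = quantum;
}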
225 struct fq_codel_sched_data *q = qdisc_priv(sch); in dequeue() local
232 q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb); in dequeue()
233 sch->q.qlen--; in dequeue()
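The dequeue callback charges the dequeued bytes against the right slot of the parallel backlogs[] array by recovering the flow index with pointer arithmetic (flow - q->flows). A tiny standalone illustration of the same indexing trick:

#include <stdint.h>
#include <stdio.h>

struct flow { int deficit; };

int main(void)
{
    struct flow flows[4];
    uint32_t backlogs[4] = { 100, 200, 300, 400 };
    struct flow *f = &flows[2];          /* the flow being dequeued from */

    /* f - flows recovers the array index, so the matching slot of the
     * parallel backlogs[] array can be charged. */
    backlogs[f - flows] -= 50;
    printf("%u\n", backlogs[2]);         /* prints 250 */
    return 0;
}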
240 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_dequeue() local
248 head = &q->new_flows; in fq_codel_dequeue()
250 head = &q->old_flows; in fq_codel_dequeue()
257 flow->deficit += q->quantum; in fq_codel_dequeue()
258 list_move_tail(&flow->flowchain, &q->old_flows); in fq_codel_dequeue()
262 prev_drop_count = q->cstats.drop_count; in fq_codel_dequeue()
263 prev_ecn_mark = q->cstats.ecn_mark; in fq_codel_dequeue()
266 skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats, in fq_codel_dequeue()
269 flow->dropped += q->cstats.drop_count - prev_drop_count; in fq_codel_dequeue()
270 flow->dropped += q->cstats.ecn_mark - prev_ecn_mark; in fq_codel_dequeue()
274 if ((head == &q->new_flows) && !list_empty(&q->old_flows)) in fq_codel_dequeue()
275 list_move_tail(&flow->flowchain, &q->old_flows); in fq_codel_dequeue()
285 if (q->cstats.drop_count && sch->q.qlen) { in fq_codel_dequeue()
286 qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, in fq_codel_dequeue()
287 q->cstats.drop_len); in fq_codel_dequeue()
288 q->cstats.drop_count = 0; in fq_codel_dequeue()
289 q->cstats.drop_len = 0; in fq_codel_dequeue()
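fq_codel_dequeue() is deficit round robin over two lists: new_flows is served before old_flows, a flow with exhausted deficit is topped up by one quantum and rotated to the tail of old_flows, and a new flow that empties is demoted to old_flows rather than deleted, so it cannot immediately reclaim new-flow priority. The AQM part is delegated to codel_dequeue(). A standalone sketch of just the list and deficit bookkeeping; pick_flow() is a hypothetical name and the list helpers mirror the kernel's:

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_del(struct list_head *e)
{
    e->prev->next = e->next;
    e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *n, struct list_head *head)
{
    n->prev = head->prev;
    n->next = head;
    head->prev->next = n;
    head->prev = n;
}

static void list_move_tail(struct list_head *e, struct list_head *head)
{
    list_del(e);
    list_add_tail(e, head);
}

struct flow {
    struct list_head flowchain;
    int deficit;
};

/* The DRR selection loop of fq_codel_dequeue(): serve new_flows before
 * old_flows; a flow whose deficit is spent is refilled with one quantum
 * and rotated to the tail of old_flows before it may transmit again. */
static struct flow *pick_flow(struct list_head *new_flows,
                              struct list_head *old_flows, int quantum)
{
    for (;;) {
        struct list_head *head = new_flows;

        if (list_empty(head)) {
            head = old_flows;
            if (list_empty(head))
                return NULL;             /* qdisc is empty */
        }

        struct flow *f = container_of(head->next, struct flow, flowchain);

        if (f->deficit <= 0) {
            f->deficit += quantum;
            list_move_tail(&f->flowchain, old_flows);
            continue;                    /* evaluate the next candidate */
        }
        return f;
    }
}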
296 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_reset() local
299 INIT_LIST_HEAD(&q->new_flows); in fq_codel_reset()
300 INIT_LIST_HEAD(&q->old_flows); in fq_codel_reset()
301 for (i = 0; i < q->flows_cnt; i++) { in fq_codel_reset()
302 struct fq_codel_flow *flow = q->flows + i; in fq_codel_reset()
314 memset(q->backlogs, 0, q->flows_cnt * sizeof(u32)); in fq_codel_reset()
315 sch->q.qlen = 0; in fq_codel_reset()
330 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_change() local
341 if (q->flows) in fq_codel_change()
343 q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]); in fq_codel_change()
344 if (!q->flows_cnt || in fq_codel_change()
345 q->flows_cnt > 65536) in fq_codel_change()
353 q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT; in fq_codel_change()
359 q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT; in fq_codel_change()
365 q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT; in fq_codel_change()
372 q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]); in fq_codel_change()
375 q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM])); in fq_codel_change()
377 while (sch->q.qlen > sch->limit) { in fq_codel_change()
380 q->cstats.drop_len += qdisc_pkt_len(skb); in fq_codel_change()
382 q->cstats.drop_count++; in fq_codel_change()
384 qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len); in fq_codel_change()
385 q->cstats.drop_count = 0; in fq_codel_change()
386 q->cstats.drop_len = 0; in fq_codel_change()
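fq_codel_change() converts netlink parameters given in microseconds into CoDel's internal codel_time_t, which counts ticks of 2^CODEL_SHIFT = 1024 ns; hence the (val * NSEC_PER_USEC) >> CODEL_SHIFT pattern for target, interval, and ce_threshold. A worked standalone example (us_to_codel_time() is a hypothetical helper name):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL
#define CODEL_SHIFT   10                 /* one tick = 1024 ns */

typedef uint32_t codel_time_t;

/* Microseconds from netlink to CoDel's internal tick unit, matching
 * the (val * NSEC_PER_USEC) >> CODEL_SHIFT lines in fq_codel_change(). */
static codel_time_t us_to_codel_time(uint64_t us)
{
    return (codel_time_t)((us * NSEC_PER_USEC) >> CODEL_SHIFT);
}

int main(void)
{
    /* CoDel's default 5 ms target becomes ~4882 ticks of 1024 ns. */
    printf("%u\n", us_to_codel_time(5000));
    return 0;
}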
408 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_destroy() local
410 tcf_destroy_chain(&q->filter_list); in fq_codel_destroy()
411 fq_codel_free(q->backlogs); in fq_codel_destroy()
412 fq_codel_free(q->flows); in fq_codel_destroy()
417 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_init() local
421 q->flows_cnt = 1024; in fq_codel_init()
422 q->quantum = psched_mtu(qdisc_dev(sch)); in fq_codel_init()
423 q->perturbation = prandom_u32(); in fq_codel_init()
424 INIT_LIST_HEAD(&q->new_flows); in fq_codel_init()
425 INIT_LIST_HEAD(&q->old_flows); in fq_codel_init()
426 codel_params_init(&q->cparams, sch); in fq_codel_init()
427 codel_stats_init(&q->cstats); in fq_codel_init()
428 q->cparams.ecn = true; in fq_codel_init()
436 if (!q->flows) { in fq_codel_init()
437 q->flows = fq_codel_zalloc(q->flows_cnt * in fq_codel_init()
439 if (!q->flows) in fq_codel_init()
441 q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32)); in fq_codel_init()
442 if (!q->backlogs) { in fq_codel_init()
443 fq_codel_free(q->flows); in fq_codel_init()
446 for (i = 0; i < q->flows_cnt; i++) { in fq_codel_init()
447 struct fq_codel_flow *flow = q->flows + i; in fq_codel_init()
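fq_codel_init() allocates two parallel arrays sized by flows_cnt, flows[] and backlogs[], and must unwind the first if the second allocation fails. A userspace sketch of the same all-or-nothing pattern with simplified hypothetical types; the kernel uses a kzalloc-with-vzalloc-fallback helper, for which plain calloc stands in here:

#include <stdint.h>
#include <stdlib.h>

struct flow { int deficit; /* ...per-flow CoDel state elided... */ };

struct sched {
    uint32_t flows_cnt;
    struct flow *flows;
    uint32_t *backlogs;                  /* parallel array, bytes per flow */
};

/* All-or-nothing allocation as in fq_codel_init(): both arrays must
 * exist or neither, so a failed second allocation frees the first. */
static int sched_alloc(struct sched *q)
{
    q->flows = calloc(q->flows_cnt, sizeof(*q->flows));
    if (!q->flows)
        return -1;

    q->backlogs = calloc(q->flows_cnt, sizeof(*q->backlogs));
    if (!q->backlogs) {
        free(q->flows);
        q->flows = NULL;
        return -1;
    }
    return 0;
}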
462 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_dump() local
470 codel_time_to_us(q->cparams.target)) || in fq_codel_dump()
474 codel_time_to_us(q->cparams.interval)) || in fq_codel_dump()
476 q->cparams.ecn) || in fq_codel_dump()
478 q->quantum) || in fq_codel_dump()
480 q->flows_cnt)) in fq_codel_dump()
483 if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD && in fq_codel_dump()
485 codel_time_to_us(q->cparams.ce_threshold))) in fq_codel_dump()
496 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_dump_stats() local
502 st.qdisc_stats.maxpacket = q->cstats.maxpacket; in fq_codel_dump_stats()
503 st.qdisc_stats.drop_overlimit = q->drop_overlimit; in fq_codel_dump_stats()
504 st.qdisc_stats.ecn_mark = q->cstats.ecn_mark; in fq_codel_dump_stats()
505 st.qdisc_stats.new_flow_count = q->new_flow_count; in fq_codel_dump_stats()
506 st.qdisc_stats.ce_mark = q->cstats.ce_mark; in fq_codel_dump_stats()
508 list_for_each(pos, &q->new_flows) in fq_codel_dump_stats()
511 list_for_each(pos, &q->old_flows) in fq_codel_dump_stats()
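fq_codel_dump_stats() reports the instantaneous lengths of the two flow lists by walking them with list_for_each(). An equivalent standalone counter over a circular doubly linked list (list_count() is a hypothetical name):

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* Count entries on a circular list, as the list_for_each() walks do
 * when filling new_flows_len and old_flows_len. */
static unsigned int list_count(const struct list_head *head)
{
    unsigned int n = 0;
    const struct list_head *pos;

    for (pos = head->next; pos != head; pos = pos->next)
        n++;
    return n;
}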
535 static void fq_codel_put(struct Qdisc *q, unsigned long cl) in fq_codel_put() argument
542 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_find_tcf() local
546 return &q->filter_list; in fq_codel_find_tcf()
559 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_dump_class_stats() local
564 if (idx < q->flows_cnt) { in fq_codel_dump_class_stats()
565 const struct fq_codel_flow *flow = &q->flows[idx]; in fq_codel_dump_class_stats()
588 qs.backlog = q->backlogs[idx]; in fq_codel_dump_class_stats()
593 if (idx < q->flows_cnt) in fq_codel_dump_class_stats()
600 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_walk() local
606 for (i = 0; i < q->flows_cnt; i++) { in fq_codel_walk()
607 if (list_empty(&q->flows[i].flowchain) || in fq_codel_walk()