Lines matching refs:q — cross-references to the identifier q in the HTB qdisc (net/sched/sch_htb.c). Each entry gives the source line number, the matching line, and the enclosing function.
131 struct Qdisc *q; member
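Two different things go by "q" in this file: the struct Qdisc *q member above, which is the child qdisc attached to each leaf class, and the struct htb_sched *q locals throughout, which are the scheduler's private data obtained via qdisc_priv(sch). A minimal sketch of the assumed layout (field names beyond the ones referenced here follow the kernel source of this era and may differ between versions):

    struct htb_class {
            struct Qdisc_class_common common;   /* classid + hash linkage */
            int level;                          /* 0 for leaf classes */
            union {
                    struct htb_class_leaf {
                            struct Qdisc *q;    /* line 131: leaf's child qdisc */
                            /* drop_list, deficit[], prio ... */
                    } leaf;
                    struct htb_class_inner inner;
            } un;
            /* tokens, t_c, mbuffer, pq_node, cmode ... */
    };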
183 struct htb_sched *q = qdisc_priv(sch); in htb_find() local
186 clc = qdisc_class_find(&q->clhash, handle); in htb_find()
209 struct htb_sched *q = qdisc_priv(sch); in htb_classify() local
228 tcf = rcu_dereference_bh(q->filter_list); in htb_classify()
257 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); in htb_classify()
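Lines 228 and 257 show the two halves of classification: packets are first matched against the RCU-protected filter chain; when nothing matches, the packet falls back to the default class built from the qdisc's own major handle plus q->defcls. A sketch of that fallback, as the reference on line 257 implies:

    /* No classifier verdict: try the configured default class.  A
     * missing or non-leaf default means the packet bypasses shaping
     * through the direct queue (see htb_enqueue, lines 580-582). */
    cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
    if (!cl || cl->level)
            return HTB_DIRECT;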
295 static void htb_add_to_wait_tree(struct htb_sched *q, in htb_add_to_wait_tree() argument
298 struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL; in htb_add_to_wait_tree()
300 cl->pq_key = q->now + delay; in htb_add_to_wait_tree()
301 if (cl->pq_key == q->now) in htb_add_to_wait_tree()
305 if (q->near_ev_cache[cl->level] > cl->pq_key) in htb_add_to_wait_tree()
306 q->near_ev_cache[cl->level] = cl->pq_key; in htb_add_to_wait_tree()
318 rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq); in htb_add_to_wait_tree()
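Between the pq_key computation (lines 300-306) and rb_insert_color() (line 318), the elided lines walk the rbtree to find the insertion point; wait_pq is ordered by pq_key, i.e. by the time at which a class may change mode. A sketch of the assumed descent:

    /* Keys greater than or equal go right, so the leftmost node
     * always holds the earliest pending event for this level. */
    while (*p) {
            struct htb_class *c;

            parent = *p;
            c = rb_entry(parent, struct htb_class, pq_node);
            if (cl->pq_key >= c->pq_key)
                    p = &parent->rb_right;
            else
                    p = &parent->rb_left;
    }
    rb_link_node(&cl->pq_node, parent, p);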
338 static inline void htb_add_class_to_row(struct htb_sched *q, in htb_add_class_to_row() argument
341 q->row_mask[cl->level] |= mask; in htb_add_class_to_row()
345 htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio); in htb_add_class_to_row()
367 static inline void htb_remove_class_from_row(struct htb_sched *q, in htb_remove_class_from_row() argument
371 struct htb_level *hlevel = &q->hlevel[cl->level]; in htb_remove_class_from_row()
385 q->row_mask[cl->level] &= ~m; in htb_remove_class_from_row()
395 static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl) in htb_activate_prios() argument
420 htb_add_class_to_row(q, cl, mask); in htb_activate_prios()
430 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl) in htb_deactivate_prios() argument
464 htb_remove_class_from_row(q, cl, mask); in htb_deactivate_prios()
521 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff) in htb_change_class_mode() argument
530 htb_deactivate_prios(q, cl); in htb_change_class_mode()
533 htb_activate_prios(q, cl); in htb_change_class_mode()
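htb_change_class_mode() (line 521) glues the row bookkeeping together: if the recomputed mode differs and the class is currently active on some priority, it must be pulled out of its rows before the mode flips and re-inserted afterwards. A hedged sketch of the body around lines 530-533 (htb_class_mode() is assumed to derive the new mode from the token levels):

    enum htb_cmode new_mode = htb_class_mode(cl, diff);

    if (new_mode == cl->cmode)
            return;
    if (cl->prio_activity) {        /* class sits on some row */
            if (cl->cmode != HTB_CANT_SEND)
                    htb_deactivate_prios(q, cl);    /* line 530 */
            cl->cmode = new_mode;
            if (new_mode != HTB_CANT_SEND)
                    htb_activate_prios(q, cl);      /* line 533 */
    } else {
            cl->cmode = new_mode;
    }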
545 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl) in htb_activate() argument
547 WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen); in htb_activate()
551 htb_activate_prios(q, cl); in htb_activate()
553 q->drops + cl->prio); in htb_activate()
563 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) in htb_deactivate() argument
567 htb_deactivate_prios(q, cl); in htb_deactivate()
575 struct htb_sched *q = qdisc_priv(sch); in htb_enqueue() local
580 if (q->direct_queue.qlen < q->direct_qlen) { in htb_enqueue()
581 __skb_queue_tail(&q->direct_queue, skb); in htb_enqueue()
582 q->direct_pkts++; in htb_enqueue()
593 } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) { in htb_enqueue()
600 htb_activate(q, cl); in htb_enqueue()
604 sch->q.qlen++; in htb_enqueue()
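The enqueue references trace the whole ingress path: HTB_DIRECT packets bypass shaping through q->direct_queue as long as it stays under q->direct_qlen, everything else is enqueued to the leaf's child qdisc, and a successful enqueue activates the leaf and bumps the root qlen (line 604). A condensed, hedged reconstruction:

    if (cl == HTB_DIRECT) {
            /* Shaping bypass (lines 580-582). */
            if (q->direct_queue.qlen < q->direct_qlen) {
                    __skb_queue_tail(&q->direct_queue, skb);
                    q->direct_pkts++;
            } else {
                    return qdisc_drop(skb, sch);
            }
    } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
            if (net_xmit_drop_count(ret))
                    qdisc_qstats_drop(sch);
            return ret;
    } else {
            htb_activate(q, cl);    /* line 600: leaf now has work */
    }
    sch->q.qlen++;
    return NET_XMIT_SUCCESS;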
645 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, in htb_charge_class() argument
653 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); in htb_charge_class()
663 cl->t_c = q->now; in htb_charge_class()
667 htb_change_class_mode(q, cl, &diff); in htb_charge_class()
670 htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq); in htb_charge_class()
672 htb_add_to_wait_tree(q, cl, diff); in htb_charge_class()
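Lines 653-672 are the token-bucket accounting that runs after every dequeue: diff is the elapsed credit since the class's checkpoint t_c, clamped to mbuffer; once tokens are adjusted the mode may flip, and a class that can no longer send is parked on the wait tree until it recovers. A sketch of the charging walk up the tree, with the rate/ceil token math hidden behind a hypothetical accnt_tokens() helper:

    while (cl) {
            diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); /* line 653 */
            accnt_tokens(cl, bytes, diff);  /* hypothetical: rate vs. ceil */
            cl->t_c = q->now;               /* line 663: new checkpoint */

            old_mode = cl->cmode;
            diff = 0;
            htb_change_class_mode(q, cl, &diff);            /* line 667 */
            if (old_mode != cl->cmode) {
                    if (old_mode != HTB_CAN_SEND)           /* line 670 */
                            htb_safe_rb_erase(&cl->pq_node,
                                              &q->hlevel[cl->level].wait_pq);
                    if (cl->cmode != HTB_CAN_SEND)
                            htb_add_to_wait_tree(q, cl, diff); /* line 672 */
            }
            cl = cl->parent;
    }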
690 static s64 htb_do_events(struct htb_sched *q, const int level, in htb_do_events() argument
698 struct rb_root *wait_pq = &q->hlevel[level].wait_pq; in htb_do_events()
709 if (cl->pq_key > q->now) in htb_do_events()
713 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); in htb_do_events()
714 htb_change_class_mode(q, cl, &diff); in htb_do_events()
716 htb_add_to_wait_tree(q, cl, diff); in htb_do_events()
720 if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) { in htb_do_events()
722 q->warned |= HTB_WARN_TOOMANYEVENTS; in htb_do_events()
725 return q->now; in htb_do_events()
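htb_do_events() drains the wait tree for one level: it pops classes whose pq_key has come due, recomputes their mode, and re-parks those that still cannot send; the return value (0 when the tree is empty, the next pq_key, or q->now on overload) feeds the near_ev_cache read back in htb_dequeue(). A hedged sketch of the loop around the lines above:

    for (i = 0; i < 500; i++) {                      /* bounded per call */
            struct rb_node *p = rb_first(wait_pq);   /* earliest pq_key */

            if (!p)
                    return 0;
            cl = rb_entry(p, struct htb_class, pq_node);
            if (cl->pq_key > q->now)                 /* line 709 */
                    return cl->pq_key;

            htb_safe_rb_erase(p, wait_pq);
            diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); /* line 713 */
            htb_change_class_mode(q, cl, &diff);
            if (cl->cmode != HTB_CAN_SEND)
                    htb_add_to_wait_tree(q, cl, diff);        /* line 716 */
    }
    /* Too many events in one batch: warn once, retry next dequeue. */
    if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {              /* line 720 */
            pr_warn("htb: too many events!\n");
            q->warned |= HTB_WARN_TOOMANYEVENTS;
    }
    return q->now;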
813 static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio, in htb_dequeue_tree() argument
818 struct htb_level *hlevel = &q->hlevel[level]; in htb_dequeue_tree()
834 if (unlikely(cl->un.leaf.q->q.qlen == 0)) { in htb_dequeue_tree()
836 htb_deactivate(q, cl); in htb_dequeue_tree()
839 if ((q->row_mask[level] & (1 << prio)) == 0) in htb_dequeue_tree()
850 skb = cl->un.leaf.q->dequeue(cl->un.leaf.q); in htb_dequeue_tree()
854 qdisc_warn_nonwc("htb", cl->un.leaf.q); in htb_dequeue_tree()
856 &q->hlevel[0].hprio[prio].ptr); in htb_dequeue_tree()
867 &q->hlevel[0].hprio[prio].ptr); in htb_dequeue_tree()
872 if (!cl->un.leaf.q->q.qlen) in htb_dequeue_tree()
873 htb_deactivate(q, cl); in htb_dequeue_tree()
874 htb_charge_class(q, cl, level, skb); in htb_dequeue_tree()
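Lines 850-874 cover the leaf handling in htb_dequeue_tree(): the skb is pulled from the leaf's child qdisc (a non-work-conserving child earns the warning on line 854), and a successful dequeue runs one deficit-round-robin step before charging the class tree. A hedged sketch of that DRR step:

    if (likely(skb != NULL)) {
            bstats_update(&cl->bstats, skb);
            cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
            if (cl->un.leaf.deficit[level] < 0) {
                    /* Quantum exhausted: advance to the next class in
                     * the round (lines 856/867 are these ptr args). */
                    cl->un.leaf.deficit[level] += cl->quantum;
                    htb_next_rb_node(level ?
                                     &cl->parent->un.inner.clprio[prio].ptr :
                                     &q->hlevel[0].hprio[prio].ptr);
            }
            /* Deactivate drained leaves before charging (lines 872-874). */
            if (!cl->un.leaf.q->q.qlen)
                    htb_deactivate(q, cl);
            htb_charge_class(q, cl, level, skb);
    }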
882 struct htb_sched *q = qdisc_priv(sch); in htb_dequeue() local
888 skb = __skb_dequeue(&q->direct_queue); in htb_dequeue()
894 sch->q.qlen--; in htb_dequeue()
898 if (!sch->q.qlen) in htb_dequeue()
900 q->now = ktime_get_ns(); in htb_dequeue()
903 next_event = q->now + 5LLU * NSEC_PER_SEC; in htb_dequeue()
908 s64 event = q->near_ev_cache[level]; in htb_dequeue()
910 if (q->now >= event) { in htb_dequeue()
911 event = htb_do_events(q, level, start_at); in htb_dequeue()
913 event = q->now + NSEC_PER_SEC; in htb_dequeue()
914 q->near_ev_cache[level] = event; in htb_dequeue()
920 m = ~q->row_mask[level]; in htb_dequeue()
925 skb = htb_dequeue_tree(q, prio, level); in htb_dequeue()
931 if (likely(next_event > q->now)) { in htb_dequeue()
933 &qdisc_root_sleeping(q->watchdog.qdisc)->state)) { in htb_dequeue()
935 qdisc_throttled(q->watchdog.qdisc); in htb_dequeue()
936 hrtimer_start(&q->watchdog.timer, time, in htb_dequeue()
940 schedule_work(&q->work); in htb_dequeue()
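The htb_dequeue() references outline the scheduler's main loop: serve q->direct_queue first (line 888), stamp q->now once per call (line 900), then scan levels from 0 upward, running due events (lines 908-914) and trying each priority with an active bit in the row mask (lines 920-925); if nothing was eligible, the watchdog hrtimer (or the worker, line 940) is armed for next_event. A compressed sketch of the level/priority scan:

    for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
            /* Run due mode-change events, via the per-level cache. */
            s64 event = q->near_ev_cache[level];

            if (q->now >= event) {
                    event = htb_do_events(q, level, start_at);
                    if (!event)
                            event = q->now + NSEC_PER_SEC;  /* line 913 */
                    q->near_ev_cache[level] = event;
            }
            if (next_event > event)
                    next_event = event;

            /* Try every priority that has an active class in this row. */
            m = ~q->row_mask[level];
            while (m != (int)(-1)) {
                    int prio = ffz(m);      /* lowest clear bit first */

                    m |= 1 << prio;
                    skb = htb_dequeue_tree(q, prio, level);
                    if (likely(skb != NULL))
                            goto ok;        /* hand skb up, unthrottle */
            }
    }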
949 struct htb_sched *q = qdisc_priv(sch); in htb_drop() local
954 list_for_each(p, q->drops + prio) { in htb_drop()
958 if (cl->un.leaf.q->ops->drop && in htb_drop()
959 (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) { in htb_drop()
961 sch->q.qlen--; in htb_drop()
962 if (!cl->un.leaf.q->q.qlen) in htb_drop()
963 htb_deactivate(q, cl); in htb_drop()
975 struct htb_sched *q = qdisc_priv(sch); in htb_reset() local
979 for (i = 0; i < q->clhash.hashsize; i++) { in htb_reset()
980 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_reset()
984 if (cl->un.leaf.q) in htb_reset()
985 qdisc_reset(cl->un.leaf.q); in htb_reset()
992 qdisc_watchdog_cancel(&q->watchdog); in htb_reset()
993 __skb_queue_purge(&q->direct_queue); in htb_reset()
994 sch->q.qlen = 0; in htb_reset()
996 memset(q->hlevel, 0, sizeof(q->hlevel)); in htb_reset()
997 memset(q->row_mask, 0, sizeof(q->row_mask)); in htb_reset()
999 INIT_LIST_HEAD(q->drops + i); in htb_reset()
1014 struct htb_sched *q = container_of(work, struct htb_sched, work); in htb_work_func() local
1015 struct Qdisc *sch = q->watchdog.qdisc; in htb_work_func()
1022 struct htb_sched *q = qdisc_priv(sch); in htb_init() local
1042 err = qdisc_class_hash_init(&q->clhash); in htb_init()
1046 INIT_LIST_HEAD(q->drops + i); in htb_init()
1048 qdisc_watchdog_init(&q->watchdog, sch); in htb_init()
1049 INIT_WORK(&q->work, htb_work_func); in htb_init()
1050 __skb_queue_head_init(&q->direct_queue); in htb_init()
1053 q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]); in htb_init()
1055 q->direct_qlen = qdisc_dev(sch)->tx_queue_len; in htb_init()
1057 if ((q->rate2quantum = gopt->rate2quantum) < 1) in htb_init()
1058 q->rate2quantum = 1; in htb_init()
1059 q->defcls = gopt->defcls; in htb_init()
1066 struct htb_sched *q = qdisc_priv(sch); in htb_dump() local
1074 gopt.direct_pkts = q->direct_pkts; in htb_dump()
1076 gopt.rate2quantum = q->rate2quantum; in htb_dump()
1077 gopt.defcls = q->defcls; in htb_dump()
1084 nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen)) in htb_dump()
1106 if (!cl->level && cl->un.leaf.q) in htb_dump_class()
1107 tcm->tcm_info = cl->un.leaf.q->handle; in htb_dump_class()
1144 if (!cl->level && cl->un.leaf.q) in htb_dump_class_stats()
1145 qlen = cl->un.leaf.q->q.qlen; in htb_dump_class_stats()
1169 *old = qdisc_replace(sch, new, &cl->un.leaf.q); in htb_graft()
1176 return !cl->level ? cl->un.leaf.q : NULL; in htb_leaf()
1183 if (cl->un.leaf.q->q.qlen == 0) in htb_qlen_notify()
1206 static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl, in htb_parent_to_leaf() argument
1211 WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity); in htb_parent_to_leaf()
1215 &q->hlevel[parent->level].wait_pq); in htb_parent_to_leaf()
1220 parent->un.leaf.q = new_q ? new_q : &noop_qdisc; in htb_parent_to_leaf()
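htb_parent_to_leaf() runs when the last child of an inner class goes away: the parent is converted back to a leaf, removed from its level's wait tree (line 1215), and given a fresh child qdisc, falling back to noop_qdisc when the caller's allocation failed (line 1220). A hedged sketch of the call site in htb_delete():

    if (!cl->level && htb_parent_last_child(cl)) {
            /* The deleted leaf was its parent's only child: the
             * parent reverts to a leaf and needs a child qdisc. */
            new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
                                      cl->parent->common.classid);
            last_child = 1;
    }
    /* ... class unlinked, deactivated, erased from wait_pq ... */
    if (last_child)
            htb_parent_to_leaf(q, cl, new_q);       /* line 1310 */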
1230 WARN_ON(!cl->un.leaf.q); in htb_destroy_class()
1231 qdisc_destroy(cl->un.leaf.q); in htb_destroy_class()
1240 struct htb_sched *q = qdisc_priv(sch); in htb_destroy() local
1245 cancel_work_sync(&q->work); in htb_destroy()
1246 qdisc_watchdog_cancel(&q->watchdog); in htb_destroy()
1252 tcf_destroy_chain(&q->filter_list); in htb_destroy()
1254 for (i = 0; i < q->clhash.hashsize; i++) { in htb_destroy()
1255 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) in htb_destroy()
1258 for (i = 0; i < q->clhash.hashsize; i++) { in htb_destroy()
1259 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], in htb_destroy()
1263 qdisc_class_hash_destroy(&q->clhash); in htb_destroy()
1264 __skb_queue_purge(&q->direct_queue); in htb_destroy()
1269 struct htb_sched *q = qdisc_priv(sch); in htb_delete() local
1290 unsigned int qlen = cl->un.leaf.q->q.qlen; in htb_delete()
1291 unsigned int backlog = cl->un.leaf.q->qstats.backlog; in htb_delete()
1293 qdisc_reset(cl->un.leaf.q); in htb_delete()
1294 qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog); in htb_delete()
1298 qdisc_class_hash_remove(&q->clhash, &cl->common); in htb_delete()
1303 htb_deactivate(q, cl); in htb_delete()
1307 &q->hlevel[cl->level].wait_pq); in htb_delete()
1310 htb_parent_to_leaf(q, cl, new_q); in htb_delete()
1335 struct htb_sched *q = qdisc_priv(sch); in htb_change_class() local
1427 unsigned int qlen = parent->un.leaf.q->q.qlen; in htb_change_class()
1428 unsigned int backlog = parent->un.leaf.q->qstats.backlog; in htb_change_class()
1431 qdisc_reset(parent->un.leaf.q); in htb_change_class()
1432 qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog); in htb_change_class()
1433 qdisc_destroy(parent->un.leaf.q); in htb_change_class()
1435 htb_deactivate(q, parent); in htb_change_class()
1439 htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq); in htb_change_class()
1447 cl->un.leaf.q = new_q ? new_q : &noop_qdisc; in htb_change_class()
1460 qdisc_class_hash_insert(&q->clhash, &cl->common); in htb_change_class()
1490 do_div(quantum, q->rate2quantum); in htb_change_class()
1514 qdisc_class_hash_grow(sch, &q->clhash); in htb_change_class()
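Line 1490 is where rate2quantum (clamped to at least 1 at init, lines 1057-1058) is consumed: a leaf's DRR quantum defaults to its byte rate divided by rate2quantum, bounded to a sane range unless the user supplied one explicitly. A hedged sketch of that derivation:

    if (!cl->level) {
            u64 quantum = cl->rate.rate_bytes_ps;

            do_div(quantum, q->rate2quantum);       /* line 1490 */
            cl->quantum = min_t(u64, quantum, INT_MAX);
            /* Derived values outside [1000, 200000] are clamped with
             * a "consider r2q change" warning; an explicit quantum
             * from TCA_HTB_PARMS always wins. */
            if (!hopt->quantum && cl->quantum < 1000)
                    cl->quantum = 1000;
            if (!hopt->quantum && cl->quantum > 200000)
                    cl->quantum = 200000;
            if (hopt->quantum)
                    cl->quantum = hopt->quantum;
    }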
1526 struct htb_sched *q = qdisc_priv(sch); in htb_find_tcf() local
1528 struct tcf_proto __rcu **fl = cl ? &cl->filter_list : &q->filter_list; in htb_find_tcf()
1562 struct htb_sched *q = qdisc_priv(sch); in htb_walk() local
1569 for (i = 0; i < q->clhash.hashsize; i++) { in htb_walk()
1570 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_walk()