Lines matching references to cfqg in block/cfq-iosched.c (the Linux CFQ I/O scheduler)

148 	struct cfq_group *cfqg;  member
391 static struct cfq_rb_root *st_for(struct cfq_group *cfqg, in st_for() argument
395 if (!cfqg) in st_for()
399 return &cfqg->service_tree_idle; in st_for()
401 return &cfqg->service_trees[class][type]; in st_for()
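Taken together, the matches at 391-401 are the whole of st_for(), which maps a (workload class, workload type) pair onto one of a group's seven service trees: a 2x3 grid for the BE and RT classes plus a single idle tree. A minimal userspace sketch of that lookup, with the enum layout and array shape assumed to mirror cfq-iosched.c:

	#include <stdio.h>

	/* Userspace sketch of st_for(); enums assumed to mirror cfq-iosched.c. */
	enum wl_class_t { BE_WORKLOAD, RT_WORKLOAD, IDLE_WORKLOAD };
	enum wl_type_t { ASYNC_WORKLOAD, SYNC_NOIDLE_WORKLOAD, SYNC_WORKLOAD };

	struct cfq_rb_root { int count; };

	struct cfq_group {
		struct cfq_rb_root service_trees[IDLE_WORKLOAD][SYNC_WORKLOAD + 1];
		struct cfq_rb_root service_tree_idle;
	};

	static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
					  enum wl_class_t class, enum wl_type_t type)
	{
		if (!cfqg)
			return NULL;
		if (class == IDLE_WORKLOAD)	/* idle I/O ignores the type axis */
			return &cfqg->service_tree_idle;
		return &cfqg->service_trees[class][type];
	}

	int main(void)
	{
		static struct cfq_group g;

		/* Distinct members back the RT sync tree and the idle tree. */
		printf("%p %p\n", (void *)st_for(&g, RT_WORKLOAD, SYNC_WORKLOAD),
		       (void *)st_for(&g, IDLE_WORKLOAD, ASYNC_WORKLOAD));
		return 0;
	}
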
454 static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg) in cfqg_to_blkg() argument
456 return pd_to_blkg(&cfqg->pd); in cfqg_to_blkg()
503 static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, in cfqg_stats_set_start_group_wait_time() argument
506 struct cfqg_stats *stats = &cfqg->stats; in cfqg_stats_set_start_group_wait_time()
510 if (cfqg == curr_cfqg) in cfqg_stats_set_start_group_wait_time()
531 static void cfqg_stats_update_dequeue(struct cfq_group *cfqg) in cfqg_stats_update_dequeue() argument
533 blkg_stat_add(&cfqg->stats.dequeue, 1); in cfqg_stats_update_dequeue()
536 static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) in cfqg_stats_set_start_empty_time() argument
538 struct cfqg_stats *stats = &cfqg->stats; in cfqg_stats_set_start_empty_time()
555 static void cfqg_stats_update_idle_time(struct cfq_group *cfqg) in cfqg_stats_update_idle_time() argument
557 struct cfqg_stats *stats = &cfqg->stats; in cfqg_stats_update_idle_time()
569 static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) in cfqg_stats_set_start_idle_time() argument
571 struct cfqg_stats *stats = &cfqg->stats; in cfqg_stats_set_start_idle_time()
579 static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) in cfqg_stats_update_avg_queue_size() argument
581 struct cfqg_stats *stats = &cfqg->stats; in cfqg_stats_update_avg_queue_size()
591 	static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { } in cfqg_stats_set_start_group_wait_time() argument
593 static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { } in cfqg_stats_update_dequeue() argument
594 static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { } in cfqg_stats_set_start_empty_time() argument
595 static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { } in cfqg_stats_update_idle_time() argument
596 static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { } in cfqg_stats_set_start_idle_time() argument
597 static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { } in cfqg_stats_update_avg_queue_size() argument
610 static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) in cfqg_parent() argument
612 struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent; in cfqg_parent()
617 static inline void cfqg_get(struct cfq_group *cfqg) in cfqg_get() argument
619 return blkg_get(cfqg_to_blkg(cfqg)); in cfqg_get()
622 static inline void cfqg_put(struct cfq_group *cfqg) in cfqg_put() argument
624 return blkg_put(cfqg_to_blkg(cfqg)); in cfqg_put()
630 blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf)); \
637 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do { \ argument
640 blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf)); \
644 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg, in cfqg_stats_update_io_add() argument
647 blkg_rwstat_add(&cfqg->stats.queued, rw, 1); in cfqg_stats_update_io_add()
648 cfqg_stats_end_empty_time(&cfqg->stats); in cfqg_stats_update_io_add()
649 cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg); in cfqg_stats_update_io_add()
652 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg, in cfqg_stats_update_timeslice_used() argument
655 blkg_stat_add(&cfqg->stats.time, time); in cfqg_stats_update_timeslice_used()
657 blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time); in cfqg_stats_update_timeslice_used()
661 static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) in cfqg_stats_update_io_remove() argument
663 blkg_rwstat_add(&cfqg->stats.queued, rw, -1); in cfqg_stats_update_io_remove()
666 static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) in cfqg_stats_update_io_merged() argument
668 blkg_rwstat_add(&cfqg->stats.merged, rw, 1); in cfqg_stats_update_io_merged()
671 static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg, in cfqg_stats_update_dispatch() argument
674 blkg_stat_add(&cfqg->stats.sectors, bytes >> 9); in cfqg_stats_update_dispatch()
675 blkg_rwstat_add(&cfqg->stats.serviced, rw, 1); in cfqg_stats_update_dispatch()
676 blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes); in cfqg_stats_update_dispatch()
679 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg, in cfqg_stats_update_completion() argument
682 struct cfqg_stats *stats = &cfqg->stats; in cfqg_stats_update_completion()
739 static void cfqg_stats_xfer_dead(struct cfq_group *cfqg) in cfqg_stats_xfer_dead() argument
741 struct cfq_group *parent = cfqg_parent(cfqg); in cfqg_stats_xfer_dead()
743 lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock); in cfqg_stats_xfer_dead()
748 cfqg_stats_merge(&parent->dead_stats, &cfqg->stats); in cfqg_stats_xfer_dead()
749 cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats); in cfqg_stats_xfer_dead()
750 cfqg_stats_reset(&cfqg->stats); in cfqg_stats_xfer_dead()
751 cfqg_stats_reset(&cfqg->dead_stats); in cfqg_stats_xfer_dead()
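The cfqg_stats_xfer_dead() matches at 739-751 show the teardown pattern: with the queue lock held, a dying group folds both its live stats and its already-inherited dead_stats into the parent's dead_stats, then zeroes its own. A hypothetical userspace model of that merge-then-reset flow, with plain counters standing in for the blkg stat objects:

	#include <assert.h>
	#include <string.h>

	/* Toy stand-ins for cfqg_stats; the real fields are blkg counters. */
	struct stats { unsigned long time, sectors; };

	static void stats_merge(struct stats *to, struct stats *from)
	{
		to->time += from->time;
		to->sectors += from->sectors;
	}

	static void stats_reset(struct stats *s)
	{
		memset(s, 0, sizeof(*s));
	}

	struct group { struct stats stats, dead_stats; struct group *parent; };

	/* Mirrors cfqg_stats_xfer_dead(): push everything up, then clear. */
	static void stats_xfer_dead(struct group *g)
	{
		struct group *parent = g->parent;

		if (!parent)	/* the root has nowhere to transfer to */
			return;
		stats_merge(&parent->dead_stats, &g->stats);
		stats_merge(&parent->dead_stats, &g->dead_stats);
		stats_reset(&g->stats);
		stats_reset(&g->dead_stats);
	}

	int main(void)
	{
		struct group root = { 0 }, child = { { 10, 100 }, { 5, 50 }, &root };

		stats_xfer_dead(&child);
		assert(root.dead_stats.time == 15 && root.dead_stats.sectors == 150);
		return 0;
	}
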
756 static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; } in cfqg_parent() argument
757 static inline void cfqg_get(struct cfq_group *cfqg) { } in cfqg_get() argument
758 static inline void cfqg_put(struct cfq_group *cfqg) { } in cfqg_put() argument
765 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0) argument
767 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg, in cfqg_stats_update_io_add() argument
769 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg, in cfqg_stats_update_timeslice_used() argument
771 static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { } in cfqg_stats_update_io_remove() argument
772 static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { } in cfqg_stats_update_io_merged() argument
773 static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg, in cfqg_stats_update_dispatch() argument
775 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg, in cfqg_stats_update_completion() argument
784 #define for_each_cfqg_st(cfqg, i, j, st) \ argument
786 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
787 : &cfqg->service_tree_idle; \
791 &cfqg->service_trees[i][j]: NULL) \
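Lines 784-791 are fragments of the for_each_cfqg_st() iterator, which visits every service tree in a group: the six class/type trees followed by the idle tree. A self-contained sketch, with the macro body reconstructed from these fragments and the loop bounds as I read them in cfq-iosched.c:

	#include <stdio.h>

	enum { BE_WORKLOAD, RT_WORKLOAD, IDLE_WORKLOAD };		/* wl_class_t */
	enum { ASYNC_WORKLOAD, SYNC_NOIDLE_WORKLOAD, SYNC_WORKLOAD };	/* wl_type_t */

	struct cfq_rb_root { int count; };
	struct cfq_group {
		struct cfq_rb_root service_trees[IDLE_WORKLOAD][SYNC_WORKLOAD + 1];
		struct cfq_rb_root service_tree_idle;
	};

	/* Reconstructed iterator: walk the 2x3 class/type grid, then the
	 * idle tree exactly once. */
	#define for_each_cfqg_st(cfqg, i, j, st) \
		for (i = 0; i <= IDLE_WORKLOAD; i++) \
			for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j] \
				: &cfqg->service_tree_idle; \
			     (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
			     (i == IDLE_WORKLOAD && j == 0); \
			     j++, st = i < IDLE_WORKLOAD ? \
				&cfqg->service_trees[i][j] : NULL)

	int main(void)
	{
		static struct cfq_group grp;
		struct cfq_group *cfqg = &grp;
		struct cfq_rb_root *st;
		int i, j, visited = 0;

		for_each_cfqg_st(cfqg, i, j, st)
			visited++;
		printf("%d\n", visited);	/* 7 trees in total */
		return 0;
	}
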
842 struct cfq_group *cfqg) in cfq_group_busy_queues_wl() argument
845 return cfqg->service_tree_idle.count; in cfq_group_busy_queues_wl()
847 return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count + in cfq_group_busy_queues_wl()
848 cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count + in cfq_group_busy_queues_wl()
849 cfqg->service_trees[wl_class][SYNC_WORKLOAD].count; in cfq_group_busy_queues_wl()
853 struct cfq_group *cfqg) in cfqg_busy_async_queues() argument
855 return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count + in cfqg_busy_async_queues()
856 cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count; in cfqg_busy_async_queues()
979 struct cfq_group *cfqg; in update_min_vdisktime() local
982 cfqg = rb_entry_cfqg(st->left); in update_min_vdisktime()
984 cfqg->vdisktime); in update_min_vdisktime()
995 struct cfq_group *cfqg, bool rt) in cfq_group_get_avg_queues() argument
1000 unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg); in cfq_group_get_avg_queues()
1002 min_q = min(cfqg->busy_queues_avg[rt], busy); in cfq_group_get_avg_queues()
1003 max_q = max(cfqg->busy_queues_avg[rt], busy); in cfq_group_get_avg_queues()
1004 cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) / in cfq_group_get_avg_queues()
1006 return cfqg->busy_queues_avg[rt]; in cfq_group_get_avg_queues()
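The update at 1002-1006 is a rounded moving average: with cfq_hist_divisor = 4 in cfq-iosched.c (so mult = 3 and round = 2), it weights the larger of the stored average and the current busy count by 3/4, biasing the estimate so it jumps up quickly when queues appear but drains by only about a quarter per update. A quick arithmetic check:

	#include <stdio.h>

	/* Sketch of the busy_queues_avg update at 1002-1006, with
	 * cfq_hist_divisor = 4 (so mult = 3, round = 2). */
	static unsigned update_avg(unsigned avg, unsigned busy)
	{
		const unsigned divisor = 4, mult = divisor - 1, round = divisor / 2;
		unsigned min_q = avg < busy ? avg : busy;
		unsigned max_q = avg < busy ? busy : avg;

		return (mult * max_q + min_q + round) / divisor;
	}

	int main(void)
	{
		unsigned avg = 8;
		int i;

		printf("%u\n", update_avg(2, 8));	/* jumps up fast: 7 */
		for (i = 0; i < 4; i++) {		/* drains slowly: 6 5 4 3 */
			avg = update_avg(avg, 0);
			printf("%u ", avg);
		}
		printf("\n");
		return 0;
	}
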
1010 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg) in cfq_group_slice() argument
1012 return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT; in cfq_group_slice()
1024 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg, in cfq_scaled_cfqq_slice()
1028 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg); in cfq_scaled_cfqq_slice()
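cfq_group_slice() at 1012 is fixed-point arithmetic: vfraction is the group's share of the hierarchy's total weight scaled by 2^CFQ_SERVICE_SHIFT (the shift is 12 in cfq-iosched.c), so the slice is that share of cfq_target_latency (300 ms by default, kept in jiffies). A minimal check of the math:

	#include <stdio.h>

	#define CFQ_SERVICE_SHIFT 12	/* vfraction denominator is 2^12 */

	/* Sketch of cfq_group_slice(): slice = target_latency * vfraction / 4096. */
	static unsigned group_slice(unsigned target_latency, unsigned vfraction)
	{
		return target_latency * vfraction >> CFQ_SERVICE_SHIFT;
	}

	int main(void)
	{
		/* A group owning half of the hierarchy's weight (vfraction = 2^11)
		 * receives half of a 300-jiffy target latency. */
		printf("%u\n", group_slice(300, 1 << 11));	/* 150 */
		return 0;
	}
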
1237 return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) - in cfq_slice_offset()
1242 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg) in cfqg_key() argument
1244 return cfqg->vdisktime - st->min_vdisktime; in cfqg_key()
1248 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) in __cfq_group_service_tree_add() argument
1253 s64 key = cfqg_key(st, cfqg); in __cfq_group_service_tree_add()
1269 st->left = &cfqg->rb_node; in __cfq_group_service_tree_add()
1271 rb_link_node(&cfqg->rb_node, parent, node); in __cfq_group_service_tree_add()
1272 rb_insert_color(&cfqg->rb_node, &st->rb); in __cfq_group_service_tree_add()
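cfqg_key() at 1244 and the insertion loop at 1248-1272 implement a vdisktime-ordered rbtree: the sort key is the group's vdisktime relative to the tree's min_vdisktime, kept signed so a group lagging behind min_vdisktime sorts to the far left, and the cached leftmost node is tracked in st->left (line 1269). A sketch of just the key computation:

	#include <stdio.h>
	#include <stdint.h>

	/* Sketch of cfqg_key() at 1244: signed distance from min_vdisktime. */
	static int64_t cfqg_key(uint64_t min_vdisktime, uint64_t vdisktime)
	{
		return (int64_t)(vdisktime - min_vdisktime);
	}

	int main(void)
	{
		uint64_t min_vt = 1000;

		printf("%lld\n", (long long)cfqg_key(min_vt, 900));	/* -100: leftmost */
		printf("%lld\n", (long long)cfqg_key(min_vt, 1500));	/*  500 */
		return 0;
	}
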
1279 cfq_update_group_weight(struct cfq_group *cfqg) in cfq_update_group_weight() argument
1281 if (cfqg->new_weight) { in cfq_update_group_weight()
1282 cfqg->weight = cfqg->new_weight; in cfq_update_group_weight()
1283 cfqg->new_weight = 0; in cfq_update_group_weight()
1288 cfq_update_group_leaf_weight(struct cfq_group *cfqg) in cfq_update_group_leaf_weight() argument
1290 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node)); in cfq_update_group_leaf_weight()
1292 if (cfqg->new_leaf_weight) { in cfq_update_group_leaf_weight()
1293 cfqg->leaf_weight = cfqg->new_leaf_weight; in cfq_update_group_leaf_weight()
1294 cfqg->new_leaf_weight = 0; in cfq_update_group_leaf_weight()
1299 cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) in cfq_group_service_tree_add() argument
1302 struct cfq_group *pos = cfqg; in cfq_group_service_tree_add()
1307 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node)); in cfq_group_service_tree_add()
1314 cfq_update_group_leaf_weight(cfqg); in cfq_group_service_tree_add()
1315 __cfq_group_service_tree_add(st, cfqg); in cfq_group_service_tree_add()
1346 cfqg->vfraction = max_t(unsigned, vfr, 1); in cfq_group_service_tree_add()
1350 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg) in cfq_group_notify_queue_add() argument
1356 cfqg->nr_cfqq++; in cfq_group_notify_queue_add()
1357 if (!RB_EMPTY_NODE(&cfqg->rb_node)) in cfq_group_notify_queue_add()
1368 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY; in cfq_group_notify_queue_add()
1370 cfqg->vdisktime = st->min_vdisktime; in cfq_group_notify_queue_add()
1371 cfq_group_service_tree_add(st, cfqg); in cfq_group_notify_queue_add()
1375 cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg) in cfq_group_service_tree_del() argument
1377 struct cfq_group *pos = cfqg; in cfq_group_service_tree_del()
1403 if (!RB_EMPTY_NODE(&cfqg->rb_node)) in cfq_group_service_tree_del()
1404 cfq_rb_erase(&cfqg->rb_node, st); in cfq_group_service_tree_del()
1408 cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg) in cfq_group_notify_queue_del() argument
1412 BUG_ON(cfqg->nr_cfqq < 1); in cfq_group_notify_queue_del()
1413 cfqg->nr_cfqq--; in cfq_group_notify_queue_del()
1416 if (cfqg->nr_cfqq) in cfq_group_notify_queue_del()
1419 cfq_log_cfqg(cfqd, cfqg, "del_from_rr group"); in cfq_group_notify_queue_del()
1420 cfq_group_service_tree_del(st, cfqg); in cfq_group_notify_queue_del()
1421 cfqg->saved_wl_slice = 0; in cfq_group_notify_queue_del()
1422 cfqg_stats_update_dequeue(cfqg); in cfq_group_notify_queue_del()
1457 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, in cfq_group_served() argument
1462 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) in cfq_group_served()
1463 - cfqg->service_tree_idle.count; in cfq_group_served()
1480 vfr = cfqg->vfraction; in cfq_group_served()
1481 cfq_group_service_tree_del(st, cfqg); in cfq_group_served()
1482 cfqg->vdisktime += cfqg_scale_charge(charge, vfr); in cfq_group_served()
1483 cfq_group_service_tree_add(st, cfqg); in cfq_group_served()
1487 cfqg->saved_wl_slice = cfqd->workload_expires in cfq_group_served()
1489 cfqg->saved_wl_type = cfqd->serving_wl_type; in cfq_group_served()
1490 cfqg->saved_wl_class = cfqd->serving_wl_class; in cfq_group_served()
1492 cfqg->saved_wl_slice = 0; in cfq_group_served()
1494 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, in cfq_group_served()
1500 cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl); in cfq_group_served()
1501 cfqg_stats_set_start_empty_time(cfqg); in cfq_group_served()
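cfq_group_served() charges a finished slice by deleting the group from the tree, advancing its vdisktime, and re-adding it (1481-1483). The helper doing the advance, cfqg_scale_charge(), is not among these matches; assuming it divides the charge by vfraction in the same 2^12 fixed point (which is how I read cfq-iosched.c), low-weight groups age faster and therefore wait longer for their next turn:

	#include <stdio.h>
	#include <stdint.h>

	#define CFQ_SERVICE_SHIFT 12

	/* Assumed form of cfqg_scale_charge(): divide real service by the
	 * weight fraction, so lightly weighted groups accrue vdisktime faster. */
	static uint64_t scale_charge(uint64_t charge, unsigned vfraction)
	{
		return (charge << CFQ_SERVICE_SHIFT) / vfraction;
	}

	int main(void)
	{
		/* 100 units of service: a half-share group is charged 200 vtime,
		 * an eighth-share group 800. */
		printf("%llu\n", (unsigned long long)scale_charge(100, 1 << 11));
		printf("%llu\n", (unsigned long long)scale_charge(100, 1 << 9));
		return 0;
	}
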
1511 static void cfq_init_cfqg_base(struct cfq_group *cfqg) in cfq_init_cfqg_base() argument
1516 for_each_cfqg_st(cfqg, i, j, st) in cfq_init_cfqg_base()
1518 RB_CLEAR_NODE(&cfqg->rb_node); in cfq_init_cfqg_base()
1520 cfqg->ttime.last_end_request = jiffies; in cfq_init_cfqg_base()
1549 struct cfq_group *cfqg = blkg_to_cfqg(blkg); in cfq_pd_init() local
1551 cfq_init_cfqg_base(cfqg); in cfq_pd_init()
1552 cfqg->weight = blkg->blkcg->cfq_weight; in cfq_pd_init()
1553 cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight; in cfq_pd_init()
1554 cfqg_stats_init(&cfqg->stats); in cfq_pd_init()
1555 cfqg_stats_init(&cfqg->dead_stats); in cfq_pd_init()
1597 struct cfq_group *cfqg = blkg_to_cfqg(blkg); in cfq_pd_reset_stats() local
1599 cfqg_stats_reset(&cfqg->stats); in cfq_pd_reset_stats()
1600 cfqg_stats_reset(&cfqg->dead_stats); in cfq_pd_reset_stats()
1611 struct cfq_group *cfqg = NULL; in cfq_lookup_create_cfqg() local
1615 cfqg = cfqd->root_group; in cfq_lookup_create_cfqg()
1621 cfqg = blkg_to_cfqg(blkg); in cfq_lookup_create_cfqg()
1624 return cfqg; in cfq_lookup_create_cfqg()
1627 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) in cfq_link_cfqq_cfqg() argument
1631 cfqg = cfqq->cfqd->root_group; in cfq_link_cfqq_cfqg()
1633 cfqq->cfqg = cfqg; in cfq_link_cfqq_cfqg()
1635 cfqg_get(cfqg); in cfq_link_cfqq_cfqg()
1641 struct cfq_group *cfqg = pd_to_cfqg(pd); in cfqg_prfill_weight_device() local
1643 if (!cfqg->dev_weight) in cfqg_prfill_weight_device()
1645 return __blkg_prfill_u64(sf, pd, cfqg->dev_weight); in cfqg_prfill_weight_device()
1659 struct cfq_group *cfqg = pd_to_cfqg(pd); in cfqg_prfill_leaf_weight_device() local
1661 if (!cfqg->dev_leaf_weight) in cfqg_prfill_leaf_weight_device()
1663 return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight); in cfqg_prfill_leaf_weight_device()
1692 struct cfq_group *cfqg; in __cfqg_set_weight_device() local
1700 cfqg = blkg_to_cfqg(ctx.blkg); in __cfqg_set_weight_device()
1703 cfqg->dev_weight = ctx.v; in __cfqg_set_weight_device()
1704 cfqg->new_weight = ctx.v ?: blkcg->cfq_weight; in __cfqg_set_weight_device()
1706 cfqg->dev_leaf_weight = ctx.v; in __cfqg_set_weight_device()
1707 cfqg->new_leaf_weight = ctx.v ?: blkcg->cfq_leaf_weight; in __cfqg_set_weight_device()
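The assignments at 1703-1707 use GCC's two-operand ?: extension: writing a per-device weight of 0 clears the override, so the blkcg-wide default applies. An equivalent standalone demo (compiles with GCC/Clang, which accept the extension just as the kernel does):

	#include <stdio.h>

	/* Sketch of the fallback at 1704: a zero per-device weight means
	 * "no override", so the cgroup-wide weight is used instead. */
	static unsigned effective_weight(unsigned dev_weight, unsigned cfq_weight)
	{
		return dev_weight ?: cfq_weight;
	}

	int main(void)
	{
		printf("%u\n", effective_weight(0, 500));	/* 500: default */
		printf("%u\n", effective_weight(200, 500));	/* 200: override */
		return 0;
	}
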
1745 struct cfq_group *cfqg = blkg_to_cfqg(blkg); in __cfq_set_weight() local
1747 if (!cfqg) in __cfq_set_weight()
1751 if (!cfqg->dev_weight) in __cfq_set_weight()
1752 cfqg->new_weight = blkcg->cfq_weight; in __cfq_set_weight()
1754 if (!cfqg->dev_leaf_weight) in __cfq_set_weight()
1755 cfqg->new_leaf_weight = blkcg->cfq_leaf_weight; in __cfq_set_weight()
1825 struct cfq_group *cfqg = pd_to_cfqg(pd); in cfqg_prfill_avg_queue_size() local
1826 u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples); in cfqg_prfill_avg_queue_size()
1830 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum); in cfqg_prfill_avg_queue_size()
2011 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) { in cfq_link_cfqq_cfqg() argument
2012 cfqq->cfqg = cfqg; in cfq_link_cfqq_cfqg()
2032 st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq)); in cfq_service_tree_add()
2097 cfq_group_notify_queue_add(cfqd, cfqq->cfqg); in cfq_service_tree_add()
2210 cfq_group_notify_queue_del(cfqd, cfqq->cfqg); in cfq_del_cfqq_rr()
2430 cfqg_stats_update_idle_time(cfqq->cfqg); in cfq_del_timer()
2439 cfqg_stats_update_avg_queue_size(cfqq->cfqg); in __cfq_set_active_queue()
2494 cfq_group_served(cfqd, cfqq->cfqg, cfqq); in __cfq_slice_expired()
2540 struct cfq_group *cfqg; in cfq_get_next_queue_forced() local
2548 cfqg = cfq_get_next_cfqg(cfqd); in cfq_get_next_queue_forced()
2549 if (!cfqg) in cfq_get_next_queue_forced()
2552 for_each_cfqg_st(cfqg, i, j, st) in cfq_get_next_queue_forced()
2652 if (cur_cfqq->cfqg->nr_cfqq == 1) in cfq_close_cooperator()
2665 if (cur_cfqq->cfqg != cfqq->cfqg) in cfq_close_cooperator()
2774 if (group_idle && cfqq->cfqg->nr_cfqq > 1) in cfq_arm_slice_timer()
2785 cfqg_stats_set_start_idle_time(cfqq->cfqg); in cfq_arm_slice_timer()
2808 cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags); in cfq_dispatch_insert()
2900 struct cfq_group *cfqg, enum wl_class_t wl_class) in cfq_choose_wl_type() argument
2910 queue = cfq_rb_first(st_for(cfqg, wl_class, i)); in cfq_choose_wl_type()
2923 choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg) in choose_wl_class_and_type() argument
2932 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg)) in choose_wl_class_and_type()
2934 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg)) in choose_wl_class_and_type()
2950 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type); in choose_wl_class_and_type()
2961 cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg, in choose_wl_class_and_type()
2963 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type); in choose_wl_class_and_type()
2971 group_slice = cfq_group_slice(cfqd, cfqg); in choose_wl_class_and_type()
2974 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class], in choose_wl_class_and_type()
2976 cfqg)); in choose_wl_class_and_type()
2989 cfqg_busy_async_queues(cfqd, cfqg); in choose_wl_class_and_type()
3008 struct cfq_group *cfqg; in cfq_get_next_cfqg() local
3012 cfqg = cfq_rb_first_group(st); in cfq_get_next_cfqg()
3014 return cfqg; in cfq_get_next_cfqg()
3019 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd); in cfq_choose_cfqg() local
3021 cfqd->serving_group = cfqg; in cfq_choose_cfqg()
3024 if (cfqg->saved_wl_slice) { in cfq_choose_cfqg()
3025 cfqd->workload_expires = jiffies + cfqg->saved_wl_slice; in cfq_choose_cfqg()
3026 cfqd->serving_wl_type = cfqg->saved_wl_type; in cfq_choose_cfqg()
3027 cfqd->serving_wl_class = cfqg->saved_wl_class; in cfq_choose_cfqg()
3031 choose_wl_class_and_type(cfqd, cfqg); in cfq_choose_cfqg()
3068 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list) in cfq_select_queue()
3127 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 && in cfq_select_queue()
3128 cfqq->cfqg->dispatched && in cfq_select_queue()
3129 !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) { in cfq_select_queue()
3373 struct cfq_group *cfqg; in cfq_put_queue() local
3384 cfqg = cfqq->cfqg; in cfq_put_queue()
3393 cfqg_put(cfqg); in cfq_put_queue()
3586 struct cfq_group *cfqg; in cfq_find_alloc_queue() local
3592 cfqg = cfq_lookup_create_cfqg(cfqd, blkcg); in cfq_find_alloc_queue()
3593 if (!cfqg) { in cfq_find_alloc_queue()
3629 cfq_link_cfqq_cfqg(cfqq, cfqg); in cfq_find_alloc_queue()
3715 __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle); in cfq_update_io_thinktime()
3815 if (new_cfqq->cfqg != cfqq->cfqg) in cfq_should_preempt()
3874 cfqq->cfqg->saved_wl_slice = 0; in cfq_preempt_queue()
3926 cfqg_stats_update_idle_time(cfqq->cfqg); in cfq_rq_enqueued()
4004 if (cfqq->cfqg->nr_cfqq > 1) in cfq_should_wait_busy()
4008 if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) in cfq_should_wait_busy()
4050 cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq), in cfq_completed_request()
4063 st = st_for(cfqq->cfqg, cfqq_class(cfqq), in cfq_completed_request()
4072 cfqq->cfqg->ttime.last_end_request = now; in cfq_completed_request()
4259 cfqg_get(cfqq->cfqg); in cfq_set_request()
4261 rq->elv.priv[1] = cfqq->cfqg; in cfq_set_request()