Lines matching refs: cfqg (block/cfq-iosched.c, the CFQ I/O scheduler's per-cgroup group structure)

153 	struct cfq_group *cfqg;  member
398 static struct cfq_rb_root *st_for(struct cfq_group *cfqg, in st_for() argument
402 if (!cfqg) in st_for()
406 return &cfqg->service_tree_idle; in st_for()
408 return &cfqg->service_trees[class][type]; in st_for()
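
The fragments at lines 398-408 show st_for() selecting a service tree: idle-class I/O shares one per-group tree, while everything else is indexed by workload class and type. A minimal user-space sketch of that selection, with stand-in types and assumed enum values (the real definitions live elsewhere in cfq-iosched.c):

    #include <stddef.h>

    /* Class/type enums as implied by the listing; exact values are an assumption. */
    enum wl_class_t { BE_WORKLOAD = 0, RT_WORKLOAD = 1, IDLE_WORKLOAD = 2 };
    enum wl_type_t  { ASYNC_WORKLOAD = 0, SYNC_NOIDLE_WORKLOAD = 1,
                      SYNC_WORKLOAD = 2, CFQ_WL_NR };

    struct cfq_rb_root { int count; };      /* stand-in for the real rbtree root */

    struct cfq_group {
        /* per-class, per-type trees for RT and BE; IDLE shares a single tree */
        struct cfq_rb_root service_trees[2][CFQ_WL_NR];
        struct cfq_rb_root service_tree_idle;
    };

    static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
                                      enum wl_class_t class, enum wl_type_t type)
    {
        if (!cfqg)
            return NULL;
        if (class == IDLE_WORKLOAD)
            return &cfqg->service_tree_idle;
        return &cfqg->service_trees[class][type];
    }
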
500 static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, in cfqg_stats_set_start_group_wait_time() argument
503 struct cfqg_stats *stats = &cfqg->stats; in cfqg_stats_set_start_group_wait_time()
507 if (cfqg == curr_cfqg) in cfqg_stats_set_start_group_wait_time()
528 static void cfqg_stats_update_dequeue(struct cfq_group *cfqg) in cfqg_stats_update_dequeue() argument
530 blkg_stat_add(&cfqg->stats.dequeue, 1); in cfqg_stats_update_dequeue()
533 static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) in cfqg_stats_set_start_empty_time() argument
535 struct cfqg_stats *stats = &cfqg->stats; in cfqg_stats_set_start_empty_time()
552 static void cfqg_stats_update_idle_time(struct cfq_group *cfqg) in cfqg_stats_update_idle_time() argument
554 struct cfqg_stats *stats = &cfqg->stats; in cfqg_stats_update_idle_time()
566 static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) in cfqg_stats_set_start_idle_time() argument
568 struct cfqg_stats *stats = &cfqg->stats; in cfqg_stats_set_start_idle_time()
576 static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) in cfqg_stats_update_avg_queue_size() argument
578 struct cfqg_stats *stats = &cfqg->stats; in cfqg_stats_update_avg_queue_size()
588 static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *c… in cfqg_stats_set_start_group_wait_time() argument
590 static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { } in cfqg_stats_update_dequeue() argument
591 static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { } in cfqg_stats_set_start_empty_time() argument
592 static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { } in cfqg_stats_update_idle_time() argument
593 static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { } in cfqg_stats_set_start_idle_time() argument
594 static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { } in cfqg_stats_update_avg_queue_size() argument
611 static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg) in cfqg_to_blkg() argument
613 return pd_to_blkg(&cfqg->pd); in cfqg_to_blkg()
628 static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) in cfqg_parent() argument
630 struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent; in cfqg_parent()
635 static inline void cfqg_get(struct cfq_group *cfqg) in cfqg_get() argument
637 return blkg_get(cfqg_to_blkg(cfqg)); in cfqg_get()
640 static inline void cfqg_put(struct cfq_group *cfqg) in cfqg_put() argument
642 return blkg_put(cfqg_to_blkg(cfqg)); in cfqg_put()
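
Lines 611-642 show that a cfq_group carries no reference count of its own: cfqg_to_blkg resolves the owning blkcg_gq through the embedded policy data, cfqg_parent walks up via that blkg, and cfqg_get/cfqg_put simply forward to blkg_get/blkg_put. A user-space sketch of that delegation, with the blk-cgroup types reduced to minimal stand-ins and the refcount modeled as a plain counter:

    /* Minimal stand-ins; the real types live in include/linux/blk-cgroup.h. */
    struct blkcg_gq {
        struct blkcg_gq *parent;
        int refcnt;                     /* models blkg_get()/blkg_put() */
    };

    struct blkg_policy_data {
        struct blkcg_gq *blkg;
    };

    struct cfq_group {
        struct blkg_policy_data pd;     /* per-policy data embedded in the group */
    };

    static struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
    {
        return pd->blkg;
    }

    static struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
    {
        return pd_to_blkg(&cfqg->pd);
    }

    /* No private refcount: the group is pinned and released through its blkg. */
    static void cfqg_get(struct cfq_group *cfqg)
    {
        cfqg_to_blkg(cfqg)->refcnt++;
    }

    static void cfqg_put(struct cfq_group *cfqg)
    {
        cfqg_to_blkg(cfqg)->refcnt--;
    }
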
648 blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf)); \
655 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do { \ argument
658 blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf)); \
662 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg, in cfqg_stats_update_io_add() argument
665 blkg_rwstat_add(&cfqg->stats.queued, rw, 1); in cfqg_stats_update_io_add()
666 cfqg_stats_end_empty_time(&cfqg->stats); in cfqg_stats_update_io_add()
667 cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg); in cfqg_stats_update_io_add()
670 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg, in cfqg_stats_update_timeslice_used() argument
673 blkg_stat_add(&cfqg->stats.time, time); in cfqg_stats_update_timeslice_used()
675 blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time); in cfqg_stats_update_timeslice_used()
679 static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) in cfqg_stats_update_io_remove() argument
681 blkg_rwstat_add(&cfqg->stats.queued, rw, -1); in cfqg_stats_update_io_remove()
684 static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) in cfqg_stats_update_io_merged() argument
686 blkg_rwstat_add(&cfqg->stats.merged, rw, 1); in cfqg_stats_update_io_merged()
689 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg, in cfqg_stats_update_completion() argument
692 struct cfqg_stats *stats = &cfqg->stats; in cfqg_stats_update_completion()
745 static void cfqg_stats_xfer_dead(struct cfq_group *cfqg) in cfqg_stats_xfer_dead() argument
747 struct cfq_group *parent = cfqg_parent(cfqg); in cfqg_stats_xfer_dead()
749 lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock); in cfqg_stats_xfer_dead()
754 cfqg_stats_add_aux(&parent->stats, &cfqg->stats); in cfqg_stats_xfer_dead()
755 cfqg_stats_reset(&cfqg->stats); in cfqg_stats_xfer_dead()
760 static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; } in cfqg_parent() argument
761 static inline void cfqg_get(struct cfq_group *cfqg) { } in cfqg_get() argument
762 static inline void cfqg_put(struct cfq_group *cfqg) { } in cfqg_put() argument
769 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0) argument
771 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg, in cfqg_stats_update_io_add() argument
773 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg, in cfqg_stats_update_timeslice_used() argument
775 static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { } in cfqg_stats_update_io_remove() argument
776 static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { } in cfqg_stats_update_io_merged() argument
777 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg, in cfqg_stats_update_completion() argument
786 #define for_each_cfqg_st(cfqg, i, j, st) \ argument
788 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
789 : &cfqg->service_tree_idle; \
793 &cfqg->service_trees[i][j]: NULL) \
844 struct cfq_group *cfqg) in cfq_group_busy_queues_wl() argument
847 return cfqg->service_tree_idle.count; in cfq_group_busy_queues_wl()
849 return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count + in cfq_group_busy_queues_wl()
850 cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count + in cfq_group_busy_queues_wl()
851 cfqg->service_trees[wl_class][SYNC_WORKLOAD].count; in cfq_group_busy_queues_wl()
855 struct cfq_group *cfqg) in cfqg_busy_async_queues() argument
857 return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count + in cfqg_busy_async_queues()
858 cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count; in cfqg_busy_async_queues()
980 struct cfq_group *cfqg; in update_min_vdisktime() local
983 cfqg = rb_entry_cfqg(st->left); in update_min_vdisktime()
985 cfqg->vdisktime); in update_min_vdisktime()
996 struct cfq_group *cfqg, bool rt) in cfq_group_get_avg_queues() argument
1001 unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg); in cfq_group_get_avg_queues()
1003 min_q = min(cfqg->busy_queues_avg[rt], busy); in cfq_group_get_avg_queues()
1004 max_q = max(cfqg->busy_queues_avg[rt], busy); in cfq_group_get_avg_queues()
1005 cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) / in cfq_group_get_avg_queues()
1007 return cfqg->busy_queues_avg[rt]; in cfq_group_get_avg_queues()
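
Lines 996-1007 compute a damped estimate of how many queues the group keeps busy. The history divisor and part of the update are not visible in the listing, so the sketch below assumes a divisor of 4 and only illustrates the shape of the smoothing: the estimate ramps up quickly when load rises and decays gradually when it drops.

    #include <stdio.h>

    #define CFQ_HIST_DIVISOR 4      /* assumed smoothing divisor for this sketch */

    static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
    static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

    /* Damped busy-queue estimate, following the shape of cfq_group_get_avg_queues(). */
    static unsigned int update_busy_queues_avg(unsigned int *avg, unsigned int busy)
    {
        unsigned int mult  = CFQ_HIST_DIVISOR - 1;
        unsigned int round = CFQ_HIST_DIVISOR / 2;
        unsigned int min_q = min_u(*avg, busy);
        unsigned int max_q = max_u(*avg, busy);

        *avg = (mult * max_q + min_q + round) / CFQ_HIST_DIVISOR;
        return *avg;
    }

    int main(void)
    {
        unsigned int avg = 0;

        printf("%u\n", update_busy_queues_avg(&avg, 8));   /* jumps to 6 */
        printf("%u\n", update_busy_queues_avg(&avg, 0));   /* decays to 5 */
        return 0;
    }
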
1011 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg) in cfq_group_slice() argument
1013 return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT; in cfq_group_slice()
1025 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg, in cfq_scaled_cfqq_slice()
1029 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg); in cfq_scaled_cfqq_slice()
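
Line 1013 gives the group slice directly: the scheduler's target latency scaled by the group's vfraction, its fixed-point share of the hierarchy. A compilable sketch of that arithmetic, assuming CFQ_SERVICE_SHIFT is 12:

    #include <stdio.h>

    #define CFQ_SERVICE_SHIFT 12    /* vfraction is fixed point, 1.0 == 1 << 12 */

    /* Group slice as in cfq_group_slice(): target latency split among groups
     * in proportion to their share of the hierarchy. */
    static unsigned int cfq_group_slice(unsigned int target_latency,
                                        unsigned int vfraction)
    {
        return target_latency * vfraction >> CFQ_SERVICE_SHIFT;
    }

    int main(void)
    {
        /* e.g. 300ms target latency, a group holding 1/4 of the weight */
        unsigned int vfr = (1u << CFQ_SERVICE_SHIFT) / 4;

        printf("slice = %u ms\n", cfq_group_slice(300, vfr));   /* prints 75 */
        return 0;
    }
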
1238 return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) - in cfq_slice_offset()
1243 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg) in cfqg_key() argument
1245 return cfqg->vdisktime - st->min_vdisktime; in cfqg_key()
1249 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) in __cfq_group_service_tree_add() argument
1254 s64 key = cfqg_key(st, cfqg); in __cfq_group_service_tree_add()
1270 st->left = &cfqg->rb_node; in __cfq_group_service_tree_add()
1272 rb_link_node(&cfqg->rb_node, parent, node); in __cfq_group_service_tree_add()
1273 rb_insert_color(&cfqg->rb_node, &st->rb); in __cfq_group_service_tree_add()
1280 cfq_update_group_weight(struct cfq_group *cfqg) in cfq_update_group_weight() argument
1282 if (cfqg->new_weight) { in cfq_update_group_weight()
1283 cfqg->weight = cfqg->new_weight; in cfq_update_group_weight()
1284 cfqg->new_weight = 0; in cfq_update_group_weight()
1289 cfq_update_group_leaf_weight(struct cfq_group *cfqg) in cfq_update_group_leaf_weight() argument
1291 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node)); in cfq_update_group_leaf_weight()
1293 if (cfqg->new_leaf_weight) { in cfq_update_group_leaf_weight()
1294 cfqg->leaf_weight = cfqg->new_leaf_weight; in cfq_update_group_leaf_weight()
1295 cfqg->new_leaf_weight = 0; in cfq_update_group_leaf_weight()
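
Lines 1280-1295 show that user-visible weight changes are staged in new_weight/new_leaf_weight and folded into the effective weight only while the group is off its service tree (the BUG_ON at line 1291 in the leaf-weight variant asserts the rb_node is unlinked), so an in-tree group's sort key never changes underneath the rbtree. A minimal sketch of that staged-update pattern, with tree membership reduced to a boolean:

    #include <assert.h>
    #include <stdbool.h>

    /* Staged weight update: writers park the new value; it takes effect only
     * when the group is (re)inserted into the service tree. */
    struct group {
        bool on_tree;
        unsigned int weight;
        unsigned int new_weight;        /* 0 means "no pending change" */
    };

    static void group_set_weight(struct group *g, unsigned int w)
    {
        g->new_weight = w;              /* never touches the live sort key */
    }

    static void group_apply_pending_weight(struct group *g)
    {
        assert(!g->on_tree);            /* mirrors the BUG_ON in the listing */
        if (g->new_weight) {
            g->weight = g->new_weight;
            g->new_weight = 0;
        }
    }
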
1300 cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) in cfq_group_service_tree_add() argument
1303 struct cfq_group *pos = cfqg; in cfq_group_service_tree_add()
1308 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node)); in cfq_group_service_tree_add()
1315 cfq_update_group_leaf_weight(cfqg); in cfq_group_service_tree_add()
1316 __cfq_group_service_tree_add(st, cfqg); in cfq_group_service_tree_add()
1347 cfqg->vfraction = max_t(unsigned, vfr, 1); in cfq_group_service_tree_add()
1351 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg) in cfq_group_notify_queue_add() argument
1357 cfqg->nr_cfqq++; in cfq_group_notify_queue_add()
1358 if (!RB_EMPTY_NODE(&cfqg->rb_node)) in cfq_group_notify_queue_add()
1369 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY; in cfq_group_notify_queue_add()
1371 cfqg->vdisktime = st->min_vdisktime; in cfq_group_notify_queue_add()
1372 cfq_group_service_tree_add(st, cfqg); in cfq_group_notify_queue_add()
1376 cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg) in cfq_group_service_tree_del() argument
1378 struct cfq_group *pos = cfqg; in cfq_group_service_tree_del()
1404 if (!RB_EMPTY_NODE(&cfqg->rb_node)) in cfq_group_service_tree_del()
1405 cfq_rb_erase(&cfqg->rb_node, st); in cfq_group_service_tree_del()
1409 cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg) in cfq_group_notify_queue_del() argument
1413 BUG_ON(cfqg->nr_cfqq < 1); in cfq_group_notify_queue_del()
1414 cfqg->nr_cfqq--; in cfq_group_notify_queue_del()
1417 if (cfqg->nr_cfqq) in cfq_group_notify_queue_del()
1420 cfq_log_cfqg(cfqd, cfqg, "del_from_rr group"); in cfq_group_notify_queue_del()
1421 cfq_group_service_tree_del(st, cfqg); in cfq_group_notify_queue_del()
1422 cfqg->saved_wl_slice = 0; in cfq_group_notify_queue_del()
1423 cfqg_stats_update_dequeue(cfqg); in cfq_group_notify_queue_del()
1458 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, in cfq_group_served() argument
1463 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) in cfq_group_served()
1464 - cfqg->service_tree_idle.count; in cfq_group_served()
1481 vfr = cfqg->vfraction; in cfq_group_served()
1482 cfq_group_service_tree_del(st, cfqg); in cfq_group_served()
1483 cfqg->vdisktime += cfqg_scale_charge(charge, vfr); in cfq_group_served()
1484 cfq_group_service_tree_add(st, cfqg); in cfq_group_served()
1488 cfqg->saved_wl_slice = cfqd->workload_expires in cfq_group_served()
1490 cfqg->saved_wl_type = cfqd->serving_wl_type; in cfq_group_served()
1491 cfqg->saved_wl_class = cfqd->serving_wl_class; in cfq_group_served()
1493 cfqg->saved_wl_slice = 0; in cfq_group_served()
1495 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, in cfq_group_served()
1501 cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl); in cfq_group_served()
1502 cfqg_stats_set_start_empty_time(cfqg); in cfq_group_served()
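
Lines 1458-1502 show what happens when a group is served: it is pulled off the service tree, its vdisktime is advanced by the charge scaled against its vfraction, and it is reinserted at the new position; the saved_wl_* fields preserve the in-progress workload slice for when the group is selected again. The scaling itself (cfqg_scale_charge at line 1483) is not expanded in the listing, so the sketch below only models the intended effect: a group owning a smaller share pays proportionally more virtual time for the same real-time charge.

    #include <stdint.h>
    #include <stdio.h>

    #define CFQ_SERVICE_SHIFT 12    /* vdisktime and vfraction as 12-bit fixed point */

    /* Scale a real-time charge into vdisktime units by dividing by the group's
     * fixed-point share of the hierarchy. */
    static uint64_t scale_charge(uint64_t charge, unsigned int vfraction)
    {
        uint64_t c = charge << CFQ_SERVICE_SHIFT;   /* to fixed point */

        c <<= CFQ_SERVICE_SHIFT;                    /* divide by a fixed-point fraction */
        return c / vfraction;
    }

    int main(void)
    {
        unsigned int half   = (1u << CFQ_SERVICE_SHIFT) / 2;
        unsigned int eighth = (1u << CFQ_SERVICE_SHIFT) / 8;

        /* the same 10 units of service cost the 1/8 group four times as much vtime */
        printf("%llu vs %llu\n",
               (unsigned long long)scale_charge(10, half),
               (unsigned long long)scale_charge(10, eighth));
        return 0;
    }
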
1512 static void cfq_init_cfqg_base(struct cfq_group *cfqg) in cfq_init_cfqg_base() argument
1517 for_each_cfqg_st(cfqg, i, j, st) in cfq_init_cfqg_base()
1519 RB_CLEAR_NODE(&cfqg->rb_node); in cfq_init_cfqg_base()
1521 cfqg->ttime.last_end_request = jiffies; in cfq_init_cfqg_base()
1614 struct cfq_group *cfqg; in cfq_pd_alloc() local
1616 cfqg = kzalloc_node(sizeof(*cfqg), gfp, node); in cfq_pd_alloc()
1617 if (!cfqg) in cfq_pd_alloc()
1620 cfq_init_cfqg_base(cfqg); in cfq_pd_alloc()
1621 if (cfqg_stats_init(&cfqg->stats, gfp)) { in cfq_pd_alloc()
1622 kfree(cfqg); in cfq_pd_alloc()
1626 return &cfqg->pd; in cfq_pd_alloc()
1631 struct cfq_group *cfqg = pd_to_cfqg(pd); in cfq_pd_init() local
1634 cfqg->weight = cgd->weight; in cfq_pd_init()
1635 cfqg->leaf_weight = cgd->leaf_weight; in cfq_pd_init()
1640 struct cfq_group *cfqg = pd_to_cfqg(pd); in cfq_pd_offline() local
1644 if (cfqg->async_cfqq[0][i]) in cfq_pd_offline()
1645 cfq_put_queue(cfqg->async_cfqq[0][i]); in cfq_pd_offline()
1646 if (cfqg->async_cfqq[1][i]) in cfq_pd_offline()
1647 cfq_put_queue(cfqg->async_cfqq[1][i]); in cfq_pd_offline()
1650 if (cfqg->async_idle_cfqq) in cfq_pd_offline()
1651 cfq_put_queue(cfqg->async_idle_cfqq); in cfq_pd_offline()
1659 cfqg_stats_xfer_dead(cfqg); in cfq_pd_offline()
1664 struct cfq_group *cfqg = pd_to_cfqg(pd); in cfq_pd_free() local
1666 cfqg_stats_exit(&cfqg->stats); in cfq_pd_free()
1667 return kfree(cfqg); in cfq_pd_free()
1672 struct cfq_group *cfqg = pd_to_cfqg(pd); in cfq_pd_reset_stats() local
1674 cfqg_stats_reset(&cfqg->stats); in cfq_pd_reset_stats()
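
Lines 1614-1674 trace the blkcg policy-data lifecycle of a group: cfq_pd_alloc allocates the group and sets up its base state and stats (freeing the group if the stats allocation fails), cfq_pd_init copies the cgroup's configured weights in, cfq_pd_offline releases the cached async queues and transfers stats to the parent, and cfq_pd_free tears down the stats and frees the group. A small user-space sketch of the allocate-with-rollback and teardown pairing, with the stats reduced to a plain buffer:

    #include <stdlib.h>

    struct stats { long *buf; };

    static int stats_init(struct stats *st)
    {
        st->buf = calloc(64, sizeof(*st->buf));
        return st->buf ? 0 : -1;
    }

    static void stats_exit(struct stats *st)
    {
        free(st->buf);
    }

    struct group {
        struct stats stats;
        unsigned int weight;
    };

    /* Mirrors the cfq_pd_alloc() shape: zero the group, then roll the whole
     * allocation back if the embedded stats cannot be set up. */
    static struct group *group_alloc(void)
    {
        struct group *g = calloc(1, sizeof(*g));

        if (!g)
            return NULL;
        if (stats_init(&g->stats)) {
            free(g);
            return NULL;
        }
        return g;
    }

    /* Mirrors the cfq_pd_free() pairing: stats first, then the group itself. */
    static void group_free(struct group *g)
    {
        stats_exit(&g->stats);
        free(g);
    }
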
1688 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) in cfq_link_cfqq_cfqg() argument
1690 cfqq->cfqg = cfqg; in cfq_link_cfqq_cfqg()
1692 cfqg_get(cfqg); in cfq_link_cfqq_cfqg()
1698 struct cfq_group *cfqg = pd_to_cfqg(pd); in cfqg_prfill_weight_device() local
1700 if (!cfqg->dev_weight) in cfqg_prfill_weight_device()
1702 return __blkg_prfill_u64(sf, pd, cfqg->dev_weight); in cfqg_prfill_weight_device()
1716 struct cfq_group *cfqg = pd_to_cfqg(pd); in cfqg_prfill_leaf_weight_device() local
1718 if (!cfqg->dev_leaf_weight) in cfqg_prfill_leaf_weight_device()
1720 return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight); in cfqg_prfill_leaf_weight_device()
1765 struct cfq_group *cfqg; in __cfqg_set_weight_device() local
1786 cfqg = blkg_to_cfqg(ctx.blkg); in __cfqg_set_weight_device()
1792 cfqg->dev_weight = v; in __cfqg_set_weight_device()
1793 cfqg->new_weight = v ?: cfqgd->weight; in __cfqg_set_weight_device()
1795 cfqg->dev_leaf_weight = v; in __cfqg_set_weight_device()
1796 cfqg->new_leaf_weight = v ?: cfqgd->leaf_weight; in __cfqg_set_weight_device()
1843 struct cfq_group *cfqg = blkg_to_cfqg(blkg); in __cfq_set_weight() local
1845 if (!cfqg) in __cfq_set_weight()
1850 cfqg->dev_weight = 0; in __cfq_set_weight()
1851 if (!cfqg->dev_weight) in __cfq_set_weight()
1852 cfqg->new_weight = cfqgd->weight; in __cfq_set_weight()
1855 cfqg->dev_leaf_weight = 0; in __cfq_set_weight()
1856 if (!cfqg->dev_leaf_weight) in __cfq_set_weight()
1857 cfqg->new_leaf_weight = cfqgd->leaf_weight; in __cfq_set_weight()
1962 struct cfq_group *cfqg = pd_to_cfqg(pd); in cfqg_prfill_avg_queue_size() local
1963 u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples); in cfqg_prfill_avg_queue_size()
1967 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum); in cfqg_prfill_avg_queue_size()
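
Lines 1962-1967 show that the exported avg_queue_size is not maintained incrementally: cfqg_stats_update_avg_queue_size (line 576) accumulates the instantaneous queue depth into a sum plus a sample count, and the average is computed only when the stat is read. A sketch of that sampled-average pattern, using hypothetical helper names:

    #include <stdint.h>
    #include <stdio.h>

    /* Accumulate samples on the hot path; divide only at read time. */
    struct avg_stat {
        uint64_t sum;
        uint64_t samples;
    };

    static void avg_stat_sample(struct avg_stat *s, uint64_t value)
    {
        s->sum += value;
        s->samples++;
    }

    static uint64_t avg_stat_read(const struct avg_stat *s)
    {
        return s->samples ? s->sum / s->samples : 0;
    }

    int main(void)
    {
        struct avg_stat qs = { 0, 0 };

        avg_stat_sample(&qs, 3);
        avg_stat_sample(&qs, 5);
        printf("avg queue size = %llu\n", (unsigned long long)avg_stat_read(&qs));
        return 0;
    }
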
2188 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) { in cfq_link_cfqq_cfqg() argument
2189 cfqq->cfqg = cfqg; in cfq_link_cfqq_cfqg()
2209 st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq)); in cfq_service_tree_add()
2274 cfq_group_notify_queue_add(cfqd, cfqq->cfqg); in cfq_service_tree_add()
2387 cfq_group_notify_queue_del(cfqd, cfqq->cfqg); in cfq_del_cfqq_rr()
2607 cfqg_stats_update_idle_time(cfqq->cfqg); in cfq_del_timer()
2616 cfqg_stats_update_avg_queue_size(cfqq->cfqg); in __cfq_set_active_queue()
2671 cfq_group_served(cfqd, cfqq->cfqg, cfqq); in __cfq_slice_expired()
2717 struct cfq_group *cfqg; in cfq_get_next_queue_forced() local
2725 cfqg = cfq_get_next_cfqg(cfqd); in cfq_get_next_queue_forced()
2726 if (!cfqg) in cfq_get_next_queue_forced()
2729 for_each_cfqg_st(cfqg, i, j, st) in cfq_get_next_queue_forced()
2829 if (cur_cfqq->cfqg->nr_cfqq == 1) in cfq_close_cooperator()
2842 if (cur_cfqq->cfqg != cfqq->cfqg) in cfq_close_cooperator()
2951 if (group_idle && cfqq->cfqg->nr_cfqq > 1) in cfq_arm_slice_timer()
2962 cfqg_stats_set_start_idle_time(cfqq->cfqg); in cfq_arm_slice_timer()
3076 struct cfq_group *cfqg, enum wl_class_t wl_class) in cfq_choose_wl_type() argument
3086 queue = cfq_rb_first(st_for(cfqg, wl_class, i)); in cfq_choose_wl_type()
3099 choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg) in choose_wl_class_and_type() argument
3108 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg)) in choose_wl_class_and_type()
3110 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg)) in choose_wl_class_and_type()
3126 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type); in choose_wl_class_and_type()
3137 cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg, in choose_wl_class_and_type()
3139 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type); in choose_wl_class_and_type()
3147 group_slice = cfq_group_slice(cfqd, cfqg); in choose_wl_class_and_type()
3150 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class], in choose_wl_class_and_type()
3152 cfqg)); in choose_wl_class_and_type()
3165 cfqg_busy_async_queues(cfqd, cfqg); in choose_wl_class_and_type()
3184 struct cfq_group *cfqg; in cfq_get_next_cfqg() local
3188 cfqg = cfq_rb_first_group(st); in cfq_get_next_cfqg()
3190 return cfqg; in cfq_get_next_cfqg()
3195 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd); in cfq_choose_cfqg() local
3197 cfqd->serving_group = cfqg; in cfq_choose_cfqg()
3200 if (cfqg->saved_wl_slice) { in cfq_choose_cfqg()
3201 cfqd->workload_expires = jiffies + cfqg->saved_wl_slice; in cfq_choose_cfqg()
3202 cfqd->serving_wl_type = cfqg->saved_wl_type; in cfq_choose_cfqg()
3203 cfqd->serving_wl_class = cfqg->saved_wl_class; in cfq_choose_cfqg()
3207 choose_wl_class_and_type(cfqd, cfqg); in cfq_choose_cfqg()
3244 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list) in cfq_select_queue()
3303 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 && in cfq_select_queue()
3304 cfqq->cfqg->dispatched && in cfq_select_queue()
3305 !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) { in cfq_select_queue()
3549 struct cfq_group *cfqg; in cfq_put_queue() local
3560 cfqg = cfqq->cfqg; in cfq_put_queue()
3569 cfqg_put(cfqg); in cfq_put_queue()
3760 cfq_async_queue_prio(struct cfq_group *cfqg, int ioprio_class, int ioprio) in cfq_async_queue_prio() argument
3764 return &cfqg->async_cfqq[0][ioprio]; in cfq_async_queue_prio()
3769 return &cfqg->async_cfqq[1][ioprio]; in cfq_async_queue_prio()
3771 return &cfqg->async_idle_cfqq; in cfq_async_queue_prio()
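
Lines 3760-3771 show the per-group slots for shared async queues: a two-row array indexed by priority level for the realtime and best-effort classes, and a single slot for the idle class. A sketch of that layout; the class-to-row mapping and the IOPRIO_BE_NR value are assumptions of the sketch, not visible in the listing:

    #define IOPRIO_BE_NR 8          /* RT/BE priority levels, per the Linux ioprio ABI */

    enum { IOPRIO_CLASS_RT = 1, IOPRIO_CLASS_BE = 2, IOPRIO_CLASS_IDLE = 3 };

    struct cfq_queue;

    /* Per-group shared async queues: one slot per (class, priority) for RT and
     * BE, and a single slot for the idle class. */
    struct async_slots {
        struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];  /* [0]: RT, [1]: BE (assumed order) */
        struct cfq_queue *async_idle_cfqq;
    };

    static struct cfq_queue **async_queue_slot(struct async_slots *s,
                                               int ioprio_class, int ioprio)
    {
        if (ioprio_class == IOPRIO_CLASS_RT)
            return &s->async_cfqq[0][ioprio];
        if (ioprio_class == IOPRIO_CLASS_BE)
            return &s->async_cfqq[1][ioprio];
        return &s->async_idle_cfqq;     /* idle class shares one queue */
    }
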
3785 struct cfq_group *cfqg; in cfq_get_queue() local
3788 cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio)); in cfq_get_queue()
3789 if (!cfqg) { in cfq_get_queue()
3800 async_cfqq = cfq_async_queue_prio(cfqg, ioprio_class, ioprio); in cfq_get_queue()
3815 cfq_link_cfqq_cfqg(cfqq, cfqg); in cfq_get_queue()
3850 __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle); in cfq_update_io_thinktime()
3950 if (new_cfqq->cfqg != cfqq->cfqg) in cfq_should_preempt()
4009 cfqq->cfqg->saved_wl_slice = 0; in cfq_preempt_queue()
4061 cfqg_stats_update_idle_time(cfqq->cfqg); in cfq_rq_enqueued()
4139 if (cfqq->cfqg->nr_cfqq > 1) in cfq_should_wait_busy()
4143 if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) in cfq_should_wait_busy()
4185 cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq), in cfq_completed_request()
4198 st = st_for(cfqq->cfqg, cfqq_class(cfqq), in cfq_completed_request()
4207 cfqq->cfqg->ttime.last_end_request = now; in cfq_completed_request()
4394 cfqg_get(cfqq->cfqg); in cfq_set_request()
4396 rq->elv.priv[1] = cfqq->cfqg; in cfq_set_request()