blkg              289 block/bfq-cgroup.c static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
blkg              291 block/bfq-cgroup.c 	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
blkg              525 block/bfq-cgroup.c 	struct blkcg_gq *blkg = pd_to_blkg(pd);
blkg              526 block/bfq-cgroup.c 	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
blkg              527 block/bfq-cgroup.c 	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
blkg              529 block/bfq-cgroup.c 	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);
blkg              570 block/bfq-cgroup.c 	struct blkcg_gq *blkg;
blkg              572 block/bfq-cgroup.c 	blkg = blkg_lookup(blkcg, bfqd->queue);
blkg              573 block/bfq-cgroup.c 	if (likely(blkg))
blkg              574 block/bfq-cgroup.c 		return blkg_to_bfqg(blkg);
blkg              924 block/bfq-cgroup.c 	struct blkcg_gq *blkg;
blkg              926 block/bfq-cgroup.c 	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
blkg              927 block/bfq-cgroup.c 		struct bfq_group *bfqg = blkg_to_bfqg(blkg);
blkg             1008 block/bfq-cgroup.c 	struct blkcg_gq *blkg;
blkg             1017 block/bfq-cgroup.c 	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
blkg             1018 block/bfq-cgroup.c 		struct bfq_group *bfqg = blkg_to_bfqg(blkg);
blkg             1054 block/bfq-cgroup.c 	bfqg = blkg_to_bfqg(ctx.blkg);
blkg             1104 block/bfq-cgroup.c 	struct blkcg_gq *blkg = pd_to_blkg(pd);
blkg             1109 block/bfq-cgroup.c 	lockdep_assert_held(&blkg->q->queue_lock);
blkg             1112 block/bfq-cgroup.c 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
blkg             1154 block/bfq-cgroup.c 	u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
blkg             1171 block/bfq-cgroup.c 	blkg_rwstat_recursive_sum(pd->blkg, NULL,
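
The bfq-cgroup.c hits above are dominated by one idiom: blkg_to_bfqg() indexes the per-policy slot (blkg_to_pd()) and then recovers the enclosing policy group from the embedded policy data (pd_to_bfqg(), a container_of()). A minimal userspace sketch of that round trip, with simplified stand-in structs rather than the kernel definitions:

#include <stddef.h>
#include <stdio.h>

#define MAX_POLS 4

struct gq;                          /* models struct blkcg_gq */

struct pd {                         /* models struct blkg_policy_data */
	struct gq *blkg;            /* back-pointer, as in pd->blkg */
	int plid;                   /* which policy slot this is */
};

struct gq {
	struct pd *pd[MAX_POLS];    /* one slot per registered policy */
};

struct bfq_like_group {             /* a policy's private per-group state */
	struct pd pd;               /* embedded, so container_of() works */
	int weight;
};

/* blkg_to_pd(): index the per-policy slot, NULL-safe */
static struct pd *gq_to_pd(struct gq *blkg, int plid)
{
	return blkg ? blkg->pd[plid] : NULL;
}

/* pd_to_bfqg(): recover the enclosing group from the embedded pd */
static struct bfq_like_group *pd_to_group(struct pd *pd)
{
	return pd ? (struct bfq_like_group *)
		((char *)pd - offsetof(struct bfq_like_group, pd)) : NULL;
}

int main(void)
{
	struct bfq_like_group g = { .pd = { .plid = 0 }, .weight = 100 };
	struct gq blkg = { .pd = { [0] = &g.pd } };

	g.pd.blkg = &blkg;              /* pd->blkg back-pointer */
	printf("weight=%d\n", pd_to_group(gq_to_pd(&blkg, 0))->weight);
	return 0;
}

The same shape recurs for every policy in this listing: blkg_to_iocg(), blkg_to_lat() and blkg_to_tg() below differ only in the enclosing struct.
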
blkg             2082 block/bio.c    static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
blkg             2086 block/bio.c    	bio->bi_blkg = blkg_tryget_closest(blkg);
blkg             2102 block/bio.c    	struct blkcg_gq *blkg;
blkg             2107 block/bio.c    		blkg = q->root_blkg;
blkg             2109 block/bio.c    		blkg = blkg_lookup_create(css_to_blkcg(css), q);
blkg             2111 block/bio.c    	__bio_associate_blkg(bio, blkg);
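
Note what bio.c:2086 actually pins: not necessarily the group that was looked up, but the closest ancestor whose refcount is still live, via blkg_tryget_closest(). A compilable userspace model of that walk, using a plain C11 atomic counter (0 == dead) in place of the kernel's percpu_ref:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct node {
	struct node *parent;
	atomic_int refcnt;          /* stand-in for percpu_ref; 0 == dead */
};

/* blkg_tryget(): succeed only while the count is still nonzero */
static bool node_tryget(struct node *n)
{
	int old = atomic_load(&n->refcnt);

	while (old > 0)
		if (atomic_compare_exchange_weak(&n->refcnt, &old, old + 1))
			return true;
	return false;
}

/* blkg_tryget_closest(): pin the nearest live ancestor, or NULL */
static struct node *tryget_closest(struct node *n)
{
	for (; n; n = n->parent)
		if (node_tryget(n))
			return n;
	return NULL;
}

If every ancestor up to and including the root is dying, the walk returns NULL and the bio is left unassociated.
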
blkg               72 block/blk-cgroup.c static void blkg_free(struct blkcg_gq *blkg)
blkg               76 block/blk-cgroup.c 	if (!blkg)
blkg               80 block/blk-cgroup.c 		if (blkg->pd[i])
blkg               81 block/blk-cgroup.c 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
blkg               83 block/blk-cgroup.c 	blkg_rwstat_exit(&blkg->stat_ios);
blkg               84 block/blk-cgroup.c 	blkg_rwstat_exit(&blkg->stat_bytes);
blkg               85 block/blk-cgroup.c 	percpu_ref_exit(&blkg->refcnt);
blkg               86 block/blk-cgroup.c 	kfree(blkg);
blkg               91 block/blk-cgroup.c 	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
blkg               93 block/blk-cgroup.c 	WARN_ON(!bio_list_empty(&blkg->async_bios));
blkg               96 block/blk-cgroup.c 	css_put(&blkg->blkcg->css);
blkg               97 block/blk-cgroup.c 	if (blkg->parent)
blkg               98 block/blk-cgroup.c 		blkg_put(blkg->parent);
blkg              100 block/blk-cgroup.c 	wb_congested_put(blkg->wb_congested);
blkg              102 block/blk-cgroup.c 	blkg_free(blkg);
blkg              115 block/blk-cgroup.c 	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
blkg              117 block/blk-cgroup.c 	call_rcu(&blkg->rcu_head, __blkg_release);
blkg              122 block/blk-cgroup.c 	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
blkg              128 block/blk-cgroup.c 	spin_lock_bh(&blkg->async_bio_lock);
blkg              129 block/blk-cgroup.c 	bio_list_merge(&bios, &blkg->async_bios);
blkg              130 block/blk-cgroup.c 	bio_list_init(&blkg->async_bios);
blkg              131 block/blk-cgroup.c 	spin_unlock_bh(&blkg->async_bio_lock);
blkg              148 block/blk-cgroup.c 	struct blkcg_gq *blkg;
blkg              152 block/blk-cgroup.c 	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
blkg              153 block/blk-cgroup.c 	if (!blkg)
blkg              156 block/blk-cgroup.c 	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
blkg              159 block/blk-cgroup.c 	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
blkg              160 block/blk-cgroup.c 	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
blkg              163 block/blk-cgroup.c 	blkg->q = q;
blkg              164 block/blk-cgroup.c 	INIT_LIST_HEAD(&blkg->q_node);
blkg              165 block/blk-cgroup.c 	spin_lock_init(&blkg->async_bio_lock);
blkg              166 block/blk-cgroup.c 	bio_list_init(&blkg->async_bios);
blkg              167 block/blk-cgroup.c 	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
blkg              168 block/blk-cgroup.c 	blkg->blkcg = blkcg;
blkg              182 block/blk-cgroup.c 		blkg->pd[i] = pd;
blkg              183 block/blk-cgroup.c 		pd->blkg = blkg;
blkg              187 block/blk-cgroup.c 	return blkg;
blkg              190 block/blk-cgroup.c 	blkg_free(blkg);
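
blkg_alloc() (blk-cgroup.c:148-190 above) is a textbook allocate-and-unwind sequence: each failing step jumps to a label that hands the partially built object to blkg_free(), which is written NULL-safe so it can tear down any prefix of the initialization. The generic shape, as a small userspace sketch with made-up field names:

#include <stdlib.h>

struct widget {                     /* made-up fields, not a kernel struct */
	int *a;
	int *b;
};

/* NULL-safe teardown, like blkg_free(): any prefix of init is undone */
static void widget_free(struct widget *w)
{
	if (!w)
		return;
	free(w->b);
	free(w->a);
	free(w);
}

static struct widget *widget_alloc(void)
{
	struct widget *w = calloc(1, sizeof(*w));

	if (!w)
		return NULL;
	if (!(w->a = malloc(64)))
		goto err_free;
	if (!(w->b = malloc(64)))
		goto err_free;
	return w;

err_free:                           /* mirrors blkg_alloc()'s error path */
	widget_free(w);
	return NULL;
}
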
blkg              197 block/blk-cgroup.c 	struct blkcg_gq *blkg;
blkg              205 block/blk-cgroup.c 	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
blkg              206 block/blk-cgroup.c 	if (blkg && blkg->q == q) {
blkg              209 block/blk-cgroup.c 			rcu_assign_pointer(blkcg->blkg_hint, blkg);
blkg              211 block/blk-cgroup.c 		return blkg;
blkg              226 block/blk-cgroup.c 	struct blkcg_gq *blkg;
blkg              261 block/blk-cgroup.c 	blkg = new_blkg;
blkg              262 block/blk-cgroup.c 	blkg->wb_congested = wb_congested;
blkg              266 block/blk-cgroup.c 		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
blkg              267 block/blk-cgroup.c 		if (WARN_ON_ONCE(!blkg->parent)) {
blkg              271 block/blk-cgroup.c 		blkg_get(blkg->parent);
blkg              278 block/blk-cgroup.c 		if (blkg->pd[i] && pol->pd_init_fn)
blkg              279 block/blk-cgroup.c 			pol->pd_init_fn(blkg->pd[i]);
blkg              284 block/blk-cgroup.c 	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
blkg              286 block/blk-cgroup.c 		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
blkg              287 block/blk-cgroup.c 		list_add(&blkg->q_node, &q->blkg_list);
blkg              292 block/blk-cgroup.c 			if (blkg->pd[i] && pol->pd_online_fn)
blkg              293 block/blk-cgroup.c 				pol->pd_online_fn(blkg->pd[i]);
blkg              296 block/blk-cgroup.c 	blkg->online = true;
blkg              300 block/blk-cgroup.c 		return blkg;
blkg              303 block/blk-cgroup.c 	blkg_put(blkg);
blkg              331 block/blk-cgroup.c 	struct blkcg_gq *blkg;
blkg              336 block/blk-cgroup.c 	blkg = __blkg_lookup(blkcg, q, true);
blkg              337 block/blk-cgroup.c 	if (blkg)
blkg              338 block/blk-cgroup.c 		return blkg;
blkg              351 block/blk-cgroup.c 			blkg = __blkg_lookup(parent, q, false);
blkg              352 block/blk-cgroup.c 			if (blkg) {
blkg              354 block/blk-cgroup.c 				ret_blkg = blkg;
blkg              361 block/blk-cgroup.c 		blkg = blkg_create(pos, q, NULL);
blkg              362 block/blk-cgroup.c 		if (IS_ERR(blkg))
blkg              365 block/blk-cgroup.c 			return blkg;
blkg              380 block/blk-cgroup.c 	struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
blkg              382 block/blk-cgroup.c 	if (unlikely(!blkg)) {
blkg              386 block/blk-cgroup.c 		blkg = __blkg_lookup_create(blkcg, q);
blkg              390 block/blk-cgroup.c 	return blkg;
blkg              393 block/blk-cgroup.c static void blkg_destroy(struct blkcg_gq *blkg)
blkg              395 block/blk-cgroup.c 	struct blkcg *blkcg = blkg->blkcg;
blkg              396 block/blk-cgroup.c 	struct blkcg_gq *parent = blkg->parent;
blkg              399 block/blk-cgroup.c 	lockdep_assert_held(&blkg->q->queue_lock);
blkg              403 block/blk-cgroup.c 	WARN_ON_ONCE(list_empty(&blkg->q_node));
blkg              404 block/blk-cgroup.c 	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
blkg              409 block/blk-cgroup.c 		if (blkg->pd[i] && pol->pd_offline_fn)
blkg              410 block/blk-cgroup.c 			pol->pd_offline_fn(blkg->pd[i]);
blkg              414 block/blk-cgroup.c 		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
blkg              415 block/blk-cgroup.c 		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
blkg              418 block/blk-cgroup.c 	blkg->online = false;
blkg              420 block/blk-cgroup.c 	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
blkg              421 block/blk-cgroup.c 	list_del_init(&blkg->q_node);
blkg              422 block/blk-cgroup.c 	hlist_del_init_rcu(&blkg->blkcg_node);
blkg              429 block/blk-cgroup.c 	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
blkg              436 block/blk-cgroup.c 	percpu_ref_kill(&blkg->refcnt);
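
blkg_destroy() (blk-cgroup.c:393-436) separates unlinking from freeing: the object is removed from the radix tree and both lists while the queue lock is held, then percpu_ref_kill() drops the initial reference; whichever put lands last triggers the RCU-deferred blkg_free() seen earlier. A simplified model of the last-put-frees discipline, with a plain atomic instead of percpu_ref and no RCU grace period:

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;          /* 1 at creation: the "initial" ref */
};

static void obj_put(struct obj *o)
{
	/* the last put frees; in the kernel the percpu_ref release
	 * callback runs here and defers the free through call_rcu() */
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);
}

static void obj_destroy(struct obj *o)
{
	/* 1. unlink from every lookup structure under the lock, so no
	 *    new reference can be taken (radix tree, q_node, blkcg_node)
	 * 2. drop the initial reference; concurrent holders keep the
	 *    memory alive until their own puts land */
	obj_put(o);                 /* stand-in for percpu_ref_kill() */
}
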
blkg              447 block/blk-cgroup.c 	struct blkcg_gq *blkg, *n;
blkg              450 block/blk-cgroup.c 	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
blkg              451 block/blk-cgroup.c 		struct blkcg *blkcg = blkg->blkcg;
blkg              454 block/blk-cgroup.c 		blkg_destroy(blkg);
blkg              466 block/blk-cgroup.c 	struct blkcg_gq *blkg;
blkg              477 block/blk-cgroup.c 	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
blkg              478 block/blk-cgroup.c 		blkg_rwstat_reset(&blkg->stat_bytes);
blkg              479 block/blk-cgroup.c 		blkg_rwstat_reset(&blkg->stat_ios);
blkg              484 block/blk-cgroup.c 			if (blkg->pd[i] && pol->pd_reset_stats_fn)
blkg              485 block/blk-cgroup.c 				pol->pd_reset_stats_fn(blkg->pd[i]);
blkg              494 block/blk-cgroup.c const char *blkg_dev_name(struct blkcg_gq *blkg)
blkg              497 block/blk-cgroup.c 	if (blkg->q->backing_dev_info->dev)
blkg              498 block/blk-cgroup.c 		return dev_name(blkg->q->backing_dev_info->dev);
blkg              526 block/blk-cgroup.c 	struct blkcg_gq *blkg;
blkg              530 block/blk-cgroup.c 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
blkg              531 block/blk-cgroup.c 		spin_lock_irq(&blkg->q->queue_lock);
blkg              532 block/blk-cgroup.c 		if (blkcg_policy_enabled(blkg->q, pol))
blkg              533 block/blk-cgroup.c 			total += prfill(sf, blkg->pd[pol->plid], data);
blkg              534 block/blk-cgroup.c 		spin_unlock_irq(&blkg->q->queue_lock);
blkg              553 block/blk-cgroup.c 	const char *dname = blkg_dev_name(pd->blkg);
blkg              581 block/blk-cgroup.c 	const char *dname = blkg_dev_name(pd->blkg);
blkg              623 block/blk-cgroup.c 	blkg_rwstat_read((void *)pd->blkg + off, &rwstat);
blkg              667 block/blk-cgroup.c 	blkg_rwstat_recursive_sum(pd->blkg, NULL, off, &rwstat);
blkg              715 block/blk-cgroup.c void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
blkg              722 block/blk-cgroup.c 	lockdep_assert_held(&blkg->q->queue_lock);
blkg              725 block/blk-cgroup.c 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
blkg              812 block/blk-cgroup.c 	struct blkcg_gq *blkg;
blkg              824 block/blk-cgroup.c 	blkg = blkg_lookup_check(blkcg, pol, q);
blkg              825 block/blk-cgroup.c 	if (IS_ERR(blkg)) {
blkg              826 block/blk-cgroup.c 		ret = PTR_ERR(blkg);
blkg              830 block/blk-cgroup.c 	if (blkg)
blkg              861 block/blk-cgroup.c 		blkg = blkg_lookup_check(pos, pol, q);
blkg              862 block/blk-cgroup.c 		if (IS_ERR(blkg)) {
blkg              863 block/blk-cgroup.c 			ret = PTR_ERR(blkg);
blkg              867 block/blk-cgroup.c 		if (blkg) {
blkg              870 block/blk-cgroup.c 			blkg = blkg_create(pos, q, new_blkg);
blkg              871 block/blk-cgroup.c 			if (IS_ERR(blkg)) {
blkg              872 block/blk-cgroup.c 				ret = PTR_ERR(blkg);
blkg              882 block/blk-cgroup.c 	ctx->blkg = blkg;
blkg              924 block/blk-cgroup.c 	struct blkcg_gq *blkg;
blkg              928 block/blk-cgroup.c 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
blkg              937 block/blk-cgroup.c 		spin_lock_irq(&blkg->q->queue_lock);
blkg              939 block/blk-cgroup.c 		if (!blkg->online)
blkg              942 block/blk-cgroup.c 		dname = blkg_dev_name(blkg);
blkg              954 block/blk-cgroup.c 		blkg_rwstat_recursive_sum(blkg, NULL,
blkg              960 block/blk-cgroup.c 		blkg_rwstat_recursive_sum(blkg, NULL,
blkg              974 block/blk-cgroup.c 		if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
blkg              978 block/blk-cgroup.c 					 atomic_read(&blkg->use_delay),
blkg              979 block/blk-cgroup.c 					(unsigned long long)atomic64_read(&blkg->delay_nsec));
blkg              986 block/blk-cgroup.c 			if (!blkg->pd[i] || !pol->pd_stat_fn)
blkg              989 block/blk-cgroup.c 			written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
blkg             1004 block/blk-cgroup.c 		spin_unlock_irq(&blkg->q->queue_lock);
blkg             1084 block/blk-cgroup.c 		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
blkg             1086 block/blk-cgroup.c 		struct request_queue *q = blkg->q;
blkg             1089 block/blk-cgroup.c 			blkg_destroy(blkg);
blkg             1199 block/blk-cgroup.c 	struct blkcg_gq *new_blkg, *blkg;
blkg             1212 block/blk-cgroup.c 	blkg = blkg_create(&blkcg_root, q, new_blkg);
blkg             1213 block/blk-cgroup.c 	if (IS_ERR(blkg))
blkg             1215 block/blk-cgroup.c 	q->root_blkg = blkg;
blkg             1239 block/blk-cgroup.c 	return PTR_ERR(blkg);
blkg             1368 block/blk-cgroup.c 	struct blkcg_gq *blkg, *pinned_blkg = NULL;
blkg             1380 block/blk-cgroup.c 	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
blkg             1383 block/blk-cgroup.c 		if (blkg->pd[pol->plid])
blkg             1387 block/blk-cgroup.c 		if (blkg == pinned_blkg) {
blkg             1392 block/blk-cgroup.c 					      blkg->blkcg);
blkg             1402 block/blk-cgroup.c 			blkg_get(blkg);
blkg             1403 block/blk-cgroup.c 			pinned_blkg = blkg;
blkg             1410 block/blk-cgroup.c 						       blkg->blkcg);
blkg             1417 block/blk-cgroup.c 		blkg->pd[pol->plid] = pd;
blkg             1418 block/blk-cgroup.c 		pd->blkg = blkg;
blkg             1424 block/blk-cgroup.c 		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
blkg             1425 block/blk-cgroup.c 			pol->pd_init_fn(blkg->pd[pol->plid]);
blkg             1443 block/blk-cgroup.c 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
blkg             1444 block/blk-cgroup.c 		if (blkg->pd[pol->plid]) {
blkg             1445 block/blk-cgroup.c 			pol->pd_free_fn(blkg->pd[pol->plid]);
blkg             1446 block/blk-cgroup.c 			blkg->pd[pol->plid] = NULL;
blkg             1466 block/blk-cgroup.c 	struct blkcg_gq *blkg;
blkg             1478 block/blk-cgroup.c 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
blkg             1479 block/blk-cgroup.c 		if (blkg->pd[pol->plid]) {
blkg             1481 block/blk-cgroup.c 				pol->pd_offline_fn(blkg->pd[pol->plid]);
blkg             1482 block/blk-cgroup.c 			pol->pd_free_fn(blkg->pd[pol->plid]);
blkg             1483 block/blk-cgroup.c 			blkg->pd[pol->plid] = NULL;
blkg             1616 block/blk-cgroup.c 	struct blkcg_gq *blkg = bio->bi_blkg;
blkg             1622 block/blk-cgroup.c 	if (!blkg->parent)
blkg             1625 block/blk-cgroup.c 	spin_lock_bh(&blkg->async_bio_lock);
blkg             1626 block/blk-cgroup.c 	bio_list_add(&blkg->async_bios, bio);
blkg             1627 block/blk-cgroup.c 	spin_unlock_bh(&blkg->async_bio_lock);
blkg             1629 block/blk-cgroup.c 	queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
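
The punt path (blkcg_punt_bio_submit, blk-cgroup.c:1616-1629) pairs with the worker at blk-cgroup.c:122-131: submitters append to blkg->async_bios under async_bio_lock and kick a workqueue item, and the worker splices the entire list out under the same lock, then submits with the lock dropped. A userspace sketch of that handoff (LIFO list for brevity; the kernel's bio_list preserves FIFO order):

#include <pthread.h>
#include <stddef.h>

struct item {
	struct item *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *pending;        /* models blkg->async_bios */

/* submit side: queue under the lock, then kick the worker */
static void punt(struct item *it)
{
	pthread_mutex_lock(&lock);
	it->next = pending;
	pending = it;
	pthread_mutex_unlock(&lock);
	/* ...queue_work() equivalent: wake the worker... */
}

/* worker side: splice the whole list out under the lock, then process
 * it with the lock dropped so submitters are never blocked on the work */
static void drain(void (*process)(struct item *))
{
	struct item *batch;

	pthread_mutex_lock(&lock);
	batch = pending;
	pending = NULL;
	pthread_mutex_unlock(&lock);

	while (batch) {
		struct item *next = batch->next;

		process(batch);
		batch = next;
	}
}
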
blkg             1639 block/blk-cgroup.c static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
blkg             1641 block/blk-cgroup.c 	u64 old = atomic64_read(&blkg->delay_start);
blkg             1657 block/blk-cgroup.c 	    atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
blkg             1658 block/blk-cgroup.c 		u64 cur = atomic64_read(&blkg->delay_nsec);
blkg             1659 block/blk-cgroup.c 		u64 sub = min_t(u64, blkg->last_delay, now - old);
blkg             1660 block/blk-cgroup.c 		int cur_use = atomic_read(&blkg->use_delay);
blkg             1666 block/blk-cgroup.c 		if (cur_use < blkg->last_use)
blkg             1667 block/blk-cgroup.c 			sub = max_t(u64, sub, blkg->last_delay >> 1);
blkg             1676 block/blk-cgroup.c 			atomic64_set(&blkg->delay_nsec, 0);
blkg             1677 block/blk-cgroup.c 			blkg->last_delay = 0;
blkg             1679 block/blk-cgroup.c 			atomic64_sub(sub, &blkg->delay_nsec);
blkg             1680 block/blk-cgroup.c 			blkg->last_delay = cur - sub;
blkg             1682 block/blk-cgroup.c 		blkg->last_use = cur_use;
blkg             1692 block/blk-cgroup.c static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
blkg             1700 block/blk-cgroup.c 	while (blkg->parent) {
blkg             1701 block/blk-cgroup.c 		if (atomic_read(&blkg->use_delay)) {
blkg             1702 block/blk-cgroup.c 			blkcg_scale_delay(blkg, now);
blkg             1704 block/blk-cgroup.c 					   atomic64_read(&blkg->delay_nsec));
blkg             1706 block/blk-cgroup.c 		blkg = blkg->parent;
blkg             1752 block/blk-cgroup.c 	struct blkcg_gq *blkg;
blkg             1770 block/blk-cgroup.c 	blkg = blkg_lookup(blkcg, q);
blkg             1771 block/blk-cgroup.c 	if (!blkg)
blkg             1773 block/blk-cgroup.c 	if (!blkg_tryget(blkg))
blkg             1777 block/blk-cgroup.c 	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
blkg             1778 block/blk-cgroup.c 	blkg_put(blkg);
blkg             1828 block/blk-cgroup.c void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
blkg             1830 block/blk-cgroup.c 	blkcg_scale_delay(blkg, now);
blkg             1831 block/blk-cgroup.c 	atomic64_add(delta, &blkg->delay_nsec);
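
blkcg_scale_delay() (blk-cgroup.c:1639-1682) is the decay half of the delay mechanism: blkcg_add_delay() accumulates delay_nsec, and once a window has elapsed the scaler forgives up to last window's charge, at least half of it when the number of users dropped. An arithmetic-only model follows; the cmpxchg on delay_start and the one-second window length are taken from the surrounding kernel code, not from the hits above:

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

struct delay_state {
	uint64_t delay_start;       /* when the current window began */
	uint64_t delay_nsec;        /* accumulated delay to charge */
	uint64_t last_delay;        /* delay outstanding last window */
	int use_delay;              /* current number of throttlers */
	int last_use;               /* throttlers last window */
};

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }
static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

static void scale_delay(struct delay_state *s, uint64_t now)
{
	uint64_t old = s->delay_start;

	if (now <= old + NSEC_PER_SEC)
		return;                     /* window not over yet */
	s->delay_start = now;

	uint64_t cur = s->delay_nsec;
	uint64_t sub = min_u64(s->last_delay, now - old);

	/* fewer throttlers than last window: decay at least half */
	if (s->use_delay < s->last_use)
		sub = max_u64(sub, s->last_delay >> 1);

	if (sub >= cur) {
		s->delay_nsec = 0;
		s->last_delay = 0;
	} else {
		s->delay_nsec = cur - sub;
		s->last_delay = cur - sub;
	}
	s->last_use = s->use_delay;
}
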
blkg              639 block/blk-iocost.c static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
blkg              641 block/blk-iocost.c 	return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
blkg             1014 block/blk-iocost.c 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
blkg             1015 block/blk-iocost.c 	struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
blkg             1215 block/blk-iocost.c 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
blkg             1234 block/blk-iocost.c 		blkcg_clear_delay(blkg);
blkg             1237 block/blk-iocost.c 	if (!atomic_read(&blkg->use_delay) &&
blkg             1245 block/blk-iocost.c 		blkcg_add_delay(blkg, now->now_ns, cost_ns);
blkg             1247 block/blk-iocost.c 	blkcg_use_delay(blkg);
blkg             1686 block/blk-iocost.c 	struct blkcg_gq *blkg = bio->bi_blkg;
blkg             1688 block/blk-iocost.c 	struct ioc_gq *iocg = blkg_to_iocg(blkg);
blkg             2019 block/blk-iocost.c 	struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
blkg             2020 block/blk-iocost.c 	struct ioc *ioc = q_to_ioc(blkg->q);
blkg             2041 block/blk-iocost.c 	iocg->level = blkg->blkcg->css.cgroup->level;
blkg             2043 block/blk-iocost.c 	for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
blkg             2075 block/blk-iocost.c 	const char *dname = blkg_dev_name(pd->blkg);
blkg             2106 block/blk-iocost.c 		struct blkcg_gq *blkg;
blkg             2116 block/blk-iocost.c 		hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
blkg             2117 block/blk-iocost.c 			struct ioc_gq *iocg = blkg_to_iocg(blkg);
blkg             2134 block/blk-iocost.c 	iocg = blkg_to_iocg(ctx.blkg);
blkg             2161 block/blk-iocost.c 	const char *dname = blkg_dev_name(pd->blkg);
blkg             2332 block/blk-iocost.c 	const char *dname = blkg_dev_name(pd->blkg);
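
ioc_pd_init() (blk-iocost.c:2019-2043) records the group's depth from the cgroup hierarchy and then walks blkg->parent pointers toward the root. A sketch of that walk filling a root-first ancestor array; this shows the general pattern, not iocost's exact bookkeeping:

#include <stddef.h>

#define MAX_LEVELS 8

struct grp {
	struct grp *parent;         /* NULL at the root */
};

/* count depth, then fill anc[] root-first; assumes depth <= MAX_LEVELS */
static int fill_ancestors(struct grp *g, struct grp *anc[MAX_LEVELS])
{
	int levels = 0, i;

	for (struct grp *t = g; t; t = t->parent)
		levels++;
	i = levels;
	for (struct grp *t = g; t; t = t->parent)
		anc[--i] = t;           /* g itself lands at anc[levels - 1] */
	return levels;
}
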
blkg              182 block/blk-iolatency.c static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
blkg              184 block/blk-iolatency.c 	return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
blkg              463 block/blk-iolatency.c 	struct blkcg_gq *blkg = bio->bi_blkg;
blkg              469 block/blk-iolatency.c 	while (blkg && blkg->parent) {
blkg              470 block/blk-iolatency.c 		struct iolatency_grp *iolat = blkg_to_lat(blkg);
blkg              472 block/blk-iolatency.c 			blkg = blkg->parent;
blkg              479 block/blk-iolatency.c 		blkg = blkg->parent;
blkg              522 block/blk-iolatency.c 	struct blkcg_gq *blkg = lat_to_blkg(iolat);
blkg              539 block/blk-iolatency.c 	parent = blkg_to_lat(blkg->parent);
blkg              590 block/blk-iolatency.c 	struct blkcg_gq *blkg;
blkg              599 block/blk-iolatency.c 	blkg = bio->bi_blkg;
blkg              600 block/blk-iolatency.c 	if (!blkg || !bio_flagged(bio, BIO_TRACKED))
blkg              611 block/blk-iolatency.c 	while (blkg && blkg->parent) {
blkg              612 block/blk-iolatency.c 		iolat = blkg_to_lat(blkg);
blkg              614 block/blk-iolatency.c 			blkg = blkg->parent;
blkg              637 block/blk-iolatency.c 		blkg = blkg->parent;
blkg              659 block/blk-iolatency.c 	struct blkcg_gq *blkg;
blkg              664 block/blk-iolatency.c 	blkg_for_each_descendant_pre(blkg, pos_css,
blkg              675 block/blk-iolatency.c 		if (!blkg_tryget(blkg))
blkg              678 block/blk-iolatency.c 		iolat = blkg_to_lat(blkg);
blkg              712 block/blk-iolatency.c 		blkg_put(blkg);
blkg              750 block/blk-iolatency.c static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
blkg              752 block/blk-iolatency.c 	struct iolatency_grp *iolat = blkg_to_lat(blkg);
blkg              763 block/blk-iolatency.c 		blkcg_clear_delay(blkg);
blkg              769 block/blk-iolatency.c static void iolatency_clear_scaling(struct blkcg_gq *blkg)
blkg              771 block/blk-iolatency.c 	if (blkg->parent) {
blkg              772 block/blk-iolatency.c 		struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
blkg              791 block/blk-iolatency.c 	struct blkcg_gq *blkg;
blkg              804 block/blk-iolatency.c 	iolat = blkg_to_lat(ctx.blkg);
blkg              830 block/blk-iolatency.c 	blkg = ctx.blkg;
blkg              833 block/blk-iolatency.c 	enable = iolatency_set_min_lat_nsec(blkg, lat_val);
blkg              835 block/blk-iolatency.c 		WARN_ON_ONCE(!blk_get_queue(blkg->q));
blkg              836 block/blk-iolatency.c 		blkg_get(blkg);
blkg              840 block/blk-iolatency.c 		iolatency_clear_scaling(blkg);
blkg              847 block/blk-iolatency.c 		struct iolatency_grp *tmp = blkg_to_lat(blkg);
blkg              850 block/blk-iolatency.c 		blk_mq_freeze_queue(blkg->q);
blkg              859 block/blk-iolatency.c 		blk_mq_unfreeze_queue(blkg->q);
blkg              861 block/blk-iolatency.c 		blkg_put(blkg);
blkg              862 block/blk-iolatency.c 		blk_put_queue(blkg->q);
blkg              871 block/blk-iolatency.c 	const char *dname = blkg_dev_name(pd->blkg);
blkg              958 block/blk-iolatency.c 	struct blkcg_gq *blkg = lat_to_blkg(iolat);
blkg              959 block/blk-iolatency.c 	struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
blkg              964 block/blk-iolatency.c 	if (blk_queue_nonrot(blkg->q))
blkg              978 block/blk-iolatency.c 	iolat->rq_depth.queue_depth = blkg->q->nr_requests;
blkg              989 block/blk-iolatency.c 	if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
blkg              990 block/blk-iolatency.c 		struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
blkg             1003 block/blk-iolatency.c 	struct blkcg_gq *blkg = lat_to_blkg(iolat);
blkg             1007 block/blk-iolatency.c 	ret = iolatency_set_min_lat_nsec(blkg, 0);
blkg             1012 block/blk-iolatency.c 	iolatency_clear_scaling(blkg);
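
blkcg_iolatency_throttle() (blk-iolatency.c:463-479) and the completion path at 590-637 share one loop shape: start at the bio's group and climb parent pointers, giving every level except the root a chance to act, and skipping levels where the policy data is absent. Minimal model:

struct lat_grp {
	struct lat_grp *parent;     /* NULL at the root */
	int enabled;                /* has this level armed a target? */
};

/* give every level except the root a chance to throttle the bio */
static void throttle_walk(struct lat_grp *g)
{
	while (g && g->parent) {    /* stops before acting on the root */
		if (g->enabled) {
			/* ...account/wait at this level... */
		}
		g = g->parent;
	}
}
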
blkg              232 block/blk-throttle.c static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
blkg              234 block/blk-throttle.c 	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
blkg              294 block/blk-throttle.c 	struct blkcg_gq *blkg = tg_to_blkg(tg);
blkg              298 block/blk-throttle.c 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
blkg              305 block/blk-throttle.c 		if (!list_empty(&blkg->blkcg->css.children) ||
blkg              324 block/blk-throttle.c 	struct blkcg_gq *blkg = tg_to_blkg(tg);
blkg              328 block/blk-throttle.c 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
blkg              335 block/blk-throttle.c 		if (!list_empty(&blkg->blkcg->css.children) ||
blkg              521 block/blk-throttle.c 	struct blkcg_gq *blkg = tg_to_blkg(tg);
blkg              522 block/blk-throttle.c 	struct throtl_data *td = blkg->q->td;
blkg              539 block/blk-throttle.c 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
blkg              540 block/blk-throttle.c 		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
blkg              575 block/blk-throttle.c 	struct blkcg_gq *blkg;
blkg              579 block/blk-throttle.c 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
blkg              580 block/blk-throttle.c 		struct throtl_grp *tg = blkg_to_tg(blkg);
blkg             1370 block/blk-throttle.c 	struct blkcg_gq *blkg;
blkg             1384 block/blk-throttle.c 	blkg_for_each_descendant_pre(blkg, pos_css,
blkg             1386 block/blk-throttle.c 		struct throtl_grp *this_tg = blkg_to_tg(blkg);
blkg             1391 block/blk-throttle.c 		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
blkg             1392 block/blk-throttle.c 		    !blkg->parent->parent)
blkg             1394 block/blk-throttle.c 		parent_tg = blkg_to_tg(blkg->parent);
blkg             1441 block/blk-throttle.c 	tg = blkg_to_tg(ctx.blkg);
blkg             1519 block/blk-throttle.c 	const char *dname = blkg_dev_name(pd->blkg);
blkg             1601 block/blk-throttle.c 	tg = blkg_to_tg(ctx.blkg);
blkg             1847 block/blk-throttle.c 	struct blkcg_gq *blkg;
blkg             1856 block/blk-throttle.c 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
blkg             1857 block/blk-throttle.c 		struct throtl_grp *tg = blkg_to_tg(blkg);
blkg             1895 block/blk-throttle.c 	struct blkcg_gq *blkg;
blkg             1902 block/blk-throttle.c 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
blkg             1903 block/blk-throttle.c 		struct throtl_grp *tg = blkg_to_tg(blkg);
blkg             2117 block/blk-throttle.c bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
blkg             2121 block/blk-throttle.c 	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
blkg             2257 block/blk-throttle.c 	struct blkcg_gq *blkg;
blkg             2265 block/blk-throttle.c 	blkg = bio->bi_blkg;
blkg             2266 block/blk-throttle.c 	if (!blkg)
blkg             2268 block/blk-throttle.c 	tg = blkg_to_tg(blkg);
blkg             2340 block/blk-throttle.c 	struct blkcg_gq *blkg;
blkg             2353 block/blk-throttle.c 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
blkg             2354 block/blk-throttle.c 		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
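
Both throttle teardown paths above (blk-throttle.c:1856 and 2353) iterate with blkg_for_each_descendant_post, so children are handled before their parents and a bio promoted upward is never left behind. A recursive model of why post-order matters here (the kernel iterator itself is non-recursive):

#include <stddef.h>

#define MAX_KIDS 4

struct tnode {
	struct tnode *child[MAX_KIDS];  /* unused slots are NULL */
};

/* visit children before parents, as descendant-post iteration does */
static void visit_post(struct tnode *n, void (*fn)(struct tnode *))
{
	if (!n)
		return;
	for (int i = 0; i < MAX_KIDS; i++)
		visit_post(n->child[i], fn);
	fn(n);                          /* parent only after all descendants */
}
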
blkg               90 include/linux/blk-cgroup.h 	struct blkcg_gq			*blkg;
blkg              212 include/linux/blk-cgroup.h const char *blkg_dev_name(struct blkcg_gq *blkg);
blkg              228 include/linux/blk-cgroup.h void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
blkg              233 include/linux/blk-cgroup.h 	struct blkcg_gq			*blkg;
blkg              362 include/linux/blk-cgroup.h 	struct blkcg_gq *blkg;
blkg              367 include/linux/blk-cgroup.h 	blkg = rcu_dereference(blkcg->blkg_hint);
blkg              368 include/linux/blk-cgroup.h 	if (blkg && blkg->q == q)
blkg              369 include/linux/blk-cgroup.h 		return blkg;
blkg              407 include/linux/blk-cgroup.h static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
blkg              410 include/linux/blk-cgroup.h 	return blkg ? blkg->pd[pol->plid] : NULL;
blkg              427 include/linux/blk-cgroup.h 	return pd ? pd->blkg : NULL;
blkg              486 include/linux/blk-cgroup.h static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
blkg              488 include/linux/blk-cgroup.h 	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
blkg              497 include/linux/blk-cgroup.h static inline void blkg_get(struct blkcg_gq *blkg)
blkg              499 include/linux/blk-cgroup.h 	percpu_ref_get(&blkg->refcnt);
blkg              509 include/linux/blk-cgroup.h static inline bool blkg_tryget(struct blkcg_gq *blkg)
blkg              511 include/linux/blk-cgroup.h 	return blkg && percpu_ref_tryget(&blkg->refcnt);
blkg              523 include/linux/blk-cgroup.h static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
blkg              529 include/linux/blk-cgroup.h 	while (blkg) {
blkg              530 include/linux/blk-cgroup.h 		if (blkg_tryget(blkg)) {
blkg              531 include/linux/blk-cgroup.h 			ret_blkg = blkg;
blkg              534 include/linux/blk-cgroup.h 		blkg = blkg->parent;
blkg              544 include/linux/blk-cgroup.h static inline void blkg_put(struct blkcg_gq *blkg)
blkg              546 include/linux/blk-cgroup.h 	percpu_ref_put(&blkg->refcnt);
blkg              704 include/linux/blk-cgroup.h extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
blkg              707 include/linux/blk-cgroup.h static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
blkg              729 include/linux/blk-cgroup.h 	struct blkcg_gq *blkg;
blkg              743 include/linux/blk-cgroup.h 	blkg = bio->bi_blkg;
blkg              745 include/linux/blk-cgroup.h 	throtl = blk_throtl_bio(q, blkg, bio);
blkg              754 include/linux/blk-cgroup.h 			blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
blkg              756 include/linux/blk-cgroup.h 		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
blkg              765 include/linux/blk-cgroup.h static inline void blkcg_use_delay(struct blkcg_gq *blkg)
blkg              767 include/linux/blk-cgroup.h 	if (atomic_add_return(1, &blkg->use_delay) == 1)
blkg              768 include/linux/blk-cgroup.h 		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
blkg              771 include/linux/blk-cgroup.h static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
blkg              773 include/linux/blk-cgroup.h 	int old = atomic_read(&blkg->use_delay);
blkg              786 include/linux/blk-cgroup.h 		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
blkg              795 include/linux/blk-cgroup.h 		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
blkg              799 include/linux/blk-cgroup.h static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
blkg              801 include/linux/blk-cgroup.h 	int old = atomic_read(&blkg->use_delay);
blkg              806 include/linux/blk-cgroup.h 		int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
blkg              808 include/linux/blk-cgroup.h 			atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
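
The use_delay helpers (include/linux/blk-cgroup.h:765-808) keep two counters consistent: the per-group use_delay and the cgroup-wide congestion_count, which must change only on the 0 <-> 1 transitions of the former. A compilable C11 model of blkcg_use_delay() and the cmpxchg loop in blkcg_unuse_delay():

#include <stdatomic.h>

static atomic_int congestion_count; /* models css.cgroup->congestion_count */

/* blkcg_use_delay(): bump the counter only on the 0 -> 1 transition */
static void use_delay(atomic_int *use)
{
	if (atomic_fetch_add(use, 1) == 0)
		atomic_fetch_add(&congestion_count, 1);
}

/* blkcg_unuse_delay(): decrement unless already zero; undo the
 * congestion count only when this call took the counter to zero */
static int unuse_delay(atomic_int *use)
{
	int old = atomic_load(use);

	do {
		if (old == 0)
			return 0;       /* nothing to undo */
	} while (!atomic_compare_exchange_weak(use, &old, old - 1));

	if (old == 1)                   /* this call crossed 1 -> 0 */
		atomic_fetch_sub(&congestion_count, 1);
	return 1;
}
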
blkg              815 include/linux/blk-cgroup.h void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
blkg              860 include/linux/blk-cgroup.h static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
blkg              863 include/linux/blk-cgroup.h static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
blkg              864 include/linux/blk-cgroup.h static inline void blkg_get(struct blkcg_gq *blkg) { }
blkg              865 include/linux/blk-cgroup.h static inline void blkg_put(struct blkcg_gq *blkg) { }
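
The final hits are from the config-off branch of the header: every exported helper gets a same-signature static inline stub so call sites compile unchanged and the optimizer removes the calls. The generic shape of the pattern, with placeholder names:

struct thing;                       /* placeholder type */

#ifdef CONFIG_FEATURE
void feature_get(struct thing *t);  /* real version lives in a .c file */
#else
/* same signature, empty body: callers compile either way and the
 * compiler deletes the call when the feature is configured out */
static inline void feature_get(struct thing *t) { }
#endif
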