Lines matching references to `q` (the struct request_queue pointer), apparently from the kernel's block/blk-cgroup.c. Each entry gives the source line number, the matching line, and the enclosing function; "argument" marks a match in a parameter list and "local" a local variable.
52 static bool blkcg_policy_enabled(struct request_queue *q, in blkcg_policy_enabled() argument
55 return pol && test_bit(pol->plid, q->blkcg_pols); in blkcg_policy_enabled()
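As the fragments above show, blkcg_policy_enabled() is essentially a NULL check plus a bit test of the policy's plid against the queue's blkcg_pols bitmap (the kernel itself uses test_bit() on a per-queue bitmap). A minimal user-space sketch of that shape; queue_model, policy_model and policy_enabled are illustrative names, not kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    struct queue_model {
        unsigned long blkcg_pols;       /* one bit per registered policy id */
    };

    struct policy_model {
        int plid;                       /* id assigned when the policy registers */
    };

    /* Same shape as blkcg_policy_enabled(): no policy, or a clear bit, means "off". */
    static bool policy_enabled(const struct queue_model *q,
                               const struct policy_model *pol)
    {
        return pol && (q->blkcg_pols & (1UL << pol->plid));
    }

    int main(void)
    {
        struct queue_model q = { .blkcg_pols = 1UL << 2 };
        struct policy_model throtl = { .plid = 2 }, cfq = { .plid = 0 };

        printf("throtl enabled: %d\n", policy_enabled(&q, &throtl)); /* prints 1 */
        printf("cfq enabled:    %d\n", policy_enabled(&q, &cfq));    /* prints 0 */
        return 0;
    }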
91 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, in blkg_alloc() argument
98 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); in blkg_alloc()
106 blkg->q = q; in blkg_alloc()
113 if (blk_init_rl(&blkg->rl, q, gfp_mask)) in blkg_alloc()
122 if (!blkcg_policy_enabled(q, pol)) in blkg_alloc()
126 pd = pol->pd_alloc_fn(gfp_mask, q->node); in blkg_alloc()
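blkg_alloc() allocates the group on the queue's NUMA node (q->node), initializes the request list against q, and then allocates per-policy data only for the policies currently enabled on that queue, unwinding on failure. A hedged user-space model of that allocate-and-roll-back loop; group_alloc, policy_model and the fixed-size policy table are invented for illustration:

    #include <stdlib.h>

    #define MAX_POLS 3

    struct policy_model {
        int enabled;                    /* stands in for the queue's blkcg_pols bit */
        size_t pd_size;                 /* size of this policy's per-group data */
    };

    struct group_model {
        void *pd[MAX_POLS];             /* per-policy data, NULL when policy disabled */
    };

    /* Mirrors blkg_alloc(): allocate the group, then per-policy data for enabled
     * policies only; on any failure, free what was allocated and return NULL. */
    static struct group_model *group_alloc(const struct policy_model pols[MAX_POLS])
    {
        struct group_model *g = calloc(1, sizeof(*g));
        if (!g)
            return NULL;

        for (int i = 0; i < MAX_POLS; i++) {
            if (!pols[i].enabled)
                continue;               /* skip disabled policies, as blkg_alloc() does */
            g->pd[i] = calloc(1, pols[i].pd_size);
            if (!g->pd[i])
                goto err_free;
        }
        return g;

    err_free:
        for (int i = 0; i < MAX_POLS; i++)
            free(g->pd[i]);
        free(g);
        return NULL;
    }

    int main(void)
    {
        struct policy_model pols[MAX_POLS] = {
            { .enabled = 1, .pd_size = 64 },
            { .enabled = 0, .pd_size = 32 },
            { .enabled = 1, .pd_size = 128 },
        };
        struct group_model *g = group_alloc(pols);
        if (g) {
            for (int i = 0; i < MAX_POLS; i++)
                free(g->pd[i]);
            free(g);
        }
        return 0;
    }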
143 struct request_queue *q, bool update_hint) in blkg_lookup_slowpath() argument
153 blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id); in blkg_lookup_slowpath()
154 if (blkg && blkg->q == q) { in blkg_lookup_slowpath()
156 lockdep_assert_held(q->queue_lock); in blkg_lookup_slowpath()
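blkg_lookup_slowpath() looks the group up in the cgroup's radix tree keyed by q->id and trusts the result only if blkg->q points back at the queue it was asked about; the lockdep assertion shows the hint update is only done while holding q->queue_lock. A small user-space model of a keyed lookup with back-pointer validation and a cached hint; blkcg_model, lookup_group and the fixed table standing in for the radix tree are made-up names:

    #include <stdio.h>

    struct queue_model { int id; };

    struct group_model {
        struct queue_model *q;          /* back-pointer validated on lookup */
    };

    #define MAX_QUEUES 8

    struct blkcg_model {
        struct group_model *tree[MAX_QUEUES];   /* stand-in for the radix tree */
        struct group_model *hint;               /* last successful lookup */
    };

    /* Mirrors the slow path: index by queue id, trust the entry only if its
     * back-pointer matches the queue we were asked about, and optionally
     * refresh the cached hint (the kernel only does that under queue_lock). */
    static struct group_model *lookup_group(struct blkcg_model *cg,
                                            struct queue_model *q,
                                            int update_hint)
    {
        struct group_model *g = cg->tree[q->id % MAX_QUEUES];

        if (g && g->q == q) {
            if (update_hint)
                cg->hint = g;
            return g;
        }
        return NULL;
    }

    int main(void)
    {
        struct queue_model q = { .id = 3 };
        struct group_model g = { .q = &q };
        struct blkcg_model cg = { 0 };

        cg.tree[q.id % MAX_QUEUES] = &g;
        printf("found: %p\n", (void *)lookup_group(&cg, &q, 1));
        return 0;
    }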
171 struct request_queue *q, in blkg_create() argument
179 lockdep_assert_held(q->queue_lock); in blkg_create()
187 wb_congested = wb_congested_get_create(&q->backing_dev_info, in blkg_create()
196 new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT); in blkg_create()
207 blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false); in blkg_create()
225 ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg); in blkg_create()
228 list_add(&blkg->q_node, &q->blkg_list); in blkg_create()
271 struct request_queue *q) in blkg_lookup_create() argument
276 lockdep_assert_held(q->queue_lock); in blkg_lookup_create()
282 if (unlikely(blk_queue_bypass(q))) in blkg_lookup_create()
283 return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY); in blkg_lookup_create()
285 blkg = __blkg_lookup(blkcg, q, true); in blkg_lookup_create()
297 while (parent && !__blkg_lookup(parent, q, false)) { in blkg_lookup_create()
302 blkg = blkg_create(pos, q, NULL); in blkg_lookup_create()
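blkg_lookup_create() bails out with -ENODEV or -EBUSY while the queue is dying or bypassing, and otherwise walks up the cgroup hierarchy to the closest ancestor that already has a group for q, then creates groups one level at a time back down toward the requested cgroup (blkg_create() links each new group to its parent). A user-space sketch of that walk-up/create-down loop; cgroup_model, lookup_create and friends are illustrative, and the sketch models a single queue per cgroup:

    #include <stdio.h>
    #include <stdlib.h>

    struct cgroup_model;

    struct group_model {
        struct cgroup_model *owner;        /* which cgroup this group belongs to */
    };

    struct cgroup_model {
        struct cgroup_model *parent;
        struct group_model *blkg;          /* group for the one queue we model */
    };

    static struct group_model *create_group(struct cgroup_model *cg)
    {
        cg->blkg = calloc(1, sizeof(*cg->blkg));
        if (cg->blkg)
            cg->blkg->owner = cg;          /* parent group is guaranteed to exist */
        return cg->blkg;
    }

    /* Mirrors blkg_lookup_create(): find the closest ancestor that already has a
     * group, then create one level at a time back down toward the target cgroup. */
    static struct group_model *lookup_create(struct cgroup_model *cg)
    {
        if (cg->blkg)
            return cg->blkg;

        for (;;) {
            struct cgroup_model *pos = cg;

            while (pos->parent && !pos->parent->blkg)
                pos = pos->parent;         /* walk up past cgroups with no group yet */

            if (!create_group(pos) || pos == cg)
                return cg->blkg;           /* done, or allocation failed part-way */
        }
    }

    int main(void)
    {
        struct cgroup_model root = { 0 },
                            mid  = { .parent = &root },
                            leaf = { .parent = &mid };

        root.blkg = calloc(1, sizeof(*root.blkg));   /* root group exists up front */
        lookup_create(&leaf);
        printf("mid has group: %d, leaf has group: %d\n",
               mid.blkg != NULL, leaf.blkg != NULL);

        free(leaf.blkg);
        free(mid.blkg);
        free(root.blkg);
        return 0;
    }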
314 lockdep_assert_held(blkg->q->queue_lock); in blkg_destroy()
335 radix_tree_delete(&blkcg->blkg_tree, blkg->q->id); in blkg_destroy()
360 static void blkg_destroy_all(struct request_queue *q) in blkg_destroy_all() argument
364 lockdep_assert_held(q->queue_lock); in blkg_destroy_all()
366 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) { in blkg_destroy_all()
374 q->root_blkg = NULL; in blkg_destroy_all()
375 q->root_rl.blkg = NULL; in blkg_destroy_all()
406 struct request_queue *q) in __blk_queue_next_rl() argument
415 if (rl == &q->root_rl) { in __blk_queue_next_rl()
416 ent = &q->blkg_list; in __blk_queue_next_rl()
427 if (ent == &q->root_blkg->q_node) in __blk_queue_next_rl()
429 if (ent == &q->blkg_list) in __blk_queue_next_rl()
471 if (blkg->q->backing_dev_info.dev) in blkg_dev_name()
472 return dev_name(blkg->q->backing_dev_info.dev); in blkg_dev_name()
506 spin_lock_irq(blkg->q->queue_lock); in blkcg_print_blkgs()
507 if (blkcg_policy_enabled(blkg->q, pol)) in blkcg_print_blkgs()
509 spin_unlock_irq(blkg->q->queue_lock); in blkcg_print_blkgs()
705 lockdep_assert_held(blkg->q->queue_lock); in blkg_stat_recursive_sum()
748 lockdep_assert_held(blkg->q->queue_lock); in blkg_rwstat_recursive_sum()
876 spin_lock_irq(blkg->q->queue_lock); in blkcg_print_stat()
888 spin_unlock_irq(blkg->q->queue_lock); in blkcg_print_stat()
936 struct request_queue *q = blkg->q; in blkcg_css_offline() local
938 if (spin_trylock(q->queue_lock)) { in blkcg_css_offline()
940 spin_unlock(q->queue_lock); in blkcg_css_offline()
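In blkcg_css_offline() the queue lock is taken with spin_trylock() rather than spin_lock(): the caller already holds the blkcg side's lock, which appears to be the reverse of the usual nesting, so on contention it backs off and retries instead of risking deadlock. A user-space sketch of that trylock-and-back-off pattern using pthreads; all names are invented, and sched_yield() stands in for the kernel's cpu_relax() retry:

    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    static pthread_mutex_t blkcg_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static int groups_left = 3;

    static void destroy_one_group(void)
    {
        groups_left--;                  /* stand-in for blkg_destroy() */
    }

    static void offline_all_groups(void)
    {
        pthread_mutex_lock(&blkcg_lock);
        while (groups_left > 0) {
            if (pthread_mutex_trylock(&queue_lock) == 0) {
                destroy_one_group();
                pthread_mutex_unlock(&queue_lock);
            } else {
                /* Can't take the queue lock in this order: drop our lock, give
                 * the other side a chance, and retry. */
                pthread_mutex_unlock(&blkcg_lock);
                sched_yield();
                pthread_mutex_lock(&blkcg_lock);
            }
        }
        pthread_mutex_unlock(&blkcg_lock);
    }

    int main(void)
    {
        offline_all_groups();
        printf("groups left: %d\n", groups_left);
        return 0;
    }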
1046 int blkcg_init_queue(struct request_queue *q) in blkcg_init_queue() argument
1052 new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL); in blkcg_init_queue()
1064 spin_lock_irq(q->queue_lock); in blkcg_init_queue()
1065 blkg = blkg_create(&blkcg_root, q, new_blkg); in blkcg_init_queue()
1066 spin_unlock_irq(q->queue_lock); in blkcg_init_queue()
1077 q->root_blkg = blkg; in blkcg_init_queue()
1078 q->root_rl.blkg = blkg; in blkcg_init_queue()
1080 ret = blk_throtl_init(q); in blkcg_init_queue()
1082 spin_lock_irq(q->queue_lock); in blkcg_init_queue()
1083 blkg_destroy_all(q); in blkcg_init_queue()
1084 spin_unlock_irq(q->queue_lock); in blkcg_init_queue()
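blkcg_init_queue() preallocates the root group with GFP_KERNEL before taking q->queue_lock, creates and publishes it as q->root_blkg / q->root_rl.blkg under the lock, and if the later blk_throtl_init() step fails it re-takes the lock and tears everything down with blkg_destroy_all(). A user-space model of that preallocate, publish-under-lock, unwind-on-failure ordering; queue_model, init_queue and setup_throttling are invented stand-ins:

    #include <pthread.h>
    #include <stdlib.h>

    struct group_model { int is_root; };

    struct queue_model {
        pthread_mutex_t lock;
        struct group_model *root_blkg;  /* published only under the lock */
    };

    static int setup_throttling(struct queue_model *q)
    {
        (void)q;
        return 0;                       /* pretend blk_throtl_init() succeeded */
    }

    /* Mirrors blkcg_init_queue(): allocate with a sleeping allocator outside the
     * lock, publish under the lock, and unwind under the lock if a later step fails. */
    static int init_queue(struct queue_model *q)
    {
        struct group_model *new_blkg = calloc(1, sizeof(*new_blkg));
        if (!new_blkg)
            return -1;
        new_blkg->is_root = 1;

        pthread_mutex_lock(&q->lock);
        q->root_blkg = new_blkg;        /* blkg_create() + root pointer assignment */
        pthread_mutex_unlock(&q->lock);

        if (setup_throttling(q)) {
            pthread_mutex_lock(&q->lock);
            q->root_blkg = NULL;        /* blkg_destroy_all() on the error path */
            pthread_mutex_unlock(&q->lock);
            free(new_blkg);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct queue_model q = { .lock = PTHREAD_MUTEX_INITIALIZER };
        return init_queue(&q);
    }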
1095 void blkcg_drain_queue(struct request_queue *q) in blkcg_drain_queue() argument
1097 lockdep_assert_held(q->queue_lock); in blkcg_drain_queue()
1103 if (!q->root_blkg) in blkcg_drain_queue()
1106 blk_throtl_drain(q); in blkcg_drain_queue()
1115 void blkcg_exit_queue(struct request_queue *q) in blkcg_exit_queue() argument
1117 spin_lock_irq(q->queue_lock); in blkcg_exit_queue()
1118 blkg_destroy_all(q); in blkcg_exit_queue()
1119 spin_unlock_irq(q->queue_lock); in blkcg_exit_queue()
1121 blk_throtl_exit(q); in blkcg_exit_queue()
1206 int blkcg_activate_policy(struct request_queue *q, in blkcg_activate_policy() argument
1213 if (blkcg_policy_enabled(q, pol)) in blkcg_activate_policy()
1216 blk_queue_bypass_start(q); in blkcg_activate_policy()
1219 pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node); in blkcg_activate_policy()
1226 spin_lock_irq(q->queue_lock); in blkcg_activate_policy()
1228 list_for_each_entry(blkg, &q->blkg_list, q_node) { in blkcg_activate_policy()
1234 pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node); in blkcg_activate_policy()
1238 spin_unlock_irq(q->queue_lock); in blkcg_activate_policy()
1249 __set_bit(pol->plid, q->blkcg_pols); in blkcg_activate_policy()
1252 spin_unlock_irq(q->queue_lock); in blkcg_activate_policy()
1254 blk_queue_bypass_end(q); in blkcg_activate_policy()
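blkcg_activate_policy() is the densest pattern in this listing: put the queue into bypass, walk q->blkg_list under q->queue_lock allocating each group's policy data with GFP_NOWAIT, and whenever that fails drop the lock, preallocate a single pd with GFP_KERNEL, and restart the walk; only once every group has its data is the policy's bit set in q->blkcg_pols and bypass ended. A user-space sketch of the nowait-then-preallocate retry loop; alloc_nowait, which simulates a failing non-blocking allocation, and the other names are entirely invented:

    #include <pthread.h>
    #include <stdlib.h>

    #define NGROUPS 4

    struct group_model { void *pd; };

    struct queue_model {
        pthread_mutex_t lock;
        struct group_model groups[NGROUPS];   /* stand-in for q->blkg_list */
        unsigned long blkcg_pols;
    };

    /* Invented helper: a non-blocking allocation that is allowed to fail,
     * standing in for GFP_NOWAIT. Here it fails on the first call only. */
    static void *alloc_nowait(size_t size)
    {
        static int calls;
        return calls++ == 0 ? NULL : calloc(1, size);
    }

    /* Mirrors blkcg_activate_policy(): fill in pd for every group, falling back
     * to a sleeping preallocation (plain calloc here) whenever nowait fails. */
    static int activate_policy(struct queue_model *q, int plid, size_t pd_size)
    {
        void *pd_prealloc = NULL;

        if (q->blkcg_pols & (1UL << plid))
            return 0;                         /* already enabled on this queue */

    pd_prealloc:
        if (!pd_prealloc) {
            pd_prealloc = calloc(1, pd_size); /* GFP_KERNEL, outside the lock */
            if (!pd_prealloc)
                return -1;
        }

        pthread_mutex_lock(&q->lock);
        for (int i = 0; i < NGROUPS; i++) {
            struct group_model *g = &q->groups[i];

            if (g->pd)
                continue;                     /* already done on a previous pass */
            g->pd = alloc_nowait(pd_size);    /* GFP_NOWAIT under the lock */
            if (!g->pd) {
                g->pd = pd_prealloc;
                pd_prealloc = NULL;
            }
            if (!g->pd) {
                pthread_mutex_unlock(&q->lock);
                goto pd_prealloc;             /* drop the lock, preallocate, rescan */
            }
        }
        q->blkcg_pols |= 1UL << plid;         /* policy is now live on this queue */
        pthread_mutex_unlock(&q->lock);

        free(pd_prealloc);
        return 0;
    }

    int main(void)
    {
        struct queue_model q = { .lock = PTHREAD_MUTEX_INITIALIZER };
        int ret = activate_policy(&q, 2, 64);

        for (int i = 0; i < NGROUPS; i++)
            free(q.groups[i].pd);
        return ret;
    }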
1269 void blkcg_deactivate_policy(struct request_queue *q, in blkcg_deactivate_policy() argument
1274 if (!blkcg_policy_enabled(q, pol)) in blkcg_deactivate_policy()
1277 blk_queue_bypass_start(q); in blkcg_deactivate_policy()
1278 spin_lock_irq(q->queue_lock); in blkcg_deactivate_policy()
1280 __clear_bit(pol->plid, q->blkcg_pols); in blkcg_deactivate_policy()
1282 list_for_each_entry(blkg, &q->blkg_list, q_node) { in blkcg_deactivate_policy()
1296 spin_unlock_irq(q->queue_lock); in blkcg_deactivate_policy()
1297 blk_queue_bypass_end(q); in blkcg_deactivate_policy()
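blkcg_deactivate_policy() is roughly the mirror image: enter bypass, take q->queue_lock, clear the policy's bit in q->blkcg_pols, free each group's per-policy data while walking q->blkg_list, then unlock and end bypass. A short self-contained sketch of that teardown, in the same style of invented names as above; the bypass step is omitted from the model:

    #include <pthread.h>
    #include <stdlib.h>

    #define NGROUPS 4

    struct group_model { void *pd; };

    struct queue_model {
        pthread_mutex_t lock;
        struct group_model groups[NGROUPS];
        unsigned long blkcg_pols;
    };

    /* Mirrors blkcg_deactivate_policy(): clear the enable bit and free the
     * per-group policy data, all while holding the queue lock. */
    static void deactivate_policy(struct queue_model *q, int plid)
    {
        if (!(q->blkcg_pols & (1UL << plid)))
            return;                       /* not enabled on this queue */

        pthread_mutex_lock(&q->lock);
        q->blkcg_pols &= ~(1UL << plid);

        for (int i = 0; i < NGROUPS; i++) {
            free(q->groups[i].pd);        /* pol->pd_free_fn() in the kernel */
            q->groups[i].pd = NULL;
        }
        pthread_mutex_unlock(&q->lock);
    }

    int main(void)
    {
        struct queue_model q = { .lock = PTHREAD_MUTEX_INITIALIZER,
                                 .blkcg_pols = 1UL << 2 };
        deactivate_policy(&q, 2);
        return 0;
    }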