root/block/bfq-cgroup.c


DEFINITIONS

This source file includes the following definitions.
  1. bfq_stat_init
  2. bfq_stat_exit
  3. bfq_stat_add
  4. bfq_stat_read
  5. bfq_stat_reset
  6. bfq_stat_add_aux
  7. blkg_prfill_stat
  8. BFQG_FLAG_FNS
  9. bfqg_stats_set_start_group_wait_time
  10. bfqg_stats_end_empty_time
  11. bfqg_stats_update_dequeue
  12. bfqg_stats_set_start_empty_time
  13. bfqg_stats_update_idle_time
  14. bfqg_stats_set_start_idle_time
  15. bfqg_stats_update_avg_queue_size
  16. bfqg_stats_update_io_add
  17. bfqg_stats_update_io_remove
  18. bfqg_stats_update_io_merged
  19. bfqg_stats_update_completion
  20. bfqg_stats_update_io_add
  21. bfqg_stats_update_io_remove
  22. bfqg_stats_update_io_merged
  23. bfqg_stats_update_completion
  24. bfqg_stats_update_dequeue
  25. bfqg_stats_set_start_empty_time
  26. bfqg_stats_update_idle_time
  27. bfqg_stats_set_start_idle_time
  28. bfqg_stats_update_avg_queue_size
  29. pd_to_bfqg
  30. bfqg_to_blkg
  31. blkg_to_bfqg
  32. bfqg_parent
  33. bfqq_group
  34. bfqg_get
  35. bfqg_put
  36. bfqg_and_blkg_get
  37. bfqg_and_blkg_put
  38. bfqg_stats_reset
  39. bfqg_stats_add_aux
  40. bfqg_stats_xfer_dead
  41. bfq_init_entity
  42. bfqg_stats_exit
  43. bfqg_stats_init
  44. cpd_to_bfqgd
  45. blkcg_to_bfqgd
  46. bfq_cpd_alloc
  47. bfq_cpd_init
  48. bfq_cpd_free
  49. bfq_pd_alloc
  50. bfq_pd_init
  51. bfq_pd_free
  52. bfq_pd_reset_stats
  53. bfq_group_set_parent
  54. bfq_lookup_bfqg
  55. bfq_find_set_group
  56. bfq_bfqq_move
  57. __bfq_bic_change_cgroup
  58. bfq_bic_update_cgroup
  59. bfq_flush_idle_tree
  60. bfq_reparent_leaf_entity
  61. bfq_reparent_active_queues
  62. bfq_pd_offline
  63. bfq_end_wr_async
  64. bfq_io_show_weight_legacy
  65. bfqg_prfill_weight_device
  66. bfq_io_show_weight
  67. bfq_group_set_weight
  68. bfq_io_set_weight_legacy
  69. bfq_io_set_device_weight
  70. bfq_io_set_weight
  71. bfqg_print_stat
  72. bfqg_print_rwstat
  73. bfqg_prfill_stat_recursive
  74. bfqg_prfill_rwstat_recursive
  75. bfqg_print_stat_recursive
  76. bfqg_print_rwstat_recursive
  77. bfqg_prfill_sectors
  78. bfqg_print_stat_sectors
  79. bfqg_prfill_sectors_recursive
  80. bfqg_print_stat_sectors_recursive
  81. bfqg_prfill_avg_queue_size
  82. bfqg_print_avg_queue_size
  83. bfq_create_group_hierarchy
  84. bfq_bfqq_move
  85. bfq_init_entity
  86. bfq_bic_update_cgroup
  87. bfq_end_wr_async
  88. bfq_find_set_group
  89. bfqq_group
  90. bfqg_and_blkg_get
  91. bfqg_and_blkg_put
  92. bfq_create_group_hierarchy

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  * cgroups support for the BFQ I/O scheduler.
   4  */
   5 #include <linux/module.h>
   6 #include <linux/slab.h>
   7 #include <linux/blkdev.h>
   8 #include <linux/cgroup.h>
   9 #include <linux/elevator.h>
  10 #include <linux/ktime.h>
  11 #include <linux/rbtree.h>
  12 #include <linux/ioprio.h>
  13 #include <linux/sbitmap.h>
  14 #include <linux/delay.h>
  15 
  16 #include "bfq-iosched.h"
  17 
  18 #ifdef CONFIG_BFQ_CGROUP_DEBUG
  19 static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
  20 {
  21         int ret;
  22 
  23         ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
  24         if (ret)
  25                 return ret;
  26 
  27         atomic64_set(&stat->aux_cnt, 0);
  28         return 0;
  29 }
  30 
  31 static void bfq_stat_exit(struct bfq_stat *stat)
  32 {
  33         percpu_counter_destroy(&stat->cpu_cnt);
  34 }
  35 
  36 /**
  37  * bfq_stat_add - add a value to a bfq_stat
  38  * @stat: target bfq_stat
  39  * @val: value to add
  40  *
   41  * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
   42  * don't re-enter this function for the same counter.
  43  */
  44 static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
  45 {
  46         percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
  47 }
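
bfq_stat is built on a percpu_counter: each CPU accumulates a private
delta and folds it into the shared count only once the delta reaches
BLKG_STAT_CPU_BATCH, keeping the update path cache-local. A minimal
user-space sketch of this batching scheme (hypothetical names, not the
kernel API):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define NR_CPUS 4
    #define BATCH   32              /* stands in for BLKG_STAT_CPU_BATCH */

    struct batched_counter {
            int64_t shared;         /* folded-in contributions */
            int64_t local[NR_CPUS]; /* per-"CPU" deltas */
    };

    /* Mirrors percpu_counter_add_batch(): cheap local add, rare fold. */
    static void counter_add(struct batched_counter *c, int cpu, int64_t val)
    {
            c->local[cpu] += val;
            if (llabs(c->local[cpu]) >= BATCH) {
                    c->shared += c->local[cpu]; /* the real code locks here */
                    c->local[cpu] = 0;
            }
    }

    /* Mirrors percpu_counter_sum_positive(): slow, exact read. */
    static int64_t counter_sum(const struct batched_counter *c)
    {
            int64_t sum = c->shared;

            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                    sum += c->local[cpu];
            return sum < 0 ? 0 : sum;
    }

    int main(void)
    {
            struct batched_counter c = { 0 };

            for (int i = 0; i < 1000; i++)
                    counter_add(&c, i % NR_CPUS, 1);
            printf("sum = %lld\n", (long long)counter_sum(&c)); /* 1000 */
            return 0;
    }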
  48 
  49 /**
  50  * bfq_stat_read - read the current value of a bfq_stat
  51  * @stat: bfq_stat to read
  52  */
  53 static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
  54 {
  55         return percpu_counter_sum_positive(&stat->cpu_cnt);
  56 }
  57 
  58 /**
  59  * bfq_stat_reset - reset a bfq_stat
  60  * @stat: bfq_stat to reset
  61  */
  62 static inline void bfq_stat_reset(struct bfq_stat *stat)
  63 {
  64         percpu_counter_set(&stat->cpu_cnt, 0);
  65         atomic64_set(&stat->aux_cnt, 0);
  66 }
  67 
  68 /**
  69  * bfq_stat_add_aux - add a bfq_stat into another's aux count
  70  * @to: the destination bfq_stat
  71  * @from: the source
  72  *
  73  * Add @from's count including the aux one to @to's aux count.
  74  */
  75 static inline void bfq_stat_add_aux(struct bfq_stat *to,
  76                                      struct bfq_stat *from)
  77 {
  78         atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
  79                      &to->aux_cnt);
  80 }
  81 
  82 /**
  83  * blkg_prfill_stat - prfill callback for bfq_stat
  84  * @sf: seq_file to print to
  85  * @pd: policy private data of interest
  86  * @off: offset to the bfq_stat in @pd
  87  *
  88  * prfill callback for printing a bfq_stat.
  89  */
  90 static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
  91                 int off)
  92 {
  93         return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
  94 }
  95 
  96 /* bfqg stats flags */
  97 enum bfqg_stats_flags {
  98         BFQG_stats_waiting = 0,
  99         BFQG_stats_idling,
 100         BFQG_stats_empty,
 101 };
 102 
 103 #define BFQG_FLAG_FNS(name)                                             \
 104 static void bfqg_stats_mark_##name(struct bfqg_stats *stats)    \
 105 {                                                                       \
 106         stats->flags |= (1 << BFQG_stats_##name);                       \
 107 }                                                                       \
 108 static void bfqg_stats_clear_##name(struct bfqg_stats *stats)   \
 109 {                                                                       \
 110         stats->flags &= ~(1 << BFQG_stats_##name);                      \
 111 }                                                                       \
 112 static int bfqg_stats_##name(struct bfqg_stats *stats)          \
 113 {                                                                       \
 114         return (stats->flags & (1 << BFQG_stats_##name)) != 0;          \
 115 }                                                                       \
 116 
 117 BFQG_FLAG_FNS(waiting)
 118 BFQG_FLAG_FNS(idling)
 119 BFQG_FLAG_FNS(empty)
 120 #undef BFQG_FLAG_FNS
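
For reference, each BFQG_FLAG_FNS(name) invocation above expands to a
mark/clear/test triple; BFQG_FLAG_FNS(waiting), for instance, yields:

    static void bfqg_stats_mark_waiting(struct bfqg_stats *stats)
    {
            stats->flags |= (1 << BFQG_stats_waiting);
    }
    static void bfqg_stats_clear_waiting(struct bfqg_stats *stats)
    {
            stats->flags &= ~(1 << BFQG_stats_waiting);
    }
    static int bfqg_stats_waiting(struct bfqg_stats *stats)
    {
            return (stats->flags & (1 << BFQG_stats_waiting)) != 0;
    }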
 121 
 122 /* This should be called with the scheduler lock held. */
 123 static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
 124 {
 125         u64 now;
 126 
 127         if (!bfqg_stats_waiting(stats))
 128                 return;
 129 
 130         now = ktime_get_ns();
 131         if (now > stats->start_group_wait_time)
 132                 bfq_stat_add(&stats->group_wait_time,
 133                               now - stats->start_group_wait_time);
 134         bfqg_stats_clear_waiting(stats);
 135 }
 136 
 137 /* This should be called with the scheduler lock held. */
 138 static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
 139                                                  struct bfq_group *curr_bfqg)
 140 {
 141         struct bfqg_stats *stats = &bfqg->stats;
 142 
 143         if (bfqg_stats_waiting(stats))
 144                 return;
 145         if (bfqg == curr_bfqg)
 146                 return;
 147         stats->start_group_wait_time = ktime_get_ns();
 148         bfqg_stats_mark_waiting(stats);
 149 }
 150 
 151 /* This should be called with the scheduler lock held. */
 152 static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
 153 {
 154         u64 now;
 155 
 156         if (!bfqg_stats_empty(stats))
 157                 return;
 158 
 159         now = ktime_get_ns();
 160         if (now > stats->start_empty_time)
 161                 bfq_stat_add(&stats->empty_time,
 162                               now - stats->start_empty_time);
 163         bfqg_stats_clear_empty(stats);
 164 }
 165 
 166 void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
 167 {
 168         bfq_stat_add(&bfqg->stats.dequeue, 1);
 169 }
 170 
 171 void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
 172 {
 173         struct bfqg_stats *stats = &bfqg->stats;
 174 
 175         if (blkg_rwstat_total(&stats->queued))
 176                 return;
 177 
 178         /*
  179          * The group is already marked empty. This can happen if bfqq got a
  180          * new request in the parent group and moved to this group while
  181          * being added to the service tree. Just ignore the event and move on.
 182          */
 183         if (bfqg_stats_empty(stats))
 184                 return;
 185 
 186         stats->start_empty_time = ktime_get_ns();
 187         bfqg_stats_mark_empty(stats);
 188 }
 189 
 190 void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
 191 {
 192         struct bfqg_stats *stats = &bfqg->stats;
 193 
 194         if (bfqg_stats_idling(stats)) {
 195                 u64 now = ktime_get_ns();
 196 
 197                 if (now > stats->start_idle_time)
 198                         bfq_stat_add(&stats->idle_time,
 199                                       now - stats->start_idle_time);
 200                 bfqg_stats_clear_idling(stats);
 201         }
 202 }
 203 
 204 void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
 205 {
 206         struct bfqg_stats *stats = &bfqg->stats;
 207 
 208         stats->start_idle_time = ktime_get_ns();
 209         bfqg_stats_mark_idling(stats);
 210 }
 211 
 212 void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
 213 {
 214         struct bfqg_stats *stats = &bfqg->stats;
 215 
 216         bfq_stat_add(&stats->avg_queue_size_sum,
 217                       blkg_rwstat_total(&stats->queued));
 218         bfq_stat_add(&stats->avg_queue_size_samples, 1);
 219         bfqg_stats_update_group_wait_time(stats);
 220 }
 221 
 222 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
 223                               unsigned int op)
 224 {
 225         blkg_rwstat_add(&bfqg->stats.queued, op, 1);
 226         bfqg_stats_end_empty_time(&bfqg->stats);
 227         if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
 228                 bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
 229 }
 230 
 231 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
 232 {
 233         blkg_rwstat_add(&bfqg->stats.queued, op, -1);
 234 }
 235 
 236 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
 237 {
 238         blkg_rwstat_add(&bfqg->stats.merged, op, 1);
 239 }
 240 
 241 void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
 242                                   u64 io_start_time_ns, unsigned int op)
 243 {
 244         struct bfqg_stats *stats = &bfqg->stats;
 245         u64 now = ktime_get_ns();
 246 
 247         if (now > io_start_time_ns)
 248                 blkg_rwstat_add(&stats->service_time, op,
 249                                 now - io_start_time_ns);
 250         if (io_start_time_ns > start_time_ns)
 251                 blkg_rwstat_add(&stats->wait_time, op,
 252                                 io_start_time_ns - start_time_ns);
 253 }
 254 
 255 #else /* CONFIG_BFQ_CGROUP_DEBUG */
 256 
 257 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
 258                               unsigned int op) { }
 259 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
 260 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
 261 void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
 262                                   u64 io_start_time_ns, unsigned int op) { }
 263 void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
 264 void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
 265 void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
 266 void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
 267 void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }
 268 
 269 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
 270 
 271 #ifdef CONFIG_BFQ_GROUP_IOSCHED
 272 
 273 /*
 274  * blk-cgroup policy-related handlers
 275  * The following functions help in converting between blk-cgroup
 276  * internal structures and BFQ-specific structures.
 277  */
 278 
 279 static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
 280 {
 281         return pd ? container_of(pd, struct bfq_group, pd) : NULL;
 282 }
 283 
 284 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
 285 {
 286         return pd_to_blkg(&bfqg->pd);
 287 }
 288 
 289 static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
 290 {
 291         return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
 292 }
 293 
 294 /*
 295  * bfq_group handlers
 296  * The following functions help in navigating the bfq_group hierarchy
  297  * by making it possible to find the parent of a bfq_group or the
  298  * bfq_group associated with a bfq_queue.
 299  */
 300 
 301 static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
 302 {
 303         struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;
 304 
 305         return pblkg ? blkg_to_bfqg(pblkg) : NULL;
 306 }
 307 
 308 struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
 309 {
 310         struct bfq_entity *group_entity = bfqq->entity.parent;
 311 
 312         return group_entity ? container_of(group_entity, struct bfq_group,
 313                                            entity) :
 314                               bfqq->bfqd->root_group;
 315 }
 316 
 317 /*
 318  * The following two functions handle get and put of a bfq_group by
 319  * wrapping the related blk-cgroup hooks.
 320  */
 321 
 322 static void bfqg_get(struct bfq_group *bfqg)
 323 {
 324         bfqg->ref++;
 325 }
 326 
 327 static void bfqg_put(struct bfq_group *bfqg)
 328 {
 329         bfqg->ref--;
 330 
 331         if (bfqg->ref == 0)
 332                 kfree(bfqg);
 333 }
 334 
 335 void bfqg_and_blkg_get(struct bfq_group *bfqg)
 336 {
 337         /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
 338         bfqg_get(bfqg);
 339 
 340         blkg_get(bfqg_to_blkg(bfqg));
 341 }
 342 
 343 void bfqg_and_blkg_put(struct bfq_group *bfqg)
 344 {
 345         blkg_put(bfqg_to_blkg(bfqg));
 346 
 347         bfqg_put(bfqg);
 348 }
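
/*
 * Illustrative note: two reference counts are involved because the two
 * objects have different lifetimes.  The blkg reference keeps the
 * blk-cgroup side alive, while the private bfqg->ref lets the bfq_group
 * survive blkg_free() -- which invokes bfq_pd_free() and drops the
 * allocation reference -- for as long as some bfq_queue or entity still
 * points to it.  See the comments in bfq_bic_update_cgroup.
 */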
 349 
 350 /* @stats = 0 */
 351 static void bfqg_stats_reset(struct bfqg_stats *stats)
 352 {
 353 #ifdef CONFIG_BFQ_CGROUP_DEBUG
 354         /* queued stats shouldn't be cleared */
 355         blkg_rwstat_reset(&stats->merged);
 356         blkg_rwstat_reset(&stats->service_time);
 357         blkg_rwstat_reset(&stats->wait_time);
 358         bfq_stat_reset(&stats->time);
 359         bfq_stat_reset(&stats->avg_queue_size_sum);
 360         bfq_stat_reset(&stats->avg_queue_size_samples);
 361         bfq_stat_reset(&stats->dequeue);
 362         bfq_stat_reset(&stats->group_wait_time);
 363         bfq_stat_reset(&stats->idle_time);
 364         bfq_stat_reset(&stats->empty_time);
 365 #endif
 366 }
 367 
 368 /* @to += @from */
 369 static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
 370 {
 371         if (!to || !from)
 372                 return;
 373 
 374 #ifdef CONFIG_BFQ_CGROUP_DEBUG
  375         /* queued stats shouldn't be transferred */
 376         blkg_rwstat_add_aux(&to->merged, &from->merged);
 377         blkg_rwstat_add_aux(&to->service_time, &from->service_time);
 378         blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
  379         bfq_stat_add_aux(&to->time, &from->time);
 380         bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
 381         bfq_stat_add_aux(&to->avg_queue_size_samples,
 382                           &from->avg_queue_size_samples);
 383         bfq_stat_add_aux(&to->dequeue, &from->dequeue);
 384         bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
 385         bfq_stat_add_aux(&to->idle_time, &from->idle_time);
 386         bfq_stat_add_aux(&to->empty_time, &from->empty_time);
 387 #endif
 388 }
 389 
 390 /*
 391  * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 392  * recursive stats can still account for the amount used by this bfqg after
 393  * it's gone.
 394  */
 395 static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
 396 {
 397         struct bfq_group *parent;
 398 
 399         if (!bfqg) /* root_group */
 400                 return;
 401 
 402         parent = bfqg_parent(bfqg);
 403 
 404         lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
 405 
 406         if (unlikely(!parent))
 407                 return;
 408 
 409         bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
 410         bfqg_stats_reset(&bfqg->stats);
 411 }
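
The invariant bfqg_stats_xfer_dead() relies on: a recursive sum (value
plus aux, taken over a subtree) is unchanged when a dying child's counts
are folded into its parent's aux counter. A self-contained sketch of
that bookkeeping, with plain integers standing in for the percpu
machinery:

    #include <stdio.h>
    #include <stdint.h>

    struct stat { int64_t value; int64_t aux; };

    /* What bfq_stat_add_aux() does: fold (value + aux) of @from into @to->aux. */
    static void stat_add_aux(struct stat *to, const struct stat *from)
    {
            to->aux += from->value + from->aux;
    }

    int main(void)
    {
            struct stat parent = { .value = 100, .aux = 0 };
            struct stat child  = { .value =  40, .aux = 5 };
            int64_t before, after;

            before = parent.value + parent.aux + child.value + child.aux;

            stat_add_aux(&parent, &child);  /* child group goes offline */
            child.value = child.aux = 0;    /* bfqg_stats_reset()       */

            after = parent.value + parent.aux;
            /* prints "recursive sum: 145 -> 145" */
            printf("recursive sum: %lld -> %lld\n",
                   (long long)before, (long long)after);
            return 0;
    }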
 412 
 413 void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
 414 {
 415         struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
 416 
 417         entity->weight = entity->new_weight;
 418         entity->orig_weight = entity->new_weight;
 419         if (bfqq) {
 420                 bfqq->ioprio = bfqq->new_ioprio;
 421                 bfqq->ioprio_class = bfqq->new_ioprio_class;
 422                 /*
 423                  * Make sure that bfqg and its associated blkg do not
 424                  * disappear before entity.
 425                  */
 426                 bfqg_and_blkg_get(bfqg);
 427         }
 428         entity->parent = bfqg->my_entity; /* NULL for root group */
 429         entity->sched_data = &bfqg->sched_data;
 430 }
 431 
 432 static void bfqg_stats_exit(struct bfqg_stats *stats)
 433 {
 434 #ifdef CONFIG_BFQ_CGROUP_DEBUG
 435         blkg_rwstat_exit(&stats->merged);
 436         blkg_rwstat_exit(&stats->service_time);
 437         blkg_rwstat_exit(&stats->wait_time);
 438         blkg_rwstat_exit(&stats->queued);
 439         bfq_stat_exit(&stats->time);
 440         bfq_stat_exit(&stats->avg_queue_size_sum);
 441         bfq_stat_exit(&stats->avg_queue_size_samples);
 442         bfq_stat_exit(&stats->dequeue);
 443         bfq_stat_exit(&stats->group_wait_time);
 444         bfq_stat_exit(&stats->idle_time);
 445         bfq_stat_exit(&stats->empty_time);
 446 #endif
 447 }
 448 
 449 static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
 450 {
 451 #ifdef CONFIG_BFQ_CGROUP_DEBUG
 452         if (blkg_rwstat_init(&stats->merged, gfp) ||
 453             blkg_rwstat_init(&stats->service_time, gfp) ||
 454             blkg_rwstat_init(&stats->wait_time, gfp) ||
 455             blkg_rwstat_init(&stats->queued, gfp) ||
 456             bfq_stat_init(&stats->time, gfp) ||
 457             bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
 458             bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
 459             bfq_stat_init(&stats->dequeue, gfp) ||
 460             bfq_stat_init(&stats->group_wait_time, gfp) ||
 461             bfq_stat_init(&stats->idle_time, gfp) ||
 462             bfq_stat_init(&stats->empty_time, gfp)) {
 463                 bfqg_stats_exit(stats);
 464                 return -ENOMEM;
 465         }
 466 #endif
 467 
 468         return 0;
 469 }
 470 
 471 static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
 472 {
 473         return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
 474 }
 475 
 476 static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
 477 {
 478         return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
 479 }
 480 
 481 static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
 482 {
 483         struct bfq_group_data *bgd;
 484 
 485         bgd = kzalloc(sizeof(*bgd), gfp);
 486         if (!bgd)
 487                 return NULL;
 488         return &bgd->pd;
 489 }
 490 
 491 static void bfq_cpd_init(struct blkcg_policy_data *cpd)
 492 {
 493         struct bfq_group_data *d = cpd_to_bfqgd(cpd);
 494 
 495         d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
 496                 CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
 497 }
 498 
 499 static void bfq_cpd_free(struct blkcg_policy_data *cpd)
 500 {
 501         kfree(cpd_to_bfqgd(cpd));
 502 }
 503 
 504 static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
 505                                              struct blkcg *blkcg)
 506 {
 507         struct bfq_group *bfqg;
 508 
 509         bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
 510         if (!bfqg)
 511                 return NULL;
 512 
 513         if (bfqg_stats_init(&bfqg->stats, gfp)) {
 514                 kfree(bfqg);
 515                 return NULL;
 516         }
 517 
 518         /* see comments in bfq_bic_update_cgroup for why refcounting */
 519         bfqg_get(bfqg);
 520         return &bfqg->pd;
 521 }
 522 
 523 static void bfq_pd_init(struct blkg_policy_data *pd)
 524 {
 525         struct blkcg_gq *blkg = pd_to_blkg(pd);
 526         struct bfq_group *bfqg = blkg_to_bfqg(blkg);
 527         struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
 528         struct bfq_entity *entity = &bfqg->entity;
 529         struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);
 530 
 531         entity->orig_weight = entity->weight = entity->new_weight = d->weight;
 532         entity->my_sched_data = &bfqg->sched_data;
 533         bfqg->my_entity = entity; /*
 534                                    * the root_group's will be set to NULL
 535                                    * in bfq_init_queue()
 536                                    */
 537         bfqg->bfqd = bfqd;
 538         bfqg->active_entities = 0;
 539         bfqg->rq_pos_tree = RB_ROOT;
 540 }
 541 
 542 static void bfq_pd_free(struct blkg_policy_data *pd)
 543 {
 544         struct bfq_group *bfqg = pd_to_bfqg(pd);
 545 
 546         bfqg_stats_exit(&bfqg->stats);
 547         bfqg_put(bfqg);
 548 }
 549 
 550 static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
 551 {
 552         struct bfq_group *bfqg = pd_to_bfqg(pd);
 553 
 554         bfqg_stats_reset(&bfqg->stats);
 555 }
 556 
 557 static void bfq_group_set_parent(struct bfq_group *bfqg,
 558                                         struct bfq_group *parent)
 559 {
 560         struct bfq_entity *entity;
 561 
 562         entity = &bfqg->entity;
 563         entity->parent = parent->my_entity;
 564         entity->sched_data = &parent->sched_data;
 565 }
 566 
 567 static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
 568                                          struct blkcg *blkcg)
 569 {
 570         struct blkcg_gq *blkg;
 571 
 572         blkg = blkg_lookup(blkcg, bfqd->queue);
 573         if (likely(blkg))
 574                 return blkg_to_bfqg(blkg);
 575         return NULL;
 576 }
 577 
 578 struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
 579                                      struct blkcg *blkcg)
 580 {
 581         struct bfq_group *bfqg, *parent;
 582         struct bfq_entity *entity;
 583 
 584         bfqg = bfq_lookup_bfqg(bfqd, blkcg);
 585 
 586         if (unlikely(!bfqg))
 587                 return NULL;
 588 
 589         /*
 590          * Update chain of bfq_groups as we might be handling a leaf group
 591          * which, along with some of its relatives, has not been hooked yet
 592          * to the private hierarchy of BFQ.
 593          */
 594         entity = &bfqg->entity;
 595         for_each_entity(entity) {
 596                 struct bfq_group *curr_bfqg = container_of(entity,
 597                                                 struct bfq_group, entity);
 598                 if (curr_bfqg != bfqd->root_group) {
 599                         parent = bfqg_parent(curr_bfqg);
 600                         if (!parent)
 601                                 parent = bfqd->root_group;
 602                         bfq_group_set_parent(curr_bfqg, parent);
 603                 }
 604         }
 605 
 606         return bfqg;
 607 }
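
/*
 * Illustrative example of the hooking above: a task in a freshly
 * created cgroup /A/B issues its first I/O.  blk-cgroup has already
 * allocated bfq_groups for B and A, but neither is linked into BFQ's
 * private hierarchy yet (entity->parent is still NULL).  The
 * for_each_entity() walk then performs:
 *
 *     B != root_group  ->  bfq_group_set_parent(B, A)
 *     A != root_group  ->  bfq_group_set_parent(A, root_group)
 *
 * Note that setting B's parent is what lets the walk reach A at all:
 * for_each_entity() follows the entity->parent pointers that the loop
 * body itself fills in.
 */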
 608 
 609 /**
 610  * bfq_bfqq_move - migrate @bfqq to @bfqg.
 611  * @bfqd: queue descriptor.
 612  * @bfqq: the queue to move.
 613  * @bfqg: the group to move to.
 614  *
 615  * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 616  * it on the new one.  Avoid putting the entity on the old group idle tree.
 617  *
 618  * Must be called under the scheduler lock, to make sure that the blkg
 619  * owning @bfqg does not disappear (see comments in
 620  * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 621  * objects).
 622  */
 623 void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 624                    struct bfq_group *bfqg)
 625 {
 626         struct bfq_entity *entity = &bfqq->entity;
 627 
 628         /*
 629          * Get extra reference to prevent bfqq from being freed in
 630          * next possible expire or deactivate.
 631          */
 632         bfqq->ref++;
 633 
 634         /* If bfqq is empty, then bfq_bfqq_expire also invokes
 635          * bfq_del_bfqq_busy, thereby removing bfqq and its entity
  636          * from data structures related to the current group. Otherwise we
 637          * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
 638          * we do below.
 639          */
 640         if (bfqq == bfqd->in_service_queue)
 641                 bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
 642                                 false, BFQQE_PREEMPTED);
 643 
 644         if (bfq_bfqq_busy(bfqq))
 645                 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
 646         else if (entity->on_st)
 647                 bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
 648         bfqg_and_blkg_put(bfqq_group(bfqq));
 649 
 650         entity->parent = bfqg->my_entity;
 651         entity->sched_data = &bfqg->sched_data;
 652         /* pin down bfqg and its associated blkg  */
 653         bfqg_and_blkg_get(bfqg);
 654 
 655         if (bfq_bfqq_busy(bfqq)) {
 656                 if (unlikely(!bfqd->nonrot_with_queueing))
 657                         bfq_pos_tree_add_move(bfqd, bfqq);
 658                 bfq_activate_bfqq(bfqd, bfqq);
 659         }
 660 
 661         if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
 662                 bfq_schedule_dispatch(bfqd);
 663         /* release extra ref taken above, bfqq may happen to be freed now */
 664         bfq_put_queue(bfqq);
 665 }
 666 
 667 /**
  668  * __bfq_bic_change_cgroup - move @bic to @blkcg.
 669  * @bfqd: the queue descriptor.
 670  * @bic: the bic to move.
 671  * @blkcg: the blk-cgroup to move to.
 672  *
  673  * Move bic to blkcg, assuming that bfqd->lock is held, which makes
  674  * sure that the reference to the cgroup is valid across the call (see
  675  * comments in bfq_bic_update_cgroup on this issue).
 676  *
 677  * NOTE: an alternative approach might have been to store the current
 678  * cgroup in bfqq and getting a reference to it, reducing the lookup
 679  * time here, at the price of slightly more complex code.
 680  */
 681 static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
 682                                                 struct bfq_io_cq *bic,
 683                                                 struct blkcg *blkcg)
 684 {
 685         struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
 686         struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
 687         struct bfq_group *bfqg;
 688         struct bfq_entity *entity;
 689 
 690         bfqg = bfq_find_set_group(bfqd, blkcg);
 691 
 692         if (unlikely(!bfqg))
 693                 bfqg = bfqd->root_group;
 694 
 695         if (async_bfqq) {
 696                 entity = &async_bfqq->entity;
 697 
 698                 if (entity->sched_data != &bfqg->sched_data) {
 699                         bic_set_bfqq(bic, NULL, 0);
 700                         bfq_release_process_ref(bfqd, async_bfqq);
 701                 }
 702         }
 703 
 704         if (sync_bfqq) {
 705                 entity = &sync_bfqq->entity;
 706                 if (entity->sched_data != &bfqg->sched_data)
 707                         bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
 708         }
 709 
 710         return bfqg;
 711 }
 712 
 713 void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
 714 {
 715         struct bfq_data *bfqd = bic_to_bfqd(bic);
 716         struct bfq_group *bfqg = NULL;
 717         uint64_t serial_nr;
 718 
 719         rcu_read_lock();
 720         serial_nr = __bio_blkcg(bio)->css.serial_nr;
 721 
 722         /*
 723          * Check whether blkcg has changed.  The condition may trigger
 724          * spuriously on a newly created cic but there's no harm.
 725          */
 726         if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
 727                 goto out;
 728 
 729         bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
 730         /*
 731          * Update blkg_path for bfq_log_* functions. We cache this
 732          * path, and update it here, for the following
 733          * reasons. Operations on blkg objects in blk-cgroup are
 734          * protected with the request_queue lock, and not with the
 735          * lock that protects the instances of this scheduler
 736          * (bfqd->lock). This exposes BFQ to the following sort of
 737          * race.
 738          *
 739          * The blkg_lookup performed in bfq_get_queue, protected
 740          * through rcu, may happen to return the address of a copy of
 741          * the original blkg. If this is the case, then the
 742          * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
 743          * the blkg, is useless: it does not prevent blk-cgroup code
 744          * from destroying both the original blkg and all objects
 745          * directly or indirectly referred by the copy of the
 746          * blkg.
 747          *
 748          * On the bright side, destroy operations on a blkg invoke, as
 749          * a first step, hooks of the scheduler associated with the
 750          * blkg. And these hooks are executed with bfqd->lock held for
 751          * BFQ. As a consequence, for any blkg associated with the
 752          * request queue this instance of the scheduler is attached
 753          * to, we are guaranteed that such a blkg is not destroyed, and
 754          * that all the pointers it contains are consistent, while we
 755          * are holding bfqd->lock. A blkg_lookup performed with
 756          * bfqd->lock held then returns a fully consistent blkg, which
  757  * remains consistent as long as this lock is held.
 758          *
 759          * Thanks to the last fact, and to the fact that: (1) bfqg has
 760          * been obtained through a blkg_lookup in the above
 761          * assignment, and (2) bfqd->lock is being held, here we can
 762          * safely use the policy data for the involved blkg (i.e., the
 763          * field bfqg->pd) to get to the blkg associated with bfqg,
 764          * and then we can safely use any field of blkg. After we
 765          * release bfqd->lock, even just getting blkg through this
 766          * bfqg may cause dangling references to be traversed, as
 767          * bfqg->pd may not exist any more.
 768          *
 769          * In view of the above facts, here we cache, in the bfqg, any
 770          * blkg data we may need for this bic, and for its associated
 771          * bfq_queue. As of now, we need to cache only the path of the
 772          * blkg, which is used in the bfq_log_* functions.
 773          *
 774          * Finally, note that bfqg itself needs to be protected from
 775          * destruction on the blkg_free of the original blkg (which
 776          * invokes bfq_pd_free). We use an additional private
 777          * refcounter for bfqg, to let it disappear only after no
 778          * bfq_queue refers to it any longer.
 779          */
 780         blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
 781         bic->blkcg_serial_nr = serial_nr;
 782 out:
 783         rcu_read_unlock();
 784 }
 785 
 786 /**
 787  * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 788  * @st: the service tree being flushed.
 789  */
 790 static void bfq_flush_idle_tree(struct bfq_service_tree *st)
 791 {
 792         struct bfq_entity *entity = st->first_idle;
 793 
 794         for (; entity ; entity = st->first_idle)
 795                 __bfq_deactivate_entity(entity, false);
 796 }
 797 
 798 /**
 799  * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 800  * @bfqd: the device data structure with the root group.
 801  * @entity: the entity to move, if entity is a leaf; or the parent entity
 802  *          of an active leaf entity to move, if entity is not a leaf.
 803  */
 804 static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
 805                                      struct bfq_entity *entity,
 806                                      int ioprio_class)
 807 {
 808         struct bfq_queue *bfqq;
 809         struct bfq_entity *child_entity = entity;
 810 
 811         while (child_entity->my_sched_data) { /* leaf not reached yet */
 812                 struct bfq_sched_data *child_sd = child_entity->my_sched_data;
 813                 struct bfq_service_tree *child_st = child_sd->service_tree +
 814                         ioprio_class;
 815                 struct rb_root *child_active = &child_st->active;
 816 
 817                 child_entity = bfq_entity_of(rb_first(child_active));
 818 
 819                 if (!child_entity)
 820                         child_entity = child_sd->in_service_entity;
 821         }
 822 
 823         bfqq = bfq_entity_to_bfqq(child_entity);
 824         bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
 825 }
 826 
 827 /**
 828  * bfq_reparent_active_queues - move to the root group all active queues.
 829  * @bfqd: the device data structure with the root group.
 830  * @bfqg: the group to move from.
 831  * @st: the service tree to start the search from.
 832  */
 833 static void bfq_reparent_active_queues(struct bfq_data *bfqd,
 834                                        struct bfq_group *bfqg,
 835                                        struct bfq_service_tree *st,
 836                                        int ioprio_class)
 837 {
 838         struct rb_root *active = &st->active;
 839         struct bfq_entity *entity;
 840 
 841         while ((entity = bfq_entity_of(rb_first(active))))
 842                 bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);
 843 
 844         if (bfqg->sched_data.in_service_entity)
 845                 bfq_reparent_leaf_entity(bfqd,
 846                                          bfqg->sched_data.in_service_entity,
 847                                          ioprio_class);
 848 }
 849 
 850 /**
 851  * bfq_pd_offline - deactivate the entity associated with @pd,
 852  *                  and reparent its children entities.
 853  * @pd: descriptor of the policy going offline.
 854  *
 855  * blkio already grabs the queue_lock for us, so no need to use
 856  * RCU-based magic
 857  */
 858 static void bfq_pd_offline(struct blkg_policy_data *pd)
 859 {
 860         struct bfq_service_tree *st;
 861         struct bfq_group *bfqg = pd_to_bfqg(pd);
 862         struct bfq_data *bfqd = bfqg->bfqd;
 863         struct bfq_entity *entity = bfqg->my_entity;
 864         unsigned long flags;
 865         int i;
 866 
 867         spin_lock_irqsave(&bfqd->lock, flags);
 868 
 869         if (!entity) /* root group */
 870                 goto put_async_queues;
 871 
 872         /*
 873          * Empty all service_trees belonging to this group before
 874          * deactivating the group itself.
 875          */
 876         for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
 877                 st = bfqg->sched_data.service_tree + i;
 878 
 879                 /*
 880                  * It may happen that some queues are still active
 881                  * (busy) upon group destruction (if the corresponding
 882                  * processes have been forced to terminate). We move
 883                  * all the leaf entities corresponding to these queues
 884                  * to the root_group.
 885                  * Also, it may happen that the group has an entity
 886                  * in service, which is disconnected from the active
 887                  * tree: it must be moved, too.
 888                  * There is no need to put the sync queues, as the
 889                  * scheduler has taken no reference.
 890                  */
 891                 bfq_reparent_active_queues(bfqd, bfqg, st, i);
 892 
 893                 /*
 894                  * The idle tree may still contain bfq_queues
 895                  * belonging to exited task because they never
 896                  * migrated to a different cgroup from the one being
 897                  * destroyed now. In addition, even
 898                  * bfq_reparent_active_queues() may happen to add some
 899                  * entities to the idle tree. It happens if, in some
 900                  * of the calls to bfq_bfqq_move() performed by
 901                  * bfq_reparent_active_queues(), the queue to move is
 902                  * empty and gets expired.
 903                  */
 904                 bfq_flush_idle_tree(st);
 905         }
 906 
 907         __bfq_deactivate_entity(entity, false);
 908 
 909 put_async_queues:
 910         bfq_put_async_queues(bfqd, bfqg);
 911 
 912         spin_unlock_irqrestore(&bfqd->lock, flags);
 913         /*
 914          * @blkg is going offline and will be ignored by
 915          * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
 916          * that they don't get lost.  If IOs complete after this point, the
 917          * stats for them will be lost.  Oh well...
 918          */
 919         bfqg_stats_xfer_dead(bfqg);
 920 }
 921 
 922 void bfq_end_wr_async(struct bfq_data *bfqd)
 923 {
 924         struct blkcg_gq *blkg;
 925 
 926         list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
 927                 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
 928 
 929                 bfq_end_wr_async_queues(bfqd, bfqg);
 930         }
 931         bfq_end_wr_async_queues(bfqd, bfqd->root_group);
 932 }
 933 
 934 static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
 935 {
 936         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
 937         struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
 938         unsigned int val = 0;
 939 
 940         if (bfqgd)
 941                 val = bfqgd->weight;
 942 
 943         seq_printf(sf, "%u\n", val);
 944 
 945         return 0;
 946 }
 947 
 948 static u64 bfqg_prfill_weight_device(struct seq_file *sf,
 949                                      struct blkg_policy_data *pd, int off)
 950 {
 951         struct bfq_group *bfqg = pd_to_bfqg(pd);
 952 
 953         if (!bfqg->entity.dev_weight)
 954                 return 0;
 955         return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
 956 }
 957 
 958 static int bfq_io_show_weight(struct seq_file *sf, void *v)
 959 {
 960         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
 961         struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
 962 
 963         seq_printf(sf, "default %u\n", bfqgd->weight);
 964         blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
 965                           &blkcg_policy_bfq, 0, false);
 966         return 0;
 967 }
 968 
 969 static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
 970 {
 971         weight = dev_weight ?: weight;
 972 
 973         bfqg->entity.dev_weight = dev_weight;
 974         /*
 975          * Setting the prio_changed flag of the entity
 976          * to 1 with new_weight == weight would re-set
 977          * the value of the weight to its ioprio mapping.
 978          * Set the flag only if necessary.
 979          */
 980         if ((unsigned short)weight != bfqg->entity.new_weight) {
 981                 bfqg->entity.new_weight = (unsigned short)weight;
 982                 /*
 983                  * Make sure that the above new value has been
 984                  * stored in bfqg->entity.new_weight before
 985                  * setting the prio_changed flag. In fact,
 986                  * this flag may be read asynchronously (in
 987                  * critical sections protected by a different
 988                  * lock than that held here), and finding this
 989                  * flag set may cause the execution of the code
 990                  * for updating parameters whose value may
 991                  * depend also on bfqg->entity.new_weight (in
 992                  * __bfq_entity_update_weight_prio).
 993                  * This barrier makes sure that the new value
 994                  * of bfqg->entity.new_weight is correctly
 995                  * seen in that code.
 996                  */
 997                 smp_wmb();
 998                 bfqg->entity.prio_changed = 1;
 999         }
1000 }
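
The smp_wmb() above orders the store to new_weight before the store to
prio_changed; the reader side must pair it with a read-side barrier so
that, once it observes prio_changed set, it also observes the new
weight. A user-space sketch of this publish/consume pairing, using C11
fences as stand-ins for smp_wmb()/smp_rmb() (illustrative, not the
kernel code; the kernel uses plain loads and stores around the
barriers, which the relaxed atomics below emulate):

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdio.h>

    static unsigned short new_weight;   /* payload, written first */
    static atomic_int prio_changed;     /* flag, written second */

    static void *writer(void *arg)
    {
            new_weight = 300;                            /* 1: data */
            atomic_thread_fence(memory_order_release);   /* ~smp_wmb() */
            atomic_store_explicit(&prio_changed, 1,
                                  memory_order_relaxed); /* 2: flag */
            return NULL;
    }

    static void *reader(void *arg)
    {
            while (!atomic_load_explicit(&prio_changed,
                                         memory_order_relaxed))
                    ;                                    /* spin on flag */
            atomic_thread_fence(memory_order_acquire);   /* ~smp_rmb() */
            printf("weight = %u\n", new_weight);         /* prints 300 */
            return NULL;
    }

    int main(void)
    {
            pthread_t w, r;

            pthread_create(&r, NULL, reader, NULL);
            pthread_create(&w, NULL, writer, NULL);
            pthread_join(w, NULL);
            pthread_join(r, NULL);
            return 0;
    }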
1001 
1002 static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
1003                                     struct cftype *cftype,
1004                                     u64 val)
1005 {
1006         struct blkcg *blkcg = css_to_blkcg(css);
1007         struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
1008         struct blkcg_gq *blkg;
1009         int ret = -ERANGE;
1010 
1011         if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
1012                 return ret;
1013 
1014         ret = 0;
1015         spin_lock_irq(&blkcg->lock);
1016         bfqgd->weight = (unsigned short)val;
1017         hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1018                 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
1019 
1020                 if (bfqg)
1021                         bfq_group_set_weight(bfqg, val, 0);
1022         }
1023         spin_unlock_irq(&blkcg->lock);
1024 
1025         return ret;
1026 }
1027 
1028 static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
1029                                         char *buf, size_t nbytes,
1030                                         loff_t off)
1031 {
1032         int ret;
1033         struct blkg_conf_ctx ctx;
1034         struct blkcg *blkcg = css_to_blkcg(of_css(of));
1035         struct bfq_group *bfqg;
1036         u64 v;
1037 
1038         ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
1039         if (ret)
1040                 return ret;
1041 
1042         if (sscanf(ctx.body, "%llu", &v) == 1) {
1043                 /* require "default" on dfl */
1044                 ret = -ERANGE;
1045                 if (!v)
1046                         goto out;
1047         } else if (!strcmp(strim(ctx.body), "default")) {
1048                 v = 0;
1049         } else {
1050                 ret = -EINVAL;
1051                 goto out;
1052         }
1053 
1054         bfqg = blkg_to_bfqg(ctx.blkg);
1055 
1056         ret = -ERANGE;
1057         if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
1058                 bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
1059                 ret = 0;
1060         }
1061 out:
1062         blkg_conf_finish(&ctx);
1063         return ret ?: nbytes;
1064 }
1065 
1066 static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
1067                                  char *buf, size_t nbytes,
1068                                  loff_t off)
1069 {
1070         char *endp;
1071         int ret;
1072         u64 v;
1073 
1074         buf = strim(buf);
1075 
1076         /* "WEIGHT" or "default WEIGHT" sets the default weight */
1077         v = simple_strtoull(buf, &endp, 0);
1078         if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
1079                 ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
1080                 return ret ?: nbytes;
1081         }
1082 
1083         return bfq_io_set_device_weight(of, buf, nbytes, off);
1084 }
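
Taken together, the two write paths above accept both a default weight
and per-device overrides through the same file. Illustrative usage on a
cgroup v2 hierarchy, assuming BFQ is the active scheduler for disk 8:16:

    echo 200 > io.bfq.weight             # set the group's default weight
    echo "default 200" > io.bfq.weight   # same thing, explicit form
    echo "8:16 300" > io.bfq.weight      # per-device weight (MAJ:MIN W)
    echo "8:16 default" > io.bfq.weight  # drop the per-device override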
1085 
1086 #ifdef CONFIG_BFQ_CGROUP_DEBUG
1087 static int bfqg_print_stat(struct seq_file *sf, void *v)
1088 {
1089         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1090                           &blkcg_policy_bfq, seq_cft(sf)->private, false);
1091         return 0;
1092 }
1093 
1094 static int bfqg_print_rwstat(struct seq_file *sf, void *v)
1095 {
1096         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1097                           &blkcg_policy_bfq, seq_cft(sf)->private, true);
1098         return 0;
1099 }
1100 
1101 static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
1102                                       struct blkg_policy_data *pd, int off)
1103 {
1104         struct blkcg_gq *blkg = pd_to_blkg(pd);
1105         struct blkcg_gq *pos_blkg;
1106         struct cgroup_subsys_state *pos_css;
1107         u64 sum = 0;
1108 
1109         lockdep_assert_held(&blkg->q->queue_lock);
1110 
1111         rcu_read_lock();
1112         blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
1113                 struct bfq_stat *stat;
1114 
1115                 if (!pos_blkg->online)
1116                         continue;
1117 
1118                 stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
1119                 sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
1120         }
1121         rcu_read_unlock();
1122 
1123         return __blkg_prfill_u64(sf, pd, sum);
1124 }
1125 
1126 static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
1127                                         struct blkg_policy_data *pd, int off)
1128 {
1129         struct blkg_rwstat_sample sum;
1130 
1131         blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
1132         return __blkg_prfill_rwstat(sf, pd, &sum);
1133 }
1134 
1135 static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
1136 {
1137         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1138                           bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
1139                           seq_cft(sf)->private, false);
1140         return 0;
1141 }
1142 
1143 static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
1144 {
1145         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1146                           bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
1147                           seq_cft(sf)->private, true);
1148         return 0;
1149 }
1150 
1151 static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
1152                                int off)
1153 {
1154         u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
1155 
1156         return __blkg_prfill_u64(sf, pd, sum >> 9);
1157 }
1158 
1159 static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
1160 {
1161         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1162                           bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
1163         return 0;
1164 }
1165 
1166 static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
1167                                          struct blkg_policy_data *pd, int off)
1168 {
1169         struct blkg_rwstat_sample tmp;
1170 
1171         blkg_rwstat_recursive_sum(pd->blkg, NULL,
1172                         offsetof(struct blkcg_gq, stat_bytes), &tmp);
1173 
1174         return __blkg_prfill_u64(sf, pd,
1175                 (tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
1176 }
1177 
1178 static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
1179 {
1180         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1181                           bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
1182                           false);
1183         return 0;
1184 }
1185 
1186 static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
1187                                       struct blkg_policy_data *pd, int off)
1188 {
1189         struct bfq_group *bfqg = pd_to_bfqg(pd);
1190         u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
1191         u64 v = 0;
1192 
1193         if (samples) {
1194                 v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
1195                 v = div64_u64(v, samples);
1196         }
1197         __blkg_prfill_u64(sf, pd, v);
1198         return 0;
1199 }
1200 
1201 /* print avg_queue_size */
1202 static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
1203 {
1204         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1205                           bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
1206                           0, false);
1207         return 0;
1208 }
1209 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
1210 
1211 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
1212 {
1213         int ret;
1214 
1215         ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
1216         if (ret)
1217                 return NULL;
1218 
1219         return blkg_to_bfqg(bfqd->queue->root_blkg);
1220 }
1221 
1222 struct blkcg_policy blkcg_policy_bfq = {
1223         .dfl_cftypes            = bfq_blkg_files,
1224         .legacy_cftypes         = bfq_blkcg_legacy_files,
1225 
1226         .cpd_alloc_fn           = bfq_cpd_alloc,
1227         .cpd_init_fn            = bfq_cpd_init,
1228         .cpd_bind_fn            = bfq_cpd_init,
1229         .cpd_free_fn            = bfq_cpd_free,
1230 
1231         .pd_alloc_fn            = bfq_pd_alloc,
1232         .pd_init_fn             = bfq_pd_init,
1233         .pd_offline_fn          = bfq_pd_offline,
1234         .pd_free_fn             = bfq_pd_free,
1235         .pd_reset_stats_fn      = bfq_pd_reset_stats,
1236 };
1237 
1238 struct cftype bfq_blkcg_legacy_files[] = {
1239         {
1240                 .name = "bfq.weight",
1241                 .flags = CFTYPE_NOT_ON_ROOT,
1242                 .seq_show = bfq_io_show_weight_legacy,
1243                 .write_u64 = bfq_io_set_weight_legacy,
1244         },
1245         {
1246                 .name = "bfq.weight_device",
1247                 .flags = CFTYPE_NOT_ON_ROOT,
1248                 .seq_show = bfq_io_show_weight,
1249                 .write = bfq_io_set_weight,
1250         },
1251 
1252         /* statistics, covers only the tasks in the bfqg */
1253         {
1254                 .name = "bfq.io_service_bytes",
1255                 .private = (unsigned long)&blkcg_policy_bfq,
1256                 .seq_show = blkg_print_stat_bytes,
1257         },
1258         {
1259                 .name = "bfq.io_serviced",
1260                 .private = (unsigned long)&blkcg_policy_bfq,
1261                 .seq_show = blkg_print_stat_ios,
1262         },
1263 #ifdef CONFIG_BFQ_CGROUP_DEBUG
1264         {
1265                 .name = "bfq.time",
1266                 .private = offsetof(struct bfq_group, stats.time),
1267                 .seq_show = bfqg_print_stat,
1268         },
1269         {
1270                 .name = "bfq.sectors",
1271                 .seq_show = bfqg_print_stat_sectors,
1272         },
1273         {
1274                 .name = "bfq.io_service_time",
1275                 .private = offsetof(struct bfq_group, stats.service_time),
1276                 .seq_show = bfqg_print_rwstat,
1277         },
1278         {
1279                 .name = "bfq.io_wait_time",
1280                 .private = offsetof(struct bfq_group, stats.wait_time),
1281                 .seq_show = bfqg_print_rwstat,
1282         },
1283         {
1284                 .name = "bfq.io_merged",
1285                 .private = offsetof(struct bfq_group, stats.merged),
1286                 .seq_show = bfqg_print_rwstat,
1287         },
1288         {
1289                 .name = "bfq.io_queued",
1290                 .private = offsetof(struct bfq_group, stats.queued),
1291                 .seq_show = bfqg_print_rwstat,
1292         },
1293 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
1294 
1295         /* the same statistics which cover the bfqg and its descendants */
1296         {
1297                 .name = "bfq.io_service_bytes_recursive",
1298                 .private = (unsigned long)&blkcg_policy_bfq,
1299                 .seq_show = blkg_print_stat_bytes_recursive,
1300         },
1301         {
1302                 .name = "bfq.io_serviced_recursive",
1303                 .private = (unsigned long)&blkcg_policy_bfq,
1304                 .seq_show = blkg_print_stat_ios_recursive,
1305         },
1306 #ifdef CONFIG_BFQ_CGROUP_DEBUG
1307         {
1308                 .name = "bfq.time_recursive",
1309                 .private = offsetof(struct bfq_group, stats.time),
1310                 .seq_show = bfqg_print_stat_recursive,
1311         },
1312         {
1313                 .name = "bfq.sectors_recursive",
1314                 .seq_show = bfqg_print_stat_sectors_recursive,
1315         },
1316         {
1317                 .name = "bfq.io_service_time_recursive",
1318                 .private = offsetof(struct bfq_group, stats.service_time),
1319                 .seq_show = bfqg_print_rwstat_recursive,
1320         },
1321         {
1322                 .name = "bfq.io_wait_time_recursive",
1323                 .private = offsetof(struct bfq_group, stats.wait_time),
1324                 .seq_show = bfqg_print_rwstat_recursive,
1325         },
1326         {
1327                 .name = "bfq.io_merged_recursive",
1328                 .private = offsetof(struct bfq_group, stats.merged),
1329                 .seq_show = bfqg_print_rwstat_recursive,
1330         },
1331         {
1332                 .name = "bfq.io_queued_recursive",
1333                 .private = offsetof(struct bfq_group, stats.queued),
1334                 .seq_show = bfqg_print_rwstat_recursive,
1335         },
1336         {
1337                 .name = "bfq.avg_queue_size",
1338                 .seq_show = bfqg_print_avg_queue_size,
1339         },
1340         {
1341                 .name = "bfq.group_wait_time",
1342                 .private = offsetof(struct bfq_group, stats.group_wait_time),
1343                 .seq_show = bfqg_print_stat,
1344         },
1345         {
1346                 .name = "bfq.idle_time",
1347                 .private = offsetof(struct bfq_group, stats.idle_time),
1348                 .seq_show = bfqg_print_stat,
1349         },
1350         {
1351                 .name = "bfq.empty_time",
1352                 .private = offsetof(struct bfq_group, stats.empty_time),
1353                 .seq_show = bfqg_print_stat,
1354         },
1355         {
1356                 .name = "bfq.dequeue",
1357                 .private = offsetof(struct bfq_group, stats.dequeue),
1358                 .seq_show = bfqg_print_stat,
1359         },
1360 #endif  /* CONFIG_BFQ_CGROUP_DEBUG */
1361         { }     /* terminate */
1362 };
1363 
1364 struct cftype bfq_blkg_files[] = {
1365         {
1366                 .name = "bfq.weight",
1367                 .flags = CFTYPE_NOT_ON_ROOT,
1368                 .seq_show = bfq_io_show_weight,
1369                 .write = bfq_io_set_weight,
1370         },
1371         {} /* terminate */
1372 };
1373 
1374 #else   /* CONFIG_BFQ_GROUP_IOSCHED */
1375 
1376 void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1377                    struct bfq_group *bfqg) {}
1378 
1379 void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
1380 {
1381         struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
1382 
1383         entity->weight = entity->new_weight;
1384         entity->orig_weight = entity->new_weight;
1385         if (bfqq) {
1386                 bfqq->ioprio = bfqq->new_ioprio;
1387                 bfqq->ioprio_class = bfqq->new_ioprio_class;
1388         }
1389         entity->sched_data = &bfqg->sched_data;
1390 }
1391 
1392 void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}
1393 
1394 void bfq_end_wr_async(struct bfq_data *bfqd)
1395 {
1396         bfq_end_wr_async_queues(bfqd, bfqd->root_group);
1397 }
1398 
1399 struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
1400 {
1401         return bfqd->root_group;
1402 }
1403 
1404 struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
1405 {
1406         return bfqq->bfqd->root_group;
1407 }
1408 
1409 void bfqg_and_blkg_get(struct bfq_group *bfqg) {}
1410 
1411 void bfqg_and_blkg_put(struct bfq_group *bfqg) {}
1412 
1413 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
1414 {
1415         struct bfq_group *bfqg;
1416         int i;
1417 
1418         bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
1419         if (!bfqg)
1420                 return NULL;
1421 
1422         for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
1423                 bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
1424 
1425         return bfqg;
1426 }
1427 #endif  /* CONFIG_BFQ_GROUP_IOSCHED */
