root/block/blk-mq.h

DEFINITIONS

This source file includes the following definitions:
  1. blk_mq_map_queue_type
  2. blk_mq_map_queue
  3. blk_mq_rq_state
  4. __blk_mq_get_ctx
  5. blk_mq_get_ctx
  6. blk_mq_tags_from_data
  7. blk_mq_hctx_stopped
  8. blk_mq_hw_queue_mapped
  9. blk_mq_put_dispatch_budget
  10. blk_mq_get_dispatch_budget
  11. __blk_mq_put_driver_tag
  12. blk_mq_put_driver_tag
  13. blk_mq_clear_mq_map
  14. blk_mq_plug

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
        struct kobject kobj;
        struct blk_mq_ctx __percpu      *queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
        struct {
                spinlock_t              lock;
                struct list_head        rq_lists[HCTX_MAX_TYPES];
        } ____cacheline_aligned_in_smp;

        unsigned int            cpu;
        unsigned short          index_hw[HCTX_MAX_TYPES];
        struct blk_mq_hw_ctx    *hctxs[HCTX_MAX_TYPES];

        /* incremented at dispatch time */
        unsigned long           rq_dispatched[2];
        unsigned long           rq_merged;

        /* incremented at completion time */
        unsigned long           ____cacheline_aligned_in_smp rq_completed[2];

        struct request_queue    *queue;
        struct blk_mq_ctxs      *ctxs;
        struct kobject          kobj;
} ____cacheline_aligned_in_smp;
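
/*
 * Illustrative sketch (assumes the usual ctx<->hctx mapping established in
 * blk-mq.c, not text from the original header): for each hctx type,
 * ctx->hctxs[type] is the hardware queue this software queue feeds, and
 * ctx->index_hw[type] is this ctx's slot in that hctx's ->ctxs[] array:
 *
 *      struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT];
 *
 *      WARN_ON(hctx->ctxs[ctx->index_hw[HCTX_TYPE_DEFAULT]] != ctx);
 */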

void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                                bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
                                        unsigned int hctx_idx,
                                        unsigned int nr_tags,
                                        unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx, unsigned int depth);
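
/*
 * Illustrative pairing (a sketch, not the exact blk-mq.c callers): the tag
 * map is built with blk_mq_alloc_rq_map() followed by blk_mq_alloc_rqs(),
 * and torn down in the reverse order, roughly:
 *
 *      tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
 *      if (tags && blk_mq_alloc_rqs(set, tags, hctx_idx, depth) < 0) {
 *              blk_mq_free_rq_map(tags);
 *              tags = NULL;
 *      }
 *      ...
 *      blk_mq_free_rqs(set, tags, hctx_idx);
 *      blk_mq_free_rq_map(tags);
 */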

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                                bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
                                  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                                struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                                    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
                                                          enum hctx_type type,
                                                          unsigned int cpu)
{
        return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}
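
/*
 * Illustrative use (sketch only): look up the default hardware queue for a
 * given CPU without going through a software queue context, e.g.
 *
 *      struct blk_mq_hw_ctx *hctx =
 *              blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT, cpu);
 */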

/*
 * blk_mq_map_queue() - map (cmd_flags,ctx) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue ctx (per-CPU)
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                                                     unsigned int flags,
                                                     struct blk_mq_ctx *ctx)
{
        enum hctx_type type = HCTX_TYPE_DEFAULT;

        /*
         * The caller ensures that if REQ_HIPRI is set, polling is enabled.
         */
        if (flags & REQ_HIPRI)
                type = HCTX_TYPE_POLL;
        else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
                type = HCTX_TYPE_READ;

        return ctx->hctxs[type];
}
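
/*
 * Illustrative use (sketch, not the exact allocation-path code): request
 * allocation resolves its hardware queue from the command flags and the
 * submitting CPU's software queue, roughly:
 *
 *      data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
 */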

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
        return READ_ONCE(rq->state);
}
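
/*
 * Illustrative check (sketch only): the state is one of MQ_RQ_IDLE,
 * MQ_RQ_IN_FLIGHT or MQ_RQ_COMPLETE, so a caller might do e.g.
 *
 *      if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
 *              ...     // started but not yet completed
 */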

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                           unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could just as well be per-node,
 * for instance, but for now this is hardcoded as-is. Note that we don't care
 * about preemption, since we know the ctxs are persistent. This does mean
 * that we can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
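
/*
 * Illustrative sketch (a typical submit-side pattern, not the exact caller
 * code): no get_cpu()/put_cpu() protection is needed around the lookup
 * because the ctx stays valid even if the task migrates right afterwards:
 *
 *      struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *      struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 *
 * The ctx may simply no longer belong to the CPU the task ends up running on.
 */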

struct blk_mq_alloc_data {
        /* input parameter */
        struct request_queue *q;
        blk_mq_req_flags_t flags;
        unsigned int shallow_depth;
        unsigned int cmd_flags;

        /* input & output parameter */
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
        if (data->flags & BLK_MQ_REQ_INTERNAL)
                return data->hctx->sched_tags;

        return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
        return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
                         unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        if (q->mq_ops->put_budget)
                q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        if (q->mq_ops->get_budget)
                return q->mq_ops->get_budget(hctx);
        return true;
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
                                           struct request *rq)
{
        blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
        rq->tag = -1;

        if (rq->rq_flags & RQF_MQ_INFLIGHT) {
                rq->rq_flags &= ~RQF_MQ_INFLIGHT;
                atomic_dec(&hctx->nr_active);
        }
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
        if (rq->tag == -1 || rq->internal_tag == -1)
                return;

        __blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
        int cpu;

        for_each_possible_cpu(cpu)
                qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @q: request queue
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to differ from the order in which submit_bio() is executed
 * when multiple contexts concurrently issue BIOs to a device, even if these
 * contexts are synchronized to tightly control the BIO issuing order. While
 * this is not a problem with regular block devices, this ordering change can
 * cause write BIO failures with zoned block devices, as these require
 * sequential write patterns to zones. Prevent this from happening by ignoring
 * the plug state of a BIO issuing context if the target request queue is for
 * a zoned block device and the BIO to plug is a write operation.
 *
 * Return: current->plug if the bio can be plugged, NULL otherwise.
 */
static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
                                           struct bio *bio)
{
        /*
         * For regular block devices or read operations, use the context plug,
         * which may be NULL if blk_start_plug() was not executed.
         */
        if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
                return current->plug;

        /* Zoned block device write operation case: do not plug the BIO */
        return NULL;
}
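
/*
 * Illustrative use (sketch, not the exact submission-path code): callers use
 * the returned plug instead of reading current->plug directly, roughly:
 *
 *      struct blk_plug *plug = blk_mq_plug(q, bio);
 *
 *      if (plug)
 *              ...     // queue the request on the plug list for batching
 *      else
 *              ...     // insert/dispatch the request immediately
 */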

#endif
