This source file includes the following definitions:
- blk_mq_map_queue_type
- blk_mq_map_queue
- blk_mq_rq_state
- __blk_mq_get_ctx
- blk_mq_get_ctx
- blk_mq_tags_from_data
- blk_mq_hctx_stopped
- blk_mq_hw_queue_mapped
- blk_mq_put_dispatch_budget
- blk_mq_get_dispatch_budget
- __blk_mq_put_driver_tag
- blk_mq_put_driver_tag
- blk_mq_clear_mq_map
- blk_mq_plug

#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
        struct kobject kobj;
        struct blk_mq_ctx __percpu      *queue_ctx;
};

/*
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
        struct {
                spinlock_t              lock;
                struct list_head        rq_lists[HCTX_MAX_TYPES];
        } ____cacheline_aligned_in_smp;

        unsigned int            cpu;
        unsigned short          index_hw[HCTX_MAX_TYPES];
        struct blk_mq_hw_ctx    *hctxs[HCTX_MAX_TYPES];

        /* incremented at dispatch time */
        unsigned long           rq_dispatched[2];
        unsigned long           rq_merged;

        /* incremented at completion time */
        unsigned long           ____cacheline_aligned_in_smp rq_completed[2];

        struct request_queue    *queue;
        struct blk_mq_ctxs      *ctxs;
        struct kobject          kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                                bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
                                        unsigned int hctx_idx,
                                        unsigned int nr_tags,
                                        unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for inserting requests into the software queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                                bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
                                  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                                struct list_head *list);

/* Issue a request directly to the driver */
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                                    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type, cpu) to a hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
                                                          enum hctx_type type,
                                                          unsigned int cpu)
{
        return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}

/*
 * blk_mq_map_queue() - map (cmd_flags, type) to a hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                                                     unsigned int flags,
                                                     struct blk_mq_ctx *ctx)
{
        enum hctx_type type = HCTX_TYPE_DEFAULT;

        /*
         * The caller ensures that polling is enabled if REQ_HIPRI is set.
         */
        if (flags & REQ_HIPRI)
                type = HCTX_TYPE_POLL;
        else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
                type = HCTX_TYPE_READ;

        return ctx->hctxs[type];
}
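
/*
 * Example (illustrative, not part of the original header): request allocation
 * resolves the hardware queue from the command flags and the submitting CPU's
 * software queue, roughly as blk_mq_get_request() does:
 *
 *      data->ctx = blk_mq_get_ctx(q);
 *      data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
 */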

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/*
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
        return READ_ONCE(rq->state);
}

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                           unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes a per-cpu software queue; it could just as well be per-node.
 * Preemption is not a concern here since the ctx's are persistent, but it
 * does mean the returned ctx may not always match the CPU the caller ends
 * up running on.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, raw_smp_processor_id());
}

struct blk_mq_alloc_data {
        /* input parameter */
        struct request_queue *q;
        blk_mq_req_flags_t flags;
        unsigned int shallow_depth;
        unsigned int cmd_flags;

        /* input & output parameter */
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
        if (data->flags & BLK_MQ_REQ_INTERNAL)
                return data->hctx->sched_tags;

        return data->hctx->tags;
}
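
/*
 * Example (illustrative, not part of the original header): tag allocation
 * selects the scheduler tags or the driver tags based on BLK_MQ_REQ_INTERNAL,
 * roughly as blk_mq_get_tag() does:
 *
 *      struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
 *      struct sbitmap_queue *bt = &tags->bitmap_tags;
 */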

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
        return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
                         unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        if (q->mq_ops->put_budget)
                q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        if (q->mq_ops->get_budget)
                return q->mq_ops->get_budget(hctx);
        return true;
}
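
/*
 * Example (illustrative, not part of the original header): dispatch paths pair
 * the budget helpers, releasing the budget again when no request ends up being
 * issued, roughly:
 *
 *      if (!blk_mq_get_dispatch_budget(hctx))
 *              break;
 *      rq = blk_mq_dequeue_from_ctx(hctx, ctx);
 *      if (!rq) {
 *              blk_mq_put_dispatch_budget(hctx);
 *              break;
 *      }
 */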

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
                                           struct request *rq)
{
        blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
        rq->tag = -1;

        if (rq->rq_flags & RQF_MQ_INFLIGHT) {
                rq->rq_flags &= ~RQF_MQ_INFLIGHT;
                atomic_dec(&hctx->nr_active);
        }
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
        if (rq->tag == -1 || rq->internal_tag == -1)
                return;

        __blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
        int cpu;

        for_each_possible_cpu(cpu)
                qmap->mq_map[cpu] = 0;
}
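
/*
 * Example (illustrative, not part of the original header): before asking a
 * driver's ->map_queues() callback to build a mapping, core code typically
 * resets each map first, along the lines of:
 *
 *      for (i = 0; i < set->nr_maps; i++)
 *              blk_mq_clear_mq_map(&set->map[i]);
 *      ret = set->ops->map_queues(set);
 */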

/*
 * blk_mq_plug() - Get caller context plug
 * @q: request queue
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging. For zoned block devices, delaying or
 * reordering sequential write BIOs would violate the device's sequential
 * write constraint, so such BIOs are never plugged.
 */
static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
                                           struct bio *bio)
{
        /*
         * For regular block devices or read operations, use the context plug,
         * which may be NULL if blk_start_plug() was not executed.
         */
        if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
                return current->plug;

        /* Zoned block device write operation case: do not plug the BIO */
        return NULL;
}
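
/*
 * Example (illustrative, not part of the original header): submission paths
 * use this helper instead of reading current->plug directly so that writes to
 * zoned block devices are never plugged, roughly (blk_add_rq_to_plug() here
 * stands for the internal blk-mq.c helper of this kernel era):
 *
 *      struct blk_plug *plug = blk_mq_plug(q, bio);
 *
 *      if (plug)
 *              blk_add_rq_to_plug(plug, rq);
 */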

#endif