#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>

struct blk_mq_tags;
struct blk_flush_queue;

struct blk_mq_cpu_notifier {
	struct list_head list;
	void *data;
	int (*notify)(void *data, unsigned long action, unsigned int cpu);
};

struct blk_mq_ctxmap {
	unsigned int size;
	unsigned int bits_per_word;
	struct blk_align_bitmap *map;
};

struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
	} ____cacheline_aligned_in_smp;

	unsigned long		state;		/* BLK_MQ_S_* flags */
	struct delayed_work	run_work;
	struct delayed_work	delay_work;
	cpumask_var_t		cpumask;
	int			next_cpu;
	int			next_cpu_batch;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	struct request_queue	*queue;
	struct blk_flush_queue	*fq;

	void			*driver_data;

	struct blk_mq_ctxmap	ctx_map;

	unsigned int		nr_ctx;
	struct blk_mq_ctx	**ctxs;

	atomic_t		wait_index;

	struct blk_mq_tags	*tags;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	10
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		queue_num;

	atomic_t		nr_active;

	struct blk_mq_cpu_notifier	cpu_notifier;
	struct kobject		kobj;
};

struct blk_mq_tag_set {
	struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};
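
/*
 * Illustrative sketch, not part of this header: how a driver might fill
 * in a blk_mq_tag_set before registering a queue. The "foo" names
 * (foo_mq_ops, struct foo_cmd) are hypothetical placeholders.
 *
 *	static struct blk_mq_tag_set foo_tag_set;
 *	struct request_queue *q;
 *
 *	foo_tag_set.ops		 = &foo_mq_ops;
 *	foo_tag_set.nr_hw_queues = 1;
 *	foo_tag_set.queue_depth	 = 64;
 *	foo_tag_set.numa_node	 = NUMA_NO_NODE;
 *	foo_tag_set.cmd_size	 = sizeof(struct foo_cmd);
 *	foo_tag_set.flags	 = BLK_MQ_F_SHOULD_MERGE;
 *
 *	if (blk_mq_alloc_tag_set(&foo_tag_set))
 *		return -ENOMEM;
 *	q = blk_mq_init_queue(&foo_tag_set);
 *	if (IS_ERR(q))
 *		goto out_free_tag_set;
 */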

struct blk_mq_queue_data {
	struct request *rq;
	struct list_head *list;
	bool last;
};

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);

typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Map to specific hardware queue
	 */
	map_queue_fn		*map_queue;

	/*
	 * Called on request timeout
	 */
	timeout_fn		*timeout;

	softirq_done_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;
	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver-specific data.
	 *
	 * Tags greater than or equal to queue_depth are reserved for
	 * setting up the flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
};
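
/*
 * Illustrative sketch, not part of this header: the minimal ops a driver
 * must provide. ->queue_rq hands a request to the hardware and returns
 * one of the BLK_MQ_RQ_QUEUE_* codes below; ->map_queue can simply use
 * the default mapping, blk_mq_map_queue(). foo_queue_rq(), foo_hw_submit()
 * and struct foo_cmd are hypothetical.
 *
 *	static int foo_queue_rq(struct blk_mq_hw_ctx *hctx,
 *				const struct blk_mq_queue_data *bd)
 *	{
 *		struct foo_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
 *
 *		blk_mq_start_request(bd->rq);
 *		if (foo_hw_submit(hctx->driver_data, cmd))
 *			return BLK_MQ_RQ_QUEUE_BUSY;	// retried later
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 *
 *	static struct blk_mq_ops foo_mq_ops = {
 *		.queue_rq	= foo_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *	};
 */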

enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_SG_MERGE	= 1 << 2,
	BLK_MQ_F_SYSFS_UP	= 1 << 3,
	BLK_MQ_F_DEFER_ISSUE	= 1 << 4,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
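
/*
 * Sketch of how the two macros above round-trip a tag allocation policy
 * through the flag word (BLK_TAG_ALLOC_RR is assumed to come from
 * <linux/blkdev.h>):
 *
 *	set->flags |= BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);
 *	...
 *	policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
 */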

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q);
void blk_mq_finish_init(struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_free_request(struct request *rq);
void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		gfp_t gfp, bool reserved);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
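
/*
 * Sketch: blk_mq_unique_tag() packs the hardware queue index into the
 * upper 16 bits and the per-queue tag into the lower 16 bits, so the
 * two helpers above recover each half:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */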

struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);

int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);

void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
		void *priv);
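
/*
 * Sketch of a busy_iter_fn callback for blk_mq_tag_busy_iter(), e.g. to
 * fail every in-flight request on a dead device (foo_abort_rq() is a
 * hypothetical helper):
 *
 *	static void foo_abort_rq(struct blk_mq_hw_ctx *hctx,
 *				 struct request *rq, void *priv, bool reserved)
 *	{
 *		blk_mq_end_request(rq, -EIO);
 *	}
 *
 *	blk_mq_tag_busy_iter(hctx, foo_abort_rq, NULL);
 */
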
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);

/*
 * Driver command data is laid out immediately after the request, so
 * subtract the request size to get back from the PDU to the original
 * request, and add it to get from the request to the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
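
/*
 * Sketch: with cmd_size set in the tag set (sizeof(struct foo_cmd) in
 * the hypothetical example above), the two helpers convert both ways:
 *
 *	struct foo_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *same_rq = blk_mq_rq_from_pdu(cmd);
 */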

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define queue_for_each_ctx(q, ctx, i)					\
	for ((i) = 0; (i) < (q)->nr_queues &&				\
	     ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

#define blk_ctx_sum(q, sum)						\
({									\
	struct blk_mq_ctx *__x;						\
	unsigned int __ret = 0, __i;					\
									\
	queue_for_each_ctx((q), __x, __i)				\
		__ret += sum;						\
	__ret;								\
})
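
/*
 * Sketch: stopping every hardware queue by hand with the iterator above
 * (blk_mq_stop_hw_queues() already does exactly this):
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	int i;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		blk_mq_stop_hw_queue(hctx);
 */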

#endif