root/include/linux/blk_types.h


DEFINITIONS

This source file includes the following definitions:
  1. blk_path_error
  2. __bio_issue_time
  3. bio_issue_time
  4. bio_issue_size
  5. bio_issue_init
  6. bio_set_op_attrs
  7. op_is_write
  8. op_is_flush
  9. op_is_sync
  10. op_is_discard
  11. op_stat_group
  12. blk_qc_t_valid
  13. blk_qc_t_to_queue_num
  14. blk_qc_t_to_tag
  15. blk_qc_t_is_internal

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP         ((__force blk_status_t)1)
#define BLK_STS_TIMEOUT         ((__force blk_status_t)2)
#define BLK_STS_NOSPC           ((__force blk_status_t)3)
#define BLK_STS_TRANSPORT       ((__force blk_status_t)4)
#define BLK_STS_TARGET          ((__force blk_status_t)5)
#define BLK_STS_NEXUS           ((__force blk_status_t)6)
#define BLK_STS_MEDIUM          ((__force blk_status_t)7)
#define BLK_STS_PROTECTION      ((__force blk_status_t)8)
#define BLK_STS_RESOURCE        ((__force blk_status_t)9)
#define BLK_STS_IOERR           ((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)

#define BLK_STS_AGAIN           ((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE    ((__force blk_status_t)13)

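/*
 * Illustrative sketch, not part of the original header: how a driver's
 * submission path might choose between the two resource statuses described
 * above.  The helper name and its parameters are hypothetical.
 */
static inline blk_status_t example_out_of_resource_status(bool device_resource,
                                                           bool io_in_flight)
{
        /*
         * A device specific resource (e.g. a hardware tag) is released by a
         * completion, so the queue is guaranteed to be rerun.
         */
        if (device_resource && io_in_flight)
                return BLK_STS_DEV_RESOURCE;

        /* System wide resources (memory, DMA mappings): no such guarantee. */
        return BLK_STS_RESOURCE;
}
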
/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
        switch (error) {
        case BLK_STS_NOTSUPP:
        case BLK_STS_NOSPC:
        case BLK_STS_TARGET:
        case BLK_STS_NEXUS:
        case BLK_STS_MEDIUM:
        case BLK_STS_PROTECTION:
                return false;
        }

        /* Anything else could be a path failure, so should be retried */
        return true;
}

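/*
 * Illustrative sketch, not part of the original header: a caller such as a
 * multipath driver could use blk_path_error() to decide whether retrying a
 * failed request on another path is worthwhile.  The helper name and its
 * second parameter are hypothetical.
 */
static inline bool example_retry_on_other_path(blk_status_t error,
                                               bool other_path_available)
{
        return other_path_available && blk_path_error(error);
}
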
/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
        (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
        u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
        return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
        return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
        return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
                                       sector_t size)
{
        size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
        issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
                        (ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
                        ((u64)size << BIO_ISSUE_SIZE_SHIFT));
}

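/*
 * Illustrative sketch, not part of the original header: reading back the
 * single reserved top bit of the 1/12/51-bit layout packed above.  Per the
 * comment above, that bit is reserved for blk-throtl; the helper name here
 * is hypothetical.
 */
static inline bool example_bio_issue_skip_latency(const struct bio_issue *issue)
{
        return (issue->value & BIO_ISSUE_THROTL_SKIP_LATENCY) != 0;
}
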
/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
        struct bio              *bi_next;       /* request queue link */
        struct gendisk          *bi_disk;
        unsigned int            bi_opf;         /* bottom bits req flags,
                                                 * top bits REQ_OP. Use
                                                 * accessors.
                                                 */
        unsigned short          bi_flags;       /* status, etc. and bvec pool number */
        unsigned short          bi_ioprio;
        unsigned short          bi_write_hint;
        blk_status_t            bi_status;
        u8                      bi_partno;

        struct bvec_iter        bi_iter;

        atomic_t                __bi_remaining;
        bio_end_io_t            *bi_end_io;

        void                    *bi_private;
#ifdef CONFIG_BLK_CGROUP
        /*
         * Represents the association of the css and request_queue for the bio.
         * If a bio goes direct to device, it will not have a blkg as it will
         * not have a request_queue associated with it.  The reference is put
         * on release of the bio.
         */
        struct blkcg_gq         *bi_blkg;
        struct bio_issue        bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
        u64                     bi_iocost_cost;
#endif
#endif
        union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
                struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
        };

        unsigned short          bi_vcnt;        /* how many bio_vec's */

        /*
         * Everything starting with bi_max_vecs will be preserved by bio_reset()
         */

        unsigned short          bi_max_vecs;    /* max bvl_vecs we can hold */

        atomic_t                __bi_cnt;       /* pin count */

        struct bio_vec          *bi_io_vec;     /* the actual vec list */

        struct bio_set          *bi_pool;

        /*
         * We can inline a number of vecs at the end of the bio, to avoid
         * double allocations for a small number of bio_vecs. This member
         * MUST obviously be kept at the very end of the bio.
         */
        struct bio_vec          bi_inline_vecs[0];
};

#define BIO_RESET_BYTES         offsetof(struct bio, bi_max_vecs)

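/*
 * Descriptive note, not part of the original header: bio_reset() in
 * block/bio.c clears only these first BIO_RESET_BYTES bytes of the bio
 * (roughly memset(bio, 0, BIO_RESET_BYTES)), so everything from bi_max_vecs
 * onwards (the vec array pointer, the pool and the inline vecs) survives a
 * reset, matching the comment inside struct bio above.
 */
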
/*
 * bio flags
 */
enum {
        BIO_NO_PAGE_REF,        /* don't put release vec pages */
        BIO_CLONED,             /* doesn't own data */
        BIO_BOUNCED,            /* bio is a bounce bio */
        BIO_USER_MAPPED,        /* contains user pages */
        BIO_NULL_MAPPED,        /* contains invalid user pages */
        BIO_WORKINGSET,         /* contains userspace workingset pages */
        BIO_QUIET,              /* Make BIO Quiet */
        BIO_CHAIN,              /* chained bio, ->bi_remaining in effect */
        BIO_REFFED,             /* bio has elevated ->bi_cnt */
        BIO_THROTTLED,          /* This bio has already been subjected to
                                 * throttling rules. Don't do it again. */
        BIO_TRACE_COMPLETION,   /* bio_endio() should trace the final completion
                                 * of this bio. */
        BIO_QUEUE_ENTERED,      /* can use blk_queue_enter_live() */
        BIO_TRACKED,            /* set if bio goes through the rq_qos path */
        BIO_FLAG_LAST
};

/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR            6
#define BVEC_POOL_MAX           (BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS          (3)
#define BVEC_POOL_OFFSET        (16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)      ((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1<< BVEC_POOL_BITS) < (BVEC_POOL_NR+1)
# error "BVEC_POOL_BITS is too small"
#endif

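/*
 * Illustrative sketch, not part of the original header: per the comment
 * above, the top BVEC_POOL_BITS bits of ->bi_flags hold the bvec pool index
 * plus one, so zero means the bio has no pool-allocated bvecs to free.  The
 * helper name is hypothetical.
 */
static inline bool example_bio_has_pool_bvecs(const struct bio *bio)
{
        return BVEC_POOL_IDX(bio) != 0;
}
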
/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS  BVEC_POOL_OFFSET

typedef __u32 __bitwise blk_mq_req_flags_t;

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS     8
#define REQ_OP_MASK     ((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS   24

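/*
 * Illustrative sketch, not part of the original header: splitting a combined
 * bi_opf/cmd_flags value under the layout above.  Operation numbers with bit
 * 0 set (REQ_OP_WRITE, REQ_OP_DISCARD, ...) transfer data to the device.
 * The helper name is hypothetical; the real accessors are bio_op()/req_op()
 * and op_is_write() further down in this file.
 */
static inline unsigned int example_split_opf(unsigned int opf,
                                             unsigned int *flags)
{
        *flags = opf & ~REQ_OP_MASK;    /* upper 24 bits: REQ_* flags */
        return opf & REQ_OP_MASK;       /* lower 8 bits: REQ_OP_* number */
}
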
enum req_opf {
        /* read sectors from the device */
        REQ_OP_READ             = 0,
        /* write sectors to the device */
        REQ_OP_WRITE            = 1,
        /* flush the volatile write cache */
        REQ_OP_FLUSH            = 2,
        /* discard sectors */
        REQ_OP_DISCARD          = 3,
        /* securely erase sectors */
        REQ_OP_SECURE_ERASE     = 5,
        /* reset a zone write pointer */
        REQ_OP_ZONE_RESET       = 6,
        /* write the same sector many times */
        REQ_OP_WRITE_SAME       = 7,
        /* reset all the zones present on the device */
        REQ_OP_ZONE_RESET_ALL   = 8,
        /* write the zero filled sector many times */
        REQ_OP_WRITE_ZEROES     = 9,

        /* SCSI passthrough using struct scsi_request */
        REQ_OP_SCSI_IN          = 32,
        REQ_OP_SCSI_OUT         = 33,
        /* Driver private requests */
        REQ_OP_DRV_IN           = 34,
        REQ_OP_DRV_OUT          = 35,

        REQ_OP_LAST,
};

enum req_flag_bits {
        __REQ_FAILFAST_DEV =    /* no driver retries of device errors */
                REQ_OP_BITS,
        __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
        __REQ_FAILFAST_DRIVER,  /* no driver retries of driver errors */
        __REQ_SYNC,             /* request is sync (sync write or read) */
        __REQ_META,             /* metadata io request */
        __REQ_PRIO,             /* boost priority in cfq */
        __REQ_NOMERGE,          /* don't touch this for merging */
        __REQ_IDLE,             /* anticipate more IO after this one */
        __REQ_INTEGRITY,        /* I/O includes block integrity payload */
        __REQ_FUA,              /* forced unit access */
        __REQ_PREFLUSH,         /* request for cache flush */
        __REQ_RAHEAD,           /* read ahead, can fail anytime */
        __REQ_BACKGROUND,       /* background IO */
        __REQ_NOWAIT,           /* Don't wait if request will block */
        __REQ_NOWAIT_INLINE,    /* Return would-block error inline */
        /*
         * When a shared kthread needs to issue a bio for a cgroup, doing
         * so synchronously can lead to priority inversions as the kthread
         * can be trapped waiting for that cgroup.  CGROUP_PUNT flag makes
         * submit_bio() punt the actual issuing to a dedicated per-blkcg
         * work item to avoid such priority inversions.
         */
        __REQ_CGROUP_PUNT,

        /* command specific flags for REQ_OP_WRITE_ZEROES: */
        __REQ_NOUNMAP,          /* do not free blocks when zeroing */

        __REQ_HIPRI,

        /* for driver use */
        __REQ_DRV,
        __REQ_SWAP,             /* swapping request. */
        __REQ_NR_BITS,          /* stops here */
};

#define REQ_FAILFAST_DEV        (1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT  (1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER     (1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC                (1ULL << __REQ_SYNC)
#define REQ_META                (1ULL << __REQ_META)
#define REQ_PRIO                (1ULL << __REQ_PRIO)
#define REQ_NOMERGE             (1ULL << __REQ_NOMERGE)
#define REQ_IDLE                (1ULL << __REQ_IDLE)
#define REQ_INTEGRITY           (1ULL << __REQ_INTEGRITY)
#define REQ_FUA                 (1ULL << __REQ_FUA)
#define REQ_PREFLUSH            (1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD              (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND          (1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT              (1ULL << __REQ_NOWAIT)
#define REQ_NOWAIT_INLINE       (1ULL << __REQ_NOWAIT_INLINE)
#define REQ_CGROUP_PUNT         (1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP             (1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI               (1ULL << __REQ_HIPRI)

#define REQ_DRV                 (1ULL << __REQ_DRV)
#define REQ_SWAP                (1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
        (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
        (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

enum stat_group {
        STAT_READ,
        STAT_WRITE,
        STAT_DISCARD,

        NR_STAT_GROUPS
};

#define bio_op(bio) \
        ((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
        ((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
                unsigned op_flags)
{
        bio->bi_opf = op | op_flags;
}

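/*
 * Illustrative sketch, not part of the original header: since
 * bio_set_op_attrs() is marked obsolete above, new code ORs the operation
 * and flags into ->bi_opf directly.  The helper name is hypothetical.
 */
static inline void example_prepare_sync_write(struct bio *bio)
{
        bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
}
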
static inline bool op_is_write(unsigned int op)
{
        return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
        return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
        return (op & REQ_OP_MASK) == REQ_OP_READ ||
                (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(unsigned int op)
{
        return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

static inline int op_stat_group(unsigned int op)
{
        if (op_is_discard(op))
                return STAT_DISCARD;
        return op_is_write(op);
}

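/*
 * Illustrative sketch, not part of the original header: op_stat_group()
 * relies on STAT_READ == 0 and STAT_WRITE == 1 matching the 0/1 result of
 * op_is_write(), so reads map to STAT_READ, write-direction ops to
 * STAT_WRITE, and discards to STAT_DISCARD.  The helper name is
 * hypothetical.
 */
static inline const char *example_stat_group_name(unsigned int op)
{
        switch (op_stat_group(op)) {
        case STAT_READ:
                return "read";
        case STAT_WRITE:
                return "write";
        case STAT_DISCARD:
                return "discard";
        default:
                return "unknown";
        }
}
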
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE           -1U
#define BLK_QC_T_EAGAIN         -2U
#define BLK_QC_T_SHIFT          16
#define BLK_QC_T_INTERNAL       (1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
        return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
        return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
        return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
        return (cookie & BLK_QC_T_INTERNAL) != 0;
}

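/*
 * Illustrative sketch, not part of the original header: a completion cookie
 * that the helpers above would decode, with the hardware queue number in the
 * bits above BLK_QC_T_SHIFT, the tag in the low 16 bits, and bit 31 marking
 * an internal tag.  The helper name is hypothetical.
 */
static inline blk_qc_t example_make_qc(unsigned int queue_num,
                                       unsigned int tag, bool internal)
{
        blk_qc_t cookie = (queue_num << BLK_QC_T_SHIFT) | tag;

        return internal ? (cookie | BLK_QC_T_INTERNAL) : cookie;
}
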
struct blk_rq_stat {
        u64 mean;
        u64 min;
        u64 max;
        u32 nr_samples;
        u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */
