root/include/linux/iocontext.h


DEFINITIONS

This source file includes the following definitions:
  1. get_io_context_active
  2. ioc_task_link
  3. put_io_context
  4. exit_io_context

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef IOCONTEXT_H
#define IOCONTEXT_H

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

enum {
        ICQ_EXITED              = 1 << 2,
        ICQ_DESTROYED           = 1 << 3,
};
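
/*
 * These flags are owned by the block core (block/blk-ioc.c); elevators do
 * not set or clear them.  As a rough, illustrative sketch (not the
 * verbatim kernel code), tearing down an icq looks like:
 *
 *      if (icq->flags & ICQ_EXITED)
 *              return;
 *      ... let the elevator drop its private icq state ...
 *      icq->flags |= ICQ_EXITED;
 *
 * ICQ_DESTROYED is set, analogously, once the icq has been unlinked from
 * both the ioc and the queue and handed to RCU for freeing, so that a
 * racing teardown path can tell the work has already been done.
 */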

/*
 * An io_cq (icq) is an association between an io_context (ioc) and a
 * request_queue (q).  This is used by elevators which need to track
 * information per ioc - q pair.
 *
 * An elevator can request use of icq's by setting elevator_type->icq_size
 * and ->icq_align.  Both size and align must be larger than those of
 * struct io_cq, and the elevator can use the tail area for private
 * information.  The recommended way to do this is to define a struct
 * which contains io_cq as the first member followed by private members,
 * and to use its size and align.  For example,
 *
 *      struct snail_io_cq {
 *              struct io_cq    icq;
 *              int             poke_snail;
 *              int             feed_snail;
 *      };
 *
 *      struct elevator_type snail_elv_type = {
 *              .ops =          { ... },
 *              .icq_size =     sizeof(struct snail_io_cq),
 *              .icq_align =    __alignof__(struct snail_io_cq),
 *              ...
 *      };
 *
 * If icq_size is set, the block core will manage icq's.  All requests
 * will have their ->elv.icq field set before
 * elevator_ops->elevator_set_req_fn() is called and will be holding a
 * reference to the associated io_context.
 *
 * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is
 * called and, on destruction, ->elevator_exit_icq_fn().  Both functions
 * are called with both the associated io_context and queue locks held.
 *
 * An elevator is allowed to look up an icq using ioc_lookup_icq() while
 * holding the queue lock, but the returned icq is valid only until the
 * queue lock is released.  Elevators cannot and should not try to create
 * or destroy icq's.
 *
 * As icq's are linked from both ioc and q, the locking rules are a bit
 * complex.
 *
 * - ioc lock nests inside q lock.
 *
 * - ioc->icq_list and icq->ioc_node are protected by ioc lock.
 *   q->icq_list and icq->q_node by q lock.
 *
 * - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq
 *   itself is protected by q lock.  However, both the indexes and icq
 *   itself are also RCU managed and lookup can be performed holding only
 *   the q lock.
 *
 * - icq's are not reference counted.  They are destroyed when either the
 *   ioc or q goes away.  Each request with icq set holds an extra
 *   reference to ioc to ensure it stays until the request is completed.
 *
 * - Linking and unlinking icq's are performed while holding both ioc and
 *   q locks.  Due to the lock ordering, q exit is simple but ioc exit
 *   requires a reverse-order double lock dance.
 */
struct io_cq {
        struct request_queue    *q;
        struct io_context       *ioc;

        /*
         * q_node and ioc_node link io_cq through icq_list of q and ioc
         * respectively.  Both fields are unused once ioc_exit_icq() is
         * called, and they share space with __rcu_icq_cache and
         * __rcu_head, which are used for the RCU free of io_cq.
         */
        union {
                struct list_head        q_node;
                struct kmem_cache       *__rcu_icq_cache;
        };
        union {
                struct hlist_node       ioc_node;
                struct rcu_head         __rcu_head;
        };

        unsigned int            flags;
};
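
/*
 * Illustrative sketch, not part of this header: given the hypothetical
 * snail elevator from the comment above, looking up its private
 * per-(ioc, q) data under the queue lock would look roughly like the
 * following.  ioc_lookup_icq() is a block layer internal (declared in
 * block/blk.h), and snail_io_cq is the made-up struct from the example.
 *
 *      static struct snail_io_cq *snail_icq(struct io_context *ioc,
 *                                           struct request_queue *q)
 *      {
 *              struct io_cq *icq;
 *
 *              lockdep_assert_held(&q->queue_lock);
 *              icq = ioc_lookup_icq(ioc, q);
 *              if (!icq)
 *                      return NULL;
 *              return container_of(icq, struct snail_io_cq, icq);
 *      }
 *
 * Because io_cq is the first member, container_of() amounts to a cast.
 * The returned pointer is valid only until the queue lock is released;
 * the elevator must neither cache it nor try to free it itself.
 */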

/*
 * I/O subsystem state of the associated processes.  It is refcounted
 * and kmalloc'ed.  It may be shared between processes.
 */
struct io_context {
        atomic_long_t refcount;
        atomic_t active_ref;
        atomic_t nr_tasks;

        /* all the fields below are protected by this lock */
        spinlock_t lock;

        unsigned short ioprio;

        /*
         * For request batching
         */
        int nr_batch_requests;     /* Number of requests left in the batch */
        unsigned long last_waited; /* Time last woken after wait for request */

        struct radix_tree_root  icq_tree;
        struct io_cq __rcu      *icq_hint;
        struct hlist_head       icq_list;

        struct work_struct release_work;
};

/**
 * get_io_context_active - get active reference on ioc
 * @ioc: ioc of interest
 *
 * Only iocs with active reference can issue new IOs.  This function
 * acquires an active reference on @ioc.  The caller must already have an
 * active reference on @ioc.
 */
static inline void get_io_context_active(struct io_context *ioc)
{
        WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
        WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
        atomic_long_inc(&ioc->refcount);
        atomic_inc(&ioc->active_ref);
}
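
/*
 * Illustrative sketch, not part of the function above: an active
 * reference taken with get_io_context_active() is paired with
 * put_io_context_active(), declared further down.  A hypothetical caller
 * that wants to keep issuing I/O against an ioc for a while might do:
 *
 *      get_io_context_active(ioc);
 *      ... submit I/O on behalf of ioc ...
 *      put_io_context_active(ioc);
 *
 * The get bumps both the plain refcount and active_ref; the put drops
 * both again.  The WARN_ON_ONCE()s above enforce the documented rule
 * that the caller must already hold an active reference; this helper
 * cannot resurrect an ioc whose tasks have all exited.
 */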

/*
 * Link an additional task to @ioc: takes an extra reference and an extra
 * active reference and bumps the task count.  The caller must already
 * hold an active reference on @ioc.
 */
static inline void ioc_task_link(struct io_context *ioc)
{
        get_io_context_active(ioc);

        WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
        atomic_inc(&ioc->nr_tasks);
}
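
/*
 * Illustrative sketch, not part of this header: ioc_task_link() is used
 * by the fork path when a child is created with CLONE_IO and therefore
 * shares the parent's io_context.  Roughly, simplified from
 * kernel/fork.c:copy_io():
 *
 *      struct io_context *ioc = current->io_context;
 *
 *      if (ioc && (clone_flags & CLONE_IO)) {
 *              ioc_task_link(ioc);
 *              tsk->io_context = ioc;
 *      }
 *
 * The extra nr_tasks and active_ref taken here are dropped again by
 * exit_io_context() when the sharing task exits.
 */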

struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
void put_io_context_active(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
struct io_context *get_task_io_context(struct task_struct *task,
                                       gfp_t gfp_flags, int node);
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
static inline void exit_io_context(struct task_struct *task) { }
#endif
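
/*
 * Illustrative sketch, not part of this header: get_task_io_context()
 * creates the ioc of @task if necessary and returns it with a plain
 * reference held, which must later be dropped with put_io_context().
 * For example, updating a task's I/O priority looks roughly like this
 * (simplified from set_task_ioprio() in the block layer):
 *
 *      struct io_context *ioc;
 *
 *      ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 *      if (ioc) {
 *              ioc->ioprio = new_ioprio;
 *              put_io_context(ioc);
 *      }
 */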

#endif
