root/include/linux/irqdesc.h

DEFINITIONS

This source file includes the following definitions.
  1. irq_lock_sparse
  2. irq_unlock_sparse
  3. irq_data_to_desc
  4. irq_desc_get_irq
  5. irq_desc_get_irq_data
  6. irq_desc_get_chip
  7. irq_desc_get_chip_data
  8. irq_desc_get_handler_data
  9. generic_handle_irq_desc
  10. handle_domain_irq
  11. irq_desc_has_action
  12. irq_has_action
  13. irq_set_handler_locked
  14. irq_set_chip_handler_name_locked
  15. irq_balancing_disabled
  16. irq_is_percpu
  17. irq_is_percpu_devid
  18. irq_set_lockdep_class
  19. __irq_set_preflow_handler

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQDESC_H
#define _LINUX_IRQDESC_H

#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/mutex.h>

/*
 * Core internal functions to deal with irq descriptors
 */

struct irq_affinity_notify;
struct proc_dir_entry;
struct module;
struct irq_desc;
struct irq_domain;
struct pt_regs;

/**
 * struct irq_desc - interrupt descriptor
 * @irq_common_data:    per irq and chip data passed down to chip functions
 * @kstat_irqs:         irq stats per cpu
 * @handle_irq:         highlevel irq-events handler
 * @preflow_handler:    handler called before the flow handler (currently used by sparc)
 * @action:             the irq action chain
 * @status_use_accessors: status information
 * @core_internal_state__do_not_mess_with_it: core internal status information
 * @depth:              disable-depth, for nested irq_disable() calls
 * @wake_depth:         enable depth, for multiple irq_set_irq_wake() callers
 * @tot_count:          stats field for non-percpu irqs
 * @irq_count:          stats field to detect stalled irqs
 * @last_unhandled:     aging timer for unhandled count
 * @irqs_unhandled:     stats field for spurious unhandled interrupts
 * @threads_handled:    stats field for deferred spurious detection of threaded handlers
 * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
 * @lock:               locking for SMP
 * @affinity_hint:      hint to user space for preferred irq affinity
 * @affinity_notify:    context for notification of affinity changes
 * @pending_mask:       pending rebalanced interrupts
 * @threads_oneshot:    bitfield to handle shared oneshot threads
 * @threads_active:     number of irqaction threads currently running
 * @wait_for_threads:   wait queue for sync_irq to wait for threaded handlers
 * @nr_actions:         number of installed actions on this descriptor
 * @no_suspend_depth:   number of irqactions on an irq descriptor with
 *                      IRQF_NO_SUSPEND set
 * @force_resume_depth: number of irqactions on an irq descriptor with
 *                      IRQF_FORCE_RESUME set
 * @rcu:                rcu head for delayed free
 * @kobj:               kobject used to represent this struct in sysfs
 * @request_mutex:      mutex to protect request/free before locking desc->lock
 * @dir:                /proc/irq/ procfs entry
 * @debugfs_file:       dentry for the debugfs file
 * @name:               flow handler name for /proc/interrupts output
 */
struct irq_desc {
        struct irq_common_data  irq_common_data;
        struct irq_data         irq_data;
        unsigned int __percpu   *kstat_irqs;
        irq_flow_handler_t      handle_irq;
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
        irq_preflow_handler_t   preflow_handler;
#endif
        struct irqaction        *action;        /* IRQ action list */
        unsigned int            status_use_accessors;
        unsigned int            core_internal_state__do_not_mess_with_it;
        unsigned int            depth;          /* nested irq disables */
        unsigned int            wake_depth;     /* nested wake enables */
        unsigned int            tot_count;
        unsigned int            irq_count;      /* For detecting broken IRQs */
        unsigned long           last_unhandled; /* Aging timer for unhandled count */
        unsigned int            irqs_unhandled;
        atomic_t                threads_handled;
        int                     threads_handled_last;
        raw_spinlock_t          lock;
        struct cpumask          *percpu_enabled;
        const struct cpumask    *percpu_affinity;
#ifdef CONFIG_SMP
        const struct cpumask    *affinity_hint;
        struct irq_affinity_notify *affinity_notify;
#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_var_t           pending_mask;
#endif
#endif
        unsigned long           threads_oneshot;
        atomic_t                threads_active;
        wait_queue_head_t       wait_for_threads;
#ifdef CONFIG_PM_SLEEP
        unsigned int            nr_actions;
        unsigned int            no_suspend_depth;
        unsigned int            cond_suspend_depth;
        unsigned int            force_resume_depth;
#endif
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry   *dir;
#endif
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
        struct dentry           *debugfs_file;
        const char              *dev_name;
#endif
#ifdef CONFIG_SPARSE_IRQ
        struct rcu_head         rcu;
        struct kobject          kobj;
#endif
        struct mutex            request_mutex;
        int                     parent_irq;
        struct module           *owner;
        const char              *name;
} ____cacheline_internodealigned_in_smp;

#ifdef CONFIG_SPARSE_IRQ
extern void irq_lock_sparse(void);
extern void irq_unlock_sparse(void);
#else
static inline void irq_lock_sparse(void) { }
static inline void irq_unlock_sparse(void) { }
extern struct irq_desc irq_desc[NR_IRQS];
#endif

static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
{
        return container_of(data->common, struct irq_desc, irq_common_data);
}

static inline unsigned int irq_desc_get_irq(struct irq_desc *desc)
{
        return desc->irq_data.irq;
}

static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
{
        return &desc->irq_data;
}

static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
{
        return desc->irq_data.chip;
}

static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
{
        return desc->irq_data.chip_data;
}

static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
{
        return desc->irq_common_data.handler_data;
}

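/*
 * Illustrative sketch, not part of this header: code that already holds a
 * struct irq_desc, e.g. a custom flow handler, is expected to use the
 * accessors above instead of dereferencing the descriptor fields directly.
 */
static void my_flow_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        void *priv = irq_desc_get_handler_data(desc);
        unsigned int irq = irq_desc_get_irq(desc);

        pr_debug("irq %u: chip %s, handler data %p\n", irq, chip->name, priv);
        /* device specific flow handling would go here */
}
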
/*
 * Architectures call this to let the generic IRQ layer
 * handle an interrupt.
 */
static inline void generic_handle_irq_desc(struct irq_desc *desc)
{
        desc->handle_irq(desc);
}

int generic_handle_irq(unsigned int irq);

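/*
 * Illustrative sketch, not part of this header: a chained handler for a
 * secondary interrupt controller typically demultiplexes a pending register
 * and feeds each child interrupt back into the core with generic_handle_irq().
 * struct my_mux and my_mux_pending() are hypothetical.
 */
static void my_mux_demux_handler(struct irq_desc *desc)
{
        struct my_mux *mux = irq_desc_get_handler_data(desc);
        unsigned long pending = my_mux_pending(mux);    /* hypothetical read */
        unsigned int bit;

        for_each_set_bit(bit, &pending, mux->nr_children)
                generic_handle_irq(mux->child_irq[bit]);
}
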
#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/*
 * Convert a HW interrupt number to a logical one using an IRQ domain,
 * and handle the resulting interrupt number. Returns -EINVAL if the
 * conversion failed. Providing a NULL domain indicates that the
 * conversion has already been done.
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
                        bool lookup, struct pt_regs *regs);

static inline int handle_domain_irq(struct irq_domain *domain,
                                    unsigned int hwirq, struct pt_regs *regs)
{
        return __handle_domain_irq(domain, hwirq, true, regs);
}

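/*
 * Illustrative sketch, not part of this header: an architecture's low-level
 * entry code typically reads the hardware interrupt number from its root
 * controller and lets the core translate and handle it via handle_domain_irq().
 * my_root_domain and my_read_hwirq() are hypothetical.
 */
static struct irq_domain *my_root_domain;       /* hypothetical, set at init */

static void my_handle_arch_irq(struct pt_regs *regs)
{
        unsigned int hwirq = my_read_hwirq();   /* hypothetical register read */

        if (handle_domain_irq(my_root_domain, hwirq, regs))
                pr_warn("spurious hwirq %u\n", hwirq);
}
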
#ifdef CONFIG_IRQ_DOMAIN
int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
                      struct pt_regs *regs);
#endif
#endif

/* Test to see if a driver has successfully requested an irq */
static inline int irq_desc_has_action(struct irq_desc *desc)
{
        return desc->action != NULL;
}

static inline int irq_has_action(unsigned int irq)
{
        return irq_desc_has_action(irq_to_desc(irq));
}

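/*
 * Illustrative sketch, not part of this header: irq_desc_has_action() can be
 * used to skip interrupts no driver has requested, here while walking all
 * descriptors with for_each_irq_desc() from <linux/irqnr.h>.
 */
static void my_dump_requested_irqs(void)
{
        struct irq_desc *desc;
        unsigned int irq;

        for_each_irq_desc(irq, desc)
                if (irq_desc_has_action(desc))
                        pr_info("irq %u has a handler installed\n", irq);
}
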
/**
 * irq_set_handler_locked - Set irq handler from a locked region
 * @data:       Pointer to the irq_data structure which identifies the irq
 * @handler:    Flow control handler function for this interrupt
 *
 * Sets the handler in the irq descriptor associated with @data.
 *
 * Must be called with irq_desc locked and valid parameters. The typical
 * call site is the irq_set_type() callback.
 */
static inline void irq_set_handler_locked(struct irq_data *data,
                                          irq_flow_handler_t handler)
{
        struct irq_desc *desc = irq_data_to_desc(data);

        desc->handle_irq = handler;
}

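/*
 * Illustrative sketch, not part of this header: the typical call site named
 * above, an irqchip's irq_set_type() callback switching the flow handler to
 * match the new trigger type. my_chip_hw_set_type() is hypothetical;
 * handle_level_irq()/handle_edge_irq() are the generic flow handlers from
 * <linux/irq.h>.
 */
static int my_chip_irq_set_type(struct irq_data *data, unsigned int type)
{
        my_chip_hw_set_type(data, type);        /* hypothetical hardware setup */

        if (type & IRQ_TYPE_LEVEL_MASK)
                irq_set_handler_locked(data, handle_level_irq);
        else
                irq_set_handler_locked(data, handle_edge_irq);

        return 0;
}
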
/**
 * irq_set_chip_handler_name_locked - Set chip, handler and name from a locked region
 * @data:       Pointer to the irq_data structure for which the chip is set
 * @chip:       Pointer to the new irq chip
 * @handler:    Flow control handler function for this interrupt
 * @name:       Name of the interrupt
 *
 * Replaces the irq chip at the proper hierarchy level in @data and
 * sets the handler and name in the associated irq descriptor.
 *
 * Must be called with irq_desc locked and valid parameters.
 */
static inline void
irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
                                 irq_flow_handler_t handler, const char *name)
{
        struct irq_desc *desc = irq_data_to_desc(data);

        desc->handle_irq = handler;
        desc->name = name;
        data->chip = chip;
}

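/*
 * Illustrative sketch, not part of this header: when a reconfiguration also
 * needs a different irq chip (say, an edge trigger that is routed through a
 * separate wakeup unit), chip, handler and name are swapped in one go.
 * my_wake_chip, my_gpio_chip and the trigger policy are hypothetical.
 */
static struct irq_chip my_wake_chip, my_gpio_chip;      /* hypothetical chips */

static int my_gpio_irq_set_type(struct irq_data *data, unsigned int type)
{
        if (type & IRQ_TYPE_EDGE_BOTH)
                irq_set_chip_handler_name_locked(data, &my_wake_chip,
                                                 handle_edge_irq, "wake-edge");
        else
                irq_set_chip_handler_name_locked(data, &my_gpio_chip,
                                                 handle_level_irq, "gpio-level");
        return 0;
}
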
static inline bool irq_balancing_disabled(unsigned int irq)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);
        return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
}

static inline bool irq_is_percpu(unsigned int irq)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);
        return desc->status_use_accessors & IRQ_PER_CPU;
}

static inline bool irq_is_percpu_devid(unsigned int irq)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);
        return desc->status_use_accessors & IRQ_PER_CPU_DEVID;
}

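/*
 * Illustrative sketch, not part of this header: a driver that may be handed
 * either a regular or a per-CPU interrupt can use the predicate above to pick
 * the matching request call from <linux/interrupt.h>. my_handler,
 * my_percpu_handler and my_pcpu_data are hypothetical.
 */
static int my_request_irq(unsigned int irq, void *dev)
{
        if (irq_is_percpu_devid(irq))
                return request_percpu_irq(irq, my_percpu_handler, "mydev",
                                          my_pcpu_data);

        return request_irq(irq, my_handler, 0, "mydev", dev);
}
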
static inline void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
                      struct lock_class_key *request_class)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc) {
                lockdep_set_class(&desc->lock, lock_class);
                lockdep_set_class(&desc->request_mutex, request_class);
        }
}

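/*
 * Illustrative sketch, not part of this header: drivers that create nested
 * interrupt hierarchies (gpiolib uses this pattern) give each level its own
 * lockdep classes so legitimate nesting is not flagged as a deadlock.
 */
static struct lock_class_key my_irq_lock_class;
static struct lock_class_key my_irq_request_class;

static void my_setup_child_irq(unsigned int irq)
{
        irq_set_lockdep_class(irq, &my_irq_lock_class, &my_irq_request_class);
}
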
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void
__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);
        desc->preflow_handler = handler;
}
#endif

#endif
