#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt_mask.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>

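/*
 * Added note: synchronize_irq() waits for all in-flight handlers of
 * the given irq, including threaded handlers, to finish, while
 * synchronize_hardirq() only waits for handlers running in hard
 * interrupt context; see kernel/irq/manage.c for the authoritative
 * semantics of the return value.
 */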
extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);

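/*
 * Added note: Tiny RCU is the uniprocessor RCU implementation; it has
 * no dyntick-idle state for NMI entry/exit to update, so the hooks
 * below collapse to no-ops there.
 */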
#if defined(CONFIG_TINY_RCU)

static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_irq_exit_time(current);		\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);
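
/*
 * Illustrative sketch only: a typical arch-level interrupt entry path
 * brackets handler dispatch with irq_enter()/irq_exit(), e.g.
 *
 *	irq_enter();
 *	generic_handle_irq(irq);
 *	irq_exit();
 *
 * irq_enter() also catches up jiffies when the tick is stopped (NO_HZ)
 * and irq_exit() runs pending softirqs, which is why entry paths use
 * this pair rather than the raw __irq_enter()/__irq_exit() helpers.
 */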

#define nmi_enter()						\
	do {							\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)

#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		BUG_ON(!in_nmi());				\
		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
		lockdep_on();					\
	} while (0)
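
/*
 * Illustrative sketch only: an arch NMI handler wraps its body in
 * nmi_enter()/nmi_exit() so that in_nmi() reads true, lockdep is
 * disabled and RCU is told about the NMI, e.g.
 *
 *	nmi_enter();
 *	... handle the NMI ...
 *	nmi_exit();
 *
 * The BUG_ON(in_nmi()) in nmi_enter() means this pair must not nest.
 */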

#endif /* LINUX_HARDIRQ_H */