1#ifndef __LINUX_PREEMPT_H
2#define __LINUX_PREEMPT_H
3
4/*
5 * include/linux/preempt.h - macros for accessing and manipulating
6 * preempt_count (used for kernel preemption, interrupt count, etc.)
7 */
8
9#include <linux/linkage.h>
10#include <linux/list.h>
11
/*
 * We use the MSB mostly because it's available; see <linux/preempt_mask.h> for
 * the other bits -- can't include that header due to inclusion hell.
 */
#define PREEMPT_NEED_RESCHED	0x80000000	/* MSB of preempt_count */
17
18#include <asm/preempt.h>
19
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
/*
 * Out-of-line implementations, used when preemption debugging or the
 * preemption-off tracer is enabled so every count change can be observed.
 */
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
/*
 * Decrement through the instrumented preempt_count_sub(), then test for a
 * pending reschedule separately (the fused arch primitive would bypass the
 * instrumentation).
 */
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(0); })
#else
/* No instrumentation: map straight onto the raw arch primitives. */
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif
30
/* Convenience wrappers: adjust preempt_count by exactly one. */
#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
36
37#ifdef CONFIG_PREEMPT_COUNT
38
/*
 * Raise preempt_count; the compiler barrier after the increment keeps the
 * following critical section from being reordered before it.
 */
#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)
44
/*
 * Drop preempt_count without checking for a pending reschedule.  The
 * barrier comes first so the critical section cannot leak past the
 * decrement.
 */
#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

/* Same thing; the sched_ name survives #undef for modules (see below). */
#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
52
#ifdef CONFIG_PREEMPT
/*
 * Drop preempt_count and, when it reaches zero with a reschedule pending,
 * call into the scheduler.
 */
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

/* Reschedule immediately if one is due and preemption is allowed. */
#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)

#else
/* !CONFIG_PREEMPT: enabling never calls into the scheduler directly. */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)
#define preempt_check_resched() do { } while (0)
#endif
75
/*
 * _notrace variants use the raw __preempt_count helpers directly,
 * bypassing the debug/tracer hooks selected above.
 */
#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)
87
#ifdef CONFIG_PREEMPT

/* Without context tracking the context-aware entry is the plain one. */
#ifndef CONFIG_CONTEXT_TRACKING
#define __preempt_schedule_context() __preempt_schedule()
#endif

/* Untraced counterpart of preempt_enable(). */
#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_context(); \
} while (0)
#else
#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)
#endif
107
108#else /* !CONFIG_PREEMPT_COUNT */
109
/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
/* No preempt_count to maintain; only the compiler barrier remains. */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
125
126#endif /* CONFIG_PREEMPT_COUNT */
127
#ifdef MODULE
/*
 * Modules have no business playing preemption tricks: strip the
 * no-resched and check variants so only the plain preempt_disable()/
 * preempt_enable() pair remains available to them.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif
137
/* Mark a reschedule as pending in preempt_count. */
#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
/*
 * If the thread flag says a reschedule is needed, mirror that into the
 * preempt_count need-resched bit.
 */
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)
147
148#ifdef CONFIG_PREEMPT_NOTIFIERS
149
150struct preempt_notifier;
151
/**
 * struct preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and sched_out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};
171
/**
 * struct preempt_notifier - key for installing preemption notifiers
 * @link: internal use (hlist linkage for the notifier list)
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};
183
/* Install/remove a preemption notifier; see struct preempt_ops above. */
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);
186
187static inline void preempt_notifier_init(struct preempt_notifier *notifier,
188				     struct preempt_ops *ops)
189{
190	INIT_HLIST_NODE(&notifier->link);
191	notifier->ops = ops;
192}
193
194#endif
195
196#endif /* __LINUX_PREEMPT_H */
197