1#ifndef _ASM_X86_MWAIT_H
2#define _ASM_X86_MWAIT_H
3
4#include <linux/sched.h>
5
/*
 * MWAIT hint layout: bits [7:4] select the C-state, bits [3:0] select
 * the sub-state within that C-state.
 */
#define MWAIT_SUBSTATE_MASK		0xf
#define MWAIT_CSTATE_MASK		0xf
#define MWAIT_SUBSTATE_SIZE		4
#define MWAIT_HINT2CSTATE(hint)		(((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
/* Use SUBSTATE_MASK here: same value as CSTATE_MASK, but matches intent. */
#define MWAIT_HINT2SUBSTATE(hint)	((hint) & MWAIT_SUBSTATE_MASK)

/* CPUID leaf 5 reports MONITOR/MWAIT capabilities. */
#define CPUID_MWAIT_LEAF		5
#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1	/* ECX bit 0: MWAIT extensions */
#define CPUID5_ECX_INTERRUPT_BREAK	0x2	/* ECX bit 1: break-on-interrupt */

/* ECX[0] to MWAIT: wake on interrupt even if interrupts are masked. */
#define MWAIT_ECX_INTERRUPT_BREAK	0x1
17
/*
 * Arm the hardware address monitor on the cache line containing @eax.
 * A subsequent MWAIT will wait until that line is written (or another
 * wakeup event occurs). @ecx/@edx are MONITOR extensions/hints; the
 * caller in this file passes 0 for both.
 */
static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/*
	 * "monitor %eax, %ecx, %edx;" — emitted as raw opcode bytes
	 * (0x0f 0x01 0xc8) so the header builds even with assemblers
	 * that do not know the mnemonic.
	 */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}
25
/*
 * Enter the MWAIT wait state previously armed by __monitor().
 * @eax carries the C-state hint (see MWAIT_HINT2CSTATE above),
 * @ecx carries extensions such as MWAIT_ECX_INTERRUPT_BREAK.
 */
static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/*
	 * "mwait %eax, %ecx;" — raw opcode bytes (0x0f 0x01 0xc9) for
	 * assembler compatibility, same as __monitor() above.
	 */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
32
/*
 * Like __mwait(), but re-enable interrupts immediately before waiting.
 * "sti" must be directly adjacent to "mwait" in the same asm statement:
 * the STI interrupt shadow delays interrupt delivery by one instruction,
 * so no interrupt can slip in between enabling and entering MWAIT.
 * trace_hardirqs_on() records the IRQ-on transition for lockdep/tracing
 * before the hardware flag actually flips.
 */
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	trace_hardirqs_on();
	/* "sti; mwait %eax, %ecx;" — mwait as raw opcode bytes. */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
40
/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
	/*
	 * Advertise that this CPU is polling its thread flags so the
	 * scheduler can skip the wakeup IPI; the test part catches a
	 * need-resched that was already pending before we arm MONITOR.
	 */
	if (!current_set_polling_and_test()) {
		if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
			/*
			 * Erratum workaround: on affected CPUs the monitored
			 * cache line must be flushed (with fencing on both
			 * sides) or MONITOR may not reliably arm.
			 * NOTE(review): erratum details inferred from the
			 * bug-flag name — confirm against the cpufeatures
			 * definition.
			 */
			mb();
			clflush((void *)&current_thread_info()->flags);
			mb();
		}

		/* Watch the thread flags word, then re-check before waiting:
		 * a need_resched set after the polling test but before
		 * MONITOR armed would otherwise be missed. */
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__mwait(eax, ecx);
	}
	/* Always drop the polling flag, even if we never entered MWAIT. */
	current_clr_polling();
}
66
67#endif /* _ASM_X86_MWAIT_H */
68