#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H

#include <linux/types.h>
#include <asm/processor.h>

#define XSTATE_CPUID		0x0000000d

#define XSTATE_FP		0x1
#define XSTATE_SSE		0x2
#define XSTATE_YMM		0x4
#define XSTATE_BNDREGS		0x8
#define XSTATE_BNDCSR		0x10
#define XSTATE_OPMASK		0x20
#define XSTATE_ZMM_Hi256	0x40
#define XSTATE_Hi16_ZMM		0x80

#define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)
#define XSTATE_AVX512	(XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
/* Bit 63 of XCR0 is reserved for future expansion */
#define XSTATE_EXTEND_MASK	(~(XSTATE_FPSSE | (1ULL << 63)))
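
/*
 * For illustration only (not used by the code): these bits combine into the
 * XCR0 value enabling a given feature set, e.g. x87 + SSE + AVX:
 *
 *	XSTATE_FPSSE | XSTATE_YMM == 0x1 | 0x2 | 0x4 == 0x7
 *
 * XSTATE_EXTEND_MASK selects everything above the legacy FP/SSE pair,
 * i.e. bits 2..62 (bit 63 stays reserved).
 */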

#define FXSAVE_SIZE	512

#define XSAVE_HDR_SIZE	    64
#define XSAVE_HDR_OFFSET    FXSAVE_SIZE

#define XSAVE_YMM_SIZE	    256
#define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
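
/*
 * Standard (non-compacted) layout sketch, for reference only:
 *
 *	offset   0: legacy FXSAVE region   (FXSAVE_SIZE    = 512 bytes)
 *	offset 512: xsave header           (XSAVE_HDR_SIZE =  64 bytes)
 *	offset 576: YMM_Hi128 state        (XSAVE_YMM_SIZE = 256 bytes)
 *
 * which is why XSAVE_YMM_OFFSET == XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET == 576.
 */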

/* Supported features which support lazy state saving */
#define XSTATE_LAZY	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM		      \
			| XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)

/* Supported features which require eager state saving */
#define XSTATE_EAGER	(XSTATE_BNDREGS | XSTATE_BNDCSR)

/* All currently supported features */
#define XCNTXT_MASK	(XSTATE_LAZY | XSTATE_EAGER)
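
/*
 * For illustration: with all of the features above present, the two groups
 * cover all eight defined bits, i.e.
 *
 *	XCNTXT_MASK == XSTATE_LAZY | XSTATE_EAGER == 0xff
 */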

#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

extern unsigned int xstate_size;
extern u64 pcntxt_mask;
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
extern struct xsave_struct *init_xstate_buf;

extern void xsave_init(void);
extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
extern int init_fpu(struct task_struct *child);

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
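
/*
 * Encoding notes (informational): the .byte sequences are the raw opcodes
 * with a ModRM byte selecting (%edi)/(%rdi) as the memory operand, e.g.
 * XSAVE is 0f ae /4 and ModRM 0x27 encodes /4 with a (%rdi) base.  On
 * 64-bit, REX_PREFIX adds REX.W (0x48), selecting the 64-bit forms
 * (XSAVE64 etc.).  All of these instructions take the requested-feature
 * bitmap in EDX:EAX, which is why the helpers below split the u64 mask
 * into lmask/hmask and pass them via the "a"/"d" constraints.
 */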

#define xstate_fault	".section .fixup,\"ax\"\n"	\
			"3:  movl $-1,%[err]\n"		\
			"    jmp  2b\n"			\
			".previous\n"			\
			_ASM_EXTABLE(1b, 3b)		\
			: [err] "=r" (err)
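
/*
 * How the fixup works, for readers of the asm below: label 1 marks the
 * faulting instruction and label 2 the instruction after it.  On a fault,
 * the exception table entry added by _ASM_EXTABLE(1b, 3b) redirects
 * execution to the out-of-line fixup at label 3, which stores -1 into the
 * local 'err' variable and jumps back to label 2, so the caller sees a
 * non-zero return value instead of an oops.
 */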

/*
 * This function is called only during early boot, when the x86 capability
 * bits are not yet set up and alternatives cannot be used.
 */
static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		asm volatile("1:"XSAVES"\n\t"
			"2:\n\t"
			     xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			:   "memory");
	else
		asm volatile("1:"XSAVE"\n\t"
			"2:\n\t"
			     xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			:   "memory");
	return err;
}

/*
 * This function is called only during early boot, when the x86 capability
 * bits are not yet set up and alternatives cannot be used.
 */
static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		asm volatile("1:"XRSTORS"\n\t"
			"2:\n\t"
			     xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			:   "memory");
	else
		asm volatile("1:"XRSTOR"\n\t"
			"2:\n\t"
			     xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			:   "memory");
	return err;
}

/*
 * Save processor xstate to xsave area.
 */
static inline int xsave_state(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	/*
	 * If xsaves is enabled, xsaves replaces xsaveopt because it supports
	 * the compacted format and supervisor states in addition to the
	 * modified optimization of xsaveopt.
	 *
	 * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave because
	 * xsaveopt supports the modified optimization, which xsave does not.
	 *
	 * If neither xsaves nor xsaveopt is enabled, use xsave.
	 */
	alternative_input_2(
		"1:"XSAVE,
		XSAVEOPT,
		X86_FEATURE_XSAVEOPT,
		XSAVES,
		X86_FEATURE_XSAVES,
		[fx] "D" (fx), "a" (lmask), "d" (hmask) :
		"memory");
	asm volatile("2:\n\t"
		     xstate_fault
		     : "0" (0)
		     : "memory");

	return err;
}
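
/*
 * Typical use (see fpu_xsave() below): passing mask == -1 requests every
 * state component; the CPU saves the intersection of that mask with the
 * states enabled in XCR0 (and, when xsaves is used, IA32_XSS).
 */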

/*
 * Restore processor xstate from xsave area.
 */
static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
{
	int err = 0;
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	/*
	 * Use xrstors to restore the context if it is enabled. xrstors
	 * supports the compacted xsave area format, which xrstor does not.
	 */
	alternative_input(
		"1: " XRSTOR,
		XRSTORS,
		X86_FEATURE_XSAVES,
		"D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		: "memory");

	asm volatile("2:\n"
		     xstate_fault
		     : "0" (0)
		     : "memory");

	return err;
}

/*
 * Save xstate context for old process during context switch.
 */
static inline void fpu_xsave(struct fpu *fpu)
{
	xsave_state(&fpu->state->xsave, -1);
}

/*
 * Restore xstate context for new process during context switch.
 */
static inline int fpu_xrstor_checking(struct xsave_struct *fx)
{
	return xrstor_state(fx, -1);
}

/*
 * Save xstate to the user space xsave area.
 *
 * We don't use the modified optimization because xrstor/xrstors might
 * track a different application.
 *
 * We don't use the compacted xsave area format, to stay backward
 * compatible with old applications which don't understand it.
 */
static inline int xsave_user(struct xsave_struct __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->xsave_hdr, sizeof(buf->xsave_hdr));
	if (unlikely(err))
		return -EFAULT;

	__asm__ __volatile__(ASM_STAC "\n"
			     "1:"XSAVE"\n"
			     "2: " ASM_CLAC "\n"
			     xstate_fault
			     : "D" (buf), "a" (-1), "d" (-1), "0" (0)
			     : "memory");
	return err;
}
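
/*
 * Note for callers (informational): the hardware requires the xsave area
 * to be 64-byte aligned, and the -1/-1 mask above requests every user
 * state component; the plain XSAVE used here always writes the standard
 * (non-compacted) format, matching the comment above.
 */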

/*
 * Restore xstate from user space xsave area.
 */
static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
{
	int err = 0;
	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	__asm__ __volatile__(ASM_STAC "\n"
			     "1:"XRSTOR"\n"
			     "2: " ASM_CLAC "\n"
			     xstate_fault
			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
			     : "memory");	/* memory required? */
	return err;
}

void *get_xsave_addr(struct xsave_struct *xsave, int xstate);
void setup_xstate_comp(void);

#endif