/*
 * sigreturn.c - tests for x86 sigreturn(2) and exit-to-userspace
 * Copyright (c) 2014-2015 Andrew Lutomirski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * This is a series of tests that exercises the sigreturn(2) syscall and
 * the IRET / SYSRET paths in the kernel.
 *
 * For now, this focuses on the effects of unusual CS and SS values,
 * and it has a bunch of tests to make sure that ESP/RSP is restored
 * properly.
 *
 * The basic idea behind these tests is to raise(SIGUSR1) to create a
 * sigcontext frame, plug in the values to be tested, and then return,
 * which implicitly invokes sigreturn(2) and programs the user context
 * as desired.
 *
 * For tests for which we expect sigreturn and the subsequent return to
 * user mode to succeed, we return to a short trampoline that generates
 * SIGTRAP so that the meat of the tests can be ordinary C code in a
 * SIGTRAP handler.
 *
 * The inner workings of each test are documented below.
 *
 * Do not run this on outdated, unpatched kernels: these tests risk
 * triggering nasty crashes there.
 */

#define _GNU_SOURCE

#include <sys/time.h>
#include <time.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <sys/signal.h>
#include <sys/ucontext.h>
#include <asm/ldt.h>
#include <err.h>
#include <setjmp.h>
#include <stddef.h>
#include <stdbool.h>
#include <sys/ptrace.h>
#include <sys/user.h>

/*
 * In principle, this test can run on Linux emulation layers (e.g.
 * Illumos "LX branded zones").  Solaris-based kernels reserve LDT
 * entries 0-5 for their own internal purposes, so start our LDT
 * allocations above that reservation.  (The tests don't pass on LX
 * branded zones, but at least this lets them run.)
 */
#define LDT_OFFSET 6

/* An aligned stack accessible through some of our segments. */
static unsigned char stack16[65536] __attribute__((aligned(4096)));

/*
 * An aligned int3 instruction used as a trampoline.  Some of the tests
 * want to fish out their ss values, so this trampoline copies ss to eax
 * before the int3.
 */
asm (".pushsection .text\n\t"
     ".type int3, @function\n\t"
     ".align 4096\n\t"
     "int3:\n\t"
     "mov %ss,%eax\n\t"
     "int3\n\t"
     ".size int3, . - int3\n\t"
     ".align 4096, 0xcc\n\t"
     ".popsection");
extern char int3[4096];

/*
 * At startup, we prepare:
 *
 * - ldt_nonexistent_sel: An LDT entry that doesn't exist (all-zero
 *   descriptor or out of bounds).
 * - code16_sel: A 16-bit LDT code segment pointing to int3.
 * - data16_sel: A 16-bit LDT data segment pointing to stack16.
 * - npcode32_sel: A 32-bit not-present LDT code segment pointing to int3.
 * - npdata32_sel: A 32-bit not-present LDT data segment pointing to stack16.
 * - gdt_data16_idx: A 16-bit GDT data segment pointing to stack16.
 * - gdt_npdata32_idx: A 32-bit not-present GDT data segment pointing to
 *   stack16.
 *
 * For no particularly good reason, xyz_sel is a selector value with the
 * RPL and LDT bits filled in, whereas xyz_idx is just an index into the
 * descriptor table.  These variables will be zero if their respective
 * segments could not be allocated.
 */
static unsigned short ldt_nonexistent_sel;
static unsigned short code16_sel, data16_sel, npcode32_sel, npdata32_sel;

static unsigned short gdt_data16_idx, gdt_npdata32_idx;

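/*
 * A segment selector is (index << 3) | TI | RPL: bit 2 (TI) selects the
 * LDT when set and the GDT when clear, and the low two bits are the RPL.
 * These helpers build user-mode (RPL 3) selectors for a GDT or LDT index.
 */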
static unsigned short GDT3(int idx)
{
	return (idx << 3) | 3;
}

static unsigned short LDT3(int idx)
{
	return (idx << 3) | 7;
}

/* Our sigaltstack scratch space. */
static char altstack_data[SIGSTKSZ];

static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
		       int flags)
{
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO | flags;
	sigemptyset(&sa.sa_mask);
	if (sigaction(sig, &sa, 0))
		err(1, "sigaction");
}

static void clearhandler(int sig)
{
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = SIG_DFL;
	sigemptyset(&sa.sa_mask);
	if (sigaction(sig, &sa, 0))
		err(1, "sigaction");
}

static void add_ldt(const struct user_desc *desc, unsigned short *var,
		    const char *name)
{
	if (syscall(SYS_modify_ldt, 1, desc, sizeof(*desc)) == 0) {
		*var = LDT3(desc->entry_number);
	} else {
		printf("[NOTE]\tFailed to create %s segment\n", name);
		*var = 0;
	}
}

static void setup_ldt(void)
{
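	/*
	 * Descriptor base addresses are only 32 bits wide, so the objects
	 * our segments point at must live below 4 GB.
	 */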
	if ((unsigned long)stack16 > (1ULL << 32) - sizeof(stack16))
		errx(1, "stack16 is too high\n");
	if ((unsigned long)int3 > (1ULL << 32) - sizeof(int3))
		errx(1, "int3 is too high\n");

	ldt_nonexistent_sel = LDT3(LDT_OFFSET + 2);

	const struct user_desc code16_desc = {
		.entry_number    = LDT_OFFSET + 0,
		.base_addr       = (unsigned long)int3,
		.limit           = 4095,
		.seg_32bit       = 0,
		.contents        = 2, /* Code, not conforming */
		.read_exec_only  = 0,
		.limit_in_pages  = 0,
		.seg_not_present = 0,
		.useable         = 0
	};
	add_ldt(&code16_desc, &code16_sel, "code16");

	const struct user_desc data16_desc = {
		.entry_number    = LDT_OFFSET + 1,
		.base_addr       = (unsigned long)stack16,
		.limit           = 0xffff,
		.seg_32bit       = 0,
		.contents        = 0, /* Data, grow-up */
		.read_exec_only  = 0,
		.limit_in_pages  = 0,
		.seg_not_present = 0,
		.useable         = 0
	};
	add_ldt(&data16_desc, &data16_sel, "data16");

	const struct user_desc npcode32_desc = {
		.entry_number    = LDT_OFFSET + 3,
		.base_addr       = (unsigned long)int3,
		.limit           = 4095,
		.seg_32bit       = 1,
		.contents        = 2, /* Code, not conforming */
		.read_exec_only  = 0,
		.limit_in_pages  = 0,
		.seg_not_present = 1,
		.useable         = 0
	};
	add_ldt(&npcode32_desc, &npcode32_sel, "npcode32");

	const struct user_desc npdata32_desc = {
		.entry_number    = LDT_OFFSET + 4,
		.base_addr       = (unsigned long)stack16,
		.limit           = 0xffff,
		.seg_32bit       = 1,
		.contents        = 0, /* Data, grow-up */
		.read_exec_only  = 0,
		.limit_in_pages  = 0,
		.seg_not_present = 1,
		.useable         = 0
	};
	add_ldt(&npdata32_desc, &npdata32_sel, "npdata32");

	struct user_desc gdt_data16_desc = {
		.entry_number    = -1,
		.base_addr       = (unsigned long)stack16,
		.limit           = 0xffff,
		.seg_32bit       = 0,
		.contents        = 0, /* Data, grow-up */
		.read_exec_only  = 0,
		.limit_in_pages  = 0,
		.seg_not_present = 0,
		.useable         = 0
	};

	if (syscall(SYS_set_thread_area, &gdt_data16_desc) == 0) {
		/*
		 * This probably indicates vulnerability to CVE-2014-8133.
		 * Merely getting here isn't definitive, though, and we'll
		 * diagnose the problem for real later on.
		 */
		printf("[WARN]\tset_thread_area allocated data16 at index %d\n",
		       gdt_data16_desc.entry_number);
		gdt_data16_idx = gdt_data16_desc.entry_number;
	} else {
		printf("[OK]\tset_thread_area refused 16-bit data\n");
	}

	struct user_desc gdt_npdata32_desc = {
		.entry_number    = -1,
		.base_addr       = (unsigned long)stack16,
		.limit           = 0xffff,
		.seg_32bit       = 1,
		.contents        = 0, /* Data, grow-up */
		.read_exec_only  = 0,
		.limit_in_pages  = 0,
		.seg_not_present = 1,
		.useable         = 0
	};

	if (syscall(SYS_set_thread_area, &gdt_npdata32_desc) == 0) {
		/*
		 * As a hardening measure, newer kernels don't allow this.
		 */
		printf("[WARN]\tset_thread_area allocated npdata32 at index %d\n",
		       gdt_npdata32_desc.entry_number);
		gdt_npdata32_idx = gdt_npdata32_desc.entry_number;
	} else {
		printf("[OK]\tset_thread_area refused not-present data\n");
	}
}

/* State used by our signal handlers. */
static gregset_t initial_regs, requested_regs, resulting_regs;

/* Instructions for the SIGUSR1 handler. */
static volatile unsigned short sig_cs, sig_ss;
static volatile sig_atomic_t sig_trapped, sig_err, sig_trapno;

/* Abstractions for some 32-bit vs 64-bit differences. */
#ifdef __x86_64__
# define REG_IP REG_RIP
# define REG_SP REG_RSP
# define REG_AX REG_RAX

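/*
 * On x86_64, the REG_CSGSFS slot packs four 16-bit selectors: cs, gs, fs,
 * and a pad word that sigreturn uses to carry ss.  This struct overlays
 * that layout so we can poke at cs and ss individually.
 */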
struct selectors {
	unsigned short cs, gs, fs, ss;
};

static unsigned short *ssptr(ucontext_t *ctx)
{
	struct selectors *sels = (void *)&ctx->uc_mcontext.gregs[REG_CSGSFS];
	return &sels->ss;
}

static unsigned short *csptr(ucontext_t *ctx)
{
	struct selectors *sels = (void *)&ctx->uc_mcontext.gregs[REG_CSGSFS];
	return &sels->cs;
}
#else
# define REG_IP REG_EIP
# define REG_SP REG_ESP
# define REG_AX REG_EAX

static greg_t *ssptr(ucontext_t *ctx)
{
	return &ctx->uc_mcontext.gregs[REG_SS];
}

static greg_t *csptr(ucontext_t *ctx)
{
	return &ctx->uc_mcontext.gregs[REG_CS];
}
#endif

/* Number of errors in the current test case. */
static volatile sig_atomic_t nerrs;

/*
 * SIGUSR1 handler.  Sets CS and SS as requested and points IP to the
 * int3 trampoline.  Sets SP to a large known value so that we can see
 * whether the value round-trips back to user mode correctly.
 */
static void sigusr1(int sig, siginfo_t *info, void *ctx_void)
{
	ucontext_t *ctx = (ucontext_t*)ctx_void;

	memcpy(&initial_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));

	*csptr(ctx) = sig_cs;
	*ssptr(ctx) = sig_ss;

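	/*
	 * The 16-bit code segment's base is int3 itself, so, when returning
	 * to code16_sel, IP 0 lands on the trampoline; otherwise we use
	 * int3's linear address directly.
	 */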
	ctx->uc_mcontext.gregs[REG_IP] =
		sig_cs == code16_sel ? 0 : (unsigned long)&int3;
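	/*
	 * Use an easily recognizable 64-bit poison value for SP so that the
	 * output makes it obvious whether the full value or only the low
	 * 32 bits survived the round trip.
	 */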
	ctx->uc_mcontext.gregs[REG_SP] = (unsigned long)0x8badf00d5aadc0deULL;
	ctx->uc_mcontext.gregs[REG_AX] = 0;

	memcpy(&requested_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
	requested_regs[REG_AX] = *ssptr(ctx);	/* The asm code does this. */

	return;
}

/*
 * Called after a successful sigreturn.  Restores our state so that
 * the original raise(SIGUSR1) returns.
 */
static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
{
	ucontext_t *ctx = (ucontext_t*)ctx_void;

	sig_err = ctx->uc_mcontext.gregs[REG_ERR];
	sig_trapno = ctx->uc_mcontext.gregs[REG_TRAPNO];

	unsigned short ss;
	asm ("mov %%ss,%0" : "=r" (ss));

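	/* REG_AX holds the SS value that the int3 trampoline copied to EAX. */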
	greg_t asm_ss = ctx->uc_mcontext.gregs[REG_AX];
	if (asm_ss != sig_ss && sig == SIGTRAP) {
		/* Sanity check failure. */
		printf("[FAIL]\tSIGTRAP: ss = %hx, frame ss = %hx, ax = %llx\n",
		       ss, *ssptr(ctx), (unsigned long long)asm_ss);
		nerrs++;
	}

	memcpy(&resulting_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
	memcpy(&ctx->uc_mcontext.gregs, &initial_regs, sizeof(gregset_t));

	sig_trapped = sig;
}

/*
 * Returns the bitness (16, 32, or 64) of a code segment selector, or -1
 * if the selector does not refer to a usable code segment.
 */
int cs_bitness(unsigned short cs)
{
	uint32_t valid = 0, ar;
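	/*
	 * LAR loads the descriptor's access-rights word and sets ZF on
	 * success; the jnz skips the "valid = 1" store if the selector
	 * could not be loaded.
	 */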
	asm ("lar %[cs], %[ar]\n\t"
	     "jnz 1f\n\t"
	     "mov $1, %[valid]\n\t"
	     "1:"
	     : [ar] "=r" (ar), [valid] "+rm" (valid)
	     : [cs] "r" (cs));

	if (!valid)
		return -1;

	bool db = (ar & (1 << 22));
	bool l = (ar & (1 << 21));

	if (!(ar & (1<<11)))
		return -1;	/* Not code. */

	if (l && !db)
		return 64;
	else if (!l && db)
		return 32;
	else if (!l && !db)
		return 16;
	else
		return -1;	/* Unknown bitness. */
}

/* Finds a usable code segment of the requested bitness. */
int find_cs(int bitness)
{
	unsigned short my_cs;

	asm ("mov %%cs,%0" :  "=r" (my_cs));

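	/*
	 * On 64-bit Linux, the default user 32-bit and 64-bit code segments
	 * sit two GDT entries apart, so probing my_cs +/- (2 << 3) usually
	 * finds a code segment of the other bitness.
	 */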
	if (cs_bitness(my_cs) == bitness)
		return my_cs;
	if (cs_bitness(my_cs + (2 << 3)) == bitness)
		return my_cs + (2 << 3);
	if (my_cs > (2<<3) && cs_bitness(my_cs - (2 << 3)) == bitness)
		return my_cs - (2 << 3);
	if (cs_bitness(code16_sel) == bitness)
		return code16_sel;

	printf("[WARN]\tCould not find %d-bit CS\n", bitness);
	return -1;
}

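/*
 * Runs one "valid sigreturn" test: returns to a code segment of the
 * requested bitness with either the normal SS, our 16-bit LDT data
 * segment, or a caller-supplied SS, then checks that every register
 * made the round trip intact.
 */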
static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss)
{
	int cs = find_cs(cs_bits);
	if (cs == -1) {
		printf("[SKIP]\tCode segment unavailable for %d-bit CS, %d-bit SS\n",
		       cs_bits, use_16bit_ss ? 16 : 32);
		return 0;
	}

	if (force_ss != -1) {
		sig_ss = force_ss;
	} else {
		if (use_16bit_ss) {
			if (!data16_sel) {
				printf("[SKIP]\tData segment unavailable for %d-bit CS, 16-bit SS\n",
				       cs_bits);
				return 0;
			}
			sig_ss = data16_sel;
		} else {
			asm volatile ("mov %%ss,%0" : "=r" (sig_ss));
		}
	}

	sig_cs = cs;

	printf("[RUN]\tValid sigreturn: %d-bit CS (%hx), %d-bit SS (%hx%s)\n",
	       cs_bits, sig_cs, use_16bit_ss ? 16 : 32, sig_ss,
	       (sig_ss & 4) ? "" : ", GDT");

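	/*
	 * This raise() returns via sigreturn to the int3 trampoline, which
	 * raises SIGTRAP; sigtrap() records the resulting registers and
	 * then restores initial_regs so that we resume here.
	 */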
	raise(SIGUSR1);

	nerrs = 0;

	/*
	 * Check that each register had an acceptable value when the
	 * int3 trampoline was invoked.
	 */
	for (int i = 0; i < NGREG; i++) {
		greg_t req = requested_regs[i], res = resulting_regs[i];
		if (i == REG_TRAPNO || i == REG_IP)
			continue;	/* don't care */
		if (i == REG_SP) {
			printf("\tSP: %llx -> %llx\n", (unsigned long long)req,
			       (unsigned long long)res);

			/*
			 * In many circumstances, the high 32 bits of rsp
			 * are zeroed.  For example, we could be a real
			 * 32-bit program, or we could hit any of a number
			 * of poorly-documented IRET or segmented ESP
			 * oddities.  If this happens, it's okay.
			 */
			if (res == (req & 0xFFFFFFFF))
				continue;  /* OK; not expected to work */
		}

		bool ignore_reg = false;
#if __i386__
		if (i == REG_UESP)
			ignore_reg = true;
#else
		if (i == REG_CSGSFS) {
			struct selectors *req_sels =
				(void *)&requested_regs[REG_CSGSFS];
			struct selectors *res_sels =
				(void *)&resulting_regs[REG_CSGSFS];
			if (req_sels->cs != res_sels->cs) {
				printf("[FAIL]\tCS mismatch: requested 0x%hx; got 0x%hx\n",
				       req_sels->cs, res_sels->cs);
				nerrs++;
			}

			if (req_sels->ss != res_sels->ss) {
				printf("[FAIL]\tSS mismatch: requested 0x%hx; got 0x%hx\n",
				       req_sels->ss, res_sels->ss);
				nerrs++;
			}

			continue;
		}
#endif

		/* Sanity check on the kernel */
		if (i == REG_AX && requested_regs[i] != resulting_regs[i]) {
			printf("[FAIL]\tAX (saved SS) mismatch: requested 0x%llx; got 0x%llx\n",
			       (unsigned long long)requested_regs[i],
			       (unsigned long long)resulting_regs[i]);
			nerrs++;
			continue;
		}

		if (requested_regs[i] != resulting_regs[i] && !ignore_reg) {
			/*
			 * SP is particularly interesting here.  The
			 * usual cause of failures is that we hit the
			 * nasty IRET case of returning to a 16-bit SS,
			 * in which case bits 16:31 of the *kernel*
			 * stack pointer persist in ESP.
			 */
			printf("[FAIL]\tReg %d mismatch: requested 0x%llx; got 0x%llx\n",
			       i, (unsigned long long)requested_regs[i],
			       (unsigned long long)resulting_regs[i]);
			nerrs++;
		}
	}

	if (nerrs == 0)
		printf("[OK]\tall registers okay\n");

	return nerrs;
}

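/*
 * Tests a CS/SS combination for which sigreturn itself should succeed but
 * the subsequent return to user mode should fault.  Returns 0 if a fault
 * signal was delivered as expected and 1 otherwise.
 */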
static int test_bad_iret(int cs_bits, unsigned short ss, int force_cs)
{
	int cs = force_cs == -1 ? find_cs(cs_bits) : force_cs;
	if (cs == -1)
		return 0;

	sig_cs = cs;
	sig_ss = ss;

	printf("[RUN]\t%d-bit CS (%hx), bogus SS (%hx)\n",
	       cs_bits, sig_cs, sig_ss);

	sig_trapped = 0;
	raise(SIGUSR1);
	if (sig_trapped) {
		char errdesc[32] = "";
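		/*
		 * Decode the hardware error code: bit 0 is the EXT bit,
		 * bits 1-2 identify the descriptor table, and the remaining
		 * bits are the selector index.
		 */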
		if (sig_err) {
			const char *src = (sig_err & 1) ? " EXT" : "";
			const char *table;
			if ((sig_err & 0x6) == 0x0)
				table = "GDT";
			else if ((sig_err & 0x6) == 0x4)
				table = "LDT";
			else if ((sig_err & 0x6) == 0x2)
				table = "IDT";
			else
				table = "???";

			sprintf(errdesc, "%s%s index %d, ",
				table, src, sig_err >> 3);
		}

		char trapname[32];
		if (sig_trapno == 13)
			strcpy(trapname, "GP");
		else if (sig_trapno == 11)
			strcpy(trapname, "NP");
		else if (sig_trapno == 12)
			strcpy(trapname, "SS");
		else if (sig_trapno == 32)
			strcpy(trapname, "IRET");  /* X86_TRAP_IRET */
		else
			sprintf(trapname, "%d", sig_trapno);

		printf("[OK]\tGot #%s(0x%lx) (i.e. %s%s)\n",
		       trapname, (unsigned long)sig_err,
		       errdesc, strsignal(sig_trapped));
		return 0;
	} else {
		printf("[FAIL]\tDid not get SIGSEGV\n");
		return 1;
	}
}

int main()
{
	int total_nerrs = 0;
	unsigned short my_cs, my_ss;

	asm volatile ("mov %%cs,%0" : "=r" (my_cs));
	asm volatile ("mov %%ss,%0" : "=r" (my_ss));
	setup_ldt();

	stack_t stack = {
		.ss_sp = altstack_data,
		.ss_size = SIGSTKSZ,
	};
	if (sigaltstack(&stack, NULL) != 0)
		err(1, "sigaltstack");

	sethandler(SIGUSR1, sigusr1, 0);
	sethandler(SIGTRAP, sigtrap, SA_ONSTACK);

	/* Easy cases: return to a 32-bit SS in each possible CS bitness. */
	total_nerrs += test_valid_sigreturn(64, false, -1);
	total_nerrs += test_valid_sigreturn(32, false, -1);
	total_nerrs += test_valid_sigreturn(16, false, -1);

	/*
	 * Test easy espfix cases: return to a 16-bit LDT SS in each possible
	 * CS bitness.  NB: with a long mode CS, the SS bitness is irrelevant.
	 *
	 * This catches the original missing-espfix-on-64-bit-kernels issue
	 * as well as CVE-2014-8134.
	 */
	total_nerrs += test_valid_sigreturn(64, true, -1);
	total_nerrs += test_valid_sigreturn(32, true, -1);
	total_nerrs += test_valid_sigreturn(16, true, -1);

	if (gdt_data16_idx) {
		/*
		 * For performance reasons, Linux skips espfix if SS points
		 * to the GDT.  If we were able to allocate a 16-bit SS in
		 * the GDT, see if it leaks parts of the kernel stack pointer.
		 *
		 * This tests for CVE-2014-8133.
		 */
		total_nerrs += test_valid_sigreturn(64, true,
						    GDT3(gdt_data16_idx));
		total_nerrs += test_valid_sigreturn(32, true,
						    GDT3(gdt_data16_idx));
		total_nerrs += test_valid_sigreturn(16, true,
						    GDT3(gdt_data16_idx));
	}

	/*
	 * We're done testing valid sigreturn cases.  Now we test states
	 * for which sigreturn itself will succeed but the subsequent
	 * entry to user mode will fail.
	 *
	 * Depending on the failure mode and the kernel bitness, these
	 * entry failures can generate SIGSEGV, SIGBUS, or SIGILL.
	 */
	clearhandler(SIGTRAP);
	sethandler(SIGSEGV, sigtrap, SA_ONSTACK);
	sethandler(SIGBUS, sigtrap, SA_ONSTACK);
	sethandler(SIGILL, sigtrap, SA_ONSTACK);  /* 32-bit kernels do this */

	/* Easy failures: invalid SS, resulting in #GP(0) */
	test_bad_iret(64, ldt_nonexistent_sel, -1);
	test_bad_iret(32, ldt_nonexistent_sel, -1);
	test_bad_iret(16, ldt_nonexistent_sel, -1);

	/* These fail because SS isn't a data segment, resulting in #GP(SS) */
	test_bad_iret(64, my_cs, -1);
	test_bad_iret(32, my_cs, -1);
	test_bad_iret(16, my_cs, -1);

	/* Try to return to a not-present code segment, triggering #NP(CS). */
	test_bad_iret(32, my_ss, npcode32_sel);

	/*
	 * Try to return to a not-present but otherwise valid data segment.
	 * This will cause IRET to fail with #SS on the espfix stack.  This
	 * exercises CVE-2014-9322.
	 *
	 * Note that, if espfix is enabled, 64-bit Linux will lose track
	 * of the actual cause of failure and report #GP(0) instead.
	 * This would be very difficult for Linux to avoid, because
	 * espfix64 causes IRET failures to be promoted to #DF, so the
	 * original exception frame is never pushed onto the stack.
	 */
	test_bad_iret(32, npdata32_sel, -1);

	/*
	 * Try to return to a not-present but otherwise valid data
	 * segment without invoking espfix.  Newer kernels don't allow
	 * this to happen in the first place.  On older kernels, though,
	 * this can trigger CVE-2014-9322.
	 */
	if (gdt_npdata32_idx)
		test_bad_iret(32, GDT3(gdt_npdata32_idx), -1);

	return total_nerrs ? 1 : 0;
}