#ifndef _ASM_X86_DESC_H
#define _ASM_X86_DESC_H

#include <asm/desc_defs.h>
#include <asm/ldt.h>
#include <asm/mmu.h>

#include <linux/smp.h>
#include <linux/percpu.h>

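/*
 * Convert a user-supplied struct user_desc (as supplied by set_thread_area()
 * or modify_ldt()) into a hardware segment descriptor.  The DPL is always
 * forced to 3 and the L (64-bit code) bit is cleared below.
 */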
static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
{
	desc->limit0		= info->limit & 0x0ffff;

	desc->base0		= (info->base_addr & 0x0000ffff);
	desc->base1		= (info->base_addr & 0x00ff0000) >> 16;

	desc->type		= (info->read_exec_only ^ 1) << 1;
	desc->type	       |= info->contents << 2;

	desc->s			= 1;
	desc->dpl		= 0x3;
	desc->p			= info->seg_not_present ^ 1;
	desc->limit		= (info->limit & 0xf0000) >> 16;
	desc->avl		= info->useable;
	desc->d			= info->seg_32bit;
	desc->g			= info->limit_in_pages;

	desc->base2		= (info->base_addr & 0xff000000) >> 24;
	/*
	 * Don't allow setting of the lm bit. It would confuse
	 * user_64bit_mode and would get overridden by sysret anyway.
	 */
	desc->l			= 0;
}

extern struct desc_ptr idt_descr;
extern gate_desc idt_table[];
extern struct desc_ptr debug_idt_descr;
extern gate_desc debug_idt_table[];

struct gdt_page {
	struct desc_struct gdt[GDT_ENTRIES];
} __attribute__((aligned(PAGE_SIZE)));

DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);

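/*
 * Each CPU gets its own page-aligned GDT; get_cpu_gdt_table() returns a
 * pointer to the given CPU's copy.
 */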
static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
{
	return per_cpu(gdt_page, cpu).gdt;
}

#ifdef CONFIG_X86_64

static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
			     unsigned dpl, unsigned ist, unsigned seg)
{
	gate->offset_low	= PTR_LOW(func);
	gate->segment		= __KERNEL_CS;
	gate->ist		= ist;
	gate->p			= 1;
	gate->dpl		= dpl;
	gate->zero0		= 0;
	gate->zero1		= 0;
	gate->type		= type;
	gate->offset_middle	= PTR_MIDDLE(func);
	gate->offset_high	= PTR_HIGH(func);
}

#else
static inline void pack_gate(gate_desc *gate, unsigned char type,
			     unsigned long base, unsigned dpl, unsigned flags,
			     unsigned short seg)
{
	gate->a = (seg << 16) | (base & 0xffff);
	gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
}

#endif

static inline int desc_empty(const void *ptr)
{
	const u32 *desc = ptr;

	return !(desc[0] | desc[1]);
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define load_TR_desc()				native_load_tr_desc()
#define load_gdt(dtr)				native_load_gdt(dtr)
#define load_idt(dtr)				native_load_idt(dtr)
#define load_tr(tr)				asm volatile("ltr %0"::"m" (tr))
#define load_ldt(ldt)				asm volatile("lldt %0"::"m" (ldt))

#define store_gdt(dtr)				native_store_gdt(dtr)
#define store_idt(dtr)				native_store_idt(dtr)
#define store_tr(tr)				(tr = native_store_tr())

#define load_TLS(t, cpu)			native_load_tls(t, cpu)
#define set_ldt					native_set_ldt

#define write_ldt_entry(dt, entry, desc)	native_write_ldt_entry(dt, entry, desc)
#define write_gdt_entry(dt, entry, desc, type)	native_write_gdt_entry(dt, entry, desc, type)
#define write_idt_entry(dt, entry, g)		native_write_idt_entry(dt, entry, g)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
}
#endif	/* CONFIG_PARAVIRT */

#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))

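/*
 * The native_write_*_entry() helpers below copy a prebuilt descriptor into
 * the IDT, LDT or GDT.  When CONFIG_PARAVIRT is enabled, the write_*_entry()
 * macros above are routed through <asm/paravirt.h> instead of calling these
 * directly.
 */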
static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
{
	memcpy(&idt[entry], gate, sizeof(*gate));
}

static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
{
	memcpy(&ldt[entry], desc, 8);
}

static inline void
native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type)
{
	unsigned int size;

	switch (type) {
	case DESC_TSS:	size = sizeof(tss_desc);	break;
	case DESC_LDT:	size = sizeof(ldt_desc);	break;
	default:	size = sizeof(*gdt);		break;
	}

	memcpy(&gdt[entry], desc, size);
}

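/*
 * pack_descriptor() builds a legacy 8-byte segment descriptor from its raw
 * base, limit, type and flags fields, writing the two 32-bit words directly.
 */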
static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
				   unsigned long limit, unsigned char type,
				   unsigned char flags)
{
	desc->a = ((base & 0xffff) << 16) | (limit & 0xffff);
	desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
		(limit & 0x000f0000) | ((type & 0xff) << 8) |
		((flags & 0xf) << 20);
	desc->p = 1;
}

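/*
 * Build a TSS or LDT descriptor for the given base address and limit.  On
 * 64-bit this is a 16-byte ldttss_desc64; on 32-bit it is packed into a
 * normal 8-byte descriptor with the present bit set.
 */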
static inline void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size)
{
#ifdef CONFIG_X86_64
	struct ldttss_desc64 *desc = d;

	memset(desc, 0, sizeof(*desc));

	desc->limit0		= size & 0xFFFF;
	desc->base0		= PTR_LOW(addr);
	desc->base1		= PTR_MIDDLE(addr) & 0xFF;
	desc->type		= type;
	desc->p			= 1;
	desc->limit1		= (size >> 16) & 0xF;
	desc->base2		= (PTR_MIDDLE(addr) >> 8) & 0xFF;
	desc->base3		= PTR_HIGH(addr);
#else
	pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
#endif
}

static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
{
	struct desc_struct *d = get_cpu_gdt_table(cpu);
	tss_desc tss;

	/*
	 * The sizeof(unsigned long) accounts for the extra "long" at the
	 * end of the I/O bitmap; see the tss_struct definition in
	 * processor.h.
	 *
	 * The -1 is because the segment base+limit must point to the
	 * address of the last valid byte, not one past it.
	 */
	set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
			      IO_BITMAP_OFFSET + IO_BITMAP_BYTES +
			      sizeof(unsigned long) - 1);
	write_gdt_entry(d, entry, &tss, DESC_TSS);
}

#define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)

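/*
 * Load a new LDT: with no entries just load the null selector; otherwise
 * write an LDT descriptor into this CPU's GDT and point LLDT at it.
 */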
static inline void native_set_ldt(const void *addr, unsigned int entries)
{
	if (likely(entries == 0))
		asm volatile("lldt %w0"::"q" (0));
	else {
		unsigned cpu = smp_processor_id();
		ldt_desc ldt;

		set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
				      entries * LDT_ENTRY_SIZE - 1);
		write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
				&ldt, DESC_LDT);
		asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
	}
}

static inline void native_load_tr_desc(void)
{
	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
}

static inline void native_load_gdt(const struct desc_ptr *dtr)
{
	asm volatile("lgdt %0"::"m" (*dtr));
}

static inline void native_load_idt(const struct desc_ptr *dtr)
{
	asm volatile("lidt %0"::"m" (*dtr));
}

static inline void native_store_gdt(struct desc_ptr *dtr)
{
	asm volatile("sgdt %0":"=m" (*dtr));
}

static inline void native_store_idt(struct desc_ptr *dtr)
{
	asm volatile("sidt %0":"=m" (*dtr));
}

static inline unsigned long native_store_tr(void)
{
	unsigned long tr;

	asm volatile("str %0":"=r" (tr));

	return tr;
}

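/*
 * Copy the thread's TLS descriptors into this CPU's GDT so that the
 * GDT_ENTRY_TLS_MIN..GDT_ENTRY_TLS_MAX slots match the given thread.
 */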
static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	unsigned int i;

	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
}

/* This intentionally ignores lm, since 32-bit apps don't have that field. */
#define LDT_empty(info)					\
	((info)->base_addr		== 0	&&	\
	 (info)->limit			== 0	&&	\
	 (info)->contents		== 0	&&	\
	 (info)->read_exec_only		== 1	&&	\
	 (info)->seg_32bit		== 0	&&	\
	 (info)->limit_in_pages		== 0	&&	\
	 (info)->seg_not_present	== 1	&&	\
	 (info)->useable		== 0)

/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
static inline bool LDT_zero(const struct user_desc *info)
{
	return (info->base_addr		== 0 &&
		info->limit		== 0 &&
		info->contents		== 0 &&
		info->read_exec_only	== 0 &&
		info->seg_32bit		== 0 &&
		info->limit_in_pages	== 0 &&
		info->seg_not_present	== 0 &&
		info->useable		== 0);
}

static inline void clear_LDT(void)
{
	set_ldt(NULL, 0);
}

static inline unsigned long get_desc_base(const struct desc_struct *desc)
{
	return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
}

static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
{
	desc->base0 = base & 0xffff;
	desc->base1 = (base >> 16) & 0xff;
	desc->base2 = (base >> 24) & 0xff;
}

static inline unsigned long get_desc_limit(const struct desc_struct *desc)
{
	return desc->limit0 | (desc->limit << 16);
}

static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
{
	desc->limit0 = limit & 0xffff;
	desc->limit = (limit >> 16) & 0xf;
}

#ifdef CONFIG_X86_64
static inline void set_nmi_gate(int gate, void *addr)
{
	gate_desc s;

	pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
	write_idt_entry(debug_idt_table, gate, &s);
}
#endif

#ifdef CONFIG_TRACING
extern struct desc_ptr trace_idt_descr;
extern gate_desc trace_idt_table[];
static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
{
	write_idt_entry(trace_idt_table, entry, gate);
}

static inline void _trace_set_gate(int gate, unsigned type, void *addr,
				   unsigned dpl, unsigned ist, unsigned seg)
{
	gate_desc s;

	pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
	/*
	 * does not need to be atomic because it is only done once at
	 * setup time
	 */
	write_trace_idt_entry(gate, &s);
}
#else
static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
{
}

#define _trace_set_gate(gate, type, addr, dpl, ist, seg)
#endif

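/*
 * Install a gate in the primary IDT and, when CONFIG_TRACING is enabled,
 * mirror it into the trace IDT.
 */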
static inline void _set_gate(int gate, unsigned type, void *addr,
			     unsigned dpl, unsigned ist, unsigned seg)
{
	gate_desc s;

	pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
	/*
	 * does not need to be atomic because it is only done once at
	 * setup time
	 */
	write_idt_entry(idt_table, gate, &s);
	write_trace_idt_entry(gate, &s);
}

/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
#define set_intr_gate_notrace(n, addr)					\
	do {								\
		BUG_ON((unsigned)n > 0xFF);				\
		_set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0,	\
			  __KERNEL_CS);					\
	} while (0)

#define set_intr_gate(n, addr)						\
	do {								\
		set_intr_gate_notrace(n, addr);				\
		_trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
				0, 0, __KERNEL_CS);			\
	} while (0)
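
/*
 * Illustrative use (a minimal sketch, not taken from this file): trap setup
 * code typically installs a handler with something like
 *
 *	set_intr_gate(X86_TRAP_DE, divide_error);
 *
 * which fills vector 0 with a kernel interrupt gate for the (assumed)
 * divide_error entry point and, under CONFIG_TRACING, installs the matching
 * trace_divide_error variant in the trace IDT.
 */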

extern int first_system_vector;
/* used_vectors is a bitmap of vectors that are not managed by the per-CPU vector_irq arrays */
extern unsigned long used_vectors[];

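/*
 * Claim a system vector: mark it in used_vectors and keep
 * first_system_vector at the lowest vector claimed so far.  Claiming the
 * same vector twice is a bug.
 */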
static inline void alloc_system_vector(int vector)
{
	if (!test_bit(vector, used_vectors)) {
		set_bit(vector, used_vectors);
		if (first_system_vector > vector)
			first_system_vector = vector;
	} else {
		BUG();
	}
}

#define alloc_intr_gate(n, addr)				\
	do {							\
		alloc_system_vector(n);				\
		set_intr_gate(n, addr);				\
	} while (0)

/*
 * This routine sets up an interrupt gate at descriptor privilege level 3.
 */
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
}

static inline void set_system_trap_gate(unsigned int n, void *addr)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
}

static inline void set_trap_gate(unsigned int n, void *addr)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
}

static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
}

static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
}

static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
}

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(u32, debug_idt_ctr);
static inline bool is_debug_idt_enabled(void)
{
	if (this_cpu_read(debug_idt_ctr))
		return true;

	return false;
}

static inline void load_debug_idt(void)
{
	load_idt((const struct desc_ptr *)&debug_idt_descr);
}
#else
static inline bool is_debug_idt_enabled(void)
{
	return false;
}

static inline void load_debug_idt(void)
{
}
#endif

#ifdef CONFIG_TRACING
extern atomic_t trace_idt_ctr;
static inline bool is_trace_idt_enabled(void)
{
	if (atomic_read(&trace_idt_ctr))
		return true;

	return false;
}

static inline void load_trace_idt(void)
{
	load_idt((const struct desc_ptr *)&trace_idt_descr);
}
#else
static inline bool is_trace_idt_enabled(void)
{
	return false;
}

static inline void load_trace_idt(void)
{
}
#endif

/*
 * load_current_idt() must be called with interrupts disabled to avoid
 * races; that way the IDT is always set back to the expected descriptor.
 * It is also called while a CPU is being initialized, which does not need
 * interrupts disabled, since nothing should be bothering the CPU then.
 */
static inline void load_current_idt(void)
{
	if (is_debug_idt_enabled())
		load_debug_idt();
	else if (is_trace_idt_enabled())
		load_trace_idt();
	else
		load_idt((const struct desc_ptr *)&idt_descr);
}
#endif /* _ASM_X86_DESC_H */