#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif
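
/*
 * How the perf_rdpmc_allowed count above is expected to be driven: when a
 * perf event that permits user-space RDPMC is mmapped, perf bumps the count
 * and IPIs every CPU currently running this mm so that each one re-evaluates
 * load_mm_cr4().  A rough sketch, loosely modeled on the perf core's
 * handling; refresh_pce() and rdpmc_allow() are illustrative names, not a
 * definition of that API:
 */
#if 0	/* illustrative sketch only, not compiled */
static void refresh_pce(void *ignored)
{
	/* Re-evaluate CR4.PCE for whatever mm this CPU is running. */
	if (current->mm)
		load_mm_cr4(current->mm);
}

static void rdpmc_allow(struct mm_struct *mm)
{
	/* First event to allow rdpmc: update CR4 on all CPUs using mm. */
	if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
}
#endif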

/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	int size;
};
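
/*
 * The Xen constraint above is handled at allocation time: the entries array
 * is always zeroed and at least page-sized (hence page-aligned), so no
 * partially initialized descriptors are ever visible.  A simplified sketch
 * of such an allocator, based on alloc_ldt_struct() in arch/x86/kernel/ldt.c
 * (details may differ across kernel versions):
 */
#if 0	/* illustrative sketch only, not compiled */
static struct ldt_struct *alloc_ldt_struct(int size)
{
	struct ldt_struct *new_ldt;
	int alloc_size;

	if (size > LDT_ENTRIES)
		return NULL;

	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
	if (!new_ldt)
		return NULL;

	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
	alloc_size = size * LDT_ENTRY_SIZE;

	/* Zeroed and page-aligned, as Xen requires. */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = vzalloc(alloc_size);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

	if (!new_ldt->entries) {
		kfree(new_ldt);
		return NULL;
	}

	new_ldt->size = size;
	return new_ldt;
}
#endif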

static inline void load_mm_ldt(struct mm_struct *mm)
{
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->size);
	else
		clear_LDT();

	DEBUG_LOCKS_WARN_ON(preemptible());
}
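
/*
 * The write side that pairs with the lockless_dereference() above lives in
 * arch/x86/kernel/ldt.c: the new LDT is published with smp_store_release()
 * and then activated by IPI on every CPU running the mm.  Roughly (a
 * simplified sketch; flush_ldt() just reloads the LDT on the current mm):
 */
#if 0	/* illustrative sketch only, not compiled */
static void flush_ldt(void *current_mm)
{
	if (current->active_mm == current_mm)
		load_mm_ldt(current->active_mm);
}

static void install_ldt(struct mm_struct *current_mm,
			struct ldt_struct *ldt)
{
	/* Synchronizes with lockless_dereference in load_mm_ldt. */
	smp_store_release(&current_mm->context.ldt, ldt);

	/* Activate the new LDT on every CPU that is running current_mm. */
	on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
}
#endif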

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);


static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
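
/*
 * The counterpart to lazy TLB mode is leave_mm() (arch/x86/mm/tlb.c), which
 * the comments in switch_mm() below refer to: when a lazy CPU receives a
 * flush IPI, it drops itself from mm_cpumask and switches to the kernel page
 * tables so that further flush IPIs can be skipped.  A simplified sketch
 * (details vary by kernel version):
 */
#if 0	/* illustrative sketch only, not compiled */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		/* Stop getting flush IPIs for this mm... */
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		/* ...and make sure we aren't caching any of its translations. */
		load_cr3(swapper_pg_dir);
		trace_tlb_flush(TLB_FLUSH_ON_CPU_MIGRATION, TLB_FLUSH_ALL);
	}
}
#endif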

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/*
		 * Re-load page tables.
		 *
		 * This logic has an ordering constraint:
		 *
		 *  CPU 0: Write to a PTE for 'next'
		 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
		 *  CPU 1: set bit 1 in next's mm_cpumask
		 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
		 *
		 * We need to prevent an outcome in which CPU 1 observes
		 * the new PTE value and CPU 0 observes bit 1 clear in
		 * mm_cpumask.  (If that occurs, then the IPI will never
		 * be sent, and CPU 0's TLB will contain a stale entry.)
		 *
		 * The bad outcome can occur if either CPU's load is
		 * reordered before that CPU's store, so both CPUs must
		 * execute full barriers to prevent this from happening.
		 *
		 * Thus, switch_mm needs a full barrier between the
		 * store to mm_cpumask and any operation that could load
		 * from next->pgd.  TLB fills are special and can happen
		 * due to instruction fetches or for no reason at all,
		 * and neither LOCK nor MFENCE orders them.
		 * Fortunately, load_cr3() is serializing and gives the
		 * ordering guarantee we need.
		 */
		load_cr3(next->pgd);

		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush IPIs for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load per-mm CR4 state */
		load_mm_cr4(next);

		/*
		 * Load the LDT, if the LDT is different.
		 *
		 * It's possible that prev->context.ldt doesn't match
		 * the LDT register.  This can happen if leave_mm(prev)
		 * was called and then modify_ldt changed
		 * prev->context.ldt but suppressed an IPI to this CPU.
		 * In this case, prev->context.ldt != NULL, because we
		 * never set context.ldt to NULL while the mm still
		 * exists.  That means that next->context.ldt !=
		 * prev->context.ldt, because mms never share an LDT.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_mm_ldt(next);
	}
#ifdef CONFIG_SMP
	else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here. Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));

			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3 to
			 * make sure we don't use freed page tables.
			 *
			 * As above, load_cr3() is serializing and orders TLB
			 * fills with respect to the mm_cpumask write.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_mm_cr4(next);
			load_mm_ldt(next);
		}
	}
#endif
}
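
/*
 * The other half of the ordering argument in switch_mm() is the flush IPI
 * sender: after updating a PTE it reads mm_cpumask and IPIs every CPU it
 * finds there.  A stripped-down sketch of that side (modeled on
 * flush_tlb_mm_range()/flush_tlb_others() in arch/x86/mm/tlb.c; the real
 * code batches ranges and has more fast paths, and flush_one_mm() is an
 * illustrative name):
 */
#if 0	/* illustrative sketch only, not compiled */
static void flush_one_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (current->active_mm == mm && current->mm)
		local_flush_tlb();

	/*
	 * This read of mm_cpumask must not be reordered before the PTE
	 * update that preceded it; switch_mm()'s load_cr3() provides the
	 * matching barrier on the other side.
	 */
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);

	preempt_enable();
}
#endif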

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)
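
/*
 * activate_mm() is used when a task gets a brand-new address space, most
 * notably from exec_mmap() in fs/exec.c.  Roughly (a heavily simplified
 * sketch under an illustrative name; the real function also handles
 * mm->core_state, counters and more locking):
 */
#if 0	/* illustrative sketch only, not compiled */
static int exec_mmap_sketch(struct mm_struct *new_mm)
{
	struct task_struct *tsk = current;
	struct mm_struct *old_mm = tsk->mm;

	task_lock(tsk);
	tsk->mm = new_mm;
	tsk->active_mm = new_mm;
	activate_mm(old_mm, new_mm);	/* paravirt hook + switch_mm() */
	task_unlock(tsk);

	if (old_mm)
		mmput(old_mm);
	return 0;
}
#endif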
196
197#ifdef CONFIG_X86_32
198#define deactivate_mm(tsk, mm)			\
199do {						\
200	lazy_load_gs(0);			\
201} while (0)
202#else
203#define deactivate_mm(tsk, mm)			\
204do {						\
205	load_gs_index(0);			\
206	loadsegment(fs, 0);			\
207} while (0)
208#endif
209
210static inline void arch_dup_mmap(struct mm_struct *oldmm,
211				 struct mm_struct *mm)
212{
213	paravirt_arch_dup_mmap(oldmm, mm);
214}
215
216static inline void arch_exit_mmap(struct mm_struct *mm)
217{
218	paravirt_arch_exit_mmap(mm);
219}
220
221static inline void arch_bprm_mm_init(struct mm_struct *mm,
222		struct vm_area_struct *vma)
223{
224	mpx_mm_init(mm);
225}
226
227static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
228			      unsigned long start, unsigned long end)
229{
230	/*
231	 * mpx_notify_unmap() goes and reads a rarely-hot
232	 * cacheline in the mm_struct.  That can be expensive
233	 * enough to be seen in profiles.
234	 *
235	 * The mpx_notify_unmap() call and its contents have been
236	 * observed to affect munmap() performance on hardware
237	 * where MPX is not present.
238	 *
239	 * The unlikely() optimizes for the fast case: no MPX
240	 * in the CPU, or no MPX use in the process.  Even if
241	 * we get this wrong (in the unlikely event that MPX
242	 * is widely enabled on some system) the overhead of
243	 * MPX itself (reading bounds tables) is expected to
244	 * overwhelm the overhead of getting this unlikely()
245	 * consistently wrong.
246	 */
247	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
248		mpx_notify_unmap(mm, vma, start, end);
249}
250
251#endif /* _ASM_X86_MMU_CONTEXT_H */
252