root/arch/x86/mm/cpu_entry_area.c


DEFINITIONS

This source file includes the following definitions.
  1. get_cpu_entry_area
  2. cea_set_pte
  3. cea_map_percpu_pages
  4. percpu_setup_debug_store
  5. percpu_setup_exception_stacks
  6. percpu_setup_exception_stacks
  7. setup_cpu_entry_area
  8. setup_cpu_entry_area_ptes
  9. setup_cpu_entry_areas

// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>

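/*
 * Backing store for the per-CPU entry stack. setup_cpu_entry_area() maps it
 * into each CPU's cpu_entry_area so the entry code can reach it at a fixed
 * virtual address.
 */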
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

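/*
 * 64-bit only: backing store for the IST exception stacks, plus a per-CPU
 * pointer to their mapping inside the cpu_entry_area, which is set up in
 * percpu_setup_exception_stacks().
 */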
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
#endif

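/*
 * Translate a CPU number into the virtual address of its cpu_entry_area.
 * The areas are laid out back to back above CPU_ENTRY_AREA_PER_CPU, so the
 * lookup is pure arithmetic.
 */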
struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
        unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
        BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

        return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);

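/*
 * Install a single PTE that maps physical address @pa at the cpu_entry_area
 * address @cea_vaddr with protection @flags.
 */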
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
        unsigned long va = (unsigned long) cea_vaddr;
        pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

        /*
         * The cpu_entry_area is shared between the user and kernel
         * page tables.  All of its ptes can safely be global.
         * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
         * non-present PTEs, so be careful not to set it in that
         * case to avoid confusion.
         */
        if (boot_cpu_has(X86_FEATURE_PGE) &&
            (pgprot_val(flags) & _PAGE_PRESENT))
                pte = pte_set_flags(pte, _PAGE_GLOBAL);

        set_pte_vaddr(va, pte);
}

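/*
 * Map @pages pages of the per-CPU object @ptr into the cpu_entry_area
 * starting at @cea_vaddr, one PTE per page.
 */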
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
        for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
                cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}

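/*
 * Map the per-CPU Intel debug store (used by the PEBS/BTS machinery) into
 * the cpu_entry_area. The sampling buffers themselves are allocated lazily,
 * so only their page table levels are pre-populated here.
 */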
static void __init percpu_setup_debug_store(unsigned int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
        unsigned int npages;
        void *cea;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return;

        cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
        npages = sizeof(struct debug_store) / PAGE_SIZE;
        BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
        cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
                             PAGE_KERNEL);

        cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
        /*
         * Force the population of PMDs for not-yet-allocated per-CPU
         * memory like debug store buffers.
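         * Mapping the range PAGE_NONE makes set_pte_vaddr() allocate the
         * intermediate page table levels while leaving the PTEs themselves
         * non-present; the real PTEs are installed later, once the buffers
         * have actually been allocated.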
         */
        npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
        for (; npages; npages--, cea += PAGE_SIZE)
                cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

#ifdef CONFIG_X86_64

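/*
 * Map one exception stack into the cpu_entry_area. E.g. cea_map_stack(DF)
 * expands (roughly) to:
 *
 *      npages = sizeof(estacks->DF_stack) / PAGE_SIZE;
 *      cea_map_percpu_pages(cea->estacks.DF_stack, estacks->DF_stack,
 *                           npages, PAGE_KERNEL);
 *
 * and relies on the local variables of percpu_setup_exception_stacks().
 */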
#define cea_map_stack(name) do {                                        \
        npages = sizeof(estacks->name## _stack) / PAGE_SIZE;            \
        cea_map_percpu_pages(cea->estacks.name## _stack,                \
                        estacks->name## _stack, npages, PAGE_KERNEL);   \
        } while (0)

static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
        struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
        struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
        unsigned int npages;

        BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);

        per_cpu(cea_exception_stacks, cpu) = &cea->estacks;

        /*
         * The exception stack mappings in the per-CPU area are protected
         * by guard pages, so each stack must be mapped separately. DB2 is
         * not mapped; it just exists to catch triple nesting of #DB.
         */
        cea_map_stack(DF);
        cea_map_stack(NMI);
        cea_map_stack(DB1);
        cea_map_stack(DB);
        cea_map_stack(MCE);
}
#else
static inline void percpu_setup_exception_stacks(unsigned int cpu) {}
#endif

/* Set up the cpu_entry_area mappings; this runs only once per CPU */
static void __init setup_cpu_entry_area(unsigned int cpu)
{
        struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
#ifdef CONFIG_X86_64
        /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
        pgprot_t gdt_prot = PAGE_KERNEL_RO;
        pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
        /*
         * On native 32-bit systems, the GDT cannot be read-only because
         * our double fault handler uses a task gate, and entering through
         * a task gate needs to change an available TSS to busy.  If the
         * GDT is read-only, that will triple fault.  The TSS cannot be
         * read-only because the CPU writes to it on task switches.
         *
         * On Xen PV, the GDT must be read-only because the hypervisor
         * requires it.
         */
        pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
                PAGE_KERNEL_RO : PAGE_KERNEL;
        pgprot_t tss_prot = PAGE_KERNEL;
#endif

        cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);

        cea_map_percpu_pages(&cea->entry_stack_page,
                             per_cpu_ptr(&entry_stack_storage, cpu), 1,
                             PAGE_KERNEL);

        /*
         * The Intel SDM says (Volume 3, 7.2.1):
         *
         *  Avoid placing a page boundary in the part of the TSS that the
         *  processor reads during a task switch (the first 104 bytes). The
         *  processor may not correctly perform address translations if a
         *  boundary occurs in this area. During a task switch, the processor
         *  reads and writes into the first 104 bytes of each TSS (using
         *  contiguous physical addresses beginning with the physical address
         *  of the first byte of the TSS). So, after TSS access begins, if
         *  part of the 104 bytes is not physically contiguous, the processor
         *  will access incorrect information without generating a page-fault
         *  exception.
         *
         * There are also a lot of errata involving the TSS spanning a page
         * boundary.  Assert that we're not doing that.
         */
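        /*
         * The XOR of the start and end offsets of x86_tss has bits set only
         * where the two differ; masking with PAGE_MASK is therefore non-zero
         * exactly when the hardware part of the TSS would cross a page
         * boundary.
         */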
        BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
                      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
        BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
        cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
                             sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
        per_cpu(cpu_entry_area, cpu) = cea;
#endif

        percpu_setup_exception_stacks(cpu);

        percpu_setup_debug_store(cpu);
}

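/*
 * 32-bit only: pre-populate the page table pages covering the whole
 * cpu_entry_area range, so that later cea_set_pte() calls only have to fill
 * in PTEs. 64-bit needs nothing here because set_pte_vaddr() allocates the
 * intermediate levels on demand.
 */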
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
        unsigned long start, end;

        /* The +1 is for the readonly IDT: */
        BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
        BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
        BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

        start = CPU_ENTRY_AREA_BASE;
        end = start + CPU_ENTRY_AREA_MAP_SIZE;

        /* Careful here: start + PMD_SIZE might wrap around */
        for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
                populate_extra_pte(start);
#endif
}

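/*
 * Boot-time entry point: prepare the cpu_entry_area page tables and then
 * populate the entry area of every possible CPU.
 */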
void __init setup_cpu_entry_areas(void)
{
        unsigned int cpu;

        setup_cpu_entry_area_ptes();

        for_each_possible_cpu(cpu)
                setup_cpu_entry_area(cpu);

        /*
         * This is the last essential update to swapper_pg_dir which needs
         * to be synchronized to initial_page_table on 32-bit.
         */
        sync_initial_page_table();
}
