root/arch/arm64/include/asm/mmu.h


DEFINITIONS

This source file includes the following definitions.
  1. arm64_kernel_unmapped_at_el0
  2. arm64_kernel_use_ng_mappings
  3. arm64_get_bp_hardening_data
  4. arm64_apply_bp_hardening
  5. arm64_get_bp_hardening_data (stub when CONFIG_HARDEN_BRANCH_PREDICTOR is not set)
  6. arm64_apply_bp_hardening (stub when CONFIG_HARDEN_BRANCH_PREDICTOR is not set)

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_H
#define __ASM_MMU_H

#include <asm/cputype.h>

#define MMCF_AARCH32    0x1     /* mm context flag for AArch32 executables */
#define USER_ASID_BIT   48
#define USER_ASID_FLAG  (UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK  (UL(0xffff) << 48)

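/*
 * Illustrative note (not part of the original header): with kpti enabled the
 * ASID allocator hands out ASIDs in pairs, and the entry/uaccess code flips
 * between the kernel and user halves of a pair by toggling bit 48 of the
 * TTBR, roughly:
 *
 *   ttbr &= ~TTBR_ASID_MASK;             clear the ASID field
 *   ttbr |= asid | USER_ASID_FLAG;       install the user ASID of the pair
 *
 * The real manipulation lives in the entry and uaccess assembly.
 */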
#define BP_HARDEN_EL2_SLOTS 4

#ifndef __ASSEMBLY__

typedef struct {
        atomic64_t      id;
        void            *vdso;
        unsigned long   flags;
} mm_context_t;

/*
 * This macro is only used by the TLBI code, which cannot race with an
 * ASID change and therefore doesn't need to reload the counter using
 * atomic64_read.
 */
#define ASID(mm)        ((mm)->context.id.counter & 0xffff)

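/*
 * Illustrative sketch (not part of the original header): the TLB
 * invalidation helpers fold ASID(mm) into the operand of a TLBI
 * instruction, roughly:
 *
 *   unsigned long arg = (ASID(mm) << 48) | (addr >> 12);
 *
 * with the exact encoding living in asm/tlbflush.h.
 */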
static inline bool arm64_kernel_unmapped_at_el0(void)
{
        return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
               cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}

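/*
 * Usage sketch (illustrative only): callers check this helper to decide
 * whether the kpti trampoline and the split user/kernel ASIDs are in
 * effect, e.g.
 *
 *   if (arm64_kernel_unmapped_at_el0())
 *           setup_trampoline_vectors();      hypothetical helper
 */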
static inline bool arm64_kernel_use_ng_mappings(void)
{
        bool tx1_bug;

        /* What's a kpti? Use global mappings if we don't know. */
        if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
                return false;

        /*
         * Note: this function is called before the CPU capabilities have
         * been configured, so our early mappings will be global. If we
         * later determine that kpti is required, then
         * kpti_install_ng_mappings() will make them non-global.
         */
        if (arm64_kernel_unmapped_at_el0())
                return true;

        if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
                return false;

        /*
         * KASLR is enabled so we're going to be enabling kpti on non-broken
         * CPUs regardless of their susceptibility to Meltdown. Rather
         * than force everybody to go through the G -> nG dance later on,
         * just put down non-global mappings from the beginning.
         */
        if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
                tx1_bug = false;
#ifndef MODULE
        } else if (!static_branch_likely(&arm64_const_caps_ready)) {
                extern const struct midr_range cavium_erratum_27456_cpus[];

                tx1_bug = is_midr_in_range_list(read_cpuid_id(),
                                                cavium_erratum_27456_cpus);
#endif
        } else {
                tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
        }

        return !tx1_bug && kaslr_offset() > 0;
}

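/*
 * Illustrative sketch (not part of the original header): the page-table
 * protection defaults consult this helper so that early mappings are
 * already non-global whenever kpti is, or may later be, required, roughly:
 *
 *   #define PTE_MAYBE_NG    (arm64_kernel_use_ng_mappings() ? PTE_NG : 0)
 *
 * See asm/pgtable-prot.h for the real definitions.
 */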
typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
        int                     hyp_vectors_slot;
        bp_hardening_cb_t       fn;
};

#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) || \
     defined(CONFIG_HARDEN_EL2_VECTORS))
extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
extern atomic_t arm64_el2_vector_last_slot;
#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
        return this_cpu_ptr(&bp_hardening_data);
}

static inline void arm64_apply_bp_hardening(void)
{
        struct bp_hardening_data *d;

        if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
                return;

        d = arm64_get_bp_hardening_data();
        if (d->fn)
                d->fn();
}
#else
static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
        return NULL;
}

static inline void arm64_apply_bp_hardening(void)       { }
#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR */

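/*
 * Usage sketch (illustrative only, not part of the original header): paths
 * that take an exception from EL0 with attacker-influenced branch history
 * invoke the per-CPU hardening callback before running further kernel code.
 * The function name below is hypothetical.
 */
static inline void example_el0_entry_hardening(void)
{
        arm64_apply_bp_hardening();     /* runs the installed callback, if any */
}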
extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);

#define INIT_MM_CONTEXT(name)   \
        .pgd = init_pg_dir,

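/*
 * Illustrative note (not part of the original header): INIT_MM_CONTEXT()
 * is expanded inside the static initialiser of init_mm, roughly:
 *
 *   struct mm_struct init_mm = {
 *           ...
 *           INIT_MM_CONTEXT(init_mm)
 *   };
 *
 * so the initial mm starts out pointing at init_pg_dir.
 */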
#endif  /* !__ASSEMBLY__ */
#endif
