This source file includes the following definitions:
- setup_vectors_base
- set_vbar
- security_extensions_enabled
- setup_vectors_base
- arm_mm_memblock_reserve
- adjust_lowmem_bounds_mpu
- mpu_setup
- adjust_lowmem_bounds
- paging_init
- setup_mm_for_reboot
- flush_dcache_page
- flush_kernel_dcache_page
- copy_to_user_page
- __arm_ioremap_pfn
- __arm_ioremap_caller
- ioremap
- ioremap_cache
- ioremap_wc
- pci_remap_cfgspace
- arch_memremap_wb
- __iounmap
- iounmap
/*
 * ARM MMU-less (nommu) memory management support.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/kernel.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/mach/arch.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
#include <asm/procinfo.h>

#include "mm.h"

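/* Physical base address of the exception vectors, chosen in arm_mm_memblock_reserve(). */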
unsigned long vectors_base;

#ifdef CONFIG_ARM_MPU
struct mpu_rgn_info mpu_rgn_info;
#endif

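/*
 * setup_vectors_base() decides where the CPU takes its exceptions: either
 * the high vectors address (0xffff0000) or a VBAR-programmed base.
 */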
#ifdef CONFIG_CPU_CP15
#ifdef CONFIG_CPU_HIGH_VECTOR
unsigned long setup_vectors_base(void)
{
        unsigned long reg = get_cr();

        set_cr(reg | CR_V);
        return 0xffff0000;
}
#else /* CONFIG_CPU_HIGH_VECTOR */
/* Write the exception base address to VBAR. */
static inline void set_vbar(unsigned long val)
{
        asm("mcr p15, 0, %0, c12, c0, 0" : : "r" (val) : "cc");
}

/*
 * Security Extensions support is advertised in ID_PFR1 bits [7:4]
 * (with fractional support reported in bits [23:20]); a value of
 * 0b0000 means not implemented.
 */
static inline bool security_extensions_enabled(void)
{
        /* Check the CPUID identification scheme before reading ID_PFR1. */
        if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
                return cpuid_feature_extract(CPUID_EXT_PFR1, 4) ||
                        cpuid_feature_extract(CPUID_EXT_PFR1, 20);
        return false;
}

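/*
 * Low vectors: clear SCTLR.V and program VBAR.  With the Security
 * Extensions the vectors may optionally be remapped to the start of DRAM
 * (CONFIG_REMAP_VECTORS_TO_RAM); without them the base stays at 0.
 */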
unsigned long setup_vectors_base(void)
{
        unsigned long base = 0, reg = get_cr();

        set_cr(reg & ~CR_V);
        if (security_extensions_enabled()) {
                if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM))
                        base = CONFIG_DRAM_BASE;
                set_vbar(base);
        } else if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM)) {
                if (CONFIG_DRAM_BASE != 0)
                        pr_err("Security extensions not enabled, vectors cannot be remapped to RAM, vectors base will be 0x00000000\n");
        }

        return base;
}
#endif /* CONFIG_CPU_HIGH_VECTOR */
#endif /* CONFIG_CPU_CP15 */

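/*
 * Reserve the memory regions the kernel must never hand out: the
 * exception vector pages (where present) and physical address 0.
 */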
void __init arm_mm_memblock_reserve(void)
{
#ifndef CONFIG_CPU_V7M
        vectors_base = IS_ENABLED(CONFIG_CPU_CP15) ? setup_vectors_base() : 0;
        /*
         * Reserve the exception vector pages so that they are never
         * handed out by the page allocator.
         */
        memblock_reserve(vectors_base, 2 * PAGE_SIZE);
#else /* ifndef CONFIG_CPU_V7M */
        /*
         * There is no dedicated vector page on V7-M, so nothing needs
         * to be reserved here.
         */
#endif
        /*
         * In any case, make sure address 0 is never used, as many things
         * get very confused if 0 is returned as a legitimate address.
         */
        memblock_reserve(0, 1);
}

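/* Dispatch to the PMSAv7 or PMSAv8 code according to ID_MMFR0. */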
static void __init adjust_lowmem_bounds_mpu(void)
{
        unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;

        switch (pmsa) {
        case MMFR0_PMSAv7:
                pmsav7_adjust_lowmem_bounds();
                break;
        case MMFR0_PMSAv8:
                pmsav8_adjust_lowmem_bounds();
                break;
        default:
                break;
        }
}

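/* Program the MPU regions for whichever PMSA revision the CPU implements. */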
static void __init mpu_setup(void)
{
        unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;

        switch (pmsa) {
        case MMFR0_PMSAv7:
                pmsav7_setup();
                break;
        case MMFR0_PMSAv8:
                pmsav8_setup();
                break;
        default:
                break;
        }
}

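/*
 * Without an MMU all of DRAM is lowmem: clamp the MPU view of memory,
 * then derive high_memory and the memblock limit from the end of DRAM.
 */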
void __init adjust_lowmem_bounds(void)
{
        phys_addr_t end;
        adjust_lowmem_bounds_mpu();
        end = memblock_end_of_DRAM();
        high_memory = __va(end - 1) + 1;
        memblock_set_current_limit(end);
}

/*
 * paging_init() installs the exception vectors, configures the MPU and
 * initialises the memory maps for an MMU-less kernel.
 */
void __init paging_init(const struct machine_desc *mdesc)
{
        early_trap_init((void *)vectors_base);
        mpu_setup();
        bootmem_init();
}

/*
 * There is nothing to do here for nommu machines.
 */
void setup_mm_for_reboot(void)
{
}

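/*
 * Cache maintenance: with no MMU there are no virtual aliases, so
 * flushing a page is a D-cache clean+invalidate of its kernel address.
 */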
void flush_dcache_page(struct page *page)
{
        __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_kernel_dcache_page(struct page *page)
{
        __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

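/* After writing into a user page, keep the I-cache coherent for executable mappings. */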
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long uaddr, void *dst, const void *src,
                       unsigned long len)
{
        memcpy(dst, src, len);
        if (vma->vm_flags & VM_EXEC)
                __cpuc_coherent_user_range(uaddr, uaddr + len);
}

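/*
 * I/O "remapping" on nommu: devices are accessed at their physical
 * addresses, so the ioremap family just returns the physical address
 * cast to an __iomem cookie (PFNs above 4GiB cannot be represented).
 */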
void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
                                size_t size, unsigned int mtype)
{
        if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
                return NULL;
        return (void __iomem *) (offset + (pfn << PAGE_SHIFT));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
                                   unsigned int mtype, void *caller)
{
        return (void __iomem *)phys_addr;
}

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
        return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
                                    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
        __alias(ioremap_cached);

void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
        return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
                                    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
EXPORT_SYMBOL(ioremap_cached);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
        return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
                                    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

#ifdef CONFIG_PCI

#include <asm/mach/map.h>

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

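/*
 * memremap() and iounmap() counterparts: nothing was actually remapped,
 * so there is nothing to tear down.
 */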
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
        return (void *)phys_addr;
}

void __iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(__iounmap);

void (*arch_iounmap)(volatile void __iomem *);

void iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(iounmap);