root/arch/riscv/mm/init.c


DEFINITIONS

This source file includes the following definitions:
  1. zone_sizes_init
  2. setup_zero_page
  3. mem_init
  4. setup_initrd
  5. setup_bootmem
  6. __set_fixmap
  7. get_pte_virt
  8. alloc_pte
  9. create_pte_mapping
  10. get_pmd_virt
  11. alloc_pmd
  12. create_pmd_mapping
  13. create_pgd_mapping
  14. best_map_size
  15. setup_vm
  16. setup_vm_final
  17. paging_init
  18. vmemmap_populate

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include "../kernel/head.h"

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];

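/*
 * Populate the per-zone max-PFN array from the memblock-derived limits and
 * hand it to the core mm. With CONFIG_ZONE_DMA32, ZONE_DMA32 is capped at
 * the lower of 4 GiB and the end of low memory.
 */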
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
			(unsigned long) PFN_PHYS(max_low_pfn)));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init_nodes(max_zone_pfns);
}

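/*
 * Explicitly clear the shared zero page (it lives in .bss per the
 * declaration above, so this is belt-and-braces) before ZERO_PAGE()
 * users can map it.
 */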
void setup_zero_page(void)
{
	memset((void *)empty_zero_page, 0, PAGE_SIZE);
}

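/*
 * Late memory-init hook: hand all memblock-managed pages over to the buddy
 * allocator and print the usual "Memory: ..." banner.
 */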
void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	mem_init_print_info(NULL);
}

#ifdef CONFIG_BLK_DEV_INITRD
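/*
 * Validate the initrd range handed over by the bootloader and reserve it in
 * memblock so it is not released to the page allocator; on any error the
 * initrd is disabled by zeroing initrd_start/initrd_end.
 */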
static void __init setup_initrd(void)
{
	unsigned long size;

	if (initrd_start >= initrd_end) {
		pr_info("initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		pr_err("initrd extends beyond end of memory");
		goto disable;
	}

	size = initrd_end - initrd_start;
	memblock_reserve(__pa(initrd_start), size);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), size);
	return;
disable:
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
#endif /* CONFIG_BLK_DEV_INITRD */

static phys_addr_t dtb_early_pa __initdata;

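/*
 * Trim the memblock view of memory to what the kernel can actually map
 * linearly (at most -PAGE_OFFSET bytes of the bank holding the kernel),
 * reserve the kernel image, initrd and DTB, initialise the PFN limits used
 * by zone setup, and assign all memory regions to node 0.
 */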
void __init setup_bootmem(void)
{
	struct memblock_region *reg;
	phys_addr_t mem_size = 0;
	phys_addr_t vmlinux_end = __pa(&_end);
	phys_addr_t vmlinux_start = __pa(&_start);

	/* Find the memory region containing the kernel */
	for_each_memblock(memory, reg) {
		phys_addr_t end = reg->base + reg->size;

		if (reg->base <= vmlinux_start && vmlinux_end <= end) {
			mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);

			/*
			 * Remove memblock from the end of usable area to the
			 * end of region
			 */
			if (reg->base + mem_size < end)
				memblock_remove(reg->base + mem_size,
						end - reg->base - mem_size);
		}
	}
	BUG_ON(mem_size == 0);

	/* Reserve from the start of the kernel to the end of the kernel */
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	set_max_mapnr(PFN_DOWN(mem_size));
	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	max_low_pfn = max_pfn;

#ifdef CONFIG_BLK_DEV_INITRD
	setup_initrd();
#endif /* CONFIG_BLK_DEV_INITRD */

	/*
	 * Avoid using early_init_fdt_reserve_self() since __pa() does
	 * not work for DTB pointers that are fixmap addresses
	 */
	memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	early_init_fdt_scan_reserved_mem();
	memblock_allow_resize();
	memblock_dump_all();

	for_each_memblock(memory, reg) {
		unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
		unsigned long end_pfn = memblock_region_memory_end_pfn(reg);

		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, 0);
	}
}

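/*
 * va_pa_offset is the constant difference between a linear-map virtual
 * address and its physical address, applied by __va()/__pa(); pfn_base is
 * the PFN of the kernel load address and (on this architecture) serves as
 * ARCH_PFN_OFFSET for mem_map arithmetic.
 */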
unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

void *dtb_early_va;
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
static bool mmu_enabled;

#define MAX_EARLY_MAPPING_SIZE	SZ_128M

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

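/*
 * (Re)program a fixmap slot: a non-empty pgprot installs the mapping, an
 * empty one clears the slot and flushes the stale TLB entry on this CPU.
 */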
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot)) {
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	} else {
		pte_clear(&init_mm, addr, ptep);
		local_flush_tlb_page(addr);
	}
}

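/*
 * Early page-table helpers. Before the MMU is on, tables are addressed by
 * their physical address and come from statically reserved arrays; once
 * mmu_enabled is set they are allocated from memblock and accessed through
 * dedicated fixmap windows (FIX_PTE/FIX_PMD).
 */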
static pte_t *__init get_pte_virt(phys_addr_t pa)
{
	if (mmu_enabled) {
		clear_fixmap(FIX_PTE);
		return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
	} else {
		return (pte_t *)((uintptr_t)pa);
	}
}

static phys_addr_t __init alloc_pte(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG_ON(!mmu_enabled);

	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_index = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_index]))
		ptep[pte_index] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;

#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
#define NUM_EARLY_PMDS		1UL
#else
#define NUM_EARLY_PMDS		(1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
#endif
pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt(phys_addr_t pa)
{
	if (mmu_enabled) {
		clear_fixmap(FIX_PMD);
		return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
	} else {
		return (pmd_t *)((uintptr_t)pa);
	}
}

static phys_addr_t __init alloc_pmd(uintptr_t va)
{
	uintptr_t pmd_num;

	if (mmu_enabled)
		return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);

	pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
	BUG_ON(pmd_num >= NUM_EARLY_PMDS);
	return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
}

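/*
 * Install a mapping at the PMD level: either a leaf PMD (sz == PMD_SIZE)
 * or, for 4K mappings, via a next-level PTE table that is allocated and
 * wired up on first use.
 */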
static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_index = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_index]))
			pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_index])) {
		pte_phys = alloc_pte(va);
		pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_index]));
		ptep = get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

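/*
 * Parameterise create_pgd_mapping() on the level below the PGD: with a
 * real PMD level these aliases resolve to the pmd helpers above; when the
 * PMD is folded (e.g. sv32 on RV32) the PGD points straight at PTE tables.
 */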
#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define PTE_PARENT_SIZE		PMD_SIZE
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	alloc_pte(__va)
#define get_pgd_next_virt(__pa)	get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define PTE_PARENT_SIZE		PGDIR_SIZE
#define fixmap_pgd_next		fixmap_pte
#endif

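/*
 * Top-level mapping helper: install a leaf PGD entry for PGDIR_SIZE
 * mappings, otherwise descend (allocating a next-level table on demand)
 * and delegate to the level below.
 */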
static void __init create_pgd_mapping(pgd_t *pgdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_index = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_index]) == 0)
			pgdp[pgd_index] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_index]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_index] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_index]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

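/*
 * Choose between 4K pages and the next level's block size (PMD, or PGDIR
 * when the PMD is folded): the larger granularity is only usable when both
 * the physical base and the region size are aligned to it.
 */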
static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	uintptr_t map_size = PAGE_SIZE;

	/* Upgrade to PMD/PGDIR mappings whenever possible */
	if (!(base & (PTE_PARENT_SIZE - 1)) &&
	    !(size & (PTE_PARENT_SIZE - 1)))
		map_size = PTE_PARENT_SIZE;

	return map_size;
}

/*
 * setup_vm() is called from head.S with the MMU off.
 *
 * The following requirements must be honoured for setup_vm() to work
 * correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm(),
 *    so compiler instrumentation is disabled when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

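/*
 * For reference, a sketch of the per-object CFLAGS override described
 * above (illustrative only; the authoritative flags live in
 * arch/riscv/mm/Makefile):
 *
 *	CFLAGS_init.o := -mcmodel=medany
 *	ifdef CONFIG_FTRACE
 *	CFLAGS_REMOVE_init.o = -pg
 *	endif
 */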
#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

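/*
 * Build the minimal page tables needed to turn the MMU on: a trampoline
 * table mapping just the start of the kernel at PAGE_OFFSET, an early
 * table covering the whole kernel image, and a fixmap window over the DTB
 * so it can be parsed before the linear map exists.
 */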
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	uintptr_t va, end_va;
	uintptr_t load_pa = (uintptr_t)(&_start);
	uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
	uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);

	va_pa_offset = PAGE_OFFSET - load_pa;
	pfn_base = PFN_DOWN(load_pa);

	/*
	 * Enforce boot alignment requirements of RV32 and
	 * RV64 by only allowing PMD or PGD mappings.
	 */
	BUG_ON(map_size == PAGE_SIZE);

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((load_pa % map_size) != 0);
	BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);

	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap PMD */
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
	create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Setup early PGD covering the entire kernel, which allows us
	 * to reach paging_init(). We map all memory banks later in
	 * setup_vm_final() below.
	 */
	end_va = PAGE_OFFSET + load_sz;
	for (va = PAGE_OFFSET; va < end_va; va += map_size)
		create_pgd_mapping(early_pg_dir, va,
				   load_pa + (va - PAGE_OFFSET),
				   map_size, PAGE_KERNEL_EXEC);

	/* Create fixed mapping for early FDT parsing */
	end_va = __fix_to_virt(FIX_FDT) + FIX_FDT_SIZE;
	for (va = __fix_to_virt(FIX_FDT); va < end_va; va += PAGE_SIZE)
		create_pte_mapping(fixmap_pte, va,
				   dtb_pa + (va - __fix_to_virt(FIX_FDT)),
				   PAGE_SIZE, PAGE_KERNEL);

	/* Save pointer to DTB for early FDT parsing */
	dtb_early_va = (void *)fix_to_virt(FIX_FDT) + (dtb_pa & ~PAGE_MASK);
	/* Save physical address for memblock reservation */
	dtb_early_pa = dtb_pa;
}

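/*
 * Switch from the early page table to the final swapper_pg_dir: map every
 * usable memblock bank into the linear range at the best granularity, tear
 * down the temporary fixmap table windows, and load SATP with the new root
 * table.
 */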
static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
	struct memblock_region *reg;

	/* Set mmu_enabled flag */
	mmu_enabled = true;

	/* Setup swapper PGD for fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map all memory banks */
	for_each_memblock(memory, reg) {
		start = reg->base;
		end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);
			create_pgd_mapping(swapper_pg_dir, va, pa,
					   map_size, PAGE_KERNEL_EXEC);
		}
	}

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();
}

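/*
 * Arch entry point for paging setup: finalise the kernel page tables, then
 * feed the memory layout to sparsemem and zone initialisation.
 */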
void __init paging_init(void)
{
	setup_vm_final();
	memblocks_present();
	sparse_init();
	setup_zero_page();
	zone_sizes_init();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
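/*
 * Back the virtually mapped struct page array with ordinary 4K pages; no
 * huge-page vmemmap optimisation is attempted here.
 */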
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node);
}
#endif
