arch/powerpc/mm/mem.c


DEFINITIONS

This source file includes the following definitions:
  1. virt_to_kpte
  2. phys_mem_access_prot
  3. memory_add_physaddr_to_nid
  4. create_section_mapping
  5. remove_section_mapping
  6. flush_dcache_range_chunked
  7. arch_add_memory
  8. arch_remove_memory
  9. mem_topology_setup
  10. initmem_init
  11. mark_nonram_nosave
  12. mark_nonram_nosave
  13. paging_init
  14. mem_init
  15. free_initmem
  16. flush_coherent_icache
  17. invalidate_icache_range
  18. flush_icache_range
  19. flush_dcache_icache_phys
  20. flush_dcache_page
  21. flush_dcache_icache_page
  22. __flush_dcache_icache
  23. clear_user_page
  24. copy_user_page
  25. flush_icache_user_range
  26. add_system_ram_resources
  27. devmem_is_allowed

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include <mm/mmu_decl.h>

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE 0       /* XXX for now */
#define CPU_FTR_NOEXECUTE       0
#endif

unsigned long long memory_limit;
bool init_mem_is_free;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

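/*
 * Walk the kernel page tables (pgd -> pud -> pmd) to get the PTE that maps
 * a kernel virtual address; used below to locate the PKMAP and kmap PTEs.
 */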
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}
#endif

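/*
 * Protection bits used when mapping physical memory (e.g. via /dev/mem):
 * defer to the platform hook if one is registered, otherwise map anything
 * that is not RAM as non-cached.
 */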
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (ppc_md.phys_mem_access_prot)
                return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

        if (!page_is_ram(pfn))
                vma_prot = pgprot_noncached(vma_prot);

        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
        return hot_add_scn_to_nid(start);
}
#endif

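/*
 * Weak stubs: MMU code that can map hot-added memory (e.g. hash and radix
 * on book3s64) provides real implementations; everyone else gets -ENODEV.
 */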
int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
        return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
        return -ENODEV;
}

#define FLUSH_CHUNK_SIZE SZ_1G
/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
                                       unsigned long chunk)
{
        unsigned long i;

        for (i = start; i < stop; i += chunk) {
                flush_dcache_range(i, min(stop, i + chunk));
                cond_resched();
        }
}

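/*
 * Memory hotplug arch hook: grow the HPT if needed, map the new range in
 * the kernel linear mapping, flush it from the data cache, then hand the
 * page range to the generic hotplug code via __add_pages().
 */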
int __ref arch_add_memory(int nid, u64 start, u64 size,
                        struct mhp_restrictions *restrictions)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int rc;

        resize_hpt_for_hotplug(memblock_phys_mem_size());

        start = (unsigned long)__va(start);
        rc = create_section_mapping(start, start + size, nid);
        if (rc) {
                pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
                        start, start + size, rc);
                return -EFAULT;
        }

        flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);

        return __add_pages(nid, start_pfn, nr_pages, restrictions);
}

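/*
 * Inverse of arch_add_memory(): release the pages, flush the removed range
 * from the data cache, tear down its linear mapping and let the HPT be
 * resized down.
 */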
void __ref arch_remove_memory(int nid, u64 start, u64 size,
                             struct vmem_altmap *altmap)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        __remove_pages(start_pfn, nr_pages, altmap);

        /* Remove htab bolted mappings for this section of memory */
        start = (unsigned long)__va(start);
        flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);

        ret = remove_section_mapping(start, start + size);
        WARN_ON_ONCE(ret);

        /* Ensure all vmalloc mappings are flushed in case they also
         * hit that section of memory
         */
        vm_unmap_aliases();

        if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
                pr_warn("Hash collision while resizing HPT\n");
}
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
        max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
        max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

        /* Place all memblock_regions in the same node and merge contiguous
         * memblock_regions
         */
        memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
        /* XXX need to clip this if using highmem? */
        sparse_memory_present_with_active_regions(0);
        sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
        struct memblock_region *reg, *prev = NULL;

        for_each_memblock(memory, reg) {
                if (prev &&
                    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
                        register_nosave_region(memblock_region_memory_end_pfn(prev),
                                               memblock_region_memory_base_pfn(reg));
                prev = reg;
        }
        return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
        return 0;
}
#endif

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit ARCH_ZONE_DMA_BITS to
 * inform the generic DMA mapping code.  32-bit only devices (if not handled
 * by an IOMMU anyway) will take a first dip into ZONE_NORMAL and get
 * otherwise served by ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
        unsigned long long total_ram = memblock_phys_mem_size();
        phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
        unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
        unsigned long end = __fix_to_virt(FIX_HOLE);

        for (; v < end; v += PAGE_SIZE)
                map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
        map_kernel_page(PKMAP_BASE, 0, __pgprot(0));    /* XXX gross */
        pkmap_page_table = virt_to_kpte(PKMAP_BASE);

        kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
        kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

        printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
               (unsigned long long)top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
                                      1UL << (ARCH_ZONE_DMA_BITS - PAGE_SHIFT));
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

        free_area_init_nodes(max_zone_pfns);

        mark_nonram_nosave();
}

void __init mem_init(void)
{
        /*
         * book3s is limited to 16 page sizes due to encoding this in
         * a 4-bit field for slices.
         */
        BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
        /*
         * Some platforms (e.g. 85xx) limit DMA-able memory way below
         * 4G. We force memblock to bottom-up mode to ensure that the
         * memory allocated in swiotlb_init() is DMA-able.
         * As it's the last memblock allocation, no need to reset it
         * back to top-down.
         */
        memblock_set_bottom_up(true);
        swiotlb_init(0);
#endif

        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
        set_max_mapnr(max_pfn);
        memblock_free_all();

#ifdef CONFIG_HIGHMEM
        {
                unsigned long pfn, highmem_mapnr;

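                /*
                 * Pages above the lowmem boundary are not covered by the
                 * kernel linear mapping; give every one that memblock has
                 * not reserved to the buddy allocator as highmem.
                 */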
                highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
                        struct page *page = pfn_to_page(pfn);
                        if (!memblock_is_reserved(paddr))
                                free_highmem_page(page);
                }
        }
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
        /*
         * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
         * functions.... do it here for the non-smp case.
         */
        per_cpu(next_tlbcam_idx, smp_processor_id()) =
                (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

        mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
        pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
        pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
                KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
        pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
        pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
                PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
        if (ioremap_bot != IOREMAP_TOP)
                pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
                        ioremap_bot, IOREMAP_TOP);
        pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
                VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
        ppc_md.progress = ppc_printk_progress;
        mark_initmem_nx();
        init_mem_is_free = true;
        free_initmem_default(POISON_FREE_INITMEM);
}

/**
 * flush_coherent_icache() - if a CPU has a coherent icache, flush it
 * @addr: The base address to use (can be any valid address, the whole cache will be flushed)
 * Return true if the cache was flushed, false otherwise
 */
static inline bool flush_coherent_icache(unsigned long addr)
{
        /*
         * For a snooping icache, we still need a dummy icbi to purge all the
         * prefetched instructions from the ifetch buffers. We also need a sync
         * before the icbi to order the actual stores to memory that might
         * have modified instructions with the icbi.
         */
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
                mb(); /* sync */
                allow_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
                icbi((void *)addr);
                prevent_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
                mb(); /* sync */
                isync();
                return true;
        }

        return false;
}

/**
 * invalidate_icache_range() - Flush the icache by issuing icbi across an address range
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
static void invalidate_icache_range(unsigned long start, unsigned long stop)
{
        unsigned long shift = l1_icache_shift();
        unsigned long bytes = l1_icache_bytes();
        char *addr = (char *)(start & ~(bytes - 1));
        unsigned long size = stop - (unsigned long)addr + (bytes - 1);
        unsigned long i;

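        /*
         * addr was rounded down to a cache line boundary and size rounded
         * up, so the partial lines at both ends of [start, stop) are
         * invalidated as well.
         */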
        for (i = 0; i < size >> shift; i++, addr += bytes)
                icbi(addr);

        mb(); /* sync */
        isync();
}

/**
 * flush_icache_range: Write any modified data cache blocks out to memory
 * and invalidate the corresponding blocks in the instruction cache
 *
 * Generic code will call this after writing memory, before executing from it.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
void flush_icache_range(unsigned long start, unsigned long stop)
{
        if (flush_coherent_icache(start))
                return;

        clean_dcache_range(start, stop);

        if (IS_ENABLED(CONFIG_44x)) {
                /*
                 * Flash invalidate on 44x because we are passed kmapped
                 * addresses and this doesn't work for userspace pages due to
                 * the virtually tagged icache.
                 */
                iccci((void *)start);
                mb(); /* sync */
                isync();
        } else
                invalidate_icache_range(start, stop);
}
EXPORT_SYMBOL(flush_icache_range);

#if !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
/**
 * flush_dcache_icache_phys() - Flush a page by its physical address
 * @physaddr: the physical address of the page
 */
static void flush_dcache_icache_phys(unsigned long physaddr)
{
        unsigned long bytes = l1_dcache_bytes();
        unsigned long nb = PAGE_SIZE / bytes;
        unsigned long addr = physaddr & PAGE_MASK;
        unsigned long msr, msr0;
        unsigned long loop1 = addr, loop2 = addr;

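        /* Clear MSR_DR so the dcbst/icbi below operate on the physical address */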
        msr0 = mfmsr();
        msr = msr0 & ~MSR_DR;
        /*
         * This must remain as ASM to prevent potential memory accesses
         * while the data MMU is disabled
         */
        asm volatile(
                "   mtctr %2;\n"
                "   mtmsr %3;\n"
                "   isync;\n"
                "0: dcbst   0, %0;\n"
                "   addi    %0, %0, %4;\n"
                "   bdnz    0b;\n"
                "   sync;\n"
                "   mtctr %2;\n"
                "1: icbi    0, %1;\n"
                "   addi    %1, %1, %4;\n"
                "   bdnz    1b;\n"
                "   sync;\n"
                "   mtmsr %5;\n"
                "   isync;\n"
                : "+&r" (loop1), "+&r" (loop2)
                : "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
                : "ctr", "memory");
}
#endif // !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &page->flags))
                clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

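/*
 * Flush a page's data cache out to memory and invalidate the matching
 * icache lines, handling compound (huge) pages and highmem pages that may
 * not have a permanent kernel mapping.
 */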
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
        if (PageCompound(page)) {
                flush_dcache_icache_hugepage(page);
                return;
        }
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
        /* On 8xx there is no need to kmap since highmem is not supported */
        __flush_dcache_icache(page_address(page));
#else
        if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
                void *start = kmap_atomic(page);
                __flush_dcache_icache(start);
                kunmap_atomic(start);
        } else {
                unsigned long addr = page_to_pfn(page) << PAGE_SHIFT;

                if (flush_coherent_icache(addr))
                        return;
                flush_dcache_icache_phys(addr);
        }
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

/**
 * __flush_dcache_icache(): Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 * @p: the address of the page to flush
 */
void __flush_dcache_icache(void *p)
{
        unsigned long addr = (unsigned long)p;

        if (flush_coherent_icache(addr))
                return;

        clean_dcache_range(addr, addr + PAGE_SIZE);

        /*
         * We don't flush the icache on 44x. Those have a virtual icache and we
         * don't have access to the virtual address here (it's not the page
         * vaddr but where it's mapped in user space). The flushing of the
         * icache on these is handled elsewhere, when a change in the address
         * space occurs, before returning to user space.
         */

        if (cpu_has_feature(MMU_FTR_TYPE_44x))
                return;

        invalidate_icache_range(addr, addr + PAGE_SIZE);
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
        clear_page(page);

        /*
         * We shouldn't have to do this, but some versions of glibc
         * require it (ld.so assumes zero filled pages are icache clean)
         * - Anton
         */
        flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *pg)
{
        copy_page(vto, vfrom);

        /*
         * We should be able to use the following optimisation, however
         * there are two problems.
         * Firstly a bug in some versions of binutils meant PLT sections
         * were not marked executable.
         * Secondly the first word in the GOT section is blrl, used
         * to establish the GOT address. Until recently the GOT was
         * not marked executable.
         * - Anton
         */
#if 0
        if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
                return;
#endif

        flush_dcache_page(pg);
}

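/*
 * Flush the instruction cache for a range inside a user page that the
 * kernel has just written to; the page is mapped temporarily with kmap().
 */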
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long maddr;

        maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
        flush_icache_range(maddr, maddr + len);
        kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg) {
                struct resource *res;
                unsigned long base = reg->base;
                unsigned long size = reg->size;

                res = kzalloc(sizeof(struct resource), GFP_KERNEL);
                WARN_ON(!res);

                if (res) {
                        res->name = "System RAM";
                        res->start = base;
                        res->end = base + size - 1;
                        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
                        WARN_ON(request_resource(&iomem_resource, res) < 0);
                }
        }

        return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
        if (page_is_rtas_user_buf(pfn))
                return 1;
        if (iomem_is_exclusive(PFN_PHYS(pfn)))
                return 0;
        if (!page_is_ram(pfn))
                return 1;
        return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);
