root/arch/alpha/mm/numa.c


DEFINITIONS

This source file includes the following definitions.
  1. show_mem_layout
  2. setup_memory_node
  3. setup_memory
  4. paging_init

// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/alpha/mm/numa.c
 *
 *  DISCONTIGMEM NUMA alpha support.
 *
 *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/initrd.h>
#include <linux/pfn.h>
#include <linux/module.h>

#include <asm/hwrpb.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>

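/* One pg_data_t per possible node; NODE_DATA(nid) resolves into this
   array.  Exported so modules can reach per-node data as well.  */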
pg_data_t node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);

#undef DEBUG_DISCONTIG
#ifdef DEBUG_DISCONTIG
#define DBGDCONT(args...) printk(args)
#else
#define DBGDCONT(args...)
#endif

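/* Walk every memory cluster in the HWRPB memory data descriptor table
   (memdesc); each cluster describes one contiguous physical range.  */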
#define for_each_mem_cluster(memdesc, _cluster, i)              \
        for ((_cluster) = (memdesc)->cluster, (i) = 0;          \
             (i) < (memdesc)->numclusters; (i)++, (_cluster)++)

static void __init show_mem_layout(void)
{
        struct memclust_struct * cluster;
        struct memdesc_struct * memdesc;
        int i;

        /* The HWRPB gives us the offset of the memory descriptor
           table; walk it and print every cluster, used or free.  */
        memdesc = (struct memdesc_struct *)
          (hwrpb->mddt_offset + (unsigned long) hwrpb);

        printk("Raw memory layout:\n");
        for_each_mem_cluster(memdesc, cluster, i) {
                printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n",
                       i, cluster->usage, cluster->start_pfn,
                       cluster->start_pfn + cluster->numpages);
        }
}

static void __init
setup_memory_node(int nid, void *kernel_end)
{
        extern unsigned long mem_size_limit;
        struct memclust_struct * cluster;
        struct memdesc_struct * memdesc;
        unsigned long start_kernel_pfn, end_kernel_pfn;
        unsigned long start, end;
        unsigned long node_pfn_start, node_pfn_end;
        unsigned long node_min_pfn, node_max_pfn;
        int i;
        int show_init = 0;

        /* Find the bounds of the current node */
        node_pfn_start = (node_mem_start(nid)) >> PAGE_SHIFT;
        node_pfn_end = node_pfn_start + (node_mem_size(nid) >> PAGE_SHIFT);

        /* Find the free clusters that belong to this node, and feed
           them to memblock.  */
        memdesc = (struct memdesc_struct *)
          (hwrpb->mddt_offset + (unsigned long) hwrpb);

        /* find the bounds of this node (node_min_pfn/node_max_pfn) */
        node_min_pfn = ~0UL;
        node_max_pfn = 0UL;
        for_each_mem_cluster(memdesc, cluster, i) {
                /* Bit 0 is console/PALcode reserved.  Bit 1 is
                   non-volatile memory -- we might want to mark
                   this for later.  */
                if (cluster->usage & 3)
                        continue;

                start = cluster->start_pfn;
                end = start + cluster->numpages;

                if (start >= node_pfn_end || end <= node_pfn_start)
                        continue;

                if (!show_init) {
                        show_init = 1;
                        printk("Initializing bootmem allocator on Node ID %d\n", nid);
                }
                printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n",
                       i, cluster->usage, cluster->start_pfn,
                       cluster->start_pfn + cluster->numpages);

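                /* A cluster may straddle the node boundary; clip it to
                   this node's pfn range before updating the extents.  */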
                if (start < node_pfn_start)
                        start = node_pfn_start;
                if (end > node_pfn_end)
                        end = node_pfn_end;

                if (start < node_min_pfn)
                        node_min_pfn = start;
                if (end > node_max_pfn)
                        node_max_pfn = end;
        }

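        /* Honor the global memory size limit (typically set via the
           "mem=" boot argument); print the clamp message only once.  */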
        if (mem_size_limit && node_max_pfn > mem_size_limit) {
                static int msg_shown = 0;
                if (!msg_shown) {
                        msg_shown = 1;
                        printk("setup: forcing memory size to %ldK (from %ldK).\n",
                               mem_size_limit << (PAGE_SHIFT - 10),
                               node_max_pfn    << (PAGE_SHIFT - 10));
                }
                node_max_pfn = mem_size_limit;
        }

        if (node_min_pfn >= node_max_pfn)
                return;

        /* Update global {min,max}_low_pfn from node information. */
        if (node_min_pfn < min_low_pfn)
                min_low_pfn = node_min_pfn;
        if (node_max_pfn > max_low_pfn)
                max_pfn = max_low_pfn = node_max_pfn;

#if 0 /* we'll try this one again in a little while */
        /* Cute trick to make sure our local node data is on local memory */
        node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT));
#endif
        printk(" Detected node memory:   start %8lu, end %8lu\n",
               node_min_pfn, node_max_pfn);

        DBGDCONT(" DISCONTIG: node_data[%d]   is at 0x%p\n", nid, NODE_DATA(nid));

        /* Find the bounds of kernel memory.  */
        start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
        end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));

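        /* Node 0 must cover the entire kernel image; if it doesn't,
           there is no way to continue booting.  */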
        if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn))
                panic("kernel loaded out of ram");

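        /* Hand this node's usable physical range over to memblock.  */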
        memblock_add(PFN_PHYS(node_min_pfn),
                     (node_max_pfn - node_min_pfn) << PAGE_SHIFT);

        /* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned.
           Note that we round this down, not up - node memory
           has much larger alignment than 8Mb, so it's safe. */
        node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1);

        NODE_DATA(nid)->node_start_pfn = node_min_pfn;
        NODE_DATA(nid)->node_present_pages = node_max_pfn - node_min_pfn;

        node_set_online(nid);
}

void __init
setup_memory(void *kernel_end)
{
        unsigned long kernel_size;
        int nid;

        show_mem_layout();

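        /* Start from a clean node map, then let each node discover its
           own memory and update the global pfn bounds.  */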
        nodes_clear(node_online_map);

        min_low_pfn = ~0UL;
        max_low_pfn = 0UL;
        for (nid = 0; nid < MAX_NUMNODES; nid++)
                setup_memory_node(nid, kernel_end);

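        /* Reserve the kernel image itself so memblock never hands it out.  */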
        kernel_size = virt_to_phys(kernel_end) - KERNEL_START_PHYS;
        memblock_reserve(KERNEL_START_PHYS, kernel_size);

#ifdef CONFIG_BLK_DEV_INITRD
        initrd_start = INITRD_START;
        if (initrd_start) {
                extern void *move_initrd(unsigned long);

                initrd_end = initrd_start+INITRD_SIZE;
                printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
                       (void *) initrd_start, INITRD_SIZE);

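                /* If the ramdisk ends beyond the last usable page, try
                   to move it below max_low_pfn and give up on it if the
                   move fails; otherwise just reserve it in place.  */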
                if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
                        if (!move_initrd(PFN_PHYS(max_low_pfn)))
                                printk("initrd extends beyond end of memory "
                                       "(0x%08lx > 0x%p)\ndisabling initrd\n",
                                       initrd_end,
                                       phys_to_virt(PFN_PHYS(max_low_pfn)));
                } else {
                        nid = kvaddr_to_nid(initrd_start);
                        memblock_reserve(virt_to_phys((void *)initrd_start),
                                         INITRD_SIZE);
                }
        }
#endif /* CONFIG_BLK_DEV_INITRD */
}

void __init paging_init(void)
{
        unsigned int    nid;
        unsigned long   zones_size[MAX_NR_ZONES] = {0, };
        unsigned long   dma_local_pfn;

        /*
         * The old global MAX_DMA_ADDRESS per-arch API doesn't fit
         * the NUMA model, so for now we convert it to a pfn and
         * interpret that pfn as local, per-node information.
         * This isn't very important since none of these machines
         * have legacy ISA slots anyway.
         */
        dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;

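        /* Pages below dma_local_pfn go to ZONE_DMA and the rest to
           ZONE_NORMAL; a node that sits entirely below the DMA limit
           is all ZONE_DMA.  */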
        for_each_online_node(nid) {
                unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn;
                unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_present_pages;

                if (dma_local_pfn >= end_pfn - start_pfn)
                        zones_size[ZONE_DMA] = end_pfn - start_pfn;
                else {
                        zones_size[ZONE_DMA] = dma_local_pfn;
                        zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn;
                }
                node_set_state(nid, N_NORMAL_MEMORY);
                free_area_init_node(nid, zones_size, start_pfn, NULL);
        }

        /* Initialize the kernel's ZERO_PGE. */
        memset((void *)ZERO_PGE, 0, PAGE_SIZE);
}
