This source file includes the following definitions:
- memblock_discard
- memblock_is_hotpluggable
- memblock_is_mirror
- memblock_is_nomap
- memblock_set_region_node
- memblock_get_region_node
- memblock_phys_alloc
- memblock_alloc
- memblock_alloc_raw
- memblock_alloc_from
- memblock_alloc_low
- memblock_alloc_node
- memblock_free_early
- memblock_free_early_nid
- memblock_free_late
- memblock_set_bottom_up
- memblock_bottom_up
- memblock_dump_all
- memblock_region_memory_base_pfn
- memblock_region_memory_end_pfn
- memblock_region_reserved_base_pfn
- memblock_region_reserved_end_pfn
- early_memtest
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
        MEMBLOCK_NONE           = 0x0,  /* No special request */
        MEMBLOCK_HOTPLUG        = 0x1,  /* hotpluggable region */
        MEMBLOCK_MIRROR         = 0x2,  /* mirrored region */
        MEMBLOCK_NOMAP          = 0x4,  /* don't add to kernel direct mapping */
};

/**
 * struct memblock_region - represents a memory region
 * @base: physical address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
        phys_addr_t base;
        phys_addr_t size;
        enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
        int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
        unsigned long cnt;
        unsigned long max;
        phys_addr_t total_size;
        struct memblock_region *regions;
        char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 * @physmem: all physical memory
 */
struct memblock {
        bool bottom_up;  /* is bottom up direction? */
        phys_addr_t current_limit;
        struct memblock_type memory;
        struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
        struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

#define memblock_dbg(fmt, ...) \
        if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
                                   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
                              phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
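
/*
 * Usage sketch (illustrative, not from this header): early boot code
 * registering a range from a firmware-provided memory map and marking an
 * attribute on part of it. "base", "size" and "mirrored_size" are
 * hypothetical values.
 *
 *      memblock_add(base, size);
 *      memblock_mark_mirror(base, mirrored_size);
 */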

unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

/* Low level functions */
int memblock_add_range(struct memblock_type *type,
                       phys_addr_t base, phys_addr_t size,
                       int nid, enum memblock_flags flags);

void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
                      struct memblock_type *type_a,
                      struct memblock_type *type_b, phys_addr_t *out_start,
                      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
                          struct memblock_type *type_a,
                          struct memblock_type *type_b, phys_addr_t *out_start,
                          phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
                                phys_addr_t *out_end);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

/**
 * for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,               \
                           p_start, p_end, p_nid)                       \
        for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,    \
                                     p_start, p_end, p_nid);            \
             i != (u64)ULLONG_MAX;                                      \
             __next_mem_range(&i, nid, flags, type_a, type_b,           \
                              p_start, p_end, p_nid))

/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,           \
                               p_start, p_end, p_nid)                   \
        for (i = (u64)ULLONG_MAX,                                       \
                     __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
                                          p_start, p_end, p_nid);       \
             i != (u64)ULLONG_MAX;                                      \
             __next_mem_range_rev(&i, nid, flags, type_a, type_b,       \
                                  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)                 \
        for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);   \
             i != (u64)ULLONG_MAX;                                      \
             __next_reserved_mem_region(&i, p_start, p_end))
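
/*
 * Usage sketch (illustrative): walking every reserved range, e.g. to
 * report early reservations. The variable names are hypothetical.
 *
 *      phys_addr_t start, end;
 *      u64 i;
 *
 *      for_each_reserved_mem_region(i, &start, &end)
 *              pr_info("reserved: %pa..%pa\n", &start, &end);
 */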

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_NOMAP;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
                            unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
                          unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)           \
        for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
             i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
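
/*
 * Usage sketch (illustrative): visiting configured pfn ranges on all
 * nodes; requires CONFIG_HAVE_MEMBLOCK_NODE_MAP. Names are hypothetical.
 *
 *      unsigned long start_pfn, end_pfn;
 *      int i, nid;
 *
 *      for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *              pr_info("node %d: pfns %lu..%lu\n", nid, start_pfn, end_pfn);
 */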

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
                                  unsigned long *out_spfn,
                                  unsigned long *out_epfn);

/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific
 * free memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and the zone have been initialized, i.e.
 * after the zone's start, end, and pgdat have been associated.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)    \
        for (i = 0,                                                     \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);    \
             i != U64_MAX;                                              \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from the current position of @i.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
        for (; i != U64_MAX;                                            \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)   \
        for_each_mem_range(i, &memblock.memory, &memblock.reserved,     \
                           nid, flags, p_start, p_end, p_nid)
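
/*
 * Usage sketch (illustrative): scanning all free ranges on all nodes,
 * the common pattern behind early allocators. Names are hypothetical.
 *
 *      phys_addr_t start, end;
 *      u64 i;
 *
 *      for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *                              &start, &end, NULL)
 *              pr_info("free: %pa..%pa\n", &start, &end);
 */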

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,  \
                                        p_nid)                          \
        for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
                               nid, flags, p_start, p_end, p_nid)

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
                      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
        r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
        return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
        return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE       0
#define MEMBLOCK_ALLOC_KASAN            1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
                                      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
                                              phys_addr_t align)
{
        return memblock_phys_alloc_range(size, align, 0,
                                         MEMBLOCK_ALLOC_ACCESSIBLE);
}

void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
                                 phys_addr_t min_addr, phys_addr_t max_addr,
                                 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
                             phys_addr_t min_addr, phys_addr_t max_addr,
                             int nid);

static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_raw(phys_addr_t size,
                                               phys_addr_t align)
{
        return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
                                          MEMBLOCK_ALLOC_ACCESSIBLE,
                                          NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_from(phys_addr_t size,
                                                phys_addr_t align,
                                                phys_addr_t min_addr)
{
        return memblock_alloc_try_nid(size, align, min_addr,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_low(phys_addr_t size,
                                               phys_addr_t align)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_node(phys_addr_t size,
                                                phys_addr_t align, int nid)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
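
/*
 * Usage sketch (illustrative): the common early-boot pattern of
 * allocating a zeroed table and panicking on failure, as many callers
 * of memblock_alloc() do. "table" and "nentries" are hypothetical.
 *
 *      table = memblock_alloc(nentries * sizeof(*table), SMP_CACHE_BYTES);
 *      if (!table)
 *              panic("%s: Failed to allocate table\n", __func__);
 */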

static inline void __init memblock_free_early(phys_addr_t base,
                                              phys_addr_t size)
{
        memblock_free(base, size);
}

static inline void __init memblock_free_early_nid(phys_addr_t base,
                                                  phys_addr_t size, int nid)
{
        memblock_free(base, size);
}

static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
        __memblock_free_late(base, size);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
        memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * if this is true, that said, memblock will allocate memory
 * in bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
        return memblock.bottom_up;
}

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
        if (memblock_debug)
                __memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. This accessor attempts to provide a very clear
 * idea of what they return for such non aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
        return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
        return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
        return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
        return PFN_UP(reg->base + reg->size);
}

#define for_each_memblock(memblock_type, region)                                        \
        for (region = memblock.memblock_type.regions;                                   \
             region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);    \
             region++)

#define for_each_memblock_type(i, memblock_type, rgn)                   \
        for (i = 0, rgn = &memblock_type->regions[0];                   \
             i < memblock_type->cnt;                                    \
             i++, rgn = &memblock_type->regions[i])
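
/*
 * Usage sketch (illustrative): walking every region of the "memory" type
 * and printing its pfn span via the accessors above. "reg" is a
 * hypothetical loop variable.
 *
 *      struct memblock_region *reg;
 *
 *      for_each_memblock(memory, reg)
 *              pr_info("pfns %lu..%lu\n",
 *                      memblock_region_memory_base_pfn(reg),
 *                      memblock_region_memory_end_pfn(reg));
 */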

extern void *alloc_large_system_hash(const char *tablename,
                                     unsigned long bucketsize,
                                     unsigned long numentries,
                                     int scale,
                                     int flags,
                                     unsigned int *_hash_shift,
                                     unsigned int *_hash_mask,
                                     unsigned long low_limit,
                                     unsigned long high_limit);

#define HASH_EARLY      0x00000001      /* Allocating during early boot? */
#define HASH_SMALL      0x00000002      /* sub-page allocation allowed, min
                                         * shift passed via *_hash_shift */
#define HASH_ZERO       0x00000004      /* Zero allocated hash table */
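
/*
 * Usage sketch (illustrative): early sizing of a system hash table,
 * modeled on callers such as the inode and dentry caches. All names
 * except alloc_large_system_hash() and the HASH_* flags are
 * hypothetical, and the scale value 14 is only an example.
 *
 *      struct hlist_head *my_table;
 *      unsigned int my_hash_shift;
 *
 *      my_table = alloc_large_system_hash("my-table",
 *                                         sizeof(struct hlist_head),
 *                                         0, 14, HASH_EARLY | HASH_ZERO,
 *                                         &my_hash_shift, NULL, 0, 0);
 */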

/*
 * Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;            /* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */