mm/util.c


DEFINITIONS

This source file includes the following definitions:
  1. kfree_const
  2. kstrdup
  3. kstrdup_const
  4. kstrndup
  5. kmemdup
  6. kmemdup_nul
  7. memdup_user
  8. vmemdup_user
  9. strndup_user
  10. memdup_user_nul
  11. __vma_link_list
  12. vma_is_stack_for_current
  13. randomize_stack_top
  14. arch_randomize_brk
  15. arch_mmap_rnd
  16. mmap_is_legacy
  17. mmap_base
  18. arch_pick_mmap_layout
  19. arch_pick_mmap_layout
  20. __account_locked_vm
  21. account_locked_vm
  22. vm_mmap_pgoff
  23. vm_mmap
  24. kvmalloc_node
  25. kvfree
  26. kvfree_sensitive
  27. __page_rmapping
  28. page_rmapping
  29. page_mapped
  30. page_anon_vma
  31. page_mapping
  32. page_mapping_file
  33. __page_mapcount
  34. overcommit_ratio_handler
  35. overcommit_kbytes_handler
  36. vm_commit_limit
  37. vm_memory_committed
  38. __vm_enough_memory
  39. get_cmdline
  40. memcmp_pages

// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * kfree() is called only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
        if (!is_kernel_rodata((unsigned long)x))
                kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const.
 *
 * Return: the source string if it is in the .rodata section, otherwise a
 * copy allocated with kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
        if (is_kernel_rodata((unsigned long)s))
                return s;

        return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
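
/*
 * Example (editor's sketch, not part of the original source): the intended
 * pairing of kstrdup_const() with kfree_const().  The struct and functions
 * below are hypothetical.
 */
struct example_attr {
        const char *name;
};

static int example_attr_set_name(struct example_attr *attr, const char *name)
{
        /* A pointer into .rodata is returned as-is; anything else is
         * duplicated with kstrdup(). */
        attr->name = kstrdup_const(name, GFP_KERNEL);
        return attr->name ? 0 : -ENOMEM;
}

static void example_attr_release(struct example_attr *attr)
{
        /* Frees only if the pointer is not in .rodata. */
        kfree_const(attr->name);
}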

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
        char *buf;

        if (!s)
                return NULL;

        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
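
/*
 * Example (editor's sketch, hypothetical helper): kmemdup_nul() turns a
 * counted, possibly unterminated byte range - e.g. a token from a parser -
 * into a regular C string.
 */
static char *example_token_to_string(const char *tok, size_t tok_len)
{
        /* tok[0..tok_len) need not be NUL-terminated; the copy always is. */
        return kmemdup_nul(tok, tok_len, GFP_KERNEL);
}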

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);
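
/*
 * Example (editor's sketch): a typical memdup_user() call site, e.g. in an
 * ioctl handler.  struct example_req and the handler are hypothetical.
 */
struct example_req {
        u32 flags;
        u64 addr;
};

static int example_ioctl_copy(const void __user *arg)
{
        struct example_req *req;

        req = memdup_user(arg, sizeof(*req));
        if (IS_ERR(req))
                return PTR_ERR(req);    /* -ENOMEM or -EFAULT */

        /* ... validate and act on req ... */
        kfree(req);
        return 0;
}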

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may not be
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kvmalloc(len, GFP_USER);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kvfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);
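
/*
 * Example (editor's sketch): bounding a user-supplied string, as a syscall
 * might.  EXAMPLE_NAME_MAX and the function are hypothetical.
 */
#define EXAMPLE_NAME_MAX 256

static int example_set_name(const char __user *uname)
{
        char *name;

        name = strndup_user(uname, EXAMPLE_NAME_MAX);
        if (IS_ERR(name))
                return PTR_ERR(name);   /* -EFAULT, -EINVAL or -ENOMEM */

        /* ... name is NUL-terminated and at most EXAMPLE_NAME_MAX bytes ... */
        kfree(name);
        return 0;
}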

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
        char *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause a page fault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len + 1, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }
        p[len] = '\0';

        return p;
}
EXPORT_SYMBOL(memdup_user_nul);
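
/*
 * Example (editor's sketch): the common pattern for write() handlers of
 * procfs/sysfs-style files, where @count user bytes become a NUL-terminated
 * kernel string.  The handler below is hypothetical.
 */
static ssize_t example_write(struct file *file, const char __user *ubuf,
                             size_t count, loff_t *ppos)
{
        char *kbuf;

        kbuf = memdup_user_nul(ubuf, count);
        if (IS_ERR(kbuf))
                return PTR_ERR(kbuf);

        /* ... parse kbuf with string helpers such as kstrtoul() ... */
        kfree(kbuf);
        return count;
}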

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
                        next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
                        next = NULL;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
        struct task_struct * __maybe_unused t = current;

        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
        unsigned long random_variable = 0;

        if (current->flags & PF_RANDOMIZE) {
                random_variable = get_random_long();
                random_variable &= STACK_RND_MASK;
                random_variable <<= PAGE_SHIFT;
        }
#ifdef CONFIG_STACK_GROWSUP
        return PAGE_ALIGN(stack_top) + random_variable;
#else
        return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
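
/*
 * Worked example (editor's note): with 4 KiB pages PAGE_SHIFT is 12, so the
 * default STACK_RND_MASK is 0x7ff and the offset above is at most
 * 0x7ff << 12 = 0x7ff000 bytes, i.e. the stack top is shifted by up to
 * 8 MiB - 4 KiB, always in whole pages.
 */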

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        /* Is the current task 32-bit? */
        if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
                return randomize_page(mm->brk, SZ_32M);

        return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
        unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
        if (is_compat_task())
                rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
        else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
                rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

        return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;

        if (rlim_stack->rlim_cur == RLIM_INFINITY)
                return 1;

        return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP         (SZ_128M)
#define MAX_GAP         (STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
        unsigned long gap = rlim_stack->rlim_cur;
        unsigned long pad = stack_guard_gap;

        /* Account for stack randomization if necessary */
        if (current->flags & PF_RANDOMIZE)
                pad += (STACK_RND_MASK << PAGE_SHIFT);

        /* Values close to RLIM_INFINITY can overflow. */
        if (gap + pad > gap)
                gap += pad;

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE)
                random_factor = arch_mmap_rnd();

        if (mmap_is_legacy(rlim_stack)) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base(random_factor, rlim_stack);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_sem is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
                        struct task_struct *task, bool bypass_rlim)
{
        unsigned long locked_vm, limit;
        int ret = 0;

        lockdep_assert_held_write(&mm->mmap_sem);

        locked_vm = mm->locked_vm;
        if (inc) {
                if (!bypass_rlim) {
                        limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                        if (locked_vm + pages > limit)
                                ret = -ENOMEM;
                }
                if (!ret)
                        mm->locked_vm = locked_vm + pages;
        } else {
                WARN_ON_ONCE(pages > locked_vm);
                mm->locked_vm = locked_vm - pages;
        }

        pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
                 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
                 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
                 ret ? " - exceeded" : "");

        return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
        int ret;

        if (pages == 0 || !mm)
                return 0;

        down_write(&mm->mmap_sem);
        ret = __account_locked_vm(mm, pages, inc, current,
                                  capable(CAP_IPC_LOCK));
        up_write(&mm->mmap_sem);

        return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
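
/*
 * Example (editor's sketch): how a driver that pins user memory might charge
 * it against RLIMIT_MEMLOCK, loosely modelled on callers such as vfio.  The
 * actual pinning is elided.
 */
static int example_pin_user_range(struct mm_struct *mm, unsigned long npages)
{
        int ret;

        ret = account_locked_vm(mm, npages, true);      /* charge */
        if (ret)
                return ret;     /* -ENOMEM: RLIMIT_MEMLOCK would be exceeded */

        /*
         * ... pin the pages here; if that fails, undo the charge with
         * account_locked_vm(mm, npages, false) before returning ...
         */
        return 0;
}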

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;
        unsigned long populate;
        LIST_HEAD(uf);

        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                if (down_write_killable(&mm->mmap_sem))
                        return -EINTR;
                ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
                                    &populate, &uf);
                up_write(&mm->mmap_sem);
                userfaultfd_unmap_complete(mm, &uf);
                if (populate)
                        mm_populate(ret, populate);
        }
        return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        if (unlikely(offset + PAGE_ALIGN(len) < offset))
                return -EINVAL;
        if (unlikely(offset_in_page(offset)))
                return -EINVAL;

        return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that any gfp mask that is not fully compatible with GFP_KERNEL
 * is served by kmalloc alone and never falls back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
        gfp_t kmalloc_flags = flags;
        void *ret;

        /*
         * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page
         * tables), so the given set of flags has to be compatible.
         */
        if ((flags & GFP_KERNEL) != GFP_KERNEL)
                return kmalloc_node(size, flags, node);

        /*
         * We want to attempt a large physically contiguous block first because
         * it is less likely to fragment multiple larger blocks, and therefore
         * contributes less to long-term fragmentation than the vmalloc
         * fallback would.  However, make sure that larger requests are not
         * too disruptive - no OOM killer and no allocation failure warnings,
         * as we have a fallback.
         */
        if (size > PAGE_SIZE) {
                kmalloc_flags |= __GFP_NOWARN;

                if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
                        kmalloc_flags |= __GFP_NORETRY;
        }

        ret = kmalloc_node(size, kmalloc_flags, node);

        /*
         * It doesn't really make sense to fall back to vmalloc for sub-page
         * requests
         */
        if (ret || size <= PAGE_SIZE)
                return ret;

        return __vmalloc_node_flags_caller(size, node, flags,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
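
/*
 * Example (editor's sketch): a size-dependent table that does not need to be
 * physically contiguous, so the vmalloc fallback is acceptable.
 * kvmalloc_array() adds overflow checking and ends up in kvmalloc_node()
 * via kvmalloc(); the names below are hypothetical.
 */
static u64 *example_alloc_table(size_t nentries)
{
        /* May come from kmalloc or vmalloc; free with kvfree() either way. */
        return kvmalloc_array(nentries, sizeof(u64), GFP_KERNEL | __GFP_ZERO);
}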

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
        if (likely(!ZERO_OR_NULL_PTR(addr))) {
                memzero_explicit((void *)addr, len);
                kvfree(addr);
        }
}
EXPORT_SYMBOL(kvfree_sensitive);
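
/*
 * Example (editor's sketch): releasing key material, in the spirit of the
 * keyring code this helper serves.  The payload is hypothetical.
 */
static void example_destroy_secret(void *secret, size_t len)
{
        /* Zeroing is done with memzero_explicit(), so the compiler cannot
         * elide it even though the buffer is about to be freed. */
        kvfree_sensitive(secret, len);
}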

static inline void *__page_rmapping(struct page *page)
{
        unsigned long mapping;

        mapping = (unsigned long)page->mapping;
        mapping &= ~PAGE_MAPPING_FLAGS;

        return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
        page = compound_head(page);
        return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
        int i;

        if (likely(!PageCompound(page)))
                return atomic_read(&page->_mapcount) >= 0;
        page = compound_head(page);
        if (atomic_read(compound_mapcount_ptr(page)) >= 0)
                return true;
        if (PageHuge(page))
                return false;
        for (i = 0; i < compound_nr(page); i++) {
                if (atomic_read(&page[i]._mapcount) >= 0)
                        return true;
        }
        return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
        unsigned long mapping;

        page = compound_head(page);
        mapping = (unsigned long)page->mapping;
        if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;
        return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
        struct address_space *mapping;

        page = compound_head(page);

        /* This happens if someone calls flush_dcache_page on slab page */
        if (unlikely(PageSlab(page)))
                return NULL;

        if (unlikely(PageSwapCache(page))) {
                swp_entry_t entry;

                entry.val = page_private(page);
                return swap_address_space(entry);
        }

        mapping = page->mapping;
        if ((unsigned long)mapping & PAGE_MAPPING_ANON)
                return NULL;

        return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
        if (unlikely(PageSwapCache(page)))
                return NULL;
        return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
        int ret;

        ret = atomic_read(&page->_mapcount) + 1;
        /*
         * For file THP, page->_mapcount contains the total number of
         * mappings of the page: no need to look into compound_mapcount.
         */
        if (!PageAnon(page) && !PageHuge(page))
                return ret;
        page = compound_head(page);
        ret += atomic_read(compound_mapcount_ptr(page)) + 1;
        if (PageDoubleMap(page))
                ret--;
        return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
{
        int ret;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_kbytes = 0;
        return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_ratio = 0;
        return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
        unsigned long allowed;

        if (sysctl_overcommit_kbytes)
                allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
        else
                allowed = ((totalram_pages() - hugetlb_total_pages())
                           * sysctl_overcommit_ratio / 100);
        allowed += total_swap_pages;

        return allowed;
}
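
/*
 * Worked example (editor's note): with 8 GiB of RAM, no huge pages, 2 GiB of
 * swap and the default overcommit_ratio of 50, the OVERCOMMIT_NEVER limit is
 * 8 GiB * 50 / 100 + 2 GiB = 6 GiB.  A non-zero overcommit_kbytes replaces
 * the ratio-based term entirely.
 */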

/*
 * Make sure vm_committed_as is in a cacheline of its own, not shared with
 * other variables.  It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
        return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
        long allowed;

        VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
                        -(s64)vm_committed_as_batch * num_online_cpus(),
                        "memory commitment underflow");

        vm_acct_memory(pages);

        /*
         * Sometimes we want to use more memory than we have
         */
        if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
                return 0;

        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                if (pages > totalram_pages() + total_swap_pages)
                        goto error;
                return 0;
        }

        allowed = vm_commit_limit();
        /*
         * Reserve some for root
         */
        if (!cap_sys_admin)
                allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

        /*
         * Don't let a single process grow so big a user can't recover
         */
        if (mm) {
                long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

                allowed -= min_t(long, mm->total_vm / 32, reserve);
        }

        if (percpu_counter_read_positive(&vm_committed_as) < allowed)
                return 0;
error:
        vm_unacct_memory(pages);

        return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy is
 * not guaranteed to end in a NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
        int res = 0;
        unsigned int len;
        struct mm_struct *mm = get_task_mm(task);
        unsigned long arg_start, arg_end, env_start, env_end;
        if (!mm)
                goto out;
        if (!mm->arg_end)
                goto out_mm;    /* Shh! No looking before we're done */

        spin_lock(&mm->arg_lock);
        arg_start = mm->arg_start;
        arg_end = mm->arg_end;
        env_start = mm->env_start;
        env_end = mm->env_end;
        spin_unlock(&mm->arg_lock);

        len = arg_end - arg_start;

        if (len > buflen)
                len = buflen;

        res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

        /*
         * If the NUL at the end of args has been overwritten, then
         * assume application is using setproctitle(3).
         */
        if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
                len = strnlen(buffer, res);
                if (len < res) {
                        res = len;
                } else {
                        len = env_end - env_start;
                        if (len > buflen - res)
                                len = buflen - res;
                        res += access_process_vm(task, env_start,
                                                 buffer+res, len,
                                                 FOLL_FORCE);
                        res = strnlen(buffer, res);
                }
        }
out_mm:
        mmput(mm);
out:
        return res;
}

int memcmp_pages(struct page *page1, struct page *page2)
{
        char *addr1, *addr2;
        int ret;

        addr1 = kmap_atomic(page1);
        addr2 = kmap_atomic(page2);
        ret = memcmp(addr1, addr2, PAGE_SIZE);
        kunmap_atomic(addr2);
        kunmap_atomic(addr1);
        return ret;
}
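
/*
 * Example (editor's sketch): the typical memcmp_pages() question, as asked
 * by deduplication code such as KSM: do two pages hold identical data?
 */
static bool example_pages_identical(struct page *page1, struct page *page2)
{
        return memcmp_pages(page1, page2) == 0;
}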
