root/arch/s390/mm/mmap.c

DEFINITIONS

This source file includes the following definitions.
  1. stack_maxrandom_size
  2. mmap_is_legacy
  3. arch_mmap_rnd
  4. mmap_base_legacy
  5. mmap_base
  6. arch_get_unmapped_area
  7. arch_get_unmapped_area_topdown
  8. arch_pick_mmap_layout

// SPDX-License-Identifier: GPL-2.0+
/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>

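/*
 * Maximum number of bytes the stack start may be shifted by for
 * randomization; 0 if randomization is disabled for this task.
 */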
static unsigned long stack_maxrandom_size(void)
{
        if (!(current->flags & PF_RANDOMIZE))
                return 0;
        return STACK_RND_MASK << PAGE_SHIFT;
}

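/*
 * Use the legacy bottom-up layout if the personality asks for it,
 * the stack rlimit is unlimited, or the legacy_va_layout sysctl is set.
 */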
static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;
        if (rlim_stack->rlim_cur == RLIM_INFINITY)
                return 1;
        return sysctl_legacy_va_layout;
}

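/* Random, page-aligned offset used to randomize the mmap base. */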
unsigned long arch_mmap_rnd(void)
{
        return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}

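/* Bottom-up layout: the mmap area starts just above TASK_UNMAPPED_BASE. */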
static unsigned long mmap_base_legacy(unsigned long rnd)
{
        return TASK_UNMAPPED_BASE + rnd;
}

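/*
 * Top-down layout: place the mmap base below the stack, leaving a gap
 * derived from the stack rlimit plus guard and randomization padding,
 * clamped between ~32 MB and 5/6 of STACK_TOP.
 */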
static inline unsigned long mmap_base(unsigned long rnd,
                                      struct rlimit *rlim_stack)
{
        unsigned long gap = rlim_stack->rlim_cur;
        unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
        unsigned long gap_min, gap_max;

        /* Values close to RLIM_INFINITY can overflow. */
        if (gap + pad > gap)
                gap += pad;

        /*
         * Top of mmap area (just below the process stack).
         * Leave at least a ~32 MB hole.
         */
        gap_min = 32 * 1024 * 1024UL;
        gap_max = (STACK_TOP / 6) * 5;

        if (gap < gap_min)
                gap = gap_min;
        else if (gap > gap_max)
                gap = gap_max;

        return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

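/*
 * Bottom-up search for a free area.  A hint address is used if it leaves
 * enough room; crst_table_upgrade() raises the asce_limit if the mapping
 * would end above it.
 */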
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct vm_unmapped_area_info info;
        int rc;

        if (len > TASK_SIZE - mmap_min_addr)
                return -ENOMEM;

        if (flags & MAP_FIXED)
                goto check_asce_limit;

        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        goto check_asce_limit;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
        if (filp || (flags & MAP_SHARED))
                info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
        else
                info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        addr = vm_unmapped_area(&info);
        if (addr & ~PAGE_MASK)
                return addr;

check_asce_limit:
        if (addr + len > current->mm->context.asce_limit &&
            addr + len <= TASK_SIZE) {
                rc = crst_table_upgrade(mm, addr + len);
                if (rc)
                        return (unsigned long) rc;
        }

        return addr;
}

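/*
 * Top-down search for a free area below mm->mmap_base, falling back to a
 * bottom-up search if no fitting hole is found.
 */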
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;
        int rc;

        /* requested length too big for entire address space */
        if (len > TASK_SIZE - mmap_min_addr)
                return -ENOMEM;

        if (flags & MAP_FIXED)
                goto check_asce_limit;

        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
                                (!vma || addr + len <= vm_start_gap(vma)))
                        goto check_asce_limit;
        }

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = max(PAGE_SIZE, mmap_min_addr);
        info.high_limit = mm->mmap_base;
        if (filp || (flags & MAP_SHARED))
                info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
        else
                info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
                if (addr & ~PAGE_MASK)
                        return addr;
        }

check_asce_limit:
        if (addr + len > current->mm->context.asce_limit &&
            addr + len <= TASK_SIZE) {
                rc = crst_table_upgrade(mm, addr + len);
                if (rc)
                        return (unsigned long) rc;
        }

        return addr;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE)
                random_factor = arch_mmap_rnd();

        /*
         * Fall back to the standard layout if the personality
         * bit is set, or if the expected stack growth is unlimited:
         */
        if (mmap_is_legacy(rlim_stack)) {
                mm->mmap_base = mmap_base_legacy(random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base(random_factor, rlim_stack);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
}
