root/arch/powerpc/mm/slice.c

DEFINITIONS

This source file includes the following definitions:
  1. slice_print_mask
  2. slice_print_mask
  3. slice_addr_is_low
  4. slice_range_to_mask
  5. slice_area_is_free
  6. slice_low_has_vma
  7. slice_high_has_vma
  8. slice_mask_for_free
  9. slice_check_range_fits
  10. slice_flush_segments
  11. slice_convert
  12. slice_scan_available
  13. slice_find_area_bottomup
  14. slice_find_area_topdown
  15. slice_find_area
  16. slice_copy_mask
  17. slice_or_mask
  18. slice_andnot_mask
  19. slice_get_unmapped_area
  20. arch_get_unmapped_area
  21. arch_get_unmapped_area_topdown
  22. get_slice_psize
  23. slice_init_new_context_exec
  24. slice_setup_new_exec
  25. slice_set_range_psize
  26. slice_is_hugepage_only_range

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>
#include <asm/mmu_context.h>

static DEFINE_SPINLOCK(slice_convert_lock);

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, const struct slice_mask *mask)
{
        if (!_slice_debug)
                return;
        pr_devel("%s low_slice: %*pbl\n", label,
                        (int)SLICE_NUM_LOW, &mask->low_slices);
        pr_devel("%s high_slice: %*pbl\n", label,
                        (int)SLICE_NUM_HIGH, mask->high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
#define slice_dbg(fmt...)

#endif

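/*
 * Addresses below SLICE_LOW_TOP are covered by the low slices bitmap;
 * everything at or above it is covered by the high slices bitmap.
 */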
static inline notrace bool slice_addr_is_low(unsigned long addr)
{
        u64 tmp = (u64)addr;

        return tmp < SLICE_LOW_TOP;
}

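/*
 * Build the slice mask covering every slice touched by the range
 * [start, start + len).
 */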
static void slice_range_to_mask(unsigned long start, unsigned long len,
                                struct slice_mask *ret)
{
        unsigned long end = start + len - 1;

        ret->low_slices = 0;
        if (SLICE_NUM_HIGH)
                bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

        if (slice_addr_is_low(start)) {
                unsigned long mend = min(end,
                                         (unsigned long)(SLICE_LOW_TOP - 1));

                ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
                        - (1u << GET_LOW_SLICE_INDEX(start));
        }

        if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
                unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
                unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
                unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

                bitmap_set(ret->high_slices, start_index, count);
        }
}

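/*
 * Return non-zero if [addr, addr + len) is below the address space
 * limit and does not overlap any VMA (including the stack guard gap).
 */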
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
                              unsigned long len)
{
        struct vm_area_struct *vma;

        if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
                return 0;
        vma = find_vma(mm, addr);
        return (!vma || (addr + len) <= vm_start_gap(vma));
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
        return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
                                   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
        unsigned long start = slice << SLICE_HIGH_SHIFT;
        unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

        /* Hack: so that each address is controlled by exactly one
         * of the high or low area bitmaps, the first high area starts
         * at 4GB, not 0 */
        if (start == 0)
                start = (unsigned long)SLICE_LOW_TOP;

        return !slice_area_is_free(mm, start, end - start);
}

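/*
 * Build the mask of slices below high_limit that contain no VMAs and
 * are therefore free to be converted to another page size.
 */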
static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
                                unsigned long high_limit)
{
        unsigned long i;

        ret->low_slices = 0;
        if (SLICE_NUM_HIGH)
                bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (!slice_low_has_vma(mm, i))
                        ret->low_slices |= 1u << i;

        if (slice_addr_is_low(high_limit - 1))
                return;

        for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
                if (!slice_high_has_vma(mm, i))
                        __set_bit(i, ret->high_slices);
}

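/*
 * Return true if every slice covered by [start, start + len) is set
 * in the 'available' mask.
 */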
static bool slice_check_range_fits(struct mm_struct *mm,
                           const struct slice_mask *available,
                           unsigned long start, unsigned long len)
{
        unsigned long end = start + len - 1;
        u64 low_slices = 0;

        if (slice_addr_is_low(start)) {
                unsigned long mend = min(end,
                                         (unsigned long)(SLICE_LOW_TOP - 1));

                low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
                                - (1u << GET_LOW_SLICE_INDEX(start));
        }
        if ((low_slices & available->low_slices) != low_slices)
                return false;

        if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
                unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
                unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
                unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
                unsigned long i;

                for (i = start_index; i < start_index + count; i++) {
                        if (!test_bit(i, available->high_slices))
                                return false;
                }
        }

        return true;
}

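/*
 * IPI handler: if this CPU is running the mm whose slice map changed,
 * refresh the paca's cached copy of the context and flush the SLB.
 */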
static void slice_flush_segments(void *parm)
{
#ifdef CONFIG_PPC64
        struct mm_struct *mm = parm;
        unsigned long flags;

        if (mm != current->active_mm)
                return;

        copy_mm_to_paca(current->active_mm);

        local_irq_save(flags);
        slb_flush_and_restore_bolted();
        local_irq_restore(flags);
#endif
}

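/*
 * Set every slice in 'mask' to 'psize': update the per-slice psize
 * arrays and move the slices between the cached per-size masks.
 */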
static void slice_convert(struct mm_struct *mm,
                                const struct slice_mask *mask, int psize)
{
        int index, mask_index;
        /* Write the new slice psize bits */
        unsigned char *hpsizes, *lpsizes;
        struct slice_mask *psize_mask, *old_mask;
        unsigned long i, flags;
        int old_psize;

        slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
        slice_print_mask(" mask", mask);

        psize_mask = slice_mask_for_size(&mm->context, psize);

        /* We need to use a spinlock here to protect against
         * concurrent 64k -> 4k demotion ...
         */
        spin_lock_irqsave(&slice_convert_lock, flags);

        lpsizes = mm_ctx_low_slices(&mm->context);
        for (i = 0; i < SLICE_NUM_LOW; i++) {
                if (!(mask->low_slices & (1u << i)))
                        continue;

                mask_index = i & 0x1;
                index = i >> 1;

                /* Update the slice_mask */
                old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
                old_mask = slice_mask_for_size(&mm->context, old_psize);
                old_mask->low_slices &= ~(1u << i);
                psize_mask->low_slices |= 1u << i;

                /* Update the sizes array */
                lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
                                (((unsigned long)psize) << (mask_index * 4));
        }

        hpsizes = mm_ctx_high_slices(&mm->context);
        for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) {
                if (!test_bit(i, mask->high_slices))
                        continue;

                mask_index = i & 0x1;
                index = i >> 1;

                /* Update the slice_mask */
                old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
                old_mask = slice_mask_for_size(&mm->context, old_psize);
                __clear_bit(i, old_mask->high_slices);
                __set_bit(i, psize_mask->high_slices);

                /* Update the sizes array */
                hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
                                (((unsigned long)psize) << (mask_index * 4));
        }

        slice_dbg(" lsps=%lx, hsps=%lx\n",
                  (unsigned long)mm_ctx_low_slices(&mm->context),
                  (unsigned long)mm_ctx_high_slices(&mm->context));

        spin_unlock_irqrestore(&slice_convert_lock, flags);

        copro_flush_all_slbs(mm);
}

/*
 * Compute which slice 'addr' is part of, set *boundary_addr to the
 * start or end boundary of that slice (depending on the 'end'
 * parameter), and return a boolean indicating whether the slice is
 * marked as available in the 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
                                 const struct slice_mask *available,
                                 int end, unsigned long *boundary_addr)
{
        unsigned long slice;

        if (slice_addr_is_low(addr)) {
                slice = GET_LOW_SLICE_INDEX(addr);
                *boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
                return !!(available->low_slices & (1u << slice));
        } else {
                slice = GET_HIGH_SLICE_INDEX(addr);
                *boundary_addr = (slice + end) ?
                        ((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
                return !!test_bit(slice, available->high_slices);
        }
}

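/*
 * Walk upward from TASK_UNMAPPED_BASE, growing each candidate window
 * across consecutive available slices, and ask vm_unmapped_area() for
 * a fit within each window.
 */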
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
                                              unsigned long len,
                                              const struct slice_mask *available,
                                              int psize, unsigned long high_limit)
{
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long addr, found, next_end;
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
        info.align_offset = 0;

        addr = TASK_UNMAPPED_BASE;
        /*
         * Search up to the maximum address allowed for this mmap request.
         */
        while (addr < high_limit) {
                info.low_limit = addr;
                if (!slice_scan_available(addr, available, 1, &addr))
                        continue;

 next_slice:
                /*
                 * At this point [info.low_limit; addr) covers
                 * available slices only and ends at a slice boundary.
                 * Check if we need to reduce the range, or if we can
                 * extend it to cover the next available slice.
                 */
                if (addr >= high_limit)
                        addr = high_limit;
                else if (slice_scan_available(addr, available, 1, &next_end)) {
                        addr = next_end;
                        goto next_slice;
                }
                info.high_limit = addr;

                found = vm_unmapped_area(&info);
                if (!(found & ~PAGE_MASK))
                        return found;
        }

        return -ENOMEM;
}

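/*
 * Walk downward from mm->mmap_base, growing each candidate window
 * across consecutive available slices; fall back to a bottom-up
 * search if nothing fits.
 */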
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
                                             unsigned long len,
                                             const struct slice_mask *available,
                                             int psize, unsigned long high_limit)
{
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long addr, found, prev;
        struct vm_unmapped_area_info info;
        unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
        info.align_offset = 0;

        addr = mm->mmap_base;
        /*
         * If we are trying to allocate above DEFAULT_MAP_WINDOW,
         * add the difference to mmap_base. Only apply this to
         * requests whose high_limit is above DEFAULT_MAP_WINDOW.
         */
        if (high_limit > DEFAULT_MAP_WINDOW)
                addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW;

        while (addr > min_addr) {
                info.high_limit = addr;
                if (!slice_scan_available(addr - 1, available, 0, &addr))
                        continue;

 prev_slice:
                /*
                 * At this point [addr; info.high_limit) covers
                 * available slices only and starts at a slice boundary.
                 * Check if we need to reduce the range, or if we can
                 * extend it to cover the previous available slice.
                 */
                if (addr < min_addr)
                        addr = min_addr;
                else if (slice_scan_available(addr - 1, available, 0, &prev)) {
                        addr = prev;
                        goto prev_slice;
                }
                info.low_limit = addr;

                found = vm_unmapped_area(&info);
                if (!(found & ~PAGE_MASK))
                        return found;
        }

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        return slice_find_area_bottomup(mm, len, available, psize, high_limit);
}

static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
                                     const struct slice_mask *mask, int psize,
                                     int topdown, unsigned long high_limit)
{
        if (topdown)
                return slice_find_area_topdown(mm, len, mask, psize, high_limit);
        else
                return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
}

static inline void slice_copy_mask(struct slice_mask *dst,
                                        const struct slice_mask *src)
{
        dst->low_slices = src->low_slices;
        if (!SLICE_NUM_HIGH)
                return;
        bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_or_mask(struct slice_mask *dst,
                                        const struct slice_mask *src1,
                                        const struct slice_mask *src2)
{
        dst->low_slices = src1->low_slices | src2->low_slices;
        if (!SLICE_NUM_HIGH)
                return;
        bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst,
                                        const struct slice_mask *src1,
                                        const struct slice_mask *src2)
{
        dst->low_slices = src1->low_slices & ~src2->low_slices;
        if (!SLICE_NUM_HIGH)
                return;
        bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE   MMU_PAGE_64K
#else
#define MMU_PAGE_BASE   MMU_PAGE_4K
#endif

unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
                                      unsigned long flags, unsigned int psize,
                                      int topdown)
{
        struct slice_mask good_mask;
        struct slice_mask potential_mask;
        const struct slice_mask *maskp;
        const struct slice_mask *compat_maskp = NULL;
        int fixed = (flags & MAP_FIXED);
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long page_size = 1UL << pshift;
        struct mm_struct *mm = current->mm;
        unsigned long newaddr;
        unsigned long high_limit;

        high_limit = DEFAULT_MAP_WINDOW;
        if (addr >= high_limit || (fixed && (addr + len > high_limit)))
                high_limit = TASK_SIZE;

        if (len > high_limit)
                return -ENOMEM;
        if (len & (page_size - 1))
                return -EINVAL;
        if (fixed) {
                if (addr & (page_size - 1))
                        return -EINVAL;
                if (addr > high_limit - len)
                        return -ENOMEM;
        }

        if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
                /*
                 * Increasing the slb_addr_limit does not require the
                 * slice mask cache to be recalculated because it should
                 * already be initialised beyond the old address limit.
                 */
                mm_ctx_set_slb_addr_limit(&mm->context, high_limit);

                on_each_cpu(slice_flush_segments, mm, 1);
        }

        /* Sanity checks */
        BUG_ON(mm->task_size == 0);
        BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
        VM_BUG_ON(radix_enabled());

        slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
        slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
                  addr, len, flags, topdown);

        /* If hint, make sure it matches our alignment restrictions */
        if (!fixed && addr) {
                addr = _ALIGN_UP(addr, page_size);
                slice_dbg(" aligned addr=%lx\n", addr);
                /* Ignore hint if it's too large or overlaps a VMA */
                if (addr > high_limit - len || addr < mmap_min_addr ||
                    !slice_area_is_free(mm, addr, len))
                        addr = 0;
        }

        /* First make up a "good" mask of slices that have the right size
         * already
         */
        maskp = slice_mask_for_size(&mm->context, psize);

        /*
         * Here "good" means slices that are already the right page size,
         * "compat" means slices that have a compatible page size (i.e.
         * 4k in a 64k pagesize kernel), and "free" means slices without
         * any VMAs.
         *
         * If MAP_FIXED:
         *      check if fits in good | compat => OK
         *      check if fits in good | compat | free => convert free
         *      else bad
         * If have hint:
         *      check if hint fits in good => OK
         *      check if hint fits in good | free => convert free
         * Otherwise:
         *      search in good, found => OK
         *      search in good | free, found => convert free
         *      search in good | compat | free, found => convert free.
         */

        /*
         * If we support combo pages, we can allow 64k pages in 4k slices.
         * The mask copies could be avoided in most cases here if we had
         * a pointer to the good mask for the next code to use.
         */
        if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
                compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
                if (fixed)
                        slice_or_mask(&good_mask, maskp, compat_maskp);
                else
                        slice_copy_mask(&good_mask, maskp);
        } else {
                slice_copy_mask(&good_mask, maskp);
        }

        slice_print_mask(" good_mask", &good_mask);
        if (compat_maskp)
                slice_print_mask(" compat_mask", compat_maskp);

        /* First check hint if it's valid or if we have MAP_FIXED */
        if (addr != 0 || fixed) {
                /* Check if we fit in the good mask. If we do, we just return,
                 * nothing else to do
                 */
                if (slice_check_range_fits(mm, &good_mask, addr, len)) {
                        slice_dbg(" fits good !\n");
                        newaddr = addr;
                        goto return_addr;
                }
        } else {
                /* Now let's see if we can find something in the existing
                 * slices for that size
                 */
                newaddr = slice_find_area(mm, len, &good_mask,
                                          psize, topdown, high_limit);
                if (newaddr != -ENOMEM) {
                        /* Found within the good mask; nothing to
                         * convert, so return directly
                         */
                        slice_dbg(" found area at 0x%lx\n", newaddr);
                        goto return_addr;
                }
        }
        /*
         * We don't fit in the good mask, check what other slices are
         * empty and thus can be converted
         */
        slice_mask_for_free(mm, &potential_mask, high_limit);
        slice_or_mask(&potential_mask, &potential_mask, &good_mask);
        slice_print_mask(" potential", &potential_mask);

        if (addr != 0 || fixed) {
                if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
                        slice_dbg(" fits potential !\n");
                        newaddr = addr;
                        goto convert;
                }
        }

        /* If we have MAP_FIXED and failed the above steps, then error out */
        if (fixed)
                return -EBUSY;

        slice_dbg(" search...\n");

        /* If we had a hint that didn't work out, see if we can fit
         * anywhere in the good area.
         */
        if (addr) {
                newaddr = slice_find_area(mm, len, &good_mask,
                                          psize, topdown, high_limit);
                if (newaddr != -ENOMEM) {
                        slice_dbg(" found area at 0x%lx\n", newaddr);
                        goto return_addr;
                }
        }

        /* Now let's see if we can find something in the existing slices
         * for that size plus free slices
         */
        newaddr = slice_find_area(mm, len, &potential_mask,
                                  psize, topdown, high_limit);

        if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && newaddr == -ENOMEM &&
            psize == MMU_PAGE_64K) {
                /* retry the search with 4k-page slices included */
                slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
                newaddr = slice_find_area(mm, len, &potential_mask,
                                          psize, topdown, high_limit);
        }

        if (newaddr == -ENOMEM)
                return -ENOMEM;

        slice_range_to_mask(newaddr, len, &potential_mask);
        slice_dbg(" found potential area at 0x%lx\n", newaddr);
        slice_print_mask(" mask", &potential_mask);

 convert:
        /*
         * Try to allocate the context before we do slice convert
         * so that we handle the context allocation failure gracefully.
         */
        if (need_extra_context(mm, newaddr)) {
                if (alloc_extended_context(mm, newaddr) < 0)
                        return -ENOMEM;
        }

        slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
        if (compat_maskp && !fixed)
                slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
        if (potential_mask.low_slices ||
                (SLICE_NUM_HIGH &&
                 !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
                slice_convert(mm, &potential_mask, psize);
                if (psize > MMU_PAGE_BASE)
                        on_each_cpu(slice_flush_segments, mm, 1);
        }
        return newaddr;

return_addr:
        if (need_extra_context(mm, newaddr)) {
                if (alloc_extended_context(mm, newaddr) < 0)
                        return -ENOMEM;
        }
        return newaddr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
                                     unsigned long addr,
                                     unsigned long len,
                                     unsigned long pgoff,
                                     unsigned long flags)
{
        return slice_get_unmapped_area(addr, len, flags,
                                       mm_ctx_user_psize(&current->mm->context), 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
                                             const unsigned long addr0,
                                             const unsigned long len,
                                             const unsigned long pgoff,
                                             const unsigned long flags)
{
        return slice_get_unmapped_area(addr0, len, flags,
                                       mm_ctx_user_psize(&current->mm->context), 1);
}

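/*
 * Return the page-size index of the slice containing 'addr'. The
 * psize arrays pack two 4-bit entries per byte.
 */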
unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
        unsigned char *psizes;
        int index, mask_index;

        VM_BUG_ON(radix_enabled());

        if (slice_addr_is_low(addr)) {
                psizes = mm_ctx_low_slices(&mm->context);
                index = GET_LOW_SLICE_INDEX(addr);
        } else {
                psizes = mm_ctx_high_slices(&mm->context);
                index = GET_HIGH_SLICE_INDEX(addr);
        }
        mask_index = index & 0x1;
        return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

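/*
 * Initialise the slice state of a new mm at exec time: default
 * address limit, the default psize for every slice, and a refilled
 * mask cache for that default size.
 */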
void slice_init_new_context_exec(struct mm_struct *mm)
{
        unsigned char *hpsizes, *lpsizes;
        struct slice_mask *mask;
        unsigned int psize = mmu_virtual_psize;

        slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);

        /*
         * In the case of exec, use the default limit. In the
         * case of fork it is just inherited from the mm being
         * duplicated.
         */
        mm_ctx_set_slb_addr_limit(&mm->context, SLB_ADDR_LIMIT_DEFAULT);
        mm_ctx_set_user_psize(&mm->context, psize);

        /*
         * Set all slice psizes to the default.
         */
        lpsizes = mm_ctx_low_slices(&mm->context);
        memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);

        hpsizes = mm_ctx_high_slices(&mm->context);
        memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);

        /*
         * Slice mask cache starts zeroed, fill the default size cache.
         */
        mask = slice_mask_for_size(&mm->context, psize);
        mask->low_slices = ~0UL;
        if (SLICE_NUM_HIGH)
                bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_BOOK3S_64
void slice_setup_new_exec(void)
{
        struct mm_struct *mm = current->mm;

        slice_dbg("slice_setup_new_exec(mm=%p)\n", mm);

        if (!is_32bit_task())
                return;

        mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
}
#endif

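/*
 * Convert the slices covering [start, start + len) to 'psize',
 * without checking for existing VMAs in the range.
 */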
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
                           unsigned long len, unsigned int psize)
{
        struct slice_mask mask;

        VM_BUG_ON(radix_enabled());

        slice_range_to_mask(start, len, &mask);
        slice_convert(mm, &mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, a MAP_FIXED mapping cannot
 * "convert" a slice with no VMAs back to the standard page size; only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine this function as 0 in that case. This is
 * ok for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                           unsigned long len)
{
        const struct slice_mask *maskp;
        unsigned int psize = mm_ctx_user_psize(&mm->context);

        VM_BUG_ON(radix_enabled());

        maskp = slice_mask_for_size(&mm->context, psize);

        /* We need to account for 4k slices too */
        if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
                const struct slice_mask *compat_maskp;
                struct slice_mask available;

                compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
                slice_or_mask(&available, maskp, compat_maskp);
                return !slice_check_range_fits(mm, &available, addr, len);
        }

        return !slice_check_range_fits(mm, maskp, addr, len);
}
#endif
