arch/arm64/mm/fault.c


DEFINITIONS

This source file includes the following definitions:
  1. esr_to_fault_info
  2. esr_to_debug_fault_info
  3. data_abort_decode
  4. mem_abort_decode
  5. is_ttbr0_addr
  6. is_ttbr1_addr
  7. mm_to_pgd_phys
  8. show_pte
  9. ptep_set_access_flags
  10. is_el1_instruction_abort
  11. is_el1_permission_fault
  12. is_spurious_el1_translation_fault
  13. die_kernel_fault
  14. __do_kernel_fault
  15. set_thread_esr
  16. do_bad_area
  17. __do_page_fault
  18. is_el0_instruction_abort
  19. is_write_abort
  20. do_page_fault
  21. do_translation_fault
  22. do_alignment_fault
  23. do_bad
  24. do_sea
  25. do_mem_abort
  26. do_el0_irq_bp_hardening
  27. do_el0_ia_bp_hardening
  28. do_sp_pc_abort
  29. hook_debug_fault_code
  30. debug_exception_enter
  31. debug_exception_exit
  32. cortex_a76_erratum_1463225_debug_handler
  33. cortex_a76_erratum_1463225_debug_handler
  34. do_debug_exception

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Based on arch/arm/mm/fault.c
   4  *
   5  * Copyright (C) 1995  Linus Torvalds
   6  * Copyright (C) 1995-2004 Russell King
   7  * Copyright (C) 2012 ARM Ltd.
   8  */
   9 
  10 #include <linux/acpi.h>
  11 #include <linux/bitfield.h>
  12 #include <linux/extable.h>
  13 #include <linux/signal.h>
  14 #include <linux/mm.h>
  15 #include <linux/hardirq.h>
  16 #include <linux/init.h>
  17 #include <linux/kprobes.h>
  18 #include <linux/uaccess.h>
  19 #include <linux/page-flags.h>
  20 #include <linux/sched/signal.h>
  21 #include <linux/sched/debug.h>
  22 #include <linux/highmem.h>
  23 #include <linux/perf_event.h>
  24 #include <linux/preempt.h>
  25 #include <linux/hugetlb.h>
  26 
  27 #include <asm/acpi.h>
  28 #include <asm/bug.h>
  29 #include <asm/cmpxchg.h>
  30 #include <asm/cpufeature.h>
  31 #include <asm/exception.h>
  32 #include <asm/daifflags.h>
  33 #include <asm/debug-monitors.h>
  34 #include <asm/esr.h>
  35 #include <asm/kasan.h>
  36 #include <asm/sysreg.h>
  37 #include <asm/system_misc.h>
  38 #include <asm/pgtable.h>
  39 #include <asm/tlbflush.h>
  40 #include <asm/traps.h>
  41 
  42 struct fault_info {
  43         int     (*fn)(unsigned long addr, unsigned int esr,
  44                       struct pt_regs *regs);
  45         int     sig;
  46         int     code;
  47         const char *name;
  48 };
  49 
  50 static const struct fault_info fault_info[];
  51 static struct fault_info debug_fault_info[];
  52 
  53 static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
  54 {
  55         return fault_info + (esr & ESR_ELx_FSC);
  56 }
  57 
  58 static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr)
  59 {
  60         return debug_fault_info + DBG_ESR_EVT(esr);
  61 }
  62 
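      /*
       * Both lookups index straight into the tables below: fault_info[] is
       * indexed by the 6-bit fault status code (ESR_ELx_FSC), matching its
       * 64 entries, and debug_fault_info[] by the 3-bit debug event type
       * extracted by DBG_ESR_EVT(), matching its 8 entries.
       */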
  63 static void data_abort_decode(unsigned int esr)
  64 {
  65         pr_alert("Data abort info:\n");
  66 
  67         if (esr & ESR_ELx_ISV) {
  68                 pr_alert("  Access size = %u byte(s)\n",
  69                          1U << ((esr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT));
  70                 pr_alert("  SSE = %lu, SRT = %lu\n",
  71                          (esr & ESR_ELx_SSE) >> ESR_ELx_SSE_SHIFT,
  72                          (esr & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT);
  73                 pr_alert("  SF = %lu, AR = %lu\n",
  74                          (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
  75                          (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
  76         } else {
  77                 pr_alert("  ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK);
  78         }
  79 
  80         pr_alert("  CM = %lu, WnR = %lu\n",
  81                  (esr & ESR_ELx_CM) >> ESR_ELx_CM_SHIFT,
  82                  (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT);
  83 }
  84 
  85 static void mem_abort_decode(unsigned int esr)
  86 {
  87         pr_alert("Mem abort info:\n");
  88 
  89         pr_alert("  ESR = 0x%08x\n", esr);
  90         pr_alert("  EC = 0x%02lx: %s, IL = %u bits\n",
  91                  ESR_ELx_EC(esr), esr_get_class_string(esr),
  92                  (esr & ESR_ELx_IL) ? 32 : 16);
  93         pr_alert("  SET = %lu, FnV = %lu\n",
  94                  (esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT,
  95                  (esr & ESR_ELx_FnV) >> ESR_ELx_FnV_SHIFT);
  96         pr_alert("  EA = %lu, S1PTW = %lu\n",
  97                  (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
  98                  (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);
  99 
 100         if (esr_is_data_abort(esr))
 101                 data_abort_decode(esr);
 102 }
 103 
 104 static inline bool is_ttbr0_addr(unsigned long addr)
 105 {
 106         /* entry assembly clears tags for TTBR0 addrs */
 107         return addr < TASK_SIZE;
 108 }
 109 
 110 static inline bool is_ttbr1_addr(unsigned long addr)
 111 {
 112         /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
 113         return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
 114 }
 115 
 116 static inline unsigned long mm_to_pgd_phys(struct mm_struct *mm)
 117 {
 118         /* Either init_pg_dir or swapper_pg_dir */
 119         if (mm == &init_mm)
 120                 return __pa_symbol(mm->pgd);
 121 
 122         return (unsigned long)virt_to_phys(mm->pgd);
 123 }
 124 
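      /*
       * init_mm's pgd is a kernel-image symbol (init_pg_dir or
       * swapper_pg_dir, as noted above), so its physical address must come
       * from __pa_symbol(); the pgd of an ordinary mm is allocated from the
       * linear map, where virt_to_phys() is the correct conversion.
       */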
 125 /*
 126  * Dump out the page tables associated with 'addr' in the currently active mm.
 127  */
 128 static void show_pte(unsigned long addr)
 129 {
 130         struct mm_struct *mm;
 131         pgd_t *pgdp;
 132         pgd_t pgd;
 133 
 134         if (is_ttbr0_addr(addr)) {
 135                 /* TTBR0 */
 136                 mm = current->active_mm;
 137                 if (mm == &init_mm) {
 138                         pr_alert("[%016lx] user address but active_mm is swapper\n",
 139                                  addr);
 140                         return;
 141                 }
 142         } else if (is_ttbr1_addr(addr)) {
 143                 /* TTBR1 */
 144                 mm = &init_mm;
 145         } else {
 146                 pr_alert("[%016lx] address between user and kernel address ranges\n",
 147                          addr);
 148                 return;
 149         }
 150 
 151         pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
 152                  mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
 153                  vabits_actual, mm_to_pgd_phys(mm));
 154         pgdp = pgd_offset(mm, addr);
 155         pgd = READ_ONCE(*pgdp);
 156         pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
 157 
 158         do {
 159                 pud_t *pudp, pud;
 160                 pmd_t *pmdp, pmd;
 161                 pte_t *ptep, pte;
 162 
 163                 if (pgd_none(pgd) || pgd_bad(pgd))
 164                         break;
 165 
 166                 pudp = pud_offset(pgdp, addr);
 167                 pud = READ_ONCE(*pudp);
 168                 pr_cont(", pud=%016llx", pud_val(pud));
 169                 if (pud_none(pud) || pud_bad(pud))
 170                         break;
 171 
 172                 pmdp = pmd_offset(pudp, addr);
 173                 pmd = READ_ONCE(*pmdp);
 174                 pr_cont(", pmd=%016llx", pmd_val(pmd));
 175                 if (pmd_none(pmd) || pmd_bad(pmd))
 176                         break;
 177 
 178                 ptep = pte_offset_map(pmdp, addr);
 179                 pte = READ_ONCE(*ptep);
 180                 pr_cont(", pte=%016llx", pte_val(pte));
 181                 pte_unmap(ptep);
 182         } while(0);
 183 
 184         pr_cont("\n");
 185 }
 186 
 187 /*
 188  * This function sets the access flags (dirty, accessed), as well as write
 189  * permission, and only to a more permissive setting.
 190  *
 191  * It needs to cope with hardware update of the accessed/dirty state by other
 192  * agents in the system and can safely skip the __sync_icache_dcache() call as,
 193  * like set_pte_at(), the PTE is never changed from no-exec to exec here.
 194  *
 195  * Returns whether or not the PTE actually changed.
 196  */
 197 int ptep_set_access_flags(struct vm_area_struct *vma,
 198                           unsigned long address, pte_t *ptep,
 199                           pte_t entry, int dirty)
 200 {
 201         pteval_t old_pteval, pteval;
 202         pte_t pte = READ_ONCE(*ptep);
 203 
 204         if (pte_same(pte, entry))
 205                 return 0;
 206 
 207         /* only preserve the access flags and write permission */
 208         pte_val(entry) &= PTE_RDONLY | PTE_AF | PTE_WRITE | PTE_DIRTY;
 209 
 210         /*
 211          * Setting the flags must be done atomically to avoid racing with the
 212          * hardware update of the access/dirty state. The PTE_RDONLY bit must
 213          * be set to the most permissive (lowest value) of *ptep and entry
 214          * (calculated as: a & b == ~(~a | ~b)).
 215          */
 216         pte_val(entry) ^= PTE_RDONLY;
 217         pteval = pte_val(pte);
 218         do {
 219                 old_pteval = pteval;
 220                 pteval ^= PTE_RDONLY;
 221                 pteval |= pte_val(entry);
 222                 pteval ^= PTE_RDONLY;
 223                 pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
 224         } while (pteval != old_pteval);
 225 
 226         flush_tlb_fix_spurious_fault(vma, address);
 227         return 1;
 228 }
 229 
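      /*
       * The xor/or/xor sequence above ORs every bit of 'entry' into the PTE
       * except PTE_RDONLY, which ends up as ~(~old | ~new) == old & new:
       * the PTE remains read-only only if both the current value and the
       * new one were read-only, i.e. the most permissive combination wins.
       * The cmpxchg() loop retries if hardware changed the access/dirty
       * state underneath us.
       */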
 230 static bool is_el1_instruction_abort(unsigned int esr)
 231 {
 232         return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
 233 }
 234 
 235 static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
 236                                            struct pt_regs *regs)
 237 {
 238         unsigned int ec       = ESR_ELx_EC(esr);
 239         unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
 240 
 241         if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
 242                 return false;
 243 
 244         if (fsc_type == ESR_ELx_FSC_PERM)
 245                 return true;
 246 
 247         if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan())
 248                 return fsc_type == ESR_ELx_FSC_FAULT &&
 249                         (regs->pstate & PSR_PAN_BIT);
 250 
 251         return false;
 252 }
 253 
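      /*
       * The TTBR0 PAN case above is special: with the software PAN
       * emulation the kernel parks TTBR0 on a reserved table, so a kernel
       * access to user memory made while PAN is set shows up as a
       * translation fault rather than a permission fault; the PSR_PAN_BIT
       * test reclassifies it as a permission fault.
       */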
 254 static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
 255                                                         unsigned int esr,
 256                                                         struct pt_regs *regs)
 257 {
 258         unsigned long flags;
 259         u64 par, dfsc;
 260 
 261         if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR ||
 262             (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
 263                 return false;
 264 
 265         local_irq_save(flags);
 266         asm volatile("at s1e1r, %0" :: "r" (addr));
 267         isb();
 268         par = read_sysreg(par_el1);
 269         local_irq_restore(flags);
 270 
 271         /*
 272          * If we now have a valid translation, treat the translation fault as
 273          * spurious.
 274          */
 275         if (!(par & SYS_PAR_EL1_F))
 276                 return true;
 277 
 278         /*
 279          * If we got a different type of fault from the AT instruction,
 280          * treat the translation fault as spurious.
 281          */
 282         dfsc = FIELD_GET(SYS_PAR_EL1_FST, par);
 283         return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT;
 284 }
 285 
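      /*
       * AT S1E1R above asks the MMU to redo the stage 1 EL1 read
       * translation for 'addr' and latches the result in PAR_EL1. A clear
       * F bit means the walk now succeeds; a different fault code means the
       * original translation fault no longer reproduces. Either way the
       * fault is reported as spurious.
       */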
 286 static void die_kernel_fault(const char *msg, unsigned long addr,
 287                              unsigned int esr, struct pt_regs *regs)
 288 {
 289         bust_spinlocks(1);
 290 
 291         pr_alert("Unable to handle kernel %s at virtual address %016lx\n", msg,
 292                  addr);
 293 
 294         mem_abort_decode(esr);
 295 
 296         show_pte(addr);
 297         die("Oops", regs, esr);
 298         bust_spinlocks(0);
 299         do_exit(SIGKILL);
 300 }
 301 
 302 static void __do_kernel_fault(unsigned long addr, unsigned int esr,
 303                               struct pt_regs *regs)
 304 {
 305         const char *msg;
 306 
 307         /*
 308          * Are we prepared to handle this kernel fault?
 309          * We are almost certainly not prepared to handle instruction faults.
 310          */
 311         if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
 312                 return;
 313 
 314         if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs),
 315             "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr))
 316                 return;
 317 
 318         if (is_el1_permission_fault(addr, esr, regs)) {
 319                 if (esr & ESR_ELx_WNR)
 320                         msg = "write to read-only memory";
 321                 else
 322                         msg = "read from unreadable memory";
 323         } else if (addr < PAGE_SIZE) {
 324                 msg = "NULL pointer dereference";
 325         } else {
 326                 msg = "paging request";
 327         }
 328 
 329         die_kernel_fault(msg, addr, esr, regs);
 330 }
 331 
 332 static void set_thread_esr(unsigned long address, unsigned int esr)
 333 {
 334         current->thread.fault_address = address;
 335 
 336         /*
 337          * If the faulting address is in the kernel, we must sanitize the ESR.
 338          * From userspace's point of view, kernel-only mappings don't exist
 339          * at all, so we report them as level 0 translation faults.
 340          * (This is not quite the way that "no mapping there at all" behaves:
 341          * an alignment fault not caused by the memory type would take
 342          * precedence over translation fault for a real access to empty
 343          * space. Unfortunately we can't easily distinguish "alignment fault
 344          * not caused by memory type" from "alignment fault caused by memory
 345          * type", so we ignore this wrinkle and just return the translation
 346          * fault.)
 347          */
 348         if (!is_ttbr0_addr(current->thread.fault_address)) {
 349                 switch (ESR_ELx_EC(esr)) {
 350                 case ESR_ELx_EC_DABT_LOW:
 351                         /*
 352                          * These bits provide only information about the
 353                          * faulting instruction, which userspace knows already.
 354                          * We explicitly clear bits which are architecturally
 355                          * RES0 in case they are given meanings in future.
 356                          * We always report the ESR as if the fault was taken
 357                          * to EL1 and so ISV and the bits in ISS[23:14] are
 358                          * clear. (In fact it always will be a fault to EL1.)
 359                          */
 360                         esr &= ESR_ELx_EC_MASK | ESR_ELx_IL |
 361                                 ESR_ELx_CM | ESR_ELx_WNR;
 362                         esr |= ESR_ELx_FSC_FAULT;
 363                         break;
 364                 case ESR_ELx_EC_IABT_LOW:
 365                         /*
 366                          * Claim a level 0 translation fault.
 367                          * All other bits are architecturally RES0 for faults
 368                          * reported with that DFSC value, so we clear them.
 369                          */
 370                         esr &= ESR_ELx_EC_MASK | ESR_ELx_IL;
 371                         esr |= ESR_ELx_FSC_FAULT;
 372                         break;
 373                 default:
 374                         /*
 375                          * This should never happen (entry.S only brings us
 376                          * into this code for insn and data aborts from a lower
 377                          * exception level). Fail safe by not providing an ESR
 378                          * context record at all.
 379                          */
 380                         WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr);
 381                         esr = 0;
 382                         break;
 383                 }
 384         }
 385 
 386         current->thread.fault_code = esr;
 387 }
 388 
 389 static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 390 {
 391         /*
 392          * If we are in kernel mode at this point, we have no context to
 393          * handle this fault with.
 394          */
 395         if (user_mode(regs)) {
 396                 const struct fault_info *inf = esr_to_fault_info(esr);
 397 
 398                 set_thread_esr(addr, esr);
 399                 arm64_force_sig_fault(inf->sig, inf->code, (void __user *)addr,
 400                                       inf->name);
 401         } else {
 402                 __do_kernel_fault(addr, esr, regs);
 403         }
 404 }
 405 
 406 #define VM_FAULT_BADMAP         0x010000
 407 #define VM_FAULT_BADACCESS      0x020000
 408 
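      /*
       * Arch-private fault codes, used only within this file:
       * __do_page_fault() returns VM_FAULT_BADMAP when no usable VMA covers
       * the address and VM_FAULT_BADACCESS when the VMA's permissions do
       * not allow the access; do_page_fault() maps them to SEGV_MAPERR and
       * SEGV_ACCERR respectively.
       */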
 409 static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
 410                            unsigned int mm_flags, unsigned long vm_flags)
 411 {
 412         struct vm_area_struct *vma = find_vma(mm, addr);
 413 
 414         if (unlikely(!vma))
 415                 return VM_FAULT_BADMAP;
 416 
 417         /*
 418          * Ok, we have a good vm_area for this memory access, so we can handle
 419          * it.
 420          */
 421         if (unlikely(vma->vm_start > addr)) {
 422                 if (!(vma->vm_flags & VM_GROWSDOWN))
 423                         return VM_FAULT_BADMAP;
 424                 if (expand_stack(vma, addr))
 425                         return VM_FAULT_BADMAP;
 426         }
 427 
 428         /*
 429          * Check that the permissions on the VMA allow for the fault which
 430          * occurred.
 431          */
 432         if (!(vma->vm_flags & vm_flags))
 433                 return VM_FAULT_BADACCESS;
 434         return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);
 435 }
 436 
 437 static bool is_el0_instruction_abort(unsigned int esr)
 438 {
 439         return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
 440 }
 441 
 442 /*
  443  * Note: not valid for EL1 DC IVAC, but we never use that in a way that
  444  * should fault. EL0 cannot issue DC IVAC (undef).
 445  */
 446 static bool is_write_abort(unsigned int esr)
 447 {
 448         return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
 449 }
 450 
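      /*
       * WnR is also reported as set for cache maintenance operations, which
       * must not be treated as writes when checking VMA permissions, hence
       * the !ESR_ELx_CM test above.
       */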
 451 static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 452                                    struct pt_regs *regs)
 453 {
 454         const struct fault_info *inf;
 455         struct mm_struct *mm = current->mm;
 456         vm_fault_t fault, major = 0;
 457         unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
 458         unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 459 
 460         if (kprobe_page_fault(regs, esr))
 461                 return 0;
 462 
 463         /*
 464          * If we're in an interrupt or have no user context, we must not take
 465          * the fault.
 466          */
 467         if (faulthandler_disabled() || !mm)
 468                 goto no_context;
 469 
 470         if (user_mode(regs))
 471                 mm_flags |= FAULT_FLAG_USER;
 472 
 473         if (is_el0_instruction_abort(esr)) {
 474                 vm_flags = VM_EXEC;
 475                 mm_flags |= FAULT_FLAG_INSTRUCTION;
 476         } else if (is_write_abort(esr)) {
 477                 vm_flags = VM_WRITE;
 478                 mm_flags |= FAULT_FLAG_WRITE;
 479         }
 480 
 481         if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
 482                 /* regs->orig_addr_limit may be 0 if we entered from EL0 */
 483                 if (regs->orig_addr_limit == KERNEL_DS)
 484                         die_kernel_fault("access to user memory with fs=KERNEL_DS",
 485                                          addr, esr, regs);
 486 
 487                 if (is_el1_instruction_abort(esr))
 488                         die_kernel_fault("execution of user memory",
 489                                          addr, esr, regs);
 490 
 491                 if (!search_exception_tables(regs->pc))
 492                         die_kernel_fault("access to user memory outside uaccess routines",
 493                                          addr, esr, regs);
 494         }
 495 
 496         perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 497 
 498         /*
 499          * As per x86, we may deadlock here. However, since the kernel only
 500          * validly references user space from well defined areas of the code,
 501          * we can bug out early if this is from code which shouldn't.
 502          */
 503         if (!down_read_trylock(&mm->mmap_sem)) {
 504                 if (!user_mode(regs) && !search_exception_tables(regs->pc))
 505                         goto no_context;
 506 retry:
 507                 down_read(&mm->mmap_sem);
 508         } else {
 509                 /*
  510                  * The above down_read_trylock() might have succeeded, in which
  511                  * case we'll have missed the might_sleep() from down_read().
 512                  */
 513                 might_sleep();
 514 #ifdef CONFIG_DEBUG_VM
 515                 if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
 516                         up_read(&mm->mmap_sem);
 517                         goto no_context;
 518                 }
 519 #endif
 520         }
 521 
 522         fault = __do_page_fault(mm, addr, mm_flags, vm_flags);
 523         major |= fault & VM_FAULT_MAJOR;
 524 
 525         if (fault & VM_FAULT_RETRY) {
 526                 /*
 527                  * If we need to retry but a fatal signal is pending,
 528                  * handle the signal first. We do not need to release
 529                  * the mmap_sem because it would already be released
 530                  * in __lock_page_or_retry in mm/filemap.c.
 531                  */
 532                 if (fatal_signal_pending(current)) {
 533                         if (!user_mode(regs))
 534                                 goto no_context;
 535                         return 0;
 536                 }
 537 
 538                 /*
 539                  * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
 540                  * starvation.
 541                  */
 542                 if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
 543                         mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
 544                         mm_flags |= FAULT_FLAG_TRIED;
 545                         goto retry;
 546                 }
 547         }
 548         up_read(&mm->mmap_sem);
 549 
 550         /*
 551          * Handle the "normal" (no error) case first.
 552          */
 553         if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
 554                               VM_FAULT_BADACCESS)))) {
 555                 /*
 556                  * Major/minor page fault accounting is only done
 557                  * once. If we go through a retry, it is extremely
 558                  * likely that the page will be found in page cache at
 559                  * that point.
 560                  */
 561                 if (major) {
 562                         current->maj_flt++;
 563                         perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
 564                                       addr);
 565                 } else {
 566                         current->min_flt++;
 567                         perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
 568                                       addr);
 569                 }
 570 
 571                 return 0;
 572         }
 573 
 574         /*
 575          * If we are in kernel mode at this point, we have no context to
 576          * handle this fault with.
 577          */
 578         if (!user_mode(regs))
 579                 goto no_context;
 580 
 581         if (fault & VM_FAULT_OOM) {
 582                 /*
 583                  * We ran out of memory, call the OOM killer, and return to
 584                  * userspace (which will retry the fault, or kill us if we got
 585                  * oom-killed).
 586                  */
 587                 pagefault_out_of_memory();
 588                 return 0;
 589         }
 590 
 591         inf = esr_to_fault_info(esr);
 592         set_thread_esr(addr, esr);
 593         if (fault & VM_FAULT_SIGBUS) {
 594                 /*
 595                  * We had some memory, but were unable to successfully fix up
 596                  * this page fault.
 597                  */
 598                 arm64_force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr,
 599                                       inf->name);
 600         } else if (fault & (VM_FAULT_HWPOISON_LARGE | VM_FAULT_HWPOISON)) {
 601                 unsigned int lsb;
 602 
 603                 lsb = PAGE_SHIFT;
 604                 if (fault & VM_FAULT_HWPOISON_LARGE)
 605                         lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
 606 
 607                 arm64_force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr, lsb,
 608                                        inf->name);
 609         } else {
 610                 /*
 611                  * Something tried to access memory that isn't in our memory
 612                  * map.
 613                  */
 614                 arm64_force_sig_fault(SIGSEGV,
 615                                       fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR,
 616                                       (void __user *)addr,
 617                                       inf->name);
 618         }
 619 
 620         return 0;
 621 
 622 no_context:
 623         __do_kernel_fault(addr, esr, regs);
 624         return 0;
 625 }
 626 
 627 static int __kprobes do_translation_fault(unsigned long addr,
 628                                           unsigned int esr,
 629                                           struct pt_regs *regs)
 630 {
 631         if (is_ttbr0_addr(addr))
 632                 return do_page_fault(addr, esr, regs);
 633 
 634         do_bad_area(addr, esr, regs);
 635         return 0;
 636 }
 637 
 638 static int do_alignment_fault(unsigned long addr, unsigned int esr,
 639                               struct pt_regs *regs)
 640 {
 641         do_bad_area(addr, esr, regs);
 642         return 0;
 643 }
 644 
 645 static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 646 {
 647         return 1; /* "fault" */
 648 }
 649 
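      /*
       * Handlers in fault_info[] return 0 once the fault has been dealt
       * with; a non-zero return, as from do_bad() above, makes
       * do_mem_abort() report the fault and raise the signal described by
       * the table entry.
       */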
 650 static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 651 {
 652         const struct fault_info *inf;
 653         void __user *siaddr;
 654 
 655         inf = esr_to_fault_info(esr);
 656 
 657         /*
 658          * Return value ignored as we rely on signal merging.
 659          * Future patches will make this more robust.
 660          */
 661         apei_claim_sea(regs);
 662 
 663         if (esr & ESR_ELx_FnV)
 664                 siaddr = NULL;
 665         else
 666                 siaddr  = (void __user *)addr;
 667         arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);
 668 
 669         return 0;
 670 }
 671 
 672 static const struct fault_info fault_info[] = {
 673         { do_bad,               SIGKILL, SI_KERNEL,     "ttbr address size fault"       },
 674         { do_bad,               SIGKILL, SI_KERNEL,     "level 1 address size fault"    },
 675         { do_bad,               SIGKILL, SI_KERNEL,     "level 2 address size fault"    },
 676         { do_bad,               SIGKILL, SI_KERNEL,     "level 3 address size fault"    },
 677         { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 0 translation fault"     },
 678         { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 1 translation fault"     },
 679         { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 2 translation fault"     },
 680         { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 3 translation fault"     },
 681         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 8"                     },
 682         { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 1 access flag fault"     },
 683         { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 2 access flag fault"     },
 684         { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 3 access flag fault"     },
 685         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 12"                    },
 686         { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 1 permission fault"      },
 687         { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 2 permission fault"      },
 688         { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 3 permission fault"      },
 689         { do_sea,               SIGBUS,  BUS_OBJERR,    "synchronous external abort"    },
 690         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 17"                    },
 691         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 18"                    },
 692         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 19"                    },
 693         { do_sea,               SIGKILL, SI_KERNEL,     "level 0 (translation table walk)"      },
 694         { do_sea,               SIGKILL, SI_KERNEL,     "level 1 (translation table walk)"      },
 695         { do_sea,               SIGKILL, SI_KERNEL,     "level 2 (translation table walk)"      },
 696         { do_sea,               SIGKILL, SI_KERNEL,     "level 3 (translation table walk)"      },
 697         { do_sea,               SIGBUS,  BUS_OBJERR,    "synchronous parity or ECC error" },    // Reserved when RAS is implemented
 698         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 25"                    },
 699         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 26"                    },
 700         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 27"                    },
 701         { do_sea,               SIGKILL, SI_KERNEL,     "level 0 synchronous parity error (translation table walk)"     },      // Reserved when RAS is implemented
 702         { do_sea,               SIGKILL, SI_KERNEL,     "level 1 synchronous parity error (translation table walk)"     },      // Reserved when RAS is implemented
 703         { do_sea,               SIGKILL, SI_KERNEL,     "level 2 synchronous parity error (translation table walk)"     },      // Reserved when RAS is implemented
 704         { do_sea,               SIGKILL, SI_KERNEL,     "level 3 synchronous parity error (translation table walk)"     },      // Reserved when RAS is implemented
 705         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 32"                    },
 706         { do_alignment_fault,   SIGBUS,  BUS_ADRALN,    "alignment fault"               },
 707         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 34"                    },
 708         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 35"                    },
 709         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 36"                    },
 710         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 37"                    },
 711         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 38"                    },
 712         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 39"                    },
 713         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 40"                    },
 714         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 41"                    },
 715         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 42"                    },
 716         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 43"                    },
 717         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 44"                    },
 718         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 45"                    },
 719         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 46"                    },
 720         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 47"                    },
 721         { do_bad,               SIGKILL, SI_KERNEL,     "TLB conflict abort"            },
 722         { do_bad,               SIGKILL, SI_KERNEL,     "Unsupported atomic hardware update fault"      },
 723         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 50"                    },
 724         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 51"                    },
 725         { do_bad,               SIGKILL, SI_KERNEL,     "implementation fault (lockdown abort)" },
 726         { do_bad,               SIGBUS,  BUS_OBJERR,    "implementation fault (unsupported exclusive)" },
 727         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 54"                    },
 728         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 55"                    },
 729         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 56"                    },
 730         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 57"                    },
 731         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 58"                    },
 732         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 59"                    },
 733         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 60"                    },
 734         { do_bad,               SIGKILL, SI_KERNEL,     "section domain fault"          },
 735         { do_bad,               SIGKILL, SI_KERNEL,     "page domain fault"             },
 736         { do_bad,               SIGKILL, SI_KERNEL,     "unknown 63"                    },
 737 };
 738 
 739 asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
 740                                          struct pt_regs *regs)
 741 {
 742         const struct fault_info *inf = esr_to_fault_info(esr);
 743 
 744         if (!inf->fn(addr, esr, regs))
 745                 return;
 746 
 747         if (!user_mode(regs)) {
 748                 pr_alert("Unhandled fault at 0x%016lx\n", addr);
 749                 mem_abort_decode(esr);
 750                 show_pte(addr);
 751         }
 752 
 753         arm64_notify_die(inf->name, regs,
 754                          inf->sig, inf->code, (void __user *)addr, esr);
 755 }
 756 
 757 asmlinkage void __exception do_el0_irq_bp_hardening(void)
 758 {
 759         /* PC has already been checked in entry.S */
 760         arm64_apply_bp_hardening();
 761 }
 762 
 763 asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
 764                                                    unsigned int esr,
 765                                                    struct pt_regs *regs)
 766 {
 767         /*
 768          * We've taken an instruction abort from userspace and not yet
 769          * re-enabled IRQs. If the address is a kernel address, apply
 770          * BP hardening prior to enabling IRQs and pre-emption.
 771          */
 772         if (!is_ttbr0_addr(addr))
 773                 arm64_apply_bp_hardening();
 774 
 775         local_daif_restore(DAIF_PROCCTX);
 776         do_mem_abort(addr, esr, regs);
 777 }
 778 
 779 
 780 asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
 781                                            unsigned int esr,
 782                                            struct pt_regs *regs)
 783 {
 784         if (user_mode(regs)) {
 785                 if (!is_ttbr0_addr(instruction_pointer(regs)))
 786                         arm64_apply_bp_hardening();
 787                 local_daif_restore(DAIF_PROCCTX);
 788         }
 789 
 790         arm64_notify_die("SP/PC alignment exception", regs,
 791                          SIGBUS, BUS_ADRALN, (void __user *)addr, esr);
 792 }
 793 
 794 int __init early_brk64(unsigned long addr, unsigned int esr,
 795                        struct pt_regs *regs);
 796 
 797 /*
 798  * __refdata because early_brk64 is __init, but the reference to it is
 799  * clobbered at arch_initcall time.
 800  * See traps.c and debug-monitors.c:debug_traps_init().
 801  */
 802 static struct fault_info __refdata debug_fault_info[] = {
 803         { do_bad,       SIGTRAP,        TRAP_HWBKPT,    "hardware breakpoint"   },
 804         { do_bad,       SIGTRAP,        TRAP_HWBKPT,    "hardware single-step"  },
 805         { do_bad,       SIGTRAP,        TRAP_HWBKPT,    "hardware watchpoint"   },
 806         { do_bad,       SIGKILL,        SI_KERNEL,      "unknown 3"             },
 807         { do_bad,       SIGTRAP,        TRAP_BRKPT,     "aarch32 BKPT"          },
 808         { do_bad,       SIGKILL,        SI_KERNEL,      "aarch32 vector catch"  },
 809         { early_brk64,  SIGTRAP,        TRAP_BRKPT,     "aarch64 BRK"           },
 810         { do_bad,       SIGKILL,        SI_KERNEL,      "unknown 7"             },
 811 };
 812 
 813 void __init hook_debug_fault_code(int nr,
 814                                   int (*fn)(unsigned long, unsigned int, struct pt_regs *),
 815                                   int sig, int code, const char *name)
 816 {
 817         BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));
 818 
 819         debug_fault_info[nr].fn         = fn;
 820         debug_fault_info[nr].sig        = sig;
 821         debug_fault_info[nr].code       = code;
 822         debug_fault_info[nr].name       = name;
 823 }
 824 
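      /*
       * The do_bad placeholders in debug_fault_info[] are replaced at boot
       * through this hook; debug_traps_init() in debug-monitors.c
       * (referenced above) is one such caller.
       */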
 825 /*
 826  * In debug exception context, we explicitly disable preemption despite
 827  * having interrupts disabled.
 828  * This serves two purposes: it makes it much less likely that we would
 829  * accidentally schedule in exception context and it will force a warning
 830  * if we somehow manage to schedule by accident.
 831  */
 832 static void debug_exception_enter(struct pt_regs *regs)
 833 {
 834         /*
 835          * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
 836          * already disabled to preserve the last enabled/disabled addresses.
 837          */
 838         if (interrupts_enabled(regs))
 839                 trace_hardirqs_off();
 840 
 841         if (user_mode(regs)) {
 842                 RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 843         } else {
 844                 /*
 845                  * We might have interrupted pretty much anything.  In
 846                  * fact, if we're a debug exception, we can even interrupt
  847                  * NMI processing. We don't want this code to make in_nmi()
  848                  * return true, but we need to notify RCU.
 849                  */
 850                 rcu_nmi_enter();
 851         }
 852 
 853         preempt_disable();
 854 
 855         /* This code is a bit fragile.  Test it. */
 856         RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
 857 }
 858 NOKPROBE_SYMBOL(debug_exception_enter);
 859 
 860 static void debug_exception_exit(struct pt_regs *regs)
 861 {
 862         preempt_enable_no_resched();
 863 
 864         if (!user_mode(regs))
 865                 rcu_nmi_exit();
 866 
 867         if (interrupts_enabled(regs))
 868                 trace_hardirqs_on();
 869 }
 870 NOKPROBE_SYMBOL(debug_exception_exit);
 871 
 872 #ifdef CONFIG_ARM64_ERRATUM_1463225
 873 DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
 874 
 875 static int __exception
 876 cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
 877 {
 878         if (user_mode(regs))
 879                 return 0;
 880 
 881         if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
 882                 return 0;
 883 
 884         /*
 885          * We've taken a dummy step exception from the kernel to ensure
 886          * that interrupts are re-enabled on the syscall path. Return back
 887          * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
 888          * masked so that we can safely restore the mdscr and get on with
 889          * handling the syscall.
 890          */
 891         regs->pstate |= PSR_D_BIT;
 892         return 1;
 893 }
 894 #else
 895 static int __exception
 896 cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
 897 {
 898         return 0;
 899 }
 900 #endif /* CONFIG_ARM64_ERRATUM_1463225 */
 901 
 902 asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
 903                                                unsigned int esr,
 904                                                struct pt_regs *regs)
 905 {
 906         const struct fault_info *inf = esr_to_debug_fault_info(esr);
 907         unsigned long pc = instruction_pointer(regs);
 908 
 909         if (cortex_a76_erratum_1463225_debug_handler(regs))
 910                 return;
 911 
 912         debug_exception_enter(regs);
 913 
 914         if (user_mode(regs) && !is_ttbr0_addr(pc))
 915                 arm64_apply_bp_hardening();
 916 
 917         if (inf->fn(addr_if_watchpoint, esr, regs)) {
 918                 arm64_notify_die(inf->name, regs,
 919                                  inf->sig, inf->code, (void __user *)pc, esr);
 920         }
 921 
 922         debug_exception_exit(regs);
 923 }
 924 NOKPROBE_SYMBOL(do_debug_exception);
