root/kernel/panic.c

DEFINITIONS

This source file includes the following definitions.
  1. no_blink
  2. panic_smp_self_stop
  3. nmi_panic_self_stop
  4. crash_smp_send_stop
  5. nmi_panic
  6. panic_print_sys_info
  7. panic
  8. print_tainted
  9. test_taint
  10. get_taint
  11. add_taint
  12. spin_msec
  13. do_oops_enter_exit
  14. oops_may_print
  15. oops_enter
  16. init_oops_id
  17. print_oops_end_marker
  18. oops_exit
  19. __warn
  20. warn_slowpath_fmt
  21. __warn_printk
  22. clear_warn_once_set
  23. register_warn_debugfs
  24. __stack_chk_fail
  25. refcount_error_report
  26. oops_setup

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/panic.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * panic() is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/debug_locks.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
#include <linux/kgdb.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/vt_kern.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
#include <asm/sections.h>

#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18

int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask =
        IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT) ? (1 << TAINT_RANDSTRUCT) : 0;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
bool crash_kexec_post_notifiers;
int panic_on_warn __read_mostly;

int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);
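
/*
 * panic_timeout can also be set with the "panic=" boot parameter (wired
 * up by the core_param() near the end of this file).  panic() below
 * interprets the sign:
 *
 *      panic=10        reboot 10 seconds after a panic
 *      panic=-1        reboot immediately, skipping the delay loop
 *      panic=0         never reboot; spin forever
 */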

#define PANIC_PRINT_TASK_INFO           0x00000001
#define PANIC_PRINT_MEM_INFO            0x00000002
#define PANIC_PRINT_TIMER_INFO          0x00000004
#define PANIC_PRINT_LOCK_INFO           0x00000008
#define PANIC_PRINT_FTRACE_INFO         0x00000010
#define PANIC_PRINT_ALL_PRINTK_MSG      0x00000020
unsigned long panic_print;
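
/*
 * Example: booting with "panic_print=0x3" (TASK_INFO | MEM_INFO) asks
 * panic_print_sys_info() below to dump all task states and a memory
 * summary before the machine halts.
 */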

ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);

static long no_blink(int state)
{
        return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);
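
/*
 * Illustrative sketch (not code from this file): a driver can install a
 * blink handler so the panic loops below flash an indicator.  The
 * handler returns how many milliseconds it spent; my_led_set() is a
 * hypothetical stand-in for a real LED accessor.
 *
 *      static long my_panic_blink(int state)
 *      {
 *              my_led_set(state);      // hypothetical LED accessor
 *              return 0;               // consumed no measurable time
 *      }
 *
 *      // in the driver's init path:
 *      panic_blink = my_panic_blink;
 */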

/*
 * Stop ourselves in panic -- architecture code may override this
 */
void __weak panic_smp_self_stop(void)
{
        while (1)
                cpu_relax();
}

/*
 * Stop ourselves in NMI context if another CPU has already panicked. Arch code
 * may override this to prepare for crash dumping, e.g. save regs info.
 */
void __weak nmi_panic_self_stop(struct pt_regs *regs)
{
        panic_smp_self_stop();
}

/*
 * Stop other CPUs in panic.  Architecture dependent code may override this
 * with a more suitable version.  For example, if the architecture supports
 * crash dump, it should save registers of each stopped CPU and disable
 * per-CPU features such as virtualization extensions.
 */
void __weak crash_smp_send_stop(void)
{
        static int cpus_stopped;

        /*
         * This function may be called twice on the panic path, but the
         * shutdown must only be executed once.
         */
        if (cpus_stopped)
                return;

        /*
         * Note smp_send_stop is the usual smp shutdown function, which
         * unfortunately means it may not be hardened to work in a panic
         * situation.
         */
        smp_send_stop();
        cpus_stopped = 1;
}
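
/*
 * Illustrative sketch of an architecture override (hypothetical, not
 * code from any real arch): a crash-dump capable architecture would
 * replace the weak version above with one that first makes every CPU
 * save its registers.  arch_save_regs_and_stop() and
 * arch_disable_virt() are placeholder names.
 *
 *      void crash_smp_send_stop(void)
 *      {
 *              arch_save_regs_and_stop();      // IPI CPUs to save state
 *              arch_disable_virt();            // e.g. leave VMX operation
 *      }
 */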

atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);

/*
 * A variant of panic() called from NMI context. We return if we've already
 * panicked on this CPU. If another CPU already panicked, loop in
 * nmi_panic_self_stop() which can provide architecture dependent code such
 * as saving register state for crash dump.
 */
void nmi_panic(struct pt_regs *regs, const char *msg)
{
        int old_cpu, cpu;

        cpu = raw_smp_processor_id();
        old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu);

        if (old_cpu == PANIC_CPU_INVALID)
                panic("%s", msg);
        else if (old_cpu != cpu)
                nmi_panic_self_stop(regs);
}
EXPORT_SYMBOL(nmi_panic);
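
/*
 * Typical use (illustrative sketch): an NMI-context detector, such as a
 * hard-lockup watchdog, calls nmi_panic() instead of panic() so that a
 * panic already running on another CPU is parked rather than
 * re-entered.  hard_lockup_detected() is a hypothetical check.
 *
 *      static void my_nmi_handler(struct pt_regs *regs)
 *      {
 *              if (hard_lockup_detected())
 *                      nmi_panic(regs, "Hard LOCKUP");
 *      }
 */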

static void panic_print_sys_info(void)
{
        if (panic_print & PANIC_PRINT_ALL_PRINTK_MSG)
                console_flush_on_panic(CONSOLE_REPLAY_ALL);

        if (panic_print & PANIC_PRINT_TASK_INFO)
                show_state();

        if (panic_print & PANIC_PRINT_MEM_INFO)
                show_mem(0, NULL);

        if (panic_print & PANIC_PRINT_TIMER_INFO)
                sysrq_timer_list_show();

        if (panic_print & PANIC_PRINT_LOCK_INFO)
                debug_show_all_locks();

        if (panic_print & PANIC_PRINT_FTRACE_INFO)
                ftrace_dump(DUMP_ALL);
}

/**
 *      panic - halt the system
 *      @fmt: The text string to print
 *
 *      Display a message, then perform cleanups.
 *
 *      This function never returns.
 */
void panic(const char *fmt, ...)
{
        static char buf[1024];
        va_list args;
        long i, i_next = 0, len;
        int state = 0;
        int old_cpu, this_cpu;
        bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;

        /*
         * Disable local interrupts. This will prevent panic_smp_self_stop
         * from deadlocking the first cpu that invokes the panic, since
         * there is nothing to prevent an interrupt handler (that runs
         * after setting panic_cpu) from invoking panic() again.
         */
        local_irq_disable();
        preempt_disable_notrace();

        /*
         * It's possible to come here directly from a panic-assertion and
         * not have preempt disabled. Some functions called from here want
         * preempt to be disabled. No point enabling it later though...
         *
         * Only one CPU is allowed to execute the panic code from here. For
         * multiple parallel invocations of panic, all other CPUs either
         * stop themselves or will wait until they are stopped by the 1st CPU
         * with smp_send_stop().
         *
         * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
         * comes here, so go ahead.
         * `old_cpu == this_cpu' means we came from nmi_panic() which sets
         * panic_cpu to this CPU.  In this case, this is also the 1st CPU.
         */
        this_cpu = raw_smp_processor_id();
        old_cpu  = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);

        if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
                panic_smp_self_stop();

        console_verbose();
        bust_spinlocks(1);
        va_start(args, fmt);
        len = vscnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);

        if (len && buf[len - 1] == '\n')
                buf[len - 1] = '\0';

        pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
        /*
         * Avoid nested stack-dumping if a panic occurs during oops processing
         */
        if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
                dump_stack();
#endif

        /*
         * If kgdb is enabled, give it a chance to run before we stop all
         * the other CPUs or else we won't be able to debug processes left
         * running on them.
         */
        kgdb_panic(buf);

        /*
         * If we have crashed and we have a crash kernel loaded let it handle
         * everything else.
         * If we want to run this after calling panic_notifiers, pass
         * the "crash_kexec_post_notifiers" option to the kernel.
         *
         * Bypass the panic_cpu check and call __crash_kexec directly.
         */
        if (!_crash_kexec_post_notifiers) {
                printk_safe_flush_on_panic();
                __crash_kexec(NULL);

                /*
                 * Note smp_send_stop is the usual smp shutdown function, which
                 * unfortunately means it may not be hardened to work in a
                 * panic situation.
                 */
                smp_send_stop();
        } else {
                /*
                 * If we want to do crash dump after notifier calls and
                 * kmsg_dump, we will need architecture dependent extra
                 * work in addition to stopping other CPUs.
                 */
                crash_smp_send_stop();
        }

        /*
         * Run any panic handlers, including those that might need to
         * add information to the kmsg dump output.
         */
        atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

        /* Call flush even twice; it tries harder with a single online CPU */
        printk_safe_flush_on_panic();
        kmsg_dump(KMSG_DUMP_PANIC);

        /*
         * If you doubt that kdump always works in every situation,
         * "crash_kexec_post_notifiers" offers you a chance to run the
         * panic_notifiers and dump kmsg before kdump.
         * Note: since some panic_notifiers can make the crashed kernel
         * more unstable, it can increase the risk of kdump failure too.
         *
         * Bypass the panic_cpu check and call __crash_kexec directly.
         */
        if (_crash_kexec_post_notifiers)
                __crash_kexec(NULL);

#ifdef CONFIG_VT
        unblank_screen();
#endif
        console_unblank();

        /*
         * We may have ended up stopping the CPU holding the lock (in
         * smp_send_stop()) while still having some valuable data in the console
         * buffer.  Try to acquire the lock then release it regardless of the
         * result.  The release will also print the buffers out.  Locks debug
         * should be disabled to avoid reporting bad unlock balance when
         * panic() is not being called from OOPS.
         */
        debug_locks_off();
        console_flush_on_panic(CONSOLE_FLUSH_PENDING);

        panic_print_sys_info();

        if (!panic_blink)
                panic_blink = no_blink;

        if (panic_timeout > 0) {
                /*
                 * Delay timeout seconds before rebooting the machine.
                 * We can't use the "normal" timers since we just panicked.
                 */
                pr_emerg("Rebooting in %d seconds..\n", panic_timeout);

                for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
                        touch_nmi_watchdog();
                        if (i >= i_next) {
                                i += panic_blink(state ^= 1);
                                i_next = i + 3600 / PANIC_BLINK_SPD;
                        }
                        mdelay(PANIC_TIMER_STEP);
                }
        }
        if (panic_timeout != 0) {
                /*
                 * This will not be a clean reboot, with everything
                 * shutting down.  But if there is a chance of
                 * rebooting the system it will be rebooted.
                 */
                if (panic_reboot_mode != REBOOT_UNDEFINED)
                        reboot_mode = panic_reboot_mode;
                emergency_restart();
        }
#ifdef __sparc__
        {
                extern int stop_a_enabled;
                /* Make sure the user can actually press Stop-A (L1-A) */
                stop_a_enabled = 1;
                pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
                         "twice on console to return to the boot prom\n");
        }
#endif
#if defined(CONFIG_S390)
        disabled_wait();
#endif
        pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);

        /* Do not scroll important messages printed above */
        suppress_printk = 1;
        local_irq_enable();
        for (i = 0; ; i += PANIC_TIMER_STEP) {
                touch_softlockup_watchdog();
                if (i >= i_next) {
                        i += panic_blink(state ^= 1);
                        i_next = i + 3600 / PANIC_BLINK_SPD;
                }
                mdelay(PANIC_TIMER_STEP);
        }
}
EXPORT_SYMBOL(panic);
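
/*
 * panic() takes a printf-style format, so callers can report context,
 * e.g. (illustrative; "name" stands in for the caller's data):
 *
 *      panic("VFS: Unable to mount root fs on %s", name);
 *
 * A trailing newline in the message is stripped before printing.
 */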

/*
 * TAINT_FORCED_RMMOD could be a per-module flag but the module
 * is being removed anyway.
 */
const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
        [ TAINT_PROPRIETARY_MODULE ]    = { 'P', 'G', true },
        [ TAINT_FORCED_MODULE ]         = { 'F', ' ', true },
        [ TAINT_CPU_OUT_OF_SPEC ]       = { 'S', ' ', false },
        [ TAINT_FORCED_RMMOD ]          = { 'R', ' ', false },
        [ TAINT_MACHINE_CHECK ]         = { 'M', ' ', false },
        [ TAINT_BAD_PAGE ]              = { 'B', ' ', false },
        [ TAINT_USER ]                  = { 'U', ' ', false },
        [ TAINT_DIE ]                   = { 'D', ' ', false },
        [ TAINT_OVERRIDDEN_ACPI_TABLE ] = { 'A', ' ', false },
        [ TAINT_WARN ]                  = { 'W', ' ', false },
        [ TAINT_CRAP ]                  = { 'C', ' ', true },
        [ TAINT_FIRMWARE_WORKAROUND ]   = { 'I', ' ', false },
        [ TAINT_OOT_MODULE ]            = { 'O', ' ', true },
        [ TAINT_UNSIGNED_MODULE ]       = { 'E', ' ', true },
        [ TAINT_SOFTLOCKUP ]            = { 'L', ' ', false },
        [ TAINT_LIVEPATCH ]             = { 'K', ' ', true },
        [ TAINT_AUX ]                   = { 'X', ' ', true },
        [ TAINT_RANDSTRUCT ]            = { 'T', ' ', true },
};

/**
 * print_tainted - return a string to represent the kernel taint state.
 *
 * For individual taint flag meanings, see Documentation/admin-guide/sysctl/kernel.rst
 *
 * The string is overwritten by the next call to print_tainted(),
 * but is always NULL terminated.
 */
const char *print_tainted(void)
{
        static char buf[TAINT_FLAGS_COUNT + sizeof("Tainted: ")];

        BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT);

        if (tainted_mask) {
                char *s;
                int i;

                s = buf + sprintf(buf, "Tainted: ");
                for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
                        const struct taint_flag *t = &taint_flags[i];
                        *s++ = test_bit(i, &tainted_mask) ?
                                        t->c_true : t->c_false;
                }
                *s = 0;
        } else
                snprintf(buf, sizeof(buf), "Not tainted");

        return buf;
}
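
/*
 * Example output (illustrative): a kernel carrying a proprietary module
 * that has also hit a WARN() would print something like
 * "Tainted: P        W", one character position per flag in the table
 * order above, with c_true shown for set flags and c_false otherwise.
 */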

int test_taint(unsigned flag)
{
        return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
        return tainted_mask;
}

/**
 * add_taint: add a taint flag if not already set.
 * @flag: one of the TAINT_* constants.
 * @lockdep_ok: whether lock debugging is still OK.
 *
 * If something bad has gone wrong, you'll want @lockdep_ok =
 * LOCKDEP_NOW_UNRELIABLE, but for some noteworthy-but-not-corrupting
 * cases it can be LOCKDEP_STILL_OK.
 */
void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
        if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
                pr_warn("Disabling lock debugging due to kernel taint\n");

        set_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(add_taint);
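
/*
 * Example (illustrative): a driver that works around a firmware quirk
 * can record the fact without turning off lock debugging:
 *
 *      add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 *
 * whereas suspected memory corruption should pass
 * LOCKDEP_NOW_UNRELIABLE so lockdep stops before emitting false
 * positives.
 */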

static void spin_msec(int msecs)
{
        int i;

        for (i = 0; i < msecs; i++) {
                touch_nmi_watchdog();
                mdelay(1);
        }
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
        unsigned long flags;
        static int spin_counter;

        if (!pause_on_oops)
                return;

        spin_lock_irqsave(&pause_on_oops_lock, flags);
        if (pause_on_oops_flag == 0) {
                /* This CPU may now print the oops message */
                pause_on_oops_flag = 1;
        } else {
                /* We need to stall this CPU */
                if (!spin_counter) {
                        /* This CPU gets to do the counting */
                        spin_counter = pause_on_oops;
                        do {
                                spin_unlock(&pause_on_oops_lock);
                                spin_msec(MSEC_PER_SEC);
                                spin_lock(&pause_on_oops_lock);
                        } while (--spin_counter);
                        pause_on_oops_flag = 0;
                } else {
                        /* This CPU waits for a different one */
                        while (spin_counter) {
                                spin_unlock(&pause_on_oops_lock);
                                spin_msec(1);
                                spin_lock(&pause_on_oops_lock);
                        }
                }
        }
        spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy..
 */
int oops_may_print(void)
{
        return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything.  If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option.  We do all
 * this to ensure that oopses don't scroll off the screen.  It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
        tracing_off();
        /* can't trust the integrity of the kernel anymore: */
        debug_locks_off();
        do_oops_enter_exit();
}

/*
 * 64-bit random ID for oopses:
 */
static u64 oops_id;

static int init_oops_id(void)
{
        if (!oops_id)
                get_random_bytes(&oops_id, sizeof(oops_id));
        else
                oops_id++;

        return 0;
}
late_initcall(init_oops_id);

void print_oops_end_marker(void)
{
        init_oops_id();
        pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
        do_oops_enter_exit();
        print_oops_end_marker();
        kmsg_dump(KMSG_DUMP_OOPS);
}
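
/*
 * Sketch of the calling convention (illustrative, loosely following how
 * arch die() handlers use this pair):
 *
 *      oops_enter();
 *      show_regs(regs);                // arch-specific reporting
 *      ...
 *      oops_exit();
 *      if (panic_on_oops)
 *              panic("Fatal exception");
 */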

struct warn_args {
        const char *fmt;
        va_list args;
};

void __warn(const char *file, int line, void *caller, unsigned taint,
            struct pt_regs *regs, struct warn_args *args)
{
        disable_trace_on_warning();

        if (file)
                pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
                        raw_smp_processor_id(), current->pid, file, line,
                        caller);
        else
                pr_warn("WARNING: CPU: %d PID: %d at %pS\n",
                        raw_smp_processor_id(), current->pid, caller);

        if (args)
                vprintk(args->fmt, args->args);

        if (panic_on_warn) {
                /*
                 * This thread may hit another WARN() in the panic path.
                 * Resetting this prevents additional WARN() from panicking the
                 * system on this thread.  Other threads are blocked by the
                 * panic_cpu check in panic().
                 */
                panic_on_warn = 0;
                panic("panic_on_warn set ...\n");
        }

        print_modules();

        if (regs)
                show_regs(regs);
        else
                dump_stack();

        print_irqtrace_events(current);

        print_oops_end_marker();

        /* Just a warning, don't kill lockdep. */
        add_taint(taint, LOCKDEP_STILL_OK);
}

#ifndef __WARN_FLAGS
void warn_slowpath_fmt(const char *file, int line, unsigned taint,
                       const char *fmt, ...)
{
        struct warn_args args;

        pr_warn(CUT_HERE);

        if (!fmt) {
                __warn(file, line, __builtin_return_address(0), taint,
                       NULL, NULL);
                return;
        }

        args.fmt = fmt;
        va_start(args.args, fmt);
        __warn(file, line, __builtin_return_address(0), taint, NULL, &args);
        va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt);
#else
void __warn_printk(const char *fmt, ...)
{
        va_list args;

        pr_warn(CUT_HERE);

        va_start(args, fmt);
        vprintk(fmt, args);
        va_end(args);
}
EXPORT_SYMBOL(__warn_printk);
#endif
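
/*
 * Both variants above sit underneath the WARN() family of macros from
 * <asm-generic/bug.h>; typical callers look like (illustrative):
 *
 *      WARN(bytes > limit, "request too large (%zu)", bytes);
 *      WARN_ONCE(!ptr, "missing context pointer");
 *
 * which print the CUT_HERE banner, the message and a backtrace, and
 * taint the kernel with TAINT_WARN.
 */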

#ifdef CONFIG_BUG

/* Support resetting WARN*_ONCE state */

static int clear_warn_once_set(void *data, u64 val)
{
        generic_bug_clear_once();
        memset(__start_once, 0, __end_once - __start_once);
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(clear_warn_once_fops, NULL, clear_warn_once_set,
                         "%lld\n");

static __init int register_warn_debugfs(void)
{
        /* Don't care about failure */
        debugfs_create_file_unsafe("clear_warn_once", 0200, NULL, NULL,
                                   &clear_warn_once_fops);
        return 0;
}

device_initcall(register_warn_debugfs);
#endif
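
/*
 * Example: once debugfs is mounted, the WARN*_ONCE state cleared by
 * clear_warn_once_set() can be re-armed from userspace:
 *
 *      echo 1 > /sys/kernel/debug/clear_warn_once
 */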

#ifdef CONFIG_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
__visible void __stack_chk_fail(void)
{
        panic("stack-protector: Kernel stack is corrupted in: %pB",
                __builtin_return_address(0));
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif
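
/*
 * Illustrative example of what trips the check (hypothetical code, not
 * from this file): the compiler places a canary between a function's
 * locals and its return address, so an overflow like this one ends up
 * in __stack_chk_fail() when the function returns:
 *
 *      void overflow_demo(void)
 *      {
 *              char buf[8];
 *              memset(buf, 0x41, 64);  // smashes the canary
 *      }
 */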

#ifdef CONFIG_ARCH_HAS_REFCOUNT
void refcount_error_report(struct pt_regs *regs, const char *err)
{
        WARN_RATELIMIT(1, "refcount_t %s at %pB in %s[%d], uid/euid: %u/%u\n",
                err, (void *)instruction_pointer(regs),
                current->comm, task_pid_nr(current),
                from_kuid_munged(&init_user_ns, current_uid()),
                from_kuid_munged(&init_user_ns, current_euid()));
}
#endif

core_param(panic, panic_timeout, int, 0644);
core_param(panic_print, panic_print, ulong, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
core_param(panic_on_warn, panic_on_warn, int, 0644);
core_param(crash_kexec_post_notifiers, crash_kexec_post_notifiers, bool, 0644);
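
/*
 * These core_param() knobs are exposed with mode 0644 under
 * /sys/module/kernel/parameters/, so each can be set on the boot
 * command line or flipped at runtime, e.g.:
 *
 *      panic_on_warn=1                                 (boot command line)
 *      echo 1 > /sys/module/kernel/parameters/panic_on_warn
 */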

static int __init oops_setup(char *s)
{
        if (!s)
                return -EINVAL;
        if (!strcmp(s, "panic"))
                panic_on_oops = 1;
        return 0;
}
early_param("oops", oops_setup);
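
/*
 * Example: booting with "oops=panic" makes every oops escalate to
 * panic(), which pairs well with kdump or "panic=" so the machine
 * captures a dump or reboots on the first failure instead of
 * continuing with a tainted kernel.
 */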
