root/arch/arm64/kernel/smp.c

DEFINITIONS

This source file includes the following definitions:
  1. op_cpu_kill
  2. boot_secondary
  3. __cpu_up
  4. init_gic_priority_masking
  5. secondary_start_kernel
  6. op_cpu_disable
  7. __cpu_disable
  8. op_cpu_kill
  9. __cpu_die
  10. cpu_die
  11. cpu_die_early
  12. hyp_mode_check
  13. smp_cpus_done
  14. smp_prepare_boot_cpu
  15. of_get_cpu_mpidr
  16. is_mpidr_duplicate
  17. smp_cpu_setup
  18. acpi_cpu_get_madt_gicc
  19. acpi_map_gic_cpu_interface
  20. acpi_parse_gic_cpu_interface
  21. acpi_parse_and_init_cpus
  22. of_parse_and_init_cpus
  23. smp_init_cpus
  24. smp_prepare_cpus
  25. set_smp_cross_call
  26. smp_cross_call
  27. show_ipi_list
  28. smp_irq_stat_cpu
  29. arch_send_call_function_ipi_mask
  30. arch_send_call_function_single_ipi
  31. arch_send_wakeup_ipi_mask
  32. arch_irq_work_raise
  33. local_cpu_stop
  34. panic_smp_self_stop
  35. ipi_cpu_crash_stop
  36. handle_IPI
  37. smp_send_reschedule
  38. tick_broadcast
  39. num_other_online_cpus
  40. smp_send_stop
  41. crash_smp_send_stop
  42. smp_crash_stop_failed
  43. setup_profiling_timer
  44. have_cpu_die
  45. cpus_are_stuck_in_kernel

// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
#include <linux/kexec.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * As from 2.5, kernels no longer have an init_tasks structure, so we need
 * some other way of telling a new secondary core where to place its SVC
 * stack.
 */
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
int cpus_stuck_in_kernel;

enum ipi_msg_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
        IPI_CPU_CRASH_STOP,
        IPI_TIMER,
        IPI_IRQ_WORK,
        IPI_WAKEUP
};
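
/*
 * Each ipi_msg_type value is delivered to the target CPUs as a GIC
 * software-generated interrupt (SGI). The interrupt controller driver
 * registers the actual trigger function via set_smp_cross_call(), and
 * handle_IPI() below demultiplexes the message on the receiving end.
 */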

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
{
        return -ENOSYS;
}
#endif

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
        if (cpu_ops[cpu]->cpu_boot)
                return cpu_ops[cpu]->cpu_boot(cpu);

        return -EOPNOTSUPP;
}
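
/*
 * For illustration only: with the "psci" enable-method, cpu_boot amounts
 * roughly to
 *
 *        psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry));
 *
 * i.e. firmware powers the core on and it enters the kernel at
 * secondary_entry (see arch/arm64/kernel/psci.c for the real version).
 */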

static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int ret;
        long status;

        /*
         * We need to tell the secondary core where to find its stack and the
         * page tables.
         */
        secondary_data.task = idle;
        secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
        update_cpu_boot_status(CPU_MMU_OFF);
        __flush_dcache_area(&secondary_data, sizeof(secondary_data));

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                /*
                 * CPU was successfully started, wait for it to come online or
                 * time out.
                 */
                wait_for_completion_timeout(&cpu_running,
                                            msecs_to_jiffies(5000));

                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);
                        ret = -EIO;
                }
        } else {
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
                return ret;
        }

        secondary_data.task = NULL;
        secondary_data.stack = NULL;
        __flush_dcache_area(&secondary_data, sizeof(secondary_data));
        status = READ_ONCE(secondary_data.status);
        if (ret && status) {

                if (status == CPU_MMU_OFF)
                        status = READ_ONCE(__early_cpu_boot_status);

                switch (status & CPU_BOOT_STATUS_MASK) {
                default:
                        pr_err("CPU%u: failed in unknown state: 0x%lx\n",
                                        cpu, status);
                        cpus_stuck_in_kernel++;
                        break;
                case CPU_KILL_ME:
                        if (!op_cpu_kill(cpu)) {
                                pr_crit("CPU%u: died during early boot\n", cpu);
                                break;
                        }
                        pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
                        /* Fall through */
                case CPU_STUCK_IN_KERNEL:
                        pr_crit("CPU%u: is stuck in kernel\n", cpu);
                        if (status & CPU_STUCK_REASON_52_BIT_VA)
                                pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
                        if (status & CPU_STUCK_REASON_NO_GRAN)
                                pr_crit("CPU%u: does not support %luK granule\n", cpu, PAGE_SIZE / SZ_1K);
                        cpus_stuck_in_kernel++;
                        break;
                case CPU_PANIC_KERNEL:
                        panic("CPU%u detected unsupported configuration\n", cpu);
                }
        }

        return ret;
}

static void init_gic_priority_masking(void)
{
        u32 cpuflags;

        if (WARN_ON(!gic_enable_sre()))
                return;

        cpuflags = read_sysreg(daif);

        WARN_ON(!(cpuflags & PSR_I_BIT));

        gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
}
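
/*
 * Background: with irq priority masking ("pseudo-NMI"), normal interrupts
 * are masked by raising the GIC priority mask (PMR) rather than by
 * PSTATE.I, which leaves a higher-priority class of interrupts free to
 * act as NMIs. PSTATE.I is still set at this point (checked above), so
 * the PMR can be seeded safely before DAIF is later restored.
 */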

/*
 * This is the secondary CPU boot entry. We're using this CPU's idle
 * thread stack, but a set of temporary page tables.
 */
asmlinkage notrace void secondary_start_kernel(void)
{
        u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
        struct mm_struct *mm = &init_mm;
        unsigned int cpu;

        cpu = task_cpu(current);
        set_my_cpu_offset(per_cpu_offset(cpu));

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        mmgrab(mm);
        current->active_mm = mm;

        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_uninstall_idmap();

        if (system_uses_irq_prio_masking())
                init_gic_priority_masking();

        preempt_disable();
        trace_hardirqs_off();

        /*
         * If the system has established the capabilities, make sure
         * this CPU ticks all of those. If it doesn't, the CPU will
         * fail to come online.
         */
        check_local_cpu_capabilities();

        if (cpu_ops[cpu]->cpu_postboot)
                cpu_ops[cpu]->cpu_postboot();

        /*
         * Log the CPU info before it is marked online and might get read.
         */
        cpuinfo_store_cpu();

        /*
         * Enable GIC and timers.
         */
        notify_cpu_starting(cpu);

        store_cpu_topology(cpu);
        numa_add_cpu(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
         * the CPU migration code to notice that the CPU is online
         * before we continue.
         */
        pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n",
                                         cpu, (unsigned long)mpidr,
                                         read_cpuid_id());
        update_cpu_boot_status(CPU_BOOT_SUCCESS);
        set_cpu_online(cpu, true);
        complete(&cpu_running);

        local_daif_restore(DAIF_PROCCTX);

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
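
/*
 * The complete(&cpu_running) above pairs with the
 * wait_for_completion_timeout() in __cpu_up(): the secondary signals the
 * boot CPU only after marking itself online, so a timeout on the boot
 * CPU's side reliably means the secondary never reached that point.
 */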

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
        /*
         * If we don't have a cpu_die method, abort before we reach the point
         * of no return. CPU0 may not have a cpu_ops, so test for it.
         */
        if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
                return -EOPNOTSUPP;

        /*
         * We may need to abort a hot unplug for some other mechanism-specific
         * reason.
         */
        if (cpu_ops[cpu]->cpu_disable)
                return cpu_ops[cpu]->cpu_disable(cpu);

        return 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = op_cpu_disable(cpu);
        if (ret)
                return ret;

        remove_cpu_topology(cpu);
        numa_remove_cpu(cpu);

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        irq_migrate_all_off_this_cpu();

        return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
        /*
         * If we have no means of synchronising with the dying CPU, then assume
         * that it is really dead. We can only wait for an arbitrary length of
         * time and hope that it's dead, so let's skip the wait and just hope.
         */
        if (!cpu_ops[cpu]->cpu_kill)
                return 0;

        return cpu_ops[cpu]->cpu_kill(cpu);
}
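
/*
 * For illustration only: PSCI implements cpu_kill by polling
 * psci_ops.affinity_info() until firmware reports the core as off (or a
 * timeout expires). That is what gives __cpu_die() below its "has it
 * really left the kernel?" guarantee on such systems.
 */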

/*
 * Called on the thread which is asking for a CPU to be shut down;
 * waits until shutdown has completed, or times out.
 */
void __cpu_die(unsigned int cpu)
{
        int err;

        if (!cpu_wait_death(cpu, 5)) {
                pr_crit("CPU%u: cpu didn't die\n", cpu);
                return;
        }
        pr_notice("CPU%u: shutdown\n", cpu);

        /*
         * Now that the dying CPU is beyond the point of no return w.r.t.
         * in-kernel synchronisation, try to get the firmware to help us to
         * verify that it has really left the kernel before we consider
         * clobbering anything it might still be using.
         */
        err = op_cpu_kill(cpu);
        if (err)
                pr_warn("CPU%d may not have shut down cleanly: %d\n",
                        cpu, err);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 */
void cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        idle_task_exit();

        local_daif_mask();

        /* Tell __cpu_die() that this CPU is now safe to dispose of */
        (void)cpu_report_death();

        /*
         * Actually shutdown the CPU. This must never fail. The specific hotplug
         * mechanism must perform all required cache maintenance to ensure that
         * no dirty lines are lost in the process of shutting down the CPU.
         */
        cpu_ops[cpu]->cpu_die(cpu);

        BUG();
}
#endif

/*
 * Kill the calling secondary CPU, early in bringup before it is turned
 * online.
 */
void cpu_die_early(void)
{
        int cpu = smp_processor_id();

        pr_crit("CPU%d: will not boot\n", cpu);

        /* Mark this CPU absent */
        set_cpu_present(cpu, 0);

#ifdef CONFIG_HOTPLUG_CPU
        update_cpu_boot_status(CPU_KILL_ME);
        /* Check if we can park ourselves */
        if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
                cpu_ops[cpu]->cpu_die(cpu);
#endif
        update_cpu_boot_status(CPU_STUCK_IN_KERNEL);

        cpu_park_loop();
}

static void __init hyp_mode_check(void)
{
        if (is_hyp_mode_available())
                pr_info("CPU: All CPU(s) started at EL2\n");
        else if (is_hyp_mode_mismatched())
                WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
                           "CPU: CPUs started in inconsistent modes");
        else
                pr_info("CPU: All CPU(s) started at EL1\n");
}
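
/*
 * A mixed-EL boot (some CPUs entering at EL2, others at EL1) is out of
 * spec, hence the taint above; among other consequences, KVM requires
 * all CPUs to have started at EL2 in order to be usable.
 */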

void __init smp_cpus_done(unsigned int max_cpus)
{
        pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
        setup_cpu_features();
        hyp_mode_check();
        apply_alternatives_all();
        mark_linear_text_alias_ro();
}

void __init smp_prepare_boot_cpu(void)
{
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
        cpuinfo_store_boot_cpu();

        /*
         * We now know enough about the boot CPU to apply the
         * alternatives that cannot wait until interrupt handling
         * and/or scheduling is enabled.
         */
        apply_boot_alternatives();

        /* Conditionally switch to GIC PMR for interrupt masking */
        if (system_uses_irq_prio_masking())
                init_gic_priority_masking();
}

static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
        const __be32 *cell;
        u64 hwid;

        /*
         * A cpu node with missing "reg" property is
         * considered invalid to build a cpu_logical_map
         * entry.
         */
        cell = of_get_property(dn, "reg", NULL);
        if (!cell) {
                pr_err("%pOF: missing reg property\n", dn);
                return INVALID_HWID;
        }

        hwid = of_read_number(cell, of_n_addr_cells(dn));
        /*
         * Non affinity bits must be set to 0 in the DT
         */
        if (hwid & ~MPIDR_HWID_BITMASK) {
                pr_err("%pOF: invalid reg property\n", dn);
                return INVALID_HWID;
        }
        return hwid;
}
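
/*
 * For reference, a typical DT cpu node (illustrative values) looks like:
 *
 *        cpu@100 {
 *                device_type = "cpu";
 *                compatible = "arm,cortex-a53";
 *                reg = <0x100>;
 *                enable-method = "psci";
 *        };
 *
 * where "reg" holds the MPIDR_EL1 affinity fields read above.
 */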

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
        unsigned int i;

        for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
                if (cpu_logical_map(i) == hwid)
                        return true;
        return false;
}

/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
        if (cpu_read_ops(cpu))
                return -ENODEV;

        if (cpu_ops[cpu]->cpu_init(cpu))
                return -ENODEV;

        set_cpu_possible(cpu, true);

        return 0;
}

static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];

struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
{
        return &cpu_madt_gicc[cpu];
}

/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
        u64 hwid = processor->arm_mpidr;

        if (!(processor->flags & ACPI_MADT_ENABLED)) {
                pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
                return;
        }

        if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
                pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
                return;
        }

        if (is_mpidr_duplicate(cpu_count, hwid)) {
                pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
                return;
        }

        /* Check if GICC structure of boot CPU is available in the MADT */
        if (cpu_logical_map(0) == hwid) {
                if (bootcpu_valid) {
                        pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
                               hwid);
                        return;
                }
                bootcpu_valid = true;
                cpu_madt_gicc[0] = *processor;
                return;
        }

        if (cpu_count >= NR_CPUS)
                return;

        /* map the logical cpu id to cpu MPIDR */
        cpu_logical_map(cpu_count) = hwid;

        cpu_madt_gicc[cpu_count] = *processor;

        /*
         * Set-up the ACPI parking protocol cpu entries
         * while initializing the cpu_logical_map to
         * avoid parsing MADT entries multiple times for
         * nothing (ie a valid cpu_logical_map entry should
         * contain a valid parking protocol data set to
         * initialize the cpu if the parking protocol is
         * the only available enable method).
         */
        acpi_set_mailbox_entry(cpu_count, processor);

        cpu_count++;
}
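
/*
 * Note on numbering: logical CPU 0 is always the boot CPU, whose MPIDR
 * was recorded in cpu_logical_map(0) before the MADT walk; the remaining
 * logical ids are handed out in MADT traversal order, skipping disabled,
 * invalid and duplicate entries.
 */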

static int __init
acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header,
                             const unsigned long end)
{
        struct acpi_madt_generic_interrupt *processor;

        processor = (struct acpi_madt_generic_interrupt *)header;
        if (BAD_MADT_GICC_ENTRY(processor, end))
                return -EINVAL;

        acpi_table_print_madt_entry(&header->common);

        acpi_map_gic_cpu_interface(processor);

        return 0;
}

static void __init acpi_parse_and_init_cpus(void)
{
        int i;

        /*
         * do a walk of MADT to determine how many CPUs
         * we have including disabled CPUs, and get information
         * we need for SMP init.
         */
        acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
                                      acpi_parse_gic_cpu_interface, 0);

        /*
         * In ACPI, SMP and CPU NUMA information is provided in separate
         * static tables, namely the MADT and the SRAT.
         *
         * Thus, it is simpler to first create the cpu logical map through
         * an MADT walk and then map the logical cpus to their node ids
         * as separate steps.
         */
        acpi_map_cpus_to_nodes();

        for (i = 0; i < nr_cpu_ids; i++)
                early_map_cpu_to_node(i, acpi_numa_get_nid(i));
}
#else
#define acpi_parse_and_init_cpus(...)   do { } while (0)
#endif

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
        struct device_node *dn;

        for_each_of_cpu_node(dn) {
                u64 hwid = of_get_cpu_mpidr(dn);

                if (hwid == INVALID_HWID)
                        goto next;

                if (is_mpidr_duplicate(cpu_count, hwid)) {
                        pr_err("%pOF: duplicate cpu reg properties in the DT\n",
                                dn);
                        goto next;
                }

                /*
                 * The numbering scheme requires that the boot CPU
                 * must be assigned logical id 0. Record it so that
                 * the logical map built from DT is validated and can
                 * be used.
                 */
                if (hwid == cpu_logical_map(0)) {
                        if (bootcpu_valid) {
                                pr_err("%pOF: duplicate boot cpu reg property in DT\n",
                                        dn);
                                goto next;
                        }

                        bootcpu_valid = true;
                        early_map_cpu_to_node(0, of_node_to_nid(dn));

                        /*
                         * cpu_logical_map has already been
                         * initialized and the boot cpu doesn't need
                         * the enable-method so continue without
                         * incrementing cpu.
                         */
                        continue;
                }

                if (cpu_count >= NR_CPUS)
                        goto next;

                pr_debug("cpu logical map 0x%llx\n", hwid);
                cpu_logical_map(cpu_count) = hwid;

                early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
                cpu_count++;
        }
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
        int i;

        if (acpi_disabled)
                of_parse_and_init_cpus();
        else
                acpi_parse_and_init_cpus();

        if (cpu_count > nr_cpu_ids)
                pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
                        cpu_count, nr_cpu_ids);

        if (!bootcpu_valid) {
                pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
                return;
        }

        /*
         * We need to set the cpu_logical_map entries before enabling
         * the cpus so that cpu processor description entries (DT cpu nodes
         * and ACPI MADT entries) can be retrieved by matching the cpu hwid
         * with entries in cpu_logical_map while initializing the cpus.
         * If the cpu set-up fails, invalidate the cpu_logical_map entry.
         */
        for (i = 1; i < nr_cpu_ids; i++) {
                if (cpu_logical_map(i) != INVALID_HWID) {
                        if (smp_cpu_setup(i))
                                cpu_logical_map(i) = INVALID_HWID;
                }
        }
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        int err;
        unsigned int cpu;
        unsigned int this_cpu;

        init_cpu_topology();

        this_cpu = smp_processor_id();
        store_cpu_topology(this_cpu);
        numa_store_cpu_info(this_cpu);
        numa_add_cpu(this_cpu);

        /*
         * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
         * secondary CPUs present.
         */
        if (max_cpus == 0)
                return;

        /*
         * Initialise the present map (which describes the set of CPUs
         * actually populated at the present time) and release the
         * secondaries from the bootloader.
         */
        for_each_possible_cpu(cpu) {

                per_cpu(cpu_number, cpu) = cpu;

                if (cpu == smp_processor_id())
                        continue;

                if (!cpu_ops[cpu])
                        continue;

                err = cpu_ops[cpu]->cpu_prepare(cpu);
                if (err)
                        continue;

                set_cpu_present(cpu, true);
                numa_store_cpu_info(cpu);
        }
}

void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
        __smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)  [x] = s
        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
        S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"),
        S(IPI_TIMER, "Timer broadcast interrupts"),
        S(IPI_IRQ_WORK, "IRQ work interrupts"),
        S(IPI_WAKEUP, "CPU wake-up interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
        trace_ipi_raise(target, ipi_types[ipinr]);
        __smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
                           prec >= 4 ? " " : "");
                for_each_online_cpu(cpu)
                        seq_printf(p, "%10u ",
                                   __get_irq_stat(cpu, ipi_irqs[i]));
                seq_printf(p, "      %s\n", ipi_types[i]);
        }
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = 0;
        int i;

        for (i = 0; i < NR_IPI; i++)
                sum += __get_irq_stat(cpu, ipi_irqs[i]);

        return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_WAKEUP);
}
#endif

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
        if (__smp_cross_call)
                smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

static void local_cpu_stop(void)
{
        set_cpu_online(smp_processor_id(), false);

        local_daif_mask();
        sdei_mask_local_cpu();
        cpu_park_loop();
}

/*
 * We need to implement panic_smp_self_stop() for parallel panic() calls, so
 * that cpu_online_mask gets correctly updated and smp_send_stop() can skip
 * CPUs that have already stopped themselves.
 */
void panic_smp_self_stop(void)
{
        local_cpu_stop();
}

#ifdef CONFIG_KEXEC_CORE
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
#endif

static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_KEXEC_CORE
        crash_save_cpu(regs, cpu);

        atomic_dec(&waiting_for_crash_ipi);

        local_irq_disable();
        sdei_mask_local_cpu();

#ifdef CONFIG_HOTPLUG_CPU
        if (cpu_ops[cpu]->cpu_die)
                cpu_ops[cpu]->cpu_die(cpu);
#endif

        /* just in case */
        cpu_park_loop();
#endif
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct pt_regs *old_regs = set_irq_regs(regs);

        if ((unsigned)ipinr < NR_IPI) {
                trace_ipi_entry_rcuidle(ipi_types[ipinr]);
                __inc_irq_stat(cpu, ipi_irqs[ipinr]);
        }

        switch (ipinr) {
        case IPI_RESCHEDULE:
                scheduler_ipi();
                break;

        case IPI_CALL_FUNC:
                irq_enter();
                generic_smp_call_function_interrupt();
                irq_exit();
                break;

        case IPI_CPU_STOP:
                irq_enter();
                local_cpu_stop();
                irq_exit();
                break;

        case IPI_CPU_CRASH_STOP:
                if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
                        irq_enter();
                        ipi_cpu_crash_stop(cpu, regs);

                        unreachable();
                }
                break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case IPI_TIMER:
                irq_enter();
                tick_receive_broadcast();
                irq_exit();
                break;
#endif

#ifdef CONFIG_IRQ_WORK
        case IPI_IRQ_WORK:
                irq_enter();
                irq_work_run();
                irq_exit();
                break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
        case IPI_WAKEUP:
                WARN_ONCE(!acpi_parking_protocol_valid(cpu),
                          "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
                          cpu);
                break;
#endif

        default:
                pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
                break;
        }

        if ((unsigned)ipinr < NR_IPI)
                trace_ipi_exit_rcuidle(ipi_types[ipinr]);
        set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_TIMER);
}
#endif

/*
 * The number of CPUs online, not counting this CPU (which may not be
 * fully online and so not counted in num_online_cpus()).
 */
static inline unsigned int num_other_online_cpus(void)
{
        unsigned int this_cpu_online = cpu_online(smp_processor_id());

        return num_online_cpus() - this_cpu_online;
}

void smp_send_stop(void)
{
        unsigned long timeout;

        if (num_other_online_cpus()) {
                cpumask_t mask;

                cpumask_copy(&mask, cpu_online_mask);
                cpumask_clear_cpu(smp_processor_id(), &mask);

                if (system_state <= SYSTEM_RUNNING)
                        pr_crit("SMP: stopping secondary CPUs\n");
                smp_cross_call(&mask, IPI_CPU_STOP);
        }

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_other_online_cpus() && timeout--)
                udelay(1);

        if (num_other_online_cpus())
                pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
                        cpumask_pr_args(cpu_online_mask));

        sdei_mask_local_cpu();
}

#ifdef CONFIG_KEXEC_CORE
void crash_smp_send_stop(void)
{
        static int cpus_stopped;
        cpumask_t mask;
        unsigned long timeout;

        /*
         * This function can be called twice in panic path, but obviously
         * we execute this only once.
         */
        if (cpus_stopped)
                return;

        cpus_stopped = 1;

        /*
         * If this cpu is the only one alive at this point in time, online or
         * not, there are no stop messages to be sent around, so just back out.
         */
        if (num_other_online_cpus() == 0) {
                sdei_mask_local_cpu();
                return;
        }

        cpumask_copy(&mask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &mask);

        atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());

        pr_crit("SMP: stopping secondary CPUs\n");
        smp_cross_call(&mask, IPI_CPU_CRASH_STOP);

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
                udelay(1);

        if (atomic_read(&waiting_for_crash_ipi) > 0)
                pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
                        cpumask_pr_args(&mask));

        sdei_mask_local_cpu();
}

bool smp_crash_stop_failed(void)
{
        return (atomic_read(&waiting_for_crash_ipi) > 0);
}
#endif

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

static bool have_cpu_die(void)
{
#ifdef CONFIG_HOTPLUG_CPU
        int any_cpu = raw_smp_processor_id();

        if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
                return true;
#endif
        return false;
}

bool cpus_are_stuck_in_kernel(void)
{
        bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());

        return !!cpus_stuck_in_kernel || smp_spin_tables;
}
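
/*
 * cpus_are_stuck_in_kernel() lets code such as kexec and hibernation
 * refuse to proceed while CPUs may still be spinning in kernel text:
 * either CPUs that failed early boot above, or (conservatively) any
 * secondary on a system without a cpu_die method, e.g. spin-table.
 */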
