root/drivers/cpuidle/cpuidle-pseries.c


DEFINITIONS

This source file includes the following definitions:
  1. idle_loop_prolog
  2. idle_loop_epilog
  3. snooze_loop
  4. check_and_cede_processor
  5. dedicated_cede_loop
  6. shared_cede_loop
  7. pseries_cpuidle_cpu_online
  8. pseries_cpuidle_cpu_dead
  9. pseries_cpuidle_driver_init
  10. pseries_idle_probe
  11. pseries_processor_idle_init

// SPDX-License-Identifier: GPL-2.0
/*
 *  cpuidle-pseries - idle state cpuidle driver.
 *  Adapted from drivers/idle/intel_idle.c and
 *  drivers/acpi/processor_idle.c
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

#include <asm/paca.h>
#include <asm/reg.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/runlatch.h>
#include <asm/plpar_wrappers.h>

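/*
 * Skeleton driver: the state table is copied into ->states[] by
 * pseries_cpuidle_driver_init() at boot, after pseries_idle_probe()
 * has picked the table that matches the partition type.
 */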
struct cpuidle_driver pseries_idle_driver = {
	.name             = "pseries_idle",
	.owner            = THIS_MODULE,
};

static int max_idle_state __read_mostly;
static struct cpuidle_state *cpuidle_state_table __read_mostly;
static u64 snooze_timeout __read_mostly;
static bool snooze_timeout_en __read_mostly;

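/*
 * Mark the CPU idle before entering a low-power loop: snapshot the
 * PURR (Processor Utilization of Resources Register) so the epilog
 * can account the cycles spent idle, and set the lppaca idle flag so
 * the hypervisor knows this virtual processor has no useful work.
 */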
static inline void idle_loop_prolog(unsigned long *in_purr)
{
	ppc64_runlatch_off();
	*in_purr = mfspr(SPRN_PURR);
	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
}

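/*
 * Leaving idle: credit the cycles spent idle (current PURR minus the
 * prolog snapshot) to the lppaca wait_state_cycles counter, kept
 * big-endian because the structure is shared with the hypervisor,
 * then clear the idle flag and turn the runlatch back on.
 */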
static inline void idle_loop_epilog(unsigned long in_purr)
{
	u64 wait_cycles;

	wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
	wait_cycles += mfspr(SPRN_PURR) - in_purr;
	get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
	get_lppaca()->idle = 0;

	ppc64_runlatch_on();
}

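/*
 * Shallowest state: busy-poll at low SMT priority with interrupts on.
 * TIF_POLLING_NRFLAG tells the scheduler that this CPU is polling
 * need_resched() and does not need a wakeup IPI.  If a deeper state
 * exists, the poll is bounded by snooze_timeout timebase ticks.
 */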
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;
	u64 snooze_exit_time;

	set_thread_flag(TIF_POLLING_NRFLAG);

	idle_loop_prolog(&in_purr);
	local_irq_enable();
	snooze_exit_time = get_tb() + snooze_timeout;

	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
		if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time) {
			/*
			 * Task has not woken up but we are exiting the polling
			 * loop anyway. Require a barrier after polling is
			 * cleared to order subsequent test of need_resched().
			 */
			clear_thread_flag(TIF_POLLING_NRFLAG);
			smp_mb();
			break;
		}
	}

	HMT_medium();
	clear_thread_flag(TIF_POLLING_NRFLAG);

	local_irq_disable();

	idle_loop_epilog(in_purr);

	return index;
}

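/*
 * Cede the virtual processor via the H_CEDE hypercall, but only after
 * prep_irq_for_idle() has reconciled the lazy (soft) interrupt state;
 * it returns false if an interrupt came in while we were soft-disabled,
 * in which case we skip the cede and go handle it instead.
 */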
static void check_and_cede_processor(void)
{
	/*
	 * Ensure our interrupt state is properly tracked, and check
	 * that no interrupt occurred while we were soft-disabled.
	 */
	if (prep_irq_for_idle()) {
		cede_processor();
#ifdef CONFIG_TRACE_IRQFLAGS
		/* Ensure that H_CEDE returns with IRQs on */
		if (WARN_ON(!(mfmsr() & MSR_EE)))
			__hard_irq_enable();
#endif
	}
}

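/*
 * Idle a dedicated-processor partition's CPU.  Setting
 * donate_dedicated_cpu tells the hypervisor that, while this CPU is
 * ceded, its cycles may be donated to the shared processor pool.
 */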
static int dedicated_cede_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);
	get_lppaca()->donate_dedicated_cpu = 1;

	HMT_medium();
	check_and_cede_processor();

	local_irq_disable();
	get_lppaca()->donate_dedicated_cpu = 0;

	idle_loop_epilog(in_purr);

	return index;
}

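/*
 * Idle a shared-processor partition's CPU: ceding returns the virtual
 * processor to the hypervisor's shared pool until an interrupt or a
 * prod from another processor wakes it.
 */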
static int shared_cede_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);

	/*
	 * Yield the processor to the hypervisor.  We return if an
	 * external interrupt occurs (interrupts are driven before we
	 * return here) or if a prod occurs from another processor.
	 * When returning here, external interrupts are enabled.
	 */
	check_and_cede_processor();

	local_irq_disable();
	idle_loop_epilog(in_purr);

	return index;
}

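/*
 * For both tables below, exit_latency and target_residency are in
 * microseconds, per the cpuidle core's convention.  Once registered,
 * the states appear under sysfs, e.g. (illustrative path and output):
 *
 *   # cat /sys/devices/system/cpu/cpu0/cpuidle/state1/name
 *   CEDE
 */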
/*
 * States for dedicated partition case.
 */
static struct cpuidle_state dedicated_states[] = {
	{ /* Snooze */
		.name = "snooze",
		.desc = "snooze",
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &snooze_loop },
	{ /* CEDE */
		.name = "CEDE",
		.desc = "CEDE",
		.exit_latency = 10,
		.target_residency = 100,
		.enter = &dedicated_cede_loop },
};

/*
 * States for shared partition case.
 */
static struct cpuidle_state shared_states[] = {
	{ /* Snooze */
		.name = "snooze",
		.desc = "snooze",
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &snooze_loop },
	{ /* Shared Cede */
		.name = "Shared Cede",
		.desc = "Shared Cede",
		.exit_latency = 10,
		.target_residency = 100,
		.enter = &shared_cede_loop },
};

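/*
 * CPU hotplug callbacks, registered from pseries_processor_idle_init():
 * enable or disable the per-CPU cpuidle device as CPUs come and go.
 */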
static int pseries_cpuidle_cpu_online(unsigned int cpu)
{
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	if (dev && cpuidle_get_driver()) {
		cpuidle_pause_and_lock();
		cpuidle_enable_device(dev);
		cpuidle_resume_and_unlock();
	}
	return 0;
}

static int pseries_cpuidle_cpu_dead(unsigned int cpu)
{
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	if (dev && cpuidle_get_driver()) {
		cpuidle_pause_and_lock();
		cpuidle_disable_device(dev);
		cpuidle_resume_and_unlock();
	}
	return 0;
}

/*
 * pseries_cpuidle_driver_init()
 * Copy the enabled states from the chosen table into the driver.
 */
static int pseries_cpuidle_driver_init(void)
{
	int idle_state;
	struct cpuidle_driver *drv = &pseries_idle_driver;

	drv->state_count = 0;

	for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
		/* Is the state not enabled? */
		if (cpuidle_state_table[idle_state].enter == NULL)
			continue;

		drv->states[drv->state_count] = /* structure copy */
			cpuidle_state_table[idle_state];

		drv->state_count += 1;
	}

	return 0;
}

/*
 * pseries_idle_probe()
 * Choose the state table for the shared versus dedicated partition case.
 */
static int pseries_idle_probe(void)
{
	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		return -ENODEV;

	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		/*
		 * Use local_paca instead of get_lppaca() since
		 * preemption is not disabled here, nor does it need to
		 * be: the shared-processor property is partition-wide,
		 * so lppaca_ptr from any CPU gives the same answer.
		 */
		if (lppaca_shared_proc(local_paca->lppaca_ptr)) {
			cpuidle_state_table = shared_states;
			max_idle_state = ARRAY_SIZE(shared_states);
		} else {
			cpuidle_state_table = dedicated_states;
			max_idle_state = ARRAY_SIZE(dedicated_states);
		}
	} else {
		return -ENODEV;
	}

	if (max_idle_state > 1) {
		snooze_timeout_en = true;
		snooze_timeout = cpuidle_state_table[1].target_residency *
				 tb_ticks_per_usec;
	}
	return 0;
}

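/*
 * Boot-time setup: pick the state table, build the driver, register it
 * (cpuidle_register() also allocates and registers the per-CPU
 * devices), then hook up the hotplug callbacks above.
 */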
static int __init pseries_processor_idle_init(void)
{
	int retval;

	retval = pseries_idle_probe();
	if (retval)
		return retval;

	pseries_cpuidle_driver_init();
	retval = cpuidle_register(&pseries_idle_driver, NULL);
	if (retval) {
		printk(KERN_DEBUG "Registration of pseries driver failed.\n");
		return retval;
	}

	retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					   "cpuidle/pseries:online",
					   pseries_cpuidle_cpu_online, NULL);
	WARN_ON(retval < 0);
	retval = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_DEAD,
					   "cpuidle/pseries:DEAD", NULL,
					   pseries_cpuidle_cpu_dead);
	WARN_ON(retval < 0);
	printk(KERN_DEBUG "pseries_idle_driver registered\n");
	return 0;
}

device_initcall(pseries_processor_idle_init);
