root/drivers/acpi/cppc_acpi.c


DEFINITIONS

This source file includes the following definitions.
  1. show_feedback_ctrs
  2. check_pcc_chan
  3. send_pcc_cmd
  4. cppc_chan_tx_done
  5. acpi_get_psd
  6. acpi_get_psd_map
  7. register_pcc_channel
  8. cpc_ffh_supported
  9. pcc_data_alloc
  10. is_cppc_supported
  11. acpi_cppc_processor_probe
  12. acpi_cppc_processor_exit
  13. cpc_read_ffh
  14. cpc_write_ffh
  15. cpc_read
  16. cpc_write
  17. cppc_get_desired_perf
  18. cppc_get_perf_caps
  19. cppc_get_perf_ctrs
  20. cppc_set_perf
  21. cppc_get_transition_latency

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
   4  *
   5  * (C) Copyright 2014, 2015 Linaro Ltd.
   6  * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
   7  *
   8  * CPPC describes a few methods for controlling CPU performance using
   9  * information from a per CPU table called CPC. This table is described in
  10  * the ACPI v5.0+ specification. The table consists of a list of
  11  * registers which may be memory mapped or hardware registers and also may
  12  * include some static integer values.
  13  *
   14  * CPU performance is on an abstract continuous scale as opposed to a discretized
  15  * P-state scale which is tied to CPU frequency only. In brief, the basic
  16  * operation involves:
  17  *
  18  * - OS makes a CPU performance request. (Can provide min and max bounds)
  19  *
  20  * - Platform (such as BMC) is free to optimize request within requested bounds
  21  *   depending on power/thermal budgets etc.
  22  *
  23  * - Platform conveys its decision back to OS
  24  *
  25  * The communication between OS and platform occurs through another medium
   26  * called the Platform Communication Channel (PCC). This is a generic mailbox-like
  27  * mechanism which includes doorbell semantics to indicate register updates.
  28  * See drivers/mailbox/pcc.c for details on PCC.
  29  *
  30  * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
  31  * above specifications.
  32  */
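
To make the flow above concrete, here is a minimal, hypothetical sketch (not part
of this file) of how a cpufreq-style consumer might use the interfaces exported
below: it reads the capability range with cppc_get_perf_caps() and then programs
a desired performance level with cppc_set_perf().

#include <acpi/cppc_acpi.h>

/* Illustrative only: request nominal performance on one CPU. */
static int example_request_nominal_perf(int cpu)
{
        struct cppc_perf_caps caps = {0};
        struct cppc_perf_ctrls ctrls = {0};
        int ret;

        /* Read highest/nominal/lowest perf etc. from the _CPC registers. */
        ret = cppc_get_perf_caps(cpu, &caps);
        if (ret)
                return ret;

        /* The desired value should lie within [lowest_perf, highest_perf]. */
        ctrls.desired_perf = caps.nominal_perf;

        /* Writes the Desired Performance register (via PCC if needed). */
        return cppc_set_perf(cpu, &ctrls);
}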
  33 
  34 #define pr_fmt(fmt)     "ACPI CPPC: " fmt
  35 
  36 #include <linux/cpufreq.h>
  37 #include <linux/delay.h>
  38 #include <linux/iopoll.h>
  39 #include <linux/ktime.h>
  40 #include <linux/rwsem.h>
  41 #include <linux/wait.h>
  42 
  43 #include <acpi/cppc_acpi.h>
  44 
  45 struct cppc_pcc_data {
  46         struct mbox_chan *pcc_channel;
  47         void __iomem *pcc_comm_addr;
  48         bool pcc_channel_acquired;
  49         unsigned int deadline_us;
  50         unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
  51 
  52         bool pending_pcc_write_cmd;     /* Any pending/batched PCC write cmds? */
  53         bool platform_owns_pcc;         /* Ownership of PCC subspace */
  54         unsigned int pcc_write_cnt;     /* Running count of PCC write commands */
  55 
  56         /*
  57          * Lock to provide controlled access to the PCC channel.
  58          *
   59          * For performance critical use cases (currently cppc_set_perf)
   60          *      We need to take the read_lock and check if the channel
   61          * belongs to the OSPM before reading or writing to the PCC subspace
   62          *      We need to take the write_lock before transferring channel
   63          * ownership to the platform via a doorbell
   64          *      This allows us to batch a number of CPPC requests if they happen
   65          * to originate at about the same time
   66          *
   67          * For non-performance critical use cases (init)
   68          *      Take the write_lock for all purposes, which gives exclusive access
  69          */
  70         struct rw_semaphore pcc_lock;
  71 
  72         /* Wait queue for CPUs whose requests were batched */
  73         wait_queue_head_t pcc_write_wait_q;
  74         ktime_t last_cmd_cmpl_time;
  75         ktime_t last_mpar_reset;
  76         int mpar_count;
  77         int refcount;
  78 };
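
/*
 * A simplified sketch of the pcc_lock protocol described above (the real
 * sequences live in cppc_set_perf() and cppc_get_perf_caps() below):
 *
 *   Performance critical path (cppc_set_perf):
 *     down_read(&pcc_ss_data->pcc_lock);
 *     if (pcc_ss_data->platform_owns_pcc)
 *             check_pcc_chan(pcc_ss_id, false);   // wait for OSPM ownership
 *     cpc_write(cpu, desired_reg, val);           // many CPUs can batch here
 *     up_read(&pcc_ss_data->pcc_lock);
 *
 *   Ownership transfer / init path:
 *     down_write(&pcc_ss_data->pcc_lock);
 *     send_pcc_cmd(pcc_ss_id, CMD_WRITE);         // ring the doorbell once
 *     up_write(&pcc_ss_data->pcc_lock);
 */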
  79 
  80 /* Array to represent the PCC channel per subspace ID */
  81 static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
  82 /* The cpu_pcc_subspace_idx contains per CPU subspace ID */
  83 static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
  84 
  85 /*
  86  * The cpc_desc structure contains the ACPI register details
  87  * as described in the per CPU _CPC tables. The details
  88  * include the type of register (e.g. PCC, System IO, FFH etc.)
   89  * and destination addresses which let us READ/WRITE CPU performance
  90  * information using the appropriate I/O methods.
  91  */
  92 static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
  93 
   94 /* PCC mapped address + 8-byte header (signature, command, status) + offset within PCC subspace */
  95 #define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
  96                                                 0x8 + (offs))
  97 
  98 /* Check if a CPC register is in PCC */
  99 #define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&             \
 100                                 (cpc)->cpc_entry.reg.space_id ==        \
 101                                 ACPI_ADR_SPACE_PLATFORM_COMM)
 102 
  103 /* Evaluates to True if reg is a NULL register descriptor */
 104 #define IS_NULL_REG(reg) ((reg)->space_id ==  ACPI_ADR_SPACE_SYSTEM_MEMORY && \
 105                                 (reg)->address == 0 &&                  \
 106                                 (reg)->bit_width == 0 &&                \
 107                                 (reg)->bit_offset == 0 &&               \
 108                                 (reg)->access_width == 0)
 109 
  110 /* Evaluates to True if an optional cpc field is supported */
 111 #define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?          \
 112                                 !!(cpc)->cpc_entry.int_value :          \
 113                                 !IS_NULL_REG(&(cpc)->cpc_entry.reg))
 114 /*
 115  * Arbitrary Retries in case the remote processor is slow to respond
 116  * to PCC commands. Keeping it high enough to cover emulators where
 117  * the processors run painfully slow.
 118  */
 119 #define NUM_RETRIES 500ULL
 120 
 121 struct cppc_attr {
 122         struct attribute attr;
 123         ssize_t (*show)(struct kobject *kobj,
 124                         struct attribute *attr, char *buf);
 125         ssize_t (*store)(struct kobject *kobj,
 126                         struct attribute *attr, const char *c, ssize_t count);
 127 };
 128 
 129 #define define_one_cppc_ro(_name)               \
 130 static struct cppc_attr _name =                 \
 131 __ATTR(_name, 0444, show_##_name, NULL)
 132 
 133 #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
 134 
 135 #define show_cppc_data(access_fn, struct_name, member_name)             \
 136         static ssize_t show_##member_name(struct kobject *kobj,         \
 137                                         struct attribute *attr, char *buf) \
 138         {                                                               \
 139                 struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);           \
 140                 struct struct_name st_name = {0};                       \
 141                 int ret;                                                \
 142                                                                         \
 143                 ret = access_fn(cpc_ptr->cpu_id, &st_name);             \
 144                 if (ret)                                                \
 145                         return ret;                                     \
 146                                                                         \
 147                 return scnprintf(buf, PAGE_SIZE, "%llu\n",              \
 148                                 (u64)st_name.member_name);              \
 149         }                                                               \
 150         define_one_cppc_ro(member_name)
 151 
 152 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
 153 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
 154 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
 155 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
 156 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
 157 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
 158 
 159 show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
 160 show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
 161 
 162 static ssize_t show_feedback_ctrs(struct kobject *kobj,
 163                 struct attribute *attr, char *buf)
 164 {
 165         struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
 166         struct cppc_perf_fb_ctrs fb_ctrs = {0};
 167         int ret;
 168 
 169         ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
 170         if (ret)
 171                 return ret;
 172 
 173         return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
 174                         fb_ctrs.reference, fb_ctrs.delivered);
 175 }
 176 define_one_cppc_ro(feedback_ctrs);
 177 
 178 static struct attribute *cppc_attrs[] = {
 179         &feedback_ctrs.attr,
 180         &reference_perf.attr,
 181         &wraparound_time.attr,
 182         &highest_perf.attr,
 183         &lowest_perf.attr,
 184         &lowest_nonlinear_perf.attr,
 185         &nominal_perf.attr,
 186         &nominal_freq.attr,
 187         &lowest_freq.attr,
 188         NULL
 189 };
 190 
 191 static struct kobj_type cppc_ktype = {
 192         .sysfs_ops = &kobj_sysfs_ops,
 193         .default_attrs = cppc_attrs,
 194 };
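
/*
 * These attributes are registered per logical CPU by
 * acpi_cppc_processor_probe() via kobject_init_and_add(..., "acpi_cppc")
 * under the CPU device kobject, so they typically appear as read-only
 * files such as /sys/devices/system/cpu/cpu<N>/acpi_cppc/highest_perf.
 */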
 195 
 196 static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
 197 {
 198         int ret, status;
 199         struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 200         struct acpi_pcct_shared_memory __iomem *generic_comm_base =
 201                 pcc_ss_data->pcc_comm_addr;
 202 
 203         if (!pcc_ss_data->platform_owns_pcc)
 204                 return 0;
 205 
 206         /*
  207          * Poll the PCC status register every 3 us (delay_us) for a maximum of
  208          * deadline_us (timeout_us) until the PCC command complete bit is set (cond)
 209          */
 210         ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
 211                                         status & PCC_CMD_COMPLETE_MASK, 3,
 212                                         pcc_ss_data->deadline_us);
 213 
 214         if (likely(!ret)) {
 215                 pcc_ss_data->platform_owns_pcc = false;
 216                 if (chk_err_bit && (status & PCC_ERROR_MASK))
 217                         ret = -EIO;
 218         }
 219 
 220         if (unlikely(ret))
 221                 pr_err("PCC check channel failed for ss: %d. ret=%d\n",
 222                        pcc_ss_id, ret);
 223 
 224         return ret;
 225 }
 226 
 227 /*
  228  * This function transfers the ownership of the PCC to the platform,
  229  * so it must be called while holding the write_lock (pcc_lock).
 230  */
 231 static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
 232 {
 233         int ret = -EIO, i;
 234         struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 235         struct acpi_pcct_shared_memory *generic_comm_base =
 236                 (struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
 237         unsigned int time_delta;
 238 
 239         /*
 240          * For CMD_WRITE we know for a fact the caller should have checked
 241          * the channel before writing to PCC space
 242          */
 243         if (cmd == CMD_READ) {
 244                 /*
 245                  * If there are pending cpc_writes, then we stole the channel
 246                  * before write completion, so first send a WRITE command to
 247                  * platform
 248                  */
 249                 if (pcc_ss_data->pending_pcc_write_cmd)
 250                         send_pcc_cmd(pcc_ss_id, CMD_WRITE);
 251 
 252                 ret = check_pcc_chan(pcc_ss_id, false);
 253                 if (ret)
 254                         goto end;
 255         } else /* CMD_WRITE */
 256                 pcc_ss_data->pending_pcc_write_cmd = FALSE;
 257 
 258         /*
  259          * Handle the Minimum Request Turnaround Time (MRTT)
 260          * "The minimum amount of time that OSPM must wait after the completion
 261          * of a command before issuing the next command, in microseconds"
 262          */
 263         if (pcc_ss_data->pcc_mrtt) {
 264                 time_delta = ktime_us_delta(ktime_get(),
 265                                             pcc_ss_data->last_cmd_cmpl_time);
 266                 if (pcc_ss_data->pcc_mrtt > time_delta)
 267                         udelay(pcc_ss_data->pcc_mrtt - time_delta);
 268         }
 269 
 270         /*
  271          * Handle the non-zero Maximum Periodic Access Rate (MPAR)
 272          * "The maximum number of periodic requests that the subspace channel can
 273          * support, reported in commands per minute. 0 indicates no limitation."
 274          *
  275          * This parameter should ideally be zero or large enough that it can
  276          * handle the maximum number of requests that all the cores in the system can
  277          * collectively generate. If it is not, we will follow the spec and just
  278          * not send the request to the platform after hitting the MPAR limit in
  279          * any 60s window.
 280          */
 281         if (pcc_ss_data->pcc_mpar) {
 282                 if (pcc_ss_data->mpar_count == 0) {
 283                         time_delta = ktime_ms_delta(ktime_get(),
 284                                                     pcc_ss_data->last_mpar_reset);
 285                         if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
 286                                 pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
 287                                          pcc_ss_id);
 288                                 ret = -EIO;
 289                                 goto end;
 290                         }
 291                         pcc_ss_data->last_mpar_reset = ktime_get();
 292                         pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
 293                 }
 294                 pcc_ss_data->mpar_count--;
 295         }
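
        /*
         * Illustrative example: with pcc_mpar = 100, at most 100 doorbell
         * rings are allowed per 60 s window; once mpar_count reaches zero
         * within the window, further commands fail here with -EIO until the
         * window expires and the count is reloaded.
         */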
 296 
 297         /* Write to the shared comm region. */
 298         writew_relaxed(cmd, &generic_comm_base->command);
 299 
 300         /* Flip CMD COMPLETE bit */
 301         writew_relaxed(0, &generic_comm_base->status);
 302 
 303         pcc_ss_data->platform_owns_pcc = true;
 304 
 305         /* Ring doorbell */
 306         ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
 307         if (ret < 0) {
 308                 pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
 309                        pcc_ss_id, cmd, ret);
 310                 goto end;
 311         }
 312 
  313         /* Wait for completion and check for the PCC error bit */
 314         ret = check_pcc_chan(pcc_ss_id, true);
 315 
 316         if (pcc_ss_data->pcc_mrtt)
 317                 pcc_ss_data->last_cmd_cmpl_time = ktime_get();
 318 
 319         if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
 320                 mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
 321         else
 322                 mbox_client_txdone(pcc_ss_data->pcc_channel, ret);
 323 
 324 end:
 325         if (cmd == CMD_WRITE) {
 326                 if (unlikely(ret)) {
 327                         for_each_possible_cpu(i) {
 328                                 struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
 329                                 if (!desc)
 330                                         continue;
 331 
 332                                 if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
 333                                         desc->write_cmd_status = ret;
 334                         }
 335                 }
 336                 pcc_ss_data->pcc_write_cnt++;
 337                 wake_up_all(&pcc_ss_data->pcc_write_wait_q);
 338         }
 339 
 340         return ret;
 341 }
 342 
 343 static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
 344 {
 345         if (ret < 0)
 346                 pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
 347                                 *(u16 *)msg, ret);
 348         else
 349                 pr_debug("TX completed. CMD sent:%x, ret:%d\n",
 350                                 *(u16 *)msg, ret);
 351 }
 352 
 353 struct mbox_client cppc_mbox_cl = {
 354         .tx_done = cppc_chan_tx_done,
 355         .knows_txdone = true,
 356 };
 357 
 358 static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
 359 {
 360         int result = -EFAULT;
 361         acpi_status status = AE_OK;
 362         struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
 363         struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
 364         struct acpi_buffer state = {0, NULL};
 365         union acpi_object  *psd = NULL;
 366         struct acpi_psd_package *pdomain;
 367 
 368         status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
 369                                             &buffer, ACPI_TYPE_PACKAGE);
 370         if (status == AE_NOT_FOUND)     /* _PSD is optional */
 371                 return 0;
 372         if (ACPI_FAILURE(status))
 373                 return -ENODEV;
 374 
 375         psd = buffer.pointer;
 376         if (!psd || psd->package.count != 1) {
 377                 pr_debug("Invalid _PSD data\n");
 378                 goto end;
 379         }
 380 
 381         pdomain = &(cpc_ptr->domain_info);
 382 
 383         state.length = sizeof(struct acpi_psd_package);
 384         state.pointer = pdomain;
 385 
 386         status = acpi_extract_package(&(psd->package.elements[0]),
 387                 &format, &state);
 388         if (ACPI_FAILURE(status)) {
 389                 pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
 390                 goto end;
 391         }
 392 
 393         if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
 394                 pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
 395                 goto end;
 396         }
 397 
 398         if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
 399                 pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
 400                 goto end;
 401         }
 402 
 403         if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
 404             pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
 405             pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
 406                 pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
 407                 goto end;
 408         }
 409 
 410         result = 0;
 411 end:
 412         kfree(buffer.pointer);
 413         return result;
 414 }
 415 
 416 /**
 417  * acpi_get_psd_map - Map the CPUs in a common freq domain.
 418  * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 419  *
 420  *      Return: 0 for success or negative value for err.
 421  */
 422 int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
 423 {
 424         int count_target;
 425         int retval = 0;
 426         unsigned int i, j;
 427         cpumask_var_t covered_cpus;
 428         struct cppc_cpudata *pr, *match_pr;
 429         struct acpi_psd_package *pdomain;
 430         struct acpi_psd_package *match_pdomain;
 431         struct cpc_desc *cpc_ptr, *match_cpc_ptr;
 432 
 433         if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
 434                 return -ENOMEM;
 435 
 436         /*
 437          * Now that we have _PSD data from all CPUs, let's setup P-state
 438          * domain info.
 439          */
 440         for_each_possible_cpu(i) {
 441                 pr = all_cpu_data[i];
 442                 if (!pr)
 443                         continue;
 444 
 445                 if (cpumask_test_cpu(i, covered_cpus))
 446                         continue;
 447 
 448                 cpc_ptr = per_cpu(cpc_desc_ptr, i);
 449                 if (!cpc_ptr) {
 450                         retval = -EFAULT;
 451                         goto err_ret;
 452                 }
 453 
 454                 pdomain = &(cpc_ptr->domain_info);
 455                 cpumask_set_cpu(i, pr->shared_cpu_map);
 456                 cpumask_set_cpu(i, covered_cpus);
 457                 if (pdomain->num_processors <= 1)
 458                         continue;
 459 
 460                 /* Validate the Domain info */
 461                 count_target = pdomain->num_processors;
 462                 if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
 463                         pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 464                 else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
 465                         pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
 466                 else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
 467                         pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;
 468 
 469                 for_each_possible_cpu(j) {
 470                         if (i == j)
 471                                 continue;
 472 
 473                         match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
 474                         if (!match_cpc_ptr) {
 475                                 retval = -EFAULT;
 476                                 goto err_ret;
 477                         }
 478 
 479                         match_pdomain = &(match_cpc_ptr->domain_info);
 480                         if (match_pdomain->domain != pdomain->domain)
 481                                 continue;
 482 
 483                         /* Here i and j are in the same domain */
 484                         if (match_pdomain->num_processors != count_target) {
 485                                 retval = -EFAULT;
 486                                 goto err_ret;
 487                         }
 488 
 489                         if (pdomain->coord_type != match_pdomain->coord_type) {
 490                                 retval = -EFAULT;
 491                                 goto err_ret;
 492                         }
 493 
 494                         cpumask_set_cpu(j, covered_cpus);
 495                         cpumask_set_cpu(j, pr->shared_cpu_map);
 496                 }
 497 
 498                 for_each_possible_cpu(j) {
 499                         if (i == j)
 500                                 continue;
 501 
 502                         match_pr = all_cpu_data[j];
 503                         if (!match_pr)
 504                                 continue;
 505 
 506                         match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
 507                         if (!match_cpc_ptr) {
 508                                 retval = -EFAULT;
 509                                 goto err_ret;
 510                         }
 511 
 512                         match_pdomain = &(match_cpc_ptr->domain_info);
 513                         if (match_pdomain->domain != pdomain->domain)
 514                                 continue;
 515 
 516                         match_pr->shared_type = pr->shared_type;
 517                         cpumask_copy(match_pr->shared_cpu_map,
 518                                      pr->shared_cpu_map);
 519                 }
 520         }
 521 
 522 err_ret:
 523         for_each_possible_cpu(i) {
 524                 pr = all_cpu_data[i];
 525                 if (!pr)
 526                         continue;
 527 
 528                 /* Assume no coordination on any error parsing domain info */
 529                 if (retval) {
 530                         cpumask_clear(pr->shared_cpu_map);
 531                         cpumask_set_cpu(i, pr->shared_cpu_map);
 532                         pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 533                 }
 534         }
 535 
 536         free_cpumask_var(covered_cpus);
 537         return retval;
 538 }
 539 EXPORT_SYMBOL_GPL(acpi_get_psd_map);
 540 
 541 static int register_pcc_channel(int pcc_ss_idx)
 542 {
 543         struct acpi_pcct_hw_reduced *cppc_ss;
 544         u64 usecs_lat;
 545 
 546         if (pcc_ss_idx >= 0) {
 547                 pcc_data[pcc_ss_idx]->pcc_channel =
 548                         pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
 549 
 550                 if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
 551                         pr_err("Failed to find PCC channel for subspace %d\n",
 552                                pcc_ss_idx);
 553                         return -ENODEV;
 554                 }
 555 
 556                 /*
 557                  * The PCC mailbox controller driver should
 558                  * have parsed the PCCT (global table of all
 559                  * PCC channels) and stored pointers to the
 560                  * subspace communication region in con_priv.
 561                  */
 562                 cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;
 563 
 564                 if (!cppc_ss) {
 565                         pr_err("No PCC subspace found for %d CPPC\n",
 566                                pcc_ss_idx);
 567                         return -ENODEV;
 568                 }
 569 
 570                 /*
 571                  * cppc_ss->latency is just a Nominal value. In reality
 572                  * the remote processor could be much slower to reply.
 573                  * So add an arbitrary amount of wait on top of Nominal.
 574                  */
 575                 usecs_lat = NUM_RETRIES * cppc_ss->latency;
 576                 pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
 577                 pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
 578                 pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
 579                 pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;
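
                /*
                 * Illustrative numbers: a PCCT nominal latency of 40 us gives
                 * a deadline_us of 500 (NUM_RETRIES) * 40 = 20000 us (20 ms)
                 * for the status poll in check_pcc_chan().
                 */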
 580 
 581                 pcc_data[pcc_ss_idx]->pcc_comm_addr =
 582                         acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
 583                 if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
 584                         pr_err("Failed to ioremap PCC comm region mem for %d\n",
 585                                pcc_ss_idx);
 586                         return -ENOMEM;
 587                 }
 588 
 589                 /* Set flag so that we don't come here for each CPU. */
 590                 pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
 591         }
 592 
 593         return 0;
 594 }
 595 
 596 /**
 597  * cpc_ffh_supported() - check if FFH reading supported
 598  *
 599  * Check if the architecture has support for functional fixed hardware
 600  * read/write capability.
 601  *
 602  * Return: true for supported, false for not supported
 603  */
 604 bool __weak cpc_ffh_supported(void)
 605 {
 606         return false;
 607 }
 608 
 609 /**
 610  * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 611  *
 612  * Check and allocate the cppc_pcc_data memory.
  613  * In some processor configurations it is possible that the same subspace
 614  * is shared between multiple CPUs. This is seen especially in CPUs
 615  * with hardware multi-threading support.
 616  *
 617  * Return: 0 for success, errno for failure
 618  */
 619 int pcc_data_alloc(int pcc_ss_id)
 620 {
 621         if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
 622                 return -EINVAL;
 623 
 624         if (pcc_data[pcc_ss_id]) {
 625                 pcc_data[pcc_ss_id]->refcount++;
 626         } else {
 627                 pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
 628                                               GFP_KERNEL);
 629                 if (!pcc_data[pcc_ss_id])
 630                         return -ENOMEM;
 631                 pcc_data[pcc_ss_id]->refcount++;
 632         }
 633 
 634         return 0;
 635 }
 636 
 637 /* Check if CPPC revision + num_ent combination is supported */
 638 static bool is_cppc_supported(int revision, int num_ent)
 639 {
 640         int expected_num_ent;
 641 
 642         switch (revision) {
 643         case CPPC_V2_REV:
 644                 expected_num_ent = CPPC_V2_NUM_ENT;
 645                 break;
 646         case CPPC_V3_REV:
 647                 expected_num_ent = CPPC_V3_NUM_ENT;
 648                 break;
 649         default:
 650                 pr_debug("Firmware exports unsupported CPPC revision: %d\n",
 651                         revision);
 652                 return false;
 653         }
 654 
 655         if (expected_num_ent != num_ent) {
 656                 pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
 657                         num_ent, expected_num_ent, revision);
 658                 return false;
 659         }
 660 
 661         return true;
 662 }
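
/*
 * For reference, the expected entry counts come from include/acpi/cppc_acpi.h
 * (CPPC_V2_NUM_ENT is 21 and CPPC_V3_NUM_ENT is 23 at the time of writing);
 * the two extra v3 entries are the Lowest Frequency and Nominal Frequency
 * registers, which acpi_cppc_processor_probe() marks as unsupported when a
 * v2 table is exposed.
 */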
 663 
 664 /*
 665  * An example CPC table looks like the following.
 666  *
 667  *      Name(_CPC, Package()
 668  *                      {
 669  *                      17,
  670  *                      // NumEntries
 671  *                      1,
 672  *                      // Revision
 673  *                      ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},
 674  *                      // Highest Performance
 675  *                      ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},
 676  *                      // Nominal Performance
 677  *                      ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},
 678  *                      // Lowest Nonlinear Performance
 679  *                      ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},
 680  *                      // Lowest Performance
 681  *                      ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},
 682  *                      // Guaranteed Performance Register
 683  *                      ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},
 684  *                      // Desired Performance Register
 685  *                      ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 686  *                      ..
 687  *                      ..
 688  *                      ..
 689  *
 690  *              }
 691  * Each Register() encodes how to access that specific register.
 692  * e.g. a sample PCC entry has the following encoding:
 693  *
 694  *      Register (
 695  *              PCC,
  696  *              // AddressSpaceKeyword
  697  *              8,
  698  *              // RegisterBitWidth
  699  *              8,
  700  *              // RegisterBitOffset
  701  *              0x30,
  702  *              // RegisterAddress
  703  *              9,
  704  *              // AccessSize (subspace ID)
 705  *              0
 706  *              )
 707  *      }
 708  */
 709 
 710 /**
 711  * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 712  * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 713  *
 714  *      Return: 0 for success or negative value for err.
 715  */
 716 int acpi_cppc_processor_probe(struct acpi_processor *pr)
 717 {
 718         struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
 719         union acpi_object *out_obj, *cpc_obj;
 720         struct cpc_desc *cpc_ptr;
 721         struct cpc_reg *gas_t;
 722         struct device *cpu_dev;
 723         acpi_handle handle = pr->handle;
 724         unsigned int num_ent, i, cpc_rev;
 725         int pcc_subspace_id = -1;
 726         acpi_status status;
 727         int ret = -EFAULT;
 728 
 729         /* Parse the ACPI _CPC table for this CPU. */
 730         status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
 731                         ACPI_TYPE_PACKAGE);
 732         if (ACPI_FAILURE(status)) {
 733                 ret = -ENODEV;
 734                 goto out_buf_free;
 735         }
 736 
 737         out_obj = (union acpi_object *) output.pointer;
 738 
 739         cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
 740         if (!cpc_ptr) {
 741                 ret = -ENOMEM;
 742                 goto out_buf_free;
 743         }
 744 
 745         /* First entry is NumEntries. */
 746         cpc_obj = &out_obj->package.elements[0];
 747         if (cpc_obj->type == ACPI_TYPE_INTEGER) {
 748                 num_ent = cpc_obj->integer.value;
 749         } else {
 750                 pr_debug("Unexpected entry type(%d) for NumEntries\n",
 751                                 cpc_obj->type);
 752                 goto out_free;
 753         }
 754         cpc_ptr->num_entries = num_ent;
 755 
 756         /* Second entry should be revision. */
 757         cpc_obj = &out_obj->package.elements[1];
 758         if (cpc_obj->type == ACPI_TYPE_INTEGER) {
 759                 cpc_rev = cpc_obj->integer.value;
 760         } else {
 761                 pr_debug("Unexpected entry type(%d) for Revision\n",
 762                                 cpc_obj->type);
 763                 goto out_free;
 764         }
 765         cpc_ptr->version = cpc_rev;
 766 
 767         if (!is_cppc_supported(cpc_rev, num_ent))
 768                 goto out_free;
 769 
 770         /* Iterate through remaining entries in _CPC */
 771         for (i = 2; i < num_ent; i++) {
 772                 cpc_obj = &out_obj->package.elements[i];
 773 
 774                 if (cpc_obj->type == ACPI_TYPE_INTEGER) {
 775                         cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
 776                         cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
 777                 } else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
 778                         gas_t = (struct cpc_reg *)
 779                                 cpc_obj->buffer.pointer;
 780 
 781                         /*
 782                          * The PCC Subspace index is encoded inside
 783                          * the CPC table entries. The same PCC index
 784                          * will be used for all the PCC entries,
 785                          * so extract it only once.
 786                          */
 787                         if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
 788                                 if (pcc_subspace_id < 0) {
 789                                         pcc_subspace_id = gas_t->access_width;
 790                                         if (pcc_data_alloc(pcc_subspace_id))
 791                                                 goto out_free;
 792                                 } else if (pcc_subspace_id != gas_t->access_width) {
 793                                         pr_debug("Mismatched PCC ids.\n");
 794                                         goto out_free;
 795                                 }
 796                         } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
 797                                 if (gas_t->address) {
 798                                         void __iomem *addr;
 799 
 800                                         addr = ioremap(gas_t->address, gas_t->bit_width/8);
 801                                         if (!addr)
 802                                                 goto out_free;
 803                                         cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
 804                                 }
 805                         } else {
 806                                 if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
  807                                         /* Support only PCC, SYS MEM and FFH type regs */
 808                                         pr_debug("Unsupported register type: %d\n", gas_t->space_id);
 809                                         goto out_free;
 810                                 }
 811                         }
 812 
 813                         cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
 814                         memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
 815                 } else {
 816                         pr_debug("Err in entry:%d in CPC table of CPU:%d \n", i, pr->id);
 817                         goto out_free;
 818                 }
 819         }
 820         per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
 821 
 822         /*
 823          * Initialize the remaining cpc_regs as unsupported.
 824          * Example: In case FW exposes CPPC v2, the below loop will initialize
 825          * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
 826          */
 827         for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
 828                 cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
 829                 cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
 830         }
 831 
 832 
 833         /* Store CPU Logical ID */
 834         cpc_ptr->cpu_id = pr->id;
 835 
 836         /* Parse PSD data for this CPU */
 837         ret = acpi_get_psd(cpc_ptr, handle);
 838         if (ret)
 839                 goto out_free;
 840 
  841         /* Register the PCC channel once per PCC subspace ID. */
 842         if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
 843                 ret = register_pcc_channel(pcc_subspace_id);
 844                 if (ret)
 845                         goto out_free;
 846 
 847                 init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
 848                 init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
 849         }
 850 
 851         /* Everything looks okay */
 852         pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
 853 
 854         /* Add per logical CPU nodes for reading its feedback counters. */
 855         cpu_dev = get_cpu_device(pr->id);
 856         if (!cpu_dev) {
 857                 ret = -EINVAL;
 858                 goto out_free;
 859         }
 860 
 861         /* Plug PSD data into this CPU's CPC descriptor. */
 862         per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
 863 
 864         ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
 865                         "acpi_cppc");
 866         if (ret) {
 867                 per_cpu(cpc_desc_ptr, pr->id) = NULL;
 868                 kobject_put(&cpc_ptr->kobj);
 869                 goto out_free;
 870         }
 871 
 872         kfree(output.pointer);
 873         return 0;
 874 
 875 out_free:
 876         /* Free all the mapped sys mem areas for this CPU */
 877         for (i = 2; i < cpc_ptr->num_entries; i++) {
 878                 void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
 879 
 880                 if (addr)
 881                         iounmap(addr);
 882         }
 883         kfree(cpc_ptr);
 884 
 885 out_buf_free:
 886         kfree(output.pointer);
 887         return ret;
 888 }
 889 EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
 890 
 891 /**
 892  * acpi_cppc_processor_exit - Cleanup CPC structs.
 893  * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 894  *
 895  * Return: Void
 896  */
 897 void acpi_cppc_processor_exit(struct acpi_processor *pr)
 898 {
 899         struct cpc_desc *cpc_ptr;
 900         unsigned int i;
 901         void __iomem *addr;
 902         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
 903 
  904         if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
 905                 if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
 906                         pcc_data[pcc_ss_id]->refcount--;
 907                         if (!pcc_data[pcc_ss_id]->refcount) {
 908                                 pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
 909                                 kfree(pcc_data[pcc_ss_id]);
 910                                 pcc_data[pcc_ss_id] = NULL;
 911                         }
 912                 }
 913         }
 914 
 915         cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
 916         if (!cpc_ptr)
 917                 return;
 918 
 919         /* Free all the mapped sys mem areas for this CPU */
 920         for (i = 2; i < cpc_ptr->num_entries; i++) {
 921                 addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
 922                 if (addr)
 923                         iounmap(addr);
 924         }
 925 
 926         kobject_put(&cpc_ptr->kobj);
 927         kfree(cpc_ptr);
 928 }
 929 EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
 930 
 931 /**
 932  * cpc_read_ffh() - Read FFH register
 933  * @cpunum:     CPU number to read
 934  * @reg:        cppc register information
 935  * @val:        place holder for return value
 936  *
 937  * Read bit_width bits from a specified address and bit_offset
 938  *
  939  * Return: 0 for success or an error code otherwise
 940  */
 941 int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
 942 {
 943         return -ENOTSUPP;
 944 }
 945 
 946 /**
 947  * cpc_write_ffh() - Write FFH register
 948  * @cpunum:     CPU number to write
 949  * @reg:        cppc register information
 950  * @val:        value to write
 951  *
 952  * Write value of bit_width bits to a specified address and bit_offset
 953  *
  954  * Return: 0 for success or an error code otherwise
 955  */
 956 int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
 957 {
 958         return -ENOTSUPP;
 959 }
 960 
 961 /*
  962  * Since cpc_read() and cpc_write() are called while holding pcc_lock, they should
  963  * be as fast as possible. We have already mapped the PCC subspace during init, so
 964  * we can directly write to it.
 965  */
 966 
 967 static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
 968 {
 969         int ret_val = 0;
 970         void __iomem *vaddr = 0;
 971         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
 972         struct cpc_reg *reg = &reg_res->cpc_entry.reg;
 973 
 974         if (reg_res->type == ACPI_TYPE_INTEGER) {
 975                 *val = reg_res->cpc_entry.int_value;
 976                 return ret_val;
 977         }
 978 
 979         *val = 0;
 980         if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
 981                 vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
 982         else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
 983                 vaddr = reg_res->sys_mem_vaddr;
 984         else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
 985                 return cpc_read_ffh(cpu, reg, val);
 986         else
 987                 return acpi_os_read_memory((acpi_physical_address)reg->address,
 988                                 val, reg->bit_width);
 989 
 990         switch (reg->bit_width) {
 991                 case 8:
 992                         *val = readb_relaxed(vaddr);
 993                         break;
 994                 case 16:
 995                         *val = readw_relaxed(vaddr);
 996                         break;
 997                 case 32:
 998                         *val = readl_relaxed(vaddr);
 999                         break;
1000                 case 64:
1001                         *val = readq_relaxed(vaddr);
1002                         break;
1003                 default:
1004                         pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
1005                                  reg->bit_width, pcc_ss_id);
1006                         ret_val = -EFAULT;
1007         }
1008 
1009         return ret_val;
1010 }
1011 
1012 static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
1013 {
1014         int ret_val = 0;
1015         void __iomem *vaddr = 0;
1016         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1017         struct cpc_reg *reg = &reg_res->cpc_entry.reg;
1018 
1019         if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
1020                 vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
1021         else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1022                 vaddr = reg_res->sys_mem_vaddr;
1023         else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
1024                 return cpc_write_ffh(cpu, reg, val);
1025         else
1026                 return acpi_os_write_memory((acpi_physical_address)reg->address,
1027                                 val, reg->bit_width);
1028 
1029         switch (reg->bit_width) {
1030                 case 8:
1031                         writeb_relaxed(val, vaddr);
1032                         break;
1033                 case 16:
1034                         writew_relaxed(val, vaddr);
1035                         break;
1036                 case 32:
1037                         writel_relaxed(val, vaddr);
1038                         break;
1039                 case 64:
1040                         writeq_relaxed(val, vaddr);
1041                         break;
1042                 default:
1043                         pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
1044                                  reg->bit_width, pcc_ss_id);
1045                         ret_val = -EFAULT;
1046                         break;
1047         }
1048 
1049         return ret_val;
1050 }
1051 
1052 /**
1053  * cppc_get_desired_perf - Get the value of desired performance register.
1054  * @cpunum: CPU from which to get desired performance.
1055  * @desired_perf: address of a variable to store the returned desired performance
1056  *
1057  * Return: 0 for success, -EIO otherwise.
1058  */
1059 int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
1060 {
1061         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1062         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1063         struct cpc_register_resource *desired_reg;
1064         struct cppc_pcc_data *pcc_ss_data = NULL;
1065 
1066         desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1067 
1068         if (CPC_IN_PCC(desired_reg)) {
1069                 int ret = 0;
1070 
1071                 if (pcc_ss_id < 0)
1072                         return -EIO;
1073 
1074                 pcc_ss_data = pcc_data[pcc_ss_id];
1075 
1076                 down_write(&pcc_ss_data->pcc_lock);
1077 
1078                 if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
1079                         cpc_read(cpunum, desired_reg, desired_perf);
1080                 else
1081                         ret = -EIO;
1082 
1083                 up_write(&pcc_ss_data->pcc_lock);
1084 
1085                 return ret;
1086         }
1087 
1088         cpc_read(cpunum, desired_reg, desired_perf);
1089 
1090         return 0;
1091 }
1092 EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
1093 
1094 /**
1095  * cppc_get_perf_caps - Get a CPU's performance capabilities.
1096  * @cpunum: CPU from which to get capabilities info.
1097  * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
1098  *
1099  * Return: 0 for success with perf_caps populated else -ERRNO.
1100  */
1101 int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1102 {
1103         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1104         struct cpc_register_resource *highest_reg, *lowest_reg,
1105                 *lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
1106                 *low_freq_reg = NULL, *nom_freq_reg = NULL;
1107         u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
1108         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1109         struct cppc_pcc_data *pcc_ss_data = NULL;
1110         int ret = 0, regs_in_pcc = 0;
1111 
1112         if (!cpc_desc) {
1113                 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1114                 return -ENODEV;
1115         }
1116 
1117         highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
1118         lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
1119         lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
1120         nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1121         low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
1122         nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
1123         guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];
1124 
 1125         /* Are any of the regs PCC? */
1126         if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
1127                 CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
1128                 CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
1129                 if (pcc_ss_id < 0) {
1130                         pr_debug("Invalid pcc_ss_id\n");
1131                         return -ENODEV;
1132                 }
1133                 pcc_ss_data = pcc_data[pcc_ss_id];
1134                 regs_in_pcc = 1;
1135                 down_write(&pcc_ss_data->pcc_lock);
1136                 /* Ring doorbell once to update PCC subspace */
1137                 if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1138                         ret = -EIO;
1139                         goto out_err;
1140                 }
1141         }
1142 
1143         cpc_read(cpunum, highest_reg, &high);
1144         perf_caps->highest_perf = high;
1145 
1146         cpc_read(cpunum, lowest_reg, &low);
1147         perf_caps->lowest_perf = low;
1148 
1149         cpc_read(cpunum, nominal_reg, &nom);
1150         perf_caps->nominal_perf = nom;
1151 
1152         if (guaranteed_reg->type != ACPI_TYPE_BUFFER  ||
1153             IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
1154                 perf_caps->guaranteed_perf = 0;
1155         } else {
1156                 cpc_read(cpunum, guaranteed_reg, &guaranteed);
1157                 perf_caps->guaranteed_perf = guaranteed;
1158         }
1159 
1160         cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
1161         perf_caps->lowest_nonlinear_perf = min_nonlinear;
1162 
1163         if (!high || !low || !nom || !min_nonlinear)
1164                 ret = -EFAULT;
1165 
1166         /* Read optional lowest and nominal frequencies if present */
1167         if (CPC_SUPPORTED(low_freq_reg))
1168                 cpc_read(cpunum, low_freq_reg, &low_f);
1169 
1170         if (CPC_SUPPORTED(nom_freq_reg))
1171                 cpc_read(cpunum, nom_freq_reg, &nom_f);
1172 
1173         perf_caps->lowest_freq = low_f;
1174         perf_caps->nominal_freq = nom_f;
1175 
1176 
1177 out_err:
1178         if (regs_in_pcc)
1179                 up_write(&pcc_ss_data->pcc_lock);
1180         return ret;
1181 }
1182 EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
1183 
1184 /**
1185  * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
1186  * @cpunum: CPU from which to read counters.
1187  * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
1188  *
1189  * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
1190  */
1191 int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
1192 {
1193         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1194         struct cpc_register_resource *delivered_reg, *reference_reg,
1195                 *ref_perf_reg, *ctr_wrap_reg;
1196         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1197         struct cppc_pcc_data *pcc_ss_data = NULL;
1198         u64 delivered, reference, ref_perf, ctr_wrap_time;
1199         int ret = 0, regs_in_pcc = 0;
1200 
1201         if (!cpc_desc) {
1202                 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1203                 return -ENODEV;
1204         }
1205 
1206         delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
1207         reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
1208         ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
1209         ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
1210 
1211         /*
1212          * If reference perf register is not supported then we should
1213          * use the nominal perf value
1214          */
1215         if (!CPC_SUPPORTED(ref_perf_reg))
1216                 ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1217 
 1218         /* Are any of the regs PCC? */
1219         if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
1220                 CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
1221                 if (pcc_ss_id < 0) {
1222                         pr_debug("Invalid pcc_ss_id\n");
1223                         return -ENODEV;
1224                 }
1225                 pcc_ss_data = pcc_data[pcc_ss_id];
1226                 down_write(&pcc_ss_data->pcc_lock);
1227                 regs_in_pcc = 1;
1228                 /* Ring doorbell once to update PCC subspace */
1229                 if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1230                         ret = -EIO;
1231                         goto out_err;
1232                 }
1233         }
1234 
1235         cpc_read(cpunum, delivered_reg, &delivered);
1236         cpc_read(cpunum, reference_reg, &reference);
1237         cpc_read(cpunum, ref_perf_reg, &ref_perf);
1238 
1239         /*
 1240          * Per spec, if the optional ctr_wrap_time register is unsupported, then the
 1241          * performance counters are assumed to never wrap during the lifetime of
 1242          * the platform.
1243          */
1244         ctr_wrap_time = (u64)(~((u64)0));
1245         if (CPC_SUPPORTED(ctr_wrap_reg))
1246                 cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);
1247 
1248         if (!delivered || !reference || !ref_perf) {
1249                 ret = -EFAULT;
1250                 goto out_err;
1251         }
1252 
1253         perf_fb_ctrs->delivered = delivered;
1254         perf_fb_ctrs->reference = reference;
1255         perf_fb_ctrs->reference_perf = ref_perf;
1256         perf_fb_ctrs->wraparound_time = ctr_wrap_time;
1257 out_err:
1258         if (regs_in_pcc)
1259                 up_write(&pcc_ss_data->pcc_lock);
1260         return ret;
1261 }
1262 EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
1263 
1264 /**
1265  * cppc_set_perf - Set a CPU's performance controls.
1266  * @cpu: CPU for which to set performance controls.
1267  * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
1268  *
1269  * Return: 0 for success, -ERRNO otherwise.
1270  */
1271 int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
1272 {
1273         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1274         struct cpc_register_resource *desired_reg;
1275         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1276         struct cppc_pcc_data *pcc_ss_data = NULL;
1277         int ret = 0;
1278 
1279         if (!cpc_desc) {
1280                 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1281                 return -ENODEV;
1282         }
1283 
1284         desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1285 
1286         /*
1287          * This is Phase-I where we want to write to CPC registers
1288          * -> We want all CPUs to be able to execute this phase in parallel
1289          *
1290          * Since read_lock can be acquired by multiple CPUs simultaneously we
1291          * achieve that goal here
1292          */
1293         if (CPC_IN_PCC(desired_reg)) {
1294                 if (pcc_ss_id < 0) {
1295                         pr_debug("Invalid pcc_ss_id\n");
1296                         return -ENODEV;
1297                 }
1298                 pcc_ss_data = pcc_data[pcc_ss_id];
1299                 down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
1300                 if (pcc_ss_data->platform_owns_pcc) {
1301                         ret = check_pcc_chan(pcc_ss_id, false);
1302                         if (ret) {
1303                                 up_read(&pcc_ss_data->pcc_lock);
1304                                 return ret;
1305                         }
1306                 }
1307                 /*
1308                  * Update the pending_write to make sure a PCC CMD_READ will not
1309                  * arrive and steal the channel during the switch to write lock
1310                  */
1311                 pcc_ss_data->pending_pcc_write_cmd = true;
1312                 cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
1313                 cpc_desc->write_cmd_status = 0;
1314         }
1315 
1316         /*
1317          * Skip writing MIN/MAX until Linux knows how to come up with
1318          * useful values.
1319          */
1320         cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
1321 
1322         if (CPC_IN_PCC(desired_reg))
1323                 up_read(&pcc_ss_data->pcc_lock);        /* END Phase-I */
1324         /*
1325          * This is Phase-II where we transfer the ownership of PCC to Platform
1326          *
 1327          * Short Summary: Consider a group of cppc_set_perf requests that
 1328          * happen in a short overlapping interval. The last CPU to
 1329          * come out of Phase-I will enter Phase-II and ring the doorbell.
1330          *
1331          * We have the following requirements for Phase-II:
1332          *     1. We want to execute Phase-II only when there are no CPUs
1333          * currently executing in Phase-I
1334          *     2. Once we start Phase-II we want to avoid all other CPUs from
1335          * entering Phase-I.
1336          *     3. We want only one CPU among all those who went through Phase-I
1337          * to run phase-II
1338          *
1339          * If write_trylock fails to get the lock and doesn't transfer the
1340          * PCC ownership to the platform, then one of the following will be TRUE
 1341          *     1. There is at least one CPU in Phase-I which will later execute
1342          * write_trylock, so the CPUs in Phase-I will be responsible for
1343          * executing the Phase-II.
1344          *     2. Some other CPU has beaten this CPU to successfully execute the
1345          * write_trylock and has already acquired the write_lock. We know for a
1346          * fact it (other CPU acquiring the write_lock) couldn't have happened
1347          * before this CPU's Phase-I as we held the read_lock.
1348          *     3. Some other CPU executing pcc CMD_READ has stolen the
1349          * down_write, in which case, send_pcc_cmd will check for pending
1350          * CMD_WRITE commands by checking the pending_pcc_write_cmd.
1351          * So this CPU can be certain that its request will be delivered
1352          *    So in all cases, this CPU knows that its request will be delivered
1353          * by another CPU and can return
1354          *
1355          * After getting the down_write we still need to check for
1356          * pending_pcc_write_cmd to take care of the following scenario
1357          *    The thread running this code could be scheduled out between
1358          * Phase-I and Phase-II. Before it is scheduled back on, another CPU
1359          * could have delivered the request to Platform by triggering the
1360          * doorbell and transferred the ownership of PCC to platform. So this
1361          * avoids triggering an unnecessary doorbell and more importantly before
1362          * triggering the doorbell it makes sure that the PCC channel ownership
1363          * is still with OSPM.
1364          *   pending_pcc_write_cmd can also be cleared by a different CPU, if
1365          * there was a pcc CMD_READ waiting on down_write and it steals the lock
 1366          * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this
1367          * case during a CMD_READ and if there are pending writes it delivers
1368          * the write command before servicing the read command
1369          */
1370         if (CPC_IN_PCC(desired_reg)) {
1371                 if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
1372                         /* Update only if there are pending write commands */
1373                         if (pcc_ss_data->pending_pcc_write_cmd)
1374                                 send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1375                         up_write(&pcc_ss_data->pcc_lock);       /* END Phase-II */
1376                 } else
1377                         /* Wait until pcc_write_cnt is updated by send_pcc_cmd */
1378                         wait_event(pcc_ss_data->pcc_write_wait_q,
1379                                    cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
1380 
1381                 /* send_pcc_cmd updates the status in case of failure */
1382                 ret = cpc_desc->write_cmd_status;
1383         }
1384         return ret;
1385 }
1386 EXPORT_SYMBOL_GPL(cppc_set_perf);
1387 
1388 /**
1389  * cppc_get_transition_latency - returns frequency transition latency in ns
1390  *
 1391  * ACPI CPPC does not explicitly specify how a platform can specify the
 1392  * transition latency for performance change requests. The closest we have
1393  * is the timing information from the PCCT tables which provides the info
1394  * on the number and frequency of PCC commands the platform can handle.
1395  */
1396 unsigned int cppc_get_transition_latency(int cpu_num)
1397 {
1398         /*
1399          * Expected transition latency is based on the PCCT timing values
 1400          * Below are definitions from the ACPI spec:
1401          * pcc_nominal- Expected latency to process a command, in microseconds
1402          * pcc_mpar   - The maximum number of periodic requests that the subspace
1403          *              channel can support, reported in commands per minute. 0
1404          *              indicates no limitation.
1405          * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
1406          *              completion of a command before issuing the next command,
1407          *              in microseconds.
1408          */
1409         unsigned int latency_ns = 0;
1410         struct cpc_desc *cpc_desc;
1411         struct cpc_register_resource *desired_reg;
1412         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
1413         struct cppc_pcc_data *pcc_ss_data;
1414 
1415         cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
1416         if (!cpc_desc)
1417                 return CPUFREQ_ETERNAL;
1418 
1419         desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1420         if (!CPC_IN_PCC(desired_reg))
1421                 return CPUFREQ_ETERNAL;
1422 
1423         if (pcc_ss_id < 0)
1424                 return CPUFREQ_ETERNAL;
1425 
1426         pcc_ss_data = pcc_data[pcc_ss_id];
1427         if (pcc_ss_data->pcc_mpar)
1428                 latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
1429 
1430         latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
1431         latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
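
        /*
         * Worked example with illustrative values: pcc_mpar = 100 commands/min
         * gives 60 * (1000000000 / 100) = 600,000,000 ns, while pcc_nominal =
         * 500 us and pcc_mrtt = 60 us contribute 500,000 ns and 60,000 ns, so
         * the 600 ms MPAR bound dominates and is returned.
         */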
1432 
1433         return latency_ns;
1434 }
1435 EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
