root/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. dm_pp_apply_display_requirements
  2. get_default_clock_levels
  3. dc_to_smu_clock_type
  4. dc_to_pp_clock_type
  5. pp_to_dc_powerlevel_state
  6. pp_to_dc_clock_levels
  7. pp_to_dc_clock_levels_with_latency
  8. pp_to_dc_clock_levels_with_voltage
  9. dm_pp_get_clock_levels_by_type
  10. dm_pp_get_clock_levels_by_type_with_latency
  11. dm_pp_get_clock_levels_by_type_with_voltage
  12. dm_pp_notify_wm_clock_changes
  13. dm_pp_apply_power_level_change_request
  14. dm_pp_apply_clock_for_voltage_request
  15. dm_pp_get_static_clocks
  16. pp_rv_set_wm_ranges
  17. pp_rv_set_pme_wa_enable
  18. pp_rv_set_active_display_count
  19. pp_rv_set_min_deep_sleep_dcfclk
  20. pp_rv_set_hard_min_dcefclk_by_freq
  21. pp_rv_set_hard_min_fclk_by_freq
  22. pp_nv_set_wm_ranges
  23. pp_nv_set_pme_wa_enable
  24. pp_nv_set_display_count
  25. pp_nv_set_min_deep_sleep_dcfclk
  26. pp_nv_set_hard_min_dcefclk_by_freq
  27. pp_nv_set_hard_min_uclk_by_freq
  28. pp_nv_set_pstate_handshake_support
  29. pp_nv_set_voltage_by_freq
  30. pp_nv_get_maximum_sustainable_clocks
  31. pp_nv_get_uclk_dpm_states
  32. dm_pp_get_funcs

   1 /*
   2  * Copyright 2018 Advanced Micro Devices, Inc.
   3  *
   4  * Permission is hereby granted, free of charge, to any person obtaining a
   5  * copy of this software and associated documentation files (the "Software"),
   6  * to deal in the Software without restriction, including without limitation
   7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8  * and/or sell copies of the Software, and to permit persons to whom the
   9  * Software is furnished to do so, subject to the following conditions:
  10  *
  11  * The above copyright notice and this permission notice shall be included in
  12  * all copies or substantial portions of the Software.
  13  *
  14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20  * OTHER DEALINGS IN THE SOFTWARE.
  21  *
  22  * Authors: AMD
  23  */
  24 #include <linux/string.h>
  25 #include <linux/acpi.h>
  26 
  27 #include <drm/drm_probe_helper.h>
  28 #include <drm/amdgpu_drm.h>
  29 #include "dm_services.h"
  30 #include "amdgpu.h"
  31 #include "amdgpu_dm.h"
  32 #include "amdgpu_dm_irq.h"
  33 #include "amdgpu_pm.h"
  34 #include "dm_pp_smu.h"
  35 #include "amdgpu_smu.h"
  36 
  37 
/*
 * dm_pp_apply_display_requirements() - push DC's current display
 * configuration down to the power-management backend.
 *
 * Translates @pp_display_cfg into adev->pm.pm_display_cfg, then notifies
 * either the legacy powerplay layer (when pp_funcs provides
 * display_configuration_change) or the SW SMU, and finally asks PM to
 * re-evaluate clocks.
 *
 * NOTE: @pp_display_cfg clock fields are in kHz while pm_display_cfg
 * stores 10 kHz units, hence the divisions by 10 below.
 *
 * Does nothing unless DPM is enabled.  Always returns true; backend
 * failures are not propagated to the caller.
 */
bool dm_pp_apply_display_requirements(
		const struct dc_context *ctx,
		const struct dm_pp_display_configuration *pp_display_cfg)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;
	int i;

	if (adev->pm.dpm_enabled) {

		/* Start from a clean slate so stale fields from a previous
		 * configuration cannot leak through. */
		memset(&adev->pm.pm_display_cfg, 0,
				sizeof(adev->pm.pm_display_cfg));

		adev->pm.pm_display_cfg.cpu_cc6_disable =
			pp_display_cfg->cpu_cc6_disable;

		adev->pm.pm_display_cfg.cpu_pstate_disable =
			pp_display_cfg->cpu_pstate_disable;

		adev->pm.pm_display_cfg.cpu_pstate_separation_time =
			pp_display_cfg->cpu_pstate_separation_time;

		adev->pm.pm_display_cfg.nb_pstate_switch_disable =
			pp_display_cfg->nb_pstate_switch_disable;

		/* Both counters are fed from the same display count here;
		 * DC does not report non-display paths separately. */
		adev->pm.pm_display_cfg.num_display =
				pp_display_cfg->display_count;
		adev->pm.pm_display_cfg.num_path_including_non_display =
				pp_display_cfg->display_count;

		/* kHz -> 10 kHz unit conversion for all clock minimums. */
		adev->pm.pm_display_cfg.min_core_set_clock =
				pp_display_cfg->min_engine_clock_khz/10;
		adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_mem_set_clock =
				pp_display_cfg->min_memory_clock_khz/10;

		adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_dcef_set_clk =
				pp_display_cfg->min_dcfclock_khz/10;

		adev->pm.pm_display_cfg.multi_monitor_in_sync =
				pp_display_cfg->all_displays_in_sync;
		adev->pm.pm_display_cfg.min_vblank_time =
				pp_display_cfg->avail_mclk_switch_time_us;

		adev->pm.pm_display_cfg.display_clk =
				pp_display_cfg->disp_clk_khz/10;

		adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
				pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;

		adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
		adev->pm.pm_display_cfg.line_time_in_us =
				pp_display_cfg->line_time_in_us;

		/* Refresh rate is taken from the first display config only. */
		adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
		adev->pm.pm_display_cfg.crossfire_display_index = -1;
		adev->pm.pm_display_cfg.min_bus_bandwidth = 0;

		/* Controller IDs are 1-based on the PM side. */
		for (i = 0; i < pp_display_cfg->display_count; i++) {
			const struct dm_pp_single_disp_config *dc_cfg =
						&pp_display_cfg->disp_configs[i];
			adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
		}

		/* Prefer the legacy powerplay hook; fall back to SW SMU. */
		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_configuration_change)
			adev->powerplay.pp_funcs->display_configuration_change(
				adev->powerplay.pp_handle,
				&adev->pm.pm_display_cfg);
		else
			smu_display_configuration_change(smu,
							 &adev->pm.pm_display_cfg);

		amdgpu_pm_compute_clocks(adev);
	}

	return true;
}
 118 
 119 static void get_default_clock_levels(
 120                 enum dm_pp_clock_type clk_type,
 121                 struct dm_pp_clock_levels *clks)
 122 {
 123         uint32_t disp_clks_in_khz[6] = {
 124                         300000, 400000, 496560, 626090, 685720, 757900 };
 125         uint32_t sclks_in_khz[6] = {
 126                         300000, 360000, 423530, 514290, 626090, 720000 };
 127         uint32_t mclks_in_khz[2] = { 333000, 800000 };
 128 
 129         switch (clk_type) {
 130         case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
 131                 clks->num_levels = 6;
 132                 memmove(clks->clocks_in_khz, disp_clks_in_khz,
 133                                 sizeof(disp_clks_in_khz));
 134                 break;
 135         case DM_PP_CLOCK_TYPE_ENGINE_CLK:
 136                 clks->num_levels = 6;
 137                 memmove(clks->clocks_in_khz, sclks_in_khz,
 138                                 sizeof(sclks_in_khz));
 139                 break;
 140         case DM_PP_CLOCK_TYPE_MEMORY_CLK:
 141                 clks->num_levels = 2;
 142                 memmove(clks->clocks_in_khz, mclks_in_khz,
 143                                 sizeof(mclks_in_khz));
 144                 break;
 145         default:
 146                 clks->num_levels = 0;
 147                 break;
 148         }
 149 }
 150 
 151 static enum smu_clk_type dc_to_smu_clock_type(
 152                 enum dm_pp_clock_type dm_pp_clk_type)
 153 {
 154         enum smu_clk_type smu_clk_type = SMU_CLK_COUNT;
 155 
 156         switch (dm_pp_clk_type) {
 157         case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
 158                 smu_clk_type = SMU_DISPCLK;
 159                 break;
 160         case DM_PP_CLOCK_TYPE_ENGINE_CLK:
 161                 smu_clk_type = SMU_GFXCLK;
 162                 break;
 163         case DM_PP_CLOCK_TYPE_MEMORY_CLK:
 164                 smu_clk_type = SMU_MCLK;
 165                 break;
 166         case DM_PP_CLOCK_TYPE_DCEFCLK:
 167                 smu_clk_type = SMU_DCEFCLK;
 168                 break;
 169         case DM_PP_CLOCK_TYPE_SOCCLK:
 170                 smu_clk_type = SMU_SOCCLK;
 171                 break;
 172         default:
 173                 DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
 174                           dm_pp_clk_type);
 175                 break;
 176         }
 177 
 178         return smu_clk_type;
 179 }
 180 
 181 static enum amd_pp_clock_type dc_to_pp_clock_type(
 182                 enum dm_pp_clock_type dm_pp_clk_type)
 183 {
 184         enum amd_pp_clock_type amd_pp_clk_type = 0;
 185 
 186         switch (dm_pp_clk_type) {
 187         case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
 188                 amd_pp_clk_type = amd_pp_disp_clock;
 189                 break;
 190         case DM_PP_CLOCK_TYPE_ENGINE_CLK:
 191                 amd_pp_clk_type = amd_pp_sys_clock;
 192                 break;
 193         case DM_PP_CLOCK_TYPE_MEMORY_CLK:
 194                 amd_pp_clk_type = amd_pp_mem_clock;
 195                 break;
 196         case DM_PP_CLOCK_TYPE_DCEFCLK:
 197                 amd_pp_clk_type  = amd_pp_dcef_clock;
 198                 break;
 199         case DM_PP_CLOCK_TYPE_DCFCLK:
 200                 amd_pp_clk_type = amd_pp_dcf_clock;
 201                 break;
 202         case DM_PP_CLOCK_TYPE_PIXELCLK:
 203                 amd_pp_clk_type = amd_pp_pixel_clock;
 204                 break;
 205         case DM_PP_CLOCK_TYPE_FCLK:
 206                 amd_pp_clk_type = amd_pp_f_clock;
 207                 break;
 208         case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
 209                 amd_pp_clk_type = amd_pp_phy_clock;
 210                 break;
 211         case DM_PP_CLOCK_TYPE_DPPCLK:
 212                 amd_pp_clk_type = amd_pp_dpp_clock;
 213                 break;
 214         default:
 215                 DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
 216                                 dm_pp_clk_type);
 217                 break;
 218         }
 219 
 220         return amd_pp_clk_type;
 221 }
 222 
/*
 * pp_to_dc_powerlevel_state() - translate a powerplay DAL power level
 * into DC's DPM-state enum.
 *
 * Levels 0..7 map one-to-one; anything else logs an error and yields
 * DM_PP_CLOCKS_STATE_INVALID.
 */
static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
			enum PP_DAL_POWERLEVEL max_clocks_state)
{
	switch (max_clocks_state) {
	case PP_DAL_POWERLEVEL_0:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
	case PP_DAL_POWERLEVEL_1:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
	case PP_DAL_POWERLEVEL_2:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
	case PP_DAL_POWERLEVEL_3:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
	case PP_DAL_POWERLEVEL_4:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
	case PP_DAL_POWERLEVEL_5:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
	case PP_DAL_POWERLEVEL_6:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
	case PP_DAL_POWERLEVEL_7:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
	default:
		DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
				max_clocks_state);
		return DM_PP_CLOCKS_STATE_INVALID;
	}
}
 249 
 250 static void pp_to_dc_clock_levels(
 251                 const struct amd_pp_clocks *pp_clks,
 252                 struct dm_pp_clock_levels *dc_clks,
 253                 enum dm_pp_clock_type dc_clk_type)
 254 {
 255         uint32_t i;
 256 
 257         if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
 258                 DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
 259                                 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
 260                                 pp_clks->count,
 261                                 DM_PP_MAX_CLOCK_LEVELS);
 262 
 263                 dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
 264         } else
 265                 dc_clks->num_levels = pp_clks->count;
 266 
 267         DRM_INFO("DM_PPLIB: values for %s clock\n",
 268                         DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
 269 
 270         for (i = 0; i < dc_clks->num_levels; i++) {
 271                 DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
 272                 dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
 273         }
 274 }
 275 
 276 static void pp_to_dc_clock_levels_with_latency(
 277                 const struct pp_clock_levels_with_latency *pp_clks,
 278                 struct dm_pp_clock_levels_with_latency *clk_level_info,
 279                 enum dm_pp_clock_type dc_clk_type)
 280 {
 281         uint32_t i;
 282 
 283         if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
 284                 DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
 285                                 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
 286                                 pp_clks->num_levels,
 287                                 DM_PP_MAX_CLOCK_LEVELS);
 288 
 289                 clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
 290         } else
 291                 clk_level_info->num_levels = pp_clks->num_levels;
 292 
 293         DRM_DEBUG("DM_PPLIB: values for %s clock\n",
 294                         DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
 295 
 296         for (i = 0; i < clk_level_info->num_levels; i++) {
 297                 DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
 298                 clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
 299                 clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
 300         }
 301 }
 302 
 303 static void pp_to_dc_clock_levels_with_voltage(
 304                 const struct pp_clock_levels_with_voltage *pp_clks,
 305                 struct dm_pp_clock_levels_with_voltage *clk_level_info,
 306                 enum dm_pp_clock_type dc_clk_type)
 307 {
 308         uint32_t i;
 309 
 310         if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
 311                 DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
 312                                 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
 313                                 pp_clks->num_levels,
 314                                 DM_PP_MAX_CLOCK_LEVELS);
 315 
 316                 clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
 317         } else
 318                 clk_level_info->num_levels = pp_clks->num_levels;
 319 
 320         DRM_INFO("DM_PPLIB: values for %s clock\n",
 321                         DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
 322 
 323         for (i = 0; i < clk_level_info->num_levels; i++) {
 324                 DRM_INFO("DM_PPLIB:\t %d in kHz, %d in mV\n", pp_clks->data[i].clocks_in_khz,
 325                          pp_clks->data[i].voltage_in_mv);
 326                 clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
 327                 clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
 328         }
 329 }
 330 
/*
 * dm_pp_get_clock_levels_by_type() - query DPM clock levels for one clock
 * domain and trim boosted levels that exceed the validation clocks.
 *
 * Tries the legacy powerplay hook first, then the SW SMU; if the backend
 * call fails, hard-coded default tables are returned instead (and the
 * validation-clock trimming below is skipped via the early return).
 *
 * Always returns true.
 */
bool dm_pp_get_clock_levels_by_type(
		const struct dc_context *ctx,
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *dc_clks)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct amd_pp_clocks pp_clks = { 0 };
	struct amd_pp_simple_clock_info validation_clks = { 0 };
	uint32_t i;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) {
		if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
			dc_to_pp_clock_type(clk_type), &pp_clks)) {
			/* Error in pplib. Provide default values. */
			get_default_clock_levels(clk_type, dc_clks);
			return true;
		}
	} else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) {
		if (smu_get_clock_by_type(&adev->smu,
					  dc_to_pp_clock_type(clk_type),
					  &pp_clks)) {
			/* Error in SW SMU. Provide default values. */
			get_default_clock_levels(clk_type, dc_clks);
			return true;
		}
	}

	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);

	/* Fetch validation clocks; on failure fall back to fixed defaults
	 * (values below are in 10 kHz units, scaled to kHz further down). */
	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
		if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
						pp_handle, &validation_clks)) {
			/* Error in pplib. Provide default values. */
			DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
			validation_clks.engine_max_clock = 72000;
			validation_clks.memory_max_clock = 80000;
			validation_clks.level = 0;
		}
	} else if (adev->smu.funcs && adev->smu.funcs->get_max_high_clocks) {
		if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) {
			DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
			validation_clks.engine_max_clock = 72000;
			validation_clks.memory_max_clock = 80000;
			validation_clks.level = 0;
		}
	}

	DRM_INFO("DM_PPLIB: Validation clocks:\n");
	DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
			validation_clks.engine_max_clock);
	DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
			validation_clks.memory_max_clock);
	DRM_INFO("DM_PPLIB:    level           : %d\n",
			validation_clks.level);

	/* Translate 10 kHz to kHz. */
	validation_clks.engine_max_clock *= 10;
	validation_clks.memory_max_clock *= 10;

	/* Determine the highest non-boosted level from the Validation Clocks */
	if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
				/* This clock is higher than the validation
				 * clock. That means the previous one is the
				 * highest non-boosted one.  Always keep at
				 * least one level. */
				DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	} else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
				DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	}

	return true;
}
 416 
 417 bool dm_pp_get_clock_levels_by_type_with_latency(
 418         const struct dc_context *ctx,
 419         enum dm_pp_clock_type clk_type,
 420         struct dm_pp_clock_levels_with_latency *clk_level_info)
 421 {
 422         struct amdgpu_device *adev = ctx->driver_context;
 423         void *pp_handle = adev->powerplay.pp_handle;
 424         struct pp_clock_levels_with_latency pp_clks = { 0 };
 425         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 426         int ret;
 427 
 428         if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) {
 429                 ret = pp_funcs->get_clock_by_type_with_latency(pp_handle,
 430                                                 dc_to_pp_clock_type(clk_type),
 431                                                 &pp_clks);
 432                 if (ret)
 433                         return false;
 434         } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) {
 435                 if (smu_get_clock_by_type_with_latency(&adev->smu,
 436                                                        dc_to_smu_clock_type(clk_type),
 437                                                        &pp_clks))
 438                         return false;
 439         }
 440 
 441 
 442         pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
 443 
 444         return true;
 445 }
 446 
 447 bool dm_pp_get_clock_levels_by_type_with_voltage(
 448         const struct dc_context *ctx,
 449         enum dm_pp_clock_type clk_type,
 450         struct dm_pp_clock_levels_with_voltage *clk_level_info)
 451 {
 452         struct amdgpu_device *adev = ctx->driver_context;
 453         void *pp_handle = adev->powerplay.pp_handle;
 454         struct pp_clock_levels_with_voltage pp_clk_info = {0};
 455         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 456         int ret;
 457 
 458         if (pp_funcs && pp_funcs->get_clock_by_type_with_voltage) {
 459                 ret = pp_funcs->get_clock_by_type_with_voltage(pp_handle,
 460                                                 dc_to_pp_clock_type(clk_type),
 461                                                 &pp_clk_info);
 462                 if (ret)
 463                         return false;
 464         } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_voltage) {
 465                 if (smu_get_clock_by_type_with_voltage(&adev->smu,
 466                                                        dc_to_pp_clock_type(clk_type),
 467                                                        &pp_clk_info))
 468                         return false;
 469         }
 470 
 471         pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);
 472 
 473         return true;
 474 }
 475 
 476 bool dm_pp_notify_wm_clock_changes(
 477         const struct dc_context *ctx,
 478         struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
 479 {
 480         /* TODO: to be implemented */
 481         return false;
 482 }
 483 
 484 bool dm_pp_apply_power_level_change_request(
 485         const struct dc_context *ctx,
 486         struct dm_pp_power_level_change_request *level_change_req)
 487 {
 488         /* TODO: to be implemented */
 489         return false;
 490 }
 491 
 492 bool dm_pp_apply_clock_for_voltage_request(
 493         const struct dc_context *ctx,
 494         struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
 495 {
 496         struct amdgpu_device *adev = ctx->driver_context;
 497         struct pp_display_clock_request pp_clock_request = {0};
 498         int ret = 0;
 499 
 500         pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
 501         pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;
 502 
 503         if (!pp_clock_request.clock_type)
 504                 return false;
 505 
 506         if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_clock_voltage_request)
 507                 ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
 508                         adev->powerplay.pp_handle,
 509                         &pp_clock_request);
 510         else if (adev->smu.funcs &&
 511                  adev->smu.funcs->display_clock_voltage_request)
 512                 ret = smu_display_clock_voltage_request(&adev->smu,
 513                                                         &pp_clock_request);
 514         if (ret)
 515                 return false;
 516         return true;
 517 }
 518 
 519 bool dm_pp_get_static_clocks(
 520         const struct dc_context *ctx,
 521         struct dm_pp_static_clock_info *static_clk_info)
 522 {
 523         struct amdgpu_device *adev = ctx->driver_context;
 524         struct amd_pp_clock_info pp_clk_info = {0};
 525         int ret = 0;
 526 
 527         if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_current_clocks)
 528                 ret = adev->powerplay.pp_funcs->get_current_clocks(
 529                         adev->powerplay.pp_handle,
 530                         &pp_clk_info);
 531         else if (adev->smu.funcs)
 532                 ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
 533         if (ret)
 534                 return false;
 535 
 536         static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
 537         static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
 538         static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
 539 
 540         return true;
 541 }
 542 
/*
 * pp_rv_set_wm_ranges() - program watermark clock ranges on Raven-class
 * ASICs.
 *
 * Translates DC's reader (DMIF) and writer (MCIF) watermark sets into
 * the soc15 clock-range structures and hands them to the powerplay
 * layer, or to the SW SMU when no powerplay hook is available.
 *
 * NOTE: DC supplies clocks in MHz; the soc15 structures want kHz, hence
 * the * 1000 conversions below.
 */
void pp_rv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
	int32_t i;

	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

	/* Reader (display pipe) sets: drain = DCFCLK, fill = MCLK.
	 * wm_inst values above 3 are out of range and fall back to set A. */
	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
		if (ranges->reader_wm_sets[i].wm_inst > 3)
			wm_dce_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_dce_clocks[i].wm_set_id =
					ranges->reader_wm_sets[i].wm_inst;
		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
				ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
				ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
	}

	/* Writer (writeback) sets: fill = SOCCLK, drain = MCLK — note the
	 * fill/drain roles are swapped relative to the reader sets. */
	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
		if (ranges->writer_wm_sets[i].wm_inst > 3)
			wm_soc_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_soc_clocks[i].wm_set_id =
					ranges->writer_wm_sets[i].wm_inst;
		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
				ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
				ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
	}

	/* Prefer the legacy powerplay hook; fall back to SW SMU. */
	if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
		pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
							   &wm_with_clock_ranges);
	else if (adev->smu.funcs &&
		 adev->smu.funcs->set_watermarks_for_clock_ranges)
		smu_set_watermarks_for_clock_ranges(&adev->smu,
						    &wm_with_clock_ranges);
}
 598 
 599 void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
 600 {
 601         const struct dc_context *ctx = pp->dm;
 602         struct amdgpu_device *adev = ctx->driver_context;
 603         void *pp_handle = adev->powerplay.pp_handle;
 604         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 605 
 606         if (pp_funcs && pp_funcs->notify_smu_enable_pwe)
 607                 pp_funcs->notify_smu_enable_pwe(pp_handle);
 608         else if (adev->smu.funcs)
 609                 smu_notify_smu_enable_pwe(&adev->smu);
 610 }
 611 
 612 void pp_rv_set_active_display_count(struct pp_smu *pp, int count)
 613 {
 614         const struct dc_context *ctx = pp->dm;
 615         struct amdgpu_device *adev = ctx->driver_context;
 616         void *pp_handle = adev->powerplay.pp_handle;
 617         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 618 
 619         if (!pp_funcs || !pp_funcs->set_active_display_count)
 620                 return;
 621 
 622         pp_funcs->set_active_display_count(pp_handle, count);
 623 }
 624 
 625 void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock)
 626 {
 627         const struct dc_context *ctx = pp->dm;
 628         struct amdgpu_device *adev = ctx->driver_context;
 629         void *pp_handle = adev->powerplay.pp_handle;
 630         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 631 
 632         if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk)
 633                 return;
 634 
 635         pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, clock);
 636 }
 637 
 638 void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock)
 639 {
 640         const struct dc_context *ctx = pp->dm;
 641         struct amdgpu_device *adev = ctx->driver_context;
 642         void *pp_handle = adev->powerplay.pp_handle;
 643         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 644 
 645         if (!pp_funcs || !pp_funcs->set_hard_min_dcefclk_by_freq)
 646                 return;
 647 
 648         pp_funcs->set_hard_min_dcefclk_by_freq(pp_handle, clock);
 649 }
 650 
 651 void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
 652 {
 653         const struct dc_context *ctx = pp->dm;
 654         struct amdgpu_device *adev = ctx->driver_context;
 655         void *pp_handle = adev->powerplay.pp_handle;
 656         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 657 
 658         if (!pp_funcs || !pp_funcs->set_hard_min_fclk_by_freq)
 659                 return;
 660 
 661         pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
 662 }
 663 
 664 enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
 665                 struct pp_smu_wm_range_sets *ranges)
 666 {
 667         const struct dc_context *ctx = pp->dm;
 668         struct amdgpu_device *adev = ctx->driver_context;
 669         struct smu_context *smu = &adev->smu;
 670         struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
 671         struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
 672                         wm_with_clock_ranges.wm_dmif_clocks_ranges;
 673         struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
 674                         wm_with_clock_ranges.wm_mcif_clocks_ranges;
 675         int32_t i;
 676 
 677         wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
 678         wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
 679 
 680         for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
 681                 if (ranges->reader_wm_sets[i].wm_inst > 3)
 682                         wm_dce_clocks[i].wm_set_id = WM_SET_A;
 683                 else
 684                         wm_dce_clocks[i].wm_set_id =
 685                                         ranges->reader_wm_sets[i].wm_inst;
 686                 wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
 687                         ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
 688                 wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
 689                         ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
 690                 wm_dce_clocks[i].wm_max_mem_clk_in_khz =
 691                         ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
 692                 wm_dce_clocks[i].wm_min_mem_clk_in_khz =
 693                         ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
 694         }
 695 
 696         for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
 697                 if (ranges->writer_wm_sets[i].wm_inst > 3)
 698                         wm_soc_clocks[i].wm_set_id = WM_SET_A;
 699                 else
 700                         wm_soc_clocks[i].wm_set_id =
 701                                         ranges->writer_wm_sets[i].wm_inst;
 702                 wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
 703                         ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
 704                 wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
 705                         ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
 706                 wm_soc_clocks[i].wm_max_mem_clk_in_khz =
 707                         ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
 708                 wm_soc_clocks[i].wm_min_mem_clk_in_khz =
 709                         ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
 710         }
 711 
 712         if (!smu->funcs)
 713                 return PP_SMU_RESULT_UNSUPPORTED;
 714 
 715         /* 0: successful or smu.funcs->set_watermarks_for_clock_ranges = NULL;
 716          * 1: fail
 717          */
 718         if (smu_set_watermarks_for_clock_ranges(&adev->smu,
 719                         &wm_with_clock_ranges))
 720                 return PP_SMU_RESULT_UNSUPPORTED;
 721 
 722         return PP_SMU_RESULT_OK;
 723 }
 724 
 725 enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp)
 726 {
 727         const struct dc_context *ctx = pp->dm;
 728         struct amdgpu_device *adev = ctx->driver_context;
 729         struct smu_context *smu = &adev->smu;
 730 
 731         if (!smu->funcs)
 732                 return PP_SMU_RESULT_UNSUPPORTED;
 733 
 734         /* 0: successful or smu.funcs->set_azalia_d3_pme = NULL;  1: fail */
 735         if (smu_set_azalia_d3_pme(smu))
 736                 return PP_SMU_RESULT_FAIL;
 737 
 738         return PP_SMU_RESULT_OK;
 739 }
 740 
 741 enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
 742 {
 743         const struct dc_context *ctx = pp->dm;
 744         struct amdgpu_device *adev = ctx->driver_context;
 745         struct smu_context *smu = &adev->smu;
 746 
 747         if (!smu->funcs)
 748                 return PP_SMU_RESULT_UNSUPPORTED;
 749 
 750         /* 0: successful or smu.funcs->set_display_count = NULL;  1: fail */
 751         if (smu_set_display_count(smu, count))
 752                 return PP_SMU_RESULT_FAIL;
 753 
 754         return PP_SMU_RESULT_OK;
 755 }
 756 
 757 enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
 758 {
 759         const struct dc_context *ctx = pp->dm;
 760         struct amdgpu_device *adev = ctx->driver_context;
 761         struct smu_context *smu = &adev->smu;
 762 
 763         if (!smu->funcs)
 764                 return PP_SMU_RESULT_UNSUPPORTED;
 765 
 766         /* 0: successful or smu.funcs->set_deep_sleep_dcefclk = NULL;1: fail */
 767         if (smu_set_deep_sleep_dcefclk(smu, mhz))
 768                 return PP_SMU_RESULT_FAIL;
 769 
 770         return PP_SMU_RESULT_OK;
 771 }
 772 
 773 enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
 774                 struct pp_smu *pp, int mhz)
 775 {
 776         const struct dc_context *ctx = pp->dm;
 777         struct amdgpu_device *adev = ctx->driver_context;
 778         struct smu_context *smu = &adev->smu;
 779         struct pp_display_clock_request clock_req;
 780 
 781         if (!smu->funcs)
 782                 return PP_SMU_RESULT_UNSUPPORTED;
 783 
 784         clock_req.clock_type = amd_pp_dcef_clock;
 785         clock_req.clock_freq_in_khz = mhz * 1000;
 786 
 787         /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
 788          * 1: fail
 789          */
 790         if (smu_display_clock_voltage_request(smu, &clock_req))
 791                 return PP_SMU_RESULT_FAIL;
 792 
 793         return PP_SMU_RESULT_OK;
 794 }
 795 
 796 enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
 797 {
 798         const struct dc_context *ctx = pp->dm;
 799         struct amdgpu_device *adev = ctx->driver_context;
 800         struct smu_context *smu = &adev->smu;
 801         struct pp_display_clock_request clock_req;
 802 
 803         if (!smu->funcs)
 804                 return PP_SMU_RESULT_UNSUPPORTED;
 805 
 806         clock_req.clock_type = amd_pp_mem_clock;
 807         clock_req.clock_freq_in_khz = mhz * 1000;
 808 
 809         /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
 810          * 1: fail
 811          */
 812         if (smu_display_clock_voltage_request(smu, &clock_req))
 813                 return PP_SMU_RESULT_FAIL;
 814 
 815         return PP_SMU_RESULT_OK;
 816 }
 817 
 818 enum pp_smu_status pp_nv_set_pstate_handshake_support(
 819         struct pp_smu *pp, BOOLEAN pstate_handshake_supported)
 820 {
 821         const struct dc_context *ctx = pp->dm;
 822         struct amdgpu_device *adev = ctx->driver_context;
 823         struct smu_context *smu = &adev->smu;
 824 
 825         if (smu_display_disable_memory_clock_switch(smu, !pstate_handshake_supported))
 826                 return PP_SMU_RESULT_FAIL;
 827 
 828         return PP_SMU_RESULT_OK;
 829 }
 830 
 831 enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
 832                 enum pp_smu_nv_clock_id clock_id, int mhz)
 833 {
 834         const struct dc_context *ctx = pp->dm;
 835         struct amdgpu_device *adev = ctx->driver_context;
 836         struct smu_context *smu = &adev->smu;
 837         struct pp_display_clock_request clock_req;
 838 
 839         if (!smu->funcs)
 840                 return PP_SMU_RESULT_UNSUPPORTED;
 841 
 842         switch (clock_id) {
 843         case PP_SMU_NV_DISPCLK:
 844                 clock_req.clock_type = amd_pp_disp_clock;
 845                 break;
 846         case PP_SMU_NV_PHYCLK:
 847                 clock_req.clock_type = amd_pp_phy_clock;
 848                 break;
 849         case PP_SMU_NV_PIXELCLK:
 850                 clock_req.clock_type = amd_pp_pixel_clock;
 851                 break;
 852         default:
 853                 break;
 854         }
 855         clock_req.clock_freq_in_khz = mhz * 1000;
 856 
 857         /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
 858          * 1: fail
 859          */
 860         if (smu_display_clock_voltage_request(smu, &clock_req))
 861                 return PP_SMU_RESULT_FAIL;
 862 
 863         return PP_SMU_RESULT_OK;
 864 }
 865 
 866 enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
 867                 struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks)
 868 {
 869         const struct dc_context *ctx = pp->dm;
 870         struct amdgpu_device *adev = ctx->driver_context;
 871         struct smu_context *smu = &adev->smu;
 872 
 873         if (!smu->funcs)
 874                 return PP_SMU_RESULT_UNSUPPORTED;
 875 
 876         if (!smu->funcs->get_max_sustainable_clocks_by_dc)
 877                 return PP_SMU_RESULT_UNSUPPORTED;
 878 
 879         if (!smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks))
 880                 return PP_SMU_RESULT_OK;
 881 
 882         return PP_SMU_RESULT_FAIL;
 883 }
 884 
 885 enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
 886                 unsigned int *clock_values_in_khz, unsigned int *num_states)
 887 {
 888         const struct dc_context *ctx = pp->dm;
 889         struct amdgpu_device *adev = ctx->driver_context;
 890         struct smu_context *smu = &adev->smu;
 891 
 892         if (!smu->ppt_funcs)
 893                 return PP_SMU_RESULT_UNSUPPORTED;
 894 
 895         if (!smu->ppt_funcs->get_uclk_dpm_states)
 896                 return PP_SMU_RESULT_UNSUPPORTED;
 897 
 898         if (!smu->ppt_funcs->get_uclk_dpm_states(smu,
 899                         clock_values_in_khz, num_states))
 900                 return PP_SMU_RESULT_OK;
 901 
 902         return PP_SMU_RESULT_FAIL;
 903 }
 904 
/* Populate the pp_smu function table for the ASIC generation given by
 * ctx->dce_version: Raven (DCN 1.x) routes through the powerplay/SMU
 * wrappers above; Navi (DCN 2.0) routes through the direct SMU wrappers.
 * Unsupported versions only log an error and leave @funcs untouched.
 */
void dm_pp_get_funcs(
		struct dc_context *ctx,
		struct pp_smu_funcs *funcs)
{
	switch (ctx->dce_version) {
	case DCN_VERSION_1_0:
	case DCN_VERSION_1_01:
		funcs->ctx.ver = PP_SMU_VER_RV;
		funcs->rv_funcs.pp_smu.dm = ctx;
		funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
		funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
		funcs->rv_funcs.set_display_count =
				pp_rv_set_active_display_count;
		funcs->rv_funcs.set_min_deep_sleep_dcfclk =
				pp_rv_set_min_deep_sleep_dcfclk;
		funcs->rv_funcs.set_hard_min_dcfclk_by_freq =
				pp_rv_set_hard_min_dcefclk_by_freq;
		funcs->rv_funcs.set_hard_min_fclk_by_freq =
				pp_rv_set_hard_min_fclk_by_freq;
		break;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
	case DCN_VERSION_2_0:
		funcs->ctx.ver = PP_SMU_VER_NV;
		funcs->nv_funcs.pp_smu.dm = ctx;
		funcs->nv_funcs.set_display_count = pp_nv_set_display_count;
		funcs->nv_funcs.set_hard_min_dcfclk_by_freq =
				pp_nv_set_hard_min_dcefclk_by_freq;
		funcs->nv_funcs.set_min_deep_sleep_dcfclk =
				pp_nv_set_min_deep_sleep_dcfclk;
		funcs->nv_funcs.set_voltage_by_freq =
				pp_nv_set_voltage_by_freq;
		funcs->nv_funcs.set_wm_ranges = pp_nv_set_wm_ranges;

		/* TODO: enabling set_pme_wa_enable causes a 4k@60Hz display
		 * to not light up; leave it NULL until that is debugged.
		 */
		funcs->nv_funcs.set_pme_wa_enable = NULL;
		/* TODO: debug warning message */
		funcs->nv_funcs.set_hard_min_uclk_by_freq = pp_nv_set_hard_min_uclk_by_freq;
		/* TODO: compare data with the Windows driver */
		funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks;
		/* TODO: compare data with the Windows driver */
		funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;
		funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
		break;
#endif
	default:
		DRM_ERROR("smu version is not supported !\n");
		break;
	}
}

/* [<][>][^][v][top][bottom][index][help] */