root/drivers/gpu/drm/msm/adreno/a6xx_gmu.c

DEFINITIONS

This source file includes the following definitions:
  1. a6xx_gmu_fault
  2. a6xx_gmu_irq
  3. a6xx_hfi_irq
  4. a6xx_gmu_sptprac_is_on
  5. a6xx_gmu_gx_is_on
  6. __a6xx_gmu_set_freq
  7. a6xx_gmu_set_freq
  8. a6xx_gmu_get_freq
  9. a6xx_gmu_check_idle_level
  10. a6xx_gmu_wait_for_idle
  11. a6xx_gmu_start
  12. a6xx_gmu_hfi_start
  13. a6xx_gmu_set_oob
  14. a6xx_gmu_clear_oob
  15. a6xx_sptprac_enable
  16. a6xx_sptprac_disable
  17. a6xx_gmu_gfx_rail_on
  18. a6xx_gmu_notify_slumber
  19. a6xx_rpmh_start
  20. a6xx_rpmh_stop
  21. pdc_write
  22. a6xx_gmu_rpmh_init
  23. a6xx_gmu_power_config
  24. a6xx_gmu_fw_start
  25. a6xx_gmu_irq_disable
  26. a6xx_gmu_rpmh_off
  27. a6xx_gmu_force_off
  28. a6xx_gmu_resume
  29. a6xx_gmu_isidle
  30. a6xx_gmu_shutdown
  31. a6xx_gmu_stop
  32. a6xx_gmu_memory_free
  33. a6xx_gmu_memory_alloc
  34. a6xx_gmu_memory_probe
  35. a6xx_gmu_get_arc_level
  36. a6xx_gmu_rpmh_arc_votes_init
  37. a6xx_gmu_rpmh_votes_init
  38. a6xx_gmu_build_freq_table
  39. a6xx_gmu_pwrlevels_probe
  40. a6xx_gmu_clocks_probe
  41. a6xx_gmu_get_mmio
  42. a6xx_gmu_get_irq
  43. a6xx_gmu_remove
  44. a6xx_gmu_init

   1 // SPDX-License-Identifier: GPL-2.0
   2 /* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
   3 
   4 #include <linux/clk.h>
   5 #include <linux/interconnect.h>
   6 #include <linux/pm_domain.h>
   7 #include <linux/pm_opp.h>
   8 #include <soc/qcom/cmd-db.h>
   9 
  10 #include "a6xx_gpu.h"
  11 #include "a6xx_gmu.xml.h"
  12 
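     /*
      * The GMU (Graphics Management Unit) is a small Cortex-M3 core that
      * manages GPU power, clocks and DCVS on behalf of the host. The host
      * communicates with it through shared registers and the HFI queues
      * set up in a6xx_gmu_init() below.
      */
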
  13 static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
  14 {
  15         struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
  16         struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
  17         struct msm_gpu *gpu = &adreno_gpu->base;
  18         struct drm_device *dev = gpu->dev;
  19         struct msm_drm_private *priv = dev->dev_private;
  20 
  21         /* FIXME: add a banner here */
  22         gmu->hung = true;
  23 
  24         /* Turn off the hangcheck timer while we are resetting */
  25         del_timer(&gpu->hangcheck_timer);
  26 
  27         /* Queue the GPU handler because we need to treat this as a recovery */
  28         queue_work(priv->wq, &gpu->recover_work);
  29 }
  30 
  31 static irqreturn_t a6xx_gmu_irq(int irq, void *data)
  32 {
  33         struct a6xx_gmu *gmu = data;
  34         u32 status;
  35 
  36         status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
  37         gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
  38 
  39         if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
  40                 dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
  41 
  42                 a6xx_gmu_fault(gmu);
  43         }
  44 
  45         if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
  46                 dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");
  47 
  48         if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
  49                 dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
  50                         gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));
  51 
  52         return IRQ_HANDLED;
  53 }
  54 
  55 static irqreturn_t a6xx_hfi_irq(int irq, void *data)
  56 {
  57         struct a6xx_gmu *gmu = data;
  58         u32 status;
  59 
  60         status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
  61         gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
  62 
  63         if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
  64                 dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
  65 
  66                 a6xx_gmu_fault(gmu);
  67         }
  68 
  69         return IRQ_HANDLED;
  70 }
  71 
  72 bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
  73 {
  74         u32 val;
  75 
  76         /* This can be called from gpu state code so make sure GMU is valid */
  77         if (!gmu->initialized)
  78                 return false;
  79 
  80         val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
  81 
  82         return !(val &
  83                 (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
  84                 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
  85 }
  86 
  87 /* Check to see if the GX rail is still powered */
  88 bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
  89 {
  90         u32 val;
  91 
  92         /* This can be called from gpu state code so make sure GMU is valid */
  93         if (!gmu->initialized)
  94                 return false;
  95 
  96         val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
  97 
  98         return !(val &
  99                 (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
 100                 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
 101 }
 102 
 103 static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
 104 {
 105         struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 106         struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 107         struct msm_gpu *gpu = &adreno_gpu->base;
 108         int ret;
 109 
 110         gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
 111 
 112         gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
 113                 ((3 & 0xf) << 28) | index);
 114 
 115         /*
 116          * Send an invalid index as a vote for the bus bandwidth and let the
 117          * firmware decide on the right vote
 118          */
 119         gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);
 120 
 121         /* Set and clear the OOB for DCVS to trigger the GMU */
 122         a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
 123         a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
 124 
 125         ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
 126         if (ret)
 127                 dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
 128 
 129         gmu->freq = gmu->gpu_freqs[index];
 130 
 131         /*
 132          * Eventually we will want to scale the path vote with the frequency but
 133          * for now leave it at max so that the performance is nominal.
 134          */
 135         icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
 136 }
 137 
 138 void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
 139 {
 140         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 141         struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 142         struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
 143         u32 perf_index = 0;
 144 
 145         if (freq == gmu->freq)
 146                 return;
 147 
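              /*
               * Find the perf index for the requested frequency; if there is
               * no exact match this falls through to the highest level.
               */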
 148         for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
 149                 if (freq == gmu->gpu_freqs[perf_index])
 150                         break;
 151 
 152         __a6xx_gmu_set_freq(gmu, perf_index);
 153 }
 154 
 155 unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
 156 {
 157         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 158         struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 159         struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
 160 
  161         return gmu->freq;
 162 }
 163 
 164 static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
 165 {
 166         u32 val;
 167         int local = gmu->idle_level;
 168 
 169         /* SPTP and IFPC both report as IFPC */
 170         if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
 171                 local = GMU_IDLE_STATE_IFPC;
 172 
 173         val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
 174 
 175         if (val == local) {
 176                 if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
 177                         !a6xx_gmu_gx_is_on(gmu))
 178                         return true;
 179         }
 180 
 181         return false;
 182 }
 183 
 184 /* Wait for the GMU to get to its most idle state */
 185 int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
 186 {
 187         return spin_until(a6xx_gmu_check_idle_level(gmu));
 188 }
 189 
 190 static int a6xx_gmu_start(struct a6xx_gmu *gmu)
 191 {
 192         int ret;
 193         u32 val;
 194 
 195         gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
 196         gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
 197 
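              /*
               * On a successful boot the GMU firmware writes the magic value
               * 0xbabeface to FW_INIT_RESULT, so poll for it.
               */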
 198         ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
 199                 val == 0xbabeface, 100, 10000);
 200 
 201         if (ret)
 202                 DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
 203 
 204         return ret;
 205 }
 206 
 207 static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
 208 {
 209         u32 val;
 210         int ret;
 211 
 212         gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
 213 
 214         ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
 215                 val & 1, 100, 10000);
 216         if (ret)
 217                 DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
 218 
 219         return ret;
 220 }
 221 
  222 /* Trigger an OOB (out-of-band) request to the GMU */
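      /*
       * Each OOB target has a request bit that the host sets in
       * HOST2GMU_INTR_SET, an ack bit that the GMU raises in
       * GMU2HOST_INTR_INFO, and a clear bit used to release the request.
       */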
 223 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 224 {
 225         int ret;
 226         u32 val;
 227         int request, ack;
 228         const char *name;
 229 
 230         switch (state) {
 231         case GMU_OOB_GPU_SET:
 232                 request = GMU_OOB_GPU_SET_REQUEST;
 233                 ack = GMU_OOB_GPU_SET_ACK;
 234                 name = "GPU_SET";
 235                 break;
 236         case GMU_OOB_BOOT_SLUMBER:
 237                 request = GMU_OOB_BOOT_SLUMBER_REQUEST;
 238                 ack = GMU_OOB_BOOT_SLUMBER_ACK;
 239                 name = "BOOT_SLUMBER";
 240                 break;
 241         case GMU_OOB_DCVS_SET:
 242                 request = GMU_OOB_DCVS_REQUEST;
 243                 ack = GMU_OOB_DCVS_ACK;
 244                 name = "GPU_DCVS";
 245                 break;
 246         default:
 247                 return -EINVAL;
 248         }
 249 
  250         /* Trigger the requested OOB operation */
 251         gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
 252 
 253         /* Wait for the acknowledge interrupt */
 254         ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
 255                 val & (1 << ack), 100, 10000);
 256 
 257         if (ret)
 258                 DRM_DEV_ERROR(gmu->dev,
 259                         "Timeout waiting for GMU OOB set %s: 0x%x\n",
 260                                 name,
 261                                 gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
 262 
 263         /* Clear the acknowledge interrupt */
 264         gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
 265 
 266         return ret;
 267 }
 268 
 269 /* Clear a pending OOB state in the GMU */
 270 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 271 {
 272         switch (state) {
 273         case GMU_OOB_GPU_SET:
 274                 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
 275                         1 << GMU_OOB_GPU_SET_CLEAR);
 276                 break;
 277         case GMU_OOB_BOOT_SLUMBER:
 278                 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
 279                         1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
 280                 break;
 281         case GMU_OOB_DCVS_SET:
 282                 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
 283                         1 << GMU_OOB_DCVS_CLEAR);
 284                 break;
 285         }
 286 }
 287 
  288 /* Enable CPU control of SPTP power collapse */
 289 static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
 290 {
 291         int ret;
 292         u32 val;
 293 
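              /*
               * The power control values (0x778000 / 0x778001) and the status
               * masks below mirror the downstream driver; they are not
               * otherwise documented.
               */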
 294         gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
 295 
 296         ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
 297                 (val & 0x38) == 0x28, 1, 100);
 298 
 299         if (ret) {
 300                 DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
 301                         gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
 302         }
 303 
  304         return ret;
 305 }
 306 
  307 /* Disable CPU control of SPTP power collapse */
 308 static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
 309 {
 310         u32 val;
 311         int ret;
 312 
 313         /* Make sure retention is on */
 314         gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
 315 
 316         gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);
 317 
 318         ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
 319                 (val & 0x04), 100, 10000);
 320 
 321         if (ret)
 322                 DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
 323                         gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
 324 }
 325 
 326 /* Let the GMU know we are starting a boot sequence */
 327 static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
 328 {
 329         u32 vote;
 330 
 331         /* Let the GMU know we are getting ready for boot */
 332         gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);
 333 
 334         /* Choose the "default" power level as the highest available */
 335         vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];
 336 
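              /*
               * The packed vote carries the primary (GX) index in the low byte
               * and the secondary (MX) index in the next byte; see
               * a6xx_gmu_rpmh_arc_votes_init() for how it is built.
               */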
 337         gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
 338         gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);
 339 
 340         /* Let the GMU know the boot sequence has started */
 341         return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
 342 }
 343 
 344 /* Let the GMU know that we are about to go into slumber */
 345 static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
 346 {
 347         int ret;
 348 
 349         /* Disable the power counter so the GMU isn't busy */
 350         gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
 351 
 352         /* Disable SPTP_PC if the CPU is responsible for it */
 353         if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
 354                 a6xx_sptprac_disable(gmu);
 355 
 356         /* Tell the GMU to get ready to slumber */
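              /* BOOT_SLUMBER_OPTION: 0 requests boot, 1 requests slumber */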
 357         gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
 358 
 359         ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
 360         a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
 361 
 362         if (!ret) {
 363                 /* Check to see if the GMU really did slumber */
 364                 if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
 365                         != 0x0f) {
 366                         DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
 367                         ret = -ETIMEDOUT;
 368                 }
 369         }
 370 
 371         /* Put fence into allow mode */
 372         gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
 373         return ret;
 374 }
 375 
 376 static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
 377 {
 378         int ret;
 379         u32 val;
 380 
 381         gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
 382         /* Wait for the register to finish posting */
 383         wmb();
 384 
 385         ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
 386                 val & (1 << 1), 100, 10000);
 387         if (ret) {
 388                 DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
 389                 return ret;
 390         }
 391 
 392         ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
 393                 !val, 100, 10000);
 394 
 395         if (ret) {
 396                 DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
 397                 return ret;
 398         }
 399 
 400         gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
 401 
 402         /* Set up CX GMU counter 0 to count busy ticks */
 403         gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
 404         gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);
 405 
 406         /* Enable the power counter */
 407         gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
 408         return 0;
 409 }
 410 
 411 static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
 412 {
 413         int ret;
 414         u32 val;
 415 
 416         gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
 417 
 418         ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
 419                 val, val & (1 << 16), 100, 10000);
 420         if (ret)
 421                 DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
 422 
 423         gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
 424 }
 425 
 426 static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
 427 {
 428         return msm_writel(value, ptr + (offset << 2));
 429 }
 430 
 431 static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
 432                 const char *name);
 433 
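      /*
       * Load the RSC and PDC sequencer microcode and the TCS commands used
       * for the GPU sleep and wakeup sequences. The addresses and values
       * below mirror the downstream driver and are SoC specific.
       */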
 434 static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
 435 {
 436         struct platform_device *pdev = to_platform_device(gmu->dev);
 437         void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
 438         void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
 439 
 440         if (!pdcptr || !seqptr)
 441                 goto err;
 442 
 443         /* Disable SDE clock gating */
 444         gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
 445 
 446         /* Setup RSC PDC handshake for sleep and wakeup */
 447         gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
 448         gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
 449         gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
 450         gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
 451         gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
 452         gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
 453         gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
 454         gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
 455         gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
 456         gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
 457         gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
 458 
 459         /* Load RSC sequencer uCode for sleep and wakeup */
 460         gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
 461         gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
 462         gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
 463         gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
 464         gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
 465 
 466         /* Load PDC sequencer uCode for power up and power down sequence */
 467         pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
 468         pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
 469         pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
 470         pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
 471         pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
 472 
 473         /* Set TCS commands used by PDC sequence for low power modes */
 474         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
 475         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
 476         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
 477         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
 478         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
 479         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
 480         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
 481         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
 482         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
 483         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
 484         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
 485         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
 486         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
 487         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
 488         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
 489         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
 490         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
 491         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
 492         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
 493         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
 494         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
 495         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
 496         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
 497         pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
 498 
 499         /* Setup GPU PDC */
 500         pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
 501         pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
 502 
 503         /* ensure no writes happen before the uCode is fully written */
 504         wmb();
 505 
 506 err:
 507         if (!IS_ERR_OR_NULL(pdcptr))
 508                 iounmap(pdcptr);
 509         if (!IS_ERR_OR_NULL(seqptr))
 510                 iounmap(seqptr);
 511 }
 512 
 513 /*
 514  * The lowest 16 bits of this value are the number of XO clock cycles for main
 515  * hysteresis which is set at 0x1680 cycles (300 us).  The higher 16 bits are
  516  * for the shorter hysteresis that happens after main - this is 0xa (0.5 us)
 517  */
 518 
 519 #define GMU_PWR_COL_HYST 0x000a1680
 520 
 521 /* Set up the idle state for the GMU */
 522 static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
 523 {
 524         /* Disable GMU WB/RB buffer */
 525         gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
 526 
 527         gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
 528 
 529         switch (gmu->idle_level) {
 530         case GMU_IDLE_STATE_IFPC:
 531                 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
 532                         GMU_PWR_COL_HYST);
 533                 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
 534                         A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
 535                         A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
 536                 /* Fall through */
 537         case GMU_IDLE_STATE_SPTP:
 538                 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
 539                         GMU_PWR_COL_HYST);
 540                 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
 541                         A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
 542                         A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
 543         }
 544 
 545         /* Enable RPMh GPU client */
 546         gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
 547                 A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
 548                 A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
 549                 A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
 550                 A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
 551                 A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
 552                 A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
 553 }
 554 
 555 static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 556 {
 557         static bool rpmh_init;
 558         struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 559         struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 560         int i, ret;
 561         u32 chipid;
 562         u32 *image;
 563 
 564         if (state == GMU_WARM_BOOT) {
 565                 ret = a6xx_rpmh_start(gmu);
 566                 if (ret)
 567                         return ret;
 568         } else {
 569                 if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
 570                         "GMU firmware is not loaded\n"))
 571                         return -ENOENT;
 572 
 573                 /* Sanity check the size of the firmware that was loaded */
 574                 if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
 575                         DRM_DEV_ERROR(gmu->dev,
 576                                 "GMU firmware is bigger than the available region\n");
 577                         return -EINVAL;
 578                 }
 579 
 580                 /* Turn on register retention */
 581                 gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
 582 
 583                 /* We only need to load the RPMh microcode once */
 584                 if (!rpmh_init) {
 585                         a6xx_gmu_rpmh_init(gmu);
 586                         rpmh_init = true;
 587                 } else {
 588                         ret = a6xx_rpmh_start(gmu);
 589                         if (ret)
 590                                 return ret;
 591                 }
 592 
 593                 image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;
 594 
 595                 for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
 596                         gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
 597                                 image[i]);
 598         }
 599 
 600         gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
 601         gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
 602 
 603         /* Write the iova of the HFI table */
 604         gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
 605         gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
 606 
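              /*
               * Configure the AHB fence range; the magic value mirrors the
               * downstream driver and is not otherwise documented.
               */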
 607         gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
 608                 (1 << 31) | (0xa << 18) | (0xa0));
 609 
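              /*
               * Pack the chip id (core.major.minor.patchid) in the format the
               * GMU firmware expects
               */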
 610         chipid = adreno_gpu->rev.core << 24;
 611         chipid |= adreno_gpu->rev.major << 16;
 612         chipid |= adreno_gpu->rev.minor << 12;
 613         chipid |= adreno_gpu->rev.patchid << 8;
 614 
 615         gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
 616 
 617         /* Set up the lowest idle level on the GMU */
 618         a6xx_gmu_power_config(gmu);
 619 
 620         ret = a6xx_gmu_start(gmu);
 621         if (ret)
 622                 return ret;
 623 
 624         ret = a6xx_gmu_gfx_rail_on(gmu);
 625         if (ret)
 626                 return ret;
 627 
 628         /* Enable SPTP_PC if the CPU is responsible for it */
 629         if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
 630                 ret = a6xx_sptprac_enable(gmu);
 631                 if (ret)
 632                         return ret;
 633         }
 634 
 635         ret = a6xx_gmu_hfi_start(gmu);
 636         if (ret)
 637                 return ret;
 638 
 639         /* FIXME: Do we need this wmb() here? */
 640         wmb();
 641 
 642         return 0;
 643 }
 644 
 645 #define A6XX_HFI_IRQ_MASK \
 646         (A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
 647 
 648 #define A6XX_GMU_IRQ_MASK \
 649         (A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
 650          A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
 651          A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
 652 
 653 static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
 654 {
 655         disable_irq(gmu->gmu_irq);
 656         disable_irq(gmu->hfi_irq);
 657 
 658         gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
 659         gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
 660 }
 661 
 662 static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
 663 {
 664         u32 val;
 665 
 666         /* Make sure there are no outstanding RPMh votes */
 667         gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
 668                 (val & 1), 100, 10000);
 669         gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
 670                 (val & 1), 100, 10000);
 671         gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
 672                 (val & 1), 100, 10000);
 673         gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
  674                 (val & 1), 100, 10000);
 675 }
 676 
 677 /* Force the GMU off in case it isn't responsive */
 678 static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
 679 {
 680         /* Flush all the queues */
 681         a6xx_hfi_stop(gmu);
 682 
 683         /* Stop the interrupts */
 684         a6xx_gmu_irq_disable(gmu);
 685 
 686         /* Force off SPTP in case the GMU is managing it */
 687         a6xx_sptprac_disable(gmu);
 688 
 689         /* Make sure there are no outstanding RPMh votes */
 690         a6xx_gmu_rpmh_off(gmu);
 691 }
 692 
 693 int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 694 {
 695         struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 696         struct msm_gpu *gpu = &adreno_gpu->base;
 697         struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
 698         int status, ret;
 699 
 700         if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
 701                 return 0;
 702 
 703         gmu->hung = false;
 704 
 705         /* Turn on the resources */
 706         pm_runtime_get_sync(gmu->dev);
 707 
 708         /* Use a known rate to bring up the GMU */
 709         clk_set_rate(gmu->core_clk, 200000000);
 710         ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
 711         if (ret) {
 712                 pm_runtime_put(gmu->dev);
 713                 return ret;
 714         }
 715 
 716         /* Set the bus quota to a reasonable value for boot */
 717         icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));
 718 
 719         /* Enable the GMU interrupt */
 720         gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
 721         gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
 722         enable_irq(gmu->gmu_irq);
 723 
 724         /* Check to see if we are doing a cold or warm boot */
 725         status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
 726                 GMU_WARM_BOOT : GMU_COLD_BOOT;
 727 
 728         ret = a6xx_gmu_fw_start(gmu, status);
 729         if (ret)
 730                 goto out;
 731 
 732         ret = a6xx_hfi_start(gmu, status);
 733         if (ret)
 734                 goto out;
 735 
 736         /*
 737          * Turn on the GMU firmware fault interrupt after we know the boot
 738          * sequence is successful
 739          */
 740         gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
 741         gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
 742         enable_irq(gmu->hfi_irq);
 743 
 744         /* Set the GPU to the highest power frequency */
 745         __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
 746 
 747         /*
 748          * "enable" the GX power domain which won't actually do anything but it
 749          * will make sure that the refcounting is correct in case we need to
 750          * bring down the GX after a GMU failure
 751          */
 752         if (!IS_ERR_OR_NULL(gmu->gxpd))
 753                 pm_runtime_get(gmu->gxpd);
 754 
 755 out:
 756         /* On failure, shut down the GMU to leave it in a good state */
 757         if (ret) {
 758                 disable_irq(gmu->gmu_irq);
 759                 a6xx_rpmh_stop(gmu);
 760                 pm_runtime_put(gmu->dev);
 761         }
 762 
 763         return ret;
 764 }
 765 
 766 bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
 767 {
 768         u32 reg;
 769 
 770         if (!gmu->initialized)
 771                 return true;
 772 
 773         reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
 774 
  775         if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
 776                 return false;
 777 
 778         return true;
 779 }
 780 
 781 /* Gracefully try to shut down the GMU and by extension the GPU */
 782 static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 783 {
 784         struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 785         struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 786         struct msm_gpu *gpu = &adreno_gpu->base;
 787         u32 val;
 788 
 789         /*
  790          * The GMU may still be in slumber if the GPU never started, so check
  791          * the power state and skip putting it back into slumber if so
 792          */
 793         val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
 794 
 795         if (val != 0xf) {
 796                 int ret = a6xx_gmu_wait_for_idle(gmu);
 797 
 798                 /* If the GMU isn't responding assume it is hung */
 799                 if (ret) {
 800                         a6xx_gmu_force_off(gmu);
 801                         return;
 802                 }
 803 
 804                 /* Clear the VBIF pipe before shutting down */
 805                 gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
 806                 spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf)
 807                         == 0xf);
 808                 gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
 809 
 810                 /* tell the GMU we want to slumber */
 811                 a6xx_gmu_notify_slumber(gmu);
 812 
 813                 ret = gmu_poll_timeout(gmu,
 814                         REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
 815                         !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
 816                         100, 10000);
 817 
 818                 /*
 819                  * Let the user know we failed to slumber but don't worry too
 820                  * much because we are powering down anyway
 821                  */
 822 
 823                 if (ret)
 824                         DRM_DEV_ERROR(gmu->dev,
  825                                 "Unable to slumber GMU: status = 0x%x/0x%x\n",
 826                                 gmu_read(gmu,
 827                                         REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
 828                                 gmu_read(gmu,
 829                                         REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
 830         }
 831 
 832         /* Turn off HFI */
 833         a6xx_hfi_stop(gmu);
 834 
 835         /* Stop the interrupts and mask the hardware */
 836         a6xx_gmu_irq_disable(gmu);
 837 
 838         /* Tell RPMh to power off the GPU */
 839         a6xx_rpmh_stop(gmu);
 840 }
 841 
 842 
 843 int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
 844 {
 845         struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
 846         struct msm_gpu *gpu = &a6xx_gpu->base.base;
 847 
 848         if (!pm_runtime_active(gmu->dev))
 849                 return 0;
 850 
 851         /*
 852          * Force the GMU off if we detected a hang, otherwise try to shut it
 853          * down gracefully
 854          */
 855         if (gmu->hung)
 856                 a6xx_gmu_force_off(gmu);
 857         else
 858                 a6xx_gmu_shutdown(gmu);
 859 
 860         /* Remove the bus vote */
 861         icc_set_bw(gpu->icc_path, 0, 0);
 862 
 863         /*
 864          * Make sure the GX domain is off before turning off the GMU (CX)
 865          * domain. Usually the GMU does this but only if the shutdown sequence
 866          * was successful
 867          */
 868         if (!IS_ERR_OR_NULL(gmu->gxpd))
 869                 pm_runtime_put_sync(gmu->gxpd);
 870 
 871         clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
 872 
 873         pm_runtime_put_sync(gmu->dev);
 874 
 875         return 0;
 876 }
 877 
 878 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
 879 {
 880         int count, i;
 881         u64 iova;
 882 
 883         if (IS_ERR_OR_NULL(bo))
 884                 return;
 885 
 886         count = bo->size >> PAGE_SHIFT;
 887         iova = bo->iova;
 888 
 889         for (i = 0; i < count; i++, iova += PAGE_SIZE) {
 890                 iommu_unmap(gmu->domain, iova, PAGE_SIZE);
 891                 __free_pages(bo->pages[i], 0);
 892         }
 893 
 894         kfree(bo->pages);
 895         kfree(bo);
 896 }
 897 
 898 static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
 899                 size_t size)
 900 {
 901         struct a6xx_gmu_bo *bo;
 902         int ret, count, i;
 903 
 904         bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 905         if (!bo)
 906                 return ERR_PTR(-ENOMEM);
 907 
 908         bo->size = PAGE_ALIGN(size);
 909 
 910         count = bo->size >> PAGE_SHIFT;
 911 
 912         bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
 913         if (!bo->pages) {
 914                 kfree(bo);
 915                 return ERR_PTR(-ENOMEM);
 916         }
 917 
 918         for (i = 0; i < count; i++) {
 919                 bo->pages[i] = alloc_page(GFP_KERNEL);
 920                 if (!bo->pages[i])
 921                         goto err;
 922         }
 923 
 924         bo->iova = gmu->uncached_iova_base;
 925 
 926         for (i = 0; i < count; i++) {
 927                 ret = iommu_map(gmu->domain,
 928                         bo->iova + (PAGE_SIZE * i),
 929                         page_to_phys(bo->pages[i]), PAGE_SIZE,
 930                         IOMMU_READ | IOMMU_WRITE);
 931 
 932                 if (ret) {
 933                         DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
 934 
 935                         for (i = i - 1 ; i >= 0; i--)
 936                                 iommu_unmap(gmu->domain,
 937                                         bo->iova + (PAGE_SIZE * i),
 938                                         PAGE_SIZE);
 939 
 940                         goto err;
 941                 }
 942         }
 943 
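              /*
               * Map the buffer write-combined: the GMU accesses this memory
               * directly, so CPU-side caching must be avoided.
               */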
 944         bo->virt = vmap(bo->pages, count, VM_IOREMAP,
 945                 pgprot_writecombine(PAGE_KERNEL));
 946         if (!bo->virt)
 947                 goto err;
 948 
 949         /* Align future IOVA addresses on 1MB boundaries */
 950         gmu->uncached_iova_base += ALIGN(size, SZ_1M);
 951 
 952         return bo;
 953 
 954 err:
 955         for (i = 0; i < count; i++) {
 956                 if (bo->pages[i])
 957                         __free_pages(bo->pages[i], 0);
 958         }
 959 
 960         kfree(bo->pages);
 961         kfree(bo);
 962 
 963         return ERR_PTR(-ENOMEM);
 964 }
 965 
 966 static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
 967 {
 968         int ret;
 969 
 970         /*
 971          * The GMU address space is hardcoded to treat the range
 972          * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
 973          * between the GMU and the CPU will live in this space
 974          */
 975         gmu->uncached_iova_base = 0x60000000;
 976 
 977 
 978         gmu->domain = iommu_domain_alloc(&platform_bus_type);
 979         if (!gmu->domain)
 980                 return -ENODEV;
 981 
 982         ret = iommu_attach_device(gmu->domain, gmu->dev);
 983 
 984         if (ret) {
 985                 iommu_domain_free(gmu->domain);
 986                 gmu->domain = NULL;
 987         }
 988 
 989         return ret;
 990 }
 991 
 992 /* Return the 'arc-level' for the given frequency */
 993 static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
 994                                            unsigned long freq)
 995 {
 996         struct dev_pm_opp *opp;
 997         unsigned int val;
 998 
 999         if (!freq)
1000                 return 0;
1001 
1002         opp = dev_pm_opp_find_freq_exact(dev, freq, true);
1003         if (IS_ERR(opp))
1004                 return 0;
1005 
1006         val = dev_pm_opp_get_level(opp);
1007 
1008         dev_pm_opp_put(opp);
1009 
1010         return val;
1011 }
1012 
1013 static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
1014                 unsigned long *freqs, int freqs_count, const char *id)
1015 {
1016         int i, j;
1017         const u16 *pri, *sec;
1018         size_t pri_count, sec_count;
1019 
1020         pri = cmd_db_read_aux_data(id, &pri_count);
1021         if (IS_ERR(pri))
1022                 return PTR_ERR(pri);
1023         /*
1024          * The data comes back as an array of unsigned shorts so adjust the
1025          * count accordingly
1026          */
1027         pri_count >>= 1;
1028         if (!pri_count)
1029                 return -EINVAL;
1030 
1031         sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
1032         if (IS_ERR(sec))
1033                 return PTR_ERR(sec);
1034 
1035         sec_count >>= 1;
1036         if (!sec_count)
1037                 return -EINVAL;
1038 
1039         /* Construct a vote for each frequency */
1040         for (i = 0; i < freqs_count; i++) {
1041                 u8 pindex = 0, sindex = 0;
1042                 unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);
1043 
1044                 /* Get the primary index that matches the arc level */
1045                 for (j = 0; j < pri_count; j++) {
1046                         if (pri[j] >= level) {
1047                                 pindex = j;
1048                                 break;
1049                         }
1050                 }
1051 
1052                 if (j == pri_count) {
1053                         DRM_DEV_ERROR(dev,
1054                                 "Level %u not found in the RPMh list\n",
1055                                         level);
1056                         DRM_DEV_ERROR(dev, "Available levels:\n");
1057                         for (j = 0; j < pri_count; j++)
1058                                 DRM_DEV_ERROR(dev, "  %u\n", pri[j]);
1059 
1060                         return -EINVAL;
1061                 }
1062 
1063                 /*
1064          * Look for a level in the secondary list that matches. If
1065          * nothing fits, use the maximum non-zero vote
1066                  */
1067 
1068                 for (j = 0; j < sec_count; j++) {
1069                         if (sec[j] >= level) {
1070                                 sindex = j;
1071                                 break;
1072                         } else if (sec[j]) {
1073                                 sindex = j;
1074                         }
1075                 }
1076 
1077                 /* Construct the vote */
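             /*
              * Bits 16-31 carry the primary ARC level value, bits 8-15 the
              * secondary (MX) index and bits 0-7 the primary (GX) index.
              */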
1078                 votes[i] = ((pri[pindex] & 0xffff) << 16) |
1079                         (sindex << 8) | pindex;
1080         }
1081 
1082         return 0;
1083 }
1084 
1085 /*
1086  * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
1087  * to construct the list of votes on the CPU and send it over. Query the RPMh
1088  * voltage levels and build the votes
1089  */
1090 
1091 static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
1092 {
1093         struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1094         struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1095         struct msm_gpu *gpu = &adreno_gpu->base;
1096         int ret;
1097 
1098         /* Build the GX votes */
1099         ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
1100                 gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");
1101 
1102         /* Build the CX votes */
1103         ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
1104                 gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
1105 
1106         return ret;
1107 }
1108 
1109 static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
1110                 u32 size)
1111 {
1112         int count = dev_pm_opp_get_opp_count(dev);
1113         struct dev_pm_opp *opp;
1114         int i, index = 0;
1115         unsigned long freq = 1;
1116 
1117         /*
1118          * The OPP table doesn't contain the "off" frequency level so we need to
1119          * add 1 to the table size to account for it
1120          */
1121 
1122         if (WARN(count + 1 > size,
1123                 "The GMU frequency table is being truncated\n"))
1124                 count = size - 1;
1125 
1126         /* Set the "off" frequency */
1127         freqs[index++] = 0;
1128 
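             /*
              * dev_pm_opp_find_freq_ceil() rounds 'freq' up to the next
              * available OPP and writes it back, so bumping it by one each
              * pass walks the table in ascending order.
              */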
1129         for (i = 0; i < count; i++) {
1130                 opp = dev_pm_opp_find_freq_ceil(dev, &freq);
1131                 if (IS_ERR(opp))
1132                         break;
1133 
1134                 dev_pm_opp_put(opp);
1135                 freqs[index++] = freq++;
1136         }
1137 
1138         return index;
1139 }
1140 
1141 static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
1142 {
1143         struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1144         struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1145         struct msm_gpu *gpu = &adreno_gpu->base;
1146 
1147         int ret = 0;
1148 
1149         /*
1150          * The GMU handles its own frequency switching so build a list of
1151          * available frequencies to send during initialization
1152          */
1153         ret = dev_pm_opp_of_add_table(gmu->dev);
1154         if (ret) {
1155                 DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
1156                 return ret;
1157         }
1158 
1159         gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
1160                 gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));
1161 
1162         /*
1163          * The GMU also handles GPU frequency switching so build a list
1164          * from the GPU OPP table
1165          */
1166         gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
1167                 gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
1168 
1169         /* Build the list of RPMh votes that we'll send to the GMU */
1170         return a6xx_gmu_rpmh_votes_init(gmu);
1171 }
1172 
1173 static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
1174 {
1175         int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
1176 
1177         if (ret < 1)
1178                 return ret;
1179 
1180         gmu->nr_clocks = ret;
1181 
1182         gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
1183                 gmu->nr_clocks, "gmu");
1184 
1185         return 0;
1186 }
1187 
1188 static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
1189                 const char *name)
1190 {
1191         void __iomem *ret;
1192         struct resource *res = platform_get_resource_byname(pdev,
1193                         IORESOURCE_MEM, name);
1194 
1195         if (!res) {
1196                 DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
1197                 return ERR_PTR(-EINVAL);
1198         }
1199 
1200         ret = ioremap(res->start, resource_size(res));
1201         if (!ret) {
1202                 DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
1203                 return ERR_PTR(-EINVAL);
1204         }
1205 
1206         return ret;
1207 }
1208 
1209 static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
1210                 const char *name, irq_handler_t handler)
1211 {
1212         int irq, ret;
1213 
1214         irq = platform_get_irq_byname(pdev, name);
1215 
1216         ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
1217         if (ret) {
1218                 DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
1219                               name, ret);
1220                 return ret;
1221         }
1222 
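             /*
              * Leave the interrupt disabled; a6xx_gmu_resume() enables it
              * once the GMU has booted.
              */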
1223         disable_irq(irq);
1224 
1225         return irq;
1226 }
1227 
1228 void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
1229 {
1230         struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1231 
1232         if (!gmu->initialized)
1233                 return;
1234 
1235         pm_runtime_force_suspend(gmu->dev);
1236 
1237         if (!IS_ERR_OR_NULL(gmu->gxpd)) {
1238                 pm_runtime_disable(gmu->gxpd);
1239                 dev_pm_domain_detach(gmu->gxpd, false);
1240         }
1241 
1242         iounmap(gmu->mmio);
1243         gmu->mmio = NULL;
1244 
1245         a6xx_gmu_memory_free(gmu, gmu->hfi);
1246 
1247         iommu_detach_device(gmu->domain, gmu->dev);
1248 
1249         iommu_domain_free(gmu->domain);
1250 
1251         free_irq(gmu->gmu_irq, gmu);
1252         free_irq(gmu->hfi_irq, gmu);
1253 
1254         /* Drop reference taken in of_find_device_by_node */
1255         put_device(gmu->dev);
1256 
1257         gmu->initialized = false;
1258 }
1259 
1260 int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
1261 {
1262         struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1263         struct platform_device *pdev = of_find_device_by_node(node);
1264         int ret;
1265 
1266         if (!pdev)
1267                 return -ENODEV;
1268 
1269         gmu->dev = &pdev->dev;
1270 
1271         of_dma_configure(gmu->dev, node, true);
1272 
1273         /* For now, don't do anything fancy until we get our feet under us */
1274         gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
1275 
1276         pm_runtime_enable(gmu->dev);
1277 
1278         /* Get the list of clocks */
1279         ret = a6xx_gmu_clocks_probe(gmu);
1280         if (ret)
1281                 goto err_put_device;
1282 
1283         /* Set up the IOMMU context bank */
1284         ret = a6xx_gmu_memory_probe(gmu);
1285         if (ret)
1286                 goto err_put_device;
1287 
1288         /* Allocate memory for the HFI queues */
1289         gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
1290         if (IS_ERR(gmu->hfi))
1291                 goto err_memory;
1292 
1293         /* Allocate memory for the GMU debug region */
1294         gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
1295         if (IS_ERR(gmu->debug))
1296                 goto err_memory;
1297 
1298         /* Map the GMU registers */
1299         gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
1300         if (IS_ERR(gmu->mmio))
1301                 goto err_memory;
1302 
1303         /* Get the HFI and GMU interrupts */
1304         gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
1305         gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
1306 
1307         if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
1308                 goto err_mmio;
1309 
1310         /*
1311          * Get a link to the GX power domain to reset the GPU in case of GMU
1312          * crash
1313          */
1314         gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
1315 
1316         /* Get the power levels for the GMU and GPU */
1317         a6xx_gmu_pwrlevels_probe(gmu);
1318 
1319         /* Set up the HFI queues */
1320         a6xx_hfi_init(gmu);
1321 
1322         gmu->initialized = true;
1323 
1324         return 0;
1325 
1326 err_mmio:
1327         iounmap(gmu->mmio);
1328         free_irq(gmu->gmu_irq, gmu);
1329         free_irq(gmu->hfi_irq, gmu);
1330 err_memory:
1331         a6xx_gmu_memory_free(gmu, gmu->hfi);
1332 
1333         if (gmu->domain) {
1334                 iommu_detach_device(gmu->domain, gmu->dev);
1335 
1336                 iommu_domain_free(gmu->domain);
1337         }
1338         ret = -ENODEV;
1339 
1340 err_put_device:
1341         /* Drop reference taken in of_find_device_by_node */
1342         put_device(gmu->dev);
1343 
1344         return ret;
1345 }
