root/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c


DEFINITIONS

This source file includes the following definitions:
  1. vce_v2_0_ring_get_rptr
  2. vce_v2_0_ring_get_wptr
  3. vce_v2_0_ring_set_wptr
  4. vce_v2_0_lmi_clean
  5. vce_v2_0_firmware_loaded
  6. vce_v2_0_disable_cg
  7. vce_v2_0_init_cg
  8. vce_v2_0_mc_resume
  9. vce_v2_0_is_idle
  10. vce_v2_0_wait_for_idle
  11. vce_v2_0_start
  12. vce_v2_0_stop
  13. vce_v2_0_set_sw_cg
  14. vce_v2_0_set_dyn_cg
  15. vce_v2_0_enable_mgcg
  16. vce_v2_0_early_init
  17. vce_v2_0_sw_init
  18. vce_v2_0_sw_fini
  19. vce_v2_0_hw_init
  20. vce_v2_0_hw_fini
  21. vce_v2_0_suspend
  22. vce_v2_0_resume
  23. vce_v2_0_soft_reset
  24. vce_v2_0_set_interrupt_state
  25. vce_v2_0_process_interrupt
  26. vce_v2_0_set_clockgating_state
  27. vce_v2_0_set_powergating_state
  28. vce_v2_0_set_ring_funcs
  29. vce_v2_0_set_irq_funcs

/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"
#include "vce/vce_2_0_d.h"
#include "vce/vce_2_0_sh_mask.h"
#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#define VCE_V2_0_FW_SIZE        (256 * 1024)
#define VCE_V2_0_STACK_SIZE     (64 * 1024)
#define VCE_V2_0_DATA_SIZE      (23552 * AMDGPU_MAX_VCE_HANDLES)
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK   0x02

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v2_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->me == 0)
                return RREG32(mmVCE_RB_RPTR);
        else
                return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v2_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->me == 0)
                return RREG32(mmVCE_RB_WPTR);
        else
                return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v2_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->me == 0)
                WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
        else
                WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
}

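/**
 * vce_v2_0_lmi_clean - wait for the LMI to settle
 *
 * @adev: amdgpu_device pointer
 *
 * Polls VCE_LMI_STATUS until any bit in the 0x337f mask (presumably the
 * LMI read/write clean status bits) is set. Returns 0 on success or
 * -ETIMEDOUT if the LMI never reports clean.
 */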
static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
{
        int i, j;

        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        uint32_t status = RREG32(mmVCE_LMI_STATUS);

                        if (status & 0x337f)
                                return 0;
                        mdelay(10);
                }
        }

        return -ETIMEDOUT;
}

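/**
 * vce_v2_0_firmware_loaded - wait for the VCPU to report loaded firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Polls VCE_STATUS for the VCPU "firmware loaded" report bit, resetting
 * the ECPU between retry rounds. Returns 0 on success or -ETIMEDOUT.
 */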
static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
{
        int i, j;

        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        uint32_t status = RREG32(mmVCE_STATUS);

                        if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
                                return 0;
                        mdelay(10);
                }

                DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
                WREG32_P(mmVCE_SOFT_RESET,
                        VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmVCE_SOFT_RESET, 0,
                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(10);
        }

        return -ETIMEDOUT;
}

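/**
 * vce_v2_0_disable_cg - disable clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Forces the VCE clocks on via the CGTT clock override register (the
 * value 7 presumably overrides all three gateable clock domains).
 */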
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
        WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}

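/**
 * vce_v2_0_init_cg - initialize clock gating registers
 *
 * @adev: amdgpu_device pointer
 *
 * Programs initial values into the VCE clock gating registers; the bit
 * patterns are hardware-specific and taken as-is from the reference
 * programming sequence.
 */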
static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
        u32 tmp;

        tmp = RREG32(mmVCE_CLOCK_GATING_A);
        tmp &= ~0xfff;
        tmp |= ((0 << 0) | (4 << 4));
        tmp |= 0x40000;
        WREG32(mmVCE_CLOCK_GATING_A, tmp);

        tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
        tmp &= ~0xfff;
        tmp |= ((0 << 0) | (4 << 4));
        WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

        tmp = RREG32(mmVCE_CLOCK_GATING_B);
        tmp |= 0x10;
        tmp &= ~0x100000;
        WREG32(mmVCE_CLOCK_GATING_B, tmp);
}

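/**
 * vce_v2_0_mc_resume - program the memory controller interface
 *
 * @adev: amdgpu_device pointer
 *
 * Sets up the LMI and programs the VCPU cache offsets/sizes for the
 * firmware, stack and data segments, then enables the VCE system
 * interrupt trap.
 */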
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
        uint32_t size, offset;

        WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
        WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
        WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
        WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

        WREG32(mmVCE_LMI_CTRL, 0x00398000);
        WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
        WREG32(mmVCE_LMI_SWAP_CNTL, 0);
        WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
        WREG32(mmVCE_LMI_VM_CTRL, 0);

        WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));

        offset = AMDGPU_VCE_FIRMWARE_OFFSET;
        size = VCE_V2_0_FW_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

        offset += size;
        size = VCE_V2_0_STACK_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

        offset += size;
        size = VCE_V2_0_DATA_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

        WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
        WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}

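/**
 * vce_v2_0_is_idle - check VCE idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Returns true if the VCE busy bit in SRBM_STATUS2 is clear.
 */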
static bool vce_v2_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}

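/**
 * vce_v2_0_wait_for_idle - wait for VCE to become idle
 *
 * @handle: amdgpu_device pointer
 *
 * Returns 0 once the block reports idle, -ETIMEDOUT otherwise.
 */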
static int vce_v2_0_wait_for_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        unsigned i;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (vce_v2_0_is_idle(handle))
                        return 0;
        }
        return -ETIMEDOUT;
}

/**
 * vce_v2_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Set up and start the VCE block
 */
static int vce_v2_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        int r;

        /* set BUSY flag */
        WREG32_P(mmVCE_STATUS, 1, ~1);

        vce_v2_0_init_cg(adev);
        vce_v2_0_disable_cg(adev);

        vce_v2_0_mc_resume(adev);

        ring = &adev->vce.ring[0];
        WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
        WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
        WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
        WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

        ring = &adev->vce.ring[1];
        WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
        WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
        WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
        WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

        WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
        WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
        mdelay(100);
        WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);

        r = vce_v2_0_firmware_loaded(adev);

        /* clear BUSY flag */
        WREG32_P(mmVCE_STATUS, 0, ~1);

        if (r) {
                DRM_ERROR("VCE not responding, giving up!!!\n");
                return r;
        }

        return 0;
}

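/**
 * vce_v2_0_stop - stop the VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Waits for the LMI to drain and the block to go idle, stalls the UMC
 * and register bus, then puts the VCPU and the rest of the block into
 * reset.
 */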
static int vce_v2_0_stop(struct amdgpu_device *adev)
{
        int i;
        int status;

        if (vce_v2_0_lmi_clean(adev)) {
                DRM_INFO("VCE is not idle\n");
                return 0;
        }

        if (vce_v2_0_wait_for_idle(adev)) {
                DRM_INFO("VCE is busy, can't set clock gating\n");
                return 0;
        }

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));

        for (i = 0; i < 100; ++i) {
                status = RREG32(mmVCE_LMI_STATUS);
                if (status & 0x240)
                        break;
                mdelay(1);
        }

        WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1);

        WREG32(mmVCE_STATUS, 0);

        return 0;
}

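/**
 * vce_v2_0_set_sw_cg - enable/disable software-controlled clock gating
 *
 * @adev: amdgpu_device pointer
 * @gated: enable or disable the gating
 *
 * Toggles the software clock gating bits; the exact bit fields are
 * hardware-specific masks taken from the reference programming sequence.
 */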
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
        u32 tmp;

        if (gated) {
                tmp = RREG32(mmVCE_CLOCK_GATING_B);
                tmp |= 0xe70000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);

                tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
                tmp |= 0xff000000;
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

                tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
                tmp &= ~0x3fc;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

                WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
        } else {
                tmp = RREG32(mmVCE_CLOCK_GATING_B);
                tmp |= 0xe7;
                tmp &= ~0xe70000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);

                tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
                tmp |= 0x1fe000;
                tmp &= ~0xff000000;
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

                tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
                tmp |= 0x3fc;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
        }
}

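/**
 * vce_v2_0_set_dyn_cg - enable/disable dynamic (hardware) clock gating
 *
 * @adev: amdgpu_device pointer
 * @gated: enable or disable the gating
 *
 * Puts the gateable clock domains into dynamic mode; the ECPU, IH, SEM
 * and SYS blocks still need to be gated by software (see the comments
 * in the function body).
 */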
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
        u32 orig, tmp;

        /* LMI_MC/LMI_UMC always set in dynamic,
         * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0}
         */
        tmp = RREG32(mmVCE_CLOCK_GATING_B);
        tmp &= ~0x00060006;

        /* the ECPU, IH, SEM, SYS blocks are the exception and need to be
         * turned on/off by SW
         */
        if (gated) {
                tmp |= 0xe10000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);
        } else {
                tmp |= 0xe1;
                tmp &= ~0xe10000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);
        }

        orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
        tmp &= ~0x1fe000;
        tmp &= ~0xff000000;
        if (tmp != orig)
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

        orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
        tmp &= ~0x3fc;
        if (tmp != orig)
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

        /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
        WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);

        if (gated)
                WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
}

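/**
 * vce_v2_0_enable_mgcg - enable/disable medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable MGCG
 * @sw_cg: use software instead of dynamic clock gating
 *
 * Dispatches to the software or dynamic clock gating path, honoring
 * the AMD_CG_SUPPORT_VCE_MGCG flag.
 */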
static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
                                 bool sw_cg)
{
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
                if (sw_cg)
                        vce_v2_0_set_sw_cg(adev, true);
                else
                        vce_v2_0_set_dyn_cg(adev, true);
        } else {
                vce_v2_0_disable_cg(adev);

                if (sw_cg)
                        vce_v2_0_set_sw_cg(adev, false);
                else
                        vce_v2_0_set_dyn_cg(adev, false);
        }
}

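/**
 * vce_v2_0_early_init - early hardware init
 *
 * @handle: amdgpu_device pointer
 *
 * Sets the number of VCE rings and installs the ring and IRQ callbacks.
 */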
static int vce_v2_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->vce.num_rings = 2;

        vce_v2_0_set_ring_funcs(adev);
        vce_v2_0_set_irq_funcs(adev);

        return 0;
}

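/**
 * vce_v2_0_sw_init - software init
 *
 * @handle: amdgpu_device pointer
 *
 * Registers the VCE interrupt source (legacy client, source id 167),
 * sets up the firmware, stack and data BO via the common VCE code,
 * and initializes the rings.
 */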
static int vce_v2_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* VCE */
        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
        if (r)
                return r;

        r = amdgpu_vce_sw_init(adev, VCE_V2_0_FW_SIZE +
                VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE);
        if (r)
                return r;

        r = amdgpu_vce_resume(adev);
        if (r)
                return r;

        for (i = 0; i < adev->vce.num_rings; i++) {
                ring = &adev->vce.ring[i];
                sprintf(ring->name, "vce%d", i);
                r = amdgpu_ring_init(adev, ring, 512,
                                     &adev->vce.irq, 0);
                if (r)
                        return r;
        }

        r = amdgpu_vce_entity_init(adev);

        return r;
}

static int vce_v2_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vce_suspend(adev);
        if (r)
                return r;

        return amdgpu_vce_sw_fini(adev);
}

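/**
 * vce_v2_0_hw_init - hardware init
 *
 * @handle: amdgpu_device pointer
 *
 * Raises the VCE clocks, enables clock gating and tests each ring.
 */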
static int vce_v2_0_hw_init(void *handle)
{
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
        vce_v2_0_enable_mgcg(adev, true, false);

        for (i = 0; i < adev->vce.num_rings; i++) {
                r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
                if (r)
                        return r;
        }

        DRM_INFO("VCE initialized successfully.\n");

        return 0;
}

static int vce_v2_0_hw_fini(void *handle)
{
        return 0;
}

static int vce_v2_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = vce_v2_0_hw_fini(adev);
        if (r)
                return r;

        return amdgpu_vce_suspend(adev);
}

static int vce_v2_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vce_resume(adev);
        if (r)
                return r;

        return vce_v2_0_hw_init(adev);
}

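/**
 * vce_v2_0_soft_reset - soft reset the VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Asserts the VCE soft reset in the SRBM and restarts the block.
 */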
static int vce_v2_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
        mdelay(5);

        return vce_v2_0_start(adev);
}

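/**
 * vce_v2_0_set_interrupt_state - toggle the VCE interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: enable or disable
 *
 * Enables or disables the VCE system interrupt trap.
 */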
static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        uint32_t val = 0;

        if (state == AMDGPU_IRQ_STATE_ENABLE)
                val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

        WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
        return 0;
}

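/**
 * vce_v2_0_process_interrupt - process a VCE interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Routes fence processing to the ring named in the IV entry.
 */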
static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: VCE\n");
        switch (entry->src_data[0]) {
        case 0:
        case 1:
                amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);
                break;
        }

        return 0;
}

static int vce_v2_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool gate = false;
        bool sw_cg = false;

        if (state == AMD_CG_STATE_GATE) {
                gate = true;
                sw_cg = true;
        }

        vce_v2_0_enable_mgcg(adev, gate, sw_cg);

        return 0;
}

static int vce_v2_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the VCE block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the SMC and the hw blocks.
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == AMD_PG_STATE_GATE)
                return vce_v2_0_stop(adev);
        else
                return vce_v2_0_start(adev);
}

static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
        .name = "vce_v2_0",
        .early_init = vce_v2_0_early_init,
        .late_init = NULL,
        .sw_init = vce_v2_0_sw_init,
        .sw_fini = vce_v2_0_sw_fini,
        .hw_init = vce_v2_0_hw_init,
        .hw_fini = vce_v2_0_hw_fini,
        .suspend = vce_v2_0_suspend,
        .resume = vce_v2_0_resume,
        .is_idle = vce_v2_0_is_idle,
        .wait_for_idle = vce_v2_0_wait_for_idle,
        .soft_reset = vce_v2_0_soft_reset,
        .set_clockgating_state = vce_v2_0_set_clockgating_state,
        .set_powergating_state = vce_v2_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
        .type = AMDGPU_RING_TYPE_VCE,
        .align_mask = 0xf,
        .nop = VCE_CMD_NO_OP,
        .support_64bit_ptrs = false,
        .no_user_fence = true,
        .get_rptr = vce_v2_0_ring_get_rptr,
        .get_wptr = vce_v2_0_ring_get_wptr,
        .set_wptr = vce_v2_0_ring_set_wptr,
        .parse_cs = amdgpu_vce_ring_parse_cs,
        .emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1, no user fence */
        .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
        .emit_ib = amdgpu_vce_ring_emit_ib,
        .emit_fence = amdgpu_vce_ring_emit_fence,
        .test_ring = amdgpu_vce_ring_test_ring,
        .test_ib = amdgpu_vce_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vce_ring_begin_use,
        .end_use = amdgpu_vce_ring_end_use,
};

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->vce.num_rings; i++) {
                adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
                adev->vce.ring[i].me = i;
        }
}

static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
        .set = vce_v2_0_set_interrupt_state,
        .process = vce_v2_0_process_interrupt,
};

static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->vce.irq.num_types = 1;
        adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
}

const struct amdgpu_ip_block_version vce_v2_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_VCE,
        .major = 2,
        .minor = 0,
        .rev = 0,
        .funcs = &vce_v2_0_ip_funcs,
};
