root/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c

DEFINITIONS

This source file includes the following definitions:
  1. gmc_v9_0_ecc_interrupt_state
  2. gmc_v9_0_process_ras_data_cb
  3. gmc_v9_0_process_ecc_irq
  4. gmc_v9_0_vm_fault_interrupt_state
  5. gmc_v9_0_process_interrupt
  6. gmc_v9_0_set_irq_funcs
  7. gmc_v9_0_get_invalidate_req
  8. gmc_v9_0_use_invalidate_semaphore
  9. gmc_v9_0_flush_gpu_tlb
  10. gmc_v9_0_emit_flush_gpu_tlb
  11. gmc_v9_0_emit_pasid_mapping
  12. gmc_v9_0_get_vm_pte_flags
  13. gmc_v9_0_get_vm_pde
  14. gmc_v9_0_set_gmc_funcs
  15. gmc_v9_0_set_umc_funcs
  16. gmc_v9_0_set_mmhub_funcs
  17. gmc_v9_0_early_init
  18. gmc_v9_0_keep_stolen_memory
  19. gmc_v9_0_allocate_vm_inv_eng
  20. gmc_v9_0_ecc_ras_block_late_init
  21. gmc_v9_0_ecc_late_init
  22. gmc_v9_0_late_init
  23. gmc_v9_0_vram_gtt_location
  24. gmc_v9_0_mc_init
  25. gmc_v9_0_gart_init
  26. gmc_v9_0_get_vbios_fb_size
  27. gmc_v9_0_sw_init
  28. gmc_v9_0_sw_fini
  29. gmc_v9_0_init_golden_registers
  30. gmc_v9_0_restore_registers
  31. gmc_v9_0_gart_enable
  32. gmc_v9_0_hw_init
  33. gmc_v9_0_save_registers
  34. gmc_v9_0_gart_disable
  35. gmc_v9_0_hw_fini
  36. gmc_v9_0_suspend
  37. gmc_v9_0_resume
  38. gmc_v9_0_is_idle
  39. gmc_v9_0_wait_for_idle
  40. gmc_v9_0_soft_reset
  41. gmc_v9_0_set_clockgating_state
  42. gmc_v9_0_get_clockgating_state
  43. gmc_v9_0_set_powergating_state

   1 /*
   2  * Copyright 2016 Advanced Micro Devices, Inc.
   3  *
   4  * Permission is hereby granted, free of charge, to any person obtaining a
   5  * copy of this software and associated documentation files (the "Software"),
   6  * to deal in the Software without restriction, including without limitation
   7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8  * and/or sell copies of the Software, and to permit persons to whom the
   9  * Software is furnished to do so, subject to the following conditions:
  10  *
  11  * The above copyright notice and this permission notice shall be included in
  12  * all copies or substantial portions of the Software.
  13  *
  14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20  * OTHER DEALINGS IN THE SOFTWARE.
  21  *
  22  */
  23 
  24 #include <linux/firmware.h>
  25 #include <linux/pci.h>
  26 
  27 #include <drm/drm_cache.h>
  28 
  29 #include "amdgpu.h"
  30 #include "gmc_v9_0.h"
  31 #include "amdgpu_atomfirmware.h"
  32 #include "amdgpu_gem.h"
  33 
  34 #include "hdp/hdp_4_0_offset.h"
  35 #include "hdp/hdp_4_0_sh_mask.h"
  36 #include "gc/gc_9_0_sh_mask.h"
  37 #include "dce/dce_12_0_offset.h"
  38 #include "dce/dce_12_0_sh_mask.h"
  39 #include "vega10_enum.h"
  40 #include "mmhub/mmhub_1_0_offset.h"
  41 #include "athub/athub_1_0_offset.h"
  42 #include "oss/osssys_4_0_offset.h"
  43 
  44 #include "soc15.h"
  45 #include "soc15_common.h"
  46 #include "umc/umc_6_0_sh_mask.h"
  47 
  48 #include "gfxhub_v1_0.h"
  49 #include "mmhub_v1_0.h"
  50 #include "athub_v1_0.h"
  51 #include "gfxhub_v1_1.h"
  52 #include "mmhub_v9_4.h"
  53 #include "umc_v6_1.h"
  54 
  55 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
  56 
  57 #include "amdgpu_ras.h"
  58 
  59 /* add these here since we already include dce12 headers and these are for DCN */
  60 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
  61 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
  62 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
  63 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
  64 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
  65 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
  66 
   67 /* XXX Move this macro to the VEGA10 header file, which is like vid.h for VI. */
  68 #define AMDGPU_NUM_OF_VMIDS                     8
  69 
  70 static const u32 golden_settings_vega10_hdp[] =
  71 {
  72         0xf64, 0x0fffffff, 0x00000000,
  73         0xf65, 0x0fffffff, 0x00000000,
  74         0xf66, 0x0fffffff, 0x00000000,
  75         0xf67, 0x0fffffff, 0x00000000,
  76         0xf68, 0x0fffffff, 0x00000000,
  77         0xf6a, 0x0fffffff, 0x00000000,
  78         0xf6b, 0x0fffffff, 0x00000000,
  79         0xf6c, 0x0fffffff, 0x00000000,
  80         0xf6d, 0x0fffffff, 0x00000000,
  81         0xf6e, 0x0fffffff, 0x00000000,
  82 };
  83 
  84 static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
  85 {
  86         SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
  87         SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
  88 };
  89 
  90 static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
  91 {
  92         SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
  93         SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
  94 };
  95 
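      /*
       * Note (descriptive, not functional): the three tables below list one
       * register per UMC channel instance.  Entries are grouped four per UMC
       * instance (channel instances 0x800 apart), with consecutive UMC
       * instances 0x40000 apart; the ctrl, ctrl-mask and status tables follow
       * the same layout at different register offsets.
       */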
  96 static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
  97         (0x000143c0 + 0x00000000),
  98         (0x000143c0 + 0x00000800),
  99         (0x000143c0 + 0x00001000),
 100         (0x000143c0 + 0x00001800),
 101         (0x000543c0 + 0x00000000),
 102         (0x000543c0 + 0x00000800),
 103         (0x000543c0 + 0x00001000),
 104         (0x000543c0 + 0x00001800),
 105         (0x000943c0 + 0x00000000),
 106         (0x000943c0 + 0x00000800),
 107         (0x000943c0 + 0x00001000),
 108         (0x000943c0 + 0x00001800),
 109         (0x000d43c0 + 0x00000000),
 110         (0x000d43c0 + 0x00000800),
 111         (0x000d43c0 + 0x00001000),
 112         (0x000d43c0 + 0x00001800),
 113         (0x001143c0 + 0x00000000),
 114         (0x001143c0 + 0x00000800),
 115         (0x001143c0 + 0x00001000),
 116         (0x001143c0 + 0x00001800),
 117         (0x001543c0 + 0x00000000),
 118         (0x001543c0 + 0x00000800),
 119         (0x001543c0 + 0x00001000),
 120         (0x001543c0 + 0x00001800),
 121         (0x001943c0 + 0x00000000),
 122         (0x001943c0 + 0x00000800),
 123         (0x001943c0 + 0x00001000),
 124         (0x001943c0 + 0x00001800),
 125         (0x001d43c0 + 0x00000000),
 126         (0x001d43c0 + 0x00000800),
 127         (0x001d43c0 + 0x00001000),
 128         (0x001d43c0 + 0x00001800),
 129 };
 130 
 131 static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
 132         (0x000143e0 + 0x00000000),
 133         (0x000143e0 + 0x00000800),
 134         (0x000143e0 + 0x00001000),
 135         (0x000143e0 + 0x00001800),
 136         (0x000543e0 + 0x00000000),
 137         (0x000543e0 + 0x00000800),
 138         (0x000543e0 + 0x00001000),
 139         (0x000543e0 + 0x00001800),
 140         (0x000943e0 + 0x00000000),
 141         (0x000943e0 + 0x00000800),
 142         (0x000943e0 + 0x00001000),
 143         (0x000943e0 + 0x00001800),
 144         (0x000d43e0 + 0x00000000),
 145         (0x000d43e0 + 0x00000800),
 146         (0x000d43e0 + 0x00001000),
 147         (0x000d43e0 + 0x00001800),
 148         (0x001143e0 + 0x00000000),
 149         (0x001143e0 + 0x00000800),
 150         (0x001143e0 + 0x00001000),
 151         (0x001143e0 + 0x00001800),
 152         (0x001543e0 + 0x00000000),
 153         (0x001543e0 + 0x00000800),
 154         (0x001543e0 + 0x00001000),
 155         (0x001543e0 + 0x00001800),
 156         (0x001943e0 + 0x00000000),
 157         (0x001943e0 + 0x00000800),
 158         (0x001943e0 + 0x00001000),
 159         (0x001943e0 + 0x00001800),
 160         (0x001d43e0 + 0x00000000),
 161         (0x001d43e0 + 0x00000800),
 162         (0x001d43e0 + 0x00001000),
 163         (0x001d43e0 + 0x00001800),
 164 };
 165 
 166 static const uint32_t ecc_umc_mcumc_status_addrs[] = {
 167         (0x000143c2 + 0x00000000),
 168         (0x000143c2 + 0x00000800),
 169         (0x000143c2 + 0x00001000),
 170         (0x000143c2 + 0x00001800),
 171         (0x000543c2 + 0x00000000),
 172         (0x000543c2 + 0x00000800),
 173         (0x000543c2 + 0x00001000),
 174         (0x000543c2 + 0x00001800),
 175         (0x000943c2 + 0x00000000),
 176         (0x000943c2 + 0x00000800),
 177         (0x000943c2 + 0x00001000),
 178         (0x000943c2 + 0x00001800),
 179         (0x000d43c2 + 0x00000000),
 180         (0x000d43c2 + 0x00000800),
 181         (0x000d43c2 + 0x00001000),
 182         (0x000d43c2 + 0x00001800),
 183         (0x001143c2 + 0x00000000),
 184         (0x001143c2 + 0x00000800),
 185         (0x001143c2 + 0x00001000),
 186         (0x001143c2 + 0x00001800),
 187         (0x001543c2 + 0x00000000),
 188         (0x001543c2 + 0x00000800),
 189         (0x001543c2 + 0x00001000),
 190         (0x001543c2 + 0x00001800),
 191         (0x001943c2 + 0x00000000),
 192         (0x001943c2 + 0x00000800),
 193         (0x001943c2 + 0x00001000),
 194         (0x001943c2 + 0x00001800),
 195         (0x001d43c2 + 0x00000000),
 196         (0x001d43c2 + 0x00000800),
 197         (0x001d43c2 + 0x00001000),
 198         (0x001d43c2 + 0x00001800),
 199 };
 200 
 201 static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
 202                 struct amdgpu_irq_src *src,
 203                 unsigned type,
 204                 enum amdgpu_interrupt_state state)
 205 {
 206         u32 bits, i, tmp, reg;
 207 
 208         bits = 0x7f;
 209 
 210         switch (state) {
 211         case AMDGPU_IRQ_STATE_DISABLE:
 212                 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
 213                         reg = ecc_umc_mcumc_ctrl_addrs[i];
 214                         tmp = RREG32(reg);
 215                         tmp &= ~bits;
 216                         WREG32(reg, tmp);
 217                 }
 218                 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
 219                         reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
 220                         tmp = RREG32(reg);
 221                         tmp &= ~bits;
 222                         WREG32(reg, tmp);
 223                 }
 224                 break;
 225         case AMDGPU_IRQ_STATE_ENABLE:
 226                 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
 227                         reg = ecc_umc_mcumc_ctrl_addrs[i];
 228                         tmp = RREG32(reg);
 229                         tmp |= bits;
 230                         WREG32(reg, tmp);
 231                 }
 232                 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
 233                         reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
 234                         tmp = RREG32(reg);
 235                         tmp |= bits;
 236                         WREG32(reg, tmp);
 237                 }
 238                 break;
 239         default:
 240                 break;
 241         }
 242 
 243         return 0;
 244 }
 245 
 246 static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
 247                 struct ras_err_data *err_data,
 248                 struct amdgpu_iv_entry *entry)
 249 {
 250         kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
 251         if (adev->umc.funcs->query_ras_error_count)
 252                 adev->umc.funcs->query_ras_error_count(adev, err_data);
 253         /* umc query_ras_error_address is also responsible for clearing
 254          * error status
 255          */
 256         if (adev->umc.funcs->query_ras_error_address)
 257                 adev->umc.funcs->query_ras_error_address(adev, err_data);
 258 
 259         /* only uncorrectable error needs gpu reset */
 260         if (err_data->ue_count)
 261                 amdgpu_ras_reset_gpu(adev, 0);
 262 
 263         return AMDGPU_RAS_SUCCESS;
 264 }
 265 
 266 static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
 267                 struct amdgpu_irq_src *source,
 268                 struct amdgpu_iv_entry *entry)
 269 {
 270         struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
 271         struct ras_dispatch_if ih_data = {
 272                 .entry = entry,
 273         };
 274 
 275         if (!ras_if)
 276                 return 0;
 277 
 278         ih_data.head = *ras_if;
 279 
 280         amdgpu_ras_interrupt_dispatch(adev, &ih_data);
 281         return 0;
 282 }
 283 
 284 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
 285                                         struct amdgpu_irq_src *src,
 286                                         unsigned type,
 287                                         enum amdgpu_interrupt_state state)
 288 {
 289         struct amdgpu_vmhub *hub;
 290         u32 tmp, reg, bits, i, j;
 291 
 292         bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 293                 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 294                 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 295                 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 296                 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 297                 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 298                 VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
 299 
 300         switch (state) {
 301         case AMDGPU_IRQ_STATE_DISABLE:
 302                 for (j = 0; j < adev->num_vmhubs; j++) {
 303                         hub = &adev->vmhub[j];
 304                         for (i = 0; i < 16; i++) {
 305                                 reg = hub->vm_context0_cntl + i;
 306                                 tmp = RREG32(reg);
 307                                 tmp &= ~bits;
 308                                 WREG32(reg, tmp);
 309                         }
 310                 }
 311                 break;
 312         case AMDGPU_IRQ_STATE_ENABLE:
 313                 for (j = 0; j < adev->num_vmhubs; j++) {
 314                         hub = &adev->vmhub[j];
 315                         for (i = 0; i < 16; i++) {
 316                                 reg = hub->vm_context0_cntl + i;
 317                                 tmp = RREG32(reg);
 318                                 tmp |= bits;
 319                                 WREG32(reg, tmp);
 320                         }
 321                 }
 322         default:
 323                 break;
 324         }
 325 
 326         return 0;
 327 }
 328 
 329 static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
 330                                 struct amdgpu_irq_src *source,
 331                                 struct amdgpu_iv_entry *entry)
 332 {
 333         struct amdgpu_vmhub *hub;
 334         bool retry_fault = !!(entry->src_data[1] & 0x80);
 335         uint32_t status = 0;
 336         u64 addr;
 337         char hub_name[10];
 338 
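              /*
               * Reassemble the 48-bit faulting page address from the IV entry:
               * src_data[0] provides address bits 43:12 and the low nibble of
               * src_data[1] provides bits 47:44.  For example (illustrative
               * values only), src_data[0] = 0x00123456 and
               * (src_data[1] & 0xf) = 0x2 give addr = 0x20123456000.
               */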
 339         addr = (u64)entry->src_data[0] << 12;
 340         addr |= ((u64)entry->src_data[1] & 0xf) << 44;
 341 
 342         if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
 343                                                     entry->timestamp))
 344                 return 1; /* This also prevents sending it to KFD */
 345 
 346         if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
 347                 snprintf(hub_name, sizeof(hub_name), "mmhub0");
 348                 hub = &adev->vmhub[AMDGPU_MMHUB_0];
 349         } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
 350                 snprintf(hub_name, sizeof(hub_name), "mmhub1");
 351                 hub = &adev->vmhub[AMDGPU_MMHUB_1];
 352         } else {
 353                 snprintf(hub_name, sizeof(hub_name), "gfxhub0");
 354                 hub = &adev->vmhub[AMDGPU_GFXHUB_0];
 355         }
 356 
 357         /* If it's the first fault for this address, process it normally */
 358         if (!amdgpu_sriov_vf(adev)) {
 359                 /*
 360                  * Issue a dummy read to wait for the status register to
 361                  * be updated to avoid reading an incorrect value due to
 362                  * the new fast GRBM interface.
 363                  */
 364                 if (entry->vmid_src == AMDGPU_GFXHUB_0)
 365                         RREG32(hub->vm_l2_pro_fault_status);
 366 
 367                 status = RREG32(hub->vm_l2_pro_fault_status);
 368                 WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
 369         }
 370 
 371         if (printk_ratelimit()) {
 372                 struct amdgpu_task_info task_info;
 373 
 374                 memset(&task_info, 0, sizeof(struct amdgpu_task_info));
 375                 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
 376 
 377                 dev_err(adev->dev,
 378                         "[%s] %s page fault (src_id:%u ring:%u vmid:%u "
 379                         "pasid:%u, for process %s pid %d thread %s pid %d)\n",
 380                         hub_name, retry_fault ? "retry" : "no-retry",
 381                         entry->src_id, entry->ring_id, entry->vmid,
 382                         entry->pasid, task_info.process_name, task_info.tgid,
 383                         task_info.task_name, task_info.pid);
 384                 dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
 385                         addr, entry->client_id);
 386                 if (!amdgpu_sriov_vf(adev)) {
 387                         dev_err(adev->dev,
 388                                 "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
 389                                 status);
 390                         dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
 391                                 REG_GET_FIELD(status,
 392                                 VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
 393                         dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
 394                                 REG_GET_FIELD(status,
 395                                 VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
 396                         dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
 397                                 REG_GET_FIELD(status,
 398                                 VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
 399                         dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
 400                                 REG_GET_FIELD(status,
 401                                 VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
 402                         dev_err(adev->dev, "\t RW: 0x%lx\n",
 403                                 REG_GET_FIELD(status,
 404                                 VM_L2_PROTECTION_FAULT_STATUS, RW));
 405 
 406                 }
 407         }
 408 
 409         return 0;
 410 }
 411 
 412 static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
 413         .set = gmc_v9_0_vm_fault_interrupt_state,
 414         .process = gmc_v9_0_process_interrupt,
 415 };
 416 
 417 
 418 static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
 419         .set = gmc_v9_0_ecc_interrupt_state,
 420         .process = gmc_v9_0_process_ecc_irq,
 421 };
 422 
 423 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
 424 {
 425         adev->gmc.vm_fault.num_types = 1;
 426         adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
 427 
 428         adev->gmc.ecc_irq.num_types = 1;
 429         adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
 430 }
 431 
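      /*
       * The value built below is written to a per-engine VM_INVALIDATE_ENG*_REQ
       * register: a per-VMID bit selects which VMID to invalidate, FLUSH_TYPE
       * selects the flush type, and the remaining control bits request
       * invalidation of the L1 and all L2 PTE/PDE levels.  Completion is
       * reported per VMID bit in the corresponding ACK register.
       */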
 432 static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
 433                                         uint32_t flush_type)
 434 {
 435         u32 req = 0;
 436 
 437         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
 438                             PER_VMID_INVALIDATE_REQ, 1 << vmid);
 439         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
 440         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
 441         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
 442         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
 443         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
 444         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
 445         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
 446                             CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
 447 
 448         return req;
 449 }
 450 
 451 /**
  452  * gmc_v9_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
 453  *
 454  * @adev: amdgpu_device pointer
 455  * @vmhub: vmhub type
 456  *
 457  */
 458 static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
 459                                        uint32_t vmhub)
 460 {
 461         return ((vmhub == AMDGPU_MMHUB_0 ||
 462                  vmhub == AMDGPU_MMHUB_1) &&
 463                 (!amdgpu_sriov_vf(adev)) &&
 464                 (!(adev->asic_type == CHIP_RAVEN &&
 465                    adev->rev_id < 0x8 &&
 466                    adev->pdev->device == 0x15d8)));
 467 }
 468 
 469 /*
 470  * GART
 471  * VMID 0 is the physical GPU addresses as used by the kernel.
 472  * VMIDs 1-15 are used for userspace clients and are handled
 473  * by the amdgpu vm/hsa code.
 474  */
 475 
 476 /**
  477  * gmc_v9_0_flush_gpu_tlb - flush the TLB for a given VMID, hub and flush type
 478  *
 479  * @adev: amdgpu_device pointer
 480  * @vmid: vm instance to flush
 481  * @flush_type: the flush type
 482  *
  483  * Flush the TLB for the requested page table using the given flush type.
 484  */
 485 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 486                                         uint32_t vmhub, uint32_t flush_type)
 487 {
 488         bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
 489         const unsigned eng = 17;
 490         u32 j, inv_req, tmp;
 491         struct amdgpu_vmhub *hub;
 492 
 493         BUG_ON(vmhub >= adev->num_vmhubs);
 494 
 495         hub = &adev->vmhub[vmhub];
 496         inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
 497 
 498         /* This is necessary for a HW workaround under SRIOV as well
 499          * as GFXOFF under bare metal
 500          */
 501         if (adev->gfx.kiq.ring.sched.ready &&
 502                         (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
 503                         !adev->in_gpu_reset) {
 504                 uint32_t req = hub->vm_inv_eng0_req + eng;
 505                 uint32_t ack = hub->vm_inv_eng0_ack + eng;
 506 
 507                 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
 508                                 1 << vmid);
 509                 return;
 510         }
 511 
 512         spin_lock(&adev->gmc.invalidate_lock);
 513 
 514         /*
  515          * The GPUVM invalidate acknowledge state may be lost across a
  516          * power-gating off cycle.  As a workaround, acquire the semaphore
  517          * before the invalidation and release it afterwards so the block
  518          * cannot enter the power-gated state in between.
 519          */
 520 
  521         /* TODO: Semaphore use for GFXHUB still needs further debugging. */
 522         if (use_semaphore) {
 523                 for (j = 0; j < adev->usec_timeout; j++) {
  524                         /* a read return value of 1 means the semaphore was acquired */
 525                         tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
 526                         if (tmp & 0x1)
 527                                 break;
 528                         udelay(1);
 529                 }
 530 
 531                 if (j >= adev->usec_timeout)
 532                         DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
 533         }
 534 
 535         WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
 536 
 537         /*
 538          * Issue a dummy read to wait for the ACK register to be cleared
 539          * to avoid a false ACK due to the new fast GRBM interface.
 540          */
 541         if (vmhub == AMDGPU_GFXHUB_0)
 542                 RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);
 543 
 544         for (j = 0; j < adev->usec_timeout; j++) {
 545                 tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
 546                 if (tmp & (1 << vmid))
 547                         break;
 548                 udelay(1);
 549         }
 550 
  551         /* TODO: Semaphore use for GFXHUB still needs further debugging. */
 552         if (use_semaphore)
 553                 /*
  554                  * Release the semaphore after the invalidation;
  555                  * writing 0 releases it.
 556                  */
 557                 WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
 558 
 559         spin_unlock(&adev->gmc.invalidate_lock);
 560 
 561         if (j < adev->usec_timeout)
 562                 return;
 563 
 564         DRM_ERROR("Timeout waiting for VM flush ACK!\n");
 565 }
 566 
 567 static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 568                                             unsigned vmid, uint64_t pd_addr)
 569 {
 570         bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
 571         struct amdgpu_device *adev = ring->adev;
 572         struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
 573         uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
 574         unsigned eng = ring->vm_inv_eng;
 575 
 576         /*
  577          * The GPUVM invalidate acknowledge state may be lost across a
  578          * power-gating off cycle.  As a workaround, acquire the semaphore
  579          * before the invalidation and release it afterwards so the block
  580          * cannot enter the power-gated state in between.
 581          */
 582 
  583         /* TODO: Semaphore use for GFXHUB still needs further debugging. */
 584         if (use_semaphore)
  585                 /* a read return value of 1 means the semaphore was acquired */
 586                 amdgpu_ring_emit_reg_wait(ring,
 587                                           hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
 588 
 589         amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
 590                               lower_32_bits(pd_addr));
 591 
 592         amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
 593                               upper_32_bits(pd_addr));
 594 
 595         amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
 596                                             hub->vm_inv_eng0_ack + eng,
 597                                             req, 1 << vmid);
 598 
  599         /* TODO: Semaphore use for GFXHUB still needs further debugging. */
 600         if (use_semaphore)
 601                 /*
  602                  * Release the semaphore after the invalidation;
  603                  * writing 0 releases it.
 604                  */
 605                 amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
 606 
 607         return pd_addr;
 608 }
 609 
 610 static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
 611                                         unsigned pasid)
 612 {
 613         struct amdgpu_device *adev = ring->adev;
 614         uint32_t reg;
 615 
 616         /* Do nothing because there's no lut register for mmhub1. */
 617         if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
 618                 return;
 619 
 620         if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
 621                 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
 622         else
 623                 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
 624 
 625         amdgpu_ring_emit_wreg(ring, reg, pasid);
 626 }
 627 
 628 /*
 629  * PTE format on VEGA 10:
 630  * 63:59 reserved
 631  * 58:57 mtype
 632  * 56 F
 633  * 55 L
 634  * 54 P
 635  * 53 SW
 636  * 52 T
 637  * 50:48 reserved
 638  * 47:12 4k physical page base address
 639  * 11:7 fragment
 640  * 6 write
 641  * 5 read
 642  * 4 exe
 643  * 3 Z
 644  * 2 snooped
 645  * 1 system
 646  * 0 valid
 647  *
 648  * PDE format on VEGA 10:
 649  * 63:59 block fragment size
 650  * 58:55 reserved
 651  * 54 P
 652  * 53:48 reserved
 653  * 47:6 physical base address of PD or PTE
 654  * 5:3 reserved
 655  * 2 C
 656  * 1 system
 657  * 0 valid
 658  */
 659 
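      /*
       * Worked example (illustrative, not driver code): a valid, readable,
       * writeable and executable 4K page at physical address 0x12345000 with
       * the default MTYPE_NC would be encoded roughly as
       *
       *   pte  = 0x12345000;                          bits 47:12, page base
       *   pte |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE |
       *          AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_EXECUTABLE;
       *   pte |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);     bits 58:57
       *
       * gmc_v9_0_get_vm_pte_flags() below derives the flag portion of such a
       * PTE from the generic AMDGPU_VM_PAGE_* / AMDGPU_VM_MTYPE_* flags.
       */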
 660 static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
 661                                                 uint32_t flags)
 662 
 663 {
 664         uint64_t pte_flag = 0;
 665 
 666         if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
 667                 pte_flag |= AMDGPU_PTE_EXECUTABLE;
 668         if (flags & AMDGPU_VM_PAGE_READABLE)
 669                 pte_flag |= AMDGPU_PTE_READABLE;
 670         if (flags & AMDGPU_VM_PAGE_WRITEABLE)
 671                 pte_flag |= AMDGPU_PTE_WRITEABLE;
 672 
 673         switch (flags & AMDGPU_VM_MTYPE_MASK) {
 674         case AMDGPU_VM_MTYPE_DEFAULT:
 675                 pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
 676                 break;
 677         case AMDGPU_VM_MTYPE_NC:
 678                 pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
 679                 break;
 680         case AMDGPU_VM_MTYPE_WC:
 681                 pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
 682                 break;
 683         case AMDGPU_VM_MTYPE_CC:
 684                 pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
 685                 break;
 686         case AMDGPU_VM_MTYPE_UC:
 687                 pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
 688                 break;
 689         default:
 690                 pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
 691                 break;
 692         }
 693 
 694         if (flags & AMDGPU_VM_PAGE_PRT)
 695                 pte_flag |= AMDGPU_PTE_PRT;
 696 
 697         return pte_flag;
 698 }
 699 
 700 static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
 701                                 uint64_t *addr, uint64_t *flags)
 702 {
 703         if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
 704                 *addr = adev->vm_manager.vram_base_offset + *addr -
 705                         adev->gmc.vram_start;
 706         BUG_ON(*addr & 0xFFFF00000000003FULL);
 707 
 708         if (!adev->gmc.translate_further)
 709                 return;
 710 
 711         if (level == AMDGPU_VM_PDB1) {
  712                 /* Set the block fragment size (0x9, i.e. 512 * 4K = 2M fragments) */
 713                 if (!(*flags & AMDGPU_PDE_PTE))
 714                         *flags |= AMDGPU_PDE_BFS(0x9);
 715 
 716         } else if (level == AMDGPU_VM_PDB0) {
 717                 if (*flags & AMDGPU_PDE_PTE)
 718                         *flags &= ~AMDGPU_PDE_PTE;
 719                 else
 720                         *flags |= AMDGPU_PTE_TF;
 721         }
 722 }
 723 
 724 static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
 725         .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
 726         .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
 727         .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
 728         .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
 729         .get_vm_pde = gmc_v9_0_get_vm_pde
 730 };
 731 
 732 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
 733 {
 734         adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
 735 }
 736 
 737 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
 738 {
 739         switch (adev->asic_type) {
 740         case CHIP_VEGA20:
 741                 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
 742                 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
 743                 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
 744                 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET;
 745                 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
 746                 adev->umc.funcs = &umc_v6_1_funcs;
 747                 break;
 748         default:
 749                 break;
 750         }
 751 }
 752 
 753 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
 754 {
 755         switch (adev->asic_type) {
 756         case CHIP_VEGA20:
 757                 adev->mmhub_funcs = &mmhub_v1_0_funcs;
 758                 break;
 759         default:
 760                 break;
 761         }
 762 }
 763 
 764 static int gmc_v9_0_early_init(void *handle)
 765 {
 766         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 767 
 768         gmc_v9_0_set_gmc_funcs(adev);
 769         gmc_v9_0_set_irq_funcs(adev);
 770         gmc_v9_0_set_umc_funcs(adev);
 771         gmc_v9_0_set_mmhub_funcs(adev);
 772 
 773         adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
 774         adev->gmc.shared_aperture_end =
 775                 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
 776         adev->gmc.private_aperture_start = 0x1000000000000000ULL;
 777         adev->gmc.private_aperture_end =
 778                 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
 779 
 780         return 0;
 781 }
 782 
 783 static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
 784 {
 785 
 786         /*
 787          * TODO:
  788          * Currently there is a bug where some memory client outside
  789          * of the driver writes to the first 8M of VRAM on S3 resume;
  790          * this overwrites the GART, which by default is placed in the first
  791          * 8M, and causes VM_FAULTS once GTT is accessed.
  792          * Keep the stolen memory reservation until this is resolved.
 793          * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
 794          */
 795         switch (adev->asic_type) {
 796         case CHIP_VEGA10:
 797         case CHIP_RAVEN:
 798         case CHIP_ARCTURUS:
 799         case CHIP_RENOIR:
 800                 return true;
 801         case CHIP_VEGA12:
 802         case CHIP_VEGA20:
 803         default:
 804                 return false;
 805         }
 806 }
 807 
 808 static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
 809 {
 810         struct amdgpu_ring *ring;
 811         unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
 812                 {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
 813                 GFXHUB_FREE_VM_INV_ENGS_BITMAP};
 814         unsigned i;
 815         unsigned vmhub, inv_eng;
 816 
 817         for (i = 0; i < adev->num_rings; ++i) {
 818                 ring = adev->rings[i];
 819                 vmhub = ring->funcs->vmhub;
 820 
 821                 inv_eng = ffs(vm_inv_engs[vmhub]);
 822                 if (!inv_eng) {
 823                         dev_err(adev->dev, "no VM inv eng for ring %s\n",
 824                                 ring->name);
 825                         return -EINVAL;
 826                 }
 827 
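                      /*
                       * ffs() returns the 1-based position of the first set bit,
                       * so the engine index is inv_eng - 1; clear that bit in the
                       * hub's bitmap so the next ring on this hub is assigned a
                       * different invalidation engine.
                       */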
 828                 ring->vm_inv_eng = inv_eng - 1;
 829                 vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
 830 
 831                 dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
 832                          ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
 833         }
 834 
 835         return 0;
 836 }
 837 
 838 static int gmc_v9_0_ecc_ras_block_late_init(void *handle,
 839                         struct ras_fs_if *fs_info, struct ras_common_if *ras_block)
 840 {
 841         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 842         struct ras_common_if **ras_if = NULL;
 843         struct ras_ih_if ih_info = {
 844                 .cb = gmc_v9_0_process_ras_data_cb,
 845         };
 846         int r;
 847 
 848         if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
 849                 ras_if = &adev->gmc.umc_ras_if;
 850         else if (ras_block->block == AMDGPU_RAS_BLOCK__MMHUB)
 851                 ras_if = &adev->gmc.mmhub_ras_if;
 852         else
 853                 BUG();
 854 
 855         if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
 856                 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
 857                 return 0;
 858         }
 859 
 860         /* handle resume path. */
 861         if (*ras_if) {
 862                 /* resend ras TA enable cmd during resume.
 863                  * prepare to handle failure.
 864                  */
 865                 ih_info.head = **ras_if;
 866                 r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
 867                 if (r) {
 868                         if (r == -EAGAIN) {
 869                                 /* request a gpu reset. will run again. */
 870                                 amdgpu_ras_request_reset_on_boot(adev,
 871                                                 ras_block->block);
 872                                 return 0;
 873                         }
 874                         /* fail to enable ras, cleanup all. */
 875                         goto irq;
 876                 }
 877                 /* enable successfully. continue. */
 878                 goto resume;
 879         }
 880 
 881         *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
 882         if (!*ras_if)
 883                 return -ENOMEM;
 884 
 885         **ras_if = *ras_block;
 886 
 887         r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
 888         if (r) {
 889                 if (r == -EAGAIN) {
 890                         amdgpu_ras_request_reset_on_boot(adev,
 891                                         ras_block->block);
 892                         r = 0;
 893                 }
 894                 goto feature;
 895         }
 896 
 897         ih_info.head = **ras_if;
 898         fs_info->head = **ras_if;
 899 
 900         if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
 901                 r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
 902                 if (r)
 903                         goto interrupt;
 904         }
 905 
 906         amdgpu_ras_debugfs_create(adev, fs_info);
 907 
 908         r = amdgpu_ras_sysfs_create(adev, fs_info);
 909         if (r)
 910                 goto sysfs;
 911 resume:
 912         if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
 913                 r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
 914                 if (r)
 915                         goto irq;
 916         }
 917 
 918         return 0;
 919 irq:
 920         amdgpu_ras_sysfs_remove(adev, *ras_if);
 921 sysfs:
 922         amdgpu_ras_debugfs_remove(adev, *ras_if);
 923         if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
 924                 amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
 925 interrupt:
 926         amdgpu_ras_feature_enable(adev, *ras_if, 0);
 927 feature:
 928         kfree(*ras_if);
 929         *ras_if = NULL;
 930         return r;
 931 }
 932 
 933 static int gmc_v9_0_ecc_late_init(void *handle)
 934 {
 935         int r;
 936 
 937         struct ras_fs_if umc_fs_info = {
 938                 .sysfs_name = "umc_err_count",
 939                 .debugfs_name = "umc_err_inject",
 940         };
 941         struct ras_common_if umc_ras_block = {
 942                 .block = AMDGPU_RAS_BLOCK__UMC,
 943                 .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 944                 .sub_block_index = 0,
 945                 .name = "umc",
 946         };
 947         struct ras_fs_if mmhub_fs_info = {
 948                 .sysfs_name = "mmhub_err_count",
 949                 .debugfs_name = "mmhub_err_inject",
 950         };
 951         struct ras_common_if mmhub_ras_block = {
 952                 .block = AMDGPU_RAS_BLOCK__MMHUB,
 953                 .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 954                 .sub_block_index = 0,
 955                 .name = "mmhub",
 956         };
 957 
 958         r = gmc_v9_0_ecc_ras_block_late_init(handle,
 959                         &umc_fs_info, &umc_ras_block);
 960         if (r)
 961                 return r;
 962 
 963         r = gmc_v9_0_ecc_ras_block_late_init(handle,
 964                         &mmhub_fs_info, &mmhub_ras_block);
 965         return r;
 966 }
 967 
 968 static int gmc_v9_0_late_init(void *handle)
 969 {
 970         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  971         int r;
 972 
 973         if (!gmc_v9_0_keep_stolen_memory(adev))
 974                 amdgpu_bo_late_init(adev);
 975 
 976         r = gmc_v9_0_allocate_vm_inv_eng(adev);
 977         if (r)
 978                 return r;
 979         /* Check if ecc is available */
 980         if (!amdgpu_sriov_vf(adev)) {
 981                 switch (adev->asic_type) {
 982                 case CHIP_VEGA10:
 983                 case CHIP_VEGA20:
 984                         r = amdgpu_atomfirmware_mem_ecc_supported(adev);
 985                         if (!r) {
 986                                 DRM_INFO("ECC is not present.\n");
 987                                 if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
 988                                         adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
 989                         } else {
 990                                 DRM_INFO("ECC is active.\n");
 991                         }
 992 
 993                         r = amdgpu_atomfirmware_sram_ecc_supported(adev);
 994                         if (!r) {
 995                                 DRM_INFO("SRAM ECC is not present.\n");
 996                         } else {
 997                                 DRM_INFO("SRAM ECC is active.\n");
 998                         }
 999                         break;
1000                 default:
1001                         break;
1002                 }
1003         }
1004 
1005         r = gmc_v9_0_ecc_late_init(handle);
1006         if (r)
1007                 return r;
1008 
1009         return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
1010 }
1011 
1012 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
1013                                         struct amdgpu_gmc *mc)
1014 {
1015         u64 base = 0;
1016 
1017         if (adev->asic_type == CHIP_ARCTURUS)
1018                 base = mmhub_v9_4_get_fb_location(adev);
1019         else if (!amdgpu_sriov_vf(adev))
1020                 base = mmhub_v1_0_get_fb_location(adev);
1021 
1022         /* add the xgmi offset of the physical node */
1023         base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1024         amdgpu_gmc_vram_location(adev, mc, base);
1025         amdgpu_gmc_gart_location(adev, mc);
1026         amdgpu_gmc_agp_location(adev, mc);
1027         /* base offset of vram pages */
1028         adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
1029 
1030         /* XXX: add the xgmi offset of the physical node? */
1031         adev->vm_manager.vram_base_offset +=
1032                 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1033 }
1034 
1035 /**
1036  * gmc_v9_0_mc_init - initialize the memory controller driver params
1037  *
1038  * @adev: amdgpu_device pointer
1039  *
1040  * Look up the amount of vram, vram width, and decide how to place
1041  * vram and gart within the GPU's physical address space.
1042  * Returns 0 for success.
1043  */
1044 static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
1045 {
1046         int chansize, numchan;
1047         int r;
1048 
1049         if (amdgpu_sriov_vf(adev)) {
 1050                 /* For Vega10 SR-IOV, vram_width can't be read from ATOM (as on
 1051                  * RAVEN), and the DF related registers are not readable;
 1052                  * hardcoding seems to be the only way to set the correct vram_width
1053                  */
1054                 adev->gmc.vram_width = 2048;
1055         } else if (amdgpu_emu_mode != 1) {
1056                 adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
1057         }
1058 
1059         if (!adev->gmc.vram_width) {
1060                 /* hbm memory channel size */
1061                 if (adev->flags & AMD_IS_APU)
1062                         chansize = 64;
1063                 else
1064                         chansize = 128;
1065 
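                      /*
                       * Illustrative example: a dGPU reporting 8 HBM channels of
                       * 128 bits each ends up with vram_width = 8 * 128 = 1024.
                       */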
1066                 numchan = adev->df_funcs->get_hbm_channel_number(adev);
1067                 adev->gmc.vram_width = numchan * chansize;
1068         }
1069 
 1070         /* get_memsize() reports the VRAM size in MB; convert to bytes */
1071         adev->gmc.mc_vram_size =
1072                 adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
1073         adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
1074 
1075         if (!(adev->flags & AMD_IS_APU)) {
1076                 r = amdgpu_device_resize_fb_bar(adev);
1077                 if (r)
1078                         return r;
1079         }
1080         adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
1081         adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
1082 
1083 #ifdef CONFIG_X86_64
1084         if (adev->flags & AMD_IS_APU) {
1085                 adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
1086                 adev->gmc.aper_size = adev->gmc.real_vram_size;
1087         }
1088 #endif
1089         /* In case the PCI BAR is larger than the actual amount of vram */
1090         adev->gmc.visible_vram_size = adev->gmc.aper_size;
1091         if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
1092                 adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
1093 
1094         /* set the gart size */
1095         if (amdgpu_gart_size == -1) {
1096                 switch (adev->asic_type) {
1097                 case CHIP_VEGA10:  /* all engines support GPUVM */
1098                 case CHIP_VEGA12:  /* all engines support GPUVM */
1099                 case CHIP_VEGA20:
1100                 case CHIP_ARCTURUS:
1101                 default:
1102                         adev->gmc.gart_size = 512ULL << 20;
1103                         break;
1104                 case CHIP_RAVEN:   /* DCE SG support */
1105                 case CHIP_RENOIR:
1106                         adev->gmc.gart_size = 1024ULL << 20;
1107                         break;
1108                 }
1109         } else {
1110                 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
1111         }
1112 
1113         gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
1114 
1115         return 0;
1116 }
1117 
1118 static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
1119 {
1120         int r;
1121 
1122         if (adev->gart.bo) {
1123                 WARN(1, "VEGA10 PCIE GART already initialized\n");
1124                 return 0;
1125         }
1126         /* Initialize common gart structure */
1127         r = amdgpu_gart_init(adev);
1128         if (r)
1129                 return r;
1130         adev->gart.table_size = adev->gart.num_gpu_pages * 8;
1131         adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
1132                                  AMDGPU_PTE_EXECUTABLE;
1133         return amdgpu_gart_table_vram_alloc(adev);
1134 }
1135 
1136 static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
1137 {
1138         u32 d1vga_control;
1139         unsigned size;
1140 
1141         /*
1142          * TODO Remove once GART corruption is resolved
1143          * Check related code in gmc_v9_0_sw_fini
 1144          */
1145         if (gmc_v9_0_keep_stolen_memory(adev))
1146                 return 9 * 1024 * 1024;
1147 
1148         d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
1149         if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
1150                 size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
1151         } else {
1152                 u32 viewport;
1153 
1154                 switch (adev->asic_type) {
1155                 case CHIP_RAVEN:
1156                 case CHIP_RENOIR:
1157                         viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
1158                         size = (REG_GET_FIELD(viewport,
1159                                               HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
1160                                 REG_GET_FIELD(viewport,
1161                                               HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
1162                                 4);
1163                         break;
1164                 case CHIP_VEGA10:
1165                 case CHIP_VEGA12:
1166                 case CHIP_VEGA20:
1167                 default:
1168                         viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
1169                         size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
1170                                 REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
1171                                 4);
1172                         break;
1173                 }
1174         }
1175         /* return 0 if the pre-OS buffer uses up most of vram */
1176         if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
1177                 return 0;
1178 
1179         return size;
1180 }
1181 
1182 static int gmc_v9_0_sw_init(void *handle)
1183 {
1184         int r;
1185         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1186 
1187         gfxhub_v1_0_init(adev);
1188         if (adev->asic_type == CHIP_ARCTURUS)
1189                 mmhub_v9_4_init(adev);
1190         else
1191                 mmhub_v1_0_init(adev);
1192 
1193         spin_lock_init(&adev->gmc.invalidate_lock);
1194 
1195         adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
1196         switch (adev->asic_type) {
1197         case CHIP_RAVEN:
1198                 adev->num_vmhubs = 2;
1199 
1200                 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
1201                         amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1202                 } else {
1203                         /* vm_size is 128TB + 512GB for legacy 3-level page support */
1204                         amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
1205                         adev->gmc.translate_further =
1206                                 adev->vm_manager.num_level > 1;
1207                 }
1208                 break;
1209         case CHIP_VEGA10:
1210         case CHIP_VEGA12:
1211         case CHIP_VEGA20:
1212         case CHIP_RENOIR:
1213                 adev->num_vmhubs = 2;
1214 
1215 
1216                 /*
 1217                  * To support 4-level page tables, the VM size is set to 256TB
 1218                  * (48 bits), the maximum supported by Vega10, with a block size
 1219                  * of 512 pages (9 bits); the size argument below is in GB.
 1220                  */
 1221                 /* SR-IOV restricts max_pfn to below AMDGPU_GMC_HOLE */
1222                 if (amdgpu_sriov_vf(adev))
1223                         amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
1224                 else
1225                         amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1226                 break;
1227         case CHIP_ARCTURUS:
1228                 adev->num_vmhubs = 3;
1229 
1230                 /* Keep the vm size same with Vega20 */
1231                 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1232                 break;
1233         default:
1234                 break;
1235         }
1236 
 1237         /* This interrupt is for VMC page faults. */
1238         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
1239                                 &adev->gmc.vm_fault);
1240         if (r)
1241                 return r;
1242 
1243         if (adev->asic_type == CHIP_ARCTURUS) {
1244                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
1245                                         &adev->gmc.vm_fault);
1246                 if (r)
1247                         return r;
1248         }
1249 
1250         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
1251                                 &adev->gmc.vm_fault);
1252 
1253         if (r)
1254                 return r;
1255 
1256         /* interrupt sent to DF. */
1257         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
1258                         &adev->gmc.ecc_irq);
1259         if (r)
1260                 return r;
1261 
1262         /* Set the internal MC address mask
1263          * This is the max address of the GPU's
1264          * internal address space.
1265          */
1266         adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
1267 
1268         r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
1269         if (r) {
1270                 printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
1271                 return r;
1272         }
1273         adev->need_swiotlb = drm_need_swiotlb(44);
1274 
1275         if (adev->gmc.xgmi.supported) {
1276                 r = gfxhub_v1_1_get_xgmi_info(adev);
1277                 if (r)
1278                         return r;
1279         }
1280 
1281         r = gmc_v9_0_mc_init(adev);
1282         if (r)
1283                 return r;
1284 
1285         adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);
1286 
1287         /* Memory manager */
1288         r = amdgpu_bo_init(adev);
1289         if (r)
1290                 return r;
1291 
1292         r = gmc_v9_0_gart_init(adev);
1293         if (r)
1294                 return r;
1295 
1296         /*
1297          * number of VMs
1298          * VMID 0 is reserved for System
1299          * amdgpu graphics/compute will use VMIDs 1-7
1300          * amdkfd will use VMIDs 8-15
1301          */
1302         adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
1303         adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
1304         adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;
1305 
1306         amdgpu_vm_manager_init(adev);
1307 
1308         return 0;
1309 }
1310 
1311 static int gmc_v9_0_sw_fini(void *handle)
1312 {
1313         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1314         void *stolen_vga_buf;
1315 
1316         if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
1317                         adev->gmc.umc_ras_if) {
1318                 struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
1319                 struct ras_ih_if ih_info = {
1320                         .head = *ras_if,
1321                 };
1322 
1323                 /* remove fs first */
1324                 amdgpu_ras_debugfs_remove(adev, ras_if);
1325                 amdgpu_ras_sysfs_remove(adev, ras_if);
1326                 /* remove the IH */
1327                 amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
1328                 amdgpu_ras_feature_enable(adev, ras_if, 0);
1329                 kfree(ras_if);
1330         }
1331 
1332         if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
1333                         adev->gmc.mmhub_ras_if) {
1334                 struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if;
1335 
1336                 /* remove fs and disable ras feature */
1337                 amdgpu_ras_debugfs_remove(adev, ras_if);
1338                 amdgpu_ras_sysfs_remove(adev, ras_if);
1339                 amdgpu_ras_feature_enable(adev, ras_if, 0);
1340                 kfree(ras_if);
1341         }
1342 
1343         amdgpu_gem_force_release(adev);
1344         amdgpu_vm_manager_fini(adev);
1345 
1346         if (gmc_v9_0_keep_stolen_memory(adev))
1347                 amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
1348 
1349         amdgpu_gart_table_vram_free(adev);
1350         amdgpu_bo_fini(adev);
1351         amdgpu_gart_fini(adev);
1352 
1353         return 0;
1354 }
1355 
1356 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
1357 {
1358 
1359         switch (adev->asic_type) {
1360         case CHIP_VEGA10:
1361                 if (amdgpu_sriov_vf(adev))
1362                         break;
1363                 /* fall through */
1364         case CHIP_VEGA20:
1365                 soc15_program_register_sequence(adev,
1366                                                 golden_settings_mmhub_1_0_0,
1367                                                 ARRAY_SIZE(golden_settings_mmhub_1_0_0));
1368                 soc15_program_register_sequence(adev,
1369                                                 golden_settings_athub_1_0_0,
1370                                                 ARRAY_SIZE(golden_settings_athub_1_0_0));
1371                 break;
1372         case CHIP_VEGA12:
1373                 break;
1374         case CHIP_RAVEN:
1375                 /* TODO for renoir */
1376                 soc15_program_register_sequence(adev,
1377                                                 golden_settings_athub_1_0_0,
1378                                                 ARRAY_SIZE(golden_settings_athub_1_0_0));
1379                 break;
1380         default:
1381                 break;
1382         }
1383 }
1384 
1385 /**
1386  * gmc_v9_0_restore_registers - restore registers saved at suspend
1387  *
1388  * @adev: amdgpu_device pointer
1389  *
1390  * This restores the register values that were saved at suspend time.
1391  */
1392 static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
1393 {
1394         if (adev->asic_type == CHIP_RAVEN)
1395                 WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
1396 }
1397 
1398 /**
1399  * gmc_v9_0_gart_enable - set up and enable the GPU page tables (GART)
1400  *
1401  * @adev: amdgpu_device pointer
1402  */
1403 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
1404 {
1405         int r, i;
1406         bool value;
1407         u32 tmp;
1408 
1409         amdgpu_device_program_register_sequence(adev,
1410                                                 golden_settings_vega10_hdp,
1411                                                 ARRAY_SIZE(golden_settings_vega10_hdp));
1412 
1413         if (adev->gart.bo == NULL) {
1414                 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
1415                 return -EINVAL;
1416         }
1417         r = amdgpu_gart_table_vram_pin(adev);
1418         if (r)
1419                 return r;
1420 
1421         switch (adev->asic_type) {
1422         case CHIP_RAVEN:
1423                 /* TODO for renoir */
1424                 mmhub_v1_0_update_power_gating(adev, true);
1425                 break;
1426         default:
1427                 break;
1428         }
1429 
1430         r = gfxhub_v1_0_gart_enable(adev);
1431         if (r)
1432                 return r;
1433 
1434         if (adev->asic_type == CHIP_ARCTURUS)
1435                 r = mmhub_v9_4_gart_enable(adev);
1436         else
1437                 r = mmhub_v1_0_gart_enable(adev);
1438         if (r)
1439                 return r;
1440 
1441         WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
1442 
1443         tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
1444         WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
1445 
1446         WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
1447         WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
1448 
1449         /* After HDP is initialized, flush HDP.*/
1450         adev->nbio_funcs->hdp_flush(adev, NULL);
1451 
1452         if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
1453                 value = false;
1454         else
1455                 value = true;
1456 
1457         gfxhub_v1_0_set_fault_enable_default(adev, value);
1458         if (adev->asic_type == CHIP_ARCTURUS)
1459                 mmhub_v9_4_set_fault_enable_default(adev, value);
1460         else
1461                 mmhub_v1_0_set_fault_enable_default(adev, value);
1462 
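             /* Flush VMID 0 on every VM hub so the newly programmed GART mappings take effect. */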
1463         for (i = 0; i < adev->num_vmhubs; ++i)
1464                 gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
1465 
1466         DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1467                  (unsigned)(adev->gmc.gart_size >> 20),
1468                  (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
1469         adev->gart.ready = true;
1470         return 0;
1471 }
1472 
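     /*
      * gmc_v9_0_hw_init - hw init callback for the GMC v9 IP block
      *
      * Applies the golden register settings, locks out the legacy VGA
      * aperture and disables VGA rendering when a display controller is
      * present, then enables the GART.
      */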
1473 static int gmc_v9_0_hw_init(void *handle)
1474 {
1475         int r;
1476         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1477 
1478         /* The order of these calls matters: program the golden registers before enabling the GART. */
1479         gmc_v9_0_init_golden_registers(adev);
1480 
1481         if (adev->mode_info.num_crtc) {
1482                 /* Lock out access through the VGA aperture */
1483                 WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
1484 
1485                 /* disable VGA render */
1486                 WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
1487         }
1488 
1489         r = gmc_v9_0_gart_enable(adev);
1490 
1491         return r;
1492 }
1493 
1494 /**
1495  * gmc_v9_0_save_registers - save registers to be restored at resume
1496  *
1497  * @adev: amdgpu_device pointer
1498  *
1499  * This saves the register values that must be written back
1500  * when the device resumes.
1501  */
1502 static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
1503 {
1504         if (adev->asic_type == CHIP_RAVEN)
1505                 adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
1506 }
1507 
1508 /**
1509  * gmc_v9_0_gart_disable - disable the GPU page tables (GART)
1510  *
1511  * @adev: amdgpu_device pointer
1512  *
1513  * This disables all VM page tables on every hub and unpins the GART table.
1514  */
1515 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
1516 {
1517         gfxhub_v1_0_gart_disable(adev);
1518         if (adev->asic_type == CHIP_ARCTURUS)
1519                 mmhub_v9_4_gart_disable(adev);
1520         else
1521                 mmhub_v1_0_gart_disable(adev);
1522         amdgpu_gart_table_vram_unpin(adev);
1523 }
1524 
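     /*
      * gmc_v9_0_hw_fini - hw teardown callback for the GMC v9 IP block
      *
      * Drops the ECC and VM-fault interrupt references and disables the
      * GART.  SR-IOV VFs return early since the guest must not touch the
      * GMC registers.
      */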
1525 static int gmc_v9_0_hw_fini(void *handle)
1526 {
1527         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1528 
1529         if (amdgpu_sriov_vf(adev)) {
1530                 /* full access mode, so don't touch any GMC register */
1531                 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
1532                 return 0;
1533         }
1534 
1535         amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
1536         amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1537         gmc_v9_0_gart_disable(adev);
1538 
1539         return 0;
1540 }
1541 
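     /*
      * Suspend tears down the hardware state and then snapshots the
      * registers that gmc_v9_0_restore_registers will write back on resume;
      * resume restores them before re-running hw init and finally resets
      * all VMID assignments.
      */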
1542 static int gmc_v9_0_suspend(void *handle)
1543 {
1544         int r;
1545         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1546 
1547         r = gmc_v9_0_hw_fini(adev);
1548         if (r)
1549                 return r;
1550 
1551         gmc_v9_0_save_registers(adev);
1552 
1553         return 0;
1554 }
1555 
1556 static int gmc_v9_0_resume(void *handle)
1557 {
1558         int r;
1559         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1560 
1561         gmc_v9_0_restore_registers(adev);
1562         r = gmc_v9_0_hw_init(adev);
1563         if (r)
1564                 return r;
1565 
1566         amdgpu_vmid_reset_all(adev);
1567 
1568         return 0;
1569 }
1570 
1571 static bool gmc_v9_0_is_idle(void *handle)
1572 {
1573         /* MC is always ready in GMC v9.*/
1574         return true;
1575 }
1576 
1577 static int gmc_v9_0_wait_for_idle(void *handle)
1578 {
1579         /* There is no need to wait for MC idle in GMC v9.*/
1580         return 0;
1581 }
1582 
1583 static int gmc_v9_0_soft_reset(void *handle)
1584 {
1585         /* XXX for emulation.*/
1586         return 0;
1587 }
1588 
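     /*
      * Clock gating for the GMC lives in the MMHUB and ATHUB sub-blocks, so
      * the two callbacks below simply dispatch to the hub implementation for
      * the ASIC (MMHUB 9.4 on Arcturus, MMHUB 1.0 otherwise) plus ATHUB 1.0.
      */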
1589 static int gmc_v9_0_set_clockgating_state(void *handle,
1590                                         enum amd_clockgating_state state)
1591 {
1592         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1593 
1594         if (adev->asic_type == CHIP_ARCTURUS)
1595                 mmhub_v9_4_set_clockgating(adev, state);
1596         else
1597                 mmhub_v1_0_set_clockgating(adev, state);
1598 
1599         athub_v1_0_set_clockgating(adev, state);
1600 
1601         return 0;
1602 }
1603 
1604 static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
1605 {
1606         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1607 
1608         if (adev->asic_type == CHIP_ARCTURUS)
1609                 mmhub_v9_4_get_clockgating(adev, flags);
1610         else
1611                 mmhub_v1_0_get_clockgating(adev, flags);
1612 
1613         athub_v1_0_get_clockgating(adev, flags);
1614 }
1615 
1616 static int gmc_v9_0_set_powergating_state(void *handle,
1617                                         enum amd_powergating_state state)
1618 {
1619         return 0;
1620 }
1621 
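     /*
      * Callback table and IP block descriptor for GMC v9; the SoC setup code
      * is expected to register gmc_v9_0_ip_block so that these hooks are
      * invoked during device init, suspend/resume and gating state changes.
      */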
1622 const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
1623         .name = "gmc_v9_0",
1624         .early_init = gmc_v9_0_early_init,
1625         .late_init = gmc_v9_0_late_init,
1626         .sw_init = gmc_v9_0_sw_init,
1627         .sw_fini = gmc_v9_0_sw_fini,
1628         .hw_init = gmc_v9_0_hw_init,
1629         .hw_fini = gmc_v9_0_hw_fini,
1630         .suspend = gmc_v9_0_suspend,
1631         .resume = gmc_v9_0_resume,
1632         .is_idle = gmc_v9_0_is_idle,
1633         .wait_for_idle = gmc_v9_0_wait_for_idle,
1634         .soft_reset = gmc_v9_0_soft_reset,
1635         .set_clockgating_state = gmc_v9_0_set_clockgating_state,
1636         .set_powergating_state = gmc_v9_0_set_powergating_state,
1637         .get_clockgating_state = gmc_v9_0_get_clockgating_state,
1638 };
1639 
1640 const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
1641 {
1642         .type = AMD_IP_BLOCK_TYPE_GMC,
1643         .major = 9,
1644         .minor = 0,
1645         .rev = 0,
1646         .funcs = &gmc_v9_0_ip_funcs,
1647 };
