root/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. xgpu_vi_init_golden_registers
  2. xgpu_vi_mailbox_send_ack
  3. xgpu_vi_mailbox_set_valid
  4. xgpu_vi_mailbox_trans_msg
  5. xgpu_vi_mailbox_rcv_msg
  6. xgpu_vi_poll_ack
  7. xgpu_vi_poll_msg
  8. xgpu_vi_send_access_requests
  9. xgpu_vi_request_reset
  10. xgpu_vi_wait_reset_cmpl
  11. xgpu_vi_request_full_gpu_access
  12. xgpu_vi_release_full_gpu_access
  13. xgpu_vi_mailbox_ack_irq
  14. xgpu_vi_set_mailbox_ack_irq
  15. xgpu_vi_mailbox_flr_work
  16. xgpu_vi_set_mailbox_rcv_irq
  17. xgpu_vi_mailbox_rcv_irq
  18. xgpu_vi_mailbox_set_irq_funcs
  19. xgpu_vi_mailbox_add_irq_id
  20. xgpu_vi_mailbox_get_irq
  21. xgpu_vi_mailbox_put_irq

   1 /*
   2  * Copyright 2017 Advanced Micro Devices, Inc.
   3  *
   4  * Permission is hereby granted, free of charge, to any person obtaining a
   5  * copy of this software and associated documentation files (the "Software"),
   6  * to deal in the Software without restriction, including without limitation
   7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8  * and/or sell copies of the Software, and to permit persons to whom the
   9  * Software is furnished to do so, subject to the following conditions:
  10  *
  11  * The above copyright notice and this permission notice shall be included in
  12  * all copies or substantial portions of the Software.
  13  *
  14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20  * OTHER DEALINGS IN THE SOFTWARE.
  21  *
  22  * Authors: Xiangliang.Yu@amd.com
  23  */
  24 
  25 #include "amdgpu.h"
  26 #include "vi.h"
  27 #include "bif/bif_5_0_d.h"
  28 #include "bif/bif_5_0_sh_mask.h"
  29 #include "vid.h"
  30 #include "gca/gfx_8_0_d.h"
  31 #include "gca/gfx_8_0_sh_mask.h"
  32 #include "gmc_v8_0.h"
  33 #include "gfx_v8_0.h"
  34 #include "sdma_v3_0.h"
  35 #include "tonga_ih.h"
  36 #include "gmc/gmc_8_2_d.h"
  37 #include "gmc/gmc_8_2_sh_mask.h"
  38 #include "oss/oss_3_0_d.h"
  39 #include "oss/oss_3_0_sh_mask.h"
  40 #include "dce/dce_10_0_d.h"
  41 #include "dce/dce_10_0_sh_mask.h"
  42 #include "smu/smu_7_1_3_d.h"
  43 #include "mxgpu_vi.h"
  44 
  45 /* VI golden setting */
  46 static const u32 xgpu_fiji_mgcg_cgcg_init[] = {
  47         mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
  48         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
  49         mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
  50         mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
  51         mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
  52         mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
  53         mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
  54         mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
  55         mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
  56         mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
  57         mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
  58         mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
  59         mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
  60         mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
  61         mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
  62         mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
  63         mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
  64         mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
  65         mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
  66         mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
  67         mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
  68         mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
  69         mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
  70         mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
  71         mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
  72         mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
  73         mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
  74         mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
  75         mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
  76         mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
  77         mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
  78         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
  79         mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
  80         mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
  81         mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
  82         mmPCIE_INDEX, 0xffffffff, 0x0140001c,
  83         mmPCIE_DATA, 0x000f0000, 0x00000000,
  84         mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
  85         mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
  86         mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
  87         mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
  88         mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
  89         mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
  90         mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
  91         mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
  92         mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
  93         mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
  94 };
  95 
/*
 * Fiji A10 golden register settings ({ register, mask, value } triples),
 * applied once at init via amdgpu_device_program_register_sequence().
 */
static const u32 xgpu_fiji_golden_settings_a10[] = {
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
 123 
/*
 * Fiji common golden settings ({ register, mask, value } triples),
 * shared across Fiji revisions.
 */
static const u32 xgpu_fiji_golden_common_all[] = {
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};
 136 
/*
 * Tonga MGCG/CGCG (clock-gating) init sequence.  Same { register, mask,
 * value } triple format as the Fiji table above; Tonga additionally
 * programs per-CU (CU0..CU7) CGTS control registers.
 */
static const u32 xgpu_tonga_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE,   0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX,           0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL,        0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0,       0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL,         0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL,         0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4,        0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL,        0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0,       0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL,             0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL,       0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL,       0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL,             0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX,           0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG,         0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL,     0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL,       0xffffffff, 0x0020003c,
	mmPCIE_INDEX,               0xffffffff, 0x0140001c,
	mmPCIE_DATA,                0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4,          0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4,           0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL,   0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL,      0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS,          0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0,       0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL,    0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL,          0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL,           0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL,           0xff000ff0, 0x00000100,
};
 226 
/*
 * Tonga A11 golden register settings ({ register, mask, value } triples),
 * applied once at init via amdgpu_device_program_register_sequence().
 */
static const u32 xgpu_tonga_golden_settings_a11[] = {
	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
 266 
/*
 * Tonga common golden settings ({ register, mask, value } triples),
 * shared across Tonga revisions.
 */
static const u32 xgpu_tonga_golden_common_all[] = {
	mmGRBM_GFX_INDEX,               0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG,          0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1,        0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG,               0xffffffff, 0x22011002,
	mmSPI_RESOURCE_RESERVE_CU_0,    0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1,    0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
};
 276 
 277 void xgpu_vi_init_golden_registers(struct amdgpu_device *adev)
 278 {
 279         switch (adev->asic_type) {
 280         case CHIP_FIJI:
 281                 amdgpu_device_program_register_sequence(adev,
 282                                                         xgpu_fiji_mgcg_cgcg_init,
 283                                                         ARRAY_SIZE(
 284                                                                 xgpu_fiji_mgcg_cgcg_init));
 285                 amdgpu_device_program_register_sequence(adev,
 286                                                         xgpu_fiji_golden_settings_a10,
 287                                                         ARRAY_SIZE(
 288                                                                 xgpu_fiji_golden_settings_a10));
 289                 amdgpu_device_program_register_sequence(adev,
 290                                                         xgpu_fiji_golden_common_all,
 291                                                         ARRAY_SIZE(
 292                                                                 xgpu_fiji_golden_common_all));
 293                 break;
 294         case CHIP_TONGA:
 295                 amdgpu_device_program_register_sequence(adev,
 296                                                         xgpu_tonga_mgcg_cgcg_init,
 297                                                         ARRAY_SIZE(
 298                                                                 xgpu_tonga_mgcg_cgcg_init));
 299                 amdgpu_device_program_register_sequence(adev,
 300                                                         xgpu_tonga_golden_settings_a11,
 301                                                         ARRAY_SIZE(
 302                                                                 xgpu_tonga_golden_settings_a11));
 303                 amdgpu_device_program_register_sequence(adev,
 304                                                         xgpu_tonga_golden_common_all,
 305                                                         ARRAY_SIZE(
 306                                                                 xgpu_tonga_golden_common_all));
 307                 break;
 308         default:
 309                 BUG_ON("Doesn't support chip type.\n");
 310                 break;
 311         }
 312 }
 313 
 314 /*
 315  * Mailbox communication between GPU hypervisor and VFs
 316  */
 317 static void xgpu_vi_mailbox_send_ack(struct amdgpu_device *adev)
 318 {
 319         u32 reg;
 320         int timeout = VI_MAILBOX_TIMEDOUT;
 321         u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);
 322 
 323         reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
 324         reg = REG_SET_FIELD(reg, MAILBOX_CONTROL, RCV_MSG_ACK, 1);
 325         WREG32_NO_KIQ(mmMAILBOX_CONTROL, reg);
 326 
 327         /*Wait for RCV_MSG_VALID to be 0*/
 328         reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
 329         while (reg & mask) {
 330                 if (timeout <= 0) {
 331                         pr_err("RCV_MSG_VALID is not cleared\n");
 332                         break;
 333                 }
 334                 mdelay(1);
 335                 timeout -=1;
 336 
 337                 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
 338         }
 339 }
 340 
 341 static void xgpu_vi_mailbox_set_valid(struct amdgpu_device *adev, bool val)
 342 {
 343         u32 reg;
 344 
 345         reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
 346         reg = REG_SET_FIELD(reg, MAILBOX_CONTROL,
 347                             TRN_MSG_VALID, val ? 1 : 0);
 348         WREG32_NO_KIQ(mmMAILBOX_CONTROL, reg);
 349 }
 350 
 351 static void xgpu_vi_mailbox_trans_msg(struct amdgpu_device *adev,
 352                                       enum idh_request req)
 353 {
 354         u32 reg;
 355 
 356         reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0);
 357         reg = REG_SET_FIELD(reg, MAILBOX_MSGBUF_TRN_DW0,
 358                             MSGBUF_DATA, req);
 359         WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, reg);
 360 
 361         xgpu_vi_mailbox_set_valid(adev, true);
 362 }
 363 
/**
 * xgpu_vi_mailbox_rcv_msg - check for a specific message from the host
 * @adev: amdgpu device
 * @event: expected message code in MAILBOX_MSGBUF_RCV_DW0
 *
 * Returns 0 and acks the mailbox if @event is pending, -ENOENT otherwise.
 * Note the RCV_MSG_VALID check is deliberately skipped for
 * IDH_FLR_NOTIFICATION_CMPL (host-side workaround below); keep the
 * check/read order as-is.
 */
static int xgpu_vi_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;
	u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);

	/* workaround: host driver doesn't set VALID for CMPL now */
	if (event != IDH_FLR_NOTIFICATION_CMPL) {
		reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
		if (!(reg & mask))
			return -ENOENT;
	}

	/* Compare the received data word against the expected event code. */
	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg != event)
		return -ENOENT;

	/* send ack to PF */
	xgpu_vi_mailbox_send_ack(adev);

	return 0;
}
 386 
/**
 * xgpu_vi_poll_ack - wait for the host (PF) to ack our transmitted message
 * @adev: amdgpu device
 *
 * Polls TRN_MSG_ACK in MAILBOX_CONTROL in 5 ms steps for up to
 * VI_MAILBOX_TIMEDOUT ms.  Returns 0 on ack, -ETIME on timeout.
 */
static int xgpu_vi_poll_ack(struct amdgpu_device *adev)
{
	int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, TRN_MSG_ACK);
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
	while (!(reg & mask)) {
		if (timeout <= 0) {
			pr_err("Doesn't get ack from pf.\n");
			r = -ETIME;
			break;
		}
		mdelay(5);
		timeout -= 5;

		reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
	}

	return r;
}
 408 
/**
 * xgpu_vi_poll_msg - wait for a specific message from the host
 * @adev: amdgpu device
 * @event: expected message code
 *
 * Retries xgpu_vi_mailbox_rcv_msg() in 5 ms steps for up to
 * VI_MAILBOX_TIMEDOUT ms.  Returns 0 once the message is received
 * (and acked by rcv_msg), -ETIME on timeout.
 */
static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r = 0, timeout = VI_MAILBOX_TIMEDOUT;

	r = xgpu_vi_mailbox_rcv_msg(adev, event);
	while (r) {
		if (timeout <= 0) {
			pr_err("Doesn't get ack from pf.\n");
			r = -ETIME;
			break;
		}
		mdelay(5);
		timeout -= 5;

		r = xgpu_vi_mailbox_rcv_msg(adev, event);
	}

	return r;
}
 428 
/**
 * xgpu_vi_send_access_requests - send a request to the host and await ack
 * @adev: amdgpu device
 * @request: IDH request code to transmit
 *
 * Transmits @request, waits for the transmit ack, then clears the valid
 * bit.  For the three GPU-access requests it additionally waits for the
 * host's IDH_READY_TO_ACCESS_GPU reply.  Returns 0 on success or a
 * negative errno from the polling helpers.
 */
static int xgpu_vi_send_access_requests(struct amdgpu_device *adev,
					enum idh_request request)
{
	int r;

	xgpu_vi_mailbox_trans_msg(adev, request);

	/* start to poll ack */
	r = xgpu_vi_poll_ack(adev);
	if (r)
		return r;

	xgpu_vi_mailbox_set_valid(adev, false);

	/* start to check msg if request is idh_req_gpu_init_access */
	if (request == IDH_REQ_GPU_INIT_ACCESS ||
		request == IDH_REQ_GPU_FINI_ACCESS ||
		request == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Doesn't get ack from pf, give up\n");
			return r;
		}
	}

	return 0;
}
 456 
/* Ask the host (hypervisor) to reset this VF's GPU. */
static int xgpu_vi_request_reset(struct amdgpu_device *adev)
{
	return xgpu_vi_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}
 461 
/* Block until the host signals FLR (function-level reset) completion. */
static int xgpu_vi_wait_reset_cmpl(struct amdgpu_device *adev)
{
	return xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL);
}
 466 
 467 static int xgpu_vi_request_full_gpu_access(struct amdgpu_device *adev,
 468                                            bool init)
 469 {
 470         enum idh_request req;
 471 
 472         req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
 473         return xgpu_vi_send_access_requests(adev, req);
 474 }
 475 
 476 static int xgpu_vi_release_full_gpu_access(struct amdgpu_device *adev,
 477                                            bool init)
 478 {
 479         enum idh_request req;
 480         int r = 0;
 481 
 482         req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
 483         r = xgpu_vi_send_access_requests(adev, req);
 484 
 485         return r;
 486 }
 487 
/* add support mailbox interrupts */

/* Interrupt handler for the mailbox TRN ack interrupt; intentionally a
 * no-op beyond a debug trace (ack completion is handled by polling).
 */
static int xgpu_vi_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}
 496 
 497 static int xgpu_vi_set_mailbox_ack_irq(struct amdgpu_device *adev,
 498                                        struct amdgpu_irq_src *src,
 499                                        unsigned type,
 500                                        enum amdgpu_interrupt_state state)
 501 {
 502         u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
 503 
 504         tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, ACK_INT_EN,
 505                             (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
 506         WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
 507 
 508         return 0;
 509 }
 510 
/**
 * xgpu_vi_mailbox_flr_work - deferred FLR handling
 * @work: the virt.flr_work item, embedded in struct amdgpu_virt
 *
 * Scheduled from the mailbox rcv interrupt on an FLR notification.
 * Waits for the host's FLR-complete message, then triggers GPU recovery.
 */
static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	/* wait until RCV_MSG become 3 */
	if (xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
		pr_err("failed to receive FLR_CMPL\n");
		return;
	}

	/* Trigger recovery due to world switch failure */
	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_device_gpu_recover(adev, NULL);
}
 526 
/* Enable or disable the mailbox receive interrupt (VALID_INT_EN in
 * MAILBOX_INT_CNTL); mirrors xgpu_vi_set_mailbox_ack_irq().
 */
static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}
 540 
/**
 * xgpu_vi_mailbox_rcv_irq - mailbox receive interrupt handler
 * @adev: amdgpu device
 * @source: interrupt source (unused)
 * @entry: interrupt vector entry (unused)
 *
 * On an FLR notification from the host, schedules flr_work to handle the
 * reset asynchronously.  Only acts when module-level TDR (gpu recovery)
 * is disabled, so the hypervisor-driven path doesn't race the driver's
 * own recovery.  Always returns 0.
 */
static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	int r;

	/* trigger gpu-reset by hypervisor only if TDR disabled */
	if (!amdgpu_gpu_recovery) {
		/* see what event we get */
		r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);

		/* only handle FLR_NOTIFY now */
		if (!r)
			schedule_work(&adev->virt.flr_work);
	}

	return 0;
}
 559 
/* IRQ source callbacks for the mailbox ACK interrupt. */
static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_ack_irq_funcs = {
	.set = xgpu_vi_set_mailbox_ack_irq,
	.process = xgpu_vi_mailbox_ack_irq,
};
 564 
/* IRQ source callbacks for the mailbox receive (msg valid) interrupt. */
static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_rcv_irq_funcs = {
	.set = xgpu_vi_set_mailbox_rcv_irq,
	.process = xgpu_vi_mailbox_rcv_irq,
};
 569 
/* Wire the ack/rcv IRQ sources in adev->virt to the callback tables above. */
void xgpu_vi_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_vi_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_vi_mailbox_rcv_irq_funcs;
}
 577 
/**
 * xgpu_vi_mailbox_add_irq_id - register the mailbox interrupt sources
 * @adev: amdgpu device
 *
 * Registers legacy-client IRQ source ids 135 (rcv) and 138 (ack)
 * — presumably the VI mailbox msg-valid/ack vectors; confirm against the
 * VI interrupt source-id tables.  On failure of the second registration
 * the first is released.  Returns 0 or a negative errno.
 */
int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}
 594 
/**
 * xgpu_vi_mailbox_get_irq - enable mailbox interrupts and init FLR work
 * @adev: amdgpu device
 *
 * Takes references on the rcv and ack IRQ sources (unwinding rcv if ack
 * fails) and initializes the deferred FLR work item.  Returns 0 or a
 * negative errno.
 */
int xgpu_vi_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_vi_mailbox_flr_work);

	return 0;
}
 612 
/* Drop the references taken in xgpu_vi_mailbox_get_irq(), disabling both
 * mailbox interrupts.
 */
void xgpu_vi_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
 618 
/* VF virtualization ops exported for VI SR-IOV (Fiji/Tonga). */
const struct amdgpu_virt_ops xgpu_vi_virt_ops = {
	.req_full_gpu		= xgpu_vi_request_full_gpu_access,
	.rel_full_gpu		= xgpu_vi_release_full_gpu_access,
	.reset_gpu		= xgpu_vi_request_reset,
	.wait_reset		= xgpu_vi_wait_reset_cmpl,
	.trans_msg		= NULL, /* Does not need to trans VF errors to host. */
};

/* [<][>][^][v][top][bottom][index][help] */