/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>
#include <linux/slab.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"

/*
 * Indirect register accessors
 */
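/*
 * The PCIE registers sit behind an index/data pair: the register
 * offset is written to mmPCIE_INDEX, then mmPCIE_DATA is read or
 * written.  The spinlock serializes the two-step sequence, and the
 * discarded read-back of the index (and of the data port after a
 * write) is presumably there to make sure the preceding write has
 * landed before the sequence continues.
 */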
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	r = RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	WREG32_NO_KIQ(mmPCIE_DATA, v);
	(void)RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

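/*
 * On the APUs (Carrizo and friends) the SMC registers are reached
 * through the MP0PUB index/data pair defined above (normally supplied
 * by smu_8_0_d.h, which this file does not include), rather than the
 * SMC_IND_INDEX/DATA pair the dGPU accessors use; vi_common_early_init()
 * picks the right pair per chip.
 */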
static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

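/*
 * Golden register settings.  Each entry below is a {register, AND
 * mask, OR value} triplet consumed by
 * amdgpu_device_program_register_sequence(): roughly, the register is
 * read, the bits covered by the AND mask are cleared, the OR value is
 * merged in and the result written back (a full 0xffffffff mask just
 * writes the value verbatim).
 */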
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;

	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

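/*
 * vi_read_disabled_bios - read the vbios from a GPU whose ROM access
 * is currently blocked.  Saves BUS_CNTL, the D1/D2 VGA controls and
 * ROM_CNTL, then temporarily enables the BIOS ROM, parks the display
 * controllers out of VGA mode and overrides the ROM clock
 * (SCK_OVERWRITE) before fetching the image with amdgpu_read_bios().
 * Every register that was touched is restored afterwards.
 */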
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

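/*
 * vi_read_bios_from_rom - read the vbios directly out of the serial
 * ROM through the SMC ROM_INDEX/ROM_DATA indirect pair.  Only valid
 * on dGPUs; on APUs the vbios is carried inside the system BIOS
 * image, so the function bails out.  Note that the destination
 * buffer is filled in whole dwords, so callers should size it to a
 * multiple of four bytes.
 */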
static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};

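/*
 * vi_get_register_value - return a register value for the register
 * read path.  GRBM-indexed registers that the driver already caches
 * per SE/SH (RB backend disables, raster configs) are answered from
 * adev->gfx.config; other indexed registers are read under
 * grbm_idx_mutex with the requested SE/SH selected and the selection
 * restored afterwards.  Non-indexed tile and macrotile mode registers
 * likewise come from the cached config rather than the hardware.
 */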
static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}

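/*
 * vi_gpu_pci_config_reset - reset the GPU through the PCI config
 * space reset mechanism: bus mastering is disabled, the reset is
 * triggered, and mmCONFIG_MEMSIZE (which reads back as 0xffffffff
 * while the ASIC is in reset) is then polled until the chip comes
 * back or adev->usec_timeout expires.
 */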
static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

/**
 * vi_asic_pci_config_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Use PCI Config method to reset the GPU.
 *
 * Returns 0 for success.
 */
static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static bool vi_asic_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		return amdgpu_dpm_is_baco_supported(adev);
	default:
		return false;
	}
}

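/*
 * vi_asic_reset_method - pick the reset mechanism for this ASIC.  An
 * explicit amdgpu_reset_method module parameter wins if it names a
 * method VI supports (legacy or BACO); otherwise BACO is chosen on
 * the dGPUs that support it and everything else falls back to the
 * legacy PCI config reset.
 */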
static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset;

	if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
		break;
	default:
		baco_reset = false;
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_LEGACY;
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		dev_info(adev->dev, "BACO reset\n");
		r = amdgpu_dpm_baco_reset(adev);
	} else {
		dev_info(adev->dev, "PCI CONFIG reset\n");
		r = vi_asic_pci_config_reset(adev);
	}

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}

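/*
 * vi_set_uvd_clock - program one UVD clock (vclk or dclk).  The
 * divider is computed by the atombios clock-divider helper, written
 * into the given CNTL register, and the STATUS register is then
 * polled (100 x 10ms, so up to ~1s) until the clock reports stable.
 * APUs leave the direct-control enable bit alone and use a different
 * status bit (0x10000) than the dGPU CG_DCLK_STATUS mask.
 */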
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
				CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;
	return 0;
}

#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
#define ixGNB_CLK1_STATUS   0xD822010C
#define ixGNB_CLK2_DFS_CNTL 0xD8220110
#define ixGNB_CLK2_STATUS   0xD822012C
#define ixGNB_CLK3_DFS_CNTL 0xD8220130
#define ixGNB_CLK3_STATUS   0xD822014C

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
		if (r)
			return r;
	} else {
		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
		if (r)
			return r;
	}

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

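/*
 * HDP flush/invalidate: when called without a ring (or with a ring
 * that cannot emit register writes) the HDP registers are poked
 * directly through MMIO; otherwise the write is emitted into the
 * ring so it is ordered with the rest of the submission.
 */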
static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}

static void vi_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_DEBUG0, 1);
		RREG32(mmHDP_DEBUG0);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
	}
}

static bool vi_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* CZ has hang issues with full reset at the moment */
		return false;
	case CHIP_FIJI:
	case CHIP_TONGA:
		/* XXX: soft reset should work on fiji and tonga */
		return true;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
	default:
		/* change this when we support soft reset */
		return true;
	}
}

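/*
 * vi_get_pcie_usage - sample the PCIE TX perf counters.  Two events
 * are selected on PCIE_PERF_CNTL_TXCLK (received messages and posted
 * requests sent), the counters are zeroed and run for one second,
 * then stopped with their values latched into the shadow registers;
 * the 32-bit shadow values are combined with the upper overflow
 * fields to produce 64-bit counts.
 */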
static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
			      uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}

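/*
 * vi_need_reset_on_init - decide whether the ASIC needs a reset
 * before it can be initialized.  If the SMC clock is not gated off
 * and its program counter has advanced past its boot range, firmware
 * is presumably still running from a previous driver instance (e.g.
 * after a kexec), so a reset is requested.
 */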
static bool vi_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 clock_cntl, pc;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* check if the SMC is already running */
	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	pc = RREG32_SMC(ixSMC_PC_C);
	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
	    (0x20100 <= pc))
		return true;

	return false;
}

static void vi_pre_asic_init(struct amdgpu_device *adev)
{
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.reset_method = &vi_asic_reset_method,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
	.need_full_reset = &vi_need_full_reset,
	.init_doorbell_index = &legacy_doorbell_index_init,
	.get_pcie_usage = &vi_get_pcie_usage,
	.need_reset_on_init = &vi_need_reset_on_init,
	.get_pcie_replay_count = &vi_get_pcie_replay_count,
	.supports_baco = &vi_asic_supports_baco,
	.pre_asic_init = &vi_pre_asic_init,
};

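/*
 * Bristol Ridge parts report themselves as Carrizo with PCI revision
 * IDs in the ranges below; the macro lets Carrizo init code
 * special-case them.
 */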
1098*4882a593Smuzhiyun #define CZ_REV_BRISTOL(rev) \
1099*4882a593Smuzhiyun ((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))
1100*4882a593Smuzhiyun
vi_common_early_init(void * handle)1101*4882a593Smuzhiyun static int vi_common_early_init(void *handle)
1102*4882a593Smuzhiyun {
1103*4882a593Smuzhiyun struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1104*4882a593Smuzhiyun
1105*4882a593Smuzhiyun if (adev->flags & AMD_IS_APU) {
1106*4882a593Smuzhiyun adev->smc_rreg = &cz_smc_rreg;
1107*4882a593Smuzhiyun adev->smc_wreg = &cz_smc_wreg;
1108*4882a593Smuzhiyun } else {
1109*4882a593Smuzhiyun adev->smc_rreg = &vi_smc_rreg;
1110*4882a593Smuzhiyun adev->smc_wreg = &vi_smc_wreg;
1111*4882a593Smuzhiyun }
1112*4882a593Smuzhiyun adev->pcie_rreg = &vi_pcie_rreg;
1113*4882a593Smuzhiyun adev->pcie_wreg = &vi_pcie_wreg;
1114*4882a593Smuzhiyun adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
1115*4882a593Smuzhiyun adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
1116*4882a593Smuzhiyun adev->didt_rreg = &vi_didt_rreg;
1117*4882a593Smuzhiyun adev->didt_wreg = &vi_didt_wreg;
1118*4882a593Smuzhiyun adev->gc_cac_rreg = &vi_gc_cac_rreg;
1119*4882a593Smuzhiyun adev->gc_cac_wreg = &vi_gc_cac_wreg;
1120*4882a593Smuzhiyun
1121*4882a593Smuzhiyun adev->asic_funcs = &vi_asic_funcs;
1122*4882a593Smuzhiyun
1123*4882a593Smuzhiyun adev->rev_id = vi_get_rev_id(adev);
1124*4882a593Smuzhiyun adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_VEGAM:
		adev->cg_flags = 0;
			/*AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;*/
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x6E;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

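/*
 * Late init only has work to do under SR-IOV: hook up the host-to-guest
 * mailbox interrupt once the IH block has been initialized.
 */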
static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

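/* Under SR-IOV, register the mailbox interrupt sources with the IH block. */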
static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

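/*
 * Program the state shared by every IP block: golden register settings,
 * PCIe link speed, ASPM and the doorbell aperture.  Each step maps to
 * one of the helpers called below.
 */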
static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

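/*
 * The common block owns no engine that can be busy, so the idle and
 * soft-reset hooks are trivial stubs.
 */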
static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

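/*
 * BIF medium-grain light sleep: gate the slave, master and replay
 * memories via PCIE_CNTL2.  The register is only written when the
 * cached value actually changes, a pattern every vi_update_* helper
 * below shares.
 */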
static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

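/* HDP medium-grain clock gating via the CLOCK_GATING_DIS bit (active low). */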
static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

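/* HDP light sleep via HDP_MEM_POWER_LS.LS_ENABLE. */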
static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

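/*
 * DRM block light sleep.  The VI headers provide no mask/shift
 * definitions for this register, so the raw offset 0x157a and bit 0
 * are used directly.
 */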
static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (temp != data)
		WREG32(0x157a, data);
}

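/*
 * ROM medium-grain clock gating: clearing the soft overrides in
 * CGTT_ROM_CLK_CTRL0 allows the ROM clocks to be gated; setting them
 * forces the clocks on.
 */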
static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

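/*
 * On SMU-managed ASICs (Tonga/Polaris/VEGAM), clock gating is not
 * toggled by direct register writes.  Instead, each system block (MC,
 * SDMA, HDP, BIF, DRM, ROM) gets a PP_CG_MSG_ID() message that packs
 * the group, block, supported-state mask and requested state; on
 * ungate the requested state is cleared so the SMU disables gating.
 */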
static int vi_common_set_clockgating_state_by_smu(void *handle,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_MC,
				      pp_support_state,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_SDMA,
				      pp_support_state,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_HDP,
				      pp_support_state,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_DRM,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_ROM,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	return 0;
}

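/*
 * Per-ASIC dispatch: Fiji and the APUs are gated through the direct
 * register helpers above, while Tonga/Polaris/VEGAM go through the SMU
 * and fall through to the empty default case.  SR-IOV guests must not
 * touch clock gating at all.
 */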
static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		vi_common_set_clockgating_state_by_smu(adev, state);
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

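/*
 * Report the gating state actually programmed in hardware by reading
 * the live registers rather than trusting the cached cg_flags.
 */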
static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};

void vi_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_vi_virt_ops;
}

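/*
 * Register the IP blocks for each VI ASIC in initialization order:
 * common, GMC, IH, GFX, SDMA and SMU first, then a display block
 * (virtual display, DC when built in, or the legacy DCE block), and
 * finally the multimedia engines (UVD/VCE, plus ACP on the APUs).
 * SR-IOV guests get virtual display and, on Fiji/Tonga, skip UVD/VCE.
 */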
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

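/*
 * Pre-GFX9 ("legacy") doorbell layout: fixed indices for the KIQ, the
 * eight MEC compute rings, the GFX ring, both SDMA engines and the IH
 * ring, capped by AMDGPU_DOORBELL_MAX_ASSIGNMENT.
 */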
void legacy_doorbell_index_init(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
}