/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <drm/drm.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/radeon_drm.h>

#include "r100_track.h"
#include "r300_reg_safe.h"
#include "r300d.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_reg.h"
#include "rv350d.h"

/* This file gathers functions specific to: r300,r350,rv350,rv370,rv380
 *
 * GPU Errata:
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 *   using MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
 *   However, scheduling such a write on the ring seems harmless; I suspect
 *   the CP read collides with the flush somehow, or maybe the MC, hard to
 *   tell. (Jerome Glisse)
 */

/*
 * Indirect register accessors
 */
uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t r;

	spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	r = RREG32(RADEON_PCIE_DATA);
	spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
	return r;
}

void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	WREG32(RADEON_PCIE_DATA, (v));
	spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
}
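
/*
 * Usage sketch (illustrative, not taken verbatim from this driver): a
 * read-modify-write of an indexed PCIE core register goes through the
 * accessors above, e.g.
 *
 *	u32 tmp = rv370_pcie_rreg(rdev, RADEON_PCIE_TX_GART_CNTL);
 *	rv370_pcie_wreg(rdev, RADEON_PCIE_TX_GART_CNTL,
 *			tmp | RADEON_PCIE_TX_GART_EN);
 *
 * The RREG32_PCIE()/WREG32_PCIE() macros used throughout this file are
 * expected to wrap these two helpers; the spinlock keeps each
 * INDEX/DATA register pair atomic with respect to other PCIE accesses.
 */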

/*
 * rv370,rv380 PCIE GART
 */
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	/* Workaround for a HW bug: do the flush twice */
	for (i = 0; i < 2; i++) {
		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	}
	mb();
}

#define R300_PTE_UNSNOOPED (1 << 0)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE  (1 << 3)

uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
	addr = (lower_32_bits(addr) >> 8) |
		((upper_32_bits(addr) & 0xff) << 24);
	if (flags & RADEON_GART_PAGE_READ)
		addr |= R300_PTE_READABLE;
	if (flags & RADEON_GART_PAGE_WRITE)
		addr |= R300_PTE_WRITEABLE;
	if (!(flags & RADEON_GART_PAGE_SNOOP))
		addr |= R300_PTE_UNSNOOPED;
	return addr;
}
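
/*
 * Worked example of the packing above (illustrative values, not from
 * hardware documentation): for a bus address 0x1_23456000 (up to 40 bits
 * are representable) with READ, WRITE and SNOOP flags set:
 *
 *	lower_32_bits(addr) >> 8           -> 0x00234560
 *	(upper_32_bits(addr) & 0xff) << 24 -> 0x01000000
 *	| R300_PTE_READABLE  (bit 3)       -> | 0x8
 *	| R300_PTE_WRITEABLE (bit 2)       -> | 0x4
 *	SNOOP is set, so no UNSNOOPED bit
 *
 * giving the 32-bit PTE 0x0123456C. Pages are 4KB aligned, so the low
 * nibble freed up by the >> 8 is always available for the flag bits.
 */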

void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
			      uint64_t entry)
{
	void __iomem *ptr = rdev->gart.ptr;

	/* On x86 we want this to be CPU endian; on powerpc without HW
	 * swappers it'll get swapped on the way into VRAM, so there is
	 * no need for cpu_to_le32 on VRAM tables. */
	writel(entry, ((void __iomem *)ptr) + (i * 4));
}

int rv370_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "RV370 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	r = rv370_debugfs_pcie_gart_info_init(rdev);
	if (r)
		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
	rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
	rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
	return radeon_gart_table_vram_alloc(rdev);
}
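
/*
 * Note: table_size above is num_gpu_pages * 4 because each rv370 GART
 * entry, as built by rv370_pcie_gart_get_page_entry(), is a single
 * 32-bit word.
 */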

int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* discard memory requests outside of the configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
	tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	table_addr = rdev->gart.table_addr;
	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	/* FIXME: setup default page */
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
	/* Clear error */
	WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_EN;
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)table_addr);
	rdev->gart.ready = true;
	return 0;
}
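
/*
 * The enable sequence above, in short: pin the page table in VRAM,
 * program the GART aperture to [gtt_start, gtt_end), point the TX unit
 * at the table, discard (rather than translate) accesses outside the
 * aperture, clear any stale error, then set RADEON_PCIE_TX_GART_EN and
 * invalidate the TLB so the new mappings take effect.
 */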

void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
	radeon_gart_table_vram_unpin(rdev);
}

void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are ib schedule and buffer move) */
	/* Write SC register so SC & US assert idle */
	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
	radeon_ring_write(ring, 0);
	/* Flush 3D cache */
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_ZC_FLUSH);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
				 RADEON_WAIT_2D_IDLECLEAN |
				 RADEON_WAIT_DMA_GUI_IDLE));
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}
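
/*
 * Note that the HOST_PATH_CNTL writes above are scheduled on the ring
 * (each PACKET0 header is followed by one payload dword) rather than
 * done via MMIO: per the errata comment at the top of this file,
 * flushing the host path read cache through MMIO can hard-lock
 * r300-family chips, while the same write from the CP appears safe.
 */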

void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned gb_tile_config;
	int r;

	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}

	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		return;
	}
	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(ring,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
	radeon_ring_write(ring, gb_tile_config);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
	radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
	radeon_ring_write(ring,
			  ((6 << R300_MS_X0_SHIFT) |
			   (6 << R300_MS_Y0_SHIFT) |
			   (6 << R300_MS_X1_SHIFT) |
			   (6 << R300_MS_Y1_SHIFT) |
			   (6 << R300_MS_X2_SHIFT) |
			   (6 << R300_MS_Y2_SHIFT) |
			   (6 << R300_MSBD0_Y_SHIFT) |
			   (6 << R300_MSBD0_X_SHIFT)));
	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
	radeon_ring_write(ring,
			  ((6 << R300_MS_X3_SHIFT) |
			   (6 << R300_MS_Y3_SHIFT) |
			   (6 << R300_MS_X4_SHIFT) |
			   (6 << R300_MS_Y4_SHIFT) |
			   (6 << R300_MS_X5_SHIFT) |
			   (6 << R300_MS_Y5_SHIFT) |
			   (6 << R300_MSBD1_SHIFT)));
	radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
	radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
	radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
	radeon_ring_write(ring,
			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
	radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
	radeon_ring_write(ring,
			  R300_GEOMETRY_ROUND_NEAREST |
			  R300_COLOR_ROUND_NEAREST);
	radeon_ring_unlock_commit(rdev, ring, false);
}

static void r300_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_R300 &&
	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
	}
}

int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(RADEON_MC_STATUS);
		if (tmp & R300_MC_IDLE) {
			return 0;
		}
		udelay(1);
	}
	return -1;
}
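
/*
 * Polling sketch: callers treat a non-zero return as "the MC never went
 * idle within rdev->usec_timeout microseconds", e.g.
 *
 *	if (r300_mc_wait_for_idle(rdev))
 *		pr_warn("Failed to wait MC idle ...\n");
 *
 * as done in r300_gpu_init() below.
 */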

static void r300_gpu_init(struct radeon_device *rdev)
{
	uint32_t gb_tile_config, tmp;

	if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
	    (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
		/* r300,r350 */
		rdev->num_gb_pipes = 2;
	} else {
		/* rv350,rv370,rv380,r300 AD, r350 AH */
		rdev->num_gb_pipes = 1;
	}
	rdev->num_z_pipes = 1;
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	default:
	case 1:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}
	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

	if (r100_gui_wait_for_idle(rdev)) {
		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(R300_DST_PIPE_CONFIG);
	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
	}
	if (r300_mc_wait_for_idle(rdev)) {
		pr_warn("Failed to wait MC idle while programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized\n",
		 rdev->num_gb_pipes, rdev->num_z_pipes);
}

int r300_asic_reset(struct radeon_device *rdev, bool hard)
{
	struct r100_mc_save save;
	u32 status, tmp;
	int ret = 0;

	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	r100_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	/* save PCI state */
	pci_save_state(rdev->pdev);
	/* disable bus mastering */
	r100_bm_disable(rdev);
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* Resetting the CP seems to be problematic: sometimes it ends up
	 * hard-locking the computer, but it's necessary for a successful
	 * reset. More testing & playing is needed on R3XX/R4XX to find a
	 * reliable solution (if any).
	 */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(rdev->pdev);
	r100_enable_bm(rdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeeded\n");
	r100_mc_resume(rdev, &save);
	return ret;
}

/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_mc_init(struct radeon_device *rdev)
{
	u64 base;
	u32 tmp;

	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RADEON_MEM_CNTL);
	tmp &= R300_MEM_NUM_CHANNELS_MASK;
	switch (tmp) {
	case 0: rdev->mc.vram_width = 64; break;
	case 1: rdev->mc.vram_width = 128; break;
	case 2: rdev->mc.vram_width = 256; break;
	default: rdev->mc.vram_width = 128; break;
	}
	r100_vram_init_sizes(rdev);
	base = rdev->mc.aper_base;
	if (rdev->flags & RADEON_IS_IGP)
		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}
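
/*
 * Reading of the code above (a sketch, not from documentation):
 * MEM_NUM_CHANNELS encodes the memory bus width (0 -> 64-bit,
 * 1 -> 128-bit, 2 -> 256-bit). On IGP parts the framebuffer lives in
 * system RAM, so the VRAM base is taken from the northbridge
 * top-of-memory register (NB_TOM) rather than the PCI aperture.
 */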

void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	uint32_t link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     RADEON_PCIE_LC_RECONFIG_LATER |
			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
	link_width_cntl |= mask;
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						     RADEON_PCIE_LC_RECONFIG_NOW));

	/* wait for lane set to complete */
	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
}
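
/*
 * Behaviour inferred from the code above, not from documentation:
 * writing RECONFIG_NOW kicks the link retrain, and the busy-wait spins
 * while the register reads back as all-ones, which is the usual value
 * returned while a PCIe link is down mid-retrain.
 */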

int rv370_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* FIXME wait for idle */

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
		return 0;
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
	return 0;
#endif
}

static int r300_packet0_check(struct radeon_cs_parser *p,
		struct radeon_cs_packet *pkt,
		unsigned idx, unsigned reg)
{
	struct radeon_bo_list *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp, tile_flags = 0;
	unsigned i;
	int r;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;
	idx_value = radeon_get_ib_value(p, idx);

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
	case R300_RB3D_COLOROFFSET2:
	case R300_RB3D_COLOROFFSET3:
		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[i].robj = reloc->robj;
		track->cb[i].offset = idx_value;
		track->cb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R300_ZB_DEPTHOFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		track->zb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R300_TX_OFFSET_0:
	case R300_TX_OFFSET_0+4:
	case R300_TX_OFFSET_0+8:
	case R300_TX_OFFSET_0+12:
	case R300_TX_OFFSET_0+16:
	case R300_TX_OFFSET_0+20:
	case R300_TX_OFFSET_0+24:
	case R300_TX_OFFSET_0+28:
	case R300_TX_OFFSET_0+32:
	case R300_TX_OFFSET_0+36:
	case R300_TX_OFFSET_0+40:
	case R300_TX_OFFSET_0+44:
	case R300_TX_OFFSET_0+48:
	case R300_TX_OFFSET_0+52:
	case R300_TX_OFFSET_0+56:
	case R300_TX_OFFSET_0+60:
		i = (reg - R300_TX_OFFSET_0) >> 2;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}

		if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
			ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
				  ((idx_value & ~31) + (u32)reloc->gpu_offset);
		} else {
			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R300_TXO_MACRO_TILE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R300_TXO_MICRO_TILE;
			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
				tile_flags |= R300_TXO_MICRO_TILE_SQUARE;

			tmp = idx_value + ((u32)reloc->gpu_offset);
			tmp |= tile_flags;
			ib[idx] = tmp;
		}
		track->textures[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	/* Tracked registers */
	case 0x2084:
		/* VAP_VF_CNTL */
		track->vap_vf_cntl = idx_value;
		break;
	case 0x20B4:
		/* VAP_VTX_SIZE */
		track->vtx_size = idx_value & 0x7F;
		break;
	case 0x2134:
		/* VAP_VF_MAX_VTX_INDX */
		track->max_indx = idx_value & 0x00FFFFFFUL;
		break;
	case 0x2088:
		/* VAP_ALT_NUM_VERTICES - only valid on r500 */
		if (p->rdev->family < CHIP_RV515)
			goto fail;
		track->vap_alt_nverts = idx_value & 0xFFFFFF;
		break;
	case 0x43E4:
		/* SC_SCISSOR1 */
		track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
		if (p->rdev->family < CHIP_RV515) {
			track->maxy -= 1440;
		}
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case 0x4E00:
		/* RB3D_CCTL */
		if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
		    p->rdev->cmask_filp != p->filp) {
			DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
			return -EINVAL;
		}
		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
		track->cb_dirty = true;
		break;
	case 0x4E38:
	case 0x4E3C:
	case 0x4E40:
	case 0x4E44:
		/* RB3D_COLORPITCH0 */
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				radeon_cs_dump_packet(p, pkt);
				return r;
			}

			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R300_COLOR_TILE_ENABLE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R300_COLOR_MICROTILE_ENABLE;
			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
				tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		}
		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = idx_value & 0x3FFE;
		switch (((idx_value >> 21) & 0xF)) {
		case 9:
		case 11:
		case 12:
			track->cb[i].cpp = 1;
			break;
		case 3:
		case 4:
		case 13:
		case 15:
			track->cb[i].cpp = 2;
			break;
		case 5:
			if (p->rdev->family < CHIP_RV515) {
				DRM_ERROR("Invalid color buffer format (%d)!\n",
					  ((idx_value >> 21) & 0xF));
				return -EINVAL;
			}
			fallthrough;
		case 6:
			track->cb[i].cpp = 4;
			break;
		case 10:
			track->cb[i].cpp = 8;
			break;
		case 7:
			track->cb[i].cpp = 16;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> 21) & 0xF));
			return -EINVAL;
		}
		track->cb_dirty = true;
		break;
	case 0x4F00:
		/* ZB_CNTL */
		if (idx_value & 2) {
			track->z_enabled = true;
		} else {
			track->z_enabled = false;
		}
		track->zb_dirty = true;
		break;
	case 0x4F10:
		/* ZB_FORMAT */
		switch ((idx_value & 0xF)) {
		case 0:
		case 1:
			track->zb.cpp = 2;
			break;
		case 2:
			track->zb.cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid z buffer format (%d) !\n",
				  (idx_value & 0xF));
			return -EINVAL;
		}
		track->zb_dirty = true;
		break;
	case 0x4F24:
		/* ZB_DEPTHPITCH */
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				radeon_cs_dump_packet(p, pkt);
				return r;
			}

			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R300_DEPTHMACROTILE_ENABLE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R300_DEPTHMICROTILE_TILED;
			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
				tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		}
		track->zb.pitch = idx_value & 0x3FFC;
		track->zb_dirty = true;
		break;
891*4882a593Smuzhiyun 	case 0x4104:
892*4882a593Smuzhiyun 		/* TX_ENABLE */
893*4882a593Smuzhiyun 		for (i = 0; i < 16; i++) {
894*4882a593Smuzhiyun 			bool enabled;
895*4882a593Smuzhiyun 
896*4882a593Smuzhiyun 			enabled = !!(idx_value & (1 << i));
897*4882a593Smuzhiyun 			track->textures[i].enabled = enabled;
898*4882a593Smuzhiyun 		}
899*4882a593Smuzhiyun 		track->tex_dirty = true;
900*4882a593Smuzhiyun 		break;
901*4882a593Smuzhiyun 	case 0x44C0:
902*4882a593Smuzhiyun 	case 0x44C4:
903*4882a593Smuzhiyun 	case 0x44C8:
904*4882a593Smuzhiyun 	case 0x44CC:
905*4882a593Smuzhiyun 	case 0x44D0:
906*4882a593Smuzhiyun 	case 0x44D4:
907*4882a593Smuzhiyun 	case 0x44D8:
908*4882a593Smuzhiyun 	case 0x44DC:
909*4882a593Smuzhiyun 	case 0x44E0:
910*4882a593Smuzhiyun 	case 0x44E4:
911*4882a593Smuzhiyun 	case 0x44E8:
912*4882a593Smuzhiyun 	case 0x44EC:
913*4882a593Smuzhiyun 	case 0x44F0:
914*4882a593Smuzhiyun 	case 0x44F4:
915*4882a593Smuzhiyun 	case 0x44F8:
916*4882a593Smuzhiyun 	case 0x44FC:
917*4882a593Smuzhiyun 		/* TX_FORMAT1_[0-15] */
918*4882a593Smuzhiyun 		i = (reg - 0x44C0) >> 2;
919*4882a593Smuzhiyun 		tmp = (idx_value >> 25) & 0x3;
920*4882a593Smuzhiyun 		track->textures[i].tex_coord_type = tmp;
921*4882a593Smuzhiyun 		switch ((idx_value & 0x1F)) {
922*4882a593Smuzhiyun 		case R300_TX_FORMAT_X8:
923*4882a593Smuzhiyun 		case R300_TX_FORMAT_Y4X4:
924*4882a593Smuzhiyun 		case R300_TX_FORMAT_Z3Y3X2:
925*4882a593Smuzhiyun 			track->textures[i].cpp = 1;
926*4882a593Smuzhiyun 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
927*4882a593Smuzhiyun 			break;
928*4882a593Smuzhiyun 		case R300_TX_FORMAT_X16:
929*4882a593Smuzhiyun 		case R300_TX_FORMAT_FL_I16:
930*4882a593Smuzhiyun 		case R300_TX_FORMAT_Y8X8:
931*4882a593Smuzhiyun 		case R300_TX_FORMAT_Z5Y6X5:
932*4882a593Smuzhiyun 		case R300_TX_FORMAT_Z6Y5X5:
933*4882a593Smuzhiyun 		case R300_TX_FORMAT_W4Z4Y4X4:
934*4882a593Smuzhiyun 		case R300_TX_FORMAT_W1Z5Y5X5:
935*4882a593Smuzhiyun 		case R300_TX_FORMAT_D3DMFT_CxV8U8:
936*4882a593Smuzhiyun 		case R300_TX_FORMAT_B8G8_B8G8:
937*4882a593Smuzhiyun 		case R300_TX_FORMAT_G8R8_G8B8:
938*4882a593Smuzhiyun 			track->textures[i].cpp = 2;
939*4882a593Smuzhiyun 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
940*4882a593Smuzhiyun 			break;
941*4882a593Smuzhiyun 		case R300_TX_FORMAT_Y16X16:
942*4882a593Smuzhiyun 		case R300_TX_FORMAT_FL_I16A16:
943*4882a593Smuzhiyun 		case R300_TX_FORMAT_Z11Y11X10:
944*4882a593Smuzhiyun 		case R300_TX_FORMAT_Z10Y11X11:
945*4882a593Smuzhiyun 		case R300_TX_FORMAT_W8Z8Y8X8:
946*4882a593Smuzhiyun 		case R300_TX_FORMAT_W2Z10Y10X10:
947*4882a593Smuzhiyun 		case 0x17:
948*4882a593Smuzhiyun 		case R300_TX_FORMAT_FL_I32:
949*4882a593Smuzhiyun 		case 0x1e:
950*4882a593Smuzhiyun 			track->textures[i].cpp = 4;
951*4882a593Smuzhiyun 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
952*4882a593Smuzhiyun 			break;
953*4882a593Smuzhiyun 		case R300_TX_FORMAT_W16Z16Y16X16:
954*4882a593Smuzhiyun 		case R300_TX_FORMAT_FL_R16G16B16A16:
955*4882a593Smuzhiyun 		case R300_TX_FORMAT_FL_I32A32:
956*4882a593Smuzhiyun 			track->textures[i].cpp = 8;
957*4882a593Smuzhiyun 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
958*4882a593Smuzhiyun 			break;
959*4882a593Smuzhiyun 		case R300_TX_FORMAT_FL_R32G32B32A32:
960*4882a593Smuzhiyun 			track->textures[i].cpp = 16;
961*4882a593Smuzhiyun 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
962*4882a593Smuzhiyun 			break;
963*4882a593Smuzhiyun 		case R300_TX_FORMAT_DXT1:
964*4882a593Smuzhiyun 			track->textures[i].cpp = 1;
965*4882a593Smuzhiyun 			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
966*4882a593Smuzhiyun 			break;
967*4882a593Smuzhiyun 		case R300_TX_FORMAT_ATI2N:
968*4882a593Smuzhiyun 			if (p->rdev->family < CHIP_R420) {
969*4882a593Smuzhiyun 				DRM_ERROR("Invalid texture format %u\n",
970*4882a593Smuzhiyun 					  (idx_value & 0x1F));
971*4882a593Smuzhiyun 				return -EINVAL;
972*4882a593Smuzhiyun 			}
973*4882a593Smuzhiyun 			/* The same rules apply as for DXT3/5. */
974*4882a593Smuzhiyun 			fallthrough;
975*4882a593Smuzhiyun 		case R300_TX_FORMAT_DXT3:
976*4882a593Smuzhiyun 		case R300_TX_FORMAT_DXT5:
977*4882a593Smuzhiyun 			track->textures[i].cpp = 1;
978*4882a593Smuzhiyun 			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
979*4882a593Smuzhiyun 			break;
980*4882a593Smuzhiyun 		default:
981*4882a593Smuzhiyun 			DRM_ERROR("Invalid texture format %u\n",
982*4882a593Smuzhiyun 				  (idx_value & 0x1F));
983*4882a593Smuzhiyun 			return -EINVAL;
984*4882a593Smuzhiyun 		}
985*4882a593Smuzhiyun 		track->tex_dirty = true;
986*4882a593Smuzhiyun 		break;
987*4882a593Smuzhiyun 	case 0x4400:
988*4882a593Smuzhiyun 	case 0x4404:
989*4882a593Smuzhiyun 	case 0x4408:
990*4882a593Smuzhiyun 	case 0x440C:
991*4882a593Smuzhiyun 	case 0x4410:
992*4882a593Smuzhiyun 	case 0x4414:
993*4882a593Smuzhiyun 	case 0x4418:
994*4882a593Smuzhiyun 	case 0x441C:
995*4882a593Smuzhiyun 	case 0x4420:
996*4882a593Smuzhiyun 	case 0x4424:
997*4882a593Smuzhiyun 	case 0x4428:
998*4882a593Smuzhiyun 	case 0x442C:
999*4882a593Smuzhiyun 	case 0x4430:
1000*4882a593Smuzhiyun 	case 0x4434:
1001*4882a593Smuzhiyun 	case 0x4438:
1002*4882a593Smuzhiyun 	case 0x443C:
1003*4882a593Smuzhiyun 		/* TX_FILTER0_[0-15] */
1004*4882a593Smuzhiyun 		i = (reg - 0x4400) >> 2;
1005*4882a593Smuzhiyun 		tmp = idx_value & 0x7;
1006*4882a593Smuzhiyun 		if (tmp == 2 || tmp == 4 || tmp == 6) {
1007*4882a593Smuzhiyun 			track->textures[i].roundup_w = false;
1008*4882a593Smuzhiyun 		}
1009*4882a593Smuzhiyun 		tmp = (idx_value >> 3) & 0x7;
1010*4882a593Smuzhiyun 		if (tmp == 2 || tmp == 4 || tmp == 6) {
1011*4882a593Smuzhiyun 			track->textures[i].roundup_h = false;
1012*4882a593Smuzhiyun 		}
1013*4882a593Smuzhiyun 		track->tex_dirty = true;
1014*4882a593Smuzhiyun 		break;
1015*4882a593Smuzhiyun 	case 0x4500:
1016*4882a593Smuzhiyun 	case 0x4504:
1017*4882a593Smuzhiyun 	case 0x4508:
1018*4882a593Smuzhiyun 	case 0x450C:
1019*4882a593Smuzhiyun 	case 0x4510:
1020*4882a593Smuzhiyun 	case 0x4514:
1021*4882a593Smuzhiyun 	case 0x4518:
1022*4882a593Smuzhiyun 	case 0x451C:
1023*4882a593Smuzhiyun 	case 0x4520:
1024*4882a593Smuzhiyun 	case 0x4524:
1025*4882a593Smuzhiyun 	case 0x4528:
1026*4882a593Smuzhiyun 	case 0x452C:
1027*4882a593Smuzhiyun 	case 0x4530:
1028*4882a593Smuzhiyun 	case 0x4534:
1029*4882a593Smuzhiyun 	case 0x4538:
1030*4882a593Smuzhiyun 	case 0x453C:
1031*4882a593Smuzhiyun 		/* TX_FORMAT2_[0-15] */
1032*4882a593Smuzhiyun 		i = (reg - 0x4500) >> 2;
1033*4882a593Smuzhiyun 		tmp = idx_value & 0x3FFF;
1034*4882a593Smuzhiyun 		track->textures[i].pitch = tmp + 1;
1035*4882a593Smuzhiyun 		if (p->rdev->family >= CHIP_RV515) {
1036*4882a593Smuzhiyun 			tmp = ((idx_value >> 15) & 1) << 11;
1037*4882a593Smuzhiyun 			track->textures[i].width_11 = tmp;
1038*4882a593Smuzhiyun 			tmp = ((idx_value >> 16) & 1) << 11;
1039*4882a593Smuzhiyun 			track->textures[i].height_11 = tmp;
1040*4882a593Smuzhiyun 
1041*4882a593Smuzhiyun 			/* ATI1N */
1042*4882a593Smuzhiyun 			if (idx_value & (1 << 14)) {
1043*4882a593Smuzhiyun 				/* The same rules apply as for DXT1. */
1044*4882a593Smuzhiyun 				track->textures[i].compress_format =
1045*4882a593Smuzhiyun 					R100_TRACK_COMP_DXT1;
1046*4882a593Smuzhiyun 			}
1047*4882a593Smuzhiyun 		} else if (idx_value & (1 << 14)) {
1048*4882a593Smuzhiyun 			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
1049*4882a593Smuzhiyun 			return -EINVAL;
1050*4882a593Smuzhiyun 		}
1051*4882a593Smuzhiyun 		track->tex_dirty = true;
1052*4882a593Smuzhiyun 		break;
1053*4882a593Smuzhiyun 	case 0x4480:
1054*4882a593Smuzhiyun 	case 0x4484:
1055*4882a593Smuzhiyun 	case 0x4488:
1056*4882a593Smuzhiyun 	case 0x448C:
1057*4882a593Smuzhiyun 	case 0x4490:
1058*4882a593Smuzhiyun 	case 0x4494:
1059*4882a593Smuzhiyun 	case 0x4498:
1060*4882a593Smuzhiyun 	case 0x449C:
1061*4882a593Smuzhiyun 	case 0x44A0:
1062*4882a593Smuzhiyun 	case 0x44A4:
1063*4882a593Smuzhiyun 	case 0x44A8:
1064*4882a593Smuzhiyun 	case 0x44AC:
1065*4882a593Smuzhiyun 	case 0x44B0:
1066*4882a593Smuzhiyun 	case 0x44B4:
1067*4882a593Smuzhiyun 	case 0x44B8:
1068*4882a593Smuzhiyun 	case 0x44BC:
1069*4882a593Smuzhiyun 		/* TX_FORMAT0_[0-15] */
1070*4882a593Smuzhiyun 		i = (reg - 0x4480) >> 2;
1071*4882a593Smuzhiyun 		tmp = idx_value & 0x7FF;
1072*4882a593Smuzhiyun 		track->textures[i].width = tmp + 1;
1073*4882a593Smuzhiyun 		tmp = (idx_value >> 11) & 0x7FF;
1074*4882a593Smuzhiyun 		track->textures[i].height = tmp + 1;
1075*4882a593Smuzhiyun 		tmp = (idx_value >> 26) & 0xF;
1076*4882a593Smuzhiyun 		track->textures[i].num_levels = tmp;
1077*4882a593Smuzhiyun 		tmp = idx_value & (1 << 31);
1078*4882a593Smuzhiyun 		track->textures[i].use_pitch = !!tmp;
1079*4882a593Smuzhiyun 		tmp = (idx_value >> 22) & 0xF;
1080*4882a593Smuzhiyun 		track->textures[i].txdepth = tmp;
1081*4882a593Smuzhiyun 		track->tex_dirty = true;
1082*4882a593Smuzhiyun 		break;
1083*4882a593Smuzhiyun 	case R300_ZB_ZPASS_ADDR:
1084*4882a593Smuzhiyun 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1085*4882a593Smuzhiyun 		if (r) {
1086*4882a593Smuzhiyun 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1087*4882a593Smuzhiyun 					idx, reg);
1088*4882a593Smuzhiyun 			radeon_cs_dump_packet(p, pkt);
1089*4882a593Smuzhiyun 			return r;
1090*4882a593Smuzhiyun 		}
1091*4882a593Smuzhiyun 		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1092*4882a593Smuzhiyun 		break;
1093*4882a593Smuzhiyun 	case 0x4e0c:
1094*4882a593Smuzhiyun 		/* RB3D_COLOR_CHANNEL_MASK */
1095*4882a593Smuzhiyun 		track->color_channel_mask = idx_value;
1096*4882a593Smuzhiyun 		track->cb_dirty = true;
1097*4882a593Smuzhiyun 		break;
1098*4882a593Smuzhiyun 	case 0x43a4:
1099*4882a593Smuzhiyun 		/* SC_HYPERZ_EN */
1100*4882a593Smuzhiyun 		/* r300c emits this register - we need to disable hyperz for it
1101*4882a593Smuzhiyun 		 * without complaining */
1102*4882a593Smuzhiyun 		if (p->rdev->hyperz_filp != p->filp) {
1103*4882a593Smuzhiyun 			if (idx_value & 0x1)
1104*4882a593Smuzhiyun 				ib[idx] = idx_value & ~1;
1105*4882a593Smuzhiyun 		}
1106*4882a593Smuzhiyun 		break;
1107*4882a593Smuzhiyun 	case 0x4f1c:
1108*4882a593Smuzhiyun 		/* ZB_BW_CNTL */
1109*4882a593Smuzhiyun 		track->zb_cb_clear = !!(idx_value & (1 << 5));
1110*4882a593Smuzhiyun 		track->cb_dirty = true;
1111*4882a593Smuzhiyun 		track->zb_dirty = true;
1112*4882a593Smuzhiyun 		if (p->rdev->hyperz_filp != p->filp) {
1113*4882a593Smuzhiyun 			if (idx_value & (R300_HIZ_ENABLE |
1114*4882a593Smuzhiyun 					 R300_RD_COMP_ENABLE |
1115*4882a593Smuzhiyun 					 R300_WR_COMP_ENABLE |
1116*4882a593Smuzhiyun 					 R300_FAST_FILL_ENABLE))
1117*4882a593Smuzhiyun 				goto fail;
1118*4882a593Smuzhiyun 		}
1119*4882a593Smuzhiyun 		break;
1120*4882a593Smuzhiyun 	case 0x4e04:
1121*4882a593Smuzhiyun 		/* RB3D_BLENDCNTL */
1122*4882a593Smuzhiyun 		track->blend_read_enable = !!(idx_value & (1 << 2));
1123*4882a593Smuzhiyun 		track->cb_dirty = true;
1124*4882a593Smuzhiyun 		break;
1125*4882a593Smuzhiyun 	case R300_RB3D_AARESOLVE_OFFSET:
1126*4882a593Smuzhiyun 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1127*4882a593Smuzhiyun 		if (r) {
1128*4882a593Smuzhiyun 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1129*4882a593Smuzhiyun 				  idx, reg);
1130*4882a593Smuzhiyun 			radeon_cs_dump_packet(p, pkt);
1131*4882a593Smuzhiyun 			return r;
1132*4882a593Smuzhiyun 		}
1133*4882a593Smuzhiyun 		track->aa.robj = reloc->robj;
1134*4882a593Smuzhiyun 		track->aa.offset = idx_value;
1135*4882a593Smuzhiyun 		track->aa_dirty = true;
1136*4882a593Smuzhiyun 		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1137*4882a593Smuzhiyun 		break;
1138*4882a593Smuzhiyun 	case R300_RB3D_AARESOLVE_PITCH:
1139*4882a593Smuzhiyun 		track->aa.pitch = idx_value & 0x3FFE;
1140*4882a593Smuzhiyun 		track->aa_dirty = true;
1141*4882a593Smuzhiyun 		break;
1142*4882a593Smuzhiyun 	case R300_RB3D_AARESOLVE_CTL:
1143*4882a593Smuzhiyun 		track->aaresolve = idx_value & 0x1;
1144*4882a593Smuzhiyun 		track->aa_dirty = true;
1145*4882a593Smuzhiyun 		break;
1146*4882a593Smuzhiyun 	case 0x4f30: /* ZB_MASK_OFFSET */
1147*4882a593Smuzhiyun 	case 0x4f34: /* ZB_ZMASK_PITCH */
1148*4882a593Smuzhiyun 	case 0x4f44: /* ZB_HIZ_OFFSET */
1149*4882a593Smuzhiyun 	case 0x4f54: /* ZB_HIZ_PITCH */
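		/* hyperz state: only the client that currently holds
		 * hyperz access (hyperz_filp) may program non-zero
		 * values here */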
1150*4882a593Smuzhiyun 		if (idx_value && (p->rdev->hyperz_filp != p->filp))
1151*4882a593Smuzhiyun 			goto fail;
1152*4882a593Smuzhiyun 		break;
1153*4882a593Smuzhiyun 	case 0x4028:
1154*4882a593Smuzhiyun 		if (idx_value && (p->rdev->hyperz_filp != p->filp))
1155*4882a593Smuzhiyun 			goto fail;
1156*4882a593Smuzhiyun 		/* GB_Z_PEQ_CONFIG */
1157*4882a593Smuzhiyun 		if (p->rdev->family >= CHIP_RV350)
1158*4882a593Smuzhiyun 			break;
1159*4882a593Smuzhiyun 		goto fail;
1161*4882a593Smuzhiyun 	case 0x4be8:
1162*4882a593Smuzhiyun 		/* valid register only on RV530 */
1163*4882a593Smuzhiyun 		if (p->rdev->family == CHIP_RV530)
1164*4882a593Smuzhiyun 			break;
1165*4882a593Smuzhiyun 		/* fall through - do not move */
1166*4882a593Smuzhiyun 	default:
1167*4882a593Smuzhiyun 		goto fail;
1168*4882a593Smuzhiyun 	}
1169*4882a593Smuzhiyun 	return 0;
1170*4882a593Smuzhiyun fail:
1171*4882a593Smuzhiyun 	pr_err("Forbidden register 0x%04X in cs at %d (val=%08x)\n",
1172*4882a593Smuzhiyun 	       reg, idx, idx_value);
1173*4882a593Smuzhiyun 	return -EINVAL;
1174*4882a593Smuzhiyun }
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun static int r300_packet3_check(struct radeon_cs_parser *p,
1177*4882a593Smuzhiyun 			      struct radeon_cs_packet *pkt)
1178*4882a593Smuzhiyun {
1179*4882a593Smuzhiyun 	struct radeon_bo_list *reloc;
1180*4882a593Smuzhiyun 	struct r100_cs_track *track;
1181*4882a593Smuzhiyun 	volatile uint32_t *ib;
1182*4882a593Smuzhiyun 	unsigned idx;
1183*4882a593Smuzhiyun 	int r;
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	ib = p->ib.ptr;
1186*4882a593Smuzhiyun 	idx = pkt->idx + 1;
1187*4882a593Smuzhiyun 	track = (struct r100_cs_track *)p->track;
1188*4882a593Smuzhiyun 	switch (pkt->opcode) {
1189*4882a593Smuzhiyun 	case PACKET3_3D_LOAD_VBPNTR:
1190*4882a593Smuzhiyun 		r = r100_packet3_load_vbpntr(p, pkt, idx);
1191*4882a593Smuzhiyun 		if (r)
1192*4882a593Smuzhiyun 			return r;
1193*4882a593Smuzhiyun 		break;
1194*4882a593Smuzhiyun 	case PACKET3_INDX_BUFFER:
1195*4882a593Smuzhiyun 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1196*4882a593Smuzhiyun 		if (r) {
1197*4882a593Smuzhiyun 			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1198*4882a593Smuzhiyun 			radeon_cs_dump_packet(p, pkt);
1199*4882a593Smuzhiyun 			return r;
1200*4882a593Smuzhiyun 		}
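		/* patch dword 1 of the packet in place: the index
		 * buffer offset is relocated against the buffer
		 * object's GPU address before being checked below */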
1201*4882a593Smuzhiyun 		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
1202*4882a593Smuzhiyun 		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1203*4882a593Smuzhiyun 		if (r)
1204*4882a593Smuzhiyun 			return r;
1206*4882a593Smuzhiyun 		break;
1207*4882a593Smuzhiyun 	/* Draw packet */
1208*4882a593Smuzhiyun 	case PACKET3_3D_DRAW_IMMD:
1209*4882a593Smuzhiyun 		/* Number of dwords is vtx_size * (num_vertices - 1).
1210*4882a593Smuzhiyun 		 * PRIM_WALK must be equal to 3: vertex data is embedded
1211*4882a593Smuzhiyun 		 * in the cmd stream. */
1212*4882a593Smuzhiyun 		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
1213*4882a593Smuzhiyun 			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1214*4882a593Smuzhiyun 			return -EINVAL;
1215*4882a593Smuzhiyun 		}
1216*4882a593Smuzhiyun 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1217*4882a593Smuzhiyun 		track->immd_dwords = pkt->count - 1;
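		/* the packet body carries VAP_VF_CNTL (read above at
		 * idx + 1) ahead of the embedded vertex data;
		 * immd_dwords is what remains for the vertices and is
		 * validated against the vertex size by
		 * r100_cs_track_check() (inferred from this code) */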
1218*4882a593Smuzhiyun 		r = r100_cs_track_check(p->rdev, track);
1219*4882a593Smuzhiyun 		if (r)
1220*4882a593Smuzhiyun 			return r;
1222*4882a593Smuzhiyun 		break;
1223*4882a593Smuzhiyun 	case PACKET3_3D_DRAW_IMMD_2:
1224*4882a593Smuzhiyun 		/* Number of dwords is vtx_size * (num_vertices - 1).
1225*4882a593Smuzhiyun 		 * PRIM_WALK must be equal to 3: vertex data is embedded
1226*4882a593Smuzhiyun 		 * in the cmd stream. */
1227*4882a593Smuzhiyun 		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
1228*4882a593Smuzhiyun 			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1229*4882a593Smuzhiyun 			return -EINVAL;
1230*4882a593Smuzhiyun 		}
1231*4882a593Smuzhiyun 		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1232*4882a593Smuzhiyun 		track->immd_dwords = pkt->count;
1233*4882a593Smuzhiyun 		r = r100_cs_track_check(p->rdev, track);
1234*4882a593Smuzhiyun 		if (r)
1235*4882a593Smuzhiyun 			return r;
1237*4882a593Smuzhiyun 		break;
1238*4882a593Smuzhiyun 	case PACKET3_3D_DRAW_VBUF:
1239*4882a593Smuzhiyun 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1240*4882a593Smuzhiyun 		r = r100_cs_track_check(p->rdev, track);
1241*4882a593Smuzhiyun 		if (r)
1242*4882a593Smuzhiyun 			return r;
1244*4882a593Smuzhiyun 		break;
1245*4882a593Smuzhiyun 	case PACKET3_3D_DRAW_VBUF_2:
1246*4882a593Smuzhiyun 		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1247*4882a593Smuzhiyun 		r = r100_cs_track_check(p->rdev, track);
1248*4882a593Smuzhiyun 		if (r)
1249*4882a593Smuzhiyun 			return r;
1251*4882a593Smuzhiyun 		break;
1252*4882a593Smuzhiyun 	case PACKET3_3D_DRAW_INDX:
1253*4882a593Smuzhiyun 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1254*4882a593Smuzhiyun 		r = r100_cs_track_check(p->rdev, track);
1255*4882a593Smuzhiyun 		if (r)
1256*4882a593Smuzhiyun 			return r;
1258*4882a593Smuzhiyun 		break;
1259*4882a593Smuzhiyun 	case PACKET3_3D_DRAW_INDX_2:
1260*4882a593Smuzhiyun 		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1261*4882a593Smuzhiyun 		r = r100_cs_track_check(p->rdev, track);
1262*4882a593Smuzhiyun 		if (r)
1263*4882a593Smuzhiyun 			return r;
1265*4882a593Smuzhiyun 		break;
1266*4882a593Smuzhiyun 	case PACKET3_3D_CLEAR_HIZ:
1267*4882a593Smuzhiyun 	case PACKET3_3D_CLEAR_ZMASK:
1268*4882a593Smuzhiyun 		if (p->rdev->hyperz_filp != p->filp)
1269*4882a593Smuzhiyun 			return -EINVAL;
1270*4882a593Smuzhiyun 		break;
1271*4882a593Smuzhiyun 	case PACKET3_3D_CLEAR_CMASK:
1272*4882a593Smuzhiyun 		if (p->rdev->cmask_filp != p->filp)
1273*4882a593Smuzhiyun 			return -EINVAL;
1274*4882a593Smuzhiyun 		break;
1275*4882a593Smuzhiyun 	case PACKET3_NOP:
1276*4882a593Smuzhiyun 		break;
1277*4882a593Smuzhiyun 	default:
1278*4882a593Smuzhiyun 		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
1279*4882a593Smuzhiyun 		return -EINVAL;
1280*4882a593Smuzhiyun 	}
1281*4882a593Smuzhiyun 	return 0;
1282*4882a593Smuzhiyun }
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun int r300_cs_parse(struct radeon_cs_parser *p)
1285*4882a593Smuzhiyun {
1286*4882a593Smuzhiyun 	struct radeon_cs_packet pkt;
1287*4882a593Smuzhiyun 	struct r100_cs_track *track;
1288*4882a593Smuzhiyun 	int r;
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun 	track = kzalloc(sizeof(*track), GFP_KERNEL);
1291*4882a593Smuzhiyun 	if (!track)
1292*4882a593Smuzhiyun 		return -ENOMEM;
1293*4882a593Smuzhiyun 	r100_cs_track_clear(p->rdev, track);
1294*4882a593Smuzhiyun 	p->track = track;
1295*4882a593Smuzhiyun 	do {
1296*4882a593Smuzhiyun 		r = radeon_cs_packet_parse(p, &pkt, p->idx);
1297*4882a593Smuzhiyun 		if (r)
1298*4882a593Smuzhiyun 			return r;
1300*4882a593Smuzhiyun 		p->idx += pkt.count + 2;
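		/* a packet occupies one header dword plus (count + 1)
		 * payload dwords, hence the count + 2 advance */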
1301*4882a593Smuzhiyun 		switch (pkt.type) {
1302*4882a593Smuzhiyun 		case RADEON_PACKET_TYPE0:
1303*4882a593Smuzhiyun 			r = r100_cs_parse_packet0(p, &pkt,
1304*4882a593Smuzhiyun 						  p->rdev->config.r300.reg_safe_bm,
1305*4882a593Smuzhiyun 						  p->rdev->config.r300.reg_safe_bm_size,
1306*4882a593Smuzhiyun 						  &r300_packet0_check);
1307*4882a593Smuzhiyun 			break;
1308*4882a593Smuzhiyun 		case RADEON_PACKET_TYPE2:
1309*4882a593Smuzhiyun 			break;
1310*4882a593Smuzhiyun 		case RADEON_PACKET_TYPE3:
1311*4882a593Smuzhiyun 			r = r300_packet3_check(p, &pkt);
1312*4882a593Smuzhiyun 			break;
1313*4882a593Smuzhiyun 		default:
1314*4882a593Smuzhiyun 			DRM_ERROR("Unknown packet type %d!\n", pkt.type);
1315*4882a593Smuzhiyun 			return -EINVAL;
1316*4882a593Smuzhiyun 		}
1317*4882a593Smuzhiyun 		if (r)
1318*4882a593Smuzhiyun 			return r;
1320*4882a593Smuzhiyun 	} while (p->idx < p->chunk_ib->length_dw);
1321*4882a593Smuzhiyun 	return 0;
1322*4882a593Smuzhiyun }
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun void r300_set_reg_safe(struct radeon_device *rdev)
1325*4882a593Smuzhiyun {
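	/* reg_safe_bm has one bit per register dword; it tells the
	 * packet0 parser which registers may be written straight
	 * through and which must be routed to r300_packet0_check()
	 * (the exact polarity is defined by the generated
	 * r300_reg_safe.h table) */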
1326*4882a593Smuzhiyun 	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
1327*4882a593Smuzhiyun 	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
1328*4882a593Smuzhiyun }
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun void r300_mc_program(struct radeon_device *rdev)
1331*4882a593Smuzhiyun {
1332*4882a593Smuzhiyun 	struct r100_mc_save save;
1333*4882a593Smuzhiyun 	int r;
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 	r = r100_debugfs_mc_info_init(rdev);
1336*4882a593Smuzhiyun 	if (r)
1337*4882a593Smuzhiyun 		dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	/* Stop all MC clients */
1341*4882a593Smuzhiyun 	r100_mc_stop(rdev, &save);
1342*4882a593Smuzhiyun 	if (rdev->flags & RADEON_IS_AGP) {
1343*4882a593Smuzhiyun 		WREG32(R_00014C_MC_AGP_LOCATION,
1344*4882a593Smuzhiyun 			S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
1345*4882a593Smuzhiyun 			S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
1346*4882a593Smuzhiyun 		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
1347*4882a593Smuzhiyun 		WREG32(R_00015C_AGP_BASE_2,
1348*4882a593Smuzhiyun 			upper_32_bits(rdev->mc.agp_base) & 0xff);
1349*4882a593Smuzhiyun 	} else {
1350*4882a593Smuzhiyun 		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
1351*4882a593Smuzhiyun 		WREG32(R_000170_AGP_BASE, 0);
1352*4882a593Smuzhiyun 		WREG32(R_00015C_AGP_BASE_2, 0);
1353*4882a593Smuzhiyun 	}
1354*4882a593Smuzhiyun 	/* Wait for MC idle */
1355*4882a593Smuzhiyun 	if (r300_mc_wait_for_idle(rdev))
1356*4882a593Smuzhiyun 		DRM_INFO("Failed to wait for MC idle before programming MC.\n");
1357*4882a593Smuzhiyun 	/* Program the MC; the address space is limited to 32 bits */
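	/* illustration with made-up values: vram_start = 0 and
	 * vram_end = 0x07FFFFFF (a 128 MB aperture) would yield
	 * MC_FB_LOCATION = (0x07FF << 16) | 0x0000 = 0x07FF0000 */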
1358*4882a593Smuzhiyun 	WREG32(R_000148_MC_FB_LOCATION,
1359*4882a593Smuzhiyun 		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
1360*4882a593Smuzhiyun 		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
1361*4882a593Smuzhiyun 	r100_mc_resume(rdev, &save);
1362*4882a593Smuzhiyun }
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun void r300_clock_startup(struct radeon_device *rdev)
1365*4882a593Smuzhiyun {
1366*4882a593Smuzhiyun 	u32 tmp;
1367*4882a593Smuzhiyun 
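	/* radeon_dynclks is the "dynclks" module parameter; -1 keeps
	 * the BIOS default, so clock gating is only enabled here when
	 * the user asked for it explicitly */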
1368*4882a593Smuzhiyun 	if (radeon_dynclks != -1 && radeon_dynclks)
1369*4882a593Smuzhiyun 		radeon_legacy_set_clock_gating(rdev, 1);
1370*4882a593Smuzhiyun 	/* We need to force some of the blocks on */
1371*4882a593Smuzhiyun 	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
1372*4882a593Smuzhiyun 	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
1373*4882a593Smuzhiyun 	if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
1374*4882a593Smuzhiyun 		tmp |= S_00000D_FORCE_VAP(1);
1375*4882a593Smuzhiyun 	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
1376*4882a593Smuzhiyun }
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun static int r300_startup(struct radeon_device *rdev)
1379*4882a593Smuzhiyun {
1380*4882a593Smuzhiyun 	int r;
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun 	/* set common regs */
1383*4882a593Smuzhiyun 	r100_set_common_regs(rdev);
1384*4882a593Smuzhiyun 	/* program mc */
1385*4882a593Smuzhiyun 	r300_mc_program(rdev);
1386*4882a593Smuzhiyun 	/* Resume clock */
1387*4882a593Smuzhiyun 	r300_clock_startup(rdev);
1388*4882a593Smuzhiyun 	/* Initialize GPU configuration (# pipes, ...) */
1389*4882a593Smuzhiyun 	r300_gpu_init(rdev);
1390*4882a593Smuzhiyun 	/* Initialize GART (initialize after TTM so we can allocate
1391*4882a593Smuzhiyun 	 * memory through TTM but finalize after TTM) */
1392*4882a593Smuzhiyun 	if (rdev->flags & RADEON_IS_PCIE) {
1393*4882a593Smuzhiyun 		r = rv370_pcie_gart_enable(rdev);
1394*4882a593Smuzhiyun 		if (r)
1395*4882a593Smuzhiyun 			return r;
1396*4882a593Smuzhiyun 	}
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun 	if (rdev->family == CHIP_R300 ||
1399*4882a593Smuzhiyun 	    rdev->family == CHIP_R350 ||
1400*4882a593Smuzhiyun 	    rdev->family == CHIP_RV350)
1401*4882a593Smuzhiyun 		r100_enable_bm(rdev);
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun 	if (rdev->flags & RADEON_IS_PCI) {
1404*4882a593Smuzhiyun 		r = r100_pci_gart_enable(rdev);
1405*4882a593Smuzhiyun 		if (r)
1406*4882a593Smuzhiyun 			return r;
1407*4882a593Smuzhiyun 	}
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 	/* allocate wb buffer */
1410*4882a593Smuzhiyun 	r = radeon_wb_init(rdev);
1411*4882a593Smuzhiyun 	if (r)
1412*4882a593Smuzhiyun 		return r;
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun 	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
1415*4882a593Smuzhiyun 	if (r) {
1416*4882a593Smuzhiyun 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1417*4882a593Smuzhiyun 		return r;
1418*4882a593Smuzhiyun 	}
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 	/* Enable IRQ */
1421*4882a593Smuzhiyun 	if (!rdev->irq.installed) {
1422*4882a593Smuzhiyun 		r = radeon_irq_kms_init(rdev);
1423*4882a593Smuzhiyun 		if (r)
1424*4882a593Smuzhiyun 			return r;
1425*4882a593Smuzhiyun 	}
1426*4882a593Smuzhiyun 
1427*4882a593Smuzhiyun 	r100_irq_set(rdev);
1428*4882a593Smuzhiyun 	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
1429*4882a593Smuzhiyun 	/* 1M ring buffer */
1430*4882a593Smuzhiyun 	r = r100_cp_init(rdev, 1024 * 1024);
1431*4882a593Smuzhiyun 	if (r) {
1432*4882a593Smuzhiyun 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
1433*4882a593Smuzhiyun 		return r;
1434*4882a593Smuzhiyun 	}
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 	r = radeon_ib_pool_init(rdev);
1437*4882a593Smuzhiyun 	if (r) {
1438*4882a593Smuzhiyun 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
1439*4882a593Smuzhiyun 		return r;
1440*4882a593Smuzhiyun 	}
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun 	return 0;
1443*4882a593Smuzhiyun }
1444*4882a593Smuzhiyun 
1445*4882a593Smuzhiyun int r300_resume(struct radeon_device *rdev)
1446*4882a593Smuzhiyun {
1447*4882a593Smuzhiyun 	int r;
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun 	/* Make sure the GARTs are disabled */
1450*4882a593Smuzhiyun 	if (rdev->flags & RADEON_IS_PCIE)
1451*4882a593Smuzhiyun 		rv370_pcie_gart_disable(rdev);
1452*4882a593Smuzhiyun 	if (rdev->flags & RADEON_IS_PCI)
1453*4882a593Smuzhiyun 		r100_pci_gart_disable(rdev);
1454*4882a593Smuzhiyun 	/* Resume clock before doing reset */
1455*4882a593Smuzhiyun 	r300_clock_startup(rdev);
1456*4882a593Smuzhiyun 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
1457*4882a593Smuzhiyun 	if (radeon_asic_reset(rdev)) {
1458*4882a593Smuzhiyun 		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
1459*4882a593Smuzhiyun 			RREG32(R_000E40_RBBM_STATUS),
1460*4882a593Smuzhiyun 			RREG32(R_0007C0_CP_STAT));
1461*4882a593Smuzhiyun 	}
1462*4882a593Smuzhiyun 	/* post */
1463*4882a593Smuzhiyun 	radeon_combios_asic_init(rdev->ddev);
1464*4882a593Smuzhiyun 	/* Resume clock after posting */
1465*4882a593Smuzhiyun 	r300_clock_startup(rdev);
1466*4882a593Smuzhiyun 	/* Initialize surface registers */
1467*4882a593Smuzhiyun 	radeon_surface_init(rdev);
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun 	rdev->accel_working = true;
1470*4882a593Smuzhiyun 	r = r300_startup(rdev);
1471*4882a593Smuzhiyun 	if (r)
1472*4882a593Smuzhiyun 		rdev->accel_working = false;
1474*4882a593Smuzhiyun 	return r;
1475*4882a593Smuzhiyun }
1476*4882a593Smuzhiyun 
1477*4882a593Smuzhiyun int r300_suspend(struct radeon_device *rdev)
1478*4882a593Smuzhiyun {
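	/* tear down in roughly the reverse order of r300_startup() */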
1479*4882a593Smuzhiyun 	radeon_pm_suspend(rdev);
1480*4882a593Smuzhiyun 	r100_cp_disable(rdev);
1481*4882a593Smuzhiyun 	radeon_wb_disable(rdev);
1482*4882a593Smuzhiyun 	r100_irq_disable(rdev);
1483*4882a593Smuzhiyun 	if (rdev->flags & RADEON_IS_PCIE)
1484*4882a593Smuzhiyun 		rv370_pcie_gart_disable(rdev);
1485*4882a593Smuzhiyun 	if (rdev->flags & RADEON_IS_PCI)
1486*4882a593Smuzhiyun 		r100_pci_gart_disable(rdev);
1487*4882a593Smuzhiyun 	return 0;
1488*4882a593Smuzhiyun }
1489*4882a593Smuzhiyun 
1490*4882a593Smuzhiyun void r300_fini(struct radeon_device *rdev)
1491*4882a593Smuzhiyun {
1492*4882a593Smuzhiyun 	radeon_pm_fini(rdev);
1493*4882a593Smuzhiyun 	r100_cp_fini(rdev);
1494*4882a593Smuzhiyun 	radeon_wb_fini(rdev);
1495*4882a593Smuzhiyun 	radeon_ib_pool_fini(rdev);
1496*4882a593Smuzhiyun 	radeon_gem_fini(rdev);
1497*4882a593Smuzhiyun 	if (rdev->flags & RADEON_IS_PCIE)
1498*4882a593Smuzhiyun 		rv370_pcie_gart_fini(rdev);
1499*4882a593Smuzhiyun 	if (rdev->flags & RADEON_IS_PCI)
1500*4882a593Smuzhiyun 		r100_pci_gart_fini(rdev);
1501*4882a593Smuzhiyun 	radeon_agp_fini(rdev);
1502*4882a593Smuzhiyun 	radeon_irq_kms_fini(rdev);
1503*4882a593Smuzhiyun 	radeon_fence_driver_fini(rdev);
1504*4882a593Smuzhiyun 	radeon_bo_fini(rdev);
1505*4882a593Smuzhiyun 	radeon_atombios_fini(rdev);
1506*4882a593Smuzhiyun 	kfree(rdev->bios);
1507*4882a593Smuzhiyun 	rdev->bios = NULL;
1508*4882a593Smuzhiyun }
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun int r300_init(struct radeon_device *rdev)
1511*4882a593Smuzhiyun {
1512*4882a593Smuzhiyun 	int r;
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	/* Disable VGA */
1515*4882a593Smuzhiyun 	r100_vga_render_disable(rdev);
1516*4882a593Smuzhiyun 	/* Initialize scratch registers */
1517*4882a593Smuzhiyun 	radeon_scratch_init(rdev);
1518*4882a593Smuzhiyun 	/* Initialize surface registers */
1519*4882a593Smuzhiyun 	radeon_surface_init(rdev);
1520*4882a593Smuzhiyun 	/* TODO: disabling VGA properly needs to use the VGA request mechanism */
1521*4882a593Smuzhiyun 	/* restore some registers to sane defaults */
1522*4882a593Smuzhiyun 	r100_restore_sanity(rdev);
1523*4882a593Smuzhiyun 	/* BIOS */
1524*4882a593Smuzhiyun 	if (!radeon_get_bios(rdev)) {
1525*4882a593Smuzhiyun 		if (ASIC_IS_AVIVO(rdev))
1526*4882a593Smuzhiyun 			return -EINVAL;
1527*4882a593Smuzhiyun 	}
1528*4882a593Smuzhiyun 	if (rdev->is_atom_bios) {
1529*4882a593Smuzhiyun 		dev_err(rdev->dev, "Expecting combios for R300-family GPU\n");
1530*4882a593Smuzhiyun 		return -EINVAL;
1531*4882a593Smuzhiyun 	} else {
1532*4882a593Smuzhiyun 		r = radeon_combios_init(rdev);
1533*4882a593Smuzhiyun 		if (r)
1534*4882a593Smuzhiyun 			return r;
1535*4882a593Smuzhiyun 	}
1536*4882a593Smuzhiyun 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
1537*4882a593Smuzhiyun 	if (radeon_asic_reset(rdev)) {
1538*4882a593Smuzhiyun 		dev_warn(rdev->dev,
1539*4882a593Smuzhiyun 			"GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
1540*4882a593Smuzhiyun 			RREG32(R_000E40_RBBM_STATUS),
1541*4882a593Smuzhiyun 			RREG32(R_0007C0_CP_STAT));
1542*4882a593Smuzhiyun 	}
1543*4882a593Smuzhiyun 	/* check whether the card is posted */
1544*4882a593Smuzhiyun 	if (!radeon_boot_test_post_card(rdev))
1545*4882a593Smuzhiyun 		return -EINVAL;
1546*4882a593Smuzhiyun 	/* Set asic errata */
1547*4882a593Smuzhiyun 	r300_errata(rdev);
1548*4882a593Smuzhiyun 	/* Initialize clocks */
1549*4882a593Smuzhiyun 	radeon_get_clock_info(rdev->ddev);
1550*4882a593Smuzhiyun 	/* initialize AGP */
1551*4882a593Smuzhiyun 	if (rdev->flags & RADEON_IS_AGP) {
1552*4882a593Smuzhiyun 		r = radeon_agp_init(rdev);
1553*4882a593Smuzhiyun 		if (r)
1554*4882a593Smuzhiyun 			radeon_agp_disable(rdev);
1556*4882a593Smuzhiyun 	}
1557*4882a593Smuzhiyun 	/* initialize memory controller */
1558*4882a593Smuzhiyun 	r300_mc_init(rdev);
1559*4882a593Smuzhiyun 	/* Fence driver */
1560*4882a593Smuzhiyun 	r = radeon_fence_driver_init(rdev);
1561*4882a593Smuzhiyun 	if (r)
1562*4882a593Smuzhiyun 		return r;
1563*4882a593Smuzhiyun 	/* Memory manager */
1564*4882a593Smuzhiyun 	r = radeon_bo_init(rdev);
1565*4882a593Smuzhiyun 	if (r)
1566*4882a593Smuzhiyun 		return r;
1567*4882a593Smuzhiyun 	if (rdev->flags & RADEON_IS_PCIE) {
1568*4882a593Smuzhiyun 		r = rv370_pcie_gart_init(rdev);
1569*4882a593Smuzhiyun 		if (r)
1570*4882a593Smuzhiyun 			return r;
1571*4882a593Smuzhiyun 	}
1572*4882a593Smuzhiyun 	if (rdev->flags & RADEON_IS_PCI) {
1573*4882a593Smuzhiyun 		r = r100_pci_gart_init(rdev);
1574*4882a593Smuzhiyun 		if (r)
1575*4882a593Smuzhiyun 			return r;
1576*4882a593Smuzhiyun 	}
1577*4882a593Smuzhiyun 	r300_set_reg_safe(rdev);
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun 	/* Initialize power management */
1580*4882a593Smuzhiyun 	radeon_pm_init(rdev);
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun 	rdev->accel_working = true;
1583*4882a593Smuzhiyun 	r = r300_startup(rdev);
1584*4882a593Smuzhiyun 	if (r) {
1585*4882a593Smuzhiyun 		/* Something went wrong with the accel init, so stop accel */
1586*4882a593Smuzhiyun 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
1587*4882a593Smuzhiyun 		r100_cp_fini(rdev);
1588*4882a593Smuzhiyun 		radeon_wb_fini(rdev);
1589*4882a593Smuzhiyun 		radeon_ib_pool_fini(rdev);
1590*4882a593Smuzhiyun 		radeon_irq_kms_fini(rdev);
1591*4882a593Smuzhiyun 		if (rdev->flags & RADEON_IS_PCIE)
1592*4882a593Smuzhiyun 			rv370_pcie_gart_fini(rdev);
1593*4882a593Smuzhiyun 		if (rdev->flags & RADEON_IS_PCI)
1594*4882a593Smuzhiyun 			r100_pci_gart_fini(rdev);
1595*4882a593Smuzhiyun 		radeon_agp_fini(rdev);
1596*4882a593Smuzhiyun 		rdev->accel_working = false;
1597*4882a593Smuzhiyun 	}
1598*4882a593Smuzhiyun 	return 0;
1599*4882a593Smuzhiyun }
1600