/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm.h>

#include "radeon.h"
#include "radeon_ucode.h"
#include "r600d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#define FIRMWARE_R600		"radeon/R600_uvd.bin"
#define FIRMWARE_RS780		"radeon/RS780_uvd.bin"
#define FIRMWARE_RV770		"radeon/RV770_uvd.bin"
#define FIRMWARE_RV710		"radeon/RV710_uvd.bin"
#define FIRMWARE_CYPRESS	"radeon/CYPRESS_uvd.bin"
#define FIRMWARE_SUMO		"radeon/SUMO_uvd.bin"
#define FIRMWARE_TAHITI		"radeon/TAHITI_uvd.bin"
#define FIRMWARE_BONAIRE_LEGACY	"radeon/BONAIRE_uvd.bin"
#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"

MODULE_FIRMWARE(FIRMWARE_R600);
MODULE_FIRMWARE(FIRMWARE_RS780);
MODULE_FIRMWARE(FIRMWARE_RV770);
MODULE_FIRMWARE(FIRMWARE_RV710);
MODULE_FIRMWARE(FIRMWARE_CYPRESS);
MODULE_FIRMWARE(FIRMWARE_SUMO);
MODULE_FIRMWARE(FIRMWARE_TAHITI);
MODULE_FIRMWARE(FIRMWARE_BONAIRE_LEGACY);
MODULE_FIRMWARE(FIRMWARE_BONAIRE);

static void radeon_uvd_idle_work_handler(struct work_struct *work);

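/**
 * radeon_uvd_init - UVD driver init
 *
 * @rdev: radeon_device pointer
 *
 * Pick the UVD firmware matching the chip family, request it from
 * userspace, allocate and pin the VCPU buffer object and clear the
 * session handle bookkeeping.
 * Returns 0 on success, negative error code on failure.
 */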
int radeon_uvd_init(struct radeon_device *rdev)
{
	unsigned long bo_size;
	const char *fw_name = NULL, *legacy_fw_name = NULL;
	int i, r;

	INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV670:
	case CHIP_RV620:
	case CHIP_RV635:
		legacy_fw_name = FIRMWARE_R600;
		break;

	case CHIP_RS780:
	case CHIP_RS880:
		legacy_fw_name = FIRMWARE_RS780;
		break;

	case CHIP_RV770:
		legacy_fw_name = FIRMWARE_RV770;
		break;

	case CHIP_RV710:
	case CHIP_RV730:
	case CHIP_RV740:
		legacy_fw_name = FIRMWARE_RV710;
		break;

	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	case CHIP_REDWOOD:
	case CHIP_CEDAR:
		legacy_fw_name = FIRMWARE_CYPRESS;
		break;

	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_PALM:
	case CHIP_CAYMAN:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		legacy_fw_name = FIRMWARE_SUMO;
		break;

	case CHIP_TAHITI:
	case CHIP_VERDE:
	case CHIP_PITCAIRN:
	case CHIP_ARUBA:
	case CHIP_OLAND:
		legacy_fw_name = FIRMWARE_TAHITI;
		break;

	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		legacy_fw_name = FIRMWARE_BONAIRE_LEGACY;
		fw_name = FIRMWARE_BONAIRE;
		break;

	default:
		return -EINVAL;
	}

	rdev->uvd.fw_header_present = false;
	rdev->uvd.max_handles = RADEON_DEFAULT_UVD_HANDLES;
	if (fw_name) {
		/* Let's try to load the newer firmware first */
		r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
		if (r) {
			dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
				fw_name);
		} else {
			struct common_firmware_header *hdr = (void *)rdev->uvd_fw->data;
			unsigned version_major, version_minor, family_id;

			r = radeon_ucode_validate(rdev->uvd_fw);
			if (r)
				return r;

			rdev->uvd.fw_header_present = true;

			family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
			version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
			version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
			DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n",
				 version_major, version_minor, family_id);

			/*
			 * Limit the number of UVD handles depending on
			 * microcode major and minor versions.
			 */
			if ((version_major >= 0x01) && (version_minor >= 0x37))
				rdev->uvd.max_handles = RADEON_MAX_UVD_HANDLES;
		}
	}

	/*
	 * In case there is only legacy firmware, or we encounter an error
	 * while loading the new firmware, we fall back to loading the legacy
	 * firmware now.
	 */
	if (!fw_name || r) {
		r = request_firmware(&rdev->uvd_fw, legacy_fw_name, rdev->dev);
		if (r) {
			dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
				legacy_fw_name);
			return r;
		}
	}

	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
		  RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles;
	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &rdev->uvd.vcpu_bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (r) {
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->uvd.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
	if (r) {
		dev_err(rdev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	radeon_bo_unreserve(rdev->uvd.vcpu_bo);

	for (i = 0; i < rdev->uvd.max_handles; ++i) {
		atomic_set(&rdev->uvd.handles[i], 0);
		rdev->uvd.filp[i] = NULL;
		rdev->uvd.img_size[i] = 0;
	}

	return 0;
}

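/**
 * radeon_uvd_fini - UVD driver teardown
 *
 * @rdev: radeon_device pointer
 *
 * Unmap, unpin and free the VCPU buffer object, tear down the UVD
 * ring and release the firmware image.
 */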
void radeon_uvd_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->uvd.vcpu_bo == NULL)
		return;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (!r) {
		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
		radeon_bo_unpin(rdev->uvd.vcpu_bo);
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	}

	radeon_bo_unref(&rdev->uvd.vcpu_bo);

	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);

	release_firmware(rdev->uvd_fw);
}

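/**
 * radeon_uvd_suspend - destroy all open UVD streams
 *
 * @rdev: radeon_device pointer
 *
 * Send a destroy message for every open handle and wait for the
 * resulting fences so the engine is idle before suspend.
 * Always returns 0.
 */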
int radeon_uvd_suspend(struct radeon_device *rdev)
{
	int i, r;

	if (rdev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < rdev->uvd.max_handles; ++i) {
		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
		if (handle != 0) {
			struct radeon_fence *fence;

			radeon_uvd_note_usage(rdev);

			r = radeon_uvd_get_destroy_msg(rdev,
				R600_RING_TYPE_UVD_INDEX, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			radeon_fence_wait(fence, false);
			radeon_fence_unref(&fence);

			rdev->uvd.filp[i] = NULL;
			atomic_set(&rdev->uvd.handles[i], 0);
		}
	}

	return 0;
}

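/**
 * radeon_uvd_resume - reload the firmware into the VCPU bo
 *
 * @rdev: radeon_device pointer
 *
 * Copy the firmware image to the start of the VCPU buffer object and
 * clear the remaining stack/heap/session area.
 * Returns 0 on success, -EINVAL if the bo was never allocated.
 */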
int radeon_uvd_resume(struct radeon_device *rdev)
{
	unsigned size;
	void *ptr;

	if (rdev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);

	size = radeon_bo_size(rdev->uvd.vcpu_bo);
	size -= rdev->uvd_fw->size;

	ptr = rdev->uvd.cpu_addr;
	ptr += rdev->uvd_fw->size;

	memset_io((void __iomem *)ptr, 0, size);

	return 0;
}

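/**
 * radeon_uvd_force_into_uvd_segment - restrict a bo to the UVD segments
 *
 * @rbo: buffer object to restrict
 * @allowed_domains: domains the bo may be placed in
 *
 * Limit all placements to the first 256MB and, when the bo may also
 * live outside VRAM, add a second 256MB segment as alternative
 * placement.
 */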
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
				       uint32_t allowed_domains)
{
	int i;

	for (i = 0; i < rbo->placement.num_placement; ++i) {
		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}

	/* If it must be in VRAM it must be in the first segment as well */
	if (allowed_domains == RADEON_GEM_DOMAIN_VRAM)
		return;

	/* abort if we already have more than one placement */
	if (rbo->placement.num_placement > 1)
		return;

	/* add another 256MB segment */
	rbo->placements[1] = rbo->placements[0];
	rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
	rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
	rbo->placement.num_placement++;
	rbo->placement.num_busy_placement++;
}

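/**
 * radeon_uvd_free_handles - destroy all streams owned by a file
 *
 * @rdev: radeon_device pointer
 * @filp: file whose handles should be freed
 *
 * Send destroy messages for all handles opened through @filp, e.g. on
 * file release, and mark the slots as free again.
 */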
void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
	int i, r;

	for (i = 0; i < rdev->uvd.max_handles; ++i) {
		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
		if (handle != 0 && rdev->uvd.filp[i] == filp) {
			struct radeon_fence *fence;

			radeon_uvd_note_usage(rdev);

			r = radeon_uvd_get_destroy_msg(rdev,
				R600_RING_TYPE_UVD_INDEX, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			radeon_fence_wait(fence, false);
			radeon_fence_unref(&fence);

			rdev->uvd.filp[i] = NULL;
			atomic_set(&rdev->uvd.handles[i], 0);
		}
	}
}

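/**
 * radeon_uvd_cs_msg_decode - validate a decode message
 *
 * @msg: pointer to the mapped UVD message
 * @buf_sizes: minimum buffer sizes to fill in
 *
 * Check the decoding target pitch and the DPB size against the
 * per-codec minimum and remember the minimum DPB and image sizes for
 * the following relocations.
 * Returns 0 on success, -EINVAL otherwise.
 */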
static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);

	unsigned image_size, tmp, min_dpb_size;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */

		/* reference picture buffer */
		min_dpb_size = image_size * 17;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * 17 * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	return 0;
}

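/**
 * radeon_uvd_validate_codec - check if a codec is supported
 *
 * @p: parser context
 * @stream_type: codec id from the UVD message
 *
 * H264 and VC1 are always supported, MPEG2 and MPEG4 only from UVD 3
 * (CHIP_PALM and newer) on.
 * Returns 0 if the codec is supported, -EINVAL otherwise.
 */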
static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
				     unsigned stream_type)
{
	switch (stream_type) {
	case 0: /* H264 */
	case 1: /* VC1 */
		/* always supported */
		return 0;

	case 3: /* MPEG2 */
	case 4: /* MPEG4 */
		/* only since UVD 3 */
		if (p->rdev->family >= CHIP_PALM)
			return 0;

		fallthrough;
	default:
		DRM_ERROR("UVD codec not supported by hardware %d!\n",
			  stream_type);
		return -EINVAL;
	}
}

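/**
 * radeon_uvd_cs_msg - handle a UVD message from userspace
 *
 * @p: parser context
 * @bo: buffer object containing the message
 * @offset: offset of the message inside the bo
 * @buf_sizes: minimum buffer sizes for decode messages
 *
 * Wait for the bo to become idle, map it and dispatch on the message
 * type: allocate a session handle for create messages, validate codec
 * and buffer sizes for decode messages and free the handle again for
 * destroy messages.
 */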
static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
			     unsigned offset, unsigned buf_sizes[])
{
	int32_t *msg, msg_type, handle;
	unsigned img_size = 0;
	struct dma_fence *f;
	void *ptr;

	int i, r;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	f = dma_resv_get_excl(bo->tbo.base.resv);
	if (f) {
		r = radeon_fence_wait((struct radeon_fence *)f, false);
		if (r) {
			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
			return r;
		}
	}

	r = radeon_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		/* unmap before bailing out so the kmap doesn't leak */
		radeon_bo_kunmap(bo);
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, calc image size (width * height) */
		img_size = msg[7] * msg[8];

		r = radeon_uvd_validate_codec(p, msg[4]);
		radeon_bo_kunmap(bo);
		if (r)
			return r;

		/* try to alloc a new handle */
		for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
			if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
				DRM_ERROR("Handle 0x%x already in use!\n", handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
				p->rdev->uvd.filp[i] = p->filp;
				p->rdev->uvd.img_size[i] = img_size;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD handles!\n");
		return -EINVAL;

	case 1:
		/* it's a decode msg, validate codec and calc buffer sizes */
		r = radeon_uvd_validate_codec(p, msg[4]);
		if (!r)
			r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
		radeon_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
			if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
				if (p->rdev->uvd.filp[i] != p->filp) {
					DRM_ERROR("UVD handle collision detected!\n");
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < p->rdev->uvd.max_handles; ++i)
			atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
		radeon_bo_kunmap(bo);
		return 0;

	default:
		/* unmap before bailing out so the kmap doesn't leak */
		radeon_bo_kunmap(bo);
		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
		return -EINVAL;
	}

	BUG();
	return -EINVAL;
}

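/**
 * radeon_uvd_cs_reloc - patch and check a single relocation
 *
 * @p: parser context
 * @data0: index of the DATA0 dword inside the IB
 * @data1: index of the DATA1 dword inside the IB
 * @buf_sizes: minimum buffer sizes from the decode message
 * @has_msg_cmd: set as soon as the message command was seen
 *
 * Patch the real GPU address into the IB, check command and buffer
 * sizes as well as the 256MB segment limits, and hand message buffers
 * over to radeon_uvd_cs_msg().
 */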
static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
			       int data0, int data1,
			       unsigned buf_sizes[], bool *has_msg_cmd)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_bo_list *reloc;
	unsigned idx, cmd, offset;
	uint64_t start, end;
	int r;

	relocs_chunk = p->chunk_relocs;
	offset = radeon_get_ib_value(p, data0);
	idx = radeon_get_ib_value(p, data1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}

	reloc = &p->relocs[(idx / 4)];
	start = reloc->gpu_offset;
	end = start + radeon_bo_size(reloc->robj);
	start += offset;

	p->ib.ptr[data0] = start & 0xFFFFFFFF;
	p->ib.ptr[data1] = start >> 32;

	cmd = radeon_get_ib_value(p, p->idx) >> 1;

	if (cmd < 0x4) {
		if (end <= start) {
			DRM_ERROR("invalid reloc offset %X!\n", offset);
			return -EINVAL;
		}
		if ((end - start) < buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start), buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd != 0x100) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if ((start >> 28) != ((end - 1) >> 28)) {
		DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
			  start, end);
		return -EINVAL;
	}

	/* TODO: is this still necessary on NI+ ? */
	if ((cmd == 0 || cmd == 0x3) &&
	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
		DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
			  start, end);
		return -EINVAL;
	}

	if (cmd == 0) {
		if (*has_msg_cmd) {
			DRM_ERROR("More than one message in a UVD-IB!\n");
			return -EINVAL;
		}
		*has_msg_cmd = true;
		r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
		if (r)
			return r;
	} else if (!*has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

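/**
 * radeon_uvd_cs_reg - parse a type 0 register write packet
 *
 * @p: parser context
 * @pkt: packet to parse
 * @data0: updated with the index of the last DATA0 write
 * @data1: updated with the index of the last DATA1 write
 * @buf_sizes: minimum buffer sizes from the decode message
 * @has_msg_cmd: set as soon as the message command was seen
 *
 * Only the GPCOM VCPU data/cmd, engine control and NOP registers may
 * be written; a VCPU command write triggers relocation handling.
 */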
static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int *data0, int *data1,
			     unsigned buf_sizes[],
			     bool *has_msg_cmd)
{
	int i, r;

	p->idx++;
	for (i = 0; i <= pkt->count; ++i) {
		switch (pkt->reg + i*4) {
		case UVD_GPCOM_VCPU_DATA0:
			*data0 = p->idx;
			break;
		case UVD_GPCOM_VCPU_DATA1:
			*data1 = p->idx;
			break;
		case UVD_GPCOM_VCPU_CMD:
			r = radeon_uvd_cs_reloc(p, *data0, *data1,
						buf_sizes, has_msg_cmd);
			if (r)
				return r;
			break;
		case UVD_ENGINE_CNTL:
		case UVD_NO_OP:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n",
				  pkt->reg + i*4);
			return -EINVAL;
		}
		p->idx++;
	}
	return 0;
}

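/**
 * radeon_uvd_cs_parse - validate a complete UVD command stream
 *
 * @p: parser context
 *
 * Walk all packets in the IB, check alignment and that only the
 * allowed registers are written, and require at least one message
 * command.
 * Returns 0 on success, negative error code on failure.
 */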
int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r, data0 = 0, data1 = 0;

	/* does the IB have a msg command */
	bool has_msg_cmd = false;

	/* minimum buffer sizes */
	unsigned buf_sizes[] = {
		[0x00000000]	=	2048,
		[0x00000001]	=	32 * 1024 * 1024,
		[0x00000002]	=	2048 * 1152 * 3,
		[0x00000003]	=	2048,
	};

	if (p->chunk_ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  p->chunk_ib->length_dw);
		return -EINVAL;
	}

	if (p->chunk_relocs == NULL) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}

	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r)
			return r;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
					      buf_sizes, &has_msg_cmd);
			if (r)
				return r;
			break;
		case RADEON_PACKET_TYPE2:
			p->idx += pkt.count + 2;
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			return -EINVAL;
		}
	} while (p->idx < p->chunk_ib->length_dw);

	if (!has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}

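/**
 * radeon_uvd_send_msg - submit a message buffer to the VCPU
 *
 * @rdev: radeon_device pointer
 * @ring: ring index to submit on
 * @addr: GPU address of the message buffer
 * @fence: location to return the fence, may be NULL
 *
 * Build a small IB pointing the VCPU at the message buffer, pad it
 * with NOPs and schedule it.
 */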
static int radeon_uvd_send_msg(struct radeon_device *rdev,
			       int ring, uint64_t addr,
			       struct radeon_fence **fence)
{
	struct radeon_ib ib;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
	if (r)
		return r;

	ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
	ib.ptr[1] = addr;
	ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
	ib.ptr[3] = addr >> 32;
	ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
	ib.ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib.ptr[i] = PACKET0(UVD_NO_OP, 0);
		ib.ptr[i+1] = 0;
	}
	ib.length_dw = 16;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);
	return r;
}

/*
 * multiple fence commands without any stream commands in between can
 * crash the vcpu so just try to emit a dummy create/destroy msg to
 * avoid this
 */
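/**
 * radeon_uvd_get_create_msg - send a dummy create message
 *
 * @rdev: radeon_device pointer
 * @ring: ring index to submit on
 * @handle: session handle to use
 * @fence: location to return the fence, may be NULL
 *
 * Write a minimal create message into the last page of the VCPU bo
 * and submit it.
 */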
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	/* we use the last page of the vcpu bo for the UVD message */
	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
		RADEON_GPU_PAGE_SIZE;

	uint32_t *msg = rdev->uvd.cpu_addr + offs;
	uint64_t addr = rdev->uvd.gpu_addr + offs;

	int r, i;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
	if (r)
		return r;

	/* stitch together a UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	return r;
}

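/**
 * radeon_uvd_get_destroy_msg - send a dummy destroy message
 *
 * @rdev: radeon_device pointer
 * @ring: ring index to submit on
 * @handle: session handle to destroy
 * @fence: location to return the fence, may be NULL
 *
 * Write a destroy message for @handle into the last page of the VCPU
 * bo and submit it.
 */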
int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	/* we use the last page of the vcpu bo for the UVD message */
	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
		RADEON_GPU_PAGE_SIZE;

	uint32_t *msg = rdev->uvd.cpu_addr + offs;
	uint64_t addr = rdev->uvd.gpu_addr + offs;

	int r, i;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
	if (r)
		return r;

	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	return r;
}

/**
 * radeon_uvd_count_handles - count number of open streams
 *
 * @rdev: radeon_device pointer
 * @sd: number of SD streams
 * @hd: number of HD streams
 *
 * Count the number of open SD/HD streams as a hint for power management
 */
static void radeon_uvd_count_handles(struct radeon_device *rdev,
				     unsigned *sd, unsigned *hd)
{
	unsigned i;

	*sd = 0;
	*hd = 0;

	for (i = 0; i < rdev->uvd.max_handles; ++i) {
		if (!atomic_read(&rdev->uvd.handles[i]))
			continue;

		if (rdev->uvd.img_size[i] >= 720*576)
			++(*hd);
		else
			++(*sd);
	}
}

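/**
 * radeon_uvd_idle_work_handler - power down UVD when idle
 *
 * @work: delayed work item
 *
 * If no UVD fences are outstanding, power the block down (through DPM
 * if available, otherwise by dropping the UVD clocks to zero);
 * otherwise re-arm the delayed work.
 */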
static void radeon_uvd_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device, uvd.idle_work.work);

	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd,
						 &rdev->pm.dpm.hd);
			radeon_dpm_enable_uvd(rdev, false);
		} else {
			radeon_set_uvd_clocks(rdev, 0, 0);
		}
	} else {
		schedule_delayed_work(&rdev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	}
}

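/**
 * radeon_uvd_note_usage - note that UVD is about to be used
 *
 * @rdev: radeon_device pointer
 *
 * (Re-)arm the idle work and, if the block was idle, bring the clocks
 * back up (through DPM if available).
 */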
void radeon_uvd_note_usage(struct radeon_device *rdev)
{
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		unsigned hd = 0, sd = 0;
		radeon_uvd_count_handles(rdev, &sd, &hd);
		if ((rdev->pm.dpm.sd != sd) ||
		    (rdev->pm.dpm.hd != hd)) {
			rdev->pm.dpm.sd = sd;
			rdev->pm.dpm.hd = hd;
			/* disable this for now */
			/*streams_changed = true;*/
		}
	}

	if (set_clocks || streams_changed) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_dpm_enable_uvd(rdev, true);
		} else {
			radeon_set_uvd_clocks(rdev, 53300, 40000);
		}
	}
}

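/**
 * radeon_uvd_calc_upll_post_div - calc UPLL post divider
 *
 * @vco_freq: VCO frequency
 * @target_freq: wanted output frequency
 * @pd_min: post divider minimum
 * @pd_even: post divider must be even above this value
 *
 * Pick the smallest post divider that still yields a frequency less
 * than or equal to the target, respecting the minimum and the
 * even-divider constraint.
 */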
static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
					      unsigned target_freq,
					      unsigned pd_min,
					      unsigned pd_even)
{
	unsigned post_div = vco_freq / target_freq;

	/* adjust to post divider minimum value */
	if (post_div < pd_min)
		post_div = pd_min;

	/* we always need a frequency less than or equal the target */
	if ((vco_freq / post_div) > target_freq)
		post_div += 1;

	/* post dividers above a certain value must be even */
	if (post_div > pd_even && post_div % 2)
		post_div += 1;

	return post_div;
}

/**
 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
 *
 * @rdev: radeon_device pointer
 * @vclk: wanted VCLK
 * @dclk: wanted DCLK
 * @vco_min: minimum VCO frequency
 * @vco_max: maximum VCO frequency
 * @fb_factor: factor to multiply vco freq with
 * @fb_mask: limit and bitmask for feedback divider
 * @pd_min: post divider minimum
 * @pd_max: post divider maximum
 * @pd_even: post divider must be even above this value
 * @optimal_fb_div: resulting feedback divider
 * @optimal_vclk_div: resulting vclk post divider
 * @optimal_dclk_div: resulting dclk post divider
 *
 * Calculate dividers for UVD's UPLL (R6xx-SI, except APUs).
 * Returns zero on success, -EINVAL on error.
 */
int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
				  unsigned vclk, unsigned dclk,
				  unsigned vco_min, unsigned vco_max,
				  unsigned fb_factor, unsigned fb_mask,
				  unsigned pd_min, unsigned pd_max,
				  unsigned pd_even,
				  unsigned *optimal_fb_div,
				  unsigned *optimal_vclk_div,
				  unsigned *optimal_dclk_div)
{
	unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;

	/* start off with something large */
	unsigned optimal_score = ~0;

	/* loop through vco from low to high */
	vco_min = max(max(vco_min, vclk), dclk);
	for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {

		uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
		unsigned vclk_div, dclk_div, score;

		do_div(fb_div, ref_freq);

		/* fb div out of range ? */
		if (fb_div > fb_mask)
			break; /* it can only get worse */

		fb_div &= fb_mask;

		/* calc vclk divider with current vco freq */
		vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
							 pd_min, pd_even);
		if (vclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc dclk divider with current vco freq */
		dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
							 pd_min, pd_even);
		if (dclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc score with current vco freq */
		score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);

		/* determine if this vco setting is better than current optimal settings */
		if (score < optimal_score) {
			*optimal_fb_div = fb_div;
			*optimal_vclk_div = vclk_div;
			*optimal_dclk_div = dclk_div;
			optimal_score = score;
			if (optimal_score == 0)
				break; /* it can't get better than this */
		}
	}

	/* did we find a valid setup ? */
	if (optimal_score == ~0)
		return -EINVAL;

	return 0;
}

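/**
 * radeon_uvd_send_upll_ctlreq - emit the UPLL control request
 *
 * @rdev: radeon_device pointer
 * @cg_upll_func_cntl: offset of the CG_UPLL_FUNC_CNTL register
 *
 * Toggle UPLL_CTLREQ and poll for both CTLACK bits, giving the PLL up
 * to a second to lock.
 * Returns 0 on success, -ETIMEDOUT on timeout.
 */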
int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
				unsigned cg_upll_func_cntl)
{
	unsigned i;

	/* make sure UPLL_CTLREQ is deasserted */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	mdelay(10);

	/* assert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);

	/* wait for CTLACK and CTLACK2 to get asserted */
	for (i = 0; i < 100; ++i) {
		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
		if ((RREG32(cg_upll_func_cntl) & mask) == mask)
			break;
		mdelay(10);
	}

	/* deassert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	if (i == 100) {
		DRM_ERROR("Timeout setting UVD clocks!\n");
		return -ETIMEDOUT;
	}

	return 0;
}
1053