xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright 2011 Advanced Micro Devices, Inc.
3*4882a593Smuzhiyun  * All Rights Reserved.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Permission is hereby granted, free of charge, to any person obtaining a
6*4882a593Smuzhiyun  * copy of this software and associated documentation files (the
7*4882a593Smuzhiyun  * "Software"), to deal in the Software without restriction, including
8*4882a593Smuzhiyun  * without limitation the rights to use, copy, modify, merge, publish,
9*4882a593Smuzhiyun  * distribute, sub license, and/or sell copies of the Software, and to
10*4882a593Smuzhiyun  * permit persons to whom the Software is furnished to do so, subject to
11*4882a593Smuzhiyun  * the following conditions:
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14*4882a593Smuzhiyun  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15*4882a593Smuzhiyun  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16*4882a593Smuzhiyun  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17*4882a593Smuzhiyun  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18*4882a593Smuzhiyun  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19*4882a593Smuzhiyun  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20*4882a593Smuzhiyun  *
21*4882a593Smuzhiyun  * The above copyright notice and this permission notice (including the
22*4882a593Smuzhiyun  * next paragraph) shall be included in all copies or substantial portions
23*4882a593Smuzhiyun  * of the Software.
24*4882a593Smuzhiyun  *
25*4882a593Smuzhiyun  */
26*4882a593Smuzhiyun /*
27*4882a593Smuzhiyun  * Authors:
28*4882a593Smuzhiyun  *    Christian König <deathsimple@vodafone.de>
29*4882a593Smuzhiyun  */
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun #include <linux/firmware.h>
32*4882a593Smuzhiyun #include <linux/module.h>
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun #include <drm/drm.h>
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun #include "amdgpu.h"
37*4882a593Smuzhiyun #include "amdgpu_pm.h"
38*4882a593Smuzhiyun #include "amdgpu_uvd.h"
39*4882a593Smuzhiyun #include "cikd.h"
40*4882a593Smuzhiyun #include "uvd/uvd_4_2_d.h"
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun #include "amdgpu_ras.h"
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun /* 1 second timeout */
45*4882a593Smuzhiyun #define UVD_IDLE_TIMEOUT	msecs_to_jiffies(1000)
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun /* Firmware versions for VI */
48*4882a593Smuzhiyun #define FW_1_65_10	((1 << 24) | (65 << 16) | (10 << 8))
49*4882a593Smuzhiyun #define FW_1_87_11	((1 << 24) | (87 << 16) | (11 << 8))
50*4882a593Smuzhiyun #define FW_1_87_12	((1 << 24) | (87 << 16) | (12 << 8))
51*4882a593Smuzhiyun #define FW_1_37_15	((1 << 24) | (37 << 16) | (15 << 8))
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun /* Polaris10/11 firmware version */
54*4882a593Smuzhiyun #define FW_1_66_16	((1 << 24) | (66 << 16) | (16 << 8))
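/*
 * Sanity check on the packing, worked by hand: FW_1_66_16 places major 1,
 * minor 66 and revision 16 in the top three bytes,
 *
 *   (1 << 24) | (66 << 16) | (16 << 8) == 0x01421000
 *
 * which compares directly against adev->uvd.fw_version, since
 * amdgpu_uvd_sw_init() assembles that field with the same layout.
 */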
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun /* Firmware Names */
57*4882a593Smuzhiyun #ifdef CONFIG_DRM_AMDGPU_SI
58*4882a593Smuzhiyun #define FIRMWARE_TAHITI		"amdgpu/tahiti_uvd.bin"
59*4882a593Smuzhiyun #define FIRMWARE_VERDE		"amdgpu/verde_uvd.bin"
60*4882a593Smuzhiyun #define FIRMWARE_PITCAIRN	"amdgpu/pitcairn_uvd.bin"
61*4882a593Smuzhiyun #define FIRMWARE_OLAND		"amdgpu/oland_uvd.bin"
62*4882a593Smuzhiyun #endif
63*4882a593Smuzhiyun #ifdef CONFIG_DRM_AMDGPU_CIK
64*4882a593Smuzhiyun #define FIRMWARE_BONAIRE	"amdgpu/bonaire_uvd.bin"
65*4882a593Smuzhiyun #define FIRMWARE_KABINI	"amdgpu/kabini_uvd.bin"
66*4882a593Smuzhiyun #define FIRMWARE_KAVERI	"amdgpu/kaveri_uvd.bin"
67*4882a593Smuzhiyun #define FIRMWARE_HAWAII	"amdgpu/hawaii_uvd.bin"
68*4882a593Smuzhiyun #define FIRMWARE_MULLINS	"amdgpu/mullins_uvd.bin"
69*4882a593Smuzhiyun #endif
70*4882a593Smuzhiyun #define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
71*4882a593Smuzhiyun #define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
72*4882a593Smuzhiyun #define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
73*4882a593Smuzhiyun #define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"
74*4882a593Smuzhiyun #define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
75*4882a593Smuzhiyun #define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"
76*4882a593Smuzhiyun #define FIRMWARE_POLARIS12	"amdgpu/polaris12_uvd.bin"
77*4882a593Smuzhiyun #define FIRMWARE_VEGAM		"amdgpu/vegam_uvd.bin"
78*4882a593Smuzhiyun 
79*4882a593Smuzhiyun #define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"
80*4882a593Smuzhiyun #define FIRMWARE_VEGA12		"amdgpu/vega12_uvd.bin"
81*4882a593Smuzhiyun #define FIRMWARE_VEGA20		"amdgpu/vega20_uvd.bin"
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun /* These are common relative offsets for all asics, from uvd_7_0_offset.h */
84*4882a593Smuzhiyun #define UVD_GPCOM_VCPU_CMD		0x03c3
85*4882a593Smuzhiyun #define UVD_GPCOM_VCPU_DATA0	0x03c4
86*4882a593Smuzhiyun #define UVD_GPCOM_VCPU_DATA1	0x03c5
87*4882a593Smuzhiyun #define UVD_NO_OP				0x03ff
88*4882a593Smuzhiyun #define UVD_BASE_SI				0x3800
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun /**
91*4882a593Smuzhiyun  * amdgpu_uvd_cs_ctx - Command submission parser context
92*4882a593Smuzhiyun  *
93*4882a593Smuzhiyun  * Used for emulating virtual memory support on UVD 4.2.
94*4882a593Smuzhiyun  */
95*4882a593Smuzhiyun struct amdgpu_uvd_cs_ctx {
96*4882a593Smuzhiyun 	struct amdgpu_cs_parser *parser;
97*4882a593Smuzhiyun 	unsigned reg, count;
98*4882a593Smuzhiyun 	unsigned data0, data1;
99*4882a593Smuzhiyun 	unsigned idx;
100*4882a593Smuzhiyun 	unsigned ib_idx;
101*4882a593Smuzhiyun 
102*4882a593Smuzhiyun 	/* does the IB have a msg command */
103*4882a593Smuzhiyun 	bool has_msg_cmd;
104*4882a593Smuzhiyun 
105*4882a593Smuzhiyun 	/* minimum buffer sizes */
106*4882a593Smuzhiyun 	unsigned *buf_sizes;
107*4882a593Smuzhiyun };
108*4882a593Smuzhiyun 
109*4882a593Smuzhiyun #ifdef CONFIG_DRM_AMDGPU_SI
110*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_TAHITI);
111*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_VERDE);
112*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_PITCAIRN);
113*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_OLAND);
114*4882a593Smuzhiyun #endif
115*4882a593Smuzhiyun #ifdef CONFIG_DRM_AMDGPU_CIK
116*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_BONAIRE);
117*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_KABINI);
118*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_KAVERI);
119*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_HAWAII);
120*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_MULLINS);
121*4882a593Smuzhiyun #endif
122*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_TONGA);
123*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_CARRIZO);
124*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_FIJI);
125*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_STONEY);
126*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_POLARIS10);
127*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_POLARIS11);
128*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_POLARIS12);
129*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_VEGAM);
130*4882a593Smuzhiyun 
131*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_VEGA10);
132*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_VEGA12);
133*4882a593Smuzhiyun MODULE_FIRMWARE(FIRMWARE_VEGA20);
134*4882a593Smuzhiyun 
135*4882a593Smuzhiyun static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
138*4882a593Smuzhiyun {
139*4882a593Smuzhiyun 	unsigned long bo_size;
140*4882a593Smuzhiyun 	const char *fw_name;
141*4882a593Smuzhiyun 	const struct common_firmware_header *hdr;
142*4882a593Smuzhiyun 	unsigned family_id;
143*4882a593Smuzhiyun 	int i, j, r;
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun 	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun 	switch (adev->asic_type) {
148*4882a593Smuzhiyun #ifdef CONFIG_DRM_AMDGPU_SI
149*4882a593Smuzhiyun 	case CHIP_TAHITI:
150*4882a593Smuzhiyun 		fw_name = FIRMWARE_TAHITI;
151*4882a593Smuzhiyun 		break;
152*4882a593Smuzhiyun 	case CHIP_VERDE:
153*4882a593Smuzhiyun 		fw_name = FIRMWARE_VERDE;
154*4882a593Smuzhiyun 		break;
155*4882a593Smuzhiyun 	case CHIP_PITCAIRN:
156*4882a593Smuzhiyun 		fw_name = FIRMWARE_PITCAIRN;
157*4882a593Smuzhiyun 		break;
158*4882a593Smuzhiyun 	case CHIP_OLAND:
159*4882a593Smuzhiyun 		fw_name = FIRMWARE_OLAND;
160*4882a593Smuzhiyun 		break;
161*4882a593Smuzhiyun #endif
162*4882a593Smuzhiyun #ifdef CONFIG_DRM_AMDGPU_CIK
163*4882a593Smuzhiyun 	case CHIP_BONAIRE:
164*4882a593Smuzhiyun 		fw_name = FIRMWARE_BONAIRE;
165*4882a593Smuzhiyun 		break;
166*4882a593Smuzhiyun 	case CHIP_KABINI:
167*4882a593Smuzhiyun 		fw_name = FIRMWARE_KABINI;
168*4882a593Smuzhiyun 		break;
169*4882a593Smuzhiyun 	case CHIP_KAVERI:
170*4882a593Smuzhiyun 		fw_name = FIRMWARE_KAVERI;
171*4882a593Smuzhiyun 		break;
172*4882a593Smuzhiyun 	case CHIP_HAWAII:
173*4882a593Smuzhiyun 		fw_name = FIRMWARE_HAWAII;
174*4882a593Smuzhiyun 		break;
175*4882a593Smuzhiyun 	case CHIP_MULLINS:
176*4882a593Smuzhiyun 		fw_name = FIRMWARE_MULLINS;
177*4882a593Smuzhiyun 		break;
178*4882a593Smuzhiyun #endif
179*4882a593Smuzhiyun 	case CHIP_TONGA:
180*4882a593Smuzhiyun 		fw_name = FIRMWARE_TONGA;
181*4882a593Smuzhiyun 		break;
182*4882a593Smuzhiyun 	case CHIP_FIJI:
183*4882a593Smuzhiyun 		fw_name = FIRMWARE_FIJI;
184*4882a593Smuzhiyun 		break;
185*4882a593Smuzhiyun 	case CHIP_CARRIZO:
186*4882a593Smuzhiyun 		fw_name = FIRMWARE_CARRIZO;
187*4882a593Smuzhiyun 		break;
188*4882a593Smuzhiyun 	case CHIP_STONEY:
189*4882a593Smuzhiyun 		fw_name = FIRMWARE_STONEY;
190*4882a593Smuzhiyun 		break;
191*4882a593Smuzhiyun 	case CHIP_POLARIS10:
192*4882a593Smuzhiyun 		fw_name = FIRMWARE_POLARIS10;
193*4882a593Smuzhiyun 		break;
194*4882a593Smuzhiyun 	case CHIP_POLARIS11:
195*4882a593Smuzhiyun 		fw_name = FIRMWARE_POLARIS11;
196*4882a593Smuzhiyun 		break;
197*4882a593Smuzhiyun 	case CHIP_POLARIS12:
198*4882a593Smuzhiyun 		fw_name = FIRMWARE_POLARIS12;
199*4882a593Smuzhiyun 		break;
200*4882a593Smuzhiyun 	case CHIP_VEGA10:
201*4882a593Smuzhiyun 		fw_name = FIRMWARE_VEGA10;
202*4882a593Smuzhiyun 		break;
203*4882a593Smuzhiyun 	case CHIP_VEGA12:
204*4882a593Smuzhiyun 		fw_name = FIRMWARE_VEGA12;
205*4882a593Smuzhiyun 		break;
206*4882a593Smuzhiyun 	case CHIP_VEGAM:
207*4882a593Smuzhiyun 		fw_name = FIRMWARE_VEGAM;
208*4882a593Smuzhiyun 		break;
209*4882a593Smuzhiyun 	case CHIP_VEGA20:
210*4882a593Smuzhiyun 		fw_name = FIRMWARE_VEGA20;
211*4882a593Smuzhiyun 		break;
212*4882a593Smuzhiyun 	default:
213*4882a593Smuzhiyun 		return -EINVAL;
214*4882a593Smuzhiyun 	}
215*4882a593Smuzhiyun 
216*4882a593Smuzhiyun 	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
217*4882a593Smuzhiyun 	if (r) {
218*4882a593Smuzhiyun 		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
219*4882a593Smuzhiyun 			fw_name);
220*4882a593Smuzhiyun 		return r;
221*4882a593Smuzhiyun 	}
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun 	r = amdgpu_ucode_validate(adev->uvd.fw);
224*4882a593Smuzhiyun 	if (r) {
225*4882a593Smuzhiyun 		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
226*4882a593Smuzhiyun 			fw_name);
227*4882a593Smuzhiyun 		release_firmware(adev->uvd.fw);
228*4882a593Smuzhiyun 		adev->uvd.fw = NULL;
229*4882a593Smuzhiyun 		return r;
230*4882a593Smuzhiyun 	}
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun 	/* Set the default UVD handles that the firmware can handle */
233*4882a593Smuzhiyun 	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;
234*4882a593Smuzhiyun 
235*4882a593Smuzhiyun 	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
236*4882a593Smuzhiyun 	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
237*4882a593Smuzhiyun 
238*4882a593Smuzhiyun 	if (adev->asic_type < CHIP_VEGA20) {
239*4882a593Smuzhiyun 		unsigned version_major, version_minor;
240*4882a593Smuzhiyun 
241*4882a593Smuzhiyun 		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
242*4882a593Smuzhiyun 		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
243*4882a593Smuzhiyun 		DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
244*4882a593Smuzhiyun 			version_major, version_minor, family_id);
245*4882a593Smuzhiyun 
246*4882a593Smuzhiyun 		/*
247*4882a593Smuzhiyun 		 * Limit the number of UVD handles depending on microcode major
248*4882a593Smuzhiyun 		 * and minor versions. The first firmware version to support 40
249*4882a593Smuzhiyun 		 * UVD handles is 1.80, so all subsequent versions keep the
250*4882a593Smuzhiyun 		 * same support.
251*4882a593Smuzhiyun 		 */
252*4882a593Smuzhiyun 		if ((version_major > 0x01) ||
253*4882a593Smuzhiyun 		    ((version_major == 0x01) && (version_minor >= 0x50)))
254*4882a593Smuzhiyun 			adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun 		adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
257*4882a593Smuzhiyun 					(family_id << 8));
258*4882a593Smuzhiyun 
259*4882a593Smuzhiyun 		if ((adev->asic_type == CHIP_POLARIS10 ||
260*4882a593Smuzhiyun 		     adev->asic_type == CHIP_POLARIS11) &&
261*4882a593Smuzhiyun 		    (adev->uvd.fw_version < FW_1_66_16))
262*4882a593Smuzhiyun 			DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
263*4882a593Smuzhiyun 				  version_major, version_minor);
264*4882a593Smuzhiyun 	} else {
265*4882a593Smuzhiyun 		unsigned int enc_major, enc_minor, dec_minor;
266*4882a593Smuzhiyun 
267*4882a593Smuzhiyun 		dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
268*4882a593Smuzhiyun 		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
269*4882a593Smuzhiyun 		enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
270*4882a593Smuzhiyun 		DRM_INFO("Found UVD firmware ENC: %hu.%hu DEC: .%hu Family ID: %hu\n",
271*4882a593Smuzhiyun 			enc_major, enc_minor, dec_minor, family_id);
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun 		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun 		adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version);
276*4882a593Smuzhiyun 	}
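	/*
	 * Worked example for the decoding above, using a hypothetical header
	 * value: ucode_version == 0x4900252f would give
	 *
	 *   enc_major = (0x4900252f >> 30) & 0x03 == 1
	 *   enc_minor = (0x4900252f >> 24) & 0x3f == 9
	 *   dec_minor = (0x4900252f >>  8) & 0xff == 37
	 *   family_id =  0x4900252f        & 0xff == 47
	 *
	 * and be reported as "ENC: 1.9 DEC: .37 Family ID: 47".
	 */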
277*4882a593Smuzhiyun 
278*4882a593Smuzhiyun 	bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
279*4882a593Smuzhiyun 		  +  AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
280*4882a593Smuzhiyun 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
281*4882a593Smuzhiyun 		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
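	/*
	 * Rough size check, assuming the constants from amdgpu_uvd.h at this
	 * revision (200 KiB stack, 256 KiB heap, 50 KiB per session): with
	 * the maximum of 40 handles this works out to 200 + 256 + 40 * 50 =
	 * 2456 KiB, plus the page-aligned firmware image whenever the driver
	 * rather than the PSP loads the VCPU code.
	 */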
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
284*4882a593Smuzhiyun 		if (adev->uvd.harvest_config & (1 << j))
285*4882a593Smuzhiyun 			continue;
286*4882a593Smuzhiyun 		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
287*4882a593Smuzhiyun 					    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
288*4882a593Smuzhiyun 					    &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
289*4882a593Smuzhiyun 		if (r) {
290*4882a593Smuzhiyun 			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
291*4882a593Smuzhiyun 			return r;
292*4882a593Smuzhiyun 		}
293*4882a593Smuzhiyun 	}
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun 	for (i = 0; i < adev->uvd.max_handles; ++i) {
296*4882a593Smuzhiyun 		atomic_set(&adev->uvd.handles[i], 0);
297*4882a593Smuzhiyun 		adev->uvd.filp[i] = NULL;
298*4882a593Smuzhiyun 	}
299*4882a593Smuzhiyun 
300*4882a593Smuzhiyun 	/* from UVD v5.0 onward, HW addressing capacity increased to 64 bits */
301*4882a593Smuzhiyun 	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
302*4882a593Smuzhiyun 		adev->uvd.address_64_bit = true;
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun 	switch (adev->asic_type) {
305*4882a593Smuzhiyun 	case CHIP_TONGA:
306*4882a593Smuzhiyun 		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
307*4882a593Smuzhiyun 		break;
308*4882a593Smuzhiyun 	case CHIP_CARRIZO:
309*4882a593Smuzhiyun 		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
310*4882a593Smuzhiyun 		break;
311*4882a593Smuzhiyun 	case CHIP_FIJI:
312*4882a593Smuzhiyun 		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
313*4882a593Smuzhiyun 		break;
314*4882a593Smuzhiyun 	case CHIP_STONEY:
315*4882a593Smuzhiyun 		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
316*4882a593Smuzhiyun 		break;
317*4882a593Smuzhiyun 	default:
318*4882a593Smuzhiyun 		adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
319*4882a593Smuzhiyun 	}
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun 	return 0;
322*4882a593Smuzhiyun }
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
325*4882a593Smuzhiyun {
326*4882a593Smuzhiyun 	int i, j;
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	cancel_delayed_work_sync(&adev->uvd.idle_work);
329*4882a593Smuzhiyun 	drm_sched_entity_destroy(&adev->uvd.entity);
330*4882a593Smuzhiyun 
331*4882a593Smuzhiyun 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
332*4882a593Smuzhiyun 		if (adev->uvd.harvest_config & (1 << j))
333*4882a593Smuzhiyun 			continue;
334*4882a593Smuzhiyun 		kvfree(adev->uvd.inst[j].saved_bo);
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
337*4882a593Smuzhiyun 				      &adev->uvd.inst[j].gpu_addr,
338*4882a593Smuzhiyun 				      (void **)&adev->uvd.inst[j].cpu_addr);
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun 		amdgpu_ring_fini(&adev->uvd.inst[j].ring);
341*4882a593Smuzhiyun 
342*4882a593Smuzhiyun 		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
343*4882a593Smuzhiyun 			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
344*4882a593Smuzhiyun 	}
345*4882a593Smuzhiyun 	release_firmware(adev->uvd.fw);
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 	return 0;
348*4882a593Smuzhiyun }
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun /**
351*4882a593Smuzhiyun  * amdgpu_uvd_entity_init - init entity
352*4882a593Smuzhiyun  *
353*4882a593Smuzhiyun  * @adev: amdgpu_device pointer
354*4882a593Smuzhiyun  *
355*4882a593Smuzhiyun  */
356*4882a593Smuzhiyun int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
357*4882a593Smuzhiyun {
358*4882a593Smuzhiyun 	struct amdgpu_ring *ring;
359*4882a593Smuzhiyun 	struct drm_gpu_scheduler *sched;
360*4882a593Smuzhiyun 	int r;
361*4882a593Smuzhiyun 
362*4882a593Smuzhiyun 	ring = &adev->uvd.inst[0].ring;
363*4882a593Smuzhiyun 	sched = &ring->sched;
364*4882a593Smuzhiyun 	r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
365*4882a593Smuzhiyun 				  &sched, 1, NULL);
366*4882a593Smuzhiyun 	if (r) {
367*4882a593Smuzhiyun 		DRM_ERROR("Failed setting up UVD kernel entity.\n");
368*4882a593Smuzhiyun 		return r;
369*4882a593Smuzhiyun 	}
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun 	return 0;
372*4882a593Smuzhiyun }
373*4882a593Smuzhiyun 
374*4882a593Smuzhiyun int amdgpu_uvd_suspend(struct amdgpu_device *adev)
375*4882a593Smuzhiyun {
376*4882a593Smuzhiyun 	unsigned size;
377*4882a593Smuzhiyun 	void *ptr;
378*4882a593Smuzhiyun 	int i, j;
379*4882a593Smuzhiyun 	bool in_ras_intr = amdgpu_ras_intr_triggered();
380*4882a593Smuzhiyun 
381*4882a593Smuzhiyun 	cancel_delayed_work_sync(&adev->uvd.idle_work);
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun 	/* only valid for physical mode */
384*4882a593Smuzhiyun 	if (adev->asic_type < CHIP_POLARIS10) {
385*4882a593Smuzhiyun 		for (i = 0; i < adev->uvd.max_handles; ++i)
386*4882a593Smuzhiyun 			if (atomic_read(&adev->uvd.handles[i]))
387*4882a593Smuzhiyun 				break;
388*4882a593Smuzhiyun 
389*4882a593Smuzhiyun 		if (i == adev->uvd.max_handles)
390*4882a593Smuzhiyun 			return 0;
391*4882a593Smuzhiyun 	}
392*4882a593Smuzhiyun 
393*4882a593Smuzhiyun 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
394*4882a593Smuzhiyun 		if (adev->uvd.harvest_config & (1 << j))
395*4882a593Smuzhiyun 			continue;
396*4882a593Smuzhiyun 		if (adev->uvd.inst[j].vcpu_bo == NULL)
397*4882a593Smuzhiyun 			continue;
398*4882a593Smuzhiyun 
399*4882a593Smuzhiyun 		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
400*4882a593Smuzhiyun 		ptr = adev->uvd.inst[j].cpu_addr;
401*4882a593Smuzhiyun 
402*4882a593Smuzhiyun 		adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
403*4882a593Smuzhiyun 		if (!adev->uvd.inst[j].saved_bo)
404*4882a593Smuzhiyun 			return -ENOMEM;
405*4882a593Smuzhiyun 
406*4882a593Smuzhiyun 		/* re-write 0 since err_event_athub will corrupt VCPU buffer */
407*4882a593Smuzhiyun 		if (in_ras_intr)
408*4882a593Smuzhiyun 			memset(adev->uvd.inst[j].saved_bo, 0, size);
409*4882a593Smuzhiyun 		else
410*4882a593Smuzhiyun 			memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
411*4882a593Smuzhiyun 	}
412*4882a593Smuzhiyun 
413*4882a593Smuzhiyun 	if (in_ras_intr)
414*4882a593Smuzhiyun 		DRM_WARN("UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");
415*4882a593Smuzhiyun 
416*4882a593Smuzhiyun 	return 0;
417*4882a593Smuzhiyun }
418*4882a593Smuzhiyun 
419*4882a593Smuzhiyun int amdgpu_uvd_resume(struct amdgpu_device *adev)
420*4882a593Smuzhiyun {
421*4882a593Smuzhiyun 	unsigned size;
422*4882a593Smuzhiyun 	void *ptr;
423*4882a593Smuzhiyun 	int i;
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
426*4882a593Smuzhiyun 		if (adev->uvd.harvest_config & (1 << i))
427*4882a593Smuzhiyun 			continue;
428*4882a593Smuzhiyun 		if (adev->uvd.inst[i].vcpu_bo == NULL)
429*4882a593Smuzhiyun 			return -EINVAL;
430*4882a593Smuzhiyun 
431*4882a593Smuzhiyun 		size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
432*4882a593Smuzhiyun 		ptr = adev->uvd.inst[i].cpu_addr;
433*4882a593Smuzhiyun 
434*4882a593Smuzhiyun 		if (adev->uvd.inst[i].saved_bo != NULL) {
435*4882a593Smuzhiyun 			memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
436*4882a593Smuzhiyun 			kvfree(adev->uvd.inst[i].saved_bo);
437*4882a593Smuzhiyun 			adev->uvd.inst[i].saved_bo = NULL;
438*4882a593Smuzhiyun 		} else {
439*4882a593Smuzhiyun 			const struct common_firmware_header *hdr;
440*4882a593Smuzhiyun 			unsigned offset;
441*4882a593Smuzhiyun 
442*4882a593Smuzhiyun 			hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
443*4882a593Smuzhiyun 			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
444*4882a593Smuzhiyun 				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
445*4882a593Smuzhiyun 				memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
446*4882a593Smuzhiyun 					    le32_to_cpu(hdr->ucode_size_bytes));
447*4882a593Smuzhiyun 				size -= le32_to_cpu(hdr->ucode_size_bytes);
448*4882a593Smuzhiyun 				ptr += le32_to_cpu(hdr->ucode_size_bytes);
449*4882a593Smuzhiyun 			}
450*4882a593Smuzhiyun 			memset_io(ptr, 0, size);
451*4882a593Smuzhiyun 			/* to restore uvd fence seq */
452*4882a593Smuzhiyun 			amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
453*4882a593Smuzhiyun 		}
454*4882a593Smuzhiyun 	}
455*4882a593Smuzhiyun 	return 0;
456*4882a593Smuzhiyun }
457*4882a593Smuzhiyun 
458*4882a593Smuzhiyun void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
459*4882a593Smuzhiyun {
460*4882a593Smuzhiyun 	struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
461*4882a593Smuzhiyun 	int i, r;
462*4882a593Smuzhiyun 
463*4882a593Smuzhiyun 	for (i = 0; i < adev->uvd.max_handles; ++i) {
464*4882a593Smuzhiyun 		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
465*4882a593Smuzhiyun 
466*4882a593Smuzhiyun 		if (handle != 0 && adev->uvd.filp[i] == filp) {
467*4882a593Smuzhiyun 			struct dma_fence *fence;
468*4882a593Smuzhiyun 
469*4882a593Smuzhiyun 			r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
470*4882a593Smuzhiyun 						       &fence);
471*4882a593Smuzhiyun 			if (r) {
472*4882a593Smuzhiyun 				DRM_ERROR("Error destroying UVD %d!\n", r);
473*4882a593Smuzhiyun 				continue;
474*4882a593Smuzhiyun 			}
475*4882a593Smuzhiyun 
476*4882a593Smuzhiyun 			dma_fence_wait(fence, false);
477*4882a593Smuzhiyun 			dma_fence_put(fence);
478*4882a593Smuzhiyun 
479*4882a593Smuzhiyun 			adev->uvd.filp[i] = NULL;
480*4882a593Smuzhiyun 			atomic_set(&adev->uvd.handles[i], 0);
481*4882a593Smuzhiyun 		}
482*4882a593Smuzhiyun 	}
483*4882a593Smuzhiyun }
484*4882a593Smuzhiyun 
485*4882a593Smuzhiyun static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
486*4882a593Smuzhiyun {
487*4882a593Smuzhiyun 	int i;
488*4882a593Smuzhiyun 	for (i = 0; i < abo->placement.num_placement; ++i) {
489*4882a593Smuzhiyun 		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
490*4882a593Smuzhiyun 		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
491*4882a593Smuzhiyun 	}
492*4882a593Smuzhiyun }
493*4882a593Smuzhiyun 
494*4882a593Smuzhiyun static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
495*4882a593Smuzhiyun {
496*4882a593Smuzhiyun 	uint32_t lo, hi;
497*4882a593Smuzhiyun 	uint64_t addr;
498*4882a593Smuzhiyun 
499*4882a593Smuzhiyun 	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
500*4882a593Smuzhiyun 	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
501*4882a593Smuzhiyun 	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
502*4882a593Smuzhiyun 
503*4882a593Smuzhiyun 	return addr;
504*4882a593Smuzhiyun }
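/*
 * The dwords at data0/data1 hold the low and high halves of a 64-bit GPU
 * address; e.g. lo == 0x00400000 and hi == 0x00000001 combine to
 * 0x0000000100400000.
 */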
505*4882a593Smuzhiyun 
506*4882a593Smuzhiyun /**
507*4882a593Smuzhiyun  * amdgpu_uvd_cs_pass1 - first parsing round
508*4882a593Smuzhiyun  *
509*4882a593Smuzhiyun  * @ctx: UVD parser context
510*4882a593Smuzhiyun  *
511*4882a593Smuzhiyun  * Make sure UVD message and feedback buffers are in VRAM and
512*4882a593Smuzhiyun  * nobody is violating a 256MB boundary.
513*4882a593Smuzhiyun  */
514*4882a593Smuzhiyun static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
515*4882a593Smuzhiyun {
516*4882a593Smuzhiyun 	struct ttm_operation_ctx tctx = { false, false };
517*4882a593Smuzhiyun 	struct amdgpu_bo_va_mapping *mapping;
518*4882a593Smuzhiyun 	struct amdgpu_bo *bo;
519*4882a593Smuzhiyun 	uint32_t cmd;
520*4882a593Smuzhiyun 	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
521*4882a593Smuzhiyun 	int r = 0;
522*4882a593Smuzhiyun 
523*4882a593Smuzhiyun 	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
524*4882a593Smuzhiyun 	if (r) {
525*4882a593Smuzhiyun 		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
526*4882a593Smuzhiyun 		return r;
527*4882a593Smuzhiyun 	}
528*4882a593Smuzhiyun 
529*4882a593Smuzhiyun 	if (!ctx->parser->adev->uvd.address_64_bit) {
530*4882a593Smuzhiyun 		/* check if it's a message or feedback command */
531*4882a593Smuzhiyun 		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
532*4882a593Smuzhiyun 		if (cmd == 0x0 || cmd == 0x3) {
533*4882a593Smuzhiyun 			/* yes, force it into VRAM */
534*4882a593Smuzhiyun 			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
535*4882a593Smuzhiyun 			amdgpu_bo_placement_from_domain(bo, domain);
536*4882a593Smuzhiyun 		}
537*4882a593Smuzhiyun 		amdgpu_uvd_force_into_uvd_segment(bo);
538*4882a593Smuzhiyun 
539*4882a593Smuzhiyun 		r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
540*4882a593Smuzhiyun 	}
541*4882a593Smuzhiyun 
542*4882a593Smuzhiyun 	return r;
543*4882a593Smuzhiyun }
544*4882a593Smuzhiyun 
545*4882a593Smuzhiyun /**
546*4882a593Smuzhiyun  * amdgpu_uvd_cs_msg_decode - handle UVD decode message
547*4882a593Smuzhiyun  *
 * @adev: amdgpu_device pointer
548*4882a593Smuzhiyun  * @msg: pointer to message structure
549*4882a593Smuzhiyun  * @buf_sizes: returned buffer sizes
550*4882a593Smuzhiyun  *
551*4882a593Smuzhiyun  * Peek into the decode message and calculate the necessary buffer sizes.
552*4882a593Smuzhiyun  */
553*4882a593Smuzhiyun static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
554*4882a593Smuzhiyun 	unsigned buf_sizes[])
555*4882a593Smuzhiyun {
556*4882a593Smuzhiyun 	unsigned stream_type = msg[4];
557*4882a593Smuzhiyun 	unsigned width = msg[6];
558*4882a593Smuzhiyun 	unsigned height = msg[7];
559*4882a593Smuzhiyun 	unsigned dpb_size = msg[9];
560*4882a593Smuzhiyun 	unsigned pitch = msg[28];
561*4882a593Smuzhiyun 	unsigned level = msg[57];
562*4882a593Smuzhiyun 
563*4882a593Smuzhiyun 	unsigned width_in_mb = width / 16;
564*4882a593Smuzhiyun 	unsigned height_in_mb = ALIGN(height / 16, 2);
565*4882a593Smuzhiyun 	unsigned fs_in_mb = width_in_mb * height_in_mb;
566*4882a593Smuzhiyun 
567*4882a593Smuzhiyun 	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
568*4882a593Smuzhiyun 	unsigned min_ctx_size = ~0;
569*4882a593Smuzhiyun 
570*4882a593Smuzhiyun 	image_size = width * height;
571*4882a593Smuzhiyun 	image_size += image_size / 2;
572*4882a593Smuzhiyun 	image_size = ALIGN(image_size, 1024);
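	/*
	 * Worked example for a 1920x1088 NV12 target: the luma plane is
	 * 1920 * 1088 = 2088960 bytes, adding half of that for chroma gives
	 * 3133440, already a multiple of 1024, so image_size ends up at
	 * roughly 3 MiB per picture.
	 */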
573*4882a593Smuzhiyun 
574*4882a593Smuzhiyun 	switch (stream_type) {
575*4882a593Smuzhiyun 	case 0: /* H264 */
576*4882a593Smuzhiyun 		switch (level) {
577*4882a593Smuzhiyun 		case 30:
578*4882a593Smuzhiyun 			num_dpb_buffer = 8100 / fs_in_mb;
579*4882a593Smuzhiyun 			break;
580*4882a593Smuzhiyun 		case 31:
581*4882a593Smuzhiyun 			num_dpb_buffer = 18000 / fs_in_mb;
582*4882a593Smuzhiyun 			break;
583*4882a593Smuzhiyun 		case 32:
584*4882a593Smuzhiyun 			num_dpb_buffer = 20480 / fs_in_mb;
585*4882a593Smuzhiyun 			break;
586*4882a593Smuzhiyun 		case 41:
587*4882a593Smuzhiyun 			num_dpb_buffer = 32768 / fs_in_mb;
588*4882a593Smuzhiyun 			break;
589*4882a593Smuzhiyun 		case 42:
590*4882a593Smuzhiyun 			num_dpb_buffer = 34816 / fs_in_mb;
591*4882a593Smuzhiyun 			break;
592*4882a593Smuzhiyun 		case 50:
593*4882a593Smuzhiyun 			num_dpb_buffer = 110400 / fs_in_mb;
594*4882a593Smuzhiyun 			break;
595*4882a593Smuzhiyun 		case 51:
596*4882a593Smuzhiyun 			num_dpb_buffer = 184320 / fs_in_mb;
597*4882a593Smuzhiyun 			break;
598*4882a593Smuzhiyun 		default:
599*4882a593Smuzhiyun 			num_dpb_buffer = 184320 / fs_in_mb;
600*4882a593Smuzhiyun 			break;
601*4882a593Smuzhiyun 		}
602*4882a593Smuzhiyun 		num_dpb_buffer++;
603*4882a593Smuzhiyun 		if (num_dpb_buffer > 17)
604*4882a593Smuzhiyun 			num_dpb_buffer = 17;
605*4882a593Smuzhiyun 
606*4882a593Smuzhiyun 		/* reference picture buffer */
607*4882a593Smuzhiyun 		min_dpb_size = image_size * num_dpb_buffer;
608*4882a593Smuzhiyun 
609*4882a593Smuzhiyun 		/* macroblock context buffer */
610*4882a593Smuzhiyun 		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;
611*4882a593Smuzhiyun 
612*4882a593Smuzhiyun 		/* IT surface buffer */
613*4882a593Smuzhiyun 		min_dpb_size += width_in_mb * height_in_mb * 32;
614*4882a593Smuzhiyun 		break;
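	/*
	 * Worked example for the H264 case above: a 1920x1088 level 4.1
	 * stream has width_in_mb = 120, height_in_mb = 68, fs_in_mb = 8160,
	 * so num_dpb_buffer = 32768 / 8160 + 1 = 5. min_dpb_size is then
	 * five reference pictures (5 * 3133440) plus 8160 * 5 * 192 bytes of
	 * macroblock context plus 8160 * 32 bytes of IT surface, roughly
	 * 22.7 MiB in total.
	 */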
615*4882a593Smuzhiyun 
616*4882a593Smuzhiyun 	case 1: /* VC1 */
617*4882a593Smuzhiyun 
618*4882a593Smuzhiyun 		/* reference picture buffer */
619*4882a593Smuzhiyun 		min_dpb_size = image_size * 3;
620*4882a593Smuzhiyun 
621*4882a593Smuzhiyun 		/* CONTEXT_BUFFER */
622*4882a593Smuzhiyun 		min_dpb_size += width_in_mb * height_in_mb * 128;
623*4882a593Smuzhiyun 
624*4882a593Smuzhiyun 		/* IT surface buffer */
625*4882a593Smuzhiyun 		min_dpb_size += width_in_mb * 64;
626*4882a593Smuzhiyun 
627*4882a593Smuzhiyun 		/* DB surface buffer */
628*4882a593Smuzhiyun 		min_dpb_size += width_in_mb * 128;
629*4882a593Smuzhiyun 
630*4882a593Smuzhiyun 		/* BP */
631*4882a593Smuzhiyun 		tmp = max(width_in_mb, height_in_mb);
632*4882a593Smuzhiyun 		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
633*4882a593Smuzhiyun 		break;
634*4882a593Smuzhiyun 
635*4882a593Smuzhiyun 	case 3: /* MPEG2 */
636*4882a593Smuzhiyun 
637*4882a593Smuzhiyun 		/* reference picture buffer */
638*4882a593Smuzhiyun 		min_dpb_size = image_size * 3;
639*4882a593Smuzhiyun 		break;
640*4882a593Smuzhiyun 
641*4882a593Smuzhiyun 	case 4: /* MPEG4 */
642*4882a593Smuzhiyun 
643*4882a593Smuzhiyun 		/* reference picture buffer */
644*4882a593Smuzhiyun 		min_dpb_size = image_size * 3;
645*4882a593Smuzhiyun 
646*4882a593Smuzhiyun 		/* CM */
647*4882a593Smuzhiyun 		min_dpb_size += width_in_mb * height_in_mb * 64;
648*4882a593Smuzhiyun 
649*4882a593Smuzhiyun 		/* IT surface buffer */
650*4882a593Smuzhiyun 		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
651*4882a593Smuzhiyun 		break;
652*4882a593Smuzhiyun 
653*4882a593Smuzhiyun 	case 7: /* H264 Perf */
654*4882a593Smuzhiyun 		switch (level) {
655*4882a593Smuzhiyun 		case 30:
656*4882a593Smuzhiyun 			num_dpb_buffer = 8100 / fs_in_mb;
657*4882a593Smuzhiyun 			break;
658*4882a593Smuzhiyun 		case 31:
659*4882a593Smuzhiyun 			num_dpb_buffer = 18000 / fs_in_mb;
660*4882a593Smuzhiyun 			break;
661*4882a593Smuzhiyun 		case 32:
662*4882a593Smuzhiyun 			num_dpb_buffer = 20480 / fs_in_mb;
663*4882a593Smuzhiyun 			break;
664*4882a593Smuzhiyun 		case 41:
665*4882a593Smuzhiyun 			num_dpb_buffer = 32768 / fs_in_mb;
666*4882a593Smuzhiyun 			break;
667*4882a593Smuzhiyun 		case 42:
668*4882a593Smuzhiyun 			num_dpb_buffer = 34816 / fs_in_mb;
669*4882a593Smuzhiyun 			break;
670*4882a593Smuzhiyun 		case 50:
671*4882a593Smuzhiyun 			num_dpb_buffer = 110400 / fs_in_mb;
672*4882a593Smuzhiyun 			break;
673*4882a593Smuzhiyun 		case 51:
674*4882a593Smuzhiyun 			num_dpb_buffer = 184320 / fs_in_mb;
675*4882a593Smuzhiyun 			break;
676*4882a593Smuzhiyun 		default:
677*4882a593Smuzhiyun 			num_dpb_buffer = 184320 / fs_in_mb;
678*4882a593Smuzhiyun 			break;
679*4882a593Smuzhiyun 		}
680*4882a593Smuzhiyun 		num_dpb_buffer++;
681*4882a593Smuzhiyun 		if (num_dpb_buffer > 17)
682*4882a593Smuzhiyun 			num_dpb_buffer = 17;
683*4882a593Smuzhiyun 
684*4882a593Smuzhiyun 		/* reference picture buffer */
685*4882a593Smuzhiyun 		min_dpb_size = image_size * num_dpb_buffer;
686*4882a593Smuzhiyun 
687*4882a593Smuzhiyun 		if (!adev->uvd.use_ctx_buf) {
688*4882a593Smuzhiyun 			/* macroblock context buffer */
689*4882a593Smuzhiyun 			min_dpb_size +=
690*4882a593Smuzhiyun 				width_in_mb * height_in_mb * num_dpb_buffer * 192;
691*4882a593Smuzhiyun 
692*4882a593Smuzhiyun 			/* IT surface buffer */
693*4882a593Smuzhiyun 			min_dpb_size += width_in_mb * height_in_mb * 32;
694*4882a593Smuzhiyun 		} else {
695*4882a593Smuzhiyun 			/* macroblock context buffer */
696*4882a593Smuzhiyun 			min_ctx_size =
697*4882a593Smuzhiyun 				width_in_mb * height_in_mb * num_dpb_buffer * 192;
698*4882a593Smuzhiyun 		}
699*4882a593Smuzhiyun 		break;
700*4882a593Smuzhiyun 
701*4882a593Smuzhiyun 	case 8: /* MJPEG */
702*4882a593Smuzhiyun 		min_dpb_size = 0;
703*4882a593Smuzhiyun 		break;
704*4882a593Smuzhiyun 
705*4882a593Smuzhiyun 	case 16: /* H265 */
706*4882a593Smuzhiyun 		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
707*4882a593Smuzhiyun 		image_size = ALIGN(image_size, 256);
708*4882a593Smuzhiyun 
709*4882a593Smuzhiyun 		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
710*4882a593Smuzhiyun 		min_dpb_size = image_size * num_dpb_buffer;
711*4882a593Smuzhiyun 		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
712*4882a593Smuzhiyun 					   * 16 * num_dpb_buffer + 52 * 1024;
713*4882a593Smuzhiyun 		break;
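	/*
	 * Worked H265 example: for 1920x1088 with msg[59] reporting four
	 * reference frames, num_dpb_buffer = 6 and image_size =
	 * ALIGN(1920 * 1088 * 3 / 2, 256) = 3133440, giving min_dpb_size =
	 * 18800640 and min_ctx_size = 135 * 83 * 16 * 6 + 52 * 1024 =
	 * 1128928 bytes.
	 */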
714*4882a593Smuzhiyun 
715*4882a593Smuzhiyun 	default:
716*4882a593Smuzhiyun 		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
717*4882a593Smuzhiyun 		return -EINVAL;
718*4882a593Smuzhiyun 	}
719*4882a593Smuzhiyun 
720*4882a593Smuzhiyun 	if (width > pitch) {
721*4882a593Smuzhiyun 		DRM_ERROR("Invalid UVD decoding target pitch!\n");
722*4882a593Smuzhiyun 		return -EINVAL;
723*4882a593Smuzhiyun 	}
724*4882a593Smuzhiyun 
725*4882a593Smuzhiyun 	if (dpb_size < min_dpb_size) {
726*4882a593Smuzhiyun 		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
727*4882a593Smuzhiyun 			  dpb_size, min_dpb_size);
728*4882a593Smuzhiyun 		return -EINVAL;
729*4882a593Smuzhiyun 	}
730*4882a593Smuzhiyun 
731*4882a593Smuzhiyun 	buf_sizes[0x1] = dpb_size;
732*4882a593Smuzhiyun 	buf_sizes[0x2] = image_size;
733*4882a593Smuzhiyun 	buf_sizes[0x4] = min_ctx_size;
734*4882a593Smuzhiyun 	/* store image width to adjust the northbridge (NB) memory pstate */
735*4882a593Smuzhiyun 	adev->uvd.decode_image_width = width;
736*4882a593Smuzhiyun 	return 0;
737*4882a593Smuzhiyun }
738*4882a593Smuzhiyun 
739*4882a593Smuzhiyun /**
740*4882a593Smuzhiyun  * amdgpu_uvd_cs_msg - handle UVD message
741*4882a593Smuzhiyun  *
742*4882a593Smuzhiyun  * @ctx: UVD parser context
743*4882a593Smuzhiyun  * @bo: buffer object containing the message
744*4882a593Smuzhiyun  * @offset: offset into the buffer object
745*4882a593Smuzhiyun  *
746*4882a593Smuzhiyun  * Peek into the UVD message and extract the session id.
747*4882a593Smuzhiyun  * Make sure that we don't open up too many sessions.
748*4882a593Smuzhiyun  */
749*4882a593Smuzhiyun static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
750*4882a593Smuzhiyun 			     struct amdgpu_bo *bo, unsigned offset)
751*4882a593Smuzhiyun {
752*4882a593Smuzhiyun 	struct amdgpu_device *adev = ctx->parser->adev;
753*4882a593Smuzhiyun 	int32_t *msg, msg_type, handle;
754*4882a593Smuzhiyun 	void *ptr;
755*4882a593Smuzhiyun 	long r;
756*4882a593Smuzhiyun 	int i;
757*4882a593Smuzhiyun 
758*4882a593Smuzhiyun 	if (offset & 0x3F) {
759*4882a593Smuzhiyun 		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
760*4882a593Smuzhiyun 		return -EINVAL;
761*4882a593Smuzhiyun 	}
762*4882a593Smuzhiyun 
763*4882a593Smuzhiyun 	r = amdgpu_bo_kmap(bo, &ptr);
764*4882a593Smuzhiyun 	if (r) {
765*4882a593Smuzhiyun 		DRM_ERROR("Failed mapping the UVD) message (%ld)!\n", r);
766*4882a593Smuzhiyun 		return r;
767*4882a593Smuzhiyun 	}
768*4882a593Smuzhiyun 
769*4882a593Smuzhiyun 	msg = ptr + offset;
770*4882a593Smuzhiyun 
771*4882a593Smuzhiyun 	msg_type = msg[1];
772*4882a593Smuzhiyun 	handle = msg[2];
773*4882a593Smuzhiyun 
774*4882a593Smuzhiyun 	if (handle == 0) {
		/* balance the amdgpu_bo_kmap() above on this error path */
		amdgpu_bo_kunmap(bo);
775*4882a593Smuzhiyun 		DRM_ERROR("Invalid UVD handle!\n");
776*4882a593Smuzhiyun 		return -EINVAL;
777*4882a593Smuzhiyun 	}
778*4882a593Smuzhiyun 
779*4882a593Smuzhiyun 	switch (msg_type) {
780*4882a593Smuzhiyun 	case 0:
781*4882a593Smuzhiyun 		/* it's a create msg, calc image size (width * height) */
782*4882a593Smuzhiyun 		amdgpu_bo_kunmap(bo);
783*4882a593Smuzhiyun 
784*4882a593Smuzhiyun 		/* try to alloc a new handle */
785*4882a593Smuzhiyun 		for (i = 0; i < adev->uvd.max_handles; ++i) {
786*4882a593Smuzhiyun 			if (atomic_read(&adev->uvd.handles[i]) == handle) {
787*4882a593Smuzhiyun 				DRM_ERROR(")Handle 0x%x already in use!\n",
788*4882a593Smuzhiyun 					  handle);
789*4882a593Smuzhiyun 				return -EINVAL;
790*4882a593Smuzhiyun 			}
791*4882a593Smuzhiyun 
792*4882a593Smuzhiyun 			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
793*4882a593Smuzhiyun 				adev->uvd.filp[i] = ctx->parser->filp;
794*4882a593Smuzhiyun 				return 0;
795*4882a593Smuzhiyun 			}
796*4882a593Smuzhiyun 		}
797*4882a593Smuzhiyun 
798*4882a593Smuzhiyun 		DRM_ERROR("No more free UVD handles!\n");
799*4882a593Smuzhiyun 		return -ENOSPC;
800*4882a593Smuzhiyun 
801*4882a593Smuzhiyun 	case 1:
802*4882a593Smuzhiyun 		/* it's a decode msg, calc buffer sizes */
803*4882a593Smuzhiyun 		r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
804*4882a593Smuzhiyun 		amdgpu_bo_kunmap(bo);
805*4882a593Smuzhiyun 		if (r)
806*4882a593Smuzhiyun 			return r;
807*4882a593Smuzhiyun 
808*4882a593Smuzhiyun 		/* validate the handle */
809*4882a593Smuzhiyun 		for (i = 0; i < adev->uvd.max_handles; ++i) {
810*4882a593Smuzhiyun 			if (atomic_read(&adev->uvd.handles[i]) == handle) {
811*4882a593Smuzhiyun 				if (adev->uvd.filp[i] != ctx->parser->filp) {
812*4882a593Smuzhiyun 					DRM_ERROR("UVD handle collision detected!\n");
813*4882a593Smuzhiyun 					return -EINVAL;
814*4882a593Smuzhiyun 				}
815*4882a593Smuzhiyun 				return 0;
816*4882a593Smuzhiyun 			}
817*4882a593Smuzhiyun 		}
818*4882a593Smuzhiyun 
819*4882a593Smuzhiyun 		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
820*4882a593Smuzhiyun 		return -ENOENT;
821*4882a593Smuzhiyun 
822*4882a593Smuzhiyun 	case 2:
823*4882a593Smuzhiyun 		/* it's a destroy msg, free the handle */
824*4882a593Smuzhiyun 		for (i = 0; i < adev->uvd.max_handles; ++i)
825*4882a593Smuzhiyun 			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
826*4882a593Smuzhiyun 		amdgpu_bo_kunmap(bo);
827*4882a593Smuzhiyun 		return 0;
828*4882a593Smuzhiyun 
829*4882a593Smuzhiyun 	default:
830*4882a593Smuzhiyun 		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
		/* balance the amdgpu_bo_kmap() above on this error path */
		amdgpu_bo_kunmap(bo);
831*4882a593Smuzhiyun 		return -EINVAL;
832*4882a593Smuzhiyun 	}
833*4882a593Smuzhiyun 	BUG();
834*4882a593Smuzhiyun 	return -EINVAL;
835*4882a593Smuzhiyun }
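/*
 * Note on the handle bookkeeping above: the handles array is shared
 * between parsers without a lock, so a slot must be claimed with
 * atomic_cmpxchg() rather than a read-then-write. A minimal sketch of
 * the pattern (illustrative only, not driver code):
 *
 *	if (atomic_cmpxchg(&adev->uvd.handles[i], 0, handle) == 0)
 *		// only the caller that saw the old value 0 owns slot i
 *
 * so two parsers racing on the same index can never both claim it.
 */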
836*4882a593Smuzhiyun 
837*4882a593Smuzhiyun /**
838*4882a593Smuzhiyun  * amdgpu_uvd_cs_pass2 - second parsing round
839*4882a593Smuzhiyun  *
840*4882a593Smuzhiyun  * @ctx: UVD parser context
841*4882a593Smuzhiyun  *
842*4882a593Smuzhiyun  * Patch buffer addresses, make sure buffer sizes are correct.
843*4882a593Smuzhiyun  */
844*4882a593Smuzhiyun static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
845*4882a593Smuzhiyun {
846*4882a593Smuzhiyun 	struct amdgpu_bo_va_mapping *mapping;
847*4882a593Smuzhiyun 	struct amdgpu_bo *bo;
848*4882a593Smuzhiyun 	uint32_t cmd;
849*4882a593Smuzhiyun 	uint64_t start, end;
850*4882a593Smuzhiyun 	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
851*4882a593Smuzhiyun 	int r;
852*4882a593Smuzhiyun 
853*4882a593Smuzhiyun 	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
854*4882a593Smuzhiyun 	if (r) {
855*4882a593Smuzhiyun 		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
856*4882a593Smuzhiyun 		return r;
857*4882a593Smuzhiyun 	}
858*4882a593Smuzhiyun 
859*4882a593Smuzhiyun 	start = amdgpu_bo_gpu_offset(bo);
860*4882a593Smuzhiyun 
861*4882a593Smuzhiyun 	end = (mapping->last + 1 - mapping->start);
862*4882a593Smuzhiyun 	end = end * AMDGPU_GPU_PAGE_SIZE + start;
863*4882a593Smuzhiyun 
864*4882a593Smuzhiyun 	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
865*4882a593Smuzhiyun 	start += addr;
866*4882a593Smuzhiyun 
867*4882a593Smuzhiyun 	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
868*4882a593Smuzhiyun 			    lower_32_bits(start));
869*4882a593Smuzhiyun 	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
870*4882a593Smuzhiyun 			    upper_32_bits(start));
871*4882a593Smuzhiyun 
872*4882a593Smuzhiyun 	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
873*4882a593Smuzhiyun 	if (cmd < 0x4) {
874*4882a593Smuzhiyun 		if ((end - start) < ctx->buf_sizes[cmd]) {
875*4882a593Smuzhiyun 			DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
876*4882a593Smuzhiyun 				  (unsigned)(end - start),
877*4882a593Smuzhiyun 				  ctx->buf_sizes[cmd]);
878*4882a593Smuzhiyun 			return -EINVAL;
879*4882a593Smuzhiyun 		}
880*4882a593Smuzhiyun 
881*4882a593Smuzhiyun 	} else if (cmd == 0x206) {
882*4882a593Smuzhiyun 		if ((end - start) < ctx->buf_sizes[4]) {
883*4882a593Smuzhiyun 			DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
884*4882a593Smuzhiyun 					  (unsigned)(end - start),
885*4882a593Smuzhiyun 					  ctx->buf_sizes[4]);
886*4882a593Smuzhiyun 			return -EINVAL;
887*4882a593Smuzhiyun 		}
888*4882a593Smuzhiyun 	} else if ((cmd != 0x100) && (cmd != 0x204)) {
889*4882a593Smuzhiyun 		DRM_ERROR("invalid UVD command %X!\n", cmd);
890*4882a593Smuzhiyun 		return -EINVAL;
891*4882a593Smuzhiyun 	}
892*4882a593Smuzhiyun 
893*4882a593Smuzhiyun 	if (!ctx->parser->adev->uvd.address_64_bit) {
894*4882a593Smuzhiyun 		if ((start >> 28) != ((end - 1) >> 28)) {
895*4882a593Smuzhiyun 			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
896*4882a593Smuzhiyun 				  start, end);
897*4882a593Smuzhiyun 			return -EINVAL;
898*4882a593Smuzhiyun 		}
899*4882a593Smuzhiyun 
900*4882a593Smuzhiyun 		if ((cmd == 0 || cmd == 0x3) &&
901*4882a593Smuzhiyun 		    (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
902*4882a593Smuzhiyun 			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
903*4882a593Smuzhiyun 				  start, end);
904*4882a593Smuzhiyun 			return -EINVAL;
905*4882a593Smuzhiyun 		}
906*4882a593Smuzhiyun 	}
907*4882a593Smuzhiyun 
908*4882a593Smuzhiyun 	if (cmd == 0) {
909*4882a593Smuzhiyun 		ctx->has_msg_cmd = true;
910*4882a593Smuzhiyun 		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
911*4882a593Smuzhiyun 		if (r)
912*4882a593Smuzhiyun 			return r;
913*4882a593Smuzhiyun 	} else if (!ctx->has_msg_cmd) {
914*4882a593Smuzhiyun 		DRM_ERROR("Message needed before other commands are send!\n");
915*4882a593Smuzhiyun 		return -EINVAL;
916*4882a593Smuzhiyun 	}
917*4882a593Smuzhiyun 
918*4882a593Smuzhiyun 	return 0;
919*4882a593Smuzhiyun }
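/*
 * Worked example for the patching above: with mapping->start == 0x100
 * (in GPU pages) and an IB address of 0x100040, subtracting
 * 0x100 * 4096 leaves the 0x40 byte offset into the BO, so the data0
 * and data1 dwords are rewritten to amdgpu_bo_gpu_offset(bo) + 0x40.
 */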
920*4882a593Smuzhiyun 
921*4882a593Smuzhiyun /**
922*4882a593Smuzhiyun  * amdgpu_uvd_cs_reg - parse register writes
923*4882a593Smuzhiyun  *
924*4882a593Smuzhiyun  * @ctx: UVD parser context
925*4882a593Smuzhiyun  * @cb: callback function
926*4882a593Smuzhiyun  *
927*4882a593Smuzhiyun  * Parse the register writes, call cb on each complete command.
928*4882a593Smuzhiyun  */
929*4882a593Smuzhiyun static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
930*4882a593Smuzhiyun 			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
931*4882a593Smuzhiyun {
932*4882a593Smuzhiyun 	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
933*4882a593Smuzhiyun 	int i, r;
934*4882a593Smuzhiyun 
935*4882a593Smuzhiyun 	ctx->idx++;
936*4882a593Smuzhiyun 	for (i = 0; i <= ctx->count; ++i) {
937*4882a593Smuzhiyun 		unsigned reg = ctx->reg + i;
938*4882a593Smuzhiyun 
939*4882a593Smuzhiyun 		if (ctx->idx >= ib->length_dw) {
940*4882a593Smuzhiyun 			DRM_ERROR("Register command after end of CS!\n");
941*4882a593Smuzhiyun 			return -EINVAL;
942*4882a593Smuzhiyun 		}
943*4882a593Smuzhiyun 
944*4882a593Smuzhiyun 		switch (reg) {
945*4882a593Smuzhiyun 		case mmUVD_GPCOM_VCPU_DATA0:
946*4882a593Smuzhiyun 			ctx->data0 = ctx->idx;
947*4882a593Smuzhiyun 			break;
948*4882a593Smuzhiyun 		case mmUVD_GPCOM_VCPU_DATA1:
949*4882a593Smuzhiyun 			ctx->data1 = ctx->idx;
950*4882a593Smuzhiyun 			break;
951*4882a593Smuzhiyun 		case mmUVD_GPCOM_VCPU_CMD:
952*4882a593Smuzhiyun 			r = cb(ctx);
953*4882a593Smuzhiyun 			if (r)
954*4882a593Smuzhiyun 				return r;
955*4882a593Smuzhiyun 			break;
956*4882a593Smuzhiyun 		case mmUVD_ENGINE_CNTL:
957*4882a593Smuzhiyun 		case mmUVD_NO_OP:
958*4882a593Smuzhiyun 			break;
959*4882a593Smuzhiyun 		default:
960*4882a593Smuzhiyun 			DRM_ERROR("Invalid reg 0x%X!\n", reg);
961*4882a593Smuzhiyun 			return -EINVAL;
962*4882a593Smuzhiyun 		}
963*4882a593Smuzhiyun 		ctx->idx++;
964*4882a593Smuzhiyun 	}
965*4882a593Smuzhiyun 	return 0;
966*4882a593Smuzhiyun }
967*4882a593Smuzhiyun 
968*4882a593Smuzhiyun /**
969*4882a593Smuzhiyun  * amdgpu_uvd_cs_packets - parse UVD packets
970*4882a593Smuzhiyun  *
971*4882a593Smuzhiyun  * @ctx: UVD parser context
972*4882a593Smuzhiyun  * @cb: callback function
973*4882a593Smuzhiyun  *
974*4882a593Smuzhiyun  * Parse the command stream packets.
975*4882a593Smuzhiyun  */
976*4882a593Smuzhiyun static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
977*4882a593Smuzhiyun 				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
978*4882a593Smuzhiyun {
979*4882a593Smuzhiyun 	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
980*4882a593Smuzhiyun 	int r;
981*4882a593Smuzhiyun 
982*4882a593Smuzhiyun 	for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) {
983*4882a593Smuzhiyun 		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
984*4882a593Smuzhiyun 		unsigned type = CP_PACKET_GET_TYPE(cmd);
985*4882a593Smuzhiyun 		switch (type) {
986*4882a593Smuzhiyun 		case PACKET_TYPE0:
987*4882a593Smuzhiyun 			ctx->reg = CP_PACKET0_GET_REG(cmd);
988*4882a593Smuzhiyun 			ctx->count = CP_PACKET_GET_COUNT(cmd);
989*4882a593Smuzhiyun 			r = amdgpu_uvd_cs_reg(ctx, cb);
990*4882a593Smuzhiyun 			if (r)
991*4882a593Smuzhiyun 				return r;
992*4882a593Smuzhiyun 			break;
993*4882a593Smuzhiyun 		case PACKET_TYPE2:
994*4882a593Smuzhiyun 			++ctx->idx;
995*4882a593Smuzhiyun 			break;
996*4882a593Smuzhiyun 		default:
997*4882a593Smuzhiyun 			DRM_ERROR("Unknown packet type %d !\n", type);
998*4882a593Smuzhiyun 			return -EINVAL;
999*4882a593Smuzhiyun 		}
1000*4882a593Smuzhiyun 	}
1001*4882a593Smuzhiyun 	return 0;
1002*4882a593Smuzhiyun }
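/*
 * Header decode, assuming the usual CP packet layout from cikd.h (type
 * in bits 31:30, count in bits 29:16, register offset in bits 15:0):
 * the type-0 header 0x000003c4 means "write count + 1 = 1 dword to
 * register 0x3c4" (UVD_GPCOM_VCPU_DATA0), which is exactly the shape
 * of packet that amdgpu_uvd_send_msg() emits below.
 */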
1003*4882a593Smuzhiyun 
1004*4882a593Smuzhiyun /**
1005*4882a593Smuzhiyun  * amdgpu_uvd_ring_parse_cs - UVD command submission parser
1006*4882a593Smuzhiyun  *
1007*4882a593Smuzhiyun  * @parser: Command submission parser context
 * @ib_idx: index of the indirect buffer to parse
1008*4882a593Smuzhiyun  *
1009*4882a593Smuzhiyun  * Parse the command stream, patch in addresses as necessary.
1010*4882a593Smuzhiyun  */
1011*4882a593Smuzhiyun int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
1012*4882a593Smuzhiyun {
1013*4882a593Smuzhiyun 	struct amdgpu_uvd_cs_ctx ctx = {};
1014*4882a593Smuzhiyun 	unsigned buf_sizes[] = {
1015*4882a593Smuzhiyun 		[0x00000000]	=	2048,
1016*4882a593Smuzhiyun 		[0x00000001]	=	0xFFFFFFFF,
1017*4882a593Smuzhiyun 		[0x00000002]	=	0xFFFFFFFF,
1018*4882a593Smuzhiyun 		[0x00000003]	=	2048,
1019*4882a593Smuzhiyun 		[0x00000004]	=	0xFFFFFFFF,
1020*4882a593Smuzhiyun 	};
1021*4882a593Smuzhiyun 	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
1022*4882a593Smuzhiyun 	int r;
1023*4882a593Smuzhiyun 
1024*4882a593Smuzhiyun 	parser->job->vm = NULL;
1025*4882a593Smuzhiyun 	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
1026*4882a593Smuzhiyun 
1027*4882a593Smuzhiyun 	if (ib->length_dw % 16) {
1028*4882a593Smuzhiyun 		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
1029*4882a593Smuzhiyun 			  ib->length_dw);
1030*4882a593Smuzhiyun 		return -EINVAL;
1031*4882a593Smuzhiyun 	}
1032*4882a593Smuzhiyun 
1033*4882a593Smuzhiyun 	ctx.parser = parser;
1034*4882a593Smuzhiyun 	ctx.buf_sizes = buf_sizes;
1035*4882a593Smuzhiyun 	ctx.ib_idx = ib_idx;
1036*4882a593Smuzhiyun 
1037*4882a593Smuzhiyun 	/* first round only required on chips without UVD 64 bit address support */
1038*4882a593Smuzhiyun 	if (!parser->adev->uvd.address_64_bit) {
1039*4882a593Smuzhiyun 		/* first round, make sure the buffers are actually in the UVD segment */
1040*4882a593Smuzhiyun 		r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
1041*4882a593Smuzhiyun 		if (r)
1042*4882a593Smuzhiyun 			return r;
1043*4882a593Smuzhiyun 	}
1044*4882a593Smuzhiyun 
1045*4882a593Smuzhiyun 	/* second round, patch buffer addresses into the command stream */
1046*4882a593Smuzhiyun 	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
1047*4882a593Smuzhiyun 	if (r)
1048*4882a593Smuzhiyun 		return r;
1049*4882a593Smuzhiyun 
1050*4882a593Smuzhiyun 	if (!ctx.has_msg_cmd) {
1051*4882a593Smuzhiyun 		DRM_ERROR("UVD-IBs need a msg command!\n");
1052*4882a593Smuzhiyun 		return -EINVAL;
1053*4882a593Smuzhiyun 	}
1054*4882a593Smuzhiyun 
1055*4882a593Smuzhiyun 	return 0;
1056*4882a593Smuzhiyun }
1057*4882a593Smuzhiyun 
1058*4882a593Smuzhiyun static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
1059*4882a593Smuzhiyun 			       bool direct, struct dma_fence **fence)
1060*4882a593Smuzhiyun {
1061*4882a593Smuzhiyun 	struct amdgpu_device *adev = ring->adev;
1062*4882a593Smuzhiyun 	struct dma_fence *f = NULL;
1063*4882a593Smuzhiyun 	struct amdgpu_job *job;
1064*4882a593Smuzhiyun 	struct amdgpu_ib *ib;
1065*4882a593Smuzhiyun 	uint32_t data[4];
1066*4882a593Smuzhiyun 	uint64_t addr;
1067*4882a593Smuzhiyun 	long r;
1068*4882a593Smuzhiyun 	int i;
1069*4882a593Smuzhiyun 	unsigned offset_idx = 0;
1070*4882a593Smuzhiyun 	unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
1071*4882a593Smuzhiyun 
1072*4882a593Smuzhiyun 	amdgpu_bo_kunmap(bo);
1073*4882a593Smuzhiyun 	amdgpu_bo_unpin(bo);
1074*4882a593Smuzhiyun 
1075*4882a593Smuzhiyun 	if (!ring->adev->uvd.address_64_bit) {
1076*4882a593Smuzhiyun 		struct ttm_operation_ctx ctx = { true, false };
1077*4882a593Smuzhiyun 
1078*4882a593Smuzhiyun 		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
1079*4882a593Smuzhiyun 		amdgpu_uvd_force_into_uvd_segment(bo);
1080*4882a593Smuzhiyun 		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1081*4882a593Smuzhiyun 		if (r)
1082*4882a593Smuzhiyun 			goto err;
1083*4882a593Smuzhiyun 	}
1084*4882a593Smuzhiyun 
1085*4882a593Smuzhiyun 	r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
1086*4882a593Smuzhiyun 				     AMDGPU_IB_POOL_DELAYED, &job);
1087*4882a593Smuzhiyun 	if (r)
1088*4882a593Smuzhiyun 		goto err;
1089*4882a593Smuzhiyun 
1090*4882a593Smuzhiyun 	if (adev->asic_type >= CHIP_VEGA10) {
1091*4882a593Smuzhiyun 		offset_idx = 1 + ring->me;
1092*4882a593Smuzhiyun 		offset[1] = adev->reg_offset[UVD_HWIP][0][1];
1093*4882a593Smuzhiyun 		offset[2] = adev->reg_offset[UVD_HWIP][1][1];
1094*4882a593Smuzhiyun 	}
1095*4882a593Smuzhiyun 
1096*4882a593Smuzhiyun 	data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
1097*4882a593Smuzhiyun 	data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
1098*4882a593Smuzhiyun 	data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
1099*4882a593Smuzhiyun 	data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun 	ib = &job->ibs[0];
1102*4882a593Smuzhiyun 	addr = amdgpu_bo_gpu_offset(bo);
1103*4882a593Smuzhiyun 	ib->ptr[0] = data[0];
1104*4882a593Smuzhiyun 	ib->ptr[1] = addr;
1105*4882a593Smuzhiyun 	ib->ptr[2] = data[1];
1106*4882a593Smuzhiyun 	ib->ptr[3] = addr >> 32;
1107*4882a593Smuzhiyun 	ib->ptr[4] = data[2];
1108*4882a593Smuzhiyun 	ib->ptr[5] = 0;
1109*4882a593Smuzhiyun 	for (i = 6; i < 16; i += 2) {
1110*4882a593Smuzhiyun 		ib->ptr[i] = data[3];
1111*4882a593Smuzhiyun 		ib->ptr[i+1] = 0;
1112*4882a593Smuzhiyun 	}
1113*4882a593Smuzhiyun 	ib->length_dw = 16;
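	/*
	 * The resulting 16-dword IB, for reference (register writes use the
	 * per-ASIC offsets selected above):
	 *   [0..1]  UVD_GPCOM_VCPU_DATA0 <- lower 32 bits of the msg address
	 *   [2..3]  UVD_GPCOM_VCPU_DATA1 <- upper 32 bits of the msg address
	 *   [4..5]  UVD_GPCOM_VCPU_CMD   <- 0
	 *   [6..15] UVD_NO_OP pairs as padding
	 */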
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun 	if (direct) {
1116*4882a593Smuzhiyun 		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
1117*4882a593Smuzhiyun 							true, false,
1118*4882a593Smuzhiyun 							msecs_to_jiffies(10));
1119*4882a593Smuzhiyun 		if (r == 0)
1120*4882a593Smuzhiyun 			r = -ETIMEDOUT;
1121*4882a593Smuzhiyun 		if (r < 0)
1122*4882a593Smuzhiyun 			goto err_free;
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun 		r = amdgpu_job_submit_direct(job, ring, &f);
1125*4882a593Smuzhiyun 		if (r)
1126*4882a593Smuzhiyun 			goto err_free;
1127*4882a593Smuzhiyun 	} else {
1128*4882a593Smuzhiyun 		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
1129*4882a593Smuzhiyun 				     AMDGPU_SYNC_ALWAYS,
1130*4882a593Smuzhiyun 				     AMDGPU_FENCE_OWNER_UNDEFINED);
1131*4882a593Smuzhiyun 		if (r)
1132*4882a593Smuzhiyun 			goto err_free;
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun 		r = amdgpu_job_submit(job, &adev->uvd.entity,
1135*4882a593Smuzhiyun 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
1136*4882a593Smuzhiyun 		if (r)
1137*4882a593Smuzhiyun 			goto err_free;
1138*4882a593Smuzhiyun 	}
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun 	amdgpu_bo_fence(bo, f, false);
1141*4882a593Smuzhiyun 	amdgpu_bo_unreserve(bo);
1142*4882a593Smuzhiyun 	amdgpu_bo_unref(&bo);
1143*4882a593Smuzhiyun 
1144*4882a593Smuzhiyun 	if (fence)
1145*4882a593Smuzhiyun 		*fence = dma_fence_get(f);
1146*4882a593Smuzhiyun 	dma_fence_put(f);
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 	return 0;
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun err_free:
1151*4882a593Smuzhiyun 	amdgpu_job_free(job);
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun err:
1154*4882a593Smuzhiyun 	amdgpu_bo_unreserve(bo);
1155*4882a593Smuzhiyun 	amdgpu_bo_unref(&bo);
1156*4882a593Smuzhiyun 	return r;
1157*4882a593Smuzhiyun }
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun /* multiple fence commands without any stream commands in between can
1160*4882a593Smuzhiyun    crash the vcpu so just try to emit a dummy create/destroy msg to
1161*4882a593Smuzhiyun    avoid this */
1162*4882a593Smuzhiyun int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
1163*4882a593Smuzhiyun 			      struct dma_fence **fence)
1164*4882a593Smuzhiyun {
1165*4882a593Smuzhiyun 	struct amdgpu_device *adev = ring->adev;
1166*4882a593Smuzhiyun 	struct amdgpu_bo *bo = NULL;
1167*4882a593Smuzhiyun 	uint32_t *msg;
1168*4882a593Smuzhiyun 	int r, i;
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun 	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
1171*4882a593Smuzhiyun 				      AMDGPU_GEM_DOMAIN_VRAM,
1172*4882a593Smuzhiyun 				      &bo, NULL, (void **)&msg);
1173*4882a593Smuzhiyun 	if (r)
1174*4882a593Smuzhiyun 		return r;
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 	/* stitch together a UVD create msg */
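	/*
	 * msg[1] is the message type (0 = create) and msg[2] the session
	 * handle; 0x780 (1920) and 0x440 (1088) apparently describe a dummy
	 * 1920x1088 stream, and the rest of the buffer is zeroed.
	 */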
1177*4882a593Smuzhiyun 	msg[0] = cpu_to_le32(0x00000de4);
1178*4882a593Smuzhiyun 	msg[1] = cpu_to_le32(0x00000000);
1179*4882a593Smuzhiyun 	msg[2] = cpu_to_le32(handle);
1180*4882a593Smuzhiyun 	msg[3] = cpu_to_le32(0x00000000);
1181*4882a593Smuzhiyun 	msg[4] = cpu_to_le32(0x00000000);
1182*4882a593Smuzhiyun 	msg[5] = cpu_to_le32(0x00000000);
1183*4882a593Smuzhiyun 	msg[6] = cpu_to_le32(0x00000000);
1184*4882a593Smuzhiyun 	msg[7] = cpu_to_le32(0x00000780);
1185*4882a593Smuzhiyun 	msg[8] = cpu_to_le32(0x00000440);
1186*4882a593Smuzhiyun 	msg[9] = cpu_to_le32(0x00000000);
1187*4882a593Smuzhiyun 	msg[10] = cpu_to_le32(0x01b37000);
1188*4882a593Smuzhiyun 	for (i = 11; i < 1024; ++i)
1189*4882a593Smuzhiyun 		msg[i] = cpu_to_le32(0x0);
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun 	return amdgpu_uvd_send_msg(ring, bo, true, fence);
1192*4882a593Smuzhiyun }
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
1195*4882a593Smuzhiyun 			       bool direct, struct dma_fence **fence)
1196*4882a593Smuzhiyun {
1197*4882a593Smuzhiyun 	struct amdgpu_device *adev = ring->adev;
1198*4882a593Smuzhiyun 	struct amdgpu_bo *bo = NULL;
1199*4882a593Smuzhiyun 	uint32_t *msg;
1200*4882a593Smuzhiyun 	int r, i;
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
1203*4882a593Smuzhiyun 				      AMDGPU_GEM_DOMAIN_VRAM,
1204*4882a593Smuzhiyun 				      &bo, NULL, (void **)&msg);
1205*4882a593Smuzhiyun 	if (r)
1206*4882a593Smuzhiyun 		return r;
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun 	/* stitch together a UVD destroy msg */
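	/*
	 * Same header layout as the create message, but msg[1] = 2 selects
	 * the destroy message type for the session handle in msg[2].
	 */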
1209*4882a593Smuzhiyun 	msg[0] = cpu_to_le32(0x00000de4);
1210*4882a593Smuzhiyun 	msg[1] = cpu_to_le32(0x00000002);
1211*4882a593Smuzhiyun 	msg[2] = cpu_to_le32(handle);
1212*4882a593Smuzhiyun 	msg[3] = cpu_to_le32(0x00000000);
1213*4882a593Smuzhiyun 	for (i = 4; i < 1024; ++i)
1214*4882a593Smuzhiyun 		msg[i] = cpu_to_le32(0x0);
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
1217*4882a593Smuzhiyun }
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
1220*4882a593Smuzhiyun {
1221*4882a593Smuzhiyun 	struct amdgpu_device *adev =
1222*4882a593Smuzhiyun 		container_of(work, struct amdgpu_device, uvd.idle_work.work);
1223*4882a593Smuzhiyun 	unsigned fences = 0, i, j;
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
1226*4882a593Smuzhiyun 		if (adev->uvd.harvest_config & (1 << i))
1227*4882a593Smuzhiyun 			continue;
1228*4882a593Smuzhiyun 		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
1229*4882a593Smuzhiyun 		for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
1230*4882a593Smuzhiyun 			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
1231*4882a593Smuzhiyun 		}
1232*4882a593Smuzhiyun 	}
1233*4882a593Smuzhiyun 
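	/*
	 * Power down the block only when no emitted fence is outstanding on
	 * any decode or encode ring; otherwise re-arm the idle timer.
	 */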
1234*4882a593Smuzhiyun 	if (fences == 0) {
1235*4882a593Smuzhiyun 		if (adev->pm.dpm_enabled) {
1236*4882a593Smuzhiyun 			amdgpu_dpm_enable_uvd(adev, false);
1237*4882a593Smuzhiyun 		} else {
1238*4882a593Smuzhiyun 			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
1239*4882a593Smuzhiyun 			/* shutdown the UVD block */
1240*4882a593Smuzhiyun 			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
1241*4882a593Smuzhiyun 							       AMD_PG_STATE_GATE);
1242*4882a593Smuzhiyun 			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
1243*4882a593Smuzhiyun 							       AMD_CG_STATE_GATE);
1244*4882a593Smuzhiyun 		}
1245*4882a593Smuzhiyun 	} else {
1246*4882a593Smuzhiyun 		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
1247*4882a593Smuzhiyun 	}
1248*4882a593Smuzhiyun }
1249*4882a593Smuzhiyun 
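/*
 * These two helpers are registered as the ring's .begin_use/.end_use
 * callbacks, so ring activity ungates UVD clocks/power up front and
 * re-arms the idle timer once the submission is done.
 */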
1250*4882a593Smuzhiyun void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
1251*4882a593Smuzhiyun {
1252*4882a593Smuzhiyun 	struct amdgpu_device *adev = ring->adev;
1253*4882a593Smuzhiyun 	bool set_clocks;
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun 	if (amdgpu_sriov_vf(adev))
1256*4882a593Smuzhiyun 		return;
1257*4882a593Smuzhiyun 
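	/*
	 * cancel_delayed_work_sync() returns false if the idle work was not
	 * pending, i.e. it may already have run and gated the block, so the
	 * clocks/power state has to be brought back up before submitting.
	 */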
1258*4882a593Smuzhiyun 	set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
1259*4882a593Smuzhiyun 	if (set_clocks) {
1260*4882a593Smuzhiyun 		if (adev->pm.dpm_enabled) {
1261*4882a593Smuzhiyun 			amdgpu_dpm_enable_uvd(adev, true);
1262*4882a593Smuzhiyun 		} else {
1263*4882a593Smuzhiyun 			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
1264*4882a593Smuzhiyun 			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
1265*4882a593Smuzhiyun 							       AMD_CG_STATE_UNGATE);
1266*4882a593Smuzhiyun 			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
1267*4882a593Smuzhiyun 							       AMD_PG_STATE_UNGATE);
1268*4882a593Smuzhiyun 		}
1269*4882a593Smuzhiyun 	}
1270*4882a593Smuzhiyun }
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
1273*4882a593Smuzhiyun {
1274*4882a593Smuzhiyun 	if (!amdgpu_sriov_vf(ring->adev))
1275*4882a593Smuzhiyun 		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
1276*4882a593Smuzhiyun }
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun /**
1279*4882a593Smuzhiyun  * amdgpu_uvd_ring_test_ib - test ib execution
1280*4882a593Smuzhiyun  *
1281*4882a593Smuzhiyun  * @ring: amdgpu_ring pointer
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
1282*4882a593Smuzhiyun  *
1283*4882a593Smuzhiyun  * Test if we can successfully execute an IB
1284*4882a593Smuzhiyun  */
1285*4882a593Smuzhiyun int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1286*4882a593Smuzhiyun {
1287*4882a593Smuzhiyun 	struct dma_fence *fence;
1288*4882a593Smuzhiyun 	long r;
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun 	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
1291*4882a593Smuzhiyun 	if (r)
1292*4882a593Smuzhiyun 		goto error;
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun 	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
1295*4882a593Smuzhiyun 	if (r)
1296*4882a593Smuzhiyun 		goto error;
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	r = dma_fence_wait_timeout(fence, false, timeout);
1299*4882a593Smuzhiyun 	if (r == 0)
1300*4882a593Smuzhiyun 		r = -ETIMEDOUT;
1301*4882a593Smuzhiyun 	else if (r > 0)
1302*4882a593Smuzhiyun 		r = 0;
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun 	dma_fence_put(fence);
1305*4882a593Smuzhiyun 
1306*4882a593Smuzhiyun error:
1307*4882a593Smuzhiyun 	return r;
1308*4882a593Smuzhiyun }
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun /**
1311*4882a593Smuzhiyun  * amdgpu_uvd_used_handles - returns used UVD handles
1312*4882a593Smuzhiyun  *
1313*4882a593Smuzhiyun  * @adev: amdgpu_device pointer
1314*4882a593Smuzhiyun  *
1315*4882a593Smuzhiyun  * Returns the number of UVD handles in use
1316*4882a593Smuzhiyun  */
1317*4882a593Smuzhiyun uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
1318*4882a593Smuzhiyun {
1319*4882a593Smuzhiyun 	unsigned i;
1320*4882a593Smuzhiyun 	uint32_t used_handles = 0;
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun 	for (i = 0; i < adev->uvd.max_handles; ++i) {
1323*4882a593Smuzhiyun 		/*
1324*4882a593Smuzhiyun 		 * Handles can be freed in any order, and not
1325*4882a593Smuzhiyun 		 * necessarily linear. So we need to count
1326*4882a593Smuzhiyun 		 * all non-zero handles.
1327*4882a593Smuzhiyun 		 */
1328*4882a593Smuzhiyun 		if (atomic_read(&adev->uvd.handles[i]))
1329*4882a593Smuzhiyun 			used_handles++;
1330*4882a593Smuzhiyun 	}
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun 	return used_handles;
1333*4882a593Smuzhiyun }
1334