/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun #include <linux/pci.h>
30*4882a593Smuzhiyun #include <linux/pm_runtime.h>
31*4882a593Smuzhiyun #include <linux/slab.h>
32*4882a593Smuzhiyun #include <linux/uaccess.h>
33*4882a593Smuzhiyun #include <linux/vga_switcheroo.h>
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun #include <drm/drm_agpsupport.h>
36*4882a593Smuzhiyun #include <drm/drm_fb_helper.h>
37*4882a593Smuzhiyun #include <drm/drm_file.h>
38*4882a593Smuzhiyun #include <drm/drm_ioctl.h>
39*4882a593Smuzhiyun #include <drm/radeon_drm.h>
40*4882a593Smuzhiyun
41*4882a593Smuzhiyun #include "radeon.h"
42*4882a593Smuzhiyun #include "radeon_asic.h"
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun #if defined(CONFIG_VGA_SWITCHEROO)
45*4882a593Smuzhiyun bool radeon_has_atpx(void);
46*4882a593Smuzhiyun #else
radeon_has_atpx(void)47*4882a593Smuzhiyun static inline bool radeon_has_atpx(void) { return false; }
48*4882a593Smuzhiyun #endif
49*4882a593Smuzhiyun
/**
 * radeon_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * It calls radeon_modeset_fini() to tear down the
 * displays, and radeon_device_fini() to tear down
 * the rest of the device (CP, writeback, etc.).
 */
void radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	/* Nothing to do if load never got as far as allocating rdev. */
	if (rdev == NULL)
		return;

	/* If the MMIO mapping was never set up, the hardware side was never
	 * initialized; only the rdev allocation itself needs freeing. */
	if (rdev->rmmio == NULL)
		goto done_free;

	/* On PX (hybrid-graphics) systems, wake the GPU and block further
	 * runtime suspend so teardown does not race with power management. */
	if (radeon_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	radeon_acpi_fini(rdev);

	/* Tear down the display side first, then the rest of the device. */
	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);

	/* Release AGP bookkeeping: the write-combine MTRR (if any) and the
	 * drm agp head allocation. kfree(NULL) is a no-op when !dev->agp. */
	if (dev->agp)
		arch_phys_wc_del(dev->agp->agp_mtrr);
	kfree(dev->agp);
	dev->agp = NULL;

done_free:
	kfree(rdev);
	dev->dev_private = NULL;
}
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun /**
92*4882a593Smuzhiyun * radeon_driver_load_kms - Main load function for KMS.
93*4882a593Smuzhiyun *
94*4882a593Smuzhiyun * @dev: drm dev pointer
95*4882a593Smuzhiyun * @flags: device flags
96*4882a593Smuzhiyun *
97*4882a593Smuzhiyun * This is the main load function for KMS (all asics).
98*4882a593Smuzhiyun * It calls radeon_device_init() to set up the non-display
99*4882a593Smuzhiyun * parts of the chip (asic init, CP, writeback, etc.), and
100*4882a593Smuzhiyun * radeon_modeset_init() to set up the display parts
101*4882a593Smuzhiyun * (crtcs, encoders, hotplug detect, etc.).
102*4882a593Smuzhiyun * Returns 0 on success, error on failure.
103*4882a593Smuzhiyun */
radeon_driver_load_kms(struct drm_device * dev,unsigned long flags)104*4882a593Smuzhiyun int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
105*4882a593Smuzhiyun {
106*4882a593Smuzhiyun struct radeon_device *rdev;
107*4882a593Smuzhiyun int r, acpi_status;
108*4882a593Smuzhiyun
109*4882a593Smuzhiyun rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
110*4882a593Smuzhiyun if (rdev == NULL) {
111*4882a593Smuzhiyun return -ENOMEM;
112*4882a593Smuzhiyun }
113*4882a593Smuzhiyun dev->dev_private = (void *)rdev;
114*4882a593Smuzhiyun
115*4882a593Smuzhiyun /* update BUS flag */
116*4882a593Smuzhiyun if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) {
117*4882a593Smuzhiyun flags |= RADEON_IS_AGP;
118*4882a593Smuzhiyun } else if (pci_is_pcie(dev->pdev)) {
119*4882a593Smuzhiyun flags |= RADEON_IS_PCIE;
120*4882a593Smuzhiyun } else {
121*4882a593Smuzhiyun flags |= RADEON_IS_PCI;
122*4882a593Smuzhiyun }
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun if ((radeon_runtime_pm != 0) &&
125*4882a593Smuzhiyun radeon_has_atpx() &&
126*4882a593Smuzhiyun ((flags & RADEON_IS_IGP) == 0) &&
127*4882a593Smuzhiyun !pci_is_thunderbolt_attached(dev->pdev))
128*4882a593Smuzhiyun flags |= RADEON_IS_PX;
129*4882a593Smuzhiyun
130*4882a593Smuzhiyun /* radeon_device_init should report only fatal error
131*4882a593Smuzhiyun * like memory allocation failure or iomapping failure,
132*4882a593Smuzhiyun * or memory manager initialization failure, it must
133*4882a593Smuzhiyun * properly initialize the GPU MC controller and permit
134*4882a593Smuzhiyun * VRAM allocation
135*4882a593Smuzhiyun */
136*4882a593Smuzhiyun r = radeon_device_init(rdev, dev, dev->pdev, flags);
137*4882a593Smuzhiyun if (r) {
138*4882a593Smuzhiyun dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
139*4882a593Smuzhiyun goto out;
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun
142*4882a593Smuzhiyun /* Again modeset_init should fail only on fatal error
143*4882a593Smuzhiyun * otherwise it should provide enough functionalities
144*4882a593Smuzhiyun * for shadowfb to run
145*4882a593Smuzhiyun */
146*4882a593Smuzhiyun r = radeon_modeset_init(rdev);
147*4882a593Smuzhiyun if (r)
148*4882a593Smuzhiyun dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
149*4882a593Smuzhiyun
150*4882a593Smuzhiyun /* Call ACPI methods: require modeset init
151*4882a593Smuzhiyun * but failure is not fatal
152*4882a593Smuzhiyun */
153*4882a593Smuzhiyun if (!r) {
154*4882a593Smuzhiyun acpi_status = radeon_acpi_init(rdev);
155*4882a593Smuzhiyun if (acpi_status)
156*4882a593Smuzhiyun dev_dbg(&dev->pdev->dev,
157*4882a593Smuzhiyun "Error during ACPI methods call\n");
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun if (radeon_is_px(dev)) {
161*4882a593Smuzhiyun dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
162*4882a593Smuzhiyun pm_runtime_use_autosuspend(dev->dev);
163*4882a593Smuzhiyun pm_runtime_set_autosuspend_delay(dev->dev, 5000);
164*4882a593Smuzhiyun pm_runtime_set_active(dev->dev);
165*4882a593Smuzhiyun pm_runtime_allow(dev->dev);
166*4882a593Smuzhiyun pm_runtime_mark_last_busy(dev->dev);
167*4882a593Smuzhiyun pm_runtime_put_autosuspend(dev->dev);
168*4882a593Smuzhiyun }
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun out:
171*4882a593Smuzhiyun if (r)
172*4882a593Smuzhiyun radeon_driver_unload_kms(dev);
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun
175*4882a593Smuzhiyun return r;
176*4882a593Smuzhiyun }
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun /**
179*4882a593Smuzhiyun * radeon_set_filp_rights - Set filp right.
180*4882a593Smuzhiyun *
181*4882a593Smuzhiyun * @dev: drm dev pointer
182*4882a593Smuzhiyun * @owner: drm file
183*4882a593Smuzhiyun * @applier: drm file
184*4882a593Smuzhiyun * @value: value
185*4882a593Smuzhiyun *
186*4882a593Smuzhiyun * Sets the filp rights for the device (all asics).
187*4882a593Smuzhiyun */
radeon_set_filp_rights(struct drm_device * dev,struct drm_file ** owner,struct drm_file * applier,uint32_t * value)188*4882a593Smuzhiyun static void radeon_set_filp_rights(struct drm_device *dev,
189*4882a593Smuzhiyun struct drm_file **owner,
190*4882a593Smuzhiyun struct drm_file *applier,
191*4882a593Smuzhiyun uint32_t *value)
192*4882a593Smuzhiyun {
193*4882a593Smuzhiyun struct radeon_device *rdev = dev->dev_private;
194*4882a593Smuzhiyun
195*4882a593Smuzhiyun mutex_lock(&rdev->gem.mutex);
196*4882a593Smuzhiyun if (*value == 1) {
197*4882a593Smuzhiyun /* wants rights */
198*4882a593Smuzhiyun if (!*owner)
199*4882a593Smuzhiyun *owner = applier;
200*4882a593Smuzhiyun } else if (*value == 0) {
201*4882a593Smuzhiyun /* revokes rights */
202*4882a593Smuzhiyun if (*owner == applier)
203*4882a593Smuzhiyun *owner = NULL;
204*4882a593Smuzhiyun }
205*4882a593Smuzhiyun *value = *owner == applier ? 1 : 0;
206*4882a593Smuzhiyun mutex_unlock(&rdev->gem.mutex);
207*4882a593Smuzhiyun }
208*4882a593Smuzhiyun
/*
 * Userspace get information ioctl
 */
/**
 * radeon_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers.  Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_info *info = data;
	struct radeon_mode_info *minfo = &rdev->mode_info;
	uint32_t *value, value_tmp, *value_ptr, value_size;
	uint64_t value64;
	struct drm_crtc *crtc;
	int i, found;

	/* info->value is a user-space pointer.  Most requests return a
	 * single 32-bit word, so 'value' defaults to &value_tmp with
	 * value_size = 4; cases that return 64 bits or an array repoint
	 * 'value' and adjust 'value_size'.  A single copy_to_user at the
	 * end of the function writes the result back. */
	value_ptr = (uint32_t *)((unsigned long)info->value);
	value = &value_tmp;
	value_size = sizeof(uint32_t);

	switch (info->request) {
	case RADEON_INFO_DEVICE_ID:
		*value = dev->pdev->device;
		break;
	case RADEON_INFO_NUM_GB_PIPES:
		*value = rdev->num_gb_pipes;
		break;
	case RADEON_INFO_NUM_Z_PIPES:
		*value = rdev->num_z_pipes;
		break;
	case RADEON_INFO_ACCEL_WORKING:
		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
			*value = false;
		else
			*value = rdev->accel_working;
		break;
	case RADEON_INFO_CRTC_FROM_ID:
		/* input: drm crtc object id; output: hardware crtc index */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == *value) {
				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
				*value = radeon_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
			return -EINVAL;
		}
		break;
	case RADEON_INFO_ACCEL_WORKING2:
		/* 3/2 on Hawaii encodes whether the new firmware is in use */
		if (rdev->family == CHIP_HAWAII) {
			if (rdev->accel_working) {
				if (rdev->new_fw)
					*value = 3;
				else
					*value = 2;
			} else {
				*value = 0;
			}
		} else {
			*value = rdev->accel_working;
		}
		break;
	case RADEON_INFO_TILING_CONFIG:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.tile_config;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.tile_config;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.tile_config;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.tile_config;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.tile_config;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.tile_config;
		else {
			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_WANT_HYPERZ:
		/* The "value" here is both an input and output parameter.
		 * If the input value is 1, filp requests hyper-z access.
		 * If the input value is 0, filp revokes its hyper-z access.
		 *
		 * When returning, the value is 1 if filp owns hyper-z access,
		 * 0 otherwise. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
		break;
	case RADEON_INFO_WANT_CMASK:
		/* The same logic as Hyper-Z. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
		break;
	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
		/* return clock value in KHz */
		if (rdev->asic->get_xclk)
			*value = radeon_get_xclk(rdev) * 10;
		else
			*value = rdev->clock.spll.reference_freq * 10;
		break;
	case RADEON_INFO_NUM_BACKENDS:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_backends_per_se *
				rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_backends_per_se *
				rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_backends_per_se *
				rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_backends;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_backends;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_backends;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_NUM_TILE_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_tile_pipes;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_tile_pipes;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_tile_pipes;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_tile_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_tile_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_tile_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_FUSION_GART_WORKING:
		*value = 1;
		break;
	case RADEON_INFO_BACKEND_MAP:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.backend_map;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.backend_map;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.backend_map;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.backend_map;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.backend_map;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.backend_map;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_VA_START:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_VA_RESERVED_SIZE;
		break;
	case RADEON_INFO_IB_VM_MAX_SIZE:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_IB_VM_MAX_SIZE;
		break;
	case RADEON_INFO_MAX_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_cu_per_sh;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_cu_per_sh;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_pipes_per_simd;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_TIMESTAMP:
		if (rdev->family < CHIP_R600) {
			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
			return -EINVAL;
		}
		/* 64-bit result: repoint 'value' at value64 */
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = radeon_get_gpu_clock_counter(rdev);
		break;
	case RADEON_INFO_MAX_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.num_ses;
		else
			*value = 1;
		break;
	case RADEON_INFO_MAX_SH_PER_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_sh_per_se;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_sh_per_se;
		else
			return -EINVAL;
		break;
	case RADEON_INFO_FASTFB_WORKING:
		*value = rdev->fastfb_working;
		break;
	case RADEON_INFO_RING_WORKING:
		/* input selects the ring; output is its ready flag */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		switch (*value) {
		case RADEON_CS_RING_GFX:
		case RADEON_CS_RING_COMPUTE:
			*value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
			break;
		case RADEON_CS_RING_DMA:
			*value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
			*value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
			break;
		case RADEON_CS_RING_UVD:
			*value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
			break;
		case RADEON_CS_RING_VCE:
			*value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready;
			break;
		default:
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_TILE_MODE_ARRAY:
		/* array result: point 'value' at the config table directly */
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else if (rdev->family >= CHIP_TAHITI) {
			value = rdev->config.si.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else {
			DRM_DEBUG_KMS("tile mode array is si+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.macrotile_mode_array;
			value_size = sizeof(uint32_t)*16;
		} else {
			DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_CP_DMA_COMPUTE:
		*value = 1;
		break;
	case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
		if (rdev->family >= CHIP_BONAIRE) {
			*value = rdev->config.cik.backend_enable_mask;
		} else if (rdev->family >= CHIP_TAHITI) {
			*value = rdev->config.si.backend_enable_mask;
		} else {
			DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_MAX_SCLK:
		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
		    rdev->pm.dpm_enabled)
			*value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
		else
			*value = rdev->pm.default_sclk * 10;
		break;
	case RADEON_INFO_VCE_FW_VERSION:
		*value = rdev->vce.fw_version;
		break;
	case RADEON_INFO_VCE_FB_VERSION:
		*value = rdev->vce.fb_version;
		break;
	case RADEON_INFO_NUM_BYTES_MOVED:
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->num_bytes_moved);
		break;
	case RADEON_INFO_VRAM_USAGE:
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->vram_usage);
		break;
	case RADEON_INFO_GTT_USAGE:
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->gtt_usage);
		break;
	case RADEON_INFO_ACTIVE_CU_COUNT:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.active_cus;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.active_cus;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.active_simds;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.active_simds;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.active_simds;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.active_simds;
		else
			*value = 1;
		break;
	case RADEON_INFO_CURRENT_GPU_TEMP:
		/* get temperature in millidegrees C */
		if (rdev->asic->pm.get_temperature)
			*value = radeon_get_temperature(rdev);
		else
			*value = 0;
		break;
	case RADEON_INFO_CURRENT_GPU_SCLK:
		/* get sclk in Mhz */
		if (rdev->pm.dpm_enabled)
			*value = radeon_dpm_get_current_sclk(rdev) / 100;
		else
			*value = rdev->pm.current_sclk / 100;
		break;
	case RADEON_INFO_CURRENT_GPU_MCLK:
		/* get mclk in Mhz */
		if (rdev->pm.dpm_enabled)
			*value = radeon_dpm_get_current_mclk(rdev) / 100;
		else
			*value = rdev->pm.current_mclk / 100;
		break;
	case RADEON_INFO_READ_REG:
		/* input: register offset; output: register value
		 * (only registers on the allowed list may be read) */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (radeon_get_allowed_info_register(rdev, *value, value))
			return -EINVAL;
		break;
	case RADEON_INFO_VA_UNMAP_WORKING:
		*value = true;
		break;
	case RADEON_INFO_GPU_RESET_COUNTER:
		*value = atomic_read(&rdev->gpu_reset_counter);
		break;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
		return -EINVAL;
	}
	/* single write-back for all cases; value/value_size were set above */
	if (copy_to_user(value_ptr, (char*)value, value_size)) {
		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
		return -EFAULT;
	}
	return 0;
}
607*4882a593Smuzhiyun
608*4882a593Smuzhiyun
/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * radeon_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
	/* Restore the fbdev console mode before any delayed GPU switch. */
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}
624*4882a593Smuzhiyun
/**
 * radeon_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv;
	struct radeon_vm *vm;
	int r;

	file_priv->driver_priv = NULL;

	/* keep the device awake for the whole open path */
	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		/* get_sync bumps the usage count even on failure,
		 * so it must still be dropped here */
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {

		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
		if (unlikely(!fpriv)) {
			r = -ENOMEM;
			goto err_suspend;
		}

		if (rdev->accel_working) {
			vm = &fpriv->vm;
			r = radeon_vm_init(rdev, vm);
			if (r)
				goto err_fpriv;

			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (r)
				goto err_vm_fini;

			/* map the ib pool buffer read only into
			 * virtual address space */
			vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
							rdev->ring_tmp_bo.bo);
			/* NOTE(review): no radeon_bo_unreserve() appears on
			 * this error path or after set_addr below; presumably
			 * radeon_vm_bo_set_addr() drops the reservation
			 * itself — verify against its contract, otherwise the
			 * !ib_bo_va branch leaks the reservation. */
			if (!vm->ib_bo_va) {
				r = -ENOMEM;
				goto err_vm_fini;
			}

			r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
						  RADEON_VA_IB_OFFSET,
						  RADEON_VM_PAGE_READABLE |
						  RADEON_VM_PAGE_SNOOPED);
			if (r)
				goto err_vm_fini;
		}
		/* publish the per-file state only once fully initialized */
		file_priv->driver_priv = fpriv;
	}

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return 0;

	/* unwind in reverse order of acquisition */
err_vm_fini:
	radeon_vm_fini(rdev, vm);
err_fpriv:
	kfree(fpriv);

err_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return r;
}
701*4882a593Smuzhiyun
/**
 * radeon_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device close, tear down hyperz and cmask filps on r1xx-r5xx
 * (all asics). And tear down vm on cayman+ (all asics).
 */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* wake the hw up for the teardown work below */
	pm_runtime_get_sync(dev->dev);

	/* drop exclusive hyperz/cmask ownership if this client held it */
	mutex_lock(&rdev->gem.mutex);
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
	mutex_unlock(&rdev->gem.mutex);

	/* release any UVD/VCE session handles this client left open */
	radeon_uvd_free_handles(rdev, file_priv);
	radeon_vce_free_handles(rdev, file_priv);

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_vm *vm = &fpriv->vm;
		int r;

		if (rdev->accel_working) {
			/* remove the ib pool mapping created at open time;
			 * best effort — skipped if the reserve fails */
			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (!r) {
				if (vm->ib_bo_va)
					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
			}
			radeon_vm_fini(rdev, vm);
		}

		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
750*4882a593Smuzhiyun
751*4882a593Smuzhiyun /*
752*4882a593Smuzhiyun * VBlank related functions.
753*4882a593Smuzhiyun */
754*4882a593Smuzhiyun /**
755*4882a593Smuzhiyun * radeon_get_vblank_counter_kms - get frame count
756*4882a593Smuzhiyun *
757*4882a593Smuzhiyun * @crtc: crtc to get the frame count from
758*4882a593Smuzhiyun *
759*4882a593Smuzhiyun * Gets the frame count on the requested crtc (all asics).
760*4882a593Smuzhiyun * Returns frame count on success, -EINVAL on failure.
761*4882a593Smuzhiyun */
radeon_get_vblank_counter_kms(struct drm_crtc * crtc)762*4882a593Smuzhiyun u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc)
763*4882a593Smuzhiyun {
764*4882a593Smuzhiyun struct drm_device *dev = crtc->dev;
765*4882a593Smuzhiyun unsigned int pipe = crtc->index;
766*4882a593Smuzhiyun int vpos, hpos, stat;
767*4882a593Smuzhiyun u32 count;
768*4882a593Smuzhiyun struct radeon_device *rdev = dev->dev_private;
769*4882a593Smuzhiyun
770*4882a593Smuzhiyun if (pipe >= rdev->num_crtc) {
771*4882a593Smuzhiyun DRM_ERROR("Invalid crtc %u\n", pipe);
772*4882a593Smuzhiyun return -EINVAL;
773*4882a593Smuzhiyun }
774*4882a593Smuzhiyun
775*4882a593Smuzhiyun /* The hw increments its frame counter at start of vsync, not at start
776*4882a593Smuzhiyun * of vblank, as is required by DRM core vblank counter handling.
777*4882a593Smuzhiyun * Cook the hw count here to make it appear to the caller as if it
778*4882a593Smuzhiyun * incremented at start of vblank. We measure distance to start of
779*4882a593Smuzhiyun * vblank in vpos. vpos therefore will be >= 0 between start of vblank
780*4882a593Smuzhiyun * and start of vsync, so vpos >= 0 means to bump the hw frame counter
781*4882a593Smuzhiyun * result by 1 to give the proper appearance to caller.
782*4882a593Smuzhiyun */
783*4882a593Smuzhiyun if (rdev->mode_info.crtcs[pipe]) {
784*4882a593Smuzhiyun /* Repeat readout if needed to provide stable result if
785*4882a593Smuzhiyun * we cross start of vsync during the queries.
786*4882a593Smuzhiyun */
787*4882a593Smuzhiyun do {
788*4882a593Smuzhiyun count = radeon_get_vblank_counter(rdev, pipe);
789*4882a593Smuzhiyun /* Ask radeon_get_crtc_scanoutpos to return vpos as
790*4882a593Smuzhiyun * distance to start of vblank, instead of regular
791*4882a593Smuzhiyun * vertical scanout pos.
792*4882a593Smuzhiyun */
793*4882a593Smuzhiyun stat = radeon_get_crtc_scanoutpos(
794*4882a593Smuzhiyun dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
795*4882a593Smuzhiyun &vpos, &hpos, NULL, NULL,
796*4882a593Smuzhiyun &rdev->mode_info.crtcs[pipe]->base.hwmode);
797*4882a593Smuzhiyun } while (count != radeon_get_vblank_counter(rdev, pipe));
798*4882a593Smuzhiyun
799*4882a593Smuzhiyun if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
800*4882a593Smuzhiyun (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
801*4882a593Smuzhiyun DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
802*4882a593Smuzhiyun }
803*4882a593Smuzhiyun else {
804*4882a593Smuzhiyun DRM_DEBUG_VBL("crtc %u: dist from vblank start %d\n",
805*4882a593Smuzhiyun pipe, vpos);
806*4882a593Smuzhiyun
807*4882a593Smuzhiyun /* Bump counter if we are at >= leading edge of vblank,
808*4882a593Smuzhiyun * but before vsync where vpos would turn negative and
809*4882a593Smuzhiyun * the hw counter really increments.
810*4882a593Smuzhiyun */
811*4882a593Smuzhiyun if (vpos >= 0)
812*4882a593Smuzhiyun count++;
813*4882a593Smuzhiyun }
814*4882a593Smuzhiyun }
815*4882a593Smuzhiyun else {
816*4882a593Smuzhiyun /* Fallback to use value as is. */
817*4882a593Smuzhiyun count = radeon_get_vblank_counter(rdev, pipe);
818*4882a593Smuzhiyun DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
819*4882a593Smuzhiyun }
820*4882a593Smuzhiyun
821*4882a593Smuzhiyun return count;
822*4882a593Smuzhiyun }
823*4882a593Smuzhiyun
824*4882a593Smuzhiyun /**
825*4882a593Smuzhiyun * radeon_enable_vblank_kms - enable vblank interrupt
826*4882a593Smuzhiyun *
827*4882a593Smuzhiyun * @crtc: crtc to enable vblank interrupt for
828*4882a593Smuzhiyun *
829*4882a593Smuzhiyun * Enable the interrupt on the requested crtc (all asics).
830*4882a593Smuzhiyun * Returns 0 on success, -EINVAL on failure.
831*4882a593Smuzhiyun */
radeon_enable_vblank_kms(struct drm_crtc * crtc)832*4882a593Smuzhiyun int radeon_enable_vblank_kms(struct drm_crtc *crtc)
833*4882a593Smuzhiyun {
834*4882a593Smuzhiyun struct drm_device *dev = crtc->dev;
835*4882a593Smuzhiyun unsigned int pipe = crtc->index;
836*4882a593Smuzhiyun struct radeon_device *rdev = dev->dev_private;
837*4882a593Smuzhiyun unsigned long irqflags;
838*4882a593Smuzhiyun int r;
839*4882a593Smuzhiyun
840*4882a593Smuzhiyun if (pipe >= rdev->num_crtc) {
841*4882a593Smuzhiyun DRM_ERROR("Invalid crtc %d\n", pipe);
842*4882a593Smuzhiyun return -EINVAL;
843*4882a593Smuzhiyun }
844*4882a593Smuzhiyun
845*4882a593Smuzhiyun spin_lock_irqsave(&rdev->irq.lock, irqflags);
846*4882a593Smuzhiyun rdev->irq.crtc_vblank_int[pipe] = true;
847*4882a593Smuzhiyun r = radeon_irq_set(rdev);
848*4882a593Smuzhiyun spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
849*4882a593Smuzhiyun return r;
850*4882a593Smuzhiyun }
851*4882a593Smuzhiyun
852*4882a593Smuzhiyun /**
853*4882a593Smuzhiyun * radeon_disable_vblank_kms - disable vblank interrupt
854*4882a593Smuzhiyun *
855*4882a593Smuzhiyun * @crtc: crtc to disable vblank interrupt for
856*4882a593Smuzhiyun *
857*4882a593Smuzhiyun * Disable the interrupt on the requested crtc (all asics).
858*4882a593Smuzhiyun */
radeon_disable_vblank_kms(struct drm_crtc * crtc)859*4882a593Smuzhiyun void radeon_disable_vblank_kms(struct drm_crtc *crtc)
860*4882a593Smuzhiyun {
861*4882a593Smuzhiyun struct drm_device *dev = crtc->dev;
862*4882a593Smuzhiyun unsigned int pipe = crtc->index;
863*4882a593Smuzhiyun struct radeon_device *rdev = dev->dev_private;
864*4882a593Smuzhiyun unsigned long irqflags;
865*4882a593Smuzhiyun
866*4882a593Smuzhiyun if (pipe >= rdev->num_crtc) {
867*4882a593Smuzhiyun DRM_ERROR("Invalid crtc %d\n", pipe);
868*4882a593Smuzhiyun return;
869*4882a593Smuzhiyun }
870*4882a593Smuzhiyun
871*4882a593Smuzhiyun spin_lock_irqsave(&rdev->irq.lock, irqflags);
872*4882a593Smuzhiyun rdev->irq.crtc_vblank_int[pipe] = false;
873*4882a593Smuzhiyun radeon_irq_set(rdev);
874*4882a593Smuzhiyun spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
875*4882a593Smuzhiyun }
876*4882a593Smuzhiyun
/* Ioctl dispatch table for the radeon KMS driver. The first group are
 * legacy (pre-KMS) ioctl numbers, all wired to drm_invalid_op so old
 * userspace gets a clean error; only the entries after the KMS marker
 * are functional. */
const struct drm_ioctl_desc radeon_ioctls_kms[] = {
	/* legacy ioctls - unconditionally rejected via drm_invalid_op */
	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_START, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_RESET, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SWAP, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDICES, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FLIP, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FREE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH),
	/* KMS */
	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
/* number of entries in radeon_ioctls_kms, exported for the drm_driver */
int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);
923