xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/nouveau/nouveau_vga.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: MIT
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>

#include "nouveau_drv.h"
#include "nouveau_acpi.h"
#include "nouveau_fbcon.h"
#include "nouveau_vga.h"

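/*
 * VGA arbiter decode callback: program the chipset-specific register that
 * enables or disables legacy VGA decoding, then report which legacy/normal
 * I/O and memory resources the device decodes in the requested state.
 */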
static unsigned int
nouveau_vga_set_decode(void *priv, bool state)
{
	struct nouveau_drm *drm = nouveau_drm(priv);
	struct nvif_object *device = &drm->client.device.object;

	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE &&
	    drm->client.device.info.chipset >= 0x4c)
		nvif_wr32(device, 0x088060, state);
	else
	if (drm->client.device.info.chipset >= 0x40)
		nvif_wr32(device, 0x088054, state);
	else
		nvif_wr32(device, 0x001854, state);

	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

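/*
 * vga_switcheroo power-state callback: run the regular suspend/resume paths
 * to switch the GPU off or on, updating dev->switch_power_state around the
 * transition. Explicit power-off requests are ignored on Optimus/v1 _DSM
 * systems, where power-down is handled through runtime PM instead.
 */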
static void
nouveau_switcheroo_set_state(struct pci_dev *pdev,
			     enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if ((nouveau_is_optimus() || nouveau_is_v1_dsm()) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_err("VGA switcheroo: switched nouveau on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		nouveau_pmops_resume(&pdev->dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("VGA switcheroo: switched nouveau off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		nouveau_switcheroo_optimus_dsm();
		nouveau_pmops_suspend(&pdev->dev);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

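/* Re-detect display outputs after a switch via the DRM fbdev helper. */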
static void
nouveau_switcheroo_reprobe(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	drm_fb_helper_output_poll_changed(dev);
}

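/* Switching away is only allowed while no DRM clients have the device open. */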
static bool
nouveau_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}

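/* Client callbacks handed to vga_switcheroo when the GPU is registered. */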
static const struct vga_switcheroo_client_ops
nouveau_switcheroo_ops = {
	.set_gpu_state = nouveau_switcheroo_set_state,
	.reprobe = nouveau_switcheroo_reprobe,
	.can_switch = nouveau_switcheroo_can_switch,
};

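/*
 * Register the GPU with the VGA arbiter and, unless it is a Thunderbolt
 * eGPU, with vga_switcheroo. On runtime-PM-capable v1 _DSM (non-Optimus)
 * systems the switcheroo power domain is hooked up as well.
 */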
void
nouveau_vga_init(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	bool runtime = nouveau_pmops_runtime();

	/* only relevant for PCI devices */
	if (!dev->pdev)
		return;

	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);

	/* don't register Thunderbolt eGPU with vga_switcheroo */
	if (pci_is_thunderbolt_attached(dev->pdev))
		return;

	vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);

	if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
		vga_switcheroo_init_domain_pm_ops(drm->dev->dev, &drm->vga_pm_domain);
}

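/*
 * Undo nouveau_vga_init(): drop the VGA arbiter callback and, for
 * non-Thunderbolt devices, the vga_switcheroo client and power domain.
 */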
void
nouveau_vga_fini(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	bool runtime = nouveau_pmops_runtime();

	/* only relevant for PCI devices */
	if (!dev->pdev)
		return;

	vga_client_register(dev->pdev, NULL, NULL, NULL);

	if (pci_is_thunderbolt_attached(dev->pdev))
		return;

	vga_switcheroo_unregister_client(dev->pdev);
	if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
		vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
}


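/*
 * Called on last close of the DRM device: perform any vga_switcheroo switch
 * that was deferred while userspace still had the device open.
 */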
void
nouveau_vga_lastclose(struct drm_device *dev)
{
	vga_switcheroo_process_delayed_switch();
}