/*
 * vgaarb.c: Implements the VGA arbitration. For details refer to
 * Documentation/gpu/vgaarbiter.rst
 *
 *
 * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
 * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS
 * IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) "vgaarb: " fmt

#define vgaarb_dbg(dev, fmt, arg...)	dev_dbg(dev, "vgaarb: " fmt, ##arg)
#define vgaarb_info(dev, fmt, arg...)	dev_info(dev, "vgaarb: " fmt, ##arg)
#define vgaarb_err(dev, fmt, arg...)	dev_err(dev, "vgaarb: " fmt, ##arg)

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/poll.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/screen_info.h>
#include <linux/vt.h>
#include <linux/console.h>

#include <linux/uaccess.h>

#include <linux/vgaarb.h>

static void vga_arbiter_notify_clients(void);
/*
 * We keep a list of all vga devices in the system to speed
 * up the various operations of the arbiter
 */
struct vga_device {
	struct list_head list;
	struct pci_dev *pdev;
	unsigned int decodes;		/* what it decodes */
	unsigned int owns;		/* what it owns */
	unsigned int locks;		/* what it locks */
	unsigned int io_lock_cnt;	/* legacy IO lock count */
	unsigned int mem_lock_cnt;	/* legacy MEM lock count */
	unsigned int io_norm_cnt;	/* normal IO count */
	unsigned int mem_norm_cnt;	/* normal MEM count */
	bool bridge_has_one_vga;
	/* allow IRQ enable/disable hook */
	void *cookie;
	void (*irq_set_state)(void *cookie, bool enable);
	unsigned int (*set_vga_decode)(void *cookie, bool decode);
};

static LIST_HEAD(vga_list);
static int vga_count, vga_decode_count;
static bool vga_arbiter_used;
static DEFINE_SPINLOCK(vga_lock);
static DECLARE_WAIT_QUEUE_HEAD(vga_wait_queue);


static const char *vga_iostate_to_str(unsigned int iostate)
{
	/* Ignore VGA_RSRC_IO and VGA_RSRC_MEM */
	iostate &= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
	switch (iostate) {
	case VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM:
		return "io+mem";
	case VGA_RSRC_LEGACY_IO:
		return "io";
	case VGA_RSRC_LEGACY_MEM:
		return "mem";
	}
	return "none";
}

static int vga_str_to_iostate(char *buf, int str_size, int *io_state)
{
	/* we could in theory hand out locks on IO and mem
	 * separately to userspace but it can cause deadlocks */
	if (strncmp(buf, "none", 4) == 0) {
		*io_state = VGA_RSRC_NONE;
		return 1;
	}

	/* XXX We're not checking the str_size! */
	if (strncmp(buf, "io+mem", 6) == 0)
		goto both;
	else if (strncmp(buf, "io", 2) == 0)
		goto both;
	else if (strncmp(buf, "mem", 3) == 0)
		goto both;
	return 0;
both:
	*io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
	return 1;
}

/* this is only used as a cookie - it should not be dereferenced */
static struct pci_dev *vga_default;

static void vga_arb_device_card_gone(struct pci_dev *pdev);

/* Find somebody in our list */
static struct vga_device *vgadev_find(struct pci_dev *pdev)
{
	struct vga_device *vgadev;

	list_for_each_entry(vgadev, &vga_list, list)
		if (pdev == vgadev->pdev)
			return vgadev;
	return NULL;
}

/**
 * vga_default_device - return the default VGA device, for vgacon
 *
 * This can be defined by the platform. The default implementation
 * is rather dumb and will probably only work properly on single
 * vga card setups and/or x86 platforms.
 *
 * If your VGA default device is not PCI, you'll have to return
 * NULL here. In this case, I assume it will not conflict with
 * any PCI card. If this is not true, I'll have to define two arch
 * hooks for enabling/disabling the VGA default device if that is
 * possible. This may be a problem with real _ISA_ VGA cards, in
 * addition to a PCI one. I don't know at this point how to deal
 * with that card. Can their IOs be disabled at all? If not, then
 * I suppose it's a matter of having the proper arch hook telling
 * us about it, so we basically never allow anybody to succeed a
 * vga_get()...
 */
struct pci_dev *vga_default_device(void)
{
	return vga_default;
}
EXPORT_SYMBOL_GPL(vga_default_device);

void vga_set_default_device(struct pci_dev *pdev)
{
	if (vga_default == pdev)
		return;

	pci_dev_put(vga_default);
	vga_default = pci_dev_get(pdev);
}

/**
 * vga_remove_vgacon - deactivate vga console
 *
 * Unbind and unregister vgacon in case pdev is the default vga
 * device. Can be called by gpu drivers on initialization to make
 * sure vga register access done by vgacon will not disturb the
 * device.
 *
 * @pdev: pci device.
 */
#if !defined(CONFIG_VGA_CONSOLE)
int vga_remove_vgacon(struct pci_dev *pdev)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
int vga_remove_vgacon(struct pci_dev *pdev)
{
	return -ENODEV;
}
#else
int vga_remove_vgacon(struct pci_dev *pdev)
{
	int ret = 0;

	if (pdev != vga_default)
		return 0;
	vgaarb_info(&pdev->dev, "deactivate vga console\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0,
					   MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif
EXPORT_SYMBOL(vga_remove_vgacon);
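
/*
 * Illustrative sketch (not part of this file's build): a GPU driver would
 * typically call vga_remove_vgacon() early in its probe path so that vgacon
 * stops touching the hardware before the driver takes over. The function
 * name my_gpu_probe below is hypothetical.
 *
 *	static int my_gpu_probe(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = pci_enable_device(pdev);
 *		if (ret)
 *			return ret;
 *
 *		ret = vga_remove_vgacon(pdev);	// kick vgacon off this device
 *		if (ret)
 *			return ret;
 *
 *		return 0;
 *	}
 */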

static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
{
	if (vgadev->irq_set_state)
		vgadev->irq_set_state(vgadev->cookie, state);
}


/* If we don't ever use VGA arb we should avoid
   turning off anything anywhere due to old X servers getting
   confused about the boot device not being VGA */
static void vga_check_first_use(void)
{
	/* we should inform all GPUs in the system that
	 * VGA arb has occurred and to try and disable resources
	 * if they can */
	if (!vga_arbiter_used) {
		vga_arbiter_used = true;
		vga_arbiter_notify_clients();
	}
}

static struct vga_device *__vga_tryget(struct vga_device *vgadev,
				       unsigned int rsrc)
{
	struct device *dev = &vgadev->pdev->dev;
	unsigned int wants, legacy_wants, match;
	struct vga_device *conflict;
	unsigned int pci_bits;
	u32 flags = 0;

	/* Account for "normal" resources to lock. If we decode the legacy
	 * counterpart, we need to request it as well
	 */
	if ((rsrc & VGA_RSRC_NORMAL_IO) &&
	    (vgadev->decodes & VGA_RSRC_LEGACY_IO))
		rsrc |= VGA_RSRC_LEGACY_IO;
	if ((rsrc & VGA_RSRC_NORMAL_MEM) &&
	    (vgadev->decodes & VGA_RSRC_LEGACY_MEM))
		rsrc |= VGA_RSRC_LEGACY_MEM;

	vgaarb_dbg(dev, "%s: %d\n", __func__, rsrc);
	vgaarb_dbg(dev, "%s: owns: %d\n", __func__, vgadev->owns);

	/* Check what resources we need to acquire */
	wants = rsrc & ~vgadev->owns;

	/* We already own everything, just mark locked & bye bye */
	if (wants == 0)
		goto lock_them;

	/* We don't need to request a legacy resource, we just enable
	 * appropriate decoding and go
	 */
	legacy_wants = wants & VGA_RSRC_LEGACY_MASK;
	if (legacy_wants == 0)
		goto enable_them;

	/* Ok, we don't, let's find out how we need to kick off */
	list_for_each_entry(conflict, &vga_list, list) {
		unsigned int lwants = legacy_wants;
		unsigned int change_bridge = 0;

		/* Don't conflict with myself */
		if (vgadev == conflict)
			continue;

		/* Check if the architecture allows a conflict between those
		 * 2 devices or if they are on separate domains
		 */
		if (!vga_conflicts(vgadev->pdev, conflict->pdev))
			continue;

		/* We have a possible conflict. Before we go further, we must
		 * check if we sit on the same bus as the conflicting device.
		 * If we don't, then we must tie both IO and MEM resources
		 * together since there is only a single bit controlling
		 * VGA forwarding on P2P bridges
		 */
		if (vgadev->pdev->bus != conflict->pdev->bus) {
			change_bridge = 1;
			lwants = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
		}

		/* Check if the guy has a lock on the resource. If he does,
		 * return the conflicting entry
		 */
		if (conflict->locks & lwants)
			return conflict;

		/* Ok, now check if it owns the resource we want. We can
		 * lock resources that are not decoded, therefore a device
		 * can own resources it doesn't decode.
		 */
		match = lwants & conflict->owns;
		if (!match)
			continue;

		/* looks like he doesn't have a lock, we can steal
		 * them from him
		 */

		flags = 0;
		pci_bits = 0;

		/* If we can't control legacy resources via the bridge, we
		 * also need to disable normal decoding.
		 */
		if (!conflict->bridge_has_one_vga) {
			if ((match & conflict->decodes) & VGA_RSRC_LEGACY_MEM)
				pci_bits |= PCI_COMMAND_MEMORY;
			if ((match & conflict->decodes) & VGA_RSRC_LEGACY_IO)
				pci_bits |= PCI_COMMAND_IO;

			if (pci_bits) {
				vga_irq_set_state(conflict, false);
				flags |= PCI_VGA_STATE_CHANGE_DECODES;
			}
		}

		if (change_bridge)
			flags |= PCI_VGA_STATE_CHANGE_BRIDGE;

		pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
		conflict->owns &= ~match;

		/* If we disabled normal decoding, reflect it in owns */
		if (pci_bits & PCI_COMMAND_MEMORY)
			conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
		if (pci_bits & PCI_COMMAND_IO)
			conflict->owns &= ~VGA_RSRC_NORMAL_IO;
	}

enable_them:
	/* ok dude, we got it, everybody conflicting has been disabled, let's
	 * enable us. Mark any bits in "owns" regardless of whether we
	 * decoded them. We can lock resources we don't decode, therefore
	 * we must track them via "owns".
	 */
	flags = 0;
	pci_bits = 0;

	if (!vgadev->bridge_has_one_vga) {
		flags |= PCI_VGA_STATE_CHANGE_DECODES;
		if (wants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
			pci_bits |= PCI_COMMAND_MEMORY;
		if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
			pci_bits |= PCI_COMMAND_IO;
	}
	if (wants & VGA_RSRC_LEGACY_MASK)
		flags |= PCI_VGA_STATE_CHANGE_BRIDGE;

	pci_set_vga_state(vgadev->pdev, true, pci_bits, flags);

	if (!vgadev->bridge_has_one_vga)
		vga_irq_set_state(vgadev, true);

	vgadev->owns |= wants;
lock_them:
	vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK);
	if (rsrc & VGA_RSRC_LEGACY_IO)
		vgadev->io_lock_cnt++;
	if (rsrc & VGA_RSRC_LEGACY_MEM)
		vgadev->mem_lock_cnt++;
	if (rsrc & VGA_RSRC_NORMAL_IO)
		vgadev->io_norm_cnt++;
	if (rsrc & VGA_RSRC_NORMAL_MEM)
		vgadev->mem_norm_cnt++;

	return NULL;
}

static void __vga_put(struct vga_device *vgadev, unsigned int rsrc)
{
	struct device *dev = &vgadev->pdev->dev;
	unsigned int old_locks = vgadev->locks;

	vgaarb_dbg(dev, "%s\n", __func__);

	/* Update our counters, and account for equivalent legacy resources
	 * if we decode them
	 */
	if ((rsrc & VGA_RSRC_NORMAL_IO) && vgadev->io_norm_cnt > 0) {
		vgadev->io_norm_cnt--;
		if (vgadev->decodes & VGA_RSRC_LEGACY_IO)
			rsrc |= VGA_RSRC_LEGACY_IO;
	}
	if ((rsrc & VGA_RSRC_NORMAL_MEM) && vgadev->mem_norm_cnt > 0) {
		vgadev->mem_norm_cnt--;
		if (vgadev->decodes & VGA_RSRC_LEGACY_MEM)
			rsrc |= VGA_RSRC_LEGACY_MEM;
	}
	if ((rsrc & VGA_RSRC_LEGACY_IO) && vgadev->io_lock_cnt > 0)
		vgadev->io_lock_cnt--;
	if ((rsrc & VGA_RSRC_LEGACY_MEM) && vgadev->mem_lock_cnt > 0)
		vgadev->mem_lock_cnt--;

	/* Just clear lock bits, we do lazy operations so we don't really
	 * have to bother about anything else at this point
	 */
	if (vgadev->io_lock_cnt == 0)
		vgadev->locks &= ~VGA_RSRC_LEGACY_IO;
	if (vgadev->mem_lock_cnt == 0)
		vgadev->locks &= ~VGA_RSRC_LEGACY_MEM;

	/* Kick the wait queue in case somebody was waiting if we actually
	 * released something
	 */
	if (old_locks != vgadev->locks)
		wake_up_all(&vga_wait_queue);
}

/**
 * vga_get - acquire & lock VGA resources
 * @pdev: pci device of the VGA card or NULL for the system default
 * @rsrc: bit mask of resources to acquire and lock
 * @interruptible: blocking should be interruptible by signals ?
 *
 * This function acquires VGA resources for the given card and marks those
 * resources locked. If the resources requested are "normal" (and not legacy)
 * resources, the arbiter will first check whether the card is doing legacy
 * decoding for that type of resource. If yes, the lock is "converted" into a
 * legacy resource lock.
 *
 * The arbiter will first look for all VGA cards that might conflict and disable
 * their IOs and/or Memory access, including VGA forwarding on P2P bridges if
 * necessary, so that the requested resources can be used. Then, the card is
 * marked as locking these resources and the IO and/or Memory accesses are
 * enabled on the card (including VGA forwarding on parent P2P bridges if any).
 *
 * This function will block if some conflicting card is already locking one of
 * the required resources (or any resource on a different bus segment, since P2P
 * bridges don't differentiate VGA memory and IO afaik). You can indicate
 * whether this blocking should be interruptible by a signal (for userland
 * interface) or not.
 *
 * Must not be called at interrupt time or in atomic context. If the card
 * already owns the resources, the function succeeds. Nested calls are
 * supported (a per-resource counter is maintained)
 *
 * On success, release the VGA resource again with vga_put().
 *
 * Returns:
 *
 * 0 on success, negative error code on failure.
 */
int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
{
	struct vga_device *vgadev, *conflict;
	unsigned long flags;
	wait_queue_entry_t wait;
	int rc = 0;

	vga_check_first_use();
	/* The one who calls us should check for this, but lets be sure... */
	if (pdev == NULL)
		pdev = vga_default_device();
	if (pdev == NULL)
		return 0;

	for (;;) {
		spin_lock_irqsave(&vga_lock, flags);
		vgadev = vgadev_find(pdev);
		if (vgadev == NULL) {
			spin_unlock_irqrestore(&vga_lock, flags);
			rc = -ENODEV;
			break;
		}
		conflict = __vga_tryget(vgadev, rsrc);
		spin_unlock_irqrestore(&vga_lock, flags);
		if (conflict == NULL)
			break;


		/* We have a conflict, we wait until somebody kicks the
		 * work queue. Currently we have one work queue that we
		 * kick each time some resources are released, but it would
		 * be fairly easy to have a per device one so that we only
		 * need to attach to the conflicting device
		 */
		init_waitqueue_entry(&wait, current);
		add_wait_queue(&vga_wait_queue, &wait);
		set_current_state(interruptible ?
				  TASK_INTERRUPTIBLE :
				  TASK_UNINTERRUPTIBLE);
		if (interruptible && signal_pending(current)) {
			__set_current_state(TASK_RUNNING);
			remove_wait_queue(&vga_wait_queue, &wait);
			rc = -ERESTARTSYS;
			break;
		}
		schedule();
		remove_wait_queue(&vga_wait_queue, &wait);
	}
	return rc;
}
EXPORT_SYMBOL(vga_get);
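
/*
 * Illustrative sketch (not part of this file's build): a kernel driver that
 * needs to touch the legacy VGA ranges would bracket the access with
 * vga_get() and vga_put(). The function name my_touch_vga_regs is
 * hypothetical; the VGA_RSRC_* flags and the interruptible/uninterruptible
 * wrappers come from <linux/vgaarb.h>.
 *
 *	int my_touch_vga_regs(struct pci_dev *pdev)
 *	{
 *		int rc;
 *
 *		rc = vga_get(pdev, VGA_RSRC_LEGACY_IO, 1);
 *		if (rc)
 *			return rc;	// interrupted, or no such device
 *
 *		// ... access the legacy VGA IO ports here ...
 *
 *		vga_put(pdev, VGA_RSRC_LEGACY_IO);
 *		return 0;
 *	}
 *
 * vga_get_uninterruptible() (used by the char driver below) is simply the
 * non-interruptible variant of the same call.
 */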

/**
 * vga_tryget - try to acquire & lock legacy VGA resources
 * @pdev: pci device of VGA card or NULL for system default
 * @rsrc: bit mask of resources to acquire and lock
 *
 * This function performs the same operation as vga_get(), but will return an
 * error (-EBUSY) instead of blocking if the resources are already locked by
 * another card. It can be called in any context
 *
 * On success, release the VGA resource again with vga_put().
 *
 * Returns:
 *
 * 0 on success, negative error code on failure.
 */
static int vga_tryget(struct pci_dev *pdev, unsigned int rsrc)
{
	struct vga_device *vgadev;
	unsigned long flags;
	int rc = 0;

	vga_check_first_use();

	/* The one who calls us should check for this, but lets be sure... */
	if (pdev == NULL)
		pdev = vga_default_device();
	if (pdev == NULL)
		return 0;
	spin_lock_irqsave(&vga_lock, flags);
	vgadev = vgadev_find(pdev);
	if (vgadev == NULL) {
		rc = -ENODEV;
		goto bail;
	}
	if (__vga_tryget(vgadev, rsrc))
		rc = -EBUSY;
bail:
	spin_unlock_irqrestore(&vga_lock, flags);
	return rc;
}

/**
 * vga_put - release lock on legacy VGA resources
 * @pdev: pci device of VGA card or NULL for system default
 * @rsrc: bit mask of resource to release
 *
 * This function releases resources previously locked by vga_get() or
 * vga_tryget(). The resources aren't disabled right away, so that a subsequent
 * vga_get() on the same card will succeed immediately. Resources have a
 * counter, so locks are only released if the counter reaches 0.
 */
void vga_put(struct pci_dev *pdev, unsigned int rsrc)
{
	struct vga_device *vgadev;
	unsigned long flags;

	/* The one who calls us should check for this, but lets be sure... */
	if (pdev == NULL)
		pdev = vga_default_device();
	if (pdev == NULL)
		return;
	spin_lock_irqsave(&vga_lock, flags);
	vgadev = vgadev_find(pdev);
	if (vgadev == NULL)
		goto bail;
	__vga_put(vgadev, rsrc);
bail:
	spin_unlock_irqrestore(&vga_lock, flags);
}
EXPORT_SYMBOL(vga_put);

/*
 * Rules for using a bridge to control a VGA descendant decoding: if a bridge
 * has only one VGA descendant then it can be used to control the VGA routing
 * for that device. It should always use the bridge closest to the device to
 * control it. If a bridge has a direct VGA descendant, but also has a sub-
 * bridge VGA descendant then we cannot use that bridge to control the direct
 * VGA descendant. So for every device we register, we need to iterate all
 * its parent bridges so we can invalidate any devices using them properly.
 */
static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
{
	struct vga_device *same_bridge_vgadev;
	struct pci_bus *new_bus, *bus;
	struct pci_dev *new_bridge, *bridge;

	vgadev->bridge_has_one_vga = true;

	if (list_empty(&vga_list))
		return;

	/* okay, iterate the new device's bridge hierarchy */
	new_bus = vgadev->pdev->bus;
	while (new_bus) {
		new_bridge = new_bus->self;

		/* go through list of devices already registered */
		list_for_each_entry(same_bridge_vgadev, &vga_list, list) {
			bus = same_bridge_vgadev->pdev->bus;
			bridge = bus->self;

			/* see if they share a bridge with this device */
			if (new_bridge == bridge) {
				/*
				 * If their direct parent bridge is the same
				 * as any bridge of this device then it can't
				 * be used for that device.
				 */
				same_bridge_vgadev->bridge_has_one_vga = false;
			}

			/*
			 * Now iterate the previous device's bridge hierarchy.
			 * If the new device's parent bridge is in the other
			 * device's hierarchy then we can't use it to control
			 * this device
			 */
			while (bus) {
				bridge = bus->self;

				if (bridge && bridge == vgadev->pdev->bus->self)
					vgadev->bridge_has_one_vga = false;

				bus = bus->parent;
			}
		}
		new_bus = new_bus->parent;
	}
}

/*
 * Currently, we assume that the "initial" setup of the system is
 * not sane, that is we come up with conflicting devices and let
 * the arbiter's clients decide whether devices decode legacy
 * things or not.
 */
static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
{
	struct vga_device *vgadev;
	unsigned long flags;
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;

	/* Only deal with VGA class devices */
	if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
		return false;

	/* Allocate structure */
	vgadev = kzalloc(sizeof(struct vga_device), GFP_KERNEL);
	if (vgadev == NULL) {
		vgaarb_err(&pdev->dev, "failed to allocate VGA arbiter data\n");
		/*
		 * What to do on allocation failure ? For now, let's just do
		 * nothing, I'm not sure there is anything saner to be done.
		 */
		return false;
	}

	/* Take lock & check for duplicates */
	spin_lock_irqsave(&vga_lock, flags);
	if (vgadev_find(pdev) != NULL) {
		BUG_ON(1);
		goto fail;
	}
	vgadev->pdev = pdev;

	/* By default, assume we decode everything */
	vgadev->decodes = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
			  VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	/* by default mark it as decoding */
	vga_decode_count++;
	/* Mark that we "own" resources based on our enables, we will
	 * clear that below if the bridge isn't forwarding
	 */
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_IO)
		vgadev->owns |= VGA_RSRC_LEGACY_IO;
	if (cmd & PCI_COMMAND_MEMORY)
		vgadev->owns |= VGA_RSRC_LEGACY_MEM;

	/* Check if VGA cycles can get down to us */
	bus = pdev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			u16 l;

			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &l);
			if (!(l & PCI_BRIDGE_CTL_VGA)) {
				vgadev->owns = 0;
				break;
			}
		}
		bus = bus->parent;
	}

	/* Deal with VGA default device. Use first enabled one
	 * by default if arch doesn't have its own hook
	 */
	if (vga_default == NULL &&
	    ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) {
		vgaarb_info(&pdev->dev, "setting as boot VGA device\n");
		vga_set_default_device(pdev);
	}

	vga_arbiter_check_bridge_sharing(vgadev);

	/* Add to the list */
	list_add_tail(&vgadev->list, &vga_list);
	vga_count++;
	vgaarb_info(&pdev->dev, "VGA device added: decodes=%s,owns=%s,locks=%s\n",
		vga_iostate_to_str(vgadev->decodes),
		vga_iostate_to_str(vgadev->owns),
		vga_iostate_to_str(vgadev->locks));

	spin_unlock_irqrestore(&vga_lock, flags);
	return true;
fail:
	spin_unlock_irqrestore(&vga_lock, flags);
	kfree(vgadev);
	return false;
}

static bool vga_arbiter_del_pci_device(struct pci_dev *pdev)
{
	struct vga_device *vgadev;
	unsigned long flags;
	bool ret = true;

	spin_lock_irqsave(&vga_lock, flags);
	vgadev = vgadev_find(pdev);
	if (vgadev == NULL) {
		ret = false;
		goto bail;
	}

	if (vga_default == pdev)
		vga_set_default_device(NULL);

	if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
		vga_decode_count--;

	/* Remove entry from list */
	list_del(&vgadev->list);
	vga_count--;
	/* Notify userland driver that the device is gone so it discards
	 * its copies of the pci_dev pointer
	 */
	vga_arb_device_card_gone(pdev);

	/* Wake up all possible waiters */
	wake_up_all(&vga_wait_queue);
bail:
	spin_unlock_irqrestore(&vga_lock, flags);
	kfree(vgadev);
	return ret;
}

/* this is called with the lock */
static inline void vga_update_device_decodes(struct vga_device *vgadev,
					     int new_decodes)
{
	struct device *dev = &vgadev->pdev->dev;
	int old_decodes, decodes_removed, decodes_unlocked;

	old_decodes = vgadev->decodes;
	decodes_removed = ~new_decodes & old_decodes;
	decodes_unlocked = vgadev->locks & decodes_removed;
	vgadev->decodes = new_decodes;

	vgaarb_info(dev, "changed VGA decodes: olddecodes=%s,decodes=%s:owns=%s\n",
		vga_iostate_to_str(old_decodes),
		vga_iostate_to_str(vgadev->decodes),
		vga_iostate_to_str(vgadev->owns));

	/* if we removed locked decodes, lock count goes to zero, and release */
	if (decodes_unlocked) {
		if (decodes_unlocked & VGA_RSRC_LEGACY_IO)
			vgadev->io_lock_cnt = 0;
		if (decodes_unlocked & VGA_RSRC_LEGACY_MEM)
			vgadev->mem_lock_cnt = 0;
		__vga_put(vgadev, decodes_unlocked);
	}

	/* change decodes counter */
	if (old_decodes & VGA_RSRC_LEGACY_MASK &&
	    !(new_decodes & VGA_RSRC_LEGACY_MASK))
		vga_decode_count--;
	if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
	    new_decodes & VGA_RSRC_LEGACY_MASK)
		vga_decode_count++;
	vgaarb_dbg(dev, "decoding count now is: %d\n", vga_decode_count);
}

static void __vga_set_legacy_decoding(struct pci_dev *pdev,
				      unsigned int decodes,
				      bool userspace)
{
	struct vga_device *vgadev;
	unsigned long flags;

	decodes &= VGA_RSRC_LEGACY_MASK;

	spin_lock_irqsave(&vga_lock, flags);
	vgadev = vgadev_find(pdev);
	if (vgadev == NULL)
		goto bail;

	/* don't let userspace futz with kernel driver decodes */
	if (userspace && vgadev->set_vga_decode)
		goto bail;

	/* update the device decodes + counter */
	vga_update_device_decodes(vgadev, decodes);

	/* XXX if somebody is going from "doesn't decode" to "decodes" state
	 * here, additional care must be taken as we may have pending
	 * ownership of non-legacy region ...
	 */
bail:
	spin_unlock_irqrestore(&vga_lock, flags);
}

void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes)
{
	__vga_set_legacy_decoding(pdev, decodes, false);
}
EXPORT_SYMBOL(vga_set_legacy_decoding);
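
/*
 * Illustrative sketch (not part of this file's build): a GPU driver that has
 * turned off VGA decoding on its hardware can tell the arbiter so legacy
 * cycles no longer need to be routed to it. The function name below is
 * hypothetical; the argument is masked with VGA_RSRC_LEGACY_MASK internally,
 * so passing only the "normal" bits clears the legacy decode flags.
 *
 *	static void my_gpu_report_no_legacy_decode(struct pci_dev *pdev)
 *	{
 *		vga_set_legacy_decoding(pdev, VGA_RSRC_NORMAL_IO |
 *					      VGA_RSRC_NORMAL_MEM);
 *	}
 */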

/**
 * vga_client_register - register or unregister a VGA arbitration client
 * @pdev: pci device of the VGA client
 * @cookie: client cookie to be used in callbacks
 * @irq_set_state: irq state change callback
 * @set_vga_decode: vga decode change callback
 *
 * Clients have two callback mechanisms they can use.
 *
 * @irq_set_state callback: If a client can't disable its GPU's VGA
 * resources, then we need to be able to ask it to turn off its irqs when we
 * turn off its mem and io decoding.
 *
 * @set_vga_decode callback: If a client can disable its GPU VGA resource, it
 * will get a callback from this to set the encode/decode state.
 *
 * Rationale: we cannot disable VGA decode resources unconditionally; some
 * single GPU laptops seem to require ACPI or BIOS access to the VGA registers
 * to control things like backlights etc. Hopefully newer multi-GPU laptops do
 * something saner, and desktops won't have any special ACPI for this. The
 * driver will get a callback when VGA arbitration is first used by userspace
 * since some older X servers have issues.
 *
 * This function does not check whether a client for @pdev has been registered
 * already.
 *
 * To unregister just call this function with @irq_set_state and @set_vga_decode
 * both set to NULL for the same @pdev as originally used to register them.
 *
 * Returns: 0 on success, -ENODEV on failure
 */
int vga_client_register(struct pci_dev *pdev, void *cookie,
			void (*irq_set_state)(void *cookie, bool state),
			unsigned int (*set_vga_decode)(void *cookie,
						       bool decode))
{
	int ret = -ENODEV;
	struct vga_device *vgadev;
	unsigned long flags;

	spin_lock_irqsave(&vga_lock, flags);
	vgadev = vgadev_find(pdev);
	if (!vgadev)
		goto bail;

	vgadev->irq_set_state = irq_set_state;
	vgadev->set_vga_decode = set_vga_decode;
	vgadev->cookie = cookie;
	ret = 0;

bail:
	spin_unlock_irqrestore(&vga_lock, flags);
	return ret;

}
EXPORT_SYMBOL(vga_client_register);
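
/*
 * Illustrative sketch (not part of this file's build): a GPU driver usually
 * registers as an arbiter client at probe time and unregisters on remove by
 * passing NULL callbacks. The callback, structure and cookie names below are
 * hypothetical; the return value of the decode callback is the set of
 * resources the device still decodes after the change.
 *
 *	static unsigned int my_gpu_vga_set_decode(void *cookie, bool decode)
 *	{
 *		struct my_gpu *gpu = cookie;
 *
 *		// program the hardware to enable/disable VGA decoding here
 *		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 *	}
 *
 *	// at probe:
 *	vga_client_register(pdev, gpu, NULL, my_gpu_vga_set_decode);
 *	// at remove:
 *	vga_client_register(pdev, NULL, NULL, NULL);
 */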

/*
 * Char driver implementation
 *
 * Semantics is:
 *
 * open       : open user instance of the arbiter. by default, it's
 *              attached to the default VGA device of the system.
 *
 * close      : close user instance, release locks
 *
 * read       : return a string indicating the status of the target.
 *              an IO state string is of the form {io,mem,io+mem,none},
 *              mc and ic are respectively mem and io lock counts (for
 *              debugging/diagnostic only). "decodes" indicates what the
 *              card currently decodes, "owns" indicates what is currently
 *              enabled on it, and "locks" indicates what is locked by this
 *              card. If the card is unplugged, we get "invalid" then for
 *              card_ID and an -ENODEV error is returned for any command
 *              until a new card is targeted
 *
 *   "<card_ID>,decodes=<io_state>,owns=<io_state>,locks=<io_state> (ic,mc)"
 *
 * write      : write a command to the arbiter. List of commands is:
 *
 *   target <card_ID>   : switch target to card <card_ID> (see below)
 *   lock <io_state>    : acquires locks on target ("none" is invalid io_state)
 *   trylock <io_state> : non-blocking acquire locks on target
 *   unlock <io_state>  : release locks on target
 *   unlock all         : release all locks on target held by this user
 *   decodes <io_state> : set the legacy decoding attributes for the card
 *
 * poll       : event if something changes on any card (not just the target)
 *
 * card_ID is of the form "PCI:domain:bus:dev.fn". It can be set to "default"
 * to go back to the system default card (TODO: not implemented yet).
 * Currently, only PCI is supported as a prefix, but the userland API may
 * support other bus types in the future, even if the current kernel
 * implementation doesn't.
 *
 * Note about locks:
 *
 * The driver keeps track of which user has what locks on which card. It
 * supports stacking, like the kernel one. This complexifies the implementation
 * a bit, but makes the arbiter more tolerant to userspace problems and able
 * to properly cleanup in all cases when a process dies.
 * Currently, a max of 16 cards simultaneously can have locks issued from
 * userspace for a given user (file descriptor instance) of the arbiter.
 *
 * If the device is hot-unplugged, there is a hook inside the module to notify
 * when devices are added/removed in the system so they can be automatically
 * added/removed in the arbiter.
 */
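
/*
 * Illustrative sketch (not part of this file's build): minimal userspace use
 * of the protocol described above, assuming the arbiter is exposed as the
 * misc device /dev/vga_arbiter (the node name is not defined in this part of
 * the file).
 *
 *	int fd = open("/dev/vga_arbiter", O_RDWR);
 *	char buf[128];
 *
 *	write(fd, "target PCI:0000:01:00.0", 23);	// pick a card
 *	write(fd, "lock io+mem", 11);			// blocks until granted
 *	read(fd, buf, sizeof(buf));			// status string, see read above
 *	write(fd, "unlock io+mem", 13);
 *	close(fd);					// leftover locks are released
 */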

#define MAX_USER_CARDS		CONFIG_VGA_ARB_MAX_GPUS
#define PCI_INVALID_CARD	((struct pci_dev *)-1UL)

/*
 * Each user has an array of these, tracking which cards have locks
 */
struct vga_arb_user_card {
	struct pci_dev *pdev;
	unsigned int mem_cnt;
	unsigned int io_cnt;
};

struct vga_arb_private {
	struct list_head list;
	struct pci_dev *target;
	struct vga_arb_user_card cards[MAX_USER_CARDS];
	spinlock_t lock;
};

static LIST_HEAD(vga_user_list);
static DEFINE_SPINLOCK(vga_user_lock);


/*
 * This function gets a string in the format: "PCI:domain:bus:dev.fn" and
 * returns the respective values. If the string is not in this format,
 * it returns 0.
 */
static int vga_pci_str_to_vars(char *buf, int count, unsigned int *domain,
			       unsigned int *bus, unsigned int *devfn)
{
	int n;
	unsigned int slot, func;


	n = sscanf(buf, "PCI:%x:%x:%x.%x", domain, bus, &slot, &func);
	if (n != 4)
		return 0;

	*devfn = PCI_DEVFN(slot, func);

	return 1;
}

static ssize_t vga_arb_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct vga_arb_private *priv = file->private_data;
	struct vga_device *vgadev;
	struct pci_dev *pdev;
	unsigned long flags;
	size_t len;
	int rc;
	char *lbuf;

	lbuf = kmalloc(1024, GFP_KERNEL);
	if (lbuf == NULL)
		return -ENOMEM;

	/* Shields against vga_arb_device_card_gone (pci_dev going
	 * away), and allows access to vga list
	 */
	spin_lock_irqsave(&vga_lock, flags);

	/* If we are targeting the default, use it */
	pdev = priv->target;
	if (pdev == NULL || pdev == PCI_INVALID_CARD) {
		spin_unlock_irqrestore(&vga_lock, flags);
		len = sprintf(lbuf, "invalid");
		goto done;
	}

	/* Find card vgadev structure */
	vgadev = vgadev_find(pdev);
	if (vgadev == NULL) {
		/* Wow, it's not in the list, that shouldn't happen,
		 * let's fix us up and return invalid card
		 */
		if (pdev == priv->target)
			vga_arb_device_card_gone(pdev);
		spin_unlock_irqrestore(&vga_lock, flags);
		len = sprintf(lbuf, "invalid");
		goto done;
	}

	/* Fill the buffer with infos */
	len = snprintf(lbuf, 1024,
		       "count:%d,PCI:%s,decodes=%s,owns=%s,locks=%s(%d:%d)\n",
		       vga_decode_count, pci_name(pdev),
		       vga_iostate_to_str(vgadev->decodes),
		       vga_iostate_to_str(vgadev->owns),
		       vga_iostate_to_str(vgadev->locks),
		       vgadev->io_lock_cnt, vgadev->mem_lock_cnt);

	spin_unlock_irqrestore(&vga_lock, flags);
done:

	/* Copy that to user */
	if (len > count)
		len = count;
	rc = copy_to_user(buf, lbuf, len);
	kfree(lbuf);
	if (rc)
		return -EFAULT;
	return len;
}

1065*4882a593Smuzhiyun /*
1066*4882a593Smuzhiyun * TODO: To avoid parsing inside kernel and to improve the speed we may
1067*4882a593Smuzhiyun * consider use ioctl here
1068*4882a593Smuzhiyun */
vga_arb_write(struct file * file,const char __user * buf,size_t count,loff_t * ppos)1069*4882a593Smuzhiyun static ssize_t vga_arb_write(struct file *file, const char __user *buf,
1070*4882a593Smuzhiyun size_t count, loff_t *ppos)
1071*4882a593Smuzhiyun {
1072*4882a593Smuzhiyun struct vga_arb_private *priv = file->private_data;
1073*4882a593Smuzhiyun struct vga_arb_user_card *uc = NULL;
1074*4882a593Smuzhiyun struct pci_dev *pdev;
1075*4882a593Smuzhiyun
1076*4882a593Smuzhiyun unsigned int io_state;
1077*4882a593Smuzhiyun
1078*4882a593Smuzhiyun char kbuf[64], *curr_pos;
1079*4882a593Smuzhiyun size_t remaining = count;
1080*4882a593Smuzhiyun
1081*4882a593Smuzhiyun int ret_val;
1082*4882a593Smuzhiyun int i;
1083*4882a593Smuzhiyun
1084*4882a593Smuzhiyun if (count >= sizeof(kbuf))
1085*4882a593Smuzhiyun return -EINVAL;
1086*4882a593Smuzhiyun if (copy_from_user(kbuf, buf, count))
1087*4882a593Smuzhiyun return -EFAULT;
1088*4882a593Smuzhiyun curr_pos = kbuf;
1089*4882a593Smuzhiyun kbuf[count] = '\0'; /* Just to make sure... */
1090*4882a593Smuzhiyun
1091*4882a593Smuzhiyun if (strncmp(curr_pos, "lock ", 5) == 0) {
1092*4882a593Smuzhiyun curr_pos += 5;
1093*4882a593Smuzhiyun remaining -= 5;
1094*4882a593Smuzhiyun
1095*4882a593Smuzhiyun pr_debug("client 0x%p called 'lock'\n", priv);
1096*4882a593Smuzhiyun
1097*4882a593Smuzhiyun if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
1098*4882a593Smuzhiyun ret_val = -EPROTO;
1099*4882a593Smuzhiyun goto done;
1100*4882a593Smuzhiyun }
1101*4882a593Smuzhiyun if (io_state == VGA_RSRC_NONE) {
1102*4882a593Smuzhiyun ret_val = -EPROTO;
1103*4882a593Smuzhiyun goto done;
1104*4882a593Smuzhiyun }
1105*4882a593Smuzhiyun
1106*4882a593Smuzhiyun pdev = priv->target;
1107*4882a593Smuzhiyun if (priv->target == NULL) {
1108*4882a593Smuzhiyun ret_val = -ENODEV;
1109*4882a593Smuzhiyun goto done;
1110*4882a593Smuzhiyun }
1111*4882a593Smuzhiyun
1112*4882a593Smuzhiyun vga_get_uninterruptible(pdev, io_state);
1113*4882a593Smuzhiyun
1114*4882a593Smuzhiyun /* Update the client's list of locks... */
1115*4882a593Smuzhiyun for (i = 0; i < MAX_USER_CARDS; i++) {
1116*4882a593Smuzhiyun if (priv->cards[i].pdev == pdev) {
1117*4882a593Smuzhiyun if (io_state & VGA_RSRC_LEGACY_IO)
1118*4882a593Smuzhiyun priv->cards[i].io_cnt++;
1119*4882a593Smuzhiyun if (io_state & VGA_RSRC_LEGACY_MEM)
1120*4882a593Smuzhiyun priv->cards[i].mem_cnt++;
1121*4882a593Smuzhiyun break;
1122*4882a593Smuzhiyun }
1123*4882a593Smuzhiyun }
1124*4882a593Smuzhiyun
1125*4882a593Smuzhiyun ret_val = count;
1126*4882a593Smuzhiyun goto done;
1127*4882a593Smuzhiyun } else if (strncmp(curr_pos, "unlock ", 7) == 0) {
1128*4882a593Smuzhiyun curr_pos += 7;
1129*4882a593Smuzhiyun remaining -= 7;
1130*4882a593Smuzhiyun
1131*4882a593Smuzhiyun pr_debug("client 0x%p called 'unlock'\n", priv);
1132*4882a593Smuzhiyun
1133*4882a593Smuzhiyun if (strncmp(curr_pos, "all", 3) == 0)
1134*4882a593Smuzhiyun io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
1135*4882a593Smuzhiyun else {
1136*4882a593Smuzhiyun if (!vga_str_to_iostate
1137*4882a593Smuzhiyun (curr_pos, remaining, &io_state)) {
1138*4882a593Smuzhiyun ret_val = -EPROTO;
1139*4882a593Smuzhiyun goto done;
1140*4882a593Smuzhiyun }
1141*4882a593Smuzhiyun /* TODO: Add this?
1142*4882a593Smuzhiyun if (io_state == VGA_RSRC_NONE) {
1143*4882a593Smuzhiyun ret_val = -EPROTO;
1144*4882a593Smuzhiyun goto done;
1145*4882a593Smuzhiyun }
1146*4882a593Smuzhiyun */
1147*4882a593Smuzhiyun }
1148*4882a593Smuzhiyun
1149*4882a593Smuzhiyun pdev = priv->target;
1150*4882a593Smuzhiyun if (priv->target == NULL) {
1151*4882a593Smuzhiyun ret_val = -ENODEV;
1152*4882a593Smuzhiyun goto done;
1153*4882a593Smuzhiyun }
1154*4882a593Smuzhiyun for (i = 0; i < MAX_USER_CARDS; i++) {
1155*4882a593Smuzhiyun if (priv->cards[i].pdev == pdev)
1156*4882a593Smuzhiyun uc = &priv->cards[i];
1157*4882a593Smuzhiyun }
1158*4882a593Smuzhiyun
1159*4882a593Smuzhiyun if (!uc) {
1160*4882a593Smuzhiyun ret_val = -EINVAL;
1161*4882a593Smuzhiyun goto done;
1162*4882a593Smuzhiyun }
1163*4882a593Smuzhiyun
1164*4882a593Smuzhiyun if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
1165*4882a593Smuzhiyun ret_val = -EINVAL;
1166*4882a593Smuzhiyun goto done;
1167*4882a593Smuzhiyun }
1168*4882a593Smuzhiyun
1169*4882a593Smuzhiyun if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
1170*4882a593Smuzhiyun ret_val = -EINVAL;
1171*4882a593Smuzhiyun goto done;
1172*4882a593Smuzhiyun }
1173*4882a593Smuzhiyun
1174*4882a593Smuzhiyun vga_put(pdev, io_state);
1175*4882a593Smuzhiyun
1176*4882a593Smuzhiyun if (io_state & VGA_RSRC_LEGACY_IO)
1177*4882a593Smuzhiyun uc->io_cnt--;
1178*4882a593Smuzhiyun if (io_state & VGA_RSRC_LEGACY_MEM)
1179*4882a593Smuzhiyun uc->mem_cnt--;
1180*4882a593Smuzhiyun
1181*4882a593Smuzhiyun ret_val = count;
1182*4882a593Smuzhiyun goto done;
1183*4882a593Smuzhiyun } else if (strncmp(curr_pos, "trylock ", 8) == 0) {
1184*4882a593Smuzhiyun curr_pos += 8;
1185*4882a593Smuzhiyun remaining -= 8;
1186*4882a593Smuzhiyun
1187*4882a593Smuzhiyun pr_debug("client 0x%p called 'trylock'\n", priv);
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
1190*4882a593Smuzhiyun ret_val = -EPROTO;
1191*4882a593Smuzhiyun goto done;
1192*4882a593Smuzhiyun }
1193*4882a593Smuzhiyun /* TODO: Add this?
1194*4882a593Smuzhiyun if (io_state == VGA_RSRC_NONE) {
1195*4882a593Smuzhiyun ret_val = -EPROTO;
1196*4882a593Smuzhiyun goto done;
1197*4882a593Smuzhiyun }
1198*4882a593Smuzhiyun */
1199*4882a593Smuzhiyun
1200*4882a593Smuzhiyun pdev = priv->target;
1201*4882a593Smuzhiyun if (priv->target == NULL) {
1202*4882a593Smuzhiyun ret_val = -ENODEV;
1203*4882a593Smuzhiyun goto done;
1204*4882a593Smuzhiyun }
1205*4882a593Smuzhiyun
1206*4882a593Smuzhiyun if (vga_tryget(pdev, io_state)) {
1207*4882a593Smuzhiyun /* Update the client's list of locks... */
1208*4882a593Smuzhiyun for (i = 0; i < MAX_USER_CARDS; i++) {
1209*4882a593Smuzhiyun if (priv->cards[i].pdev == pdev) {
1210*4882a593Smuzhiyun if (io_state & VGA_RSRC_LEGACY_IO)
1211*4882a593Smuzhiyun priv->cards[i].io_cnt++;
1212*4882a593Smuzhiyun if (io_state & VGA_RSRC_LEGACY_MEM)
1213*4882a593Smuzhiyun priv->cards[i].mem_cnt++;
1214*4882a593Smuzhiyun break;
1215*4882a593Smuzhiyun }
1216*4882a593Smuzhiyun }
1217*4882a593Smuzhiyun ret_val = count;
1218*4882a593Smuzhiyun goto done;
1219*4882a593Smuzhiyun } else {
1220*4882a593Smuzhiyun ret_val = -EBUSY;
1221*4882a593Smuzhiyun goto done;
1222*4882a593Smuzhiyun }
1223*4882a593Smuzhiyun
1224*4882a593Smuzhiyun } else if (strncmp(curr_pos, "target ", 7) == 0) {
1225*4882a593Smuzhiyun unsigned int domain, bus, devfn;
1226*4882a593Smuzhiyun struct vga_device *vgadev;
1227*4882a593Smuzhiyun
1228*4882a593Smuzhiyun curr_pos += 7;
1229*4882a593Smuzhiyun remaining -= 7;
1230*4882a593Smuzhiyun pr_debug("client 0x%p called 'target'\n", priv);
1231*4882a593Smuzhiyun /* if target is default */
1232*4882a593Smuzhiyun if (!strncmp(curr_pos, "default", 7))
1233*4882a593Smuzhiyun pdev = pci_dev_get(vga_default_device());
1234*4882a593Smuzhiyun else {
1235*4882a593Smuzhiyun if (!vga_pci_str_to_vars(curr_pos, remaining,
1236*4882a593Smuzhiyun &domain, &bus, &devfn)) {
1237*4882a593Smuzhiyun ret_val = -EPROTO;
1238*4882a593Smuzhiyun goto done;
1239*4882a593Smuzhiyun }
1240*4882a593Smuzhiyun pdev = pci_get_domain_bus_and_slot(domain, bus, devfn);
1241*4882a593Smuzhiyun if (!pdev) {
1242*4882a593Smuzhiyun pr_debug("invalid PCI address %04x:%02x:%02x.%x\n",
1243*4882a593Smuzhiyun domain, bus, PCI_SLOT(devfn),
1244*4882a593Smuzhiyun PCI_FUNC(devfn));
1245*4882a593Smuzhiyun ret_val = -ENODEV;
1246*4882a593Smuzhiyun goto done;
1247*4882a593Smuzhiyun }
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun pr_debug("%s ==> %04x:%02x:%02x.%x pdev %p\n", curr_pos,
1250*4882a593Smuzhiyun domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
1251*4882a593Smuzhiyun pdev);
1252*4882a593Smuzhiyun }
1253*4882a593Smuzhiyun
1254*4882a593Smuzhiyun vgadev = vgadev_find(pdev);
1255*4882a593Smuzhiyun pr_debug("vgadev %p\n", vgadev);
1256*4882a593Smuzhiyun if (vgadev == NULL) {
1257*4882a593Smuzhiyun if (pdev) {
1258*4882a593Smuzhiyun vgaarb_dbg(&pdev->dev, "not a VGA device\n");
1259*4882a593Smuzhiyun pci_dev_put(pdev);
1260*4882a593Smuzhiyun }
1261*4882a593Smuzhiyun
1262*4882a593Smuzhiyun ret_val = -ENODEV;
1263*4882a593Smuzhiyun goto done;
1264*4882a593Smuzhiyun }
1265*4882a593Smuzhiyun
1266*4882a593Smuzhiyun priv->target = pdev;
1267*4882a593Smuzhiyun for (i = 0; i < MAX_USER_CARDS; i++) {
1268*4882a593Smuzhiyun if (priv->cards[i].pdev == pdev)
1269*4882a593Smuzhiyun break;
1270*4882a593Smuzhiyun if (priv->cards[i].pdev == NULL) {
1271*4882a593Smuzhiyun priv->cards[i].pdev = pdev;
1272*4882a593Smuzhiyun priv->cards[i].io_cnt = 0;
1273*4882a593Smuzhiyun priv->cards[i].mem_cnt = 0;
1274*4882a593Smuzhiyun break;
1275*4882a593Smuzhiyun }
1276*4882a593Smuzhiyun }
1277*4882a593Smuzhiyun if (i == MAX_USER_CARDS) {
1278*4882a593Smuzhiyun vgaarb_dbg(&pdev->dev, "maximum number of user cards (%d) reached, ignoring this one!\n",
1279*4882a593Smuzhiyun MAX_USER_CARDS);
1280*4882a593Smuzhiyun pci_dev_put(pdev);
1281*4882a593Smuzhiyun /* XXX: which value to return? */
1282*4882a593Smuzhiyun ret_val = -ENOMEM;
1283*4882a593Smuzhiyun goto done;
1284*4882a593Smuzhiyun }
1285*4882a593Smuzhiyun
1286*4882a593Smuzhiyun ret_val = count;
1287*4882a593Smuzhiyun pci_dev_put(pdev);
1288*4882a593Smuzhiyun goto done;
1289*4882a593Smuzhiyun
1290*4882a593Smuzhiyun
1291*4882a593Smuzhiyun } else if (strncmp(curr_pos, "decodes ", 8) == 0) {
1292*4882a593Smuzhiyun curr_pos += 8;
1293*4882a593Smuzhiyun remaining -= 8;
1294*4882a593Smuzhiyun pr_debug("client 0x%p called 'decodes'\n", priv);
1295*4882a593Smuzhiyun
1296*4882a593Smuzhiyun if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
1297*4882a593Smuzhiyun ret_val = -EPROTO;
1298*4882a593Smuzhiyun goto done;
1299*4882a593Smuzhiyun }
1300*4882a593Smuzhiyun pdev = priv->target;
1301*4882a593Smuzhiyun if (priv->target == NULL) {
1302*4882a593Smuzhiyun ret_val = -ENODEV;
1303*4882a593Smuzhiyun goto done;
1304*4882a593Smuzhiyun }
1305*4882a593Smuzhiyun
1306*4882a593Smuzhiyun __vga_set_legacy_decoding(pdev, io_state, true);
1307*4882a593Smuzhiyun ret_val = count;
1308*4882a593Smuzhiyun goto done;
1309*4882a593Smuzhiyun }
1310*4882a593Smuzhiyun /* If we got here, the message written is not part of the protocol! */
1311*4882a593Smuzhiyun return -EPROTO;
1312*4882a593Smuzhiyun
1313*4882a593Smuzhiyun done:
1314*4882a593Smuzhiyun return ret_val;
1315*4882a593Smuzhiyun }
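/*
 * Illustrative userspace sketch (added note, an assumption for documentation
 * purposes and not part of this file): a client drives the protocol parsed
 * above by writing plain-text commands to the arbiter device.  The device
 * path and PCI address below are placeholders.
 *
 *   int fd = open("/dev/vga_arbiter", O_RDWR);
 *   write(fd, "target PCI:0000:01:00.0", 23);  // select a card
 *   write(fd, "lock io+mem", 11);              // grab the legacy ranges
 *   ... access the legacy VGA I/O or memory ranges ...
 *   write(fd, "unlock all", 10);               // release them again
 *   close(fd);
 */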
1316*4882a593Smuzhiyun
1317*4882a593Smuzhiyun static __poll_t vga_arb_fpoll(struct file *file, poll_table *wait)
1318*4882a593Smuzhiyun {
1319*4882a593Smuzhiyun pr_debug("%s\n", __func__);
1320*4882a593Smuzhiyun
1321*4882a593Smuzhiyun poll_wait(file, &vga_wait_queue, wait);
1322*4882a593Smuzhiyun return EPOLLIN;
1323*4882a593Smuzhiyun }
1324*4882a593Smuzhiyun
1325*4882a593Smuzhiyun static int vga_arb_open(struct inode *inode, struct file *file)
1326*4882a593Smuzhiyun {
1327*4882a593Smuzhiyun struct vga_arb_private *priv;
1328*4882a593Smuzhiyun unsigned long flags;
1329*4882a593Smuzhiyun
1330*4882a593Smuzhiyun pr_debug("%s\n", __func__);
1331*4882a593Smuzhiyun
1332*4882a593Smuzhiyun priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1333*4882a593Smuzhiyun if (priv == NULL)
1334*4882a593Smuzhiyun return -ENOMEM;
1335*4882a593Smuzhiyun spin_lock_init(&priv->lock);
1336*4882a593Smuzhiyun file->private_data = priv;
1337*4882a593Smuzhiyun
1338*4882a593Smuzhiyun spin_lock_irqsave(&vga_user_lock, flags);
1339*4882a593Smuzhiyun list_add(&priv->list, &vga_user_list);
1340*4882a593Smuzhiyun spin_unlock_irqrestore(&vga_user_lock, flags);
1341*4882a593Smuzhiyun
1342*4882a593Smuzhiyun /* Set up the client's list of locks */
1343*4882a593Smuzhiyun priv->target = vga_default_device(); /* Maybe this is still null! */
1344*4882a593Smuzhiyun priv->cards[0].pdev = priv->target;
1345*4882a593Smuzhiyun priv->cards[0].io_cnt = 0;
1346*4882a593Smuzhiyun priv->cards[0].mem_cnt = 0;
1347*4882a593Smuzhiyun
1348*4882a593Smuzhiyun
1349*4882a593Smuzhiyun return 0;
1350*4882a593Smuzhiyun }
1351*4882a593Smuzhiyun
1352*4882a593Smuzhiyun static int vga_arb_release(struct inode *inode, struct file *file)
1353*4882a593Smuzhiyun {
1354*4882a593Smuzhiyun struct vga_arb_private *priv = file->private_data;
1355*4882a593Smuzhiyun struct vga_arb_user_card *uc;
1356*4882a593Smuzhiyun unsigned long flags;
1357*4882a593Smuzhiyun int i;
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyun pr_debug("%s\n", __func__);
1360*4882a593Smuzhiyun
1361*4882a593Smuzhiyun spin_lock_irqsave(&vga_user_lock, flags);
1362*4882a593Smuzhiyun list_del(&priv->list);
1363*4882a593Smuzhiyun for (i = 0; i < MAX_USER_CARDS; i++) {
1364*4882a593Smuzhiyun uc = &priv->cards[i];
1365*4882a593Smuzhiyun if (uc->pdev == NULL)
1366*4882a593Smuzhiyun continue;
1367*4882a593Smuzhiyun vgaarb_dbg(&uc->pdev->dev, "uc->io_cnt == %d, uc->mem_cnt == %d\n",
1368*4882a593Smuzhiyun uc->io_cnt, uc->mem_cnt);
1369*4882a593Smuzhiyun while (uc->io_cnt--)
1370*4882a593Smuzhiyun vga_put(uc->pdev, VGA_RSRC_LEGACY_IO);
1371*4882a593Smuzhiyun while (uc->mem_cnt--)
1372*4882a593Smuzhiyun vga_put(uc->pdev, VGA_RSRC_LEGACY_MEM);
1373*4882a593Smuzhiyun }
1374*4882a593Smuzhiyun spin_unlock_irqrestore(&vga_user_lock, flags);
1375*4882a593Smuzhiyun
1376*4882a593Smuzhiyun kfree(priv);
1377*4882a593Smuzhiyun
1378*4882a593Smuzhiyun return 0;
1379*4882a593Smuzhiyun }
1380*4882a593Smuzhiyun
1381*4882a593Smuzhiyun static void vga_arb_device_card_gone(struct pci_dev *pdev)
1382*4882a593Smuzhiyun {
1383*4882a593Smuzhiyun }
1384*4882a593Smuzhiyun
1385*4882a593Smuzhiyun /*
1386*4882a593Smuzhiyun * Call back any registered clients to let them know we have a
1387*4882a593Smuzhiyun * change in VGA cards.
1388*4882a593Smuzhiyun */
1389*4882a593Smuzhiyun static void vga_arbiter_notify_clients(void)
1390*4882a593Smuzhiyun {
1391*4882a593Smuzhiyun struct vga_device *vgadev;
1392*4882a593Smuzhiyun unsigned long flags;
1393*4882a593Smuzhiyun uint32_t new_decodes;
1394*4882a593Smuzhiyun bool new_state;
1395*4882a593Smuzhiyun
1396*4882a593Smuzhiyun if (!vga_arbiter_used)
1397*4882a593Smuzhiyun return;
1398*4882a593Smuzhiyun
1399*4882a593Smuzhiyun spin_lock_irqsave(&vga_lock, flags);
1400*4882a593Smuzhiyun list_for_each_entry(vgadev, &vga_list, list) {
1401*4882a593Smuzhiyun if (vga_count > 1)
1402*4882a593Smuzhiyun new_state = false;
1403*4882a593Smuzhiyun else
1404*4882a593Smuzhiyun new_state = true;
1405*4882a593Smuzhiyun if (vgadev->set_vga_decode) {
1406*4882a593Smuzhiyun new_decodes = vgadev->set_vga_decode(vgadev->cookie,
1407*4882a593Smuzhiyun new_state);
1408*4882a593Smuzhiyun vga_update_device_decodes(vgadev, new_decodes);
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun }
1411*4882a593Smuzhiyun spin_unlock_irqrestore(&vga_lock, flags);
1412*4882a593Smuzhiyun }
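/*
 * Added note (hedged): the set_vga_decode hooks invoked above are the
 * callbacks that kernel GPU drivers register with the arbiter (typically
 * via vga_client_register()).  When more than one VGA card is present,
 * each driver is asked whether it can stop decoding the legacy VGA ranges,
 * which keeps arbitration out of the fast path.
 */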
1413*4882a593Smuzhiyun
1414*4882a593Smuzhiyun static int pci_notify(struct notifier_block *nb, unsigned long action,
1415*4882a593Smuzhiyun void *data)
1416*4882a593Smuzhiyun {
1417*4882a593Smuzhiyun struct device *dev = data;
1418*4882a593Smuzhiyun struct pci_dev *pdev = to_pci_dev(dev);
1419*4882a593Smuzhiyun bool notify = false;
1420*4882a593Smuzhiyun
1421*4882a593Smuzhiyun vgaarb_dbg(dev, "%s\n", __func__);
1422*4882a593Smuzhiyun
1423*4882a593Smuzhiyun /* For now we're only interested in devices being added and removed.
1424*4882a593Smuzhiyun * This has not been tested here, so someone needs to double check the
1425*4882a593Smuzhiyun * case of hotpluggable VGA cards. */
1426*4882a593Smuzhiyun if (action == BUS_NOTIFY_ADD_DEVICE)
1427*4882a593Smuzhiyun notify = vga_arbiter_add_pci_device(pdev);
1428*4882a593Smuzhiyun else if (action == BUS_NOTIFY_DEL_DEVICE)
1429*4882a593Smuzhiyun notify = vga_arbiter_del_pci_device(pdev);
1430*4882a593Smuzhiyun
1431*4882a593Smuzhiyun if (notify)
1432*4882a593Smuzhiyun vga_arbiter_notify_clients();
1433*4882a593Smuzhiyun return 0;
1434*4882a593Smuzhiyun }
1435*4882a593Smuzhiyun
1436*4882a593Smuzhiyun static struct notifier_block pci_notifier = {
1437*4882a593Smuzhiyun .notifier_call = pci_notify,
1438*4882a593Smuzhiyun };
1439*4882a593Smuzhiyun
1440*4882a593Smuzhiyun static const struct file_operations vga_arb_device_fops = {
1441*4882a593Smuzhiyun .read = vga_arb_read,
1442*4882a593Smuzhiyun .write = vga_arb_write,
1443*4882a593Smuzhiyun .poll = vga_arb_fpoll,
1444*4882a593Smuzhiyun .open = vga_arb_open,
1445*4882a593Smuzhiyun .release = vga_arb_release,
1446*4882a593Smuzhiyun .llseek = noop_llseek,
1447*4882a593Smuzhiyun };
1448*4882a593Smuzhiyun
1449*4882a593Smuzhiyun static struct miscdevice vga_arb_device = {
1450*4882a593Smuzhiyun MISC_DYNAMIC_MINOR, "vga_arbiter", &vga_arb_device_fops
1451*4882a593Smuzhiyun };
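/*
 * Added note: registering this misc device exposes the arbiter to userspace
 * as a character device with a dynamic minor, normally appearing as
 * /dev/vga_arbiter (assuming standard udev naming).
 */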
1452*4882a593Smuzhiyun
1453*4882a593Smuzhiyun static void __init vga_arb_select_default_device(void)
1454*4882a593Smuzhiyun {
1455*4882a593Smuzhiyun struct pci_dev *pdev;
1456*4882a593Smuzhiyun struct vga_device *vgadev;
1457*4882a593Smuzhiyun
1458*4882a593Smuzhiyun #if defined(CONFIG_X86) || defined(CONFIG_IA64)
1459*4882a593Smuzhiyun u64 base = screen_info.lfb_base;
1460*4882a593Smuzhiyun u64 size = screen_info.lfb_size;
1461*4882a593Smuzhiyun u64 limit;
1462*4882a593Smuzhiyun resource_size_t start, end;
1463*4882a593Smuzhiyun unsigned long flags;
1464*4882a593Smuzhiyun int i;
1465*4882a593Smuzhiyun
1466*4882a593Smuzhiyun if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
1467*4882a593Smuzhiyun base |= (u64)screen_info.ext_lfb_base << 32;
1468*4882a593Smuzhiyun
1469*4882a593Smuzhiyun limit = base + size;
1470*4882a593Smuzhiyun
1471*4882a593Smuzhiyun list_for_each_entry(vgadev, &vga_list, list) {
1472*4882a593Smuzhiyun struct device *dev = &vgadev->pdev->dev;
1473*4882a593Smuzhiyun /*
1474*4882a593Smuzhiyun * Override vga_arbiter_add_pci_device()'s I/O based detection
1475*4882a593Smuzhiyun * as it may take the wrong device (e.g. on Apple system under
1476*4882a593Smuzhiyun * EFI).
1477*4882a593Smuzhiyun *
1478*4882a593Smuzhiyun * Select the device owning the boot framebuffer if there is
1479*4882a593Smuzhiyun * one.
1480*4882a593Smuzhiyun */
1481*4882a593Smuzhiyun
1482*4882a593Smuzhiyun /* Does firmware framebuffer belong to us? */
1483*4882a593Smuzhiyun for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1484*4882a593Smuzhiyun flags = pci_resource_flags(vgadev->pdev, i);
1485*4882a593Smuzhiyun
1486*4882a593Smuzhiyun if ((flags & IORESOURCE_MEM) == 0)
1487*4882a593Smuzhiyun continue;
1488*4882a593Smuzhiyun
1489*4882a593Smuzhiyun start = pci_resource_start(vgadev->pdev, i);
1490*4882a593Smuzhiyun end = pci_resource_end(vgadev->pdev, i);
1491*4882a593Smuzhiyun
1492*4882a593Smuzhiyun if (!start || !end)
1493*4882a593Smuzhiyun continue;
1494*4882a593Smuzhiyun
1495*4882a593Smuzhiyun if (base < start || limit >= end)
1496*4882a593Smuzhiyun continue;
1497*4882a593Smuzhiyun
1498*4882a593Smuzhiyun if (!vga_default_device())
1499*4882a593Smuzhiyun vgaarb_info(dev, "setting as boot device\n");
1500*4882a593Smuzhiyun else if (vgadev->pdev != vga_default_device())
1501*4882a593Smuzhiyun vgaarb_info(dev, "overriding boot device\n");
1502*4882a593Smuzhiyun vga_set_default_device(vgadev->pdev);
1503*4882a593Smuzhiyun }
1504*4882a593Smuzhiyun }
1505*4882a593Smuzhiyun #endif
1506*4882a593Smuzhiyun
1507*4882a593Smuzhiyun if (!vga_default_device()) {
1508*4882a593Smuzhiyun list_for_each_entry(vgadev, &vga_list, list) {
1509*4882a593Smuzhiyun struct device *dev = &vgadev->pdev->dev;
1510*4882a593Smuzhiyun u16 cmd;
1511*4882a593Smuzhiyun
1512*4882a593Smuzhiyun pdev = vgadev->pdev;
1513*4882a593Smuzhiyun pci_read_config_word(pdev, PCI_COMMAND, &cmd);
1514*4882a593Smuzhiyun if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
1515*4882a593Smuzhiyun vgaarb_info(dev, "setting as boot device (VGA legacy resources not available)\n");
1516*4882a593Smuzhiyun vga_set_default_device(pdev);
1517*4882a593Smuzhiyun break;
1518*4882a593Smuzhiyun }
1519*4882a593Smuzhiyun }
1520*4882a593Smuzhiyun }
1521*4882a593Smuzhiyun
1522*4882a593Smuzhiyun if (!vga_default_device()) {
1523*4882a593Smuzhiyun vgadev = list_first_entry_or_null(&vga_list,
1524*4882a593Smuzhiyun struct vga_device, list);
1525*4882a593Smuzhiyun if (vgadev) {
1526*4882a593Smuzhiyun struct device *dev = &vgadev->pdev->dev;
1527*4882a593Smuzhiyun vgaarb_info(dev, "setting as boot device (VGA legacy resources not available)\n");
1528*4882a593Smuzhiyun vga_set_default_device(vgadev->pdev);
1529*4882a593Smuzhiyun }
1530*4882a593Smuzhiyun }
1531*4882a593Smuzhiyun }
1532*4882a593Smuzhiyun
1533*4882a593Smuzhiyun static int __init vga_arb_device_init(void)
1534*4882a593Smuzhiyun {
1535*4882a593Smuzhiyun int rc;
1536*4882a593Smuzhiyun struct pci_dev *pdev;
1537*4882a593Smuzhiyun struct vga_device *vgadev;
1538*4882a593Smuzhiyun
1539*4882a593Smuzhiyun rc = misc_register(&vga_arb_device);
1540*4882a593Smuzhiyun if (rc < 0)
1541*4882a593Smuzhiyun pr_err("error %d registering device\n", rc);
1542*4882a593Smuzhiyun
1543*4882a593Smuzhiyun bus_register_notifier(&pci_bus_type, &pci_notifier);
1544*4882a593Smuzhiyun
1545*4882a593Smuzhiyun /* We add all PCI devices of the VGA class to the arbiter by
1546*4882a593Smuzhiyun * default */
1547*4882a593Smuzhiyun pdev = NULL;
1548*4882a593Smuzhiyun while ((pdev =
1549*4882a593Smuzhiyun pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
1550*4882a593Smuzhiyun PCI_ANY_ID, pdev)) != NULL)
1551*4882a593Smuzhiyun vga_arbiter_add_pci_device(pdev);
1552*4882a593Smuzhiyun
1553*4882a593Smuzhiyun list_for_each_entry(vgadev, &vga_list, list) {
1554*4882a593Smuzhiyun struct device *dev = &vgadev->pdev->dev;
1555*4882a593Smuzhiyun
1556*4882a593Smuzhiyun if (vgadev->bridge_has_one_vga)
1557*4882a593Smuzhiyun vgaarb_info(dev, "bridge control possible\n");
1558*4882a593Smuzhiyun else
1559*4882a593Smuzhiyun vgaarb_info(dev, "no bridge control possible\n");
1560*4882a593Smuzhiyun }
1561*4882a593Smuzhiyun
1562*4882a593Smuzhiyun vga_arb_select_default_device();
1563*4882a593Smuzhiyun
1564*4882a593Smuzhiyun pr_info("loaded\n");
1565*4882a593Smuzhiyun return rc;
1566*4882a593Smuzhiyun }
1567*4882a593Smuzhiyun subsys_initcall(vga_arb_device_init);
1568