1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (c) Intel Corp. 2007.
4*4882a593Smuzhiyun * All Rights Reserved.
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
7*4882a593Smuzhiyun * develop this driver.
8*4882a593Smuzhiyun *
9*4882a593Smuzhiyun * This file is part of the Vermilion Range fb driver.
10*4882a593Smuzhiyun *
11*4882a593Smuzhiyun * Authors:
12*4882a593Smuzhiyun * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
13*4882a593Smuzhiyun * Michel Dänzer <michel-at-tungstengraphics-dot-com>
14*4882a593Smuzhiyun * Alan Hourihane <alanh-at-tungstengraphics-dot-com>
15*4882a593Smuzhiyun */
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun #include <linux/module.h>
18*4882a593Smuzhiyun #include <linux/kernel.h>
19*4882a593Smuzhiyun #include <linux/errno.h>
20*4882a593Smuzhiyun #include <linux/string.h>
21*4882a593Smuzhiyun #include <linux/delay.h>
22*4882a593Smuzhiyun #include <linux/slab.h>
23*4882a593Smuzhiyun #include <linux/mm.h>
24*4882a593Smuzhiyun #include <linux/fb.h>
25*4882a593Smuzhiyun #include <linux/pci.h>
26*4882a593Smuzhiyun #include <asm/set_memory.h>
27*4882a593Smuzhiyun #include <asm/tlbflush.h>
28*4882a593Smuzhiyun #include <linux/mmzone.h>
29*4882a593Smuzhiyun
30*4882a593Smuzhiyun /* #define VERMILION_DEBUG */
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun #include "vermilion.h"
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun #define MODULE_NAME "vmlfb"
35*4882a593Smuzhiyun
/*
 * Convert a 16-bit color component to a _width-bit hardware value.
 * The "+ 0x7FFF - (_val)" term adds a rounding bias before the
 * truncating right shift.
 */
#define VML_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)

/* Serializes access to the global lists and to device lookup/teardown. */
static struct mutex vml_mutex;
/* vml_info instances that currently have no subsystem/mode attached. */
static struct list_head global_no_mode;
/* vml_info instances with an active mode. */
static struct list_head global_has_mode;
/* Filled in further down in this file. */
static struct fb_ops vmlfb_ops;
/* Optional display subsystem backend; NULL until one registers. */
static struct vml_sys *subsys = NULL;
static char *vml_default_mode = "1024x768@60";
/* Fallback timing used when vml_default_mode cannot be matched. */
static const struct fb_videomode defaultmode = {
	NULL, 60, 1024, 768, 12896, 144, 24, 29, 3, 136, 6,
	0, FB_VMODE_NONINTERLACED
};

/* vram sizing policy: what we ask for, and the contiguous/total minimums. */
static u32 vml_mem_requested = (10 * 1024 * 1024);
static u32 vml_mem_contig = (4 * 1024 * 1024);
static u32 vml_mem_min = (4 * 1024 * 1024);

/* Pixel clocks (in kHz) the hardware can generate, ascending order. */
static u32 vml_clocks[] = {
	6750,
	13500,
	27000,
	29700,
	37125,
	54000,
	59400,
	74250,
	120000,
	148500
};

static u32 vml_num_clocks = ARRAY_SIZE(vml_clocks);
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun /*
69*4882a593Smuzhiyun * Allocate a contiguous vram area and make its linear kernel map
70*4882a593Smuzhiyun * uncached.
71*4882a593Smuzhiyun */
72*4882a593Smuzhiyun
/*
 * Allocate one physically contiguous vram block into @va, trying page
 * order @max_order first and retrying with successively smaller orders
 * down to @min_order until an allocation succeeds.  On success the
 * block is zeroed, its linear kernel mapping is switched to uncached,
 * and va->logical/phys/size/order are filled in.
 *
 * Returns 0 on success, -ENOMEM if no order in the range could be
 * satisfied.
 */
static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order,
				 unsigned min_order)
{
	gfp_t flags;
	unsigned long i;

	/* Pre-increment so the first --max_order below tries @max_order. */
	max_order++;
	do {
		/*
		 * Really try hard to get the needed memory.
		 * We need memory below the first 32MB, so we
		 * add the __GFP_DMA flag that guarantees that we are
		 * below the first 16MB.
		 */

		flags = __GFP_DMA | __GFP_HIGH | __GFP_KSWAPD_RECLAIM;
		va->logical =
			 __get_free_pages(flags, --max_order);
	} while (va->logical == 0 && max_order > min_order);

	if (!va->logical)
		return -ENOMEM;

	va->phys = virt_to_phys((void *)va->logical);
	va->size = PAGE_SIZE << max_order;
	va->order = max_order;

	/*
	 * It seems like __get_free_pages only ups the usage count
	 * of the first page. This doesn't work with fault mapping, so
	 * up the usage count once more (XXX: should use split_page or
	 * compound page).
	 */

	memset((void *)va->logical, 0x00, va->size);
	for (i = va->logical; i < va->logical + va->size; i += PAGE_SIZE) {
		get_page(virt_to_page(i));
	}

	/*
	 * Change caching policy of the linear kernel map to avoid
	 * mapping type conflicts with user-space mappings.
	 */
	set_pages_uc(virt_to_page(va->logical), va->size >> PAGE_SHIFT);

	printk(KERN_DEBUG MODULE_NAME
	       ": Allocated %ld bytes vram area at 0x%08lx\n",
	       va->size, va->phys);

	return 0;
}
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun /*
126*4882a593Smuzhiyun * Free a contiguous vram area and reset its linear kernel map
127*4882a593Smuzhiyun * mapping type.
128*4882a593Smuzhiyun */
129*4882a593Smuzhiyun
/*
 * Free one vram block allocated by vmlfb_alloc_vram_area(): restore the
 * write-back caching policy on the linear kernel map, drop the extra
 * per-page references taken at allocation time, and return the pages to
 * the page allocator.  Safe to call on an already-freed @va (logical == 0).
 */
static void vmlfb_free_vram_area(struct vram_area *va)
{
	unsigned long j;

	if (va->logical) {

		/*
		 * Reset the linear kernel map caching policy.
		 */

		set_pages_wb(virt_to_page(va->logical),
			     va->size >> PAGE_SHIFT);

		/*
		 * Decrease the usage count on the pages we've used
		 * to compensate for upping when allocating.
		 */

		for (j = va->logical; j < va->logical + va->size;
		     j += PAGE_SIZE) {
			(void)put_page_testzero(virt_to_page(j));
		}

		printk(KERN_DEBUG MODULE_NAME
		       ": Freeing %ld bytes vram area at 0x%08lx\n",
		       va->size, va->phys);
		free_pages(va->logical, va->order);

		/* Mark the slot free so a double free is a no-op. */
		va->logical = 0;
	}
}
161*4882a593Smuzhiyun
162*4882a593Smuzhiyun /*
163*4882a593Smuzhiyun * Free allocated vram.
164*4882a593Smuzhiyun */
165*4882a593Smuzhiyun
vmlfb_free_vram(struct vml_info * vinfo)166*4882a593Smuzhiyun static void vmlfb_free_vram(struct vml_info *vinfo)
167*4882a593Smuzhiyun {
168*4882a593Smuzhiyun int i;
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun for (i = 0; i < vinfo->num_areas; ++i) {
171*4882a593Smuzhiyun vmlfb_free_vram_area(&vinfo->vram[i]);
172*4882a593Smuzhiyun }
173*4882a593Smuzhiyun vinfo->num_areas = 0;
174*4882a593Smuzhiyun }
175*4882a593Smuzhiyun
176*4882a593Smuzhiyun /*
177*4882a593Smuzhiyun * Allocate vram. Currently we try to allocate contiguous areas from the
178*4882a593Smuzhiyun * __GFP_DMA zone and puzzle them together. A better approach would be to
179*4882a593Smuzhiyun * allocate one contiguous area for scanout and use one-page allocations for
180*4882a593Smuzhiyun * offscreen areas. This requires user-space and GPU virtual mappings.
181*4882a593Smuzhiyun */
182*4882a593Smuzhiyun
/*
 * Allocate vram for @vinfo: up to VML_VRAM_AREAS contiguous chunks from
 * the __GFP_DMA zone are allocated and accepted only if each new chunk
 * is physically adjacent to one already held, so that together they
 * form one contiguous region of at least @min_total / @min_contig bytes
 * (the caller passes both; see the size check below).  @requested is
 * the total amount aimed for.
 *
 * Returns 0 on success; -ENOMEM (after freeing everything) if the
 * minimums could not be met.
 */
static int vmlfb_alloc_vram(struct vml_info *vinfo,
			    size_t requested,
			    size_t min_total, size_t min_contig)
{
	int i, j;
	int order;
	int contiguous;
	int err;
	struct vram_area *va;
	struct vram_area *va2;

	vinfo->num_areas = 0;
	for (i = 0; i < VML_VRAM_AREAS; ++i) {
		va = &vinfo->vram[i];
		order = 0;

		/* Smallest order whose size covers what is still needed. */
		while (requested > (PAGE_SIZE << order) && order < MAX_ORDER)
			order++;

		err = vmlfb_alloc_vram_area(va, order, 0);

		if (err)
			break;

		if (i == 0) {
			/* First chunk anchors the contiguous region. */
			vinfo->vram_start = va->phys;
			vinfo->vram_logical = (void __iomem *) va->logical;
			vinfo->vram_contig_size = va->size;
			vinfo->num_areas = 1;
		} else {
			contiguous = 0;

			/* Accept only if adjacent to an existing chunk. */
			for (j = 0; j < i; ++j) {
				va2 = &vinfo->vram[j];
				if (va->phys + va->size == va2->phys ||
				    va2->phys + va2->size == va->phys) {
					contiguous = 1;
					break;
				}
			}

			if (contiguous) {
				vinfo->num_areas++;
				/* Extend the region downwards if needed. */
				if (va->phys < vinfo->vram_start) {
					vinfo->vram_start = va->phys;
					vinfo->vram_logical =
						(void __iomem *)va->logical;
				}
				vinfo->vram_contig_size += va->size;
			} else {
				/* Not adjacent: give it back and stop. */
				vmlfb_free_vram_area(va);
				break;
			}
		}

		if (requested < va->size)
			break;
		else
			requested -= va->size;
	}

	if (vinfo->vram_contig_size > min_total &&
	    vinfo->vram_contig_size > min_contig) {

		printk(KERN_DEBUG MODULE_NAME
		       ": Contiguous vram: %ld bytes at physical 0x%08lx.\n",
		       (unsigned long)vinfo->vram_contig_size,
		       (unsigned long)vinfo->vram_start);

		return 0;
	}

	printk(KERN_ERR MODULE_NAME
	       ": Could not allocate requested minimal amount of vram.\n");

	vmlfb_free_vram(vinfo);

	return -ENOMEM;
}
262*4882a593Smuzhiyun
263*4882a593Smuzhiyun /*
264*4882a593Smuzhiyun * Find the GPU to use with our display controller.
265*4882a593Smuzhiyun */
266*4882a593Smuzhiyun
vmlfb_get_gpu(struct vml_par * par)267*4882a593Smuzhiyun static int vmlfb_get_gpu(struct vml_par *par)
268*4882a593Smuzhiyun {
269*4882a593Smuzhiyun mutex_lock(&vml_mutex);
270*4882a593Smuzhiyun
271*4882a593Smuzhiyun par->gpu = pci_get_device(PCI_VENDOR_ID_INTEL, VML_DEVICE_GPU, NULL);
272*4882a593Smuzhiyun
273*4882a593Smuzhiyun if (!par->gpu) {
274*4882a593Smuzhiyun mutex_unlock(&vml_mutex);
275*4882a593Smuzhiyun return -ENODEV;
276*4882a593Smuzhiyun }
277*4882a593Smuzhiyun
278*4882a593Smuzhiyun mutex_unlock(&vml_mutex);
279*4882a593Smuzhiyun
280*4882a593Smuzhiyun if (pci_enable_device(par->gpu) < 0)
281*4882a593Smuzhiyun return -ENODEV;
282*4882a593Smuzhiyun
283*4882a593Smuzhiyun return 0;
284*4882a593Smuzhiyun }
285*4882a593Smuzhiyun
286*4882a593Smuzhiyun /*
287*4882a593Smuzhiyun * Find a contiguous vram area that contains a given offset from vram start.
288*4882a593Smuzhiyun */
vmlfb_vram_offset(struct vml_info * vinfo,unsigned long offset)289*4882a593Smuzhiyun static int vmlfb_vram_offset(struct vml_info *vinfo, unsigned long offset)
290*4882a593Smuzhiyun {
291*4882a593Smuzhiyun unsigned long aoffset;
292*4882a593Smuzhiyun unsigned i;
293*4882a593Smuzhiyun
294*4882a593Smuzhiyun for (i = 0; i < vinfo->num_areas; ++i) {
295*4882a593Smuzhiyun aoffset = offset - (vinfo->vram[i].phys - vinfo->vram_start);
296*4882a593Smuzhiyun
297*4882a593Smuzhiyun if (aoffset < vinfo->vram[i].size) {
298*4882a593Smuzhiyun return 0;
299*4882a593Smuzhiyun }
300*4882a593Smuzhiyun }
301*4882a593Smuzhiyun
302*4882a593Smuzhiyun return -EINVAL;
303*4882a593Smuzhiyun }
304*4882a593Smuzhiyun
305*4882a593Smuzhiyun /*
306*4882a593Smuzhiyun * Remap the MMIO register spaces of the VDC and the GPU.
307*4882a593Smuzhiyun */
308*4882a593Smuzhiyun
/*
 * Claim and map the MMIO register BARs (BAR 0) of both the display
 * controller (VDC) and the GPU.  On failure everything acquired so far
 * is released via the goto ladder below; note each label undoes the
 * step that succeeded immediately before the failing one.
 *
 * Returns 0 on success, -EBUSY / -ENOMEM on failure.
 */
static int vmlfb_enable_mmio(struct vml_par *par)
{
	int err;

	par->vdc_mem_base = pci_resource_start(par->vdc, 0);
	par->vdc_mem_size = pci_resource_len(par->vdc, 0);
	if (!request_mem_region(par->vdc_mem_base, par->vdc_mem_size, "vmlfb")) {
		printk(KERN_ERR MODULE_NAME
		       ": Could not claim display controller MMIO.\n");
		return -EBUSY;
	}
	par->vdc_mem = ioremap(par->vdc_mem_base, par->vdc_mem_size);
	if (par->vdc_mem == NULL) {
		printk(KERN_ERR MODULE_NAME
		       ": Could not map display controller MMIO.\n");
		err = -ENOMEM;
		goto out_err_0;
	}

	par->gpu_mem_base = pci_resource_start(par->gpu, 0);
	par->gpu_mem_size = pci_resource_len(par->gpu, 0);
	if (!request_mem_region(par->gpu_mem_base, par->gpu_mem_size, "vmlfb")) {
		printk(KERN_ERR MODULE_NAME ": Could not claim GPU MMIO.\n");
		err = -EBUSY;
		goto out_err_1;
	}
	par->gpu_mem = ioremap(par->gpu_mem_base, par->gpu_mem_size);
	if (par->gpu_mem == NULL) {
		printk(KERN_ERR MODULE_NAME ": Could not map GPU MMIO.\n");
		err = -ENOMEM;
		goto out_err_2;
	}

	return 0;

out_err_2:
	release_mem_region(par->gpu_mem_base, par->gpu_mem_size);
out_err_1:
	iounmap(par->vdc_mem);
out_err_0:
	release_mem_region(par->vdc_mem_base, par->vdc_mem_size);
	return err;
}
352*4882a593Smuzhiyun
353*4882a593Smuzhiyun /*
354*4882a593Smuzhiyun * Unmap the VDC and GPU register spaces.
355*4882a593Smuzhiyun */
356*4882a593Smuzhiyun
/*
 * Undo vmlfb_enable_mmio(): unmap each BAR before releasing its
 * memory region, GPU first, then the display controller.
 */
static void vmlfb_disable_mmio(struct vml_par *par)
{
	iounmap(par->gpu_mem);
	release_mem_region(par->gpu_mem_base, par->gpu_mem_size);
	iounmap(par->vdc_mem);
	release_mem_region(par->vdc_mem_base, par->vdc_mem_size);
}
364*4882a593Smuzhiyun
365*4882a593Smuzhiyun /*
366*4882a593Smuzhiyun * Release and uninit the VDC and GPU.
367*4882a593Smuzhiyun */
368*4882a593Smuzhiyun
/*
 * Drop one reference on @par's device usage count; when it hits zero,
 * disable both PCI functions (GPU and display controller).
 */
static void vmlfb_release_devices(struct vml_par *par)
{
	if (atomic_dec_and_test(&par->refcount)) {
		pci_disable_device(par->gpu);
		pci_disable_device(par->vdc);
	}
}
376*4882a593Smuzhiyun
377*4882a593Smuzhiyun /*
378*4882a593Smuzhiyun * Free up allocated resources for a device.
379*4882a593Smuzhiyun */
380*4882a593Smuzhiyun
/*
 * PCI remove callback: tear down everything vml_pci_probe() set up, in
 * reverse order, under vml_mutex.  A NULL drvdata means probe never
 * completed for this device, so there is nothing to undo.
 */
static void vml_pci_remove(struct pci_dev *dev)
{
	struct fb_info *info;
	struct vml_info *vinfo;
	struct vml_par *par;

	info = pci_get_drvdata(dev);
	if (info) {
		/* fb_info is embedded in vml_info; recover the container. */
		vinfo = container_of(info, struct vml_info, info);
		par = vinfo->par;
		mutex_lock(&vml_mutex);
		unregister_framebuffer(info);
		fb_dealloc_cmap(&info->cmap);
		vmlfb_free_vram(vinfo);
		vmlfb_disable_mmio(par);
		vmlfb_release_devices(par);
		kfree(vinfo);
		kfree(par);
		mutex_unlock(&vml_mutex);
	}
}
402*4882a593Smuzhiyun
vmlfb_set_pref_pixel_format(struct fb_var_screeninfo * var)403*4882a593Smuzhiyun static void vmlfb_set_pref_pixel_format(struct fb_var_screeninfo *var)
404*4882a593Smuzhiyun {
405*4882a593Smuzhiyun switch (var->bits_per_pixel) {
406*4882a593Smuzhiyun case 16:
407*4882a593Smuzhiyun var->blue.offset = 0;
408*4882a593Smuzhiyun var->blue.length = 5;
409*4882a593Smuzhiyun var->green.offset = 5;
410*4882a593Smuzhiyun var->green.length = 5;
411*4882a593Smuzhiyun var->red.offset = 10;
412*4882a593Smuzhiyun var->red.length = 5;
413*4882a593Smuzhiyun var->transp.offset = 15;
414*4882a593Smuzhiyun var->transp.length = 1;
415*4882a593Smuzhiyun break;
416*4882a593Smuzhiyun case 32:
417*4882a593Smuzhiyun var->blue.offset = 0;
418*4882a593Smuzhiyun var->blue.length = 8;
419*4882a593Smuzhiyun var->green.offset = 8;
420*4882a593Smuzhiyun var->green.length = 8;
421*4882a593Smuzhiyun var->red.offset = 16;
422*4882a593Smuzhiyun var->red.length = 8;
423*4882a593Smuzhiyun var->transp.offset = 24;
424*4882a593Smuzhiyun var->transp.length = 0;
425*4882a593Smuzhiyun break;
426*4882a593Smuzhiyun default:
427*4882a593Smuzhiyun break;
428*4882a593Smuzhiyun }
429*4882a593Smuzhiyun
430*4882a593Smuzhiyun var->blue.msb_right = var->green.msb_right =
431*4882a593Smuzhiyun var->red.msb_right = var->transp.msb_right = 0;
432*4882a593Smuzhiyun }
433*4882a593Smuzhiyun
434*4882a593Smuzhiyun /*
435*4882a593Smuzhiyun * Device initialization.
436*4882a593Smuzhiyun * We initialize one vml_par struct per device and one vml_info
437*4882a593Smuzhiyun * struct per pipe. Currently we have only one pipe.
438*4882a593Smuzhiyun */
439*4882a593Smuzhiyun
/*
 * PCI probe callback.  Allocates one vml_par per device and one
 * vml_info per pipe (currently a single pipe), locates and enables the
 * companion GPU, maps MMIO, allocates vram, fills in the fb_info, and
 * registers the framebuffer.  The goto ladder unwinds in strict
 * reverse order of acquisition.
 *
 * Returns 0 on success or a negative errno.
 */
static int vml_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct vml_info *vinfo;
	struct fb_info *info;
	struct vml_par *par;
	int err = 0;

	par = kzalloc(sizeof(*par), GFP_KERNEL);
	if (par == NULL)
		return -ENOMEM;

	vinfo = kzalloc(sizeof(*vinfo), GFP_KERNEL);
	if (vinfo == NULL) {
		err = -ENOMEM;
		goto out_err_0;
	}

	vinfo->par = par;
	par->vdc = dev;
	atomic_set(&par->refcount, 1);

	switch (id->device) {
	case VML_DEVICE_VDC:
		/* This function is the display controller; find its GPU. */
		if ((err = vmlfb_get_gpu(par)))
			goto out_err_1;
		pci_set_drvdata(dev, &vinfo->info);
		break;
	default:
		err = -ENODEV;
		goto out_err_1;
	}

	info = &vinfo->info;
	info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK;

	err = vmlfb_enable_mmio(par);
	if (err)
		goto out_err_2;

	err = vmlfb_alloc_vram(vinfo, vml_mem_requested,
			       vml_mem_contig, vml_mem_min);
	if (err)
		goto out_err_3;

	/* Fixed framebuffer parameters. */
	strcpy(info->fix.id, "Vermilion Range");
	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;
	info->fix.smem_start = vinfo->vram_start;
	info->fix.smem_len = vinfo->vram_contig_size;
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.ypanstep = 1;
	info->fix.xpanstep = 1;
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->screen_base = vinfo->vram_logical;
	info->pseudo_palette = vinfo->pseudo_palette;
	info->par = par;
	info->fbops = &vmlfb_ops;
	info->device = &dev->dev;

	INIT_LIST_HEAD(&vinfo->head);
	vinfo->pipe_disabled = 1;
	vinfo->cur_blank_mode = FB_BLANK_UNBLANK;

	/* Initial variable parameters: 16 bpp truecolor default. */
	info->var.grayscale = 0;
	info->var.bits_per_pixel = 16;
	vmlfb_set_pref_pixel_format(&info->var);

	/* Non-fatal: registration proceeds even without an initial mode. */
	if (!fb_find_mode
	    (&info->var, info, vml_default_mode, NULL, 0, &defaultmode, 16)) {
		printk(KERN_ERR MODULE_NAME ": Could not find initial mode\n");
	}

	if (fb_alloc_cmap(&info->cmap, 256, 1) < 0) {
		err = -ENOMEM;
		goto out_err_4;
	}

	err = register_framebuffer(info);
	if (err) {
		printk(KERN_ERR MODULE_NAME ": Register framebuffer error.\n");
		goto out_err_5;
	}

	printk("Initialized vmlfb\n");

	return 0;

out_err_5:
	fb_dealloc_cmap(&info->cmap);
out_err_4:
	vmlfb_free_vram(vinfo);
out_err_3:
	vmlfb_disable_mmio(par);
out_err_2:
	vmlfb_release_devices(par);
out_err_1:
	kfree(vinfo);
out_err_0:
	kfree(par);
	return err;
}
543*4882a593Smuzhiyun
/* fb_ops.fb_open hook.  Nothing to do yet (register save could go here). */
static int vmlfb_open(struct fb_info *info, int user)
{
	return 0;
}
551*4882a593Smuzhiyun
/* fb_ops.fb_release hook.  Nothing to do yet (register restore could go here). */
static int vmlfb_release(struct fb_info *info, int user)
{
	return 0;
}
560*4882a593Smuzhiyun
vml_nearest_clock(int clock)561*4882a593Smuzhiyun static int vml_nearest_clock(int clock)
562*4882a593Smuzhiyun {
563*4882a593Smuzhiyun
564*4882a593Smuzhiyun int i;
565*4882a593Smuzhiyun int cur_index;
566*4882a593Smuzhiyun int cur_diff;
567*4882a593Smuzhiyun int diff;
568*4882a593Smuzhiyun
569*4882a593Smuzhiyun cur_index = 0;
570*4882a593Smuzhiyun cur_diff = clock - vml_clocks[0];
571*4882a593Smuzhiyun cur_diff = (cur_diff < 0) ? -cur_diff : cur_diff;
572*4882a593Smuzhiyun for (i = 1; i < vml_num_clocks; ++i) {
573*4882a593Smuzhiyun diff = clock - vml_clocks[i];
574*4882a593Smuzhiyun diff = (diff < 0) ? -diff : diff;
575*4882a593Smuzhiyun if (diff < cur_diff) {
576*4882a593Smuzhiyun cur_index = i;
577*4882a593Smuzhiyun cur_diff = diff;
578*4882a593Smuzhiyun }
579*4882a593Smuzhiyun }
580*4882a593Smuzhiyun return vml_clocks[cur_index];
581*4882a593Smuzhiyun }
582*4882a593Smuzhiyun
/*
 * Validate and normalize @var against hardware limits and available
 * vram.  Must be called with vml_mutex held.  On success *var is
 * rewritten with the snapped pixel clock, clamped bpp and, where the
 * requested channel layout is unsupported, the driver's preferred
 * pixel format.
 *
 * Returns 0 on success, -EINVAL or -ENOMEM on rejection.
 */
static int vmlfb_check_var_locked(struct fb_var_screeninfo *var,
				  struct vml_info *vinfo)
{
	u32 pitch;
	u64 mem;
	int nearest_clock;
	int clock;
	int clock_diff;
	struct fb_var_screeninfo v;

	/* Work on a copy; *var is only updated once everything passes. */
	v = *var;
	clock = PICOS2KHZ(var->pixclock);

	/* Prefer the subsystem's clock snapping if one is registered. */
	if (subsys && subsys->nearest_clock) {
		nearest_clock = subsys->nearest_clock(subsys, clock);
	} else {
		nearest_clock = vml_nearest_clock(clock);
	}

	/*
	 * Accept a 20% diff.
	 */

	clock_diff = nearest_clock - clock;
	clock_diff = (clock_diff < 0) ? -clock_diff : clock_diff;
	if (clock_diff > clock / 5) {
#if 0
		printk(KERN_DEBUG MODULE_NAME ": Diff failure. %d %d\n",clock_diff,clock);
#endif
		return -EINVAL;
	}

	v.pixclock = KHZ2PICOS(nearest_clock);

	if (var->xres > VML_MAX_XRES || var->yres > VML_MAX_YRES) {
		printk(KERN_DEBUG MODULE_NAME ": Resolution failure.\n");
		return -EINVAL;
	}
	if (var->xres_virtual > VML_MAX_XRES_VIRTUAL) {
		printk(KERN_DEBUG MODULE_NAME
		       ": Virtual resolution failure.\n");
		return -EINVAL;
	}
	/* Clamp the requested depth to the two supported formats. */
	switch (v.bits_per_pixel) {
	case 0 ... 16:
		v.bits_per_pixel = 16;
		break;
	case 17 ... 32:
		v.bits_per_pixel = 32;
		break;
	default:
		printk(KERN_DEBUG MODULE_NAME ": Invalid bpp: %d.\n",
		       var->bits_per_pixel);
		return -EINVAL;
	}

	/*
	 * Scanline pitch is 64-byte aligned.
	 * NOTE(review): pitch uses the caller's var->bits_per_pixel, not
	 * the clamped v.bits_per_pixel — e.g. bpp==0 yields pitch 0 here
	 * while v.bits_per_pixel becomes 16; confirm this is intended.
	 */
	pitch = ALIGN((var->xres * var->bits_per_pixel) >> 3, 0x40);
	mem = (u64)pitch * var->yres_virtual;
	if (mem > vinfo->vram_contig_size) {
		return -ENOMEM;
	}

	/* Fall back to the preferred layout on any channel mismatch. */
	switch (v.bits_per_pixel) {
	case 16:
		if (var->blue.offset != 0 ||
		    var->blue.length != 5 ||
		    var->green.offset != 5 ||
		    var->green.length != 5 ||
		    var->red.offset != 10 ||
		    var->red.length != 5 ||
		    var->transp.offset != 15 || var->transp.length != 1) {
			vmlfb_set_pref_pixel_format(&v);
		}
		break;
	case 32:
		if (var->blue.offset != 0 ||
		    var->blue.length != 8 ||
		    var->green.offset != 8 ||
		    var->green.length != 8 ||
		    var->red.offset != 16 ||
		    var->red.length != 8 ||
		    (var->transp.length != 0 && var->transp.length != 8) ||
		    (var->transp.length == 8 && var->transp.offset != 24)) {
			vmlfb_set_pref_pixel_format(&v);
		}
		break;
	default:
		return -EINVAL;
	}

	*var = v;

	return 0;
}
677*4882a593Smuzhiyun
vmlfb_check_var(struct fb_var_screeninfo * var,struct fb_info * info)678*4882a593Smuzhiyun static int vmlfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
679*4882a593Smuzhiyun {
680*4882a593Smuzhiyun struct vml_info *vinfo = container_of(info, struct vml_info, info);
681*4882a593Smuzhiyun int ret;
682*4882a593Smuzhiyun
683*4882a593Smuzhiyun mutex_lock(&vml_mutex);
684*4882a593Smuzhiyun ret = vmlfb_check_var_locked(var, vinfo);
685*4882a593Smuzhiyun mutex_unlock(&vml_mutex);
686*4882a593Smuzhiyun
687*4882a593Smuzhiyun return ret;
688*4882a593Smuzhiyun }
689*4882a593Smuzhiyun
static void vml_wait_vblank(struct vml_info *vinfo)
{
	/* Wait for vblank. For now, just wait for a 50Hz cycle (20ms)) */
	mdelay(20);
}
695*4882a593Smuzhiyun
/*
 * Shut down the display pipe: MDVO pad first, then the display plane
 * (waiting a vblank for it to take effect), then the pipe itself.
 * The register write order follows the hardware's required sequence;
 * do not reorder.
 */
static void vmlfb_disable_pipe(struct vml_info *vinfo)
{
	struct vml_par *par = vinfo->par;

	/* Disable the MDVO pad */
	VML_WRITE32(par, VML_RCOMPSTAT, 0);
	/* NOTE(review): unbounded busy-wait; hangs if RCOMP never asserts. */
	while (!(VML_READ32(par, VML_RCOMPSTAT) & VML_MDVO_VDC_I_RCOMP)) ;

	/* Disable display planes */
	VML_WRITE32(par, VML_DSPCCNTR,
		    VML_READ32(par, VML_DSPCCNTR) & ~VML_GFX_ENABLE);
	/* Read back to post the write before waiting. */
	(void)VML_READ32(par, VML_DSPCCNTR);
	/* Wait for vblank for the disable to take effect */
	vml_wait_vblank(vinfo);

	/* Next, disable display pipes */
	VML_WRITE32(par, VML_PIPEACONF, 0);
	(void)VML_READ32(par, VML_PIPEACONF);

	vinfo->pipe_disabled = 1;
}
717*4882a593Smuzhiyun
#ifdef VERMILION_DEBUG
/*
 * Dump all modesetting-related VDC registers to the kernel log at
 * KERN_DEBUG level.  Debug-build only (VERMILION_DEBUG); called at the
 * end of a successful vmlfb_set_par_locked() to verify programming.
 */
static void vml_dump_regs(struct vml_info *vinfo)
{
	struct vml_par *par = vinfo->par;

	printk(KERN_DEBUG MODULE_NAME ": Modesetting register dump:\n");
	printk(KERN_DEBUG MODULE_NAME ": \tHTOTAL_A         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_HTOTAL_A));
	printk(KERN_DEBUG MODULE_NAME ": \tHBLANK_A         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_HBLANK_A));
	printk(KERN_DEBUG MODULE_NAME ": \tHSYNC_A          : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_HSYNC_A));
	printk(KERN_DEBUG MODULE_NAME ": \tVTOTAL_A         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_VTOTAL_A));
	printk(KERN_DEBUG MODULE_NAME ": \tVBLANK_A         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_VBLANK_A));
	printk(KERN_DEBUG MODULE_NAME ": \tVSYNC_A          : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_VSYNC_A));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCSTRIDE       : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCSTRIDE));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCSIZE         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCSIZE));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCPOS          : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCPOS));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPARB           : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPARB));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCADDR         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCADDR));
	printk(KERN_DEBUG MODULE_NAME ": \tBCLRPAT_A        : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_BCLRPAT_A));
	printk(KERN_DEBUG MODULE_NAME ": \tCANVSCLR_A       : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_CANVSCLR_A));
	printk(KERN_DEBUG MODULE_NAME ": \tPIPEASRC         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_PIPEASRC));
	printk(KERN_DEBUG MODULE_NAME ": \tPIPEACONF        : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_PIPEACONF));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCCNTR         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCCNTR));
	printk(KERN_DEBUG MODULE_NAME ": \tRCOMPSTAT        : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_RCOMPSTAT));
	printk(KERN_DEBUG MODULE_NAME ": End of modesetting register dump.\n");
}
#endif
761*4882a593Smuzhiyun
/*
 * Program the hardware for the mode currently described in
 * vinfo->info.var.  Caller must hold vml_mutex.
 *
 * Stages:
 *   1. Derive stride / bytes-per-pixel and update fix.line_length
 *      (done even without a subsystem, so the software state is sane).
 *   2. Compute CRTC horizontal/vertical timing values from the var
 *      margins and sync lengths.
 *   3. Select a pixel format for DSPCCNTR (16 bpp -> ARGB1555,
 *      32 bpp -> ARGB8888 or RGB0888 depending on transp.length).
 *   4. Disable the pipe, set the dot clock through the subsystem,
 *      write the timing/plane registers, then re-enable pipe, plane
 *      and MDVO pad in that order with write barriers between stages.
 *
 * Returns 0 on success, -EINVAL for an unsupported depth or when the
 * subsystem provides no set_clock hook.
 *
 * NOTE(review): the -EINVAL return for a missing set_clock happens
 * after vmlfb_disable_pipe(), leaving the pipe off — presumably
 * intentional (we cannot drive it without a clock), but worth
 * confirming.
 */
static int vmlfb_set_par_locked(struct vml_info *vinfo)
{
	struct vml_par *par = vinfo->par;
	struct fb_info *info = &vinfo->info;
	struct fb_var_screeninfo *var = &info->var;
	u32 htotal, hactive, hblank_start, hblank_end, hsync_start, hsync_end;
	u32 vtotal, vactive, vblank_start, vblank_end, vsync_start, vsync_end;
	u32 dspcntr;
	int clock;

	/* Stride is aligned to 64 bytes as required by the display engine. */
	vinfo->bytes_per_pixel = var->bits_per_pixel >> 3;
	vinfo->stride = ALIGN(var->xres_virtual * vinfo->bytes_per_pixel, 0x40);
	info->fix.line_length = vinfo->stride;

	/* Without a clock/DAC subsystem we cannot touch the hardware. */
	if (!subsys)
		return 0;

	htotal =
	    var->xres + var->right_margin + var->hsync_len + var->left_margin;
	hactive = var->xres;
	hblank_start = var->xres;
	hblank_end = htotal;
	hsync_start = hactive + var->right_margin;
	hsync_end = hsync_start + var->hsync_len;

	vtotal =
	    var->yres + var->lower_margin + var->vsync_len + var->upper_margin;
	vactive = var->yres;
	vblank_start = var->yres;
	vblank_end = vtotal;
	vsync_start = vactive + var->lower_margin;
	vsync_end = vsync_start + var->vsync_len;

	dspcntr = VML_GFX_ENABLE | VML_GFX_GAMMABYPASS;
	clock = PICOS2KHZ(var->pixclock);

	/* Snap the requested pixel clock to what the PLL can produce. */
	if (subsys->nearest_clock) {
		clock = subsys->nearest_clock(subsys, clock);
	} else {
		clock = vml_nearest_clock(clock);
	}
	printk(KERN_DEBUG MODULE_NAME
	       ": Set mode Hfreq : %d kHz, Vfreq : %d Hz.\n", clock / htotal,
	       ((clock / htotal) * 1000) / vtotal);

	switch (var->bits_per_pixel) {
	case 16:
		dspcntr |= VML_GFX_ARGB1555;
		break;
	case 32:
		if (var->transp.length == 8)
			dspcntr |= VML_GFX_ARGB8888 | VML_GFX_ALPHAMULT;
		else
			dspcntr |= VML_GFX_RGB0888;
		break;
	default:
		return -EINVAL;
	}

	/* Pipe must be off while the clock and timings are reprogrammed. */
	vmlfb_disable_pipe(vinfo);
	mb();

	if (subsys->set_clock)
		subsys->set_clock(subsys, clock);
	else
		return -EINVAL;

	/* Timing registers encode (end-1 << 16) | (start-1). */
	VML_WRITE32(par, VML_HTOTAL_A, ((htotal - 1) << 16) | (hactive - 1));
	VML_WRITE32(par, VML_HBLANK_A,
		    ((hblank_end - 1) << 16) | (hblank_start - 1));
	VML_WRITE32(par, VML_HSYNC_A,
		    ((hsync_end - 1) << 16) | (hsync_start - 1));
	VML_WRITE32(par, VML_VTOTAL_A, ((vtotal - 1) << 16) | (vactive - 1));
	VML_WRITE32(par, VML_VBLANK_A,
		    ((vblank_end - 1) << 16) | (vblank_start - 1));
	VML_WRITE32(par, VML_VSYNC_A,
		    ((vsync_end - 1) << 16) | (vsync_start - 1));
	VML_WRITE32(par, VML_DSPCSTRIDE, vinfo->stride);
	VML_WRITE32(par, VML_DSPCSIZE,
		    ((var->yres - 1) << 16) | (var->xres - 1));
	VML_WRITE32(par, VML_DSPCPOS, 0x00000000);
	VML_WRITE32(par, VML_DSPARB, VML_FIFO_DEFAULT);
	VML_WRITE32(par, VML_BCLRPAT_A, 0x00000000);
	VML_WRITE32(par, VML_CANVSCLR_A, 0x00000000);
	VML_WRITE32(par, VML_PIPEASRC,
		    ((var->xres - 1) << 16) | (var->yres - 1));

	/* Enable order: pipe, then plane, then scanout base, then pad. */
	wmb();
	VML_WRITE32(par, VML_PIPEACONF, VML_PIPE_ENABLE);
	wmb();
	VML_WRITE32(par, VML_DSPCCNTR, dspcntr);
	wmb();
	VML_WRITE32(par, VML_DSPCADDR, (u32) vinfo->vram_start +
		    var->yoffset * vinfo->stride +
		    var->xoffset * vinfo->bytes_per_pixel);

	VML_WRITE32(par, VML_RCOMPSTAT, VML_MDVO_PAD_ENABLE);

	/* Poll until the pad reports enabled (unbounded, see disable_pipe). */
	while (!(VML_READ32(par, VML_RCOMPSTAT) &
		 (VML_MDVO_VDC_I_RCOMP | VML_MDVO_PAD_ENABLE))) ;

	vinfo->pipe_disabled = 0;
#ifdef VERMILION_DEBUG
	vml_dump_regs(vinfo);
#endif

	return 0;
}
870*4882a593Smuzhiyun
vmlfb_set_par(struct fb_info * info)871*4882a593Smuzhiyun static int vmlfb_set_par(struct fb_info *info)
872*4882a593Smuzhiyun {
873*4882a593Smuzhiyun struct vml_info *vinfo = container_of(info, struct vml_info, info);
874*4882a593Smuzhiyun int ret;
875*4882a593Smuzhiyun
876*4882a593Smuzhiyun mutex_lock(&vml_mutex);
877*4882a593Smuzhiyun list_move(&vinfo->head, (subsys) ? &global_has_mode : &global_no_mode);
878*4882a593Smuzhiyun ret = vmlfb_set_par_locked(vinfo);
879*4882a593Smuzhiyun
880*4882a593Smuzhiyun mutex_unlock(&vml_mutex);
881*4882a593Smuzhiyun return ret;
882*4882a593Smuzhiyun }
883*4882a593Smuzhiyun
vmlfb_blank_locked(struct vml_info * vinfo)884*4882a593Smuzhiyun static int vmlfb_blank_locked(struct vml_info *vinfo)
885*4882a593Smuzhiyun {
886*4882a593Smuzhiyun struct vml_par *par = vinfo->par;
887*4882a593Smuzhiyun u32 cur = VML_READ32(par, VML_PIPEACONF);
888*4882a593Smuzhiyun
889*4882a593Smuzhiyun switch (vinfo->cur_blank_mode) {
890*4882a593Smuzhiyun case FB_BLANK_UNBLANK:
891*4882a593Smuzhiyun if (vinfo->pipe_disabled) {
892*4882a593Smuzhiyun vmlfb_set_par_locked(vinfo);
893*4882a593Smuzhiyun }
894*4882a593Smuzhiyun VML_WRITE32(par, VML_PIPEACONF, cur & ~VML_PIPE_FORCE_BORDER);
895*4882a593Smuzhiyun (void)VML_READ32(par, VML_PIPEACONF);
896*4882a593Smuzhiyun break;
897*4882a593Smuzhiyun case FB_BLANK_NORMAL:
898*4882a593Smuzhiyun if (vinfo->pipe_disabled) {
899*4882a593Smuzhiyun vmlfb_set_par_locked(vinfo);
900*4882a593Smuzhiyun }
901*4882a593Smuzhiyun VML_WRITE32(par, VML_PIPEACONF, cur | VML_PIPE_FORCE_BORDER);
902*4882a593Smuzhiyun (void)VML_READ32(par, VML_PIPEACONF);
903*4882a593Smuzhiyun break;
904*4882a593Smuzhiyun case FB_BLANK_VSYNC_SUSPEND:
905*4882a593Smuzhiyun case FB_BLANK_HSYNC_SUSPEND:
906*4882a593Smuzhiyun if (!vinfo->pipe_disabled) {
907*4882a593Smuzhiyun vmlfb_disable_pipe(vinfo);
908*4882a593Smuzhiyun }
909*4882a593Smuzhiyun break;
910*4882a593Smuzhiyun case FB_BLANK_POWERDOWN:
911*4882a593Smuzhiyun if (!vinfo->pipe_disabled) {
912*4882a593Smuzhiyun vmlfb_disable_pipe(vinfo);
913*4882a593Smuzhiyun }
914*4882a593Smuzhiyun break;
915*4882a593Smuzhiyun default:
916*4882a593Smuzhiyun return -EINVAL;
917*4882a593Smuzhiyun }
918*4882a593Smuzhiyun
919*4882a593Smuzhiyun return 0;
920*4882a593Smuzhiyun }
921*4882a593Smuzhiyun
vmlfb_blank(int blank_mode,struct fb_info * info)922*4882a593Smuzhiyun static int vmlfb_blank(int blank_mode, struct fb_info *info)
923*4882a593Smuzhiyun {
924*4882a593Smuzhiyun struct vml_info *vinfo = container_of(info, struct vml_info, info);
925*4882a593Smuzhiyun int ret;
926*4882a593Smuzhiyun
927*4882a593Smuzhiyun mutex_lock(&vml_mutex);
928*4882a593Smuzhiyun vinfo->cur_blank_mode = blank_mode;
929*4882a593Smuzhiyun ret = vmlfb_blank_locked(vinfo);
930*4882a593Smuzhiyun mutex_unlock(&vml_mutex);
931*4882a593Smuzhiyun return ret;
932*4882a593Smuzhiyun }
933*4882a593Smuzhiyun
vmlfb_pan_display(struct fb_var_screeninfo * var,struct fb_info * info)934*4882a593Smuzhiyun static int vmlfb_pan_display(struct fb_var_screeninfo *var,
935*4882a593Smuzhiyun struct fb_info *info)
936*4882a593Smuzhiyun {
937*4882a593Smuzhiyun struct vml_info *vinfo = container_of(info, struct vml_info, info);
938*4882a593Smuzhiyun struct vml_par *par = vinfo->par;
939*4882a593Smuzhiyun
940*4882a593Smuzhiyun mutex_lock(&vml_mutex);
941*4882a593Smuzhiyun VML_WRITE32(par, VML_DSPCADDR, (u32) vinfo->vram_start +
942*4882a593Smuzhiyun var->yoffset * vinfo->stride +
943*4882a593Smuzhiyun var->xoffset * vinfo->bytes_per_pixel);
944*4882a593Smuzhiyun (void)VML_READ32(par, VML_DSPCADDR);
945*4882a593Smuzhiyun mutex_unlock(&vml_mutex);
946*4882a593Smuzhiyun
947*4882a593Smuzhiyun return 0;
948*4882a593Smuzhiyun }
949*4882a593Smuzhiyun
/*
 * fb_setcolreg entry point: store one truecolor pseudo-palette entry.
 *
 * Only the first 16 registers exist, and only FB_VISUAL_TRUECOLOR is
 * supported.  Grayscale requests are converted with the usual
 * luma weights before packing.  Components are scaled to the field
 * widths declared in info->var and packed at their bitfield offsets.
 */
static int vmlfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
			   u_int transp, struct fb_info *info)
{
	u32 val;

	if (regno >= 16)
		return -EINVAL;

	if (info->var.grayscale)
		red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;

	if (info->fix.visual != FB_VISUAL_TRUECOLOR)
		return -EINVAL;

	red = VML_TOHW(red, info->var.red.length);
	green = VML_TOHW(green, info->var.green.length);
	blue = VML_TOHW(blue, info->var.blue.length);
	transp = VML_TOHW(transp, info->var.transp.length);

	val = (red << info->var.red.offset) |
	      (green << info->var.green.offset) |
	      (blue << info->var.blue.offset) |
	      (transp << info->var.transp.offset);

	switch (info->var.bits_per_pixel) {
	case 16:
	case 24:
	case 32:
		((u32 *) info->pseudo_palette)[regno] = val;
		break;
	}
	return 0;
}
986*4882a593Smuzhiyun
/*
 * fb_mmap entry point: map a range of the contiguous VRAM into user
 * space.
 *
 * The requested page offset must fall inside the visible framebuffer
 * (validated by vmlfb_vram_offset()).  The mapping's cache attribute is
 * forced to UC_MINUS because it aliases device scanout memory; the
 * actual mapping is delegated to vm_iomap_memory(), which handles the
 * range/offset arithmetic.
 */
static int vmlfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct vml_info *vinfo = container_of(info, struct vml_info, info);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	int ret;
	unsigned long prot;

	ret = vmlfb_vram_offset(vinfo, offset);
	if (ret)
		return -EINVAL;

	/* Strip existing cache bits, then mark the mapping UC_MINUS. */
	prot = pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK;
	pgprot_val(vma->vm_page_prot) =
		prot | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS);

	return vm_iomap_memory(vma, vinfo->vram_start,
			vinfo->vram_contig_size);
}
1005*4882a593Smuzhiyun
/*
 * fb_sync entry point: nothing to wait for — all drawing uses the
 * generic cfb_* CPU helpers, so there is never outstanding accelerator
 * work.
 */
static int vmlfb_sync(struct fb_info *info)
{
	return 0;
}
1010*4882a593Smuzhiyun
/*
 * fb_cursor entry point: no hardware cursor support; returning -EINVAL
 * makes the fbdev core fall back to its software cursor.
 */
static int vmlfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
	return -EINVAL;	/* just to force soft_cursor() call */
}
1015*4882a593Smuzhiyun
/*
 * Framebuffer operations table.  Drawing is unaccelerated (generic
 * cfb_* helpers); cursor handling is deliberately software-only via
 * vmlfb_cursor().
 */
static struct fb_ops vmlfb_ops = {
	.owner = THIS_MODULE,
	.fb_open = vmlfb_open,
	.fb_release = vmlfb_release,
	.fb_check_var = vmlfb_check_var,
	.fb_set_par = vmlfb_set_par,
	.fb_blank = vmlfb_blank,
	.fb_pan_display = vmlfb_pan_display,
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_cursor = vmlfb_cursor,
	.fb_sync = vmlfb_sync,
	.fb_mmap = vmlfb_mmap,
	.fb_setcolreg = vmlfb_setcolreg
};
1032*4882a593Smuzhiyun
/* PCI IDs this driver binds to: the Intel Vermilion VDC device. */
static const struct pci_device_id vml_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, VML_DEVICE_VDC)},
	{0}
};
1037*4882a593Smuzhiyun
/* PCI driver glue; probe/remove are defined earlier in this file. */
static struct pci_driver vmlfb_pci_driver = {
	.name = "vmlfb",
	.id_table = vml_ids,
	.probe = vml_pci_probe,
	.remove = vml_pci_remove,
};
1044*4882a593Smuzhiyun
/* Module exit: unregistering the PCI driver tears down all devices. */
static void __exit vmlfb_cleanup(void)
{
	pci_unregister_driver(&vmlfb_pci_driver);
}
1049*4882a593Smuzhiyun
/*
 * Module init: honor "video=vmlfb:..." command-line handling when built
 * in (a non-zero fb_get_options() return means the user disabled this
 * driver), set up the global mutex and device lists, and register the
 * PCI driver.
 */
static int __init vmlfb_init(void)
{

#ifndef MODULE
	char *option = NULL;

	/* option itself is unused; only the disabled/enabled status matters. */
	if (fb_get_options(MODULE_NAME, &option))
		return -ENODEV;
#endif

	printk(KERN_DEBUG MODULE_NAME ": initializing\n");
	mutex_init(&vml_mutex);
	INIT_LIST_HEAD(&global_no_mode);
	INIT_LIST_HEAD(&global_has_mode);

	return pci_register_driver(&vmlfb_pci_driver);
}
1067*4882a593Smuzhiyun
/*
 * Register a clock/output subsystem with the driver.
 *
 * Any previously registered subsystem has its state restored first,
 * then the new subsystem's state is saved.  Every head currently on the
 * no-mode list is then given a mode: first its existing var is retried
 * (it may not have been clock-validated before), and failing that a
 * 16 bpp fallback mode (vml_default_mode) is searched for and set via
 * fb_set_var().  Heads that obtain a mode move to global_has_mode.
 *
 * Locking: vml_mutex is held throughout, but dropped around the
 * fb_set_var() fallback path (fb_set_var takes fbdev locks and calls
 * back into vmlfb_set_par, which re-acquires vml_mutex).  Because the
 * lock is dropped inside the loop, traversal restarts from the list
 * head on every iteration rather than using list_for_each.
 *
 * Always returns 0.
 */
int vmlfb_register_subsys(struct vml_sys *sys)
{
	struct vml_info *entry;
	struct list_head *list;
	u32 save_activate;

	mutex_lock(&vml_mutex);
	if (subsys != NULL) {
		subsys->restore(subsys);
	}
	subsys = sys;
	subsys->save(subsys);

	/*
	 * We need to restart list traversal for each item, since we
	 * release the list mutex in the loop.
	 */

	list = global_no_mode.next;
	while (list != &global_no_mode) {
		list_del_init(list);
		entry = list_entry(list, struct vml_info, head);

		/*
		 * First, try the current mode which might not be
		 * completely validated with respect to the pixel clock.
		 */

		if (!vmlfb_check_var_locked(&entry->info.var, entry)) {
			vmlfb_set_par_locked(entry);
			list_add_tail(list, &global_has_mode);
		} else {

			/*
			 * Didn't work. Try to find another mode,
			 * that matches this subsys.
			 */

			mutex_unlock(&vml_mutex);
			/* Preserve the activate flags across the forced set. */
			save_activate = entry->info.var.activate;
			entry->info.var.bits_per_pixel = 16;
			vmlfb_set_pref_pixel_format(&entry->info.var);
			if (fb_find_mode(&entry->info.var,
					 &entry->info,
					 vml_default_mode, NULL, 0, NULL, 16)) {
				entry->info.var.activate |=
				    FB_ACTIVATE_FORCE | FB_ACTIVATE_NOW;
				fb_set_var(&entry->info, &entry->info.var);
			} else {
				printk(KERN_ERR MODULE_NAME
				       ": Sorry. no mode found for this subsys.\n");
			}
			entry->info.var.activate = save_activate;
			mutex_lock(&vml_mutex);
		}
		/* Re-apply the head's current blank state for the new mode. */
		vmlfb_blank_locked(entry);
		list = global_no_mode.next;
	}
	mutex_unlock(&vml_mutex);

	printk(KERN_DEBUG MODULE_NAME ": Registered %s subsystem.\n",
	       subsys->name ? subsys->name : "unknown");
	return 0;
}

EXPORT_SYMBOL_GPL(vmlfb_register_subsys);
1134*4882a593Smuzhiyun
vmlfb_unregister_subsys(struct vml_sys * sys)1135*4882a593Smuzhiyun void vmlfb_unregister_subsys(struct vml_sys *sys)
1136*4882a593Smuzhiyun {
1137*4882a593Smuzhiyun struct vml_info *entry, *next;
1138*4882a593Smuzhiyun
1139*4882a593Smuzhiyun mutex_lock(&vml_mutex);
1140*4882a593Smuzhiyun if (subsys != sys) {
1141*4882a593Smuzhiyun mutex_unlock(&vml_mutex);
1142*4882a593Smuzhiyun return;
1143*4882a593Smuzhiyun }
1144*4882a593Smuzhiyun subsys->restore(subsys);
1145*4882a593Smuzhiyun subsys = NULL;
1146*4882a593Smuzhiyun list_for_each_entry_safe(entry, next, &global_has_mode, head) {
1147*4882a593Smuzhiyun printk(KERN_DEBUG MODULE_NAME ": subsys disable pipe\n");
1148*4882a593Smuzhiyun vmlfb_disable_pipe(entry);
1149*4882a593Smuzhiyun list_move_tail(&entry->head, &global_no_mode);
1150*4882a593Smuzhiyun }
1151*4882a593Smuzhiyun mutex_unlock(&vml_mutex);
1152*4882a593Smuzhiyun }
1153*4882a593Smuzhiyun
1154*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(vmlfb_unregister_subsys);
1155*4882a593Smuzhiyun
/* Module entry/exit hookup and metadata. */
module_init(vmlfb_init);
module_exit(vmlfb_cleanup);

MODULE_AUTHOR("Tungsten Graphics");
MODULE_DESCRIPTION("Initialization of the Vermilion display devices");
MODULE_VERSION("1.0.0");
MODULE_LICENSE("GPL");
1163