/*
 * Transmeta's Efficeon AGPGART driver.
 *
 * Based upon a diff by Linus around November '02.
 *
 * Ported to the 2.6 kernel by Carlos Puchol <cpglinux@puchol.com>
 * and H. Peter Anvin <hpa@transmeta.com>.
 */

/*
 * NOTE-cpg-040217:
 *
 *   - when compiled as a module, after loading the module,
 *     it will refuse to unload, indicating it is in use,
 *     when it is not.
 *   - no S3 (suspend to RAM) testing.
 *   - tested on the Efficeon integrated northbridge for tens
 *     of iterations of starting X and glxgears.
 *   - tested with Radeon 9000 and Radeon Mobility M9 cards
 *   - tested with C3/C4 enabled (with the Mobility M9 card)
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/gfp.h>
#include <linux/page-flags.h>
#include <linux/mm.h>
#include "agp.h"
#include "intel-agp.h"

/*
 * The real difference from the generic AGP code is
 * in the GART mappings - a two-level setup with the
 * first level being an on-chip 64-entry table.
 *
 * The page array is filled through the ATTPAGE register
 * (Aperture Translation Table Page Register) at 0xB8. Bits:
 *  31:20: physical page address
 *   11:9: Page Attribute Table Index (PATI)
 *	   must match the PAT index for the
 *	   mapped pages (the 2nd level page table pages
 *	   themselves should be just regular WB-cacheable,
 *	   so this is normally zero.)
 *      8: Present
 *    7:6: reserved, write as zero
 *    5:0: GATT directory index: which 1st-level entry
 *
 * The Efficeon AGP spec requires pages to be WB-cacheable
 * but to be explicitly CLFLUSH'd after any changes.
 */
#define EFFICEON_ATTPAGE	0xb8
#define EFFICEON_L1_SIZE	64	/* Number of PDE pages */

#define EFFICEON_PATI		(0 << 9)
#define EFFICEON_PRESENT	(1 << 8)

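/*
 * Illustrative sketch only (not used by the driver): how an ATTPAGE value
 * is composed from the bit layout documented above.  The driver open-codes
 * the same expression in efficeon_create_gatt_table(); the helper name and
 * signature here are hypothetical.
 */
static inline unsigned long efficeon_attpage_value(unsigned long l2_page,
						   unsigned int dir_index)
{
	/* physical address of the 2nd-level page | PATI | present bit |
	 * GATT directory index */
	return virt_to_phys((void *)l2_page) | EFFICEON_PATI |
		EFFICEON_PRESENT | dir_index;
}
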
static struct _efficeon_private {
	unsigned long l1_table[EFFICEON_L1_SIZE];
} efficeon_private;

static const struct gatt_mask efficeon_generic_masks[] =
{
	{.mask = 0x00000001, .type = 0}
};

/* This function does the same thing as mask_memory() for this chipset... */
static inline unsigned long efficeon_mask_memory(struct page *page)
{
	unsigned long addr = page_to_phys(page);
	return addr | 0x00000001;
}

static const struct aper_size_info_lvl2 efficeon_generic_sizes[4] =
{
	{256, 65536, 0},
	{128, 32768, 32},
	{64, 16384, 48},
	{32, 8192, 56}
};

/*
 * Control interfaces are largely identical to
 * the legacy Intel 440BX.
 */

static int efficeon_fetch_size(void)
{
	int i;
	u16 temp;
	struct aper_size_info_lvl2 *values;

	pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp);
	values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp == values[i].size_value) {
			agp_bridge->previous_size =
			    agp_bridge->current_size = (void *) (values + i);
			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}

static void efficeon_tlbflush(struct agp_memory * mem)
{
	printk(KERN_DEBUG PFX "efficeon_tlbflush()\n");
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200);
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
}

static void efficeon_cleanup(void)
{
	u16 temp;
	struct aper_size_info_lvl2 *previous_size;

	printk(KERN_DEBUG PFX "efficeon_cleanup()\n");
	previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
	pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
	pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
	pci_write_config_word(agp_bridge->dev, INTEL_APSIZE,
			      previous_size->size_value);
}

static int efficeon_configure(void)
{
	u16 temp2;
	struct aper_size_info_lvl2 *current_size;

	printk(KERN_DEBUG PFX "efficeon_configure()\n");

	current_size = A_SIZE_LVL2(agp_bridge->current_size);

	/* aperture size */
	pci_write_config_word(agp_bridge->dev, INTEL_APSIZE,
			      current_size->size_value);

	/* address to map to */
	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
						    AGP_APERTURE_BAR);

	/* agpctrl */
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);

	/* paccfg/nbxcfg */
	pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
	pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG,
			      (temp2 & ~(1 << 10)) | (1 << 9) | (1 << 11));
	/* clear any possible error conditions */
	pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7);
	return 0;
}

static int efficeon_free_gatt_table(struct agp_bridge_data *bridge)
{
	int index, freed = 0;

	for (index = 0; index < EFFICEON_L1_SIZE; index++) {
		unsigned long page = efficeon_private.l1_table[index];
		if (page) {
			efficeon_private.l1_table[index] = 0;
			free_page(page);
			freed++;
		}
		printk(KERN_DEBUG PFX "efficeon_free_gatt_table(%p, %02x, %08x)\n",
			agp_bridge->dev, EFFICEON_ATTPAGE, index);
		pci_write_config_dword(agp_bridge->dev,
			EFFICEON_ATTPAGE, index);
	}
	printk(KERN_DEBUG PFX "efficeon_free_gatt_table() freed %d pages\n", freed);
	return 0;
}


/*
 * Since we don't need contiguous memory we just try
 * to get the GATT table once.
 */

#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#undef  GET_GATT
#define GET_GATT(addr) (efficeon_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)

static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
{
	int index;
	const int pati    = EFFICEON_PATI;
	const int present = EFFICEON_PRESENT;
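	/* CPUID.01H:EBX[15:8] reports the CLFLUSH line size in 8-byte
	 * units, so shifting left by 3 yields the size in bytes. */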
	const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3;
	int num_entries, l1_pages;

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	printk(KERN_DEBUG PFX "efficeon_create_gatt_table(%d)\n", num_entries);

	/* There are 2^10 PTE pages per PDE page */
	BUG_ON(num_entries & 0x3ff);
	l1_pages = num_entries >> 10;

	for (index = 0 ; index < l1_pages ; index++) {
		int offset;
		unsigned long page;
		unsigned long value;

		page = efficeon_private.l1_table[index];
		BUG_ON(page);

		page = get_zeroed_page(GFP_KERNEL);
		if (!page) {
			efficeon_free_gatt_table(agp_bridge);
			return -ENOMEM;
		}

		for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk)
			clflush((char *)page+offset);

		efficeon_private.l1_table[index] = page;

		value = virt_to_phys((unsigned long *)page) | pati | present | index;

		pci_write_config_dword(agp_bridge->dev,
			EFFICEON_ATTPAGE, value);
	}

	return 0;
}

static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
{
	int i, count = mem->page_count, num_entries;
	unsigned int *page, *last_page;
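	/* CLFLUSH line size in bytes (CPUID.01H:EBX[15:8] * 8); the mask
	 * is used below to flush only once per cache line of PTEs. */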
	const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3;
	const unsigned long clflush_mask = ~(clflush_chunk-1);

	printk(KERN_DEBUG PFX "efficeon_insert_memory(%lx, %d)\n", pg_start, count);

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;
	if (type != 0 || mem->type != 0)
		return -EINVAL;

	if (!mem->is_flushed) {
		global_cache_flush();
		mem->is_flushed = true;
	}

	last_page = NULL;
	for (i = 0; i < count; i++) {
		int index = pg_start + i;
		unsigned long insert = efficeon_mask_memory(mem->pages[i]);

		page = (unsigned int *) efficeon_private.l1_table[index >> 10];

		if (!page)
			continue;

		page += (index & 0x3ff);
		*page = insert;

		/* clflush is slow, so don't clflush until we have to */
		if (last_page &&
		    (((unsigned long)page^(unsigned long)last_page) &
		     clflush_mask))
			clflush(last_page);

		last_page = page;
	}

	if (last_page)
		clflush(last_page);

	agp_bridge->driver->tlb_flush(mem);
	return 0;
}

static int efficeon_remove_memory(struct agp_memory * mem, off_t pg_start, int type)
{
	int i, count = mem->page_count, num_entries;

	printk(KERN_DEBUG PFX "efficeon_remove_memory(%lx, %d)\n", pg_start, count);

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;
	if (type != 0 || mem->type != 0)
		return -EINVAL;

	for (i = 0; i < count; i++) {
		int index = pg_start + i;
		unsigned int *page = (unsigned int *) efficeon_private.l1_table[index >> 10];

		if (!page)
			continue;
		page += (index & 0x3ff);
		*page = 0;
	}
	agp_bridge->driver->tlb_flush(mem);
	return 0;
}


static const struct agp_bridge_driver efficeon_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= efficeon_generic_sizes,
	.size_type		= LVL2_APER_SIZE,
	.num_aperture_sizes	= 4,
	.configure		= efficeon_configure,
	.fetch_size		= efficeon_fetch_size,
	.cleanup		= efficeon_cleanup,
	.tlb_flush		= efficeon_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= efficeon_generic_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,

	// Efficeon-specific GATT table setup / populate / teardown
	.create_gatt_table	= efficeon_create_gatt_table,
	.free_gatt_table	= efficeon_free_gatt_table,
	.insert_memory		= efficeon_insert_memory,
	.remove_memory		= efficeon_remove_memory,
	.cant_use_aperture	= false,	// true might be faster?

	// Generic
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
};

static int agp_efficeon_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	u8 cap_ptr;
	struct resource *r;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;

	/* Probe for Efficeon controller */
	if (pdev->device != PCI_DEVICE_ID_EFFICEON) {
		printk(KERN_ERR PFX "Unsupported Efficeon chipset (device id: %04x)\n",
		    pdev->device);
		return -ENODEV;
	}

	printk(KERN_INFO PFX "Detected Transmeta Efficeon TM8000 series chipset\n");

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &efficeon_driver;
	bridge->dev = pdev;
	bridge->capndx = cap_ptr;

	/*
	 * If the device has not been properly set up, the following will catch
	 * the problem and should stop the system from crashing.
	 * 20030610 - hamish@zot.org
	 */
	if (pci_enable_device(pdev)) {
		printk(KERN_ERR PFX "Unable to enable PCI device\n");
		agp_put_bridge(bridge);
		return -ENODEV;
	}

	/*
	 * The following fixes the case where the BIOS has "forgotten" to
	 * provide an address range for the GART.
	 * 20030610 - hamish@zot.org
	 */
	r = &pdev->resource[0];
	if (!r->start && r->end) {
		if (pci_assign_resource(pdev, 0)) {
			printk(KERN_ERR PFX "could not assign resource 0\n");
			agp_put_bridge(bridge);
			return -ENODEV;
		}
	}

	/* Fill in the mode register */
	if (cap_ptr) {
		pci_read_config_dword(pdev,
				bridge->capndx+PCI_AGP_STATUS,
				&bridge->mode);
	}

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}

static void agp_efficeon_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}

#ifdef CONFIG_PM
static int agp_efficeon_suspend(struct pci_dev *dev, pm_message_t state)
{
	return 0;
}

static int agp_efficeon_resume(struct pci_dev *pdev)
{
	printk(KERN_DEBUG PFX "agp_efficeon_resume()\n");
	return efficeon_configure();
}
#endif

static const struct pci_device_id agp_efficeon_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_TRANSMETA,
	.device		= PCI_ANY_ID,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_efficeon_pci_table);

static struct pci_driver agp_efficeon_pci_driver = {
	.name		= "agpgart-efficeon",
	.id_table	= agp_efficeon_pci_table,
	.probe		= agp_efficeon_probe,
	.remove		= agp_efficeon_remove,
#ifdef CONFIG_PM
	.suspend	= agp_efficeon_suspend,
	.resume		= agp_efficeon_resume,
#endif
};

static int __init agp_efficeon_init(void)
{
	static int agp_initialised = 0;

	if (agp_off)
		return -EINVAL;

	if (agp_initialised == 1)
		return 0;
	agp_initialised = 1;

	return pci_register_driver(&agp_efficeon_pci_driver);
}

static void __exit agp_efficeon_cleanup(void)
{
	pci_unregister_driver(&agp_efficeon_pci_driver);
}

module_init(agp_efficeon_init);
module_exit(agp_efficeon_cleanup);

MODULE_AUTHOR("Carlos Puchol <cpglinux@puchol.com>");
MODULE_LICENSE("GPL and additional rights");