xref: /OK3568_Linux_fs/kernel/drivers/char/agp/amd64-agp.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright 2001-2003 SuSE Labs.
4*4882a593Smuzhiyun  * Distributed under the GNU public license, v2.
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * This is a GART driver for the AMD Opteron/Athlon64 on-CPU northbridge.
7*4882a593Smuzhiyun  * It also includes support for the AMD 8151 AGP bridge,
8*4882a593Smuzhiyun  * although it doesn't actually do much, as all the real
9*4882a593Smuzhiyun  * work is done in the northbridge(s).
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <linux/module.h>
13*4882a593Smuzhiyun #include <linux/pci.h>
14*4882a593Smuzhiyun #include <linux/init.h>
15*4882a593Smuzhiyun #include <linux/agp_backend.h>
16*4882a593Smuzhiyun #include <linux/mmzone.h>
17*4882a593Smuzhiyun #include <asm/page.h>		/* PAGE_SIZE */
18*4882a593Smuzhiyun #include <asm/e820/api.h>
19*4882a593Smuzhiyun #include <asm/amd_nb.h>
20*4882a593Smuzhiyun #include <asm/gart.h>
21*4882a593Smuzhiyun #include "agp.h"
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun /* NVIDIA K8 registers */
24*4882a593Smuzhiyun #define NVIDIA_X86_64_0_APBASE		0x10
25*4882a593Smuzhiyun #define NVIDIA_X86_64_1_APBASE1		0x50
26*4882a593Smuzhiyun #define NVIDIA_X86_64_1_APLIMIT1	0x54
27*4882a593Smuzhiyun #define NVIDIA_X86_64_1_APSIZE		0xa8
28*4882a593Smuzhiyun #define NVIDIA_X86_64_1_APBASE2		0xd8
29*4882a593Smuzhiyun #define NVIDIA_X86_64_1_APLIMIT2	0xdc
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun /* ULi K8 registers */
32*4882a593Smuzhiyun #define ULI_X86_64_BASE_ADDR		0x10
33*4882a593Smuzhiyun #define ULI_X86_64_HTT_FEA_REG		0x50
34*4882a593Smuzhiyun #define ULI_X86_64_ENU_SCR_REG		0x54
35*4882a593Smuzhiyun 
/* mem_region claimed for the GART aperture, if we had to request it ourselves. */
static struct resource *aperture_resource;
/* Whether to drive chipsets not in the known-good list; presumably exposed as a
 * module parameter further down the file — TODO confirm. Init-only data. */
static bool __initdata agp_try_unsupported = 1;
/* Number of AGP bridges bound; the probe only ever accepts one. */
static int agp_bridges_found;
39*4882a593Smuzhiyun 
/* Flush the GART TLBs in all northbridges after a mapping change. */
static void amd64_tlbflush(struct agp_memory *temp)
{
	amd_flush_garts();
}
44*4882a593Smuzhiyun 
/*
 * Insert @mem into the GART at page offset @pg_start.
 *
 * Validates the type and range, checks the target GATT slots are free,
 * flushes CPU caches once per agp_memory, writes the PTEs through MMIO
 * and finally flushes the GART TLBs.  Returns 0 or a negative errno.
 */
static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i, j, num_entries;
	long long tmp;
	int mask_type;
	struct agp_bridge_data *bridge = mem->bridge;
	u32 pte;

	num_entries = agp_num_entries();

	if (type != mem->type)
		return -EINVAL;
	/* Only the default (0) mask type is supported by this GART. */
	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0)
		return -EINVAL;


	/* Make sure we can fit the range in the gatt table. */
	/* FIXME: could wrap */
	if (((unsigned long)pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	/* gatt table should be empty. */
	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	/* Flush CPU caches at most once per agp_memory object. */
	if (!mem->is_flushed) {
		global_cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		tmp = agp_bridge->driver->mask_memory(agp_bridge,
						      page_to_phys(mem->pages[i]),
						      mask_type);

		/* Physical address must fit the GART's supported range and be
		 * page aligned with no reserved low bits set. */
		BUG_ON(tmp & 0xffffff0000000ffcULL);
		/* GART PTE format: phys bits 39:32 go into PTE bits 11:4,
		 * phys bits 31:12 stay in place. */
		pte = (tmp & 0x000000ff00000000ULL) >> 28;
		pte |=(tmp & 0x00000000fffff000ULL);
		pte |= GPTE_VALID | GPTE_COHERENT;

		writel(pte, agp_bridge->gatt_table+j);
		readl(agp_bridge->gatt_table+j);	/* PCI Posting. */
	}
	amd64_tlbflush(mem);
	return 0;
}
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun /*
99*4882a593Smuzhiyun  * This hack alters the order element according
100*4882a593Smuzhiyun  * to the size of a long. It sucks. I totally disown this, even
101*4882a593Smuzhiyun  * though it does appear to work for the most part.
102*4882a593Smuzhiyun  */
/* Fields: {size in MB, num GATT entries, page order, GARTAPERTURECTL size bits}.
 * The order is bumped by one on 64-bit (sizeof(long)/8 == 1) — see the
 * disclaimer above. */
static struct aper_size_info_32 amd64_aperture_sizes[7] =
{
	{32,   8192,   3+(sizeof(long)/8), 0 },
	{64,   16384,  4+(sizeof(long)/8), 1<<1 },
	{128,  32768,  5+(sizeof(long)/8), 1<<2 },
	{256,  65536,  6+(sizeof(long)/8), 1<<1 | 1<<2 },
	{512,  131072, 7+(sizeof(long)/8), 1<<3 },
	{1024, 262144, 8+(sizeof(long)/8), 1<<1 | 1<<3},
	{2048, 524288, 9+(sizeof(long)/8), 1<<2 | 1<<3}
};
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun /*
116*4882a593Smuzhiyun  * Get the current Aperture size from the x86-64.
117*4882a593Smuzhiyun  * Note, that there may be multiple x86-64's, but we just return
118*4882a593Smuzhiyun  * the value from the first one we find. The set_size functions
119*4882a593Smuzhiyun  * keep the rest coherent anyway. Or at least should do.
120*4882a593Smuzhiyun  */
amd64_fetch_size(void)121*4882a593Smuzhiyun static int amd64_fetch_size(void)
122*4882a593Smuzhiyun {
123*4882a593Smuzhiyun 	struct pci_dev *dev;
124*4882a593Smuzhiyun 	int i;
125*4882a593Smuzhiyun 	u32 temp;
126*4882a593Smuzhiyun 	struct aper_size_info_32 *values;
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun 	dev = node_to_amd_nb(0)->misc;
129*4882a593Smuzhiyun 	if (dev==NULL)
130*4882a593Smuzhiyun 		return 0;
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun 	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &temp);
133*4882a593Smuzhiyun 	temp = (temp & 0xe);
134*4882a593Smuzhiyun 	values = A_SIZE_32(amd64_aperture_sizes);
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun 	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
137*4882a593Smuzhiyun 		if (temp == values[i].size_value) {
138*4882a593Smuzhiyun 			agp_bridge->previous_size =
139*4882a593Smuzhiyun 			    agp_bridge->current_size = (void *) (values + i);
140*4882a593Smuzhiyun 
141*4882a593Smuzhiyun 			agp_bridge->aperture_size_idx = i;
142*4882a593Smuzhiyun 			return values[i].size;
143*4882a593Smuzhiyun 		}
144*4882a593Smuzhiyun 	}
145*4882a593Smuzhiyun 	return 0;
146*4882a593Smuzhiyun }
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun /*
149*4882a593Smuzhiyun  * In a multiprocessor x86-64 system, this function gets
150*4882a593Smuzhiyun  * called once for each CPU.
151*4882a593Smuzhiyun  */
/*
 * Program the GART of one northbridge (@hammer) with @gatt_table and
 * return the aperture's physical base address.
 */
static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
{
	u32 base_reg;
	u64 aper_base;

	/* The base register holds bits 39:25 of the aperture address. */
	pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &base_reg);
	aper_base = ((u64)base_reg << 25) & PCI_BASE_ADDRESS_MEM_MASK;

	enable_gart_translation(hammer, gatt_table);

	return aper_base;
}
167*4882a593Smuzhiyun 
168*4882a593Smuzhiyun 
/* AGPv3 aperture size table for the AMD 8151 bridge:
 * {size in MB, num GATT entries, page order, APSIZE register bits}.
 * The trailing comments show the six APSIZE mask bits. */
static const struct aper_size_info_32 amd_8151_sizes[7] =
{
	{2048, 524288, 9, 0x00000000 },	/* 0 0 0 0 0 0 */
	{1024, 262144, 8, 0x00000400 },	/* 1 0 0 0 0 0 */
	{512,  131072, 7, 0x00000600 },	/* 1 1 0 0 0 0 */
	{256,  65536,  6, 0x00000700 },	/* 1 1 1 0 0 0 */
	{128,  32768,  5, 0x00000720 },	/* 1 1 1 1 0 0 */
	{64,   16384,  4, 0x00000730 },	/* 1 1 1 1 1 0 */
	{32,   8192,   3, 0x00000738 }	/* 1 1 1 1 1 1 */
};
179*4882a593Smuzhiyun 
amd_8151_configure(void)180*4882a593Smuzhiyun static int amd_8151_configure(void)
181*4882a593Smuzhiyun {
182*4882a593Smuzhiyun 	unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
183*4882a593Smuzhiyun 	int i;
184*4882a593Smuzhiyun 
185*4882a593Smuzhiyun 	if (!amd_nb_has_feature(AMD_NB_GART))
186*4882a593Smuzhiyun 		return 0;
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	/* Configure AGP regs in each x86-64 host bridge. */
189*4882a593Smuzhiyun 	for (i = 0; i < amd_nb_num(); i++) {
190*4882a593Smuzhiyun 		agp_bridge->gart_bus_addr =
191*4882a593Smuzhiyun 			amd64_configure(node_to_amd_nb(i)->misc, gatt_bus);
192*4882a593Smuzhiyun 	}
193*4882a593Smuzhiyun 	amd_flush_garts();
194*4882a593Smuzhiyun 	return 0;
195*4882a593Smuzhiyun }
196*4882a593Smuzhiyun 
197*4882a593Smuzhiyun 
amd64_cleanup(void)198*4882a593Smuzhiyun static void amd64_cleanup(void)
199*4882a593Smuzhiyun {
200*4882a593Smuzhiyun 	u32 tmp;
201*4882a593Smuzhiyun 	int i;
202*4882a593Smuzhiyun 
203*4882a593Smuzhiyun 	if (!amd_nb_has_feature(AMD_NB_GART))
204*4882a593Smuzhiyun 		return;
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 	for (i = 0; i < amd_nb_num(); i++) {
207*4882a593Smuzhiyun 		struct pci_dev *dev = node_to_amd_nb(i)->misc;
208*4882a593Smuzhiyun 		/* disable gart translation */
209*4882a593Smuzhiyun 		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
210*4882a593Smuzhiyun 		tmp &= ~GARTEN;
211*4882a593Smuzhiyun 		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp);
212*4882a593Smuzhiyun 	}
213*4882a593Smuzhiyun }
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun 
/*
 * Bridge driver ops.  Only configure/fetch_size/cleanup/tlb_flush and
 * insert_memory are GART-specific; everything else uses the generic
 * AGP backend implementations.
 */
static const struct agp_bridge_driver amd_8151_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= amd_8151_sizes,
	.size_type		= U32_APER_SIZE,
	.num_aperture_sizes	= 7,
	.needs_scratch_page	= true,
	.configure		= amd_8151_configure,
	.fetch_size		= amd64_fetch_size,
	.cleanup		= amd64_cleanup,
	.tlb_flush		= amd64_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= NULL,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= amd64_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
};
242*4882a593Smuzhiyun 
243*4882a593Smuzhiyun /* Some basic sanity checks for the aperture. */
/* Some basic sanity checks for the aperture: at least 32 MB, not
 * overlapping RAM, and claimable as a mem_region.  Returns 1 if usable. */
static int agp_aperture_valid(u64 aper, u32 size)
{
	if (!aperture_valid(aper, size, 32*1024*1024))
		return 0;

	/* Request the Aperture. This catches cases when someone else
	   already put a mapping in there - happens with some very broken BIOS

	   Maybe better to use pci_assign_resource/pci_enable_device instead
	   trusting the bridges? */
	if (!aperture_resource) {
		aperture_resource = request_mem_region(aper, size, "aperture");
		if (!aperture_resource) {
			printk(KERN_ERR PFX "Aperture conflicts with PCI mapping.\n");
			return 0;
		}
	}
	return 1;
}
261*4882a593Smuzhiyun 
262*4882a593Smuzhiyun /*
263*4882a593Smuzhiyun  * W*s centric BIOS sometimes only set up the aperture in the AGP
264*4882a593Smuzhiyun  * bridge, not the northbridge. On AMD64 this is handled early
265*4882a593Smuzhiyun  * in aperture.c, but when IOMMU is not enabled or we run
266*4882a593Smuzhiyun  * on a 32bit kernel this needs to be redone.
267*4882a593Smuzhiyun  * Unfortunately it is impossible to fix the aperture here because it's too late
268*4882a593Smuzhiyun  * to allocate that much memory. But at least error out cleanly instead of
269*4882a593Smuzhiyun  * crashing.
270*4882a593Smuzhiyun  */
static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
{
	u64 aper, nb_aper;
	int order = 0;
	u32 nb_order, nb_base;
	u16 apsize;

	/* Read the northbridge's idea of the aperture (order + base). */
	pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
	nb_order = (nb_order >> 1) & 7;
	pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
	nb_aper = (u64)nb_base << 25;

	/* Northbridge seems to contain crap. Try the AGP bridge. */

	pci_read_config_word(agp, cap+0x14, &apsize);
	if (apsize == 0xffff) {
		/* AGP bridge APSIZE unreadable; fall back to the NB values. */
		if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
			return 0;
		return -1;
	}

	apsize &= 0xfff;
	/* Some BIOS use weird encodings not in the AGPv3 table. */
	if (apsize & 0xff)
		apsize |= 0xf00;
	/* APSIZE encodes the size as cleared bits; 7 minus the popcount
	 * yields the 32MB-based order. */
	order = 7 - hweight16(apsize);

	aper = pci_bus_address(agp, AGP_APERTURE_BAR);

	/*
	 * On some sick chips APSIZE is 0. This means it wants 4G
	 * so let double check that order, and lets trust the AMD NB settings
	 */
	if (order >=0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) {
		dev_info(&agp->dev, "aperture size %u MB is not right, using settings from NB\n",
			 32 << order);
		order = nb_order;
	}

	/* Prefer the (larger or equal) northbridge aperture if it checks out. */
	if (nb_order >= order) {
		if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
			return 0;
	}

	dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n",
		 aper, 32 << order);
	if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
		return -1;

	/* Copy the AGP bridge's aperture into the northbridge and enable it. */
	gart_set_size_and_enable(nb, order);
	pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25);

	return 0;
}
325*4882a593Smuzhiyun 
/*
 * Discover and cache all AMD northbridges and make sure each one has a
 * usable GART aperture (fixing it up from the AGP bridge if the BIOS
 * only programmed the latter).  Returns 0 on success, -ENODEV when no
 * GART-capable northbridge exists, or -1 when no usable aperture could
 * be found.
 */
static int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
{
	int i;

	if (amd_cache_northbridges() < 0)
		return -ENODEV;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return -ENODEV;

	/* (dead "i = 0;" store before the loop removed — the for-initializer
	 * already sets it) */
	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
			dev_err(&dev->dev, "no usable aperture found\n");
#ifdef __x86_64__
			/* should port this to i386 */
			dev_err(&dev->dev, "consider rebooting with iommu=memaper=2 to get a good aperture\n");
#endif
			return -1;
		}
	}
	return 0;
}
350*4882a593Smuzhiyun 
351*4882a593Smuzhiyun /* Handle AMD 8151 quirks */
amd8151_init(struct pci_dev * pdev,struct agp_bridge_data * bridge)352*4882a593Smuzhiyun static void amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge)
353*4882a593Smuzhiyun {
354*4882a593Smuzhiyun 	char *revstring;
355*4882a593Smuzhiyun 
356*4882a593Smuzhiyun 	switch (pdev->revision) {
357*4882a593Smuzhiyun 	case 0x01: revstring="A0"; break;
358*4882a593Smuzhiyun 	case 0x02: revstring="A1"; break;
359*4882a593Smuzhiyun 	case 0x11: revstring="B0"; break;
360*4882a593Smuzhiyun 	case 0x12: revstring="B1"; break;
361*4882a593Smuzhiyun 	case 0x13: revstring="B2"; break;
362*4882a593Smuzhiyun 	case 0x14: revstring="B3"; break;
363*4882a593Smuzhiyun 	default:   revstring="??"; break;
364*4882a593Smuzhiyun 	}
365*4882a593Smuzhiyun 
366*4882a593Smuzhiyun 	dev_info(&pdev->dev, "AMD 8151 AGP Bridge rev %s\n", revstring);
367*4882a593Smuzhiyun 
368*4882a593Smuzhiyun 	/*
369*4882a593Smuzhiyun 	 * Work around errata.
370*4882a593Smuzhiyun 	 * Chips before B2 stepping incorrectly reporting v3.5
371*4882a593Smuzhiyun 	 */
372*4882a593Smuzhiyun 	if (pdev->revision < 0x13) {
373*4882a593Smuzhiyun 		dev_info(&pdev->dev, "correcting AGP revision (reports 3.5, is really 3.0)\n");
374*4882a593Smuzhiyun 		bridge->major_version = 3;
375*4882a593Smuzhiyun 		bridge->minor_version = 0;
376*4882a593Smuzhiyun 	}
377*4882a593Smuzhiyun }
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun 
/* ULi aperture size table: {size in MB, entries, order, size_value}. */
static const struct aper_size_info_32 uli_sizes[7] =
{
	{256, 65536, 6, 10},
	{128, 32768, 5, 9},
	{64, 16384, 4, 8},
	{32, 8192, 3, 7},
	{16, 4096, 2, 6},
	{8, 2048, 1, 4},
	{4, 1024, 0, 3}
};
uli_agp_init(struct pci_dev * pdev)390*4882a593Smuzhiyun static int uli_agp_init(struct pci_dev *pdev)
391*4882a593Smuzhiyun {
392*4882a593Smuzhiyun 	u32 httfea,baseaddr,enuscr;
393*4882a593Smuzhiyun 	struct pci_dev *dev1;
394*4882a593Smuzhiyun 	int i, ret;
395*4882a593Smuzhiyun 	unsigned size = amd64_fetch_size();
396*4882a593Smuzhiyun 
397*4882a593Smuzhiyun 	dev_info(&pdev->dev, "setting up ULi AGP\n");
398*4882a593Smuzhiyun 	dev1 = pci_get_slot (pdev->bus,PCI_DEVFN(0,0));
399*4882a593Smuzhiyun 	if (dev1 == NULL) {
400*4882a593Smuzhiyun 		dev_info(&pdev->dev, "can't find ULi secondary device\n");
401*4882a593Smuzhiyun 		return -ENODEV;
402*4882a593Smuzhiyun 	}
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(uli_sizes); i++)
405*4882a593Smuzhiyun 		if (uli_sizes[i].size == size)
406*4882a593Smuzhiyun 			break;
407*4882a593Smuzhiyun 
408*4882a593Smuzhiyun 	if (i == ARRAY_SIZE(uli_sizes)) {
409*4882a593Smuzhiyun 		dev_info(&pdev->dev, "no ULi size found for %d\n", size);
410*4882a593Smuzhiyun 		ret = -ENODEV;
411*4882a593Smuzhiyun 		goto put;
412*4882a593Smuzhiyun 	}
413*4882a593Smuzhiyun 
414*4882a593Smuzhiyun 	/* shadow x86-64 registers into ULi registers */
415*4882a593Smuzhiyun 	pci_read_config_dword (node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE,
416*4882a593Smuzhiyun 			       &httfea);
417*4882a593Smuzhiyun 
418*4882a593Smuzhiyun 	/* if x86-64 aperture base is beyond 4G, exit here */
419*4882a593Smuzhiyun 	if ((httfea & 0x7fff) >> (32 - 25)) {
420*4882a593Smuzhiyun 		ret = -ENODEV;
421*4882a593Smuzhiyun 		goto put;
422*4882a593Smuzhiyun 	}
423*4882a593Smuzhiyun 
424*4882a593Smuzhiyun 	httfea = (httfea& 0x7fff) << 25;
425*4882a593Smuzhiyun 
426*4882a593Smuzhiyun 	pci_read_config_dword(pdev, ULI_X86_64_BASE_ADDR, &baseaddr);
427*4882a593Smuzhiyun 	baseaddr&= ~PCI_BASE_ADDRESS_MEM_MASK;
428*4882a593Smuzhiyun 	baseaddr|= httfea;
429*4882a593Smuzhiyun 	pci_write_config_dword(pdev, ULI_X86_64_BASE_ADDR, baseaddr);
430*4882a593Smuzhiyun 
431*4882a593Smuzhiyun 	enuscr= httfea+ (size * 1024 * 1024) - 1;
432*4882a593Smuzhiyun 	pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
433*4882a593Smuzhiyun 	pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
434*4882a593Smuzhiyun 	ret = 0;
435*4882a593Smuzhiyun put:
436*4882a593Smuzhiyun 	pci_dev_put(dev1);
437*4882a593Smuzhiyun 	return ret;
438*4882a593Smuzhiyun }
439*4882a593Smuzhiyun 
440*4882a593Smuzhiyun 
/* NForce3 aperture size table: {size in MB, entries, order, APSIZE bits}. */
static const struct aper_size_info_32 nforce3_sizes[5] =
{
	{512,  131072, 7, 0x00000000 },
	{256,  65536,  6, 0x00000008 },
	{128,  32768,  5, 0x0000000C },
	{64,   16384,  4, 0x0000000E },
	{32,   8192,   3, 0x0000000F }
};
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun /* Handle shadow device of the Nvidia NForce3 */
451*4882a593Smuzhiyun /* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. */
/*
 * Program the NForce3's shadow copy of the GART aperture: size on the
 * secondary device (devfn 11.0), then base/limit pairs mirroring the
 * northbridge's aperture base.  Returns 0 or -ENODEV.
 */
static int nforce3_agp_init(struct pci_dev *pdev)
{
	u32 tmp, apbase, apbar, aplimit;
	struct pci_dev *dev1;
	int i, ret;
	unsigned size = amd64_fetch_size();

	dev_info(&pdev->dev, "setting up Nforce3 AGP\n");

	dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0));
	if (dev1 == NULL) {
		dev_info(&pdev->dev, "can't find Nforce3 secondary device\n");
		return -ENODEV;
	}

	/* Find the table entry matching the northbridge's aperture size. */
	for (i = 0; i < ARRAY_SIZE(nforce3_sizes); i++)
		if (nforce3_sizes[i].size == size)
			break;

	if (i == ARRAY_SIZE(nforce3_sizes)) {
		dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
		ret = -ENODEV;
		goto put;
	}

	/* Update only the low 4 size bits of APSIZE. */
	pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
	tmp &= ~(0xf);
	tmp |= nforce3_sizes[i].size_value;
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);

	/* shadow x86-64 registers into NVIDIA registers */
	pci_read_config_dword (node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE,
			       &apbase);

	/* if x86-64 aperture base is beyond 4G, exit here */
	if ( (apbase & 0x7fff) >> (32 - 25) ) {
		dev_info(&pdev->dev, "aperture base > 4G\n");
		ret = -ENODEV;
		goto put;
	}

	/* Register holds bits 39:25 of the aperture base. */
	apbase = (apbase & 0x7fff) << 25;

	pci_read_config_dword(pdev, NVIDIA_X86_64_0_APBASE, &apbar);
	apbar &= ~PCI_BASE_ADDRESS_MEM_MASK;
	apbar |= apbase;
	pci_write_config_dword(pdev, NVIDIA_X86_64_0_APBASE, apbar);

	aplimit = apbase + (size * 1024 * 1024) - 1;
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE1, apbase);
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT1, aplimit);
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);

	ret = 0;
put:
	pci_dev_put(dev1);

	return ret;
}
512*4882a593Smuzhiyun 
agp_amd64_probe(struct pci_dev * pdev,const struct pci_device_id * ent)513*4882a593Smuzhiyun static int agp_amd64_probe(struct pci_dev *pdev,
514*4882a593Smuzhiyun 			   const struct pci_device_id *ent)
515*4882a593Smuzhiyun {
516*4882a593Smuzhiyun 	struct agp_bridge_data *bridge;
517*4882a593Smuzhiyun 	u8 cap_ptr;
518*4882a593Smuzhiyun 	int err;
519*4882a593Smuzhiyun 
520*4882a593Smuzhiyun 	/* The Highlander principle */
521*4882a593Smuzhiyun 	if (agp_bridges_found)
522*4882a593Smuzhiyun 		return -ENODEV;
523*4882a593Smuzhiyun 
524*4882a593Smuzhiyun 	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
525*4882a593Smuzhiyun 	if (!cap_ptr)
526*4882a593Smuzhiyun 		return -ENODEV;
527*4882a593Smuzhiyun 
528*4882a593Smuzhiyun 	/* Could check for AGPv3 here */
529*4882a593Smuzhiyun 
530*4882a593Smuzhiyun 	bridge = agp_alloc_bridge();
531*4882a593Smuzhiyun 	if (!bridge)
532*4882a593Smuzhiyun 		return -ENOMEM;
533*4882a593Smuzhiyun 
534*4882a593Smuzhiyun 	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
535*4882a593Smuzhiyun 	    pdev->device == PCI_DEVICE_ID_AMD_8151_0) {
536*4882a593Smuzhiyun 		amd8151_init(pdev, bridge);
537*4882a593Smuzhiyun 	} else {
538*4882a593Smuzhiyun 		dev_info(&pdev->dev, "AGP bridge [%04x/%04x]\n",
539*4882a593Smuzhiyun 			 pdev->vendor, pdev->device);
540*4882a593Smuzhiyun 	}
541*4882a593Smuzhiyun 
542*4882a593Smuzhiyun 	bridge->driver = &amd_8151_driver;
543*4882a593Smuzhiyun 	bridge->dev = pdev;
544*4882a593Smuzhiyun 	bridge->capndx = cap_ptr;
545*4882a593Smuzhiyun 
546*4882a593Smuzhiyun 	/* Fill in the mode register */
547*4882a593Smuzhiyun 	pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
548*4882a593Smuzhiyun 
549*4882a593Smuzhiyun 	if (cache_nbs(pdev, cap_ptr) == -1) {
550*4882a593Smuzhiyun 		agp_put_bridge(bridge);
551*4882a593Smuzhiyun 		return -ENODEV;
552*4882a593Smuzhiyun 	}
553*4882a593Smuzhiyun 
554*4882a593Smuzhiyun 	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
555*4882a593Smuzhiyun 		int ret = nforce3_agp_init(pdev);
556*4882a593Smuzhiyun 		if (ret) {
557*4882a593Smuzhiyun 			agp_put_bridge(bridge);
558*4882a593Smuzhiyun 			return ret;
559*4882a593Smuzhiyun 		}
560*4882a593Smuzhiyun 	}
561*4882a593Smuzhiyun 
562*4882a593Smuzhiyun 	if (pdev->vendor == PCI_VENDOR_ID_AL) {
563*4882a593Smuzhiyun 		int ret = uli_agp_init(pdev);
564*4882a593Smuzhiyun 		if (ret) {
565*4882a593Smuzhiyun 			agp_put_bridge(bridge);
566*4882a593Smuzhiyun 			return ret;
567*4882a593Smuzhiyun 		}
568*4882a593Smuzhiyun 	}
569*4882a593Smuzhiyun 
570*4882a593Smuzhiyun 	pci_set_drvdata(pdev, bridge);
571*4882a593Smuzhiyun 	err = agp_add_bridge(bridge);
572*4882a593Smuzhiyun 	if (err < 0)
573*4882a593Smuzhiyun 		return err;
574*4882a593Smuzhiyun 
575*4882a593Smuzhiyun 	agp_bridges_found++;
576*4882a593Smuzhiyun 	return 0;
577*4882a593Smuzhiyun }
578*4882a593Smuzhiyun 
/* PCI remove: release the GATT mem_region, unregister and drop the bridge. */
static void agp_amd64_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	/* Releases the region covering the GATT pages; size is looked up from
	 * the aperture index cached by amd64_fetch_size(). */
	release_mem_region(virt_to_phys(bridge->gatt_table_real),
			   amd64_aperture_sizes[bridge->aperture_size_idx].size);
	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);

	/* Allow a subsequent probe to succeed again. */
	agp_bridges_found--;
}
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun #ifdef CONFIG_PM
592*4882a593Smuzhiyun 
/* Legacy PM suspend: save PCI config state and enter the target power state. */
static int agp_amd64_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
600*4882a593Smuzhiyun 
/* Legacy PM resume: restore PCI state, redo the NForce3 shadow registers
 * (lost over suspend) and reprogram the GART in every northbridge. */
static int agp_amd64_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA)
		nforce3_agp_init(pdev);

	return amd_8151_configure();
}
611*4882a593Smuzhiyun 
612*4882a593Smuzhiyun #endif /* CONFIG_PM */
613*4882a593Smuzhiyun 
614*4882a593Smuzhiyun static const struct pci_device_id agp_amd64_pci_table[] = {
615*4882a593Smuzhiyun 	{
616*4882a593Smuzhiyun 	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
617*4882a593Smuzhiyun 	.class_mask	= ~0,
618*4882a593Smuzhiyun 	.vendor		= PCI_VENDOR_ID_AMD,
619*4882a593Smuzhiyun 	.device		= PCI_DEVICE_ID_AMD_8151_0,
620*4882a593Smuzhiyun 	.subvendor	= PCI_ANY_ID,
621*4882a593Smuzhiyun 	.subdevice	= PCI_ANY_ID,
622*4882a593Smuzhiyun 	},
623*4882a593Smuzhiyun 	/* ULi M1689 */
624*4882a593Smuzhiyun 	{
625*4882a593Smuzhiyun 	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
626*4882a593Smuzhiyun 	.class_mask	= ~0,
627*4882a593Smuzhiyun 	.vendor		= PCI_VENDOR_ID_AL,
628*4882a593Smuzhiyun 	.device		= PCI_DEVICE_ID_AL_M1689,
629*4882a593Smuzhiyun 	.subvendor	= PCI_ANY_ID,
630*4882a593Smuzhiyun 	.subdevice	= PCI_ANY_ID,
631*4882a593Smuzhiyun 	},
632*4882a593Smuzhiyun 	/* VIA K8T800Pro */
633*4882a593Smuzhiyun 	{
634*4882a593Smuzhiyun 	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
635*4882a593Smuzhiyun 	.class_mask	= ~0,
636*4882a593Smuzhiyun 	.vendor		= PCI_VENDOR_ID_VIA,
637*4882a593Smuzhiyun 	.device		= PCI_DEVICE_ID_VIA_K8T800PRO_0,
638*4882a593Smuzhiyun 	.subvendor	= PCI_ANY_ID,
639*4882a593Smuzhiyun 	.subdevice	= PCI_ANY_ID,
640*4882a593Smuzhiyun 	},
641*4882a593Smuzhiyun 	/* VIA K8T800 */
642*4882a593Smuzhiyun 	{
643*4882a593Smuzhiyun 	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
644*4882a593Smuzhiyun 	.class_mask	= ~0,
645*4882a593Smuzhiyun 	.vendor		= PCI_VENDOR_ID_VIA,
646*4882a593Smuzhiyun 	.device		= PCI_DEVICE_ID_VIA_8385_0,
647*4882a593Smuzhiyun 	.subvendor	= PCI_ANY_ID,
648*4882a593Smuzhiyun 	.subdevice	= PCI_ANY_ID,
649*4882a593Smuzhiyun 	},
650*4882a593Smuzhiyun 	/* VIA K8M800 / K8N800 */
651*4882a593Smuzhiyun 	{
652*4882a593Smuzhiyun 	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
653*4882a593Smuzhiyun 	.class_mask	= ~0,
654*4882a593Smuzhiyun 	.vendor		= PCI_VENDOR_ID_VIA,
655*4882a593Smuzhiyun 	.device		= PCI_DEVICE_ID_VIA_8380_0,
656*4882a593Smuzhiyun 	.subvendor	= PCI_ANY_ID,
657*4882a593Smuzhiyun 	.subdevice	= PCI_ANY_ID,
658*4882a593Smuzhiyun 	},
659*4882a593Smuzhiyun 	/* VIA K8M890 / K8N890 */
660*4882a593Smuzhiyun 	{
661*4882a593Smuzhiyun 	.class          = (PCI_CLASS_BRIDGE_HOST << 8),
662*4882a593Smuzhiyun 	.class_mask     = ~0,
663*4882a593Smuzhiyun 	.vendor         = PCI_VENDOR_ID_VIA,
664*4882a593Smuzhiyun 	.device         = PCI_DEVICE_ID_VIA_VT3336,
665*4882a593Smuzhiyun 	.subvendor      = PCI_ANY_ID,
666*4882a593Smuzhiyun 	.subdevice      = PCI_ANY_ID,
667*4882a593Smuzhiyun 	},
668*4882a593Smuzhiyun 	/* VIA K8T890 */
669*4882a593Smuzhiyun 	{
670*4882a593Smuzhiyun 	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
671*4882a593Smuzhiyun 	.class_mask	= ~0,
672*4882a593Smuzhiyun 	.vendor		= PCI_VENDOR_ID_VIA,
673*4882a593Smuzhiyun 	.device		= PCI_DEVICE_ID_VIA_3238_0,
674*4882a593Smuzhiyun 	.subvendor	= PCI_ANY_ID,
675*4882a593Smuzhiyun 	.subdevice	= PCI_ANY_ID,
676*4882a593Smuzhiyun 	},
677*4882a593Smuzhiyun 	/* VIA K8T800/K8M800/K8N800 */
678*4882a593Smuzhiyun 	{
679*4882a593Smuzhiyun 	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
680*4882a593Smuzhiyun 	.class_mask	= ~0,
681*4882a593Smuzhiyun 	.vendor		= PCI_VENDOR_ID_VIA,
682*4882a593Smuzhiyun 	.device		= PCI_DEVICE_ID_VIA_838X_1,
683*4882a593Smuzhiyun 	.subvendor	= PCI_ANY_ID,
684*4882a593Smuzhiyun 	.subdevice	= PCI_ANY_ID,
685*4882a593Smuzhiyun 	},
686*4882a593Smuzhiyun 	/* NForce3 */
687*4882a593Smuzhiyun 	{
688*4882a593Smuzhiyun 	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
689*4882a593Smuzhiyun 	.class_mask	= ~0,
690*4882a593Smuzhiyun 	.vendor		= PCI_VENDOR_ID_NVIDIA,
691*4882a593Smuzhiyun 	.device		= PCI_DEVICE_ID_NVIDIA_NFORCE3,
692*4882a593Smuzhiyun 	.subvendor	= PCI_ANY_ID,
693*4882a593Smuzhiyun 	.subdevice	= PCI_ANY_ID,
694*4882a593Smuzhiyun 	},
695*4882a593Smuzhiyun 	{
696*4882a593Smuzhiyun 	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
697*4882a593Smuzhiyun 	.class_mask	= ~0,
698*4882a593Smuzhiyun 	.vendor		= PCI_VENDOR_ID_NVIDIA,
699*4882a593Smuzhiyun 	.device		= PCI_DEVICE_ID_NVIDIA_NFORCE3S,
700*4882a593Smuzhiyun 	.subvendor	= PCI_ANY_ID,
701*4882a593Smuzhiyun 	.subdevice	= PCI_ANY_ID,
702*4882a593Smuzhiyun 	},
703*4882a593Smuzhiyun 	/* SIS 755 */
704*4882a593Smuzhiyun 	{
705*4882a593Smuzhiyun 	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
706*4882a593Smuzhiyun 	.class_mask	= ~0,
707*4882a593Smuzhiyun 	.vendor		= PCI_VENDOR_ID_SI,
708*4882a593Smuzhiyun 	.device		= PCI_DEVICE_ID_SI_755,
709*4882a593Smuzhiyun 	.subvendor	= PCI_ANY_ID,
710*4882a593Smuzhiyun 	.subdevice	= PCI_ANY_ID,
711*4882a593Smuzhiyun 	},
712*4882a593Smuzhiyun 	/* SIS 760 */
713*4882a593Smuzhiyun 	{
714*4882a593Smuzhiyun 	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
715*4882a593Smuzhiyun 	.class_mask	= ~0,
716*4882a593Smuzhiyun 	.vendor		= PCI_VENDOR_ID_SI,
717*4882a593Smuzhiyun 	.device		= PCI_DEVICE_ID_SI_760,
718*4882a593Smuzhiyun 	.subvendor	= PCI_ANY_ID,
719*4882a593Smuzhiyun 	.subdevice	= PCI_ANY_ID,
720*4882a593Smuzhiyun 	},
721*4882a593Smuzhiyun 	/* ALI/ULI M1695 */
722*4882a593Smuzhiyun 	{
723*4882a593Smuzhiyun 	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
724*4882a593Smuzhiyun 	.class_mask	= ~0,
725*4882a593Smuzhiyun 	.vendor		= PCI_VENDOR_ID_AL,
726*4882a593Smuzhiyun 	.device		= 0x1695,
727*4882a593Smuzhiyun 	.subvendor	= PCI_ANY_ID,
728*4882a593Smuzhiyun 	.subdevice	= PCI_ANY_ID,
729*4882a593Smuzhiyun 	},
730*4882a593Smuzhiyun 
731*4882a593Smuzhiyun 	{ }
732*4882a593Smuzhiyun };
733*4882a593Smuzhiyun 
734*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
735*4882a593Smuzhiyun 
736*4882a593Smuzhiyun static const struct pci_device_id agp_amd64_pci_promisc_table[] = {
737*4882a593Smuzhiyun 	{ PCI_DEVICE_CLASS(0, 0) },
738*4882a593Smuzhiyun 	{ }
739*4882a593Smuzhiyun };
740*4882a593Smuzhiyun 
741*4882a593Smuzhiyun static struct pci_driver agp_amd64_pci_driver = {
742*4882a593Smuzhiyun 	.name		= "agpgart-amd64",
743*4882a593Smuzhiyun 	.id_table	= agp_amd64_pci_table,
744*4882a593Smuzhiyun 	.probe		= agp_amd64_probe,
745*4882a593Smuzhiyun 	.remove		= agp_amd64_remove,
746*4882a593Smuzhiyun #ifdef CONFIG_PM
747*4882a593Smuzhiyun 	.suspend	= agp_amd64_suspend,
748*4882a593Smuzhiyun 	.resume		= agp_amd64_resume,
749*4882a593Smuzhiyun #endif
750*4882a593Smuzhiyun };
751*4882a593Smuzhiyun 
752*4882a593Smuzhiyun 
753*4882a593Smuzhiyun /* Not static due to IOMMU code calling it early. */
agp_amd64_init(void)754*4882a593Smuzhiyun int __init agp_amd64_init(void)
755*4882a593Smuzhiyun {
756*4882a593Smuzhiyun 	int err = 0;
757*4882a593Smuzhiyun 
758*4882a593Smuzhiyun 	if (agp_off)
759*4882a593Smuzhiyun 		return -EINVAL;
760*4882a593Smuzhiyun 
761*4882a593Smuzhiyun 	err = pci_register_driver(&agp_amd64_pci_driver);
762*4882a593Smuzhiyun 	if (err < 0)
763*4882a593Smuzhiyun 		return err;
764*4882a593Smuzhiyun 
765*4882a593Smuzhiyun 	if (agp_bridges_found == 0) {
766*4882a593Smuzhiyun 		if (!agp_try_unsupported && !agp_try_unsupported_boot) {
767*4882a593Smuzhiyun 			printk(KERN_INFO PFX "No supported AGP bridge found.\n");
768*4882a593Smuzhiyun #ifdef MODULE
769*4882a593Smuzhiyun 			printk(KERN_INFO PFX "You can try agp_try_unsupported=1\n");
770*4882a593Smuzhiyun #else
771*4882a593Smuzhiyun 			printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
772*4882a593Smuzhiyun #endif
773*4882a593Smuzhiyun 			pci_unregister_driver(&agp_amd64_pci_driver);
774*4882a593Smuzhiyun 			return -ENODEV;
775*4882a593Smuzhiyun 		}
776*4882a593Smuzhiyun 
777*4882a593Smuzhiyun 		/* First check that we have at least one AMD64 NB */
778*4882a593Smuzhiyun 		if (!amd_nb_num()) {
779*4882a593Smuzhiyun 			pci_unregister_driver(&agp_amd64_pci_driver);
780*4882a593Smuzhiyun 			return -ENODEV;
781*4882a593Smuzhiyun 		}
782*4882a593Smuzhiyun 
783*4882a593Smuzhiyun 		/* Look for any AGP bridge */
784*4882a593Smuzhiyun 		agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
785*4882a593Smuzhiyun 		err = driver_attach(&agp_amd64_pci_driver.driver);
786*4882a593Smuzhiyun 		if (err == 0 && agp_bridges_found == 0) {
787*4882a593Smuzhiyun 			pci_unregister_driver(&agp_amd64_pci_driver);
788*4882a593Smuzhiyun 			err = -ENODEV;
789*4882a593Smuzhiyun 		}
790*4882a593Smuzhiyun 	}
791*4882a593Smuzhiyun 	return err;
792*4882a593Smuzhiyun }
793*4882a593Smuzhiyun 
agp_amd64_mod_init(void)794*4882a593Smuzhiyun static int __init agp_amd64_mod_init(void)
795*4882a593Smuzhiyun {
796*4882a593Smuzhiyun #ifndef MODULE
797*4882a593Smuzhiyun 	if (gart_iommu_aperture)
798*4882a593Smuzhiyun 		return agp_bridges_found ? 0 : -ENODEV;
799*4882a593Smuzhiyun #endif
800*4882a593Smuzhiyun 	return agp_amd64_init();
801*4882a593Smuzhiyun }
802*4882a593Smuzhiyun 
agp_amd64_cleanup(void)803*4882a593Smuzhiyun static void __exit agp_amd64_cleanup(void)
804*4882a593Smuzhiyun {
805*4882a593Smuzhiyun #ifndef MODULE
806*4882a593Smuzhiyun 	if (gart_iommu_aperture)
807*4882a593Smuzhiyun 		return;
808*4882a593Smuzhiyun #endif
809*4882a593Smuzhiyun 	if (aperture_resource)
810*4882a593Smuzhiyun 		release_resource(aperture_resource);
811*4882a593Smuzhiyun 	pci_unregister_driver(&agp_amd64_pci_driver);
812*4882a593Smuzhiyun }
813*4882a593Smuzhiyun 
814*4882a593Smuzhiyun module_init(agp_amd64_mod_init);
815*4882a593Smuzhiyun module_exit(agp_amd64_cleanup);
816*4882a593Smuzhiyun 
817*4882a593Smuzhiyun MODULE_AUTHOR("Dave Jones, Andi Kleen");
818*4882a593Smuzhiyun module_param(agp_try_unsupported, bool, 0);
819*4882a593Smuzhiyun MODULE_LICENSE("GPL");
820