/*
 * Serverworks AGPGART routines.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/agp_backend.h>
#include <asm/set_memory.h>
#include "agp.h"

#define SVWRKS_COMMAND		0x04
#define SVWRKS_APSIZE		0x10
#define SVWRKS_MMBASE		0x14
#define SVWRKS_CACHING		0x4b
#define SVWRKS_AGP_ENABLE	0x60
#define SVWRKS_FEATURE		0x68

#define SVWRKS_SIZE_MASK	0xfe000000

/* Memory mapped registers */
#define SVWRKS_GART_CACHE	0x02
#define SVWRKS_GATTBASE		0x04
#define SVWRKS_TLBFLUSH		0x10
#define SVWRKS_POSTFLUSH	0x14
#define SVWRKS_DIRFLUSH		0x0c


struct serverworks_page_map {
        unsigned long *real;
        unsigned long __iomem *remapped;
};

static struct _serverworks_private {
        struct pci_dev *svrwrks_dev;	/* device one */
        volatile u8 __iomem *registers;
        struct serverworks_page_map **gatt_pages;
        int num_tables;
        struct serverworks_page_map scratch_dir;

        int gart_addr_ofs;
        int mm_addr_ofs;
} serverworks_private;

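/*
 * Allocate a single page for use as a page directory or GATT page,
 * switch it to uncached and pre-fill every entry with the scratch page.
 */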
static int serverworks_create_page_map(struct serverworks_page_map *page_map)
{
        int i;

        page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
        if (page_map->real == NULL) {
                return -ENOMEM;
        }

        set_memory_uc((unsigned long)page_map->real, 1);
        page_map->remapped = page_map->real;

        for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
                writel(agp_bridge->scratch_page, page_map->remapped+i);
        /* Red Pen: Everyone else does pci posting flush here */

        return 0;
}

static void serverworks_free_page_map(struct serverworks_page_map *page_map)
{
        set_memory_wb((unsigned long)page_map->real, 1);
        free_page((unsigned long) page_map->real);
}

static void serverworks_free_gatt_pages(void)
{
        int i;
        struct serverworks_page_map **tables;
        struct serverworks_page_map *entry;

        tables = serverworks_private.gatt_pages;
        for (i = 0; i < serverworks_private.num_tables; i++) {
                entry = tables[i];
                if (entry != NULL) {
                        if (entry->real != NULL) {
                                serverworks_free_page_map(entry);
                        }
                        kfree(entry);
                }
        }
        kfree(tables);
}

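/*
 * Allocate the leaf GATT pages: the caller asks for one
 * serverworks_page_map per 1024 GATT entries (4MB of aperture with 4K
 * pages); kcalloc's extra slot leaves the array NULL-terminated.  If
 * anything fails, everything allocated so far is torn down again.
 */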
static int serverworks_create_gatt_pages(int nr_tables)
{
        struct serverworks_page_map **tables;
        struct serverworks_page_map *entry;
        int retval = 0;
        int i;

        tables = kcalloc(nr_tables + 1, sizeof(struct serverworks_page_map *),
                         GFP_KERNEL);
        if (tables == NULL)
                return -ENOMEM;

        for (i = 0; i < nr_tables; i++) {
                entry = kzalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
                if (entry == NULL) {
                        retval = -ENOMEM;
                        break;
                }
                tables[i] = entry;
                retval = serverworks_create_page_map(entry);
                if (retval != 0)
                        break;
        }
        serverworks_private.num_tables = nr_tables;
        serverworks_private.gatt_pages = tables;

        if (retval != 0)
                serverworks_free_gatt_pages();

        return retval;
}

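/*
 * Two-level lookup helpers: bits 31..22 of a bus address pick the page
 * directory slot (relative to the aperture base), bits 21..12 pick the
 * entry within that GATT page.
 */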
#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
        GET_PAGE_DIR_IDX(addr)]->remapped)

#ifndef GET_PAGE_DIR_OFF
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#endif

#ifndef GET_PAGE_DIR_IDX
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
        GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#endif

#ifndef GET_GATT_OFF
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#endif

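/*
 * Build the two-level GATT: a 1024-entry page directory, a scratch
 * directory that every unused directory slot points at, and one real
 * GATT page per 1024 aperture entries.  The directory's bus address is
 * stored in gatt_bus_addr so serverworks_configure() can program it
 * into SVWRKS_GATTBASE.
 */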
static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
{
        struct aper_size_info_lvl2 *value;
        struct serverworks_page_map page_dir;
        int retval;
        u32 temp;
        int i;

        value = A_SIZE_LVL2(agp_bridge->current_size);
        retval = serverworks_create_page_map(&page_dir);
        if (retval != 0) {
                return retval;
        }
        retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
        if (retval != 0) {
                serverworks_free_page_map(&page_dir);
                return retval;
        }
        /* Create a fake scratch directory */
        for (i = 0; i < 1024; i++) {
                writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
                writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
        }

        retval = serverworks_create_gatt_pages(value->num_entries / 1024);
        if (retval != 0) {
                serverworks_free_page_map(&page_dir);
                serverworks_free_page_map(&serverworks_private.scratch_dir);
                return retval;
        }

        agp_bridge->gatt_table_real = (u32 *)page_dir.real;
        agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
        agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);

        /* Get the address for the gart region.
         * This is a bus address even on the alpha, because it's used to
         * program the agp master, not the cpu.
         */

        pci_read_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, &temp);
        agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

        /* Calculate the agp offset */
        for (i = 0; i < value->num_entries / 1024; i++)
                writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);

        return 0;
}

static int serverworks_free_gatt_table(struct agp_bridge_data *bridge)
{
        struct serverworks_page_map page_dir;

        page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
        page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;

        serverworks_free_gatt_pages();
        serverworks_free_page_map(&page_dir);
        serverworks_free_page_map(&serverworks_private.scratch_dir);
        return 0;
}

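/*
 * Standard BAR-style size probe: save the aperture size register, write
 * the all-ones size mask, read back which bits stick, restore the old
 * value and match the result against the serverworks_sizes table.
 */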
static int serverworks_fetch_size(void)
{
        int i;
        u32 temp;
        u32 temp2;
        struct aper_size_info_lvl2 *values;

        values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
        pci_read_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, &temp);
        pci_write_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs,
                               SVWRKS_SIZE_MASK);
        pci_read_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, &temp2);
        pci_write_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, temp);
        temp2 &= SVWRKS_SIZE_MASK;

        for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
                if (temp2 == values[i].size_value) {
                        agp_bridge->previous_size =
                            agp_bridge->current_size = (void *) (values + i);

                        agp_bridge->aperture_size_idx = i;
                        return values[i].size;
                }
        }

        return 0;
}

/*
 * This routine could be implemented by taking the addresses written to
 * the GATT and flushing them individually.  However, it currently just
 * flushes the whole table, which is probably more efficient, since
 * agp_memory blocks can span a large number of entries.
 */
static void serverworks_tlbflush(struct agp_memory *temp)
{
        unsigned long timeout;

        writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH);
        timeout = jiffies + 3*HZ;
        while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) {
                cpu_relax();
                if (time_after(jiffies, timeout)) {
                        dev_err(&serverworks_private.svrwrks_dev->dev,
                                "TLB post flush took more than 3 seconds\n");
                        break;
                }
        }

        writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH);
        timeout = jiffies + 3*HZ;
        while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) {
                cpu_relax();
                if (time_after(jiffies, timeout)) {
                        dev_err(&serverworks_private.svrwrks_dev->dev,
                                "TLB Dir flush took more than 3 seconds\n");
                        break;
                }
        }
}

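/*
 * One-time hardware setup: ioremap 4K of the chipset's memory-mapped
 * registers, program the GART cache, command and GATT base registers,
 * set the AGP enable bit on the function 1 device, flush the TLB and
 * read the AGP capability and mode from function 1.  Finally the low
 * two bits of SVWRKS_CACHING are cleared and bit 6 of SVWRKS_FEATURE
 * is set.
 */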
static int serverworks_configure(void)
{
        struct aper_size_info_lvl2 *current_size;
        u32 temp;
        u8 enable_reg;
        u16 cap_reg;

        current_size = A_SIZE_LVL2(agp_bridge->current_size);

        /* Get the memory mapped registers */
        pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp);
        temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
        serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
        if (!serverworks_private.registers) {
                dev_err(&agp_bridge->dev->dev, "can't ioremap(%#x)\n", temp);
                return -ENOMEM;
        }

        writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE);
        readb(serverworks_private.registers+SVWRKS_GART_CACHE);	/* PCI Posting. */

        writel(agp_bridge->gatt_bus_addr, serverworks_private.registers+SVWRKS_GATTBASE);
        readl(serverworks_private.registers+SVWRKS_GATTBASE);	/* PCI Posting. */

        cap_reg = readw(serverworks_private.registers+SVWRKS_COMMAND);
        cap_reg &= ~0x0007;
        cap_reg |= 0x4;
        writew(cap_reg, serverworks_private.registers+SVWRKS_COMMAND);
        readw(serverworks_private.registers+SVWRKS_COMMAND);

        pci_read_config_byte(serverworks_private.svrwrks_dev, SVWRKS_AGP_ENABLE, &enable_reg);
        enable_reg |= 0x1; /* Agp Enable bit */
        pci_write_config_byte(serverworks_private.svrwrks_dev, SVWRKS_AGP_ENABLE, enable_reg);
        serverworks_tlbflush(NULL);

        agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP);

        /* Fill in the mode register */
        pci_read_config_dword(serverworks_private.svrwrks_dev,
                              agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode);

        pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg);
        enable_reg &= ~0x3;
        pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg);

        pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg);
        enable_reg |= (1<<6);
        pci_write_config_byte(agp_bridge->dev, SVWRKS_FEATURE, enable_reg);

        return 0;
}

static void serverworks_cleanup(void)
{
        iounmap((void __iomem *) serverworks_private.registers);
}

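/*
 * Insert/remove walk the two-level GATT: only type 0 memory is accepted,
 * the target range is bounds-checked (and, on insert, checked to be
 * empty), entries are written through SVRWRKS_GET_GATT() and the TLB is
 * flushed afterwards.
 */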
static int serverworks_insert_memory(struct agp_memory *mem,
                                     off_t pg_start, int type)
{
        int i, j, num_entries;
        unsigned long __iomem *cur_gatt;
        unsigned long addr;

        num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

        if (type != 0 || mem->type != 0) {
                return -EINVAL;
        }
        if ((pg_start + mem->page_count) > num_entries) {
                return -EINVAL;
        }

        j = pg_start;
        while (j < (pg_start + mem->page_count)) {
                addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
                cur_gatt = SVRWRKS_GET_GATT(addr);
                if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
                        return -EBUSY;
                j++;
        }

        if (!mem->is_flushed) {
                global_cache_flush();
                mem->is_flushed = true;
        }

        for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
                cur_gatt = SVRWRKS_GET_GATT(addr);
                writel(agp_bridge->driver->mask_memory(agp_bridge,
                                page_to_phys(mem->pages[i]), mem->type),
                       cur_gatt+GET_GATT_OFF(addr));
        }
        serverworks_tlbflush(mem);
        return 0;
}

static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start,
                                     int type)
{
        int i;
        unsigned long __iomem *cur_gatt;
        unsigned long addr;

        if (type != 0 || mem->type != 0) {
                return -EINVAL;
        }

        global_cache_flush();
        serverworks_tlbflush(mem);

        for (i = pg_start; i < (mem->page_count + pg_start); i++) {
                addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
                cur_gatt = SVRWRKS_GET_GATT(addr);
                writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
        }

        serverworks_tlbflush(mem);
        return 0;
}

static const struct gatt_mask serverworks_masks[] =
{
        {.mask = 1, .type = 0}
};

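/* {aperture size in MB, number of 4K GATT entries, APSIZE size value} */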
static const struct aper_size_info_lvl2 serverworks_sizes[7] =
{
        {2048, 524288, 0x80000000},
        {1024, 262144, 0xc0000000},
        {512, 131072, 0xe0000000},
        {256, 65536, 0xf0000000},
        {128, 32768, 0xf8000000},
        {64, 16384, 0xfc000000},
        {32, 8192, 0xfe000000}
};

static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
        u32 command;

        pci_read_config_dword(serverworks_private.svrwrks_dev,
                              bridge->capndx + PCI_AGP_STATUS,
                              &command);

        command = agp_collect_device_status(bridge, mode, command);

        command &= ~0x10;	/* disable FW */
        command &= ~0x08;

        command |= 0x100;

        pci_write_config_dword(serverworks_private.svrwrks_dev,
                               bridge->capndx + PCI_AGP_COMMAND,
                               command);

        agp_device_command(command, false);
}

static const struct agp_bridge_driver sworks_driver = {
        .owner = THIS_MODULE,
        .aperture_sizes = serverworks_sizes,
        .size_type = LVL2_APER_SIZE,
        .num_aperture_sizes = 7,
        .configure = serverworks_configure,
        .fetch_size = serverworks_fetch_size,
        .cleanup = serverworks_cleanup,
        .tlb_flush = serverworks_tlbflush,
        .mask_memory = agp_generic_mask_memory,
        .masks = serverworks_masks,
        .agp_enable = serverworks_agp_enable,
        .cache_flush = global_cache_flush,
        .create_gatt_table = serverworks_create_gatt_table,
        .free_gatt_table = serverworks_free_gatt_table,
        .insert_memory = serverworks_insert_memory,
        .remove_memory = serverworks_remove_memory,
        .alloc_by_type = agp_generic_alloc_by_type,
        .free_by_type = agp_generic_free_by_type,
        .agp_alloc_page = agp_generic_alloc_page,
        .agp_alloc_pages = agp_generic_alloc_pages,
        .agp_destroy_page = agp_generic_destroy_page,
        .agp_destroy_pages = agp_generic_destroy_pages,
        .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};

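/*
 * The PCI match is against the host bridge (function 0), but the AGP
 * registers this driver programs live on function 1 of the same device,
 * so the probe takes a reference on devfn 0.1 and stashes it in
 * serverworks_private.  Aperture or MMIO BARs that are 64-bit with a
 * non-zero upper half cause the probe to bail out.
 */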
static int agp_serverworks_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
{
        struct agp_bridge_data *bridge;
        struct pci_dev *bridge_dev;
        u32 temp, temp2;
        u8 cap_ptr = 0;

        cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);

        switch (pdev->device) {
        case 0x0006:
                dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n");
                return -ENODEV;

        case PCI_DEVICE_ID_SERVERWORKS_HE:
        case PCI_DEVICE_ID_SERVERWORKS_LE:
        case 0x0007:
                break;

        default:
                if (cap_ptr)
                        dev_err(&pdev->dev, "unsupported Serverworks chipset "
                                "[%04x/%04x]\n", pdev->vendor, pdev->device);
                return -ENODEV;
        }

        /* Everything is on func 1 here so we are hardcoding function one */
        bridge_dev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
                        (unsigned int)pdev->bus->number,
                        PCI_DEVFN(0, 1));
        if (!bridge_dev) {
                dev_info(&pdev->dev, "can't find secondary device\n");
                return -ENODEV;
        }

        serverworks_private.svrwrks_dev = bridge_dev;
        serverworks_private.gart_addr_ofs = 0x10;

        pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
        if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
                pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
                if (temp2 != 0) {
                        dev_info(&pdev->dev, "64 bit aperture address, "
                                 "but top bits are not zero; disabling AGP\n");
                        return -ENODEV;
                }
                serverworks_private.mm_addr_ofs = 0x18;
        } else
                serverworks_private.mm_addr_ofs = 0x14;

        pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
        if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
                pci_read_config_dword(pdev,
                                serverworks_private.mm_addr_ofs + 4, &temp2);
                if (temp2 != 0) {
                        dev_info(&pdev->dev, "64 bit MMIO address, but top "
                                 "bits are not zero; disabling AGP\n");
                        return -ENODEV;
                }
        }

        bridge = agp_alloc_bridge();
        if (!bridge)
                return -ENOMEM;

        bridge->driver = &sworks_driver;
        bridge->dev_private_data = &serverworks_private;
        bridge->dev = pci_dev_get(pdev);

        pci_set_drvdata(pdev, bridge);
        return agp_add_bridge(bridge);
}

static void agp_serverworks_remove(struct pci_dev *pdev)
{
        struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

        pci_dev_put(bridge->dev);
        agp_remove_bridge(bridge);
        agp_put_bridge(bridge);
        pci_dev_put(serverworks_private.svrwrks_dev);
        serverworks_private.svrwrks_dev = NULL;
}

static struct pci_device_id agp_serverworks_pci_table[] = {
        {
                .class = (PCI_CLASS_BRIDGE_HOST << 8),
                .class_mask = ~0,
                .vendor = PCI_VENDOR_ID_SERVERWORKS,
                .device = PCI_ANY_ID,
                .subvendor = PCI_ANY_ID,
                .subdevice = PCI_ANY_ID,
        },
        { }
};

MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table);

static struct pci_driver agp_serverworks_pci_driver = {
        .name = "agpgart-serverworks",
        .id_table = agp_serverworks_pci_table,
        .probe = agp_serverworks_probe,
        .remove = agp_serverworks_remove,
};

static int __init agp_serverworks_init(void)
{
        if (agp_off)
                return -EINVAL;
        return pci_register_driver(&agp_serverworks_pci_driver);
}

static void __exit agp_serverworks_cleanup(void)
{
        pci_unregister_driver(&agp_serverworks_pci_driver);
}

module_init(agp_serverworks_init);
module_exit(agp_serverworks_cleanup);

MODULE_LICENSE("GPL and additional rights");