1/*
2 * AGPGART module version 0.99
3 * Copyright (C) 1999 Jeff Hartmann
4 * Copyright (C) 1999 Precision Insight, Inc.
5 * Copyright (C) 1999 Xi Graphics, Inc.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included
15 * in all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
23 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26#include <linux/config.h>
27#include <linux/version.h>
28#include <linux/module.h>
29#include <linux/types.h>
30#include <linux/kernel.h>
31#include <linux/sched.h>
32#include <linux/mm.h>
33#include <linux/string.h>
34#include <linux/errno.h>
35#include <linux/slab.h>
36#include <linux/vmalloc.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/pagemap.h>
40#include <linux/miscdevice.h>
41#include <linux/pm.h>
42#include <asm/system.h>
43#include <asm/uaccess.h>
44#include <asm/io.h>
45#include <asm/page.h>
46
47#include <linux/agp_backend.h>
48#include "agp.h"
49
MODULE_AUTHOR("Jeff Hartmann <jhartmann@precisioninsight.com>");
MODULE_PARM(agp_try_unsupported, "1i");
MODULE_LICENSE("GPL and additional rights");
/* Public entry points used by AGP-aware in-kernel clients (e.g. DRM). */
EXPORT_SYMBOL(agp_free_memory);
EXPORT_SYMBOL(agp_allocate_memory);
EXPORT_SYMBOL(agp_copy_info);
EXPORT_SYMBOL(agp_bind_memory);
EXPORT_SYMBOL(agp_unbind_memory);
EXPORT_SYMBOL(agp_enable);
EXPORT_SYMBOL(agp_backend_acquire);
EXPORT_SYMBOL(agp_backend_release);

static void flush_cache(void);

/* Singleton describing the single AGP bridge supported per system. */
static struct agp_bridge_data agp_bridge;
/* NOTE(review): this is both __initdata and a MODULE_PARM; writing the
 * parameter after init memory is discarded would be unsafe -- confirm. */
static int agp_try_unsupported __initdata = 0;

/* Bytes at the start of the aperture withheld from general allocation. */
int agp_memory_reserved;
/* Kernel-visible pointer to the GATT for other subsystems. */
__u32 *agp_gatt_table;
69
/* Write back and invalidate this CPU's caches so the GART sees fresh data. */
static inline void flush_cache(void)
{
#if defined(__i386__) || defined(__x86_64__)
	asm volatile ("wbinvd":::"memory");
#elif defined(__alpha__) || defined(__ia64__) || defined(__sparc__)
	/* ??? I wonder if we'll really need to flush caches, or if the
	   core logic can manage to keep the system coherent.  The ARM
	   speaks only of using `cflush' to get things in memory in
	   preparation for power failure.

	   If we do need to call `cflush', we'll need a target page,
	   as we can only flush one page at a time.

	   Ditto for IA-64. --davidm 00/08/07 */
	mb();
#else
#error "Please define flush_cache."
#endif
}
89
#ifdef CONFIG_SMP
static atomic_t cpus_waiting;

/* IPI callback: flush this CPU's cache, then rendezvous with the others. */
static void ipi_handler(void *null)
{
	flush_cache();
	atomic_dec(&cpus_waiting);
	/* Spin until every other CPU has flushed as well. */
	while (atomic_read(&cpus_waiting) > 0)
		barrier();
}

/* Flush the caches on all CPUs; returns only after all have finished. */
static void smp_flush_cache(void)
{
	atomic_set(&cpus_waiting, smp_num_cpus - 1);
	if (smp_call_function(ipi_handler, NULL, 1, 0) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
	flush_cache();
	while (atomic_read(&cpus_waiting) > 0)
		barrier();
}
#define global_cache_flush smp_flush_cache
#else				/* CONFIG_SMP */
#define global_cache_flush flush_cache
#endif				/* CONFIG_SMP */
114
115int agp_backend_acquire(void)
116{
117	if (agp_bridge.type == NOT_SUPPORTED) {
118		return -EINVAL;
119	}
120	atomic_inc(&agp_bridge.agp_in_use);
121
122	if (atomic_read(&agp_bridge.agp_in_use) != 1) {
123		atomic_dec(&agp_bridge.agp_in_use);
124		return -EBUSY;
125	}
126	MOD_INC_USE_COUNT;
127	return 0;
128}
129
130void agp_backend_release(void)
131{
132	if (agp_bridge.type == NOT_SUPPORTED) {
133		return;
134	}
135	atomic_dec(&agp_bridge.agp_in_use);
136	MOD_DEC_USE_COUNT;
137}
138
139/*
140 * Generic routines for handling agp_memory structures -
141 * They use the basic page allocation routines to do the
142 * brunt of the work.
143 */
144
145
146static void agp_free_key(int key)
147{
148
149	if (key < 0) {
150		return;
151	}
152	if (key < MAXKEY) {
153		clear_bit(key, agp_bridge.key_list);
154	}
155}
156
157static int agp_get_key(void)
158{
159	int bit;
160
161	bit = find_first_zero_bit(agp_bridge.key_list, MAXKEY);
162	if (bit < MAXKEY) {
163		set_bit(bit, agp_bridge.key_list);
164		return bit;
165	}
166	return -1;
167}
168
169static agp_memory *agp_create_memory(int scratch_pages)
170{
171	agp_memory *new;
172
173	new = kmalloc(sizeof(agp_memory), GFP_KERNEL);
174
175	if (new == NULL) {
176		return NULL;
177	}
178	memset(new, 0, sizeof(agp_memory));
179	new->key = agp_get_key();
180
181	if (new->key < 0) {
182		kfree(new);
183		return NULL;
184	}
185	new->memory = vmalloc(PAGE_SIZE * scratch_pages);
186
187	if (new->memory == NULL) {
188		agp_free_key(new->key);
189		kfree(new);
190		return NULL;
191	}
192	new->num_scratch_pages = scratch_pages;
193	return new;
194}
195
/*
 * Release an agp_memory chunk: unbind it if still bound, hand
 * chipset-specific types back to the driver's free_by_type() hook,
 * free the backing pages, and drop the module reference taken at
 * allocation time.
 */
void agp_free_memory(agp_memory * curr)
{
	int i;

	if ((agp_bridge.type == NOT_SUPPORTED) || (curr == NULL)) {
		return;
	}
	if (curr->is_bound == TRUE) {
		agp_unbind_memory(curr);
	}
	if (curr->type != 0) {
		/* Non-generic types free the struct themselves. */
		agp_bridge.free_by_type(curr);
		return;
	}
	if (curr->page_count != 0) {
		for (i = 0; i < curr->page_count; i++) {
			/* Strip the PTE flag bits to recover the page address. */
			curr->memory[i] &= ~(0x00000fff);
			agp_bridge.agp_destroy_page((unsigned long)
					 phys_to_virt(curr->memory[i]));
		}
	}
	agp_free_key(curr->key);
	vfree(curr->memory);
	kfree(curr);
	MOD_DEC_USE_COUNT;
}
222
223#define ENTRIES_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
224
/*
 * Allocate 'page_count' pages of AGP memory of the given type.
 * Type 0 is generic memory backed by agp_alloc_page(); other types
 * are delegated to the chipset driver's alloc_by_type() hook.
 * Returns NULL on failure or if the AGP memory limit would be exceeded.
 */
agp_memory *agp_allocate_memory(size_t page_count, u32 type)
{
	int scratch_pages;
	agp_memory *new;
	int i;

	if (agp_bridge.type == NOT_SUPPORTED) {
		return NULL;
	}
	if ((atomic_read(&agp_bridge.current_memory_agp) + page_count) >
	    agp_bridge.max_memory_agp) {
		return NULL;
	}

	if (type != 0) {
		new = agp_bridge.alloc_by_type(page_count, type);
		return new;
	}
      	/* We always increase the module count, since agp_free_memory()
	 * auto-decrements it.
	 */

      	MOD_INC_USE_COUNT;

	/* One scratch page holds ENTRIES_PER_PAGE table entries. */
	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	new = agp_create_memory(scratch_pages);

	if (new == NULL) {
	      	MOD_DEC_USE_COUNT;
		return NULL;
	}
	for (i = 0; i < page_count; i++) {
		new->memory[i] = agp_bridge.agp_alloc_page();

		if (new->memory[i] == 0) {
			/* Free this structure (also drops the module count). */
			agp_free_memory(new);
			return NULL;
		}
		/* Store the masked physical address, ready for the GATT. */
		new->memory[i] =
		    agp_bridge.mask_memory(
				   virt_to_phys((void *) new->memory[i]),
						  type);
		new->page_count++;
	}

	return new;
}
274
275/* End - Generic routines for handling agp_memory structures */
276
277static int agp_return_size(void)
278{
279	int current_size;
280	void *temp;
281
282	temp = agp_bridge.current_size;
283
284	switch (agp_bridge.size_type) {
285	case U8_APER_SIZE:
286		current_size = A_SIZE_8(temp)->size;
287		break;
288	case U16_APER_SIZE:
289		current_size = A_SIZE_16(temp)->size;
290		break;
291	case U32_APER_SIZE:
292		current_size = A_SIZE_32(temp)->size;
293		break;
294	case LVL2_APER_SIZE:
295		current_size = A_SIZE_LVL2(temp)->size;
296		break;
297	case FIXED_APER_SIZE:
298		current_size = A_SIZE_FIX(temp)->size;
299		break;
300	default:
301		current_size = 0;
302		break;
303	}
304
305	current_size -= (agp_memory_reserved / (1024*1024));
306
307	return current_size;
308}
309
310/* Routine to copy over information structure */
311
/*
 * Fill 'info' with a snapshot of the bridge state for in-kernel users.
 * Returns 0 on success, or -1 (with only info->chipset meaningful)
 * when no supported bridge is present.
 */
int agp_copy_info(agp_kern_info * info)
{
	unsigned long page_mask = 0;
	int i;

	memset(info, 0, sizeof(agp_kern_info));
	if (agp_bridge.type == NOT_SUPPORTED) {
		info->chipset = agp_bridge.type;
		return -1;
	}
	info->version.major = agp_bridge.version->major;
	info->version.minor = agp_bridge.version->minor;
	info->device = agp_bridge.dev;
	info->chipset = agp_bridge.type;
	info->mode = agp_bridge.mode;
	info->aper_base = agp_bridge.gart_bus_addr;
	info->aper_size = agp_return_size();
	info->max_memory = agp_bridge.max_memory_agp;
	info->current_memory = atomic_read(&agp_bridge.current_memory_agp);
	info->cant_use_aperture = agp_bridge.cant_use_aperture;

	/* Accumulate the flag bits any mask may set; the complement is
	 * the set of bits available to carry page addresses. */
	for(i = 0; i < agp_bridge.num_of_masks; i++)
		page_mask |= agp_bridge.mask_memory(page_mask, i);

	info->page_mask = ~page_mask;
	return 0;
}
339
340/* End - Routine to copy over information structure */
341
342/*
343 * Routines for handling swapping of agp_memory into the GATT -
344 * These routines take agp_memory and insert them into the GATT.
345 * They call device specific routines to actually write to the GATT.
346 */
347
348int agp_bind_memory(agp_memory * curr, off_t pg_start)
349{
350	int ret_val;
351
352	if ((agp_bridge.type == NOT_SUPPORTED) ||
353	    (curr == NULL) || (curr->is_bound == TRUE)) {
354		return -EINVAL;
355	}
356	if (curr->is_flushed == FALSE) {
357		CACHE_FLUSH();
358		curr->is_flushed = TRUE;
359	}
360	ret_val = agp_bridge.insert_memory(curr, pg_start, curr->type);
361
362	if (ret_val != 0) {
363		return ret_val;
364	}
365	curr->is_bound = TRUE;
366	curr->pg_start = pg_start;
367	return 0;
368}
369
370int agp_unbind_memory(agp_memory * curr)
371{
372	int ret_val;
373
374	if ((agp_bridge.type == NOT_SUPPORTED) || (curr == NULL)) {
375		return -EINVAL;
376	}
377	if (curr->is_bound != TRUE) {
378		return -EINVAL;
379	}
380	ret_val = agp_bridge.remove_memory(curr, curr->pg_start, curr->type);
381
382	if (ret_val != 0) {
383		return ret_val;
384	}
385	curr->is_bound = FALSE;
386	curr->pg_start = 0;
387	return 0;
388}
389
390/* End - Routines for handling swapping of agp_memory into the GATT */
391
392/*
393 * Driver routines - start
394 * Currently this module supports the following chipsets:
395 * i810, i815, 440lx, 440bx, 440gx, i830, i840, i845, i850, i860, via vp3,
396 * via mvp3, via kx133, via kt133, amd irongate, amd 761, amd 762, ALi M1541,
397 * and generic support for the SiS chipsets.
398 */
399
400/* Generic Agp routines - Start */
401
/*
 * Generic AGP mode negotiation: intersect the requested mode with the
 * capabilities of the bridge and of every AGP device in the system,
 * then program the agreed command value into all of them.
 */
static void agp_generic_agp_enable(u32 mode)
{
	struct pci_dev *device = NULL;
	u32 command, scratch;
	u8 cap_ptr;

	/* Start from the bridge's AGP status register (capability + 4). */
	pci_read_config_dword(agp_bridge.dev,
			      agp_bridge.capndx + 4,
			      &command);

	/*
	 * PASS1: go through all devices that claim to be
	 *        AGP devices and collect their data.
	 */


	pci_for_each_dev(device) {
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr != 0x00) {
			/*
			 * Ok, here we have an AGP device. Disable impossible
			 * settings, and adjust the read queue to the minimum.
			 */

			pci_read_config_dword(device, cap_ptr + 4, &scratch);

			/* adjust RQ depth to the smallest value supported
			 * by mode, bridge and device alike */
			command =
			    ((command & ~0xff000000) |
			     min_t(u32, (mode & 0xff000000),
				 min_t(u32, (command & 0xff000000),
				     (scratch & 0xff000000))));

			/* disable SBA if it's not supported everywhere */
			if (!((command & 0x00000200) &&
			      (scratch & 0x00000200) &&
			      (mode & 0x00000200)))
				command &= ~0x00000200;

			/* disable FW if it's not supported everywhere */
			if (!((command & 0x00000010) &&
			      (scratch & 0x00000010) &&
			      (mode & 0x00000010)))
				command &= ~0x00000010;

			/* disable 4X rate if not supported everywhere */
			if (!((command & 4) &&
			      (scratch & 4) &&
			      (mode & 4)))
				command &= ~0x00000004;

			/* disable 2X rate if not supported everywhere */
			if (!((command & 2) &&
			      (scratch & 2) &&
			      (mode & 2)))
				command &= ~0x00000002;

			/* disable 1X rate if not supported everywhere */
			if (!((command & 1) &&
			      (scratch & 1) &&
			      (mode & 1)))
				command &= ~0x00000001;
		}
	}
	/*
	 * PASS2: Figure out the 4X/2X/1X setting and enable the
	 *        target (our motherboard chipset).
	 */

	/* Keep only the fastest surviving rate bit. */
	if (command & 4) {
		command &= ~3;	/* 4X */
	}
	if (command & 2) {
		command &= ~5;	/* 2X */
	}
	if (command & 1) {
		command &= ~6;	/* 1X */
	}
	/* Set the AGP-enable bit. */
	command |= 0x00000100;

	/* Write the bridge's AGP command register (capability + 8). */
	pci_write_config_dword(agp_bridge.dev,
			       agp_bridge.capndx + 8,
			       command);

	/*
	 * PASS3: Go through all AGP devices and update their
	 *        command registers.
	 */

	pci_for_each_dev(device) {
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr != 0x00)
			pci_write_config_dword(device, cap_ptr + 8, command);
	}
}
494
/*
 * Allocate a single-level GATT for the current aperture size, falling
 * back to smaller aperture sizes when the physically contiguous
 * allocation fails.  The table is mapped uncached and left for the
 * caller to point entries at real pages (all start at scratch_page).
 * Returns 0 on success, -EINVAL for 2-level GATTs, -ENOMEM on failure.
 */
static int agp_generic_create_gatt_table(void)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;
	int err = 0;

	/* The generic routines can't handle 2 level gatt's */
	if (agp_bridge.size_type == LVL2_APER_SIZE) {
		return -EINVAL;
	}

	table = NULL;
	i = agp_bridge.aperture_size_idx;
	temp = agp_bridge.current_size;
	size = page_order = num_entries = 0;

	if (agp_bridge.size_type != FIXED_APER_SIZE) {
		/* Try progressively smaller apertures until the table's
		 * page allocation succeeds. */
		do {
			switch (agp_bridge.size_type) {
			case U8_APER_SIZE:
				size = A_SIZE_8(temp)->size;
				page_order =
				    A_SIZE_8(temp)->page_order;
				num_entries =
				    A_SIZE_8(temp)->num_entries;
				break;
			case U16_APER_SIZE:
				size = A_SIZE_16(temp)->size;
				page_order = A_SIZE_16(temp)->page_order;
				num_entries = A_SIZE_16(temp)->num_entries;
				break;
			case U32_APER_SIZE:
				size = A_SIZE_32(temp)->size;
				page_order = A_SIZE_32(temp)->page_order;
				num_entries = A_SIZE_32(temp)->num_entries;
				break;
				/* This case will never really happen. */
			case FIXED_APER_SIZE:
			case LVL2_APER_SIZE:
			default:
				size = page_order = num_entries = 0;
				break;
			}

			table = (char *) __get_free_pages(GFP_KERNEL,
							  page_order);

			if (table == NULL) {
				/* Advance to the next aperture size entry. */
				i++;
				switch (agp_bridge.size_type) {
				case U8_APER_SIZE:
					agp_bridge.current_size = A_IDX8();
					break;
				case U16_APER_SIZE:
					agp_bridge.current_size = A_IDX16();
					break;
				case U32_APER_SIZE:
					agp_bridge.current_size = A_IDX32();
					break;
					/* This case will never really
					 * happen.
					 */
				case FIXED_APER_SIZE:
				case LVL2_APER_SIZE:
				default:
					agp_bridge.current_size =
					    agp_bridge.current_size;
					break;
				}
				temp = agp_bridge.current_size;
			} else {
				agp_bridge.aperture_size_idx = i;
			}
		} while ((table == NULL) &&
			 (i < agp_bridge.num_aperture_sizes));
	} else {
		size = ((aper_size_info_fixed *) temp)->size;
		page_order = ((aper_size_info_fixed *) temp)->page_order;
		num_entries = ((aper_size_info_fixed *) temp)->num_entries;
		table = (char *) __get_free_pages(GFP_KERNEL, page_order);
	}

	if (table == NULL) {
		return -ENOMEM;
	}
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	/* Reserve the table's pages so the VM leaves them alone. */
	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	agp_bridge.gatt_table_real = (unsigned long *) table;
	agp_gatt_table = (void *)table;
#ifdef CONFIG_X86
	/* Make the kernel linear mapping of the table uncached too. */
	err = change_page_attr(virt_to_page(table), 1<<page_order, PAGE_KERNEL_NOCACHE);
#endif
	/* Map the table uncached; skipped when change_page_attr failed. */
	if (!err)
	agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
					(PAGE_SIZE * (1 << page_order)));
	CACHE_FLUSH();

	if (agp_bridge.gatt_table == NULL || err) {
		/* NOTE(review): on this error path a successful
		 * change_page_attr() is not reverted before the pages are
		 * freed -- confirm whether that is acceptable here. */
		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
			ClearPageReserved(page);

		free_pages((unsigned long) table, page_order);

		return -ENOMEM;
	}
	agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real);

	/* Point every entry at the scratch page to start with. */
	for (i = 0; i < num_entries; i++) {
		agp_bridge.gatt_table[i] =
		    (unsigned long) agp_bridge.scratch_page;
	}

	return 0;
}
618
/* Generic power-management suspend hook: no bridge state to save. */
static int agp_generic_suspend(void)
{
	return 0;
}
623
/* Generic power-management resume hook: no bridge state to restore. */
static void agp_generic_resume(void)
{
}
628
/*
 * Release the GATT allocated by agp_generic_create_gatt_table():
 * restore page attributes, unmap it, unreserve and free the pages.
 * Returns 0, or -EINVAL for 2-level GATTs.
 */
static int agp_generic_free_gatt_table(void)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = agp_bridge.current_size;

	switch (agp_bridge.size_type) {
	case U8_APER_SIZE:
		page_order = A_SIZE_8(temp)->page_order;
		break;
	case U16_APER_SIZE:
		page_order = A_SIZE_16(temp)->page_order;
		break;
	case U32_APER_SIZE:
		page_order = A_SIZE_32(temp)->page_order;
		break;
	case FIXED_APER_SIZE:
		page_order = A_SIZE_FIX(temp)->page_order;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
		break;
	default:
		page_order = 0;
		break;
	}

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table.
	 */

#ifdef CONFIG_X86
	/* Re-establish the normal cached linear mapping of the table. */
	change_page_attr(virt_to_page(agp_bridge.gatt_table_real), 1<<page_order,
			 PAGE_KERNEL);
#endif
	iounmap(agp_bridge.gatt_table);
	table = (char *) agp_bridge.gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_pages((unsigned long) agp_bridge.gatt_table_real, page_order);
	return 0;
}
679
/*
 * Write 'mem' into the GATT at page offset 'pg_start'.  Only generic
 * (type 0) memory is handled; range and in-use checks are performed
 * before any entry is modified.
 */
static int agp_generic_insert_memory(agp_memory * mem,
				     off_t pg_start, int type)
{
	int i, j, num_entries;
	void *temp;

	temp = agp_bridge.current_size;

	switch (agp_bridge.size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
		break;
	default:
		num_entries = 0;
		break;
	}

	/* Entries covering reserved aperture memory are off limits. */
	num_entries -= agp_memory_reserved/PAGE_SIZE;
	if (num_entries < 0) num_entries = 0;

	if (type != 0 || mem->type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}
	if ((pg_start + mem->page_count) > num_entries) {
		return -EINVAL;
	}
	j = pg_start;

	/* Refuse to overwrite entries that are already in use. */
	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
			return -EBUSY;
		}
		j++;
	}

	if (mem->is_flushed == FALSE) {
		CACHE_FLUSH();
		mem->is_flushed = TRUE;
	}
	/* mem->memory[] already holds masked physical addresses. */
	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		agp_bridge.gatt_table[j] = mem->memory[i];
	}

	agp_bridge.tlb_flush(mem);
	return 0;
}
740
741static int agp_generic_remove_memory(agp_memory * mem, off_t pg_start,
742				     int type)
743{
744	int i;
745
746	if (type != 0 || mem->type != 0) {
747		/* The generic routines know nothing of memory types */
748		return -EINVAL;
749	}
750	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
751		agp_bridge.gatt_table[i] =
752		    (unsigned long) agp_bridge.scratch_page;
753	}
754
755	agp_bridge.tlb_flush(mem);
756	return 0;
757}
758
/* Generic bridges define no extra memory types, so this always fails. */
static agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
	return NULL;
}
763
764static void agp_generic_free_by_type(agp_memory * curr)
765{
766	if (curr->memory != NULL) {
767		vfree(curr->memory);
768	}
769	agp_free_key(curr->key);
770	kfree(curr);
771}
772
773/*
774 * Basic Page Allocation Routines -
775 * These routines handle page allocation
776 * and by default they reserve the allocated
777 * memory.  They also handle incrementing the
778 * current_memory_agp value, Which is checked
779 * against a maximum value.
780 */
781
/*
 * Allocate one kernel page for aperture use: mark it uncached on x86,
 * pin and lock it, and account it against the AGP memory total.
 * Returns the page's kernel virtual address, or 0 on failure.
 */
static unsigned long agp_generic_alloc_page(void)
{
	struct page * page;

	page = alloc_page(GFP_KERNEL);
	if (page == NULL) {
		return 0;
	}
#ifdef CONFIG_X86
	/* Uncached mapping; reverted in agp_generic_destroy_page(). */
	if (change_page_attr(page, 1, PAGE_KERNEL_NOCACHE) < 0) {
		__free_page(page);
		return 0;
	}
#endif
	get_page(page);
	LockPage(page);
	atomic_inc(&agp_bridge.current_memory_agp);
	return (unsigned long)page_address(page);
}
801
/*
 * Undo agp_generic_alloc_page(): restore the cached mapping (x86),
 * unpin, unlock and free the page, and update the accounting.
 */
static void agp_generic_destroy_page(unsigned long addr)
{
	void *pt = (void *) addr;
	struct page *page;

	if (pt == NULL) {
		return;
	}

	page = virt_to_page(pt);
#ifdef CONFIG_X86
	/* Restore the normal cached mapping set up at allocation time. */
	change_page_attr(page, 1, PAGE_KERNEL);
#endif
	put_page(page);
	UnlockPage(page);
	free_page((unsigned long) pt);
	atomic_dec(&agp_bridge.current_memory_agp);
}
820
821/* End Basic Page Allocation Routines */
822
823void agp_enable(u32 mode)
824{
825	if (agp_bridge.type == NOT_SUPPORTED) return;
826	agp_bridge.agp_enable(mode);
827}
828
829/* End - Generic Agp routines */
830
831#ifdef CONFIG_AGP_I810
/* Fixed aperture modes supported by the i810 (64 MB and 32 MB). */
static aper_size_info_fixed intel_i810_sizes[] =
{
	{64, 16384, 4},
     /* The 32M mode still requires a 64k gatt */
	{32, 8192, 4}
};

/* Chipset-private agp_memory types for the i810. */
#define AGP_DCACHE_MEMORY 1
#define AGP_PHYS_MEMORY   2

/* PTE flag masks, indexed by memory type. */
static gatt_mask intel_i810_masks[] =
{
	{I810_PTE_VALID, 0},
	{(I810_PTE_VALID | I810_PTE_LOCAL), AGP_DCACHE_MEMORY},
	{I810_PTE_VALID, 0}
};

static struct _intel_i810_private {
	struct pci_dev *i810_dev;	/* device one */
	volatile u8 *registers;		/* mapped MMIO register window */
	int num_dcache_entries;		/* on-chip dcache pages (0 if absent) */
} intel_i810_private;
854
855static int intel_i810_fetch_size(void)
856{
857	u32 smram_miscc;
858	aper_size_info_fixed *values;
859
860	pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc);
861	values = A_SIZE_FIX(agp_bridge.aperture_sizes);
862
863	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
864		printk(KERN_WARNING PFX "i810 is disabled\n");
865		return 0;
866	}
867	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
868		agp_bridge.previous_size =
869		    agp_bridge.current_size = (void *) (values + 1);
870		agp_bridge.aperture_size_idx = 1;
871		return values[1].size;
872	} else {
873		agp_bridge.previous_size =
874		    agp_bridge.current_size = (void *) (values);
875		agp_bridge.aperture_size_idx = 0;
876		return values[0].size;
877	}
878
879	return 0;
880}
881
882static int intel_i810_configure(void)
883{
884	aper_size_info_fixed *current_size;
885	u32 temp;
886	int i;
887
888	current_size = A_SIZE_FIX(agp_bridge.current_size);
889
890	pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp);
891	temp &= 0xfff80000;
892
893	intel_i810_private.registers =
894	    (volatile u8 *) ioremap(temp, 128 * 4096);
895
896	if ((INREG32(intel_i810_private.registers, I810_DRAM_CTL)
897	     & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
898		/* This will need to be dynamically assigned */
899		printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n");
900		intel_i810_private.num_dcache_entries = 1024;
901	}
902	pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp);
903	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
904	OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL,
905		 agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED);
906	CACHE_FLUSH();
907
908	if (agp_bridge.needs_scratch_page == TRUE) {
909		for (i = 0; i < current_size->num_entries; i++) {
910			OUTREG32(intel_i810_private.registers,
911				 I810_PTE_BASE + (i * 4),
912				 agp_bridge.scratch_page);
913		}
914	}
915	return 0;
916}
917
/* Disable the GATT and drop the MMIO mapping set up in configure(). */
static void intel_i810_cleanup(void)
{
	OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, 0);
	iounmap((void *) intel_i810_private.registers);
}
923
924static void intel_i810_tlbflush(agp_memory * mem)
925{
926	return;
927}
928
929static void intel_i810_agp_enable(u32 mode)
930{
931	return;
932}
933
/*
 * Write 'mem' into the i810 GTT at page offset 'pg_start'.
 * Handles the chipset-private AGP_DCACHE_MEMORY and AGP_PHYS_MEMORY
 * types in addition to generic (type 0) memory.
 */
static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;

	temp = agp_bridge.current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if ((pg_start + mem->page_count) > num_entries) {
		return -EINVAL;
	}
	/* Refuse to clobber PTEs that are already in use. */
	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
			return -EBUSY;
		}
	}

	if (type != 0 || mem->type != 0) {
		if ((type == AGP_DCACHE_MEMORY) &&
		    (mem->type == AGP_DCACHE_MEMORY)) {
			/* special insert: map the on-chip dcache 1:1 */
			CACHE_FLUSH();
			for (i = pg_start;
			     i < (pg_start + mem->page_count); i++) {
				OUTREG32(intel_i810_private.registers,
					 I810_PTE_BASE + (i * 4),
					 (i * 4096) | I810_PTE_LOCAL |
					 I810_PTE_VALID);
			}
			CACHE_FLUSH();
			agp_bridge.tlb_flush(mem);
			return 0;
		}
	        if((type == AGP_PHYS_MEMORY) &&
		   (mem->type == AGP_PHYS_MEMORY)) {
		   goto insert;
		}
		return -EINVAL;
	}

insert:
	/* Generic / AGP_PHYS path: entries were pre-masked at alloc time. */
   	CACHE_FLUSH();
	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		OUTREG32(intel_i810_private.registers,
			 I810_PTE_BASE + (j * 4), mem->memory[i]);
	}
	CACHE_FLUSH();

	agp_bridge.tlb_flush(mem);
	return 0;
}
986
987static int intel_i810_remove_entries(agp_memory * mem, off_t pg_start,
988				     int type)
989{
990	int i;
991
992	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
993		OUTREG32(intel_i810_private.registers,
994			 I810_PTE_BASE + (i * 4),
995			 agp_bridge.scratch_page);
996	}
997
998	CACHE_FLUSH();
999	agp_bridge.tlb_flush(mem);
1000	return 0;
1001}
1002
/*
 * Allocate i810 chipset-specific memory.  AGP_DCACHE_MEMORY maps the
 * on-chip dcache (pg_count must match it exactly); AGP_PHYS_MEMORY is
 * a single page whose physical address is exported (hardware cursor).
 * Returns NULL for any other type or on failure.
 */
static agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
{
	agp_memory *new;

	if (type == AGP_DCACHE_MEMORY) {
		if (pg_count != intel_i810_private.num_dcache_entries) {
			return NULL;
		}
		new = agp_create_memory(1);

		if (new == NULL) {
			return NULL;
		}
		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		/* The dcache needs no backing pages, so drop the array. */
		vfree(new->memory);
	   	MOD_INC_USE_COUNT;
		return new;
	}
	if(type == AGP_PHYS_MEMORY) {
		/* The i810 requires a physical address to program
		 * its mouse pointer into hardware.  However the
		 * Xserver still writes to it through the agp
		 * aperture.
		 */
	   	if (pg_count != 1) {
		   	return NULL;
		}
	   	new = agp_create_memory(1);

		if (new == NULL) {
			return NULL;
		}
	   	MOD_INC_USE_COUNT;
		new->memory[0] = agp_bridge.agp_alloc_page();

		if (new->memory[0] == 0) {
			/* Free this structure.
			 * NOTE(review): free_by_type will run
			 * phys_to_virt(0) on this zero entry -- confirm
			 * that path is safe. */
			agp_free_memory(new);
			return NULL;
		}
		new->memory[0] =
		    agp_bridge.mask_memory(
				   virt_to_phys((void *) new->memory[0]),
						  type);
		new->page_count = 1;
	   	new->num_scratch_pages = 1;
	   	new->type = AGP_PHYS_MEMORY;
	        new->physical = virt_to_phys((void *) new->memory[0]);
	   	return new;
	}

	return NULL;
}
1058
/*
 * Free an i810 chipset-specific agp_memory.  AGP_PHYS_MEMORY releases
 * its single backing page; AGP_DCACHE_MEMORY has none (its scratch
 * array was already vfreed at allocation time).
 */
static void intel_i810_free_by_type(agp_memory * curr)
{
	agp_free_key(curr->key);
   	if(curr->type == AGP_PHYS_MEMORY) {
		/* NOTE(review): memory[0] still carries the PTE flag bits set
		 * by mask_memory(); phys_to_virt() of the unmasked value lands
		 * in the right page only because the flags occupy low bits
		 * -- confirm this is intentional. */
	   	agp_bridge.agp_destroy_page((unsigned long)
				 phys_to_virt(curr->memory[0]));
		vfree(curr->memory);
	}
	kfree(curr);
   	MOD_DEC_USE_COUNT;
}
1070
1071static unsigned long intel_i810_mask_memory(unsigned long addr, int type)
1072{
1073	/* Type checking must be done elsewhere */
1074	return addr | agp_bridge.masks[type].mask;
1075}
1076
/*
 * Wire the agp_bridge function table for an i810-class integrated
 * graphics device.  'i810_dev' is the graphics function itself.
 * Always returns 0.
 */
static int __init intel_i810_setup(struct pci_dev *i810_dev)
{
	intel_i810_private.i810_dev = i810_dev;

	agp_bridge.masks = intel_i810_masks;
	agp_bridge.num_of_masks = 2;
	agp_bridge.aperture_sizes = (void *) intel_i810_sizes;
	agp_bridge.size_type = FIXED_APER_SIZE;
	agp_bridge.num_aperture_sizes = 2;
	agp_bridge.dev_private_data = (void *) &intel_i810_private;
	agp_bridge.needs_scratch_page = TRUE;
	agp_bridge.configure = intel_i810_configure;
	agp_bridge.fetch_size = intel_i810_fetch_size;
	agp_bridge.cleanup = intel_i810_cleanup;
	agp_bridge.tlb_flush = intel_i810_tlbflush;
	agp_bridge.mask_memory = intel_i810_mask_memory;
	agp_bridge.agp_enable = intel_i810_agp_enable;
	agp_bridge.cache_flush = global_cache_flush;
	/* The GATT itself is generic; only the PTE writes are i810-special. */
	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
	agp_bridge.insert_memory = intel_i810_insert_entries;
	agp_bridge.remove_memory = intel_i810_remove_entries;
	agp_bridge.alloc_by_type = intel_i810_alloc_by_type;
	agp_bridge.free_by_type = intel_i810_free_by_type;
	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
	agp_bridge.suspend = agp_generic_suspend;
	agp_bridge.resume = agp_generic_resume;
	agp_bridge.cant_use_aperture = 0;

	return 0;
}
1109
/* Fixed aperture modes for the i830 (128 MB and 64 MB). */
static aper_size_info_fixed intel_i830_sizes[] =
{
	{128, 32768, 5},
	/* The 64M mode still requires a 128k gatt */
	{64, 16384, 5}
};

static struct _intel_i830_private {
	struct pci_dev *i830_dev;   /* device one */
	volatile u8 *registers;     /* mapped MMIO register window */
	int gtt_entries;            /* GTT entries claimed by stolen/local memory */
} intel_i830_private;
1122
/*
 * Work out how many GTT entries are already claimed by stolen or local
 * graphics memory, from the GMCH control register.  Must run after the
 * MMIO registers are mapped (the local-memory case reads them).
 * The result is stored in intel_i830_private.gtt_entries.
 */
static void intel_i830_init_gtt_entries(void)
{
	u16 gmch_ctrl;
	int gtt_entries;
	u8 rdct;
	static const int ddt[4] = { 0, 16, 32, 64 };

	pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl);

	switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
	case I830_GMCH_GMS_STOLEN_512:
		/* 132K is subtracted from every stolen-memory size --
		 * NOTE(review): presumably overhead for the GTT itself;
		 * confirm the breakdown. */
		gtt_entries = KB(512) - KB(132);
		printk(KERN_INFO PFX "detected %dK stolen memory.\n",gtt_entries / KB(1));
		break;
	case I830_GMCH_GMS_STOLEN_1024:
		gtt_entries = MB(1) - KB(132);
		printk(KERN_INFO PFX "detected %dK stolen memory.\n",gtt_entries / KB(1));
		break;
	case I830_GMCH_GMS_STOLEN_8192:
		gtt_entries = MB(8) - KB(132);
		printk(KERN_INFO PFX "detected %dK stolen memory.\n",gtt_entries / KB(1));
		break;
	case I830_GMCH_GMS_LOCAL:
		/* Local memory: size derived from the RDRAM channel type. */
		rdct = INREG8(intel_i830_private.registers,I830_RDRAM_CHANNEL_TYPE);
		gtt_entries = (I830_RDRAM_ND(rdct) + 1) * MB(ddt[I830_RDRAM_DDT(rdct)]);
		printk(KERN_INFO PFX "detected %dK local memory.\n",gtt_entries / KB(1));
		break;
	default:
		printk(KERN_INFO PFX "no video memory detected.\n");
		gtt_entries = 0;
		break;
	}

	/* Convert bytes to 4K GTT entries. */
	gtt_entries /= KB(4);

	intel_i830_private.gtt_entries = gtt_entries;
}
1160
/* The intel i830 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for the GTT.
 */
/*
 * The i830's GTT is initialized by the BIOS during POST and lives in
 * stolen memory, so no table is allocated here; we only map the MMIO
 * registers and record the existing table's bus address.
 * Returns 0 on success, -ENOMEM if the MMIO mapping fails.
 */
static int intel_i830_create_gatt_table(void)
{
	int page_order;
	aper_size_info_fixed *size;
	int num_entries;
	u32 temp;

	size = agp_bridge.current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	/* No kernel-allocated table backs this bridge. */
	agp_bridge.gatt_table_real = 0;

	pci_read_config_dword(intel_i830_private.i830_dev,I810_MMADDR,&temp);
	temp &= 0xfff80000;

	intel_i830_private.registers = (volatile u8 *) ioremap(temp,128 * 4096);
	if (!intel_i830_private.registers) return (-ENOMEM);

	/* The hardware already holds the table's physical address. */
	temp = INREG32(intel_i830_private.registers,I810_PGETBL_CTL) & 0xfffff000;
	CACHE_FLUSH();

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();

	agp_bridge.gatt_table = NULL;

	agp_bridge.gatt_bus_addr = temp;

	return(0);
}
1194
/* Nothing to tear down: the i830 GTT lives at the top of stolen memory
 * reserved by the BIOS, so no table was ever allocated by the driver.
 */
static int intel_i830_free_gatt_table(void)
{
	return 0;
}
1202
1203static int intel_i830_fetch_size(void)
1204{
1205	u16 gmch_ctrl;
1206	aper_size_info_fixed *values;
1207
1208	pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl);
1209	values = A_SIZE_FIX(agp_bridge.aperture_sizes);
1210
1211	if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
1212		agp_bridge.previous_size = agp_bridge.current_size = (void *) values;
1213		agp_bridge.aperture_size_idx = 0;
1214		return(values[0].size);
1215	} else {
1216		agp_bridge.previous_size = agp_bridge.current_size = (void *) values;
1217		agp_bridge.aperture_size_idx = 1;
1218		return(values[1].size);
1219	}
1220
1221	return(0);
1222}
1223
/* Enable the i830 GMCH and point the hardware at the GTT found during
 * create_gatt_table. All aperture entries beyond the stolen-memory
 * region are aimed at the scratch page so stray accesses are harmless.
 * Always returns 0.
 */
static int intel_i830_configure(void)
{
	aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge.current_size);

	/* GMADDR gives the aperture's bus address */
	pci_read_config_dword(intel_i830_private.i830_dev,I810_GMADDR,&temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* turn the GMCH on */
	pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge.dev,I830_GMCH_CTRL,gmch_ctrl);

	/* enable the page table at the bus address recorded earlier */
	OUTREG32(intel_i830_private.registers,I810_PGETBL_CTL,agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED);
	CACHE_FLUSH();

	/* fill every entry past the stolen-memory area with the scratch page */
	if (agp_bridge.needs_scratch_page == TRUE)
		for (i = intel_i830_private.gtt_entries; i < current_size->num_entries; i++)
			OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (i * 4),agp_bridge.scratch_page);

	return (0);
}
1249
1250static void intel_i830_cleanup(void)
1251{
1252	iounmap((void *) intel_i830_private.registers);
1253}
1254
1255static int intel_i830_insert_entries(agp_memory *mem,off_t pg_start,int type)
1256{
1257	int i,j,num_entries;
1258	void *temp;
1259
1260	temp = agp_bridge.current_size;
1261	num_entries = A_SIZE_FIX(temp)->num_entries;
1262
1263	if (pg_start < intel_i830_private.gtt_entries) {
1264		printk (KERN_DEBUG "pg_start == 0x%.8lx,intel_i830_private.gtt_entries == 0x%.8x\n",
1265				pg_start,intel_i830_private.gtt_entries);
1266
1267		printk ("Trying to insert into local/stolen memory\n");
1268		return (-EINVAL);
1269	}
1270
1271	if ((pg_start + mem->page_count) > num_entries)
1272		return (-EINVAL);
1273
1274	/* The i830 can't check the GTT for entries since its read only,
1275	 * depend on the caller to make the correct offset decisions.
1276	 */
1277
1278	if ((type != 0 && type != AGP_PHYS_MEMORY) ||
1279		(mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
1280		return (-EINVAL);
1281
1282	CACHE_FLUSH();
1283
1284	for (i = 0, j = pg_start; i < mem->page_count; i++, j++)
1285		OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (j * 4),mem->memory[i]);
1286
1287	CACHE_FLUSH();
1288
1289	agp_bridge.tlb_flush(mem);
1290
1291	return(0);
1292}
1293
1294static int intel_i830_remove_entries(agp_memory *mem,off_t pg_start,int type)
1295{
1296	int i;
1297
1298	CACHE_FLUSH ();
1299
1300	if (pg_start < intel_i830_private.gtt_entries) {
1301		printk ("Trying to disable local/stolen memory\n");
1302		return (-EINVAL);
1303	}
1304
1305	for (i = pg_start; i < (mem->page_count + pg_start); i++)
1306		OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (i * 4),agp_bridge.scratch_page);
1307
1308	CACHE_FLUSH();
1309
1310	agp_bridge.tlb_flush(mem);
1311
1312	return (0);
1313}
1314
/* Allocate chipset-specific memory types for the i830.
 * Only single-page AGP_PHYS_MEMORY requests are honored (used for the
 * hardware cursor); DCACHE and every other type return NULL.
 */
static agp_memory *intel_i830_alloc_by_type(size_t pg_count,int type)
{
	agp_memory *nw;

	/* always return NULL for now */
	if (type == AGP_DCACHE_MEMORY) return(NULL);

	if (type == AGP_PHYS_MEMORY) {
		unsigned long physical;

		/* The i830 requires a physical address to program
		 * its mouse pointer into hardware. However the
		 * Xserver still writes to it through the agp
		 * aperture
		 */

		if (pg_count != 1) return(NULL);

		nw = agp_create_memory(1);

		if (nw == NULL) return(NULL);

		/* NOTE(review): the module use count is bumped before the
		 * page allocation; agp_free_memory presumably drops it on
		 * the failure path below -- confirm. */
		MOD_INC_USE_COUNT;
		nw->memory[0] = agp_bridge.agp_alloc_page();
		physical = nw->memory[0];	/* keep the kernel virtual addr for ->physical */
		if (nw->memory[0] == 0) {
			/* free this structure */
			agp_free_memory(nw);
			return(NULL);
		}

		/* the table entry gets the masked bus address... */
		nw->memory[0] = agp_bridge.mask_memory(virt_to_phys((void *) nw->memory[0]),type);
		nw->page_count = 1;
		nw->num_scratch_pages = 1;
		nw->type = AGP_PHYS_MEMORY;
		/* ...while the caller sees the raw physical address */
		nw->physical = virt_to_phys((void *) physical);
		return(nw);
	}

	return(NULL);
}
1356
/* Install the i830 handlers into the global agp_bridge function table.
 * The i830 supplies its own GATT discovery and entry insert/remove,
 * but reuses several i810 helpers (masks, TLB flush, enable, free).
 * Always returns 0.
 */
static int __init intel_i830_setup(struct pci_dev *i830_dev)
{
	intel_i830_private.i830_dev = i830_dev;

	/* aperture/mask description */
	agp_bridge.masks = intel_i810_masks;
	agp_bridge.num_of_masks = 3;
	agp_bridge.aperture_sizes = (void *) intel_i830_sizes;
	agp_bridge.size_type = FIXED_APER_SIZE;
	agp_bridge.num_aperture_sizes = 2;

	agp_bridge.dev_private_data = (void *) &intel_i830_private;
	agp_bridge.needs_scratch_page = TRUE;

	/* chipset-specific operations */
	agp_bridge.configure = intel_i830_configure;
	agp_bridge.fetch_size = intel_i830_fetch_size;
	agp_bridge.cleanup = intel_i830_cleanup;
	agp_bridge.tlb_flush = intel_i810_tlbflush;
	agp_bridge.mask_memory = intel_i810_mask_memory;
	agp_bridge.agp_enable = intel_i810_agp_enable;
	agp_bridge.cache_flush = global_cache_flush;

	agp_bridge.create_gatt_table = intel_i830_create_gatt_table;
	agp_bridge.free_gatt_table = intel_i830_free_gatt_table;

	agp_bridge.insert_memory = intel_i830_insert_entries;
	agp_bridge.remove_memory = intel_i830_remove_entries;
	agp_bridge.alloc_by_type = intel_i830_alloc_by_type;
	agp_bridge.free_by_type = intel_i810_free_by_type;
	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
	agp_bridge.agp_destroy_page = agp_generic_destroy_page;

	/* generic power management */
	agp_bridge.suspend = agp_generic_suspend;
	agp_bridge.resume = agp_generic_resume;
	agp_bridge.cant_use_aperture = 0;

	return(0);
}
1394
1395#endif /* CONFIG_AGP_I810 */
1396
#ifdef CONFIG_AGP_INTEL

#endif /* CONFIG_AGP_INTEL */
1400
1401#ifdef CONFIG_AGP_INTEL
1402
1403static int intel_fetch_size(void)
1404{
1405	int i;
1406	u16 temp;
1407	aper_size_info_16 *values;
1408
1409	pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp);
1410	values = A_SIZE_16(agp_bridge.aperture_sizes);
1411
1412	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
1413		if (temp == values[i].size_value) {
1414			agp_bridge.previous_size =
1415			    agp_bridge.current_size = (void *) (values + i);
1416			agp_bridge.aperture_size_idx = i;
1417			return values[i].size;
1418		}
1419	}
1420
1421	return 0;
1422}
1423
1424
1425static int intel_8xx_fetch_size(void)
1426{
1427	int i;
1428	u8 temp;
1429	aper_size_info_8 *values;
1430
1431	pci_read_config_byte(agp_bridge.dev, INTEL_APSIZE, &temp);
1432
1433        /* Intel 815 chipsets have a _weird_ APSIZE register with only
1434         * one non-reserved bit, so mask the others out ... */
1435        if (agp_bridge.type == INTEL_I815)
1436		temp &= (1 << 3);
1437
1438	values = A_SIZE_8(agp_bridge.aperture_sizes);
1439
1440	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
1441		if (temp == values[i].size_value) {
1442			agp_bridge.previous_size =
1443			    agp_bridge.current_size = (void *) (values + i);
1444			agp_bridge.aperture_size_idx = i;
1445			return values[i].size;
1446		}
1447	}
1448
1449	return 0;
1450}
1451
/* Flush the chipset GTT TLB by toggling AGPCTRL.
 * NOTE(review): 0x2200 vs 0x2280 differ in bit 7, so this looks like a
 * disable/re-enable pulse of one control bit -- confirm against the
 * chipset datasheet. The mem argument is unused.
 */
static void intel_tlbflush(agp_memory * mem)
{
	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200);
	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
}
1457
1458
/* Flush the GTT TLB on 8xx bridges: clear then set bit 7 of AGPCTRL
 * with read-modify-write cycles. The mem argument is unused.
 */
static void intel_8xx_tlbflush(agp_memory * mem)
{
  u32 temp;
  pci_read_config_dword(agp_bridge.dev, INTEL_AGPCTRL, &temp);
  pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, temp & ~(1 << 7));
  pci_read_config_dword(agp_bridge.dev, INTEL_AGPCTRL, &temp);
  pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, temp | (1 << 7));
}
1467
1468
/* Undo intel_configure: clear bit 9 (the bit set during configure) in
 * NBXCFG and restore the aperture size that was active beforehand.
 */
static void intel_cleanup(void)
{
	u16 temp;
	aper_size_info_16 *previous_size;

	previous_size = A_SIZE_16(agp_bridge.previous_size);
	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp);
	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9));
	pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
			      previous_size->size_value);
}
1480
1481
/* 8xx variant of intel_cleanup: clear bit 9 in NBXCFG and restore the
 * previous aperture size (APSIZE is one byte wide on these bridges).
 */
static void intel_8xx_cleanup(void)
{
	u16 temp;
	aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge.previous_size);
	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp);
	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9));
	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
			      previous_size->size_value);
}
1493
1494
/* Program a classic Intel bridge: aperture size, GATT base, AGP
 * control and the NBXCFG enable bit, then clear stale error status.
 * Always returns 0.
 */
static int intel_configure(void)
{
	u32 temp;
	u16 temp2;
	aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge.current_size);

	/* aperture size */
	pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
			      current_size->size_value);

	/* address to map to */
	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - aperture base */
	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
			       agp_bridge.gatt_bus_addr);

	/* agpctrl */
	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);

	/* paccfg/nbxcfg: clear bit 10, set the enable bit 9 */
	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2);
	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG,
			      (temp2 & ~(1 << 10)) | (1 << 9));
	/* clear any possible error conditions */
	pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7);
	return 0;
}
1526
/* Program the i815 bridge. Its ATTBASE register has reserved high bits
 * (INTEL_815_ATTBASE_MASK), so the GATT base must be merged into the
 * existing register value rather than written outright; a GATT located
 * in the reserved range cannot be represented and panics.
 * Always returns 0.
 */
static int intel_815_configure(void)
{
	u32 temp, addr;
	u8 temp2;
	aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge.current_size);

	/* aperture size */
	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
			      current_size->size_value);

	/* address to map to */
	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - aperture base */
        /* the Intel 815 chipset spec. says that bits 29-31 in the
         * ATTBASE register are reserved -> try not to write them */
        if (agp_bridge.gatt_bus_addr &  INTEL_815_ATTBASE_MASK)
		panic("gatt bus addr too high");
	pci_read_config_dword(agp_bridge.dev, INTEL_ATTBASE, &addr);
	addr &= INTEL_815_ATTBASE_MASK;
	addr |= agp_bridge.gatt_bus_addr;
	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, addr);

	/* agpctrl */
	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000);

	/* apcont: set the aperture access enable bit (bit 1) */
	pci_read_config_byte(agp_bridge.dev, INTEL_815_APCONT, &temp2);
	pci_write_config_byte(agp_bridge.dev, INTEL_815_APCONT,
			      temp2 | (1 << 1));
	/* clear any possible error conditions */
        /* Oddness : this chipset seems to have no ERRSTS register ! */
	return 0;
}
1564
1565static void intel_820_tlbflush(agp_memory * mem)
1566{
1567  return;
1568}
1569
/* Undo intel_820_configure: clear the aperture-access bit (bit 1) in
 * the RDCR register and restore the previous aperture size.
 */
static void intel_820_cleanup(void)
{
	u8 temp;
	aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge.previous_size);
	pci_read_config_byte(agp_bridge.dev, INTEL_I820_RDCR, &temp);
	pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR,
			      temp & ~(1 << 1));
	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
			      previous_size->size_value);
}
1582
1583
/* Program the i820 bridge: aperture size, GATT base, AGP control, the
 * aperture enable bit in RDCR, then clear stale error status.
 * Always returns 0.
 */
static int intel_820_configure(void)
{
	u32 temp;
 	u8 temp2;
	aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge.current_size);

	/* aperture size */
	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
			      current_size->size_value);

	/* address to map to */
	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - aperture base */
	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
			       agp_bridge.gatt_bus_addr);

	/* agpctrl */
	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000);

	/* global enable aperture access */
	/* This flag is not accessed through MCHCFG register as in */
	/* i850 chipset. */
	pci_read_config_byte(agp_bridge.dev, INTEL_I820_RDCR, &temp2);
	pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR,
			      temp2 | (1 << 1));
	/* clear any possible AGP-related error conditions */
	pci_write_config_word(agp_bridge.dev, INTEL_I820_ERRSTS, 0x001c);
	return 0;
}
1617
1618static int intel_830mp_configure(void)
1619{
1620       u32 temp;
1621       u16 temp2;
1622       aper_size_info_8 *current_size;
1623
1624       current_size = A_SIZE_8(agp_bridge.current_size);
1625
1626       /* aperture size */
1627       pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
1628                             current_size->size_value);
1629
1630       /* address to map to */
1631       pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
1632       agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1633
1634       /* attbase - aperture base */
1635       pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
1636                              agp_bridge.gatt_bus_addr);
1637
1638       /* agpctrl */
1639       pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000);
1640
1641       /* gmch */
1642       pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2);
1643       pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG,
1644                             temp2 | (1 << 9));
1645       /* clear any possible AGP-related error conditions */
1646       pci_write_config_word(agp_bridge.dev, INTEL_I830_ERRSTS, 0x1c);
1647       return 0;
1648}
1649
1650
1651
/* Program the i840 bridge: aperture size, GATT base, AGP control and
 * the MCHCFG enable bit, then clear stale error status.
 * Always returns 0.
 */
static int intel_840_configure(void)
{
	u32 temp;
	u16 temp2;
	aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge.current_size);

	/* aperture size */
	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
			      current_size->size_value);

	/* address to map to */
	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - aperture base */
	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
			       agp_bridge.gatt_bus_addr);

	/* agpctrl */
	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000);

	/* mcgcfg: set the AGP enable bit (bit 9) */
	pci_read_config_word(agp_bridge.dev, INTEL_I840_MCHCFG, &temp2);
	pci_write_config_word(agp_bridge.dev, INTEL_I840_MCHCFG,
			      temp2 | (1 << 9));
	/* clear any possible error conditions */
	pci_write_config_word(agp_bridge.dev, INTEL_I840_ERRSTS, 0xc000);
	return 0;
}
1683
/* Program the i845 bridge: aperture size, GATT base, AGP control and
 * the AGPM enable bit, then clear stale error status. Also run on
 * resume (see intel_845_resume). Always returns 0.
 */
static int intel_845_configure(void)
{
	u32 temp;
	u8 temp2;
	aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge.current_size);

	/* aperture size */
	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
			      current_size->size_value);

	/* address to map to */
	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - aperture base */
	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
			       agp_bridge.gatt_bus_addr);

	/* agpctrl */
	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000);

	/* agpm: set the aperture enable bit (bit 1) */
	pci_read_config_byte(agp_bridge.dev, INTEL_I845_AGPM, &temp2);
	pci_write_config_byte(agp_bridge.dev, INTEL_I845_AGPM,
			      temp2 | (1 << 1));
	/* clear any possible error conditions */
	pci_write_config_word(agp_bridge.dev, INTEL_I845_ERRSTS, 0x001c);
	return 0;
}
1715
/* PM resume hook for the i845: re-run the configure step to restore
 * the bridge registers after suspend. (Indentation fixed: the body
 * used 3 spaces where the file uses tabs.)
 */
static void intel_845_resume(void)
{
	intel_845_configure();
}
1720
1721
/* Program the i850 bridge: aperture size, GATT base, AGP control and
 * the MCHCFG enable bit, then clear stale error status.
 * Always returns 0.
 */
static int intel_850_configure(void)
{
	u32 temp;
	u16 temp2;
	aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge.current_size);

	/* aperture size */
	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
			      current_size->size_value);

	/* address to map to */
	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - aperture base */
	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
			       agp_bridge.gatt_bus_addr);

	/* agpctrl */
	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000);

	/* mcgcfg: set the AGP enable bit (bit 9) */
	pci_read_config_word(agp_bridge.dev, INTEL_I850_MCHCFG, &temp2);
	pci_write_config_word(agp_bridge.dev, INTEL_I850_MCHCFG,
			      temp2 | (1 << 9));
	/* clear any possible AGP-related error conditions */
	pci_write_config_word(agp_bridge.dev, INTEL_I850_ERRSTS, 0x001c);
	return 0;
}
1753
/* Program the i860 bridge: aperture size, GATT base, AGP control and
 * the MCHCFG enable bit, then clear stale error status.
 * Always returns 0.
 */
static int intel_860_configure(void)
{
	u32 temp;
	u16 temp2;
	aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge.current_size);

	/* aperture size */
	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
			      current_size->size_value);

	/* address to map to */
	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - aperture base */
	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
			       agp_bridge.gatt_bus_addr);

	/* agpctrl */
	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000);

	/* mcgcfg: set the AGP enable bit (bit 9) */
	pci_read_config_word(agp_bridge.dev, INTEL_I860_MCHCFG, &temp2);
	pci_write_config_word(agp_bridge.dev, INTEL_I860_MCHCFG,
			      temp2 | (1 << 9));
	/* clear any possible AGP-related error conditions */
	pci_write_config_word(agp_bridge.dev, INTEL_I860_ERRSTS, 0xf700);
	return 0;
}
1785
1786
1787static unsigned long intel_mask_memory(unsigned long addr, int type)
1788{
1789	/* Memory type is ignored */
1790
1791	return addr | agp_bridge.masks[0].mask;
1792}
1793
/* PM resume hook for generic Intel bridges: reprogram the chipset by
 * re-running the configure step.
 */
static void intel_resume(void)
{
	intel_configure();
}
1798
/* GATT entry mask for generic Intel bridges.
 * NOTE(review): 0x17 presumably sets the entry's valid/attribute bits
 * -- confirm against the chipset documentation.
 */
static gatt_mask intel_generic_masks[] =
{
	{0x00000017, 0}
};
1804
/* i815 aperture sizes: {size in MB, num entries, page order, APSIZE
 * register value} -- field meanings inferred from fetch_size usage.
 */
static aper_size_info_8 intel_815_sizes[2] =
{
	{64, 16384, 4, 0},
	{32, 8192, 3, 8},
};
1810
/* 8xx aperture sizes: {size in MB, num entries, page order, APSIZE
 * register value}.
 */
static aper_size_info_8 intel_8xx_sizes[7] =
{
	{256, 65536, 6, 0},
	{128, 32768, 5, 32},
	{64, 16384, 4, 48},
	{32, 8192, 3, 56},
	{16, 4096, 2, 60},
	{8, 2048, 1, 62},
	{4, 1024, 0, 63}
};
1821
/* Generic (16-bit APSIZE) aperture sizes: {size in MB, num entries,
 * page order, APSIZE register value}.
 */
static aper_size_info_16 intel_generic_sizes[7] =
{
	{256, 65536, 6, 0},
	{128, 32768, 5, 32},
	{64, 16384, 4, 48},
	{32, 8192, 3, 56},
	{16, 4096, 2, 60},
	{8, 2048, 1, 62},
	{4, 1024, 0, 63}
};
1832
1833static aper_size_info_8 intel_830mp_sizes[4] =
1834{
1835  {256, 65536, 6, 0},
1836  {128, 32768, 5, 32},
1837  {64, 16384, 4, 48},
1838  {32, 8192, 3, 56}
1839};
1840
1841static int __init intel_generic_setup (struct pci_dev *pdev)
1842{
1843	agp_bridge.masks = intel_generic_masks;
1844	agp_bridge.num_of_masks = 1;
1845	agp_bridge.aperture_sizes = (void *) intel_generic_sizes;
1846	agp_bridge.size_type = U16_APER_SIZE;
1847	agp_bridge.num_aperture_sizes = 7;
1848	agp_bridge.dev_private_data = NULL;
1849	agp_bridge.needs_scratch_page = FALSE;
1850	agp_bridge.configure = intel_configure;
1851	agp_bridge.fetch_size = intel_fetch_size;
1852	agp_bridge.cleanup = intel_cleanup;
1853	agp_bridge.tlb_flush = intel_tlbflush;
1854	agp_bridge.mask_memory = intel_mask_memory;
1855	agp_bridge.agp_enable = agp_generic_agp_enable;
1856	agp_bridge.cache_flush = global_cache_flush;
1857	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1858	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1859	agp_bridge.insert_memory = agp_generic_insert_memory;
1860	agp_bridge.remove_memory = agp_generic_remove_memory;
1861	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1862	agp_bridge.free_by_type = agp_generic_free_by_type;
1863	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
1864	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
1865	agp_bridge.suspend = agp_generic_suspend;
1866	agp_bridge.resume = intel_resume;
1867	agp_bridge.cant_use_aperture = 0;
1868
1869	return 0;
1870
1871	(void) pdev; /* unused */
1872}
1873
1874static int __init intel_815_setup (struct pci_dev *pdev)
1875{
1876	agp_bridge.masks = intel_generic_masks;
1877	agp_bridge.num_of_masks = 1;
1878	agp_bridge.aperture_sizes = (void *) intel_815_sizes;
1879	agp_bridge.size_type = U8_APER_SIZE;
1880	agp_bridge.num_aperture_sizes = 2;
1881	agp_bridge.dev_private_data = NULL;
1882	agp_bridge.needs_scratch_page = FALSE;
1883	agp_bridge.configure = intel_815_configure;
1884	agp_bridge.fetch_size = intel_8xx_fetch_size;
1885	agp_bridge.cleanup = intel_8xx_cleanup;
1886	agp_bridge.tlb_flush = intel_8xx_tlbflush;
1887	agp_bridge.mask_memory = intel_mask_memory;
1888	agp_bridge.agp_enable = agp_generic_agp_enable;
1889	agp_bridge.cache_flush = global_cache_flush;
1890	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1891	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1892	agp_bridge.insert_memory = agp_generic_insert_memory;
1893	agp_bridge.remove_memory = agp_generic_remove_memory;
1894	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1895	agp_bridge.free_by_type = agp_generic_free_by_type;
1896	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
1897	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
1898	agp_bridge.suspend = agp_generic_suspend;
1899	agp_bridge.resume = agp_generic_resume;
1900	agp_bridge.cant_use_aperture = 0;
1901
1902	return 0;
1903
1904	(void) pdev; /* unused */
1905}
1906
1907static int __init intel_820_setup (struct pci_dev *pdev)
1908{
1909       agp_bridge.masks = intel_generic_masks;
1910       agp_bridge.num_of_masks = 1;
1911       agp_bridge.aperture_sizes = (void *) intel_8xx_sizes;
1912       agp_bridge.size_type = U8_APER_SIZE;
1913       agp_bridge.num_aperture_sizes = 7;
1914       agp_bridge.dev_private_data = NULL;
1915       agp_bridge.needs_scratch_page = FALSE;
1916       agp_bridge.configure = intel_820_configure;
1917       agp_bridge.fetch_size = intel_8xx_fetch_size;
1918       agp_bridge.cleanup = intel_820_cleanup;
1919       agp_bridge.tlb_flush = intel_820_tlbflush;
1920       agp_bridge.mask_memory = intel_mask_memory;
1921       agp_bridge.agp_enable = agp_generic_agp_enable;
1922       agp_bridge.cache_flush = global_cache_flush;
1923       agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1924       agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1925       agp_bridge.insert_memory = agp_generic_insert_memory;
1926       agp_bridge.remove_memory = agp_generic_remove_memory;
1927       agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1928       agp_bridge.free_by_type = agp_generic_free_by_type;
1929       agp_bridge.agp_alloc_page = agp_generic_alloc_page;
1930       agp_bridge.agp_destroy_page = agp_generic_destroy_page;
1931       agp_bridge.suspend = agp_generic_suspend;
1932       agp_bridge.resume = agp_generic_resume;
1933       agp_bridge.cant_use_aperture = 0;
1934
1935       return 0;
1936
1937       (void) pdev; /* unused */
1938}
1939
1940static int __init intel_830mp_setup (struct pci_dev *pdev)
1941{
1942       agp_bridge.masks = intel_generic_masks;
1943       agp_bridge.num_of_masks = 1;
1944       agp_bridge.aperture_sizes = (void *) intel_830mp_sizes;
1945       agp_bridge.size_type = U8_APER_SIZE;
1946       agp_bridge.num_aperture_sizes = 4;
1947       agp_bridge.dev_private_data = NULL;
1948       agp_bridge.needs_scratch_page = FALSE;
1949       agp_bridge.configure = intel_830mp_configure;
1950       agp_bridge.fetch_size = intel_8xx_fetch_size;
1951       agp_bridge.cleanup = intel_8xx_cleanup;
1952       agp_bridge.tlb_flush = intel_8xx_tlbflush;
1953       agp_bridge.mask_memory = intel_mask_memory;
1954       agp_bridge.agp_enable = agp_generic_agp_enable;
1955       agp_bridge.cache_flush = global_cache_flush;
1956       agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1957       agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1958       agp_bridge.insert_memory = agp_generic_insert_memory;
1959       agp_bridge.remove_memory = agp_generic_remove_memory;
1960       agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1961       agp_bridge.free_by_type = agp_generic_free_by_type;
1962       agp_bridge.agp_alloc_page = agp_generic_alloc_page;
1963       agp_bridge.agp_destroy_page = agp_generic_destroy_page;
1964       agp_bridge.suspend = agp_generic_suspend;
1965       agp_bridge.resume = agp_generic_resume;
1966       agp_bridge.cant_use_aperture = 0;
1967       return 0;
1968
1969       (void) pdev; /* unused */
1970}
1971
1972static int __init intel_840_setup (struct pci_dev *pdev)
1973{
1974	agp_bridge.masks = intel_generic_masks;
1975	agp_bridge.num_of_masks = 1;
1976	agp_bridge.aperture_sizes = (void *) intel_8xx_sizes;
1977	agp_bridge.size_type = U8_APER_SIZE;
1978	agp_bridge.num_aperture_sizes = 7;
1979	agp_bridge.dev_private_data = NULL;
1980	agp_bridge.needs_scratch_page = FALSE;
1981	agp_bridge.configure = intel_840_configure;
1982	agp_bridge.fetch_size = intel_8xx_fetch_size;
1983	agp_bridge.cleanup = intel_8xx_cleanup;
1984	agp_bridge.tlb_flush = intel_8xx_tlbflush;
1985	agp_bridge.mask_memory = intel_mask_memory;
1986	agp_bridge.agp_enable = agp_generic_agp_enable;
1987	agp_bridge.cache_flush = global_cache_flush;
1988	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1989	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1990	agp_bridge.insert_memory = agp_generic_insert_memory;
1991	agp_bridge.remove_memory = agp_generic_remove_memory;
1992	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1993	agp_bridge.free_by_type = agp_generic_free_by_type;
1994	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
1995	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
1996	agp_bridge.suspend = agp_generic_suspend;
1997	agp_bridge.resume = agp_generic_resume;
1998	agp_bridge.cant_use_aperture = 0;
1999
2000	return 0;
2001
2002	(void) pdev; /* unused */
2003}
2004
2005static int __init intel_845_setup (struct pci_dev *pdev)
2006{
2007	agp_bridge.masks = intel_generic_masks;
2008	agp_bridge.num_of_masks = 1;
2009	agp_bridge.aperture_sizes = (void *) intel_8xx_sizes;
2010	agp_bridge.size_type = U8_APER_SIZE;
2011	agp_bridge.num_aperture_sizes = 7;
2012	agp_bridge.dev_private_data = NULL;
2013	agp_bridge.needs_scratch_page = FALSE;
2014	agp_bridge.configure = intel_845_configure;
2015	agp_bridge.fetch_size = intel_8xx_fetch_size;
2016	agp_bridge.cleanup = intel_8xx_cleanup;
2017	agp_bridge.tlb_flush = intel_8xx_tlbflush;
2018	agp_bridge.mask_memory = intel_mask_memory;
2019	agp_bridge.agp_enable = agp_generic_agp_enable;
2020	agp_bridge.cache_flush = global_cache_flush;
2021	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
2022	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
2023	agp_bridge.insert_memory = agp_generic_insert_memory;
2024	agp_bridge.remove_memory = agp_generic_remove_memory;
2025	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
2026	agp_bridge.free_by_type = agp_generic_free_by_type;
2027	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
2028	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
2029	agp_bridge.suspend = agp_generic_suspend;
2030	agp_bridge.resume = intel_845_resume;
2031	agp_bridge.cant_use_aperture = 0;
2032
2033	return 0;
2034
2035	(void) pdev; /* unused */
2036}
2037
2038static int __init intel_850_setup (struct pci_dev *pdev)
2039{
2040	agp_bridge.masks = intel_generic_masks;
2041	agp_bridge.num_of_masks = 1;
2042	agp_bridge.aperture_sizes = (void *) intel_8xx_sizes;
2043	agp_bridge.size_type = U8_APER_SIZE;
2044	agp_bridge.num_aperture_sizes = 7;
2045	agp_bridge.dev_private_data = NULL;
2046	agp_bridge.needs_scratch_page = FALSE;
2047	agp_bridge.configure = intel_850_configure;
2048	agp_bridge.fetch_size = intel_8xx_fetch_size;
2049	agp_bridge.cleanup = intel_8xx_cleanup;
2050	agp_bridge.tlb_flush = intel_8xx_tlbflush;
2051	agp_bridge.mask_memory = intel_mask_memory;
2052	agp_bridge.agp_enable = agp_generic_agp_enable;
2053	agp_bridge.cache_flush = global_cache_flush;
2054	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
2055	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
2056	agp_bridge.insert_memory = agp_generic_insert_memory;
2057	agp_bridge.remove_memory = agp_generic_remove_memory;
2058	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
2059	agp_bridge.free_by_type = agp_generic_free_by_type;
2060	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
2061	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
2062	agp_bridge.suspend = agp_generic_suspend;
2063	agp_bridge.resume = agp_generic_resume;
2064	agp_bridge.cant_use_aperture = 0;
2065
2066	return 0;
2067
2068	(void) pdev; /* unused */
2069}
2070
2071static int __init intel_860_setup (struct pci_dev *pdev)
2072{
2073	agp_bridge.masks = intel_generic_masks;
2074	agp_bridge.num_of_masks = 1;
2075	agp_bridge.aperture_sizes = (void *) intel_8xx_sizes;
2076	agp_bridge.size_type = U8_APER_SIZE;
2077	agp_bridge.num_aperture_sizes = 7;
2078	agp_bridge.dev_private_data = NULL;
2079	agp_bridge.needs_scratch_page = FALSE;
2080	agp_bridge.configure = intel_860_configure;
2081	agp_bridge.fetch_size = intel_8xx_fetch_size;
2082	agp_bridge.cleanup = intel_8xx_cleanup;
2083	agp_bridge.tlb_flush = intel_8xx_tlbflush;
2084	agp_bridge.mask_memory = intel_mask_memory;
2085	agp_bridge.agp_enable = agp_generic_agp_enable;
2086	agp_bridge.cache_flush = global_cache_flush;
2087	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
2088	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
2089	agp_bridge.insert_memory = agp_generic_insert_memory;
2090	agp_bridge.remove_memory = agp_generic_remove_memory;
2091	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
2092	agp_bridge.free_by_type = agp_generic_free_by_type;
2093	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
2094	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
2095	agp_bridge.suspend = agp_generic_suspend;
2096	agp_bridge.resume = agp_generic_resume;
2097	agp_bridge.cant_use_aperture = 0;
2098
2099	return 0;
2100
2101	(void) pdev; /* unused */
2102}
2103
2104#endif /* CONFIG_AGP_INTEL */
2105
2106#ifdef CONFIG_AGP_VIA
2107
2108static int via_fetch_size(void)
2109{
2110	int i;
2111	u8 temp;
2112	aper_size_info_8 *values;
2113
2114	values = A_SIZE_8(agp_bridge.aperture_sizes);
2115	pci_read_config_byte(agp_bridge.dev, VIA_APSIZE, &temp);
2116	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
2117		if (temp == values[i].size_value) {
2118			agp_bridge.previous_size =
2119			    agp_bridge.current_size = (void *) (values + i);
2120			agp_bridge.aperture_size_idx = i;
2121			return values[i].size;
2122		}
2123	}
2124
2125	return 0;
2126}
2127
/* Program the VIA bridge: aperture size, aperture base, GART control
 * and the GATT base address.  Always returns 0. */
static int via_configure(void)
{
	u32 temp;
	aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge.current_size);
	/* aperture size */
	pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
			      current_size->size_value);
	/* address to map to */
	pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* GART control register */
	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);

	/* attbase - aperture GATT base; low bits are enable/control flags */
	pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE,
			    (agp_bridge.gatt_bus_addr & 0xfffff000) | 3);
	return 0;
}
2149
/* Restore the aperture-size register to its pre-initialization value.
 * VIA_ATTBASE is deliberately left programmed (see comment below). */
static void via_cleanup(void)
{
	aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge.previous_size);
	pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
			      previous_size->size_value);
	/* Do not disable by writing 0 to VIA_ATTBASE, it screws things up
	 * during reinitialization.
	 */
}
2161
/* Flush the VIA GART TLB by pulsing bit 7 of the GART control register.
 * The whole TLB is flushed, so 'mem' is unused. */
static void via_tlbflush(agp_memory * mem)
{
	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000008f);
	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);
}
2167
/* Turn a physical page address into a VIA GATT entry by OR-ing in the
 * (single) chipset mask. */
static unsigned long via_mask_memory(unsigned long addr, int type)
{
	/* Memory type is ignored */

	return addr | agp_bridge.masks[0].mask;
}
2174
/* Supported VIA aperture sizes:
 * {size in MB, GATT entries, page order, VIA_APSIZE register value}. */
static aper_size_info_8 via_generic_sizes[7] =
{
	{256, 65536, 6, 0},
	{128, 32768, 5, 128},
	{64, 16384, 4, 192},
	{32, 8192, 3, 224},
	{16, 4096, 2, 240},
	{8, 2048, 1, 248},
	{4, 1024, 0, 252}
};
2185
/* VIA sets no extra bits in GATT entries (mask is zero). */
static gatt_mask via_generic_masks[] =
{
	{0x00000000, 0}
};
2190
2191static int __init via_generic_setup (struct pci_dev *pdev)
2192{
2193	agp_bridge.masks = via_generic_masks;
2194	agp_bridge.num_of_masks = 1;
2195	agp_bridge.aperture_sizes = (void *) via_generic_sizes;
2196	agp_bridge.size_type = U8_APER_SIZE;
2197	agp_bridge.num_aperture_sizes = 7;
2198	agp_bridge.dev_private_data = NULL;
2199	agp_bridge.needs_scratch_page = FALSE;
2200	agp_bridge.configure = via_configure;
2201	agp_bridge.fetch_size = via_fetch_size;
2202	agp_bridge.cleanup = via_cleanup;
2203	agp_bridge.tlb_flush = via_tlbflush;
2204	agp_bridge.mask_memory = via_mask_memory;
2205	agp_bridge.agp_enable = agp_generic_agp_enable;
2206	agp_bridge.cache_flush = global_cache_flush;
2207	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
2208	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
2209	agp_bridge.insert_memory = agp_generic_insert_memory;
2210	agp_bridge.remove_memory = agp_generic_remove_memory;
2211	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
2212	agp_bridge.free_by_type = agp_generic_free_by_type;
2213	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
2214	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
2215	agp_bridge.suspend = agp_generic_suspend;
2216	agp_bridge.resume = agp_generic_resume;
2217	agp_bridge.cant_use_aperture = 0;
2218
2219	return 0;
2220
2221	(void) pdev; /* unused */
2222}
2223
2224#endif /* CONFIG_AGP_VIA */
2225
2226#ifdef CONFIG_AGP_SIS
2227
2228static int sis_fetch_size(void)
2229{
2230	u8 temp_size;
2231	int i;
2232	aper_size_info_8 *values;
2233
2234	pci_read_config_byte(agp_bridge.dev, SIS_APSIZE, &temp_size);
2235	values = A_SIZE_8(agp_bridge.aperture_sizes);
2236	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
2237		if ((temp_size == values[i].size_value) ||
2238		    ((temp_size & ~(0x03)) ==
2239		     (values[i].size_value & ~(0x03)))) {
2240			agp_bridge.previous_size =
2241			    agp_bridge.current_size = (void *) (values + i);
2242
2243			agp_bridge.aperture_size_idx = i;
2244			return values[i].size;
2245		}
2246	}
2247
2248	return 0;
2249}
2250
2251
/* Flush the SiS GART TLB; flushes everything, so 'mem' is unused. */
static void sis_tlbflush(agp_memory * mem)
{
	pci_write_config_byte(agp_bridge.dev, SIS_TLBFLUSH, 0x02);
}
2256
/* Program the SiS bridge: TLB control, aperture base, GATT base and
 * aperture size.  Always returns 0. */
static int sis_configure(void)
{
	u32 temp;
	aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge.current_size);
	pci_write_config_byte(agp_bridge.dev, SIS_TLBCNTRL, 0x05);
	pci_read_config_dword(agp_bridge.dev, SIS_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE,
			       agp_bridge.gatt_bus_addr);
	pci_write_config_byte(agp_bridge.dev, SIS_APSIZE,
			      current_size->size_value);
	return 0;
}
2272
/* Restore the pre-initialization aperture size with the low two
 * (control) bits cleared, disabling translation. */
static void sis_cleanup(void)
{
	aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge.previous_size);
	pci_write_config_byte(agp_bridge.dev, SIS_APSIZE,
			      (previous_size->size_value & ~(0x03)));
}
2281
/* Turn a physical page address into a SiS GATT entry by OR-ing in the
 * (single) chipset mask. */
static unsigned long sis_mask_memory(unsigned long addr, int type)
{
	/* Memory type is ignored */

	return addr | agp_bridge.masks[0].mask;
}
2288
/* Supported SiS aperture sizes:
 * {size in MB, GATT entries, page order, SIS_APSIZE register value}.
 * The low two bits of each size value are control bits (see
 * sis_fetch_size/sis_cleanup). */
static aper_size_info_8 sis_generic_sizes[7] =
{
	{256, 65536, 6, 99},
	{128, 32768, 5, 83},
	{64, 16384, 4, 67},
	{32, 8192, 3, 51},
	{16, 4096, 2, 35},
	{8, 2048, 1, 19},
	{4, 1024, 0, 3}
};
2299
/* SiS sets no extra bits in GATT entries (mask is zero). */
static gatt_mask sis_generic_masks[] =
{
	{0x00000000, 0}
};
2304
/* Fill in agp_bridge with the SiS-specific callbacks and tables; the
 * agp_generic_* helpers cover everything not chipset-specific.
 * Always returns 0; pdev is unused. */
static int __init sis_generic_setup (struct pci_dev *pdev)
{
	agp_bridge.masks = sis_generic_masks;
	agp_bridge.num_of_masks = 1;
	agp_bridge.aperture_sizes = (void *) sis_generic_sizes;
	agp_bridge.size_type = U8_APER_SIZE;
	agp_bridge.num_aperture_sizes = 7;
	agp_bridge.dev_private_data = NULL;
	agp_bridge.needs_scratch_page = FALSE;
	agp_bridge.configure = sis_configure;
	agp_bridge.fetch_size = sis_fetch_size;
	agp_bridge.cleanup = sis_cleanup;
	agp_bridge.tlb_flush = sis_tlbflush;
	agp_bridge.mask_memory = sis_mask_memory;
	agp_bridge.agp_enable = agp_generic_agp_enable;
	agp_bridge.cache_flush = global_cache_flush;
	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
	agp_bridge.insert_memory = agp_generic_insert_memory;
	agp_bridge.remove_memory = agp_generic_remove_memory;
	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
	agp_bridge.free_by_type = agp_generic_free_by_type;
	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
	agp_bridge.suspend = agp_generic_suspend;
	agp_bridge.resume = agp_generic_resume;
	agp_bridge.cant_use_aperture = 0;

	return 0;
}
2335
2336#endif /* CONFIG_AGP_SIS */
2337
2338#ifdef CONFIG_AGP_AMD
2339
/* One GATT page: 'real' is the kernel-virtual address of the page,
 * 'remapped' is an uncached ioremap alias of the same physical page. */
typedef struct _amd_page_map {
	unsigned long *real;
	unsigned long *remapped;
} amd_page_map;
2344
/* AMD Irongate driver state: memory-mapped chipset registers plus the
 * array of GATT pages forming the second level of the two-level table. */
static struct _amd_irongate_private {
	volatile u8 *registers;
	amd_page_map **gatt_pages;
	int num_tables;
} amd_irongate_private;
2350
2351static int amd_create_page_map(amd_page_map *page_map)
2352{
2353	int i;
2354	int err = 0;
2355
2356	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
2357	if (page_map->real == NULL) {
2358		return -ENOMEM;
2359	}
2360	SetPageReserved(virt_to_page(page_map->real));
2361	CACHE_FLUSH();
2362#ifdef CONFIG_X86
2363	err = change_page_attr(virt_to_page(page_map->real), 1, PAGE_KERNEL_NOCACHE);
2364#endif
2365	if (!err)
2366	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
2367					    PAGE_SIZE);
2368	if (page_map->remapped == NULL || err) {
2369		ClearPageReserved(virt_to_page(page_map->real));
2370		free_page((unsigned long) page_map->real);
2371		page_map->real = NULL;
2372		return -ENOMEM;
2373	}
2374	CACHE_FLUSH();
2375
2376	for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
2377		page_map->remapped[i] = agp_bridge.scratch_page;
2378	}
2379
2380	return 0;
2381}
2382
/* Undo amd_create_page_map(): unmap the uncached alias, restore the
 * linear-mapping cache attributes (x86), unreserve and free the page. */
static void amd_free_page_map(amd_page_map *page_map)
{
	iounmap(page_map->remapped);
#ifdef CONFIG_X86
	change_page_attr(virt_to_page(page_map->real), 1, PAGE_KERNEL);
#endif
	ClearPageReserved(virt_to_page(page_map->real));
	free_page((unsigned long) page_map->real);
}
2392
2393static void amd_free_gatt_pages(void)
2394{
2395	int i;
2396	amd_page_map **tables;
2397	amd_page_map *entry;
2398
2399	tables = amd_irongate_private.gatt_pages;
2400	for(i = 0; i < amd_irongate_private.num_tables; i++) {
2401		entry = tables[i];
2402		if (entry != NULL) {
2403			if (entry->real != NULL) {
2404				amd_free_page_map(entry);
2405			}
2406			kfree(entry);
2407		}
2408	}
2409	kfree(tables);
2410}
2411
/* Allocate nr_tables GATT pages (plus a NULL-terminated pointer
 * table) via amd_create_page_map().  On any failure everything
 * allocated so far is freed and -ENOMEM is returned. */
static int amd_create_gatt_pages(int nr_tables)
{
	amd_page_map **tables;
	amd_page_map *entry;
	int retval = 0;
	int i;

	/* +1 leaves a trailing NULL slot in the pointer table */
	tables = kmalloc((nr_tables + 1) * sizeof(amd_page_map *),
			 GFP_KERNEL);
	if (tables == NULL) {
		return -ENOMEM;
	}
	memset(tables, 0, sizeof(amd_page_map *) * (nr_tables + 1));
	for (i = 0; i < nr_tables; i++) {
		entry = kmalloc(sizeof(amd_page_map), GFP_KERNEL);
		if (entry == NULL) {
			retval = -ENOMEM;
			break;
		}
		memset(entry, 0, sizeof(amd_page_map));
		tables[i] = entry;
		retval = amd_create_page_map(entry);
		if (retval != 0) break;
	}
	/* Publish before the error path so amd_free_gatt_pages() can
	 * walk whatever was built. */
	amd_irongate_private.num_tables = nr_tables;
	amd_irongate_private.gatt_pages = tables;

	if (retval != 0) amd_free_gatt_pages();

	return retval;
}
2443
/* Since we don't need contiguous memory we just try
 * to get the gatt table once
 */
2447
/* Helpers for walking the two-level GATT: the top bits of an aperture
 * address (>> 22) select a page-directory slot, bits 21-12 select an
 * entry within that GATT page.  Arguments are fully parenthesized so
 * compound expressions expand correctly. */
#define GET_PAGE_DIR_OFF(addr) ((addr) >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr))
#define GET_GATT_OFF(addr) (((addr) & 0x003ff000) >> 12)
#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)
2454
/* Build the two-level GATT: allocate the page directory and the GATT
 * pages, then point each directory slot at its page with bit 0 set
 * (valid).  Returns 0 or a negative errno. */
static int amd_create_gatt_table(void)
{
	aper_size_info_lvl2 *value;
	amd_page_map page_dir;
	unsigned long addr;
	int retval;
	u32 temp;
	int i;

	value = A_SIZE_LVL2(agp_bridge.current_size);
	retval = amd_create_page_map(&page_dir);
	if (retval != 0) {
		return retval;
	}

	/* one GATT page maps 1024 entries */
	retval = amd_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		amd_free_page_map(&page_dir);
		return retval;
	}

	agp_bridge.gatt_table_real = page_dir.real;
	agp_bridge.gatt_table = page_dir.remapped;
	agp_bridge.gatt_bus_addr = virt_to_bus(page_dir.real);

	/* Get the address for the gart region.
	 * This is a bus address even on the alpha, b/c its
	 * used to program the agp master not the cpu
	 */

	pci_read_config_dword(agp_bridge.dev, AMD_APBASE, &temp);
	addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	agp_bridge.gart_bus_addr = addr;

	/* Calculate the agp offset; each GATT page covers 4MB of aperture */
	for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
		page_dir.remapped[GET_PAGE_DIR_OFF(addr)] =
			virt_to_bus(amd_irongate_private.gatt_pages[i]->real);
		page_dir.remapped[GET_PAGE_DIR_OFF(addr)] |= 0x00000001;
	}

	return 0;
}
2498
/* Tear down the two-level GATT: free the second-level pages first,
 * then the page directory.  Always returns 0. */
static int amd_free_gatt_table(void)
{
	amd_page_map page_dir;

	/* Rebuild a page-map descriptor for the directory so the common
	 * free helper can release it. */
	page_dir.real = agp_bridge.gatt_table_real;
	page_dir.remapped = agp_bridge.gatt_table;

	amd_free_gatt_pages();
	amd_free_page_map(&page_dir);
	return 0;
}
2510
/* Read the Irongate aperture-size bits (APSIZE bits 1-3) and match
 * them against the size table.  Returns the size in MB, or 0 if the
 * register value is unrecognised. */
static int amd_irongate_fetch_size(void)
{
	int i;
	u32 temp;
	aper_size_info_lvl2 *values;

	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
	temp = (temp & 0x0000000e);
	values = A_SIZE_LVL2(agp_bridge.aperture_sizes);
	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
		if (temp == values[i].size_value) {
			agp_bridge.previous_size =
			    agp_bridge.current_size = (void *) (values + i);

			agp_bridge.aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}
2532
/* Program the Irongate: map its MMIO registers, write the GATT base,
 * sync/indexing mode, GART enable bit, aperture size (with enable bit
 * 0 set), then flush the TLB.  Always returns 0. */
static int amd_irongate_configure(void)
{
	aper_size_info_lvl2 *current_size;
	u32 temp;
	u16 enable_reg;

	current_size = A_SIZE_LVL2(agp_bridge.current_size);

	/* Get the memory mapped registers */
	pci_read_config_dword(agp_bridge.dev, AMD_MMBASE, &temp);
	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	amd_irongate_private.registers = (volatile u8 *) ioremap(temp, 4096);

	/* Write out the address of the gatt table */
	OUTREG32(amd_irongate_private.registers, AMD_ATTBASE,
		 agp_bridge.gatt_bus_addr);

	/* Write the Sync register */
	pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80);

   	/* Set indexing mode */
   	pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL2, 0x00);

	/* Write the enable register */
	enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
	enable_reg = (enable_reg | 0x0004);
	OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);

	/* Write out the size register */
	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
	temp = (((temp & ~(0x0000000e)) | current_size->size_value)
		| 0x00000001);
	pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);

	/* Flush the tlb */
	OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);

	return 0;
}
2572
/* Disable GART translation, restore the previous aperture size, and
 * unmap the Irongate MMIO registers. */
static void amd_irongate_cleanup(void)
{
	aper_size_info_lvl2 *previous_size;
	u32 temp;
	u16 enable_reg;

	previous_size = A_SIZE_LVL2(agp_bridge.previous_size);

	enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
	enable_reg = (enable_reg & ~(0x0004));
	OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);

	/* Write back the previous size and disable gart translation */
	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
	temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
	pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);
	iounmap((void *) amd_irongate_private.registers);
}
2591
2592/*
2593 * This routine could be implemented by taking the addresses
2594 * written to the GATT, and flushing them individually.  However
2595 * currently it just flushes the whole table.  Which is probably
2596 * more efficent, since agp_memory blocks can be a large number of
2597 * entries.
2598 */
2599
static void amd_irongate_tlbflush(agp_memory * temp)
{
	/* Write 1 to the flush register; the whole TLB is invalidated,
	 * so 'temp' is unused. */
	OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
}
2604
/* Turn a physical page address into an Irongate GATT entry (sets the
 * valid bit via masks[0]). */
static unsigned long amd_irongate_mask_memory(unsigned long addr, int type)
{
	/* Only type 0 is supported by the irongate */

	return addr | agp_bridge.masks[0].mask;
}
2611
/* Map mem->page_count (pre-masked) pages into the aperture starting
 * at pg_start.  Only type 0 memory is supported.  Returns -EINVAL on
 * bad type/range, -EBUSY if any target entry is already in use. */
static int amd_insert_memory(agp_memory * mem,
			     off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long *cur_gatt;
	unsigned long addr;

	num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}
	if ((pg_start + mem->page_count) > num_entries) {
		return -EINVAL;
	}

	j = pg_start;
	/* First pass: verify every target GATT entry is free. */
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) {
			return -EBUSY;
		}
		j++;
	}

	if (mem->is_flushed == FALSE) {
		CACHE_FLUSH();
		mem->is_flushed = TRUE;
	}

	/* Second pass: install the entries, then flush the GART TLB. */
	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i];
	}
	agp_bridge.tlb_flush(mem);
	return 0;
}
2651
/* Unmap mem->page_count pages starting at pg_start by pointing their
 * GATT entries back at the scratch page, then flush the TLB.  Only
 * type 0 memory is supported. */
static int amd_remove_memory(agp_memory * mem, off_t pg_start,
			     int type)
{
	int i;
	unsigned long *cur_gatt;
	unsigned long addr;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		cur_gatt[GET_GATT_OFF(addr)] =
			(unsigned long) agp_bridge.scratch_page;
	}

	agp_bridge.tlb_flush(mem);
	return 0;
}
2672
/* Supported Irongate aperture sizes:
 * {size in MB, GATT entries, APSIZE size-bits value}. */
static aper_size_info_lvl2 amd_irongate_sizes[7] =
{
	{2048, 524288, 0x0000000c},
	{1024, 262144, 0x0000000a},
	{512, 131072, 0x00000008},
	{256, 65536, 0x00000006},
	{128, 32768, 0x00000004},
	{64, 16384, 0x00000002},
	{32, 8192, 0x00000000}
};
2683
/* Irongate GATT entries carry bit 0 (valid). */
static gatt_mask amd_irongate_masks[] =
{
	{0x00000001, 0}
};
2688
2689static int __init amd_irongate_setup (struct pci_dev *pdev)
2690{
2691	agp_bridge.masks = amd_irongate_masks;
2692	agp_bridge.num_of_masks = 1;
2693	agp_bridge.aperture_sizes = (void *) amd_irongate_sizes;
2694	agp_bridge.size_type = LVL2_APER_SIZE;
2695	agp_bridge.num_aperture_sizes = 7;
2696	agp_bridge.dev_private_data = (void *) &amd_irongate_private;
2697	agp_bridge.needs_scratch_page = FALSE;
2698	agp_bridge.configure = amd_irongate_configure;
2699	agp_bridge.fetch_size = amd_irongate_fetch_size;
2700	agp_bridge.cleanup = amd_irongate_cleanup;
2701	agp_bridge.tlb_flush = amd_irongate_tlbflush;
2702	agp_bridge.mask_memory = amd_irongate_mask_memory;
2703	agp_bridge.agp_enable = agp_generic_agp_enable;
2704	agp_bridge.cache_flush = global_cache_flush;
2705	agp_bridge.create_gatt_table = amd_create_gatt_table;
2706	agp_bridge.free_gatt_table = amd_free_gatt_table;
2707	agp_bridge.insert_memory = amd_insert_memory;
2708	agp_bridge.remove_memory = amd_remove_memory;
2709	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
2710	agp_bridge.free_by_type = agp_generic_free_by_type;
2711	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
2712	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
2713	agp_bridge.suspend = agp_generic_suspend;
2714	agp_bridge.resume = agp_generic_resume;
2715	agp_bridge.cant_use_aperture = 0;
2716
2717	return 0;
2718
2719	(void) pdev; /* unused */
2720}
2721
2722#endif /* CONFIG_AGP_AMD */
2723
2724#ifdef CONFIG_AGP_AMD_8151
2725
2726/* Begin AMD-8151 support */
2727
2728static u_int64_t pci_read64 (struct pci_dev *dev, int reg)
2729{
2730	union {
2731		u64 full;
2732		struct {
2733			u32 high;
2734			u32 low;
2735		} split;
2736	} tmp;
2737	pci_read_config_dword(dev, reg, &tmp.split.high);
2738	pci_read_config_dword(dev, reg+4, &tmp.split.low);
2739	return tmp.full;
2740}
2741
2742static void pci_write64 (struct pci_dev *dev, int reg, u64 value)
2743{
2744	union {
2745		u64 full;
2746		struct {
2747			u32 high;
2748			u32 low;
2749		} split;
2750	} tmp;
2751	tmp.full = value;
2752	pci_write_config_dword(dev, reg, tmp.split.high);
2753	pci_write_config_dword(dev, reg+4, tmp.split.low);
2754}
2755
2756
2757static int x86_64_insert_memory(agp_memory * mem, off_t pg_start, int type)
2758{
2759	int i, j, num_entries;
2760	void *temp;
2761	long tmp;
2762	u32 pte;
2763	u64 addr;
2764
2765	temp = agp_bridge.current_size;
2766
2767	num_entries = A_SIZE_32(temp)->num_entries;
2768
2769	num_entries -= agp_memory_reserved>>PAGE_SHIFT;
2770
2771	if (type != 0 || mem->type != 0)
2772		return -EINVAL;
2773
2774	/* Make sure we can fit the range in the gatt table. */
2775	if ((pg_start + mem->page_count) > num_entries)
2776		return -EINVAL;
2777
2778	j = pg_start;
2779
2780	/* gatt table should be empty. */
2781	while (j < (pg_start + mem->page_count)) {
2782		if (!PGE_EMPTY(agp_bridge.gatt_table[j]))
2783			return -EBUSY;
2784		j++;
2785	}
2786
2787	if (mem->is_flushed == FALSE) {
2788		CACHE_FLUSH();
2789		mem->is_flushed = TRUE;
2790	}
2791
2792	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
2793		addr = mem->memory[i];
2794
2795		tmp = addr;
2796		BUG_ON(tmp & 0xffffff0000000ffc);
2797		pte = (tmp & 0x000000ff00000000) >> 28;
2798		pte |=(tmp & 0x00000000fffff000);
2799		pte |= 1<<1|1<<0;
2800
2801		agp_bridge.gatt_table[j] = pte;
2802	}
2803	agp_bridge.tlb_flush(mem);
2804	return 0;
2805}
2806
2807/*
2808 * This hack alters the order element according
2809 * to the size of a long. It sucks. I totally disown this, even
2810 * though it does appear to work for the most part.
2811 */
/* {size in MB, GATT entries, page order (adjusted by sizeof(long)),
 * GARTAPERTURECTL size bits}. */
static aper_size_info_32 x86_64_aperture_sizes[7] =
{
	{32,   8192,   3+(sizeof(long)/8), 0 },
	{64,   16384,  4+(sizeof(long)/8), 1<<1 },
	{128,  32768,  5+(sizeof(long)/8), 1<<2 },
	{256,  65536,  6+(sizeof(long)/8), 1<<1 | 1<<2 },
	{512,  131072, 7+(sizeof(long)/8), 1<<3 },
	{1024, 262144, 8+(sizeof(long)/8), 1<<1 | 1<<3},
	{2048, 524288, 9+(sizeof(long)/8), 1<<2 | 1<<3}
};
2822
2823
2824/*
2825 * Get the current Aperture size from the x86-64.
2826 * Note, that there may be multiple x86-64's, but we just return
2827 * the value from the first one we find. The set_size functions
2828 * keep the rest coherent anyway. Or at least should do.
2829 */
static int amd_x86_64_fetch_size(void)
{
	struct pci_dev *dev;
	int i;
	u32 temp;
	aper_size_info_32 *values;

	/* Scan bus 0, function 3 of slots 24-31 -- the x86-64
	 * northbridge miscellaneous-control functions. */
	pci_for_each_dev(dev) {
		if (dev->bus->number==0 &&
			PCI_FUNC(dev->devfn)==3 &&
			PCI_SLOT(dev->devfn)>=24 && PCI_SLOT(dev->devfn)<=31) {

			/* bits 1-3 of GARTAPERTURECTL encode the size */
			pci_read_config_dword(dev, AMD_X86_64_GARTAPERTURECTL, &temp);
			temp = (temp & 0xe);
			values = A_SIZE_32(x86_64_aperture_sizes);

			for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
				if (temp == values[i].size_value) {
					agp_bridge.previous_size =
					    agp_bridge.current_size = (void *) (values + i);

					agp_bridge.aperture_size_idx = i;
					return values[i].size;
				}
			}
		}
	}
	/* erk, couldn't find an x86-64 ? */
	return 0;
}
2860
2861
/* Set the invalidate bit in GARTCACHECTL to flush this northbridge's
 * GART TLB. */
static void inline flush_x86_64_tlb(struct pci_dev *dev)
{
	u32 tmp;

	pci_read_config_dword (dev, AMD_X86_64_GARTCACHECTL, &tmp);
	tmp |= 1<<0;
	pci_write_config_dword (dev, AMD_X86_64_GARTCACHECTL, tmp);
}
2870
2871
/* Flush the GART TLB on every x86-64 northbridge (bus 0, function 3,
 * slots 24-31); 'temp' is unused. */
void amd_x86_64_tlbflush(agp_memory * temp)
{
	struct pci_dev *dev;

	pci_for_each_dev(dev) {
		if (dev->bus->number==0 && PCI_FUNC(dev->devfn)==3 &&
		    PCI_SLOT(dev->devfn) >=24 && PCI_SLOT(dev->devfn) <=31) {
			flush_x86_64_tlb (dev);
		}
	}
}
2883
2884
2885/*
2886 * In a multiprocessor x86-64 system, this function gets
2887 * called once for each CPU.
2888 */
2889u64 amd_x86_64_configure (struct pci_dev *hammer, u64 gatt_table)
2890{
2891	u64 aperturebase;
2892	u32 tmp;
2893	u64 addr, aper_base;
2894
2895	/* Address to map to */
2896	pci_read_config_dword (hammer, AMD_X86_64_GARTAPERTUREBASE, &tmp);
2897	aperturebase = tmp << 25;
2898	aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
2899
2900	/* address of the mappings table */
2901	addr = (u64) gatt_table;
2902	addr >>= 12;
2903	tmp = (u32) addr<<4;
2904	tmp &= ~0xf;
2905	pci_write_config_dword (hammer, AMD_X86_64_GARTTABLEBASE, tmp);
2906
2907	/* Enable GART translation for this hammer. */
2908	pci_read_config_dword(hammer, AMD_X86_64_GARTAPERTURECTL, &tmp);
2909	tmp &= 0x3f;
2910	tmp |= 1<<0;
2911	pci_write_config_dword(hammer, AMD_X86_64_GARTAPERTURECTL, tmp);
2912
2913	/* keep CPU's coherent. */
2914	flush_x86_64_tlb (hammer);
2915
2916	return aper_base;
2917}
2918
2919
/* {size in MB, GATT entries, page order, AMD-8151 APERTURESIZE bits};
 * the trailing comments show the six size bits. */
static aper_size_info_32 amd_8151_sizes[7] =
{
	{2048, 524288, 9, 0x00000000 },	/* 0 0 0 0 0 0 */
	{1024, 262144, 8, 0x00000400 },	/* 1 0 0 0 0 0 */
	{512,  131072, 7, 0x00000600 },	/* 1 1 0 0 0 0 */
	{256,  65536,  6, 0x00000700 },	/* 1 1 1 0 0 0 */
	{128,  32768,  5, 0x00000720 },	/* 1 1 1 1 0 0 */
	{64,   16384,  4, 0x00000730 },	/* 1 1 1 1 1 0 */
	{32,   8192,   3, 0x00000738 } 	/* 1 1 1 1 1 1 */
};
2930
2931static int amd_8151_configure(void)
2932{
2933	struct pci_dev *dev, *hammer=NULL;
2934	int current_size;
2935	int tmp, tmp2, i;
2936	u64 aperbar;
2937	unsigned long gatt_bus = virt_to_phys(agp_bridge.gatt_table_real);
2938
2939	/* Configure AGP regs in each x86-64 host bridge. */
2940	pci_for_each_dev(dev) {
2941		if (dev->bus->number==0 &&
2942			PCI_FUNC(dev->devfn)==3 &&
2943			PCI_SLOT(dev->devfn)>=24 && PCI_SLOT(dev->devfn)<=31) {
2944			agp_bridge.gart_bus_addr = amd_x86_64_configure(dev,gatt_bus);
2945			hammer = dev;
2946
2947			/*
2948			 * TODO: Cache pci_dev's of x86-64's in private struct to save us
2949			 * having to scan the pci list each time.
2950			 */
2951		}
2952	}
2953
2954	if (hammer == NULL) {
2955		return -ENODEV;
2956	}
2957
2958	/* Shadow x86-64 registers into 8151 registers. */
2959
2960	dev = agp_bridge.dev;
2961	if (!dev)
2962		return -ENODEV;
2963
2964	current_size = amd_x86_64_fetch_size();
2965
2966	pci_read_config_dword(dev, AMD_8151_APERTURESIZE, &tmp);
2967	tmp &= ~(0xfff);
2968
2969	/* translate x86-64 size bits to 8151 size bits*/
2970	for (i=0 ; i<7; i++) {
2971		if (amd_8151_sizes[i].size == current_size)
2972			tmp |= (amd_8151_sizes[i].size_value) << 3;
2973	}
2974	pci_write_config_dword(dev, AMD_8151_APERTURESIZE, tmp);
2975
2976	pci_read_config_dword (hammer, AMD_X86_64_GARTAPERTUREBASE, &tmp);
2977	aperbar = pci_read64 (dev, AMD_8151_VMAPERTURE);
2978	aperbar |= (tmp & 0x7fff) <<25;
2979	aperbar &= 0x000000ffffffffff;
2980	aperbar |= 1<<2;
2981	pci_write64 (dev, AMD_8151_VMAPERTURE, aperbar);
2982
2983	pci_read_config_dword(dev, AMD_8151_AGP_CTL , &tmp);
2984	tmp &= ~(AMD_8151_GTLBEN | AMD_8151_APEREN);
2985
2986	pci_read_config_dword(hammer, AMD_X86_64_GARTAPERTURECTL, &tmp2);
2987	if (tmp2 & AMD_X86_64_GARTEN)
2988		tmp |= AMD_8151_APEREN;
2989	// FIXME: bit 7 of AMD_8151_AGP_CTL (GTLBEN) must be copied if set.
2990	// But where is it set ?
2991	pci_write_config_dword(dev, AMD_8151_AGP_CTL, tmp);
2992
2993	return 0;
2994}
2995
2996
/* Disable GART translation on every x86-64 northbridge and shadow the
 * disable into the AMD-8151's control register. */
static void amd_8151_cleanup(void)
{
	struct pci_dev *dev;
	u32 tmp;

	pci_for_each_dev(dev) {
		/* disable gart translation */
		if (dev->bus->number==0 && PCI_FUNC(dev->devfn)==3 &&
		    (PCI_SLOT(dev->devfn) >=24) && (PCI_SLOT(dev->devfn) <=31)) {

			pci_read_config_dword (dev, AMD_X86_64_GARTAPERTURECTL, &tmp);
			tmp &= ~(AMD_X86_64_GARTEN);
			pci_write_config_dword (dev, AMD_X86_64_GARTAPERTURECTL, tmp);
		}

		/* Now shadow the disable in the 8151 */
		if (dev->vendor == PCI_VENDOR_ID_AMD &&
			dev->device == PCI_DEVICE_ID_AMD_8151_0) {

			pci_read_config_dword (dev, AMD_8151_AGP_CTL, &tmp);
			tmp &= ~(AMD_8151_APEREN);
			pci_write_config_dword (dev, AMD_8151_AGP_CTL, tmp);
		}
	}
}
3022
3023
3024
/* Turn a physical page address into a GATT entry (sets the valid bit
 * via masks[0]); 'type' is ignored. */
static unsigned long amd_8151_mask_memory(unsigned long addr, int type)
{
	return addr | agp_bridge.masks[0].mask;
}
3029
3030
/* GATT entries carry bit 0 (valid). */
static gatt_mask amd_8151_masks[] =
{
	{0x00000001, 0}
};
3035
3036
3037/*
3038 * Try to configure an AGP v3 capable setup.
3039 * If we fail (typically because we don't have an AGP v3
3040 * card in the system) we fall back to the generic AGP v2
3041 * routines.
3042 */
/* Four-pass AGP v3 bring-up: count v3-capable devices (falling back to
 * the generic v2 path if fewer than two), intersect capabilities into
 * 'command', resolve the final 8x/4x rate, then write the command
 * register of the target and every AGP device. */
static void agp_x86_64_agp_enable(u32 mode)
{
	struct pci_dev *device = NULL;
	u32 command, scratch;
	u8 cap_ptr;
	u8 agp_v3;
	u8 v3_devs=0;


	/* PASS1: Count # of devs capable of AGPv3 mode. */
	pci_for_each_dev(device) {
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr != 0x00) {
			pci_read_config_dword(device, cap_ptr, &scratch);
			/* bits 20-23 hold the AGP major revision */
			scratch &= (1<<20|1<<21|1<<22|1<<23);
			scratch = scratch>>20;
			/* AGP v3 capable ? */
			if (scratch>=3) {
				v3_devs++;
				printk (KERN_INFO "AGP: Found AGPv3 capable device at %d:%d:%d\n",
					device->bus->number, PCI_FUNC(device->devfn), PCI_SLOT(device->devfn));
			} else {
				printk (KERN_INFO "AGP: Meh. version %x AGP device found.\n", scratch);
			}
		}
	}
	/* If not enough, go to AGP v2 setup */
	if (v3_devs<2) {
		printk (KERN_INFO "AGP: Only %d devices found, not enough, trying AGPv2\n", v3_devs);
		return agp_generic_agp_enable(mode);
	} else {
		printk (KERN_INFO "AGP: Enough AGPv3 devices found, setting up...\n");
	}


	/* NOTE(review): reads capndx+4 (status) as the starting value for
	 * 'command'; capndx+8 (command) is written below -- verify offsets. */
	pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx + 4, &command);

	/*
	 * PASS2: go through all devices that claim to be
	 *        AGP devices and collect their data.
	 */

	pci_for_each_dev(device) {
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr != 0x00) {
			/*
			 * Ok, here we have a AGP device. Disable impossible
			 * settings, and adjust the readqueue to the minimum.
			 */

			printk (KERN_INFO "AGP: Setting up AGPv3 capable device at %d:%d:%d\n",
					device->bus->number, PCI_FUNC(device->devfn), PCI_SLOT(device->devfn));
			pci_read_config_dword(device, cap_ptr + 4, &scratch);
			/* NOTE(review): agp_v3 is computed but never used */
			agp_v3 = (scratch & (1<<3) ) >>3;

			/* adjust RQ depth */
			command =
			    ((command & ~0xff000000) |
			     min_t(u32, (mode & 0xff000000),
				 min_t(u32, (command & 0xff000000),
				     (scratch & 0xff000000))));

			/* disable SBA if it's not supported */
			if (!((command & 0x200) && (scratch & 0x200) && (mode & 0x200)))
				command &= ~0x200;

			/* disable FW if it's not supported */
			if (!((command & 0x10) && (scratch & 0x10) && (mode & 0x10)))
				command &= ~0x10;

			if (!((command & 2) && (scratch & 2) && (mode & 2))) {
				command &= ~2;		/* 8x */
				/* NOTE(review): this clears the 8x bit, so the
				 * message text looks inverted -- confirm */
				printk (KERN_INFO "AGP: Putting device into 8x mode\n");
			}

			if (!((command & 1) && (scratch & 1) && (mode & 1))) {
				command &= ~1;		/* 4x */
				/* NOTE(review): this clears the 4x bit, so the
				 * message text looks inverted -- confirm */
				printk (KERN_INFO "AGP: Putting device into 4x mode\n");
			}
		}
	}
	/*
	 * PASS3: Figure out the 8X/4X setting and enable the
	 *        target (our motherboard chipset).
	 */

	if (command & 2)
		command &= ~5;	/* 8X */

	if (command & 1)
		command &= ~6;	/* 4X */

	command |= 0x100;	/* AGP enable */

	pci_write_config_dword(agp_bridge.dev, agp_bridge.capndx + 8, command);

	/*
	 * PASS4: Go through all AGP devices and update the
	 *        command registers.
	 */

	pci_for_each_dev(device) {
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr != 0x00)
			pci_write_config_dword(device, cap_ptr + 8, command);
	}
}
3150
3151
3152static int __init amd_8151_setup (struct pci_dev *pdev)
3153{
3154	agp_bridge.masks = amd_8151_masks;
3155	agp_bridge.num_of_masks = 1;
3156	agp_bridge.aperture_sizes = (void *) amd_8151_sizes;
3157	agp_bridge.size_type = U32_APER_SIZE;
3158	agp_bridge.num_aperture_sizes = 7;
3159	agp_bridge.dev_private_data = NULL;
3160	agp_bridge.needs_scratch_page = FALSE;
3161	agp_bridge.configure = amd_8151_configure;
3162	agp_bridge.fetch_size = amd_x86_64_fetch_size;
3163	agp_bridge.cleanup = amd_8151_cleanup;
3164	agp_bridge.tlb_flush = amd_x86_64_tlbflush;
3165	agp_bridge.mask_memory = amd_8151_mask_memory;
3166	agp_bridge.agp_enable = agp_x86_64_agp_enable;
3167	agp_bridge.cache_flush = global_cache_flush;
3168	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
3169	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
3170	agp_bridge.insert_memory = x86_64_insert_memory;
3171	agp_bridge.remove_memory = agp_generic_remove_memory;
3172	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
3173	agp_bridge.free_by_type = agp_generic_free_by_type;
3174	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
3175	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
3176	agp_bridge.suspend = agp_generic_suspend;
3177	agp_bridge.resume = agp_generic_resume;
3178	agp_bridge.cant_use_aperture = 0;
3179
3180	return 0;
3181
3182	(void) pdev; /* unused */
3183}
3184
3185#endif /* CONFIG_AGP_AMD_8151 */
3186
3187#ifdef CONFIG_AGP_ALI
3188
3189static int ali_fetch_size(void)
3190{
3191	int i;
3192	u32 temp;
3193	aper_size_info_32 *values;
3194
3195	pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
3196	temp &= ~(0xfffffff0);
3197	values = A_SIZE_32(agp_bridge.aperture_sizes);
3198
3199	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
3200		if (temp == values[i].size_value) {
3201			agp_bridge.previous_size =
3202			    agp_bridge.current_size = (void *) (values + i);
3203			agp_bridge.aperture_size_idx = i;
3204			return values[i].size;
3205		}
3206	}
3207
3208	return 0;
3209}
3210
3211static void ali_tlbflush(agp_memory * mem)
3212{
3213	u32 temp;
3214
3215	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
3216// clear tag
3217	pci_write_config_dword(agp_bridge.dev, ALI_TAGCTRL,
3218			((temp & 0xfffffff0) | 0x00000001|0x00000002));
3219}
3220
3221static void ali_cleanup(void)
3222{
3223	aper_size_info_32 *previous_size;
3224	u32 temp;
3225
3226	previous_size = A_SIZE_32(agp_bridge.previous_size);
3227
3228	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
3229// clear tag
3230	pci_write_config_dword(agp_bridge.dev, ALI_TAGCTRL,
3231			((temp & 0xffffff00) | 0x00000001|0x00000002));
3232
3233	pci_read_config_dword(agp_bridge.dev,  ALI_ATTBASE, &temp);
3234	pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
3235			((temp & 0x00000ff0) | previous_size->size_value));
3236}
3237
3238static int ali_configure(void)
3239{
3240	u32 temp;
3241	aper_size_info_32 *current_size;
3242
3243	current_size = A_SIZE_32(agp_bridge.current_size);
3244
3245	/* aperture size and gatt addr */
3246	pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
3247	temp = (((temp & 0x00000ff0) | (agp_bridge.gatt_bus_addr & 0xfffff000))
3248			| (current_size->size_value & 0xf));
3249	pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, temp);
3250
3251	/* tlb control */
3252
3253	/*
3254	 *	Question: Jeff, ALi's patch deletes this:
3255	 *
3256	 *	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
3257	 *	pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
3258	 *			       ((temp & 0xffffff00) | 0x00000010));
3259	 *
3260	 *	and replaces it with the following, which seems to duplicate the
3261	 *	next couple of lines below it. I suspect this was an oversight,
3262	 *	but you might want to check up on this?
3263	 */
3264
3265	pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp);
3266	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
3267
3268	/* address to map to */
3269	pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp);
3270	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
3271
3272
3273	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
3274	temp &= 0xffffff7f;		//enable TLB
3275	pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, temp);
3276
3277	return 0;
3278}
3279
3280static unsigned long ali_mask_memory(unsigned long addr, int type)
3281{
3282	/* Memory type is ignored */
3283
3284	return addr | agp_bridge.masks[0].mask;
3285}
3286
3287static void ali_cache_flush(void)
3288{
3289	global_cache_flush();
3290
3291	if (agp_bridge.type == ALI_M1541) {
3292		int i, page_count;
3293		u32 temp;
3294
3295		page_count = 1 << A_SIZE_32(agp_bridge.current_size)->page_order;
3296		for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) {
3297			pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp);
3298			pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL,
3299					(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
3300					  (agp_bridge.gatt_bus_addr + i)) |
3301					    ALI_CACHE_FLUSH_EN));
3302		}
3303	}
3304}
3305
3306
3307static unsigned long ali_alloc_page(void)
3308{
3309	unsigned long p = agp_generic_alloc_page();
3310	if (!p)
3311		return 0;
3312
3313	/* probably not needed anymore */
3314	global_cache_flush();
3315
3316	if (agp_bridge.type == ALI_M1541) {
3317		u32 temp;
3318		pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp);
3319		pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL,
3320				(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
3321				  virt_to_phys((void *)p)) |
3322				    ALI_CACHE_FLUSH_EN ));
3323	}
3324	return p;
3325}
3326
3327static void ali_destroy_page(unsigned long addr)
3328{
3329	u32 temp;
3330	void *pt = (void *) addr;
3331
3332	if (pt == NULL)
3333		return;
3334
3335	global_cache_flush();
3336
3337	if (agp_bridge.type == ALI_M1541) {
3338		pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp);
3339		pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL,
3340				(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
3341				  virt_to_phys(pt)) |
3342				    ALI_CACHE_FLUSH_EN));
3343	}
3344
3345	agp_generic_destroy_page(addr);
3346}
3347
3348/* Setup function */
/* Setup function */
/* ALi GATT entries carry no extra flag bits: mask is zero. */
static gatt_mask ali_generic_masks[] =
{
	{0x00000000, 0}
};
3353
/* Aperture size table; the last field is the value matched against the
 * low nibble of ALI_ATTBASE in ali_fetch_size().  NOTE(review): field
 * order assumed to be {size MB, num entries, page order, size_value} —
 * confirm against the aper_size_info_32 definition in agp.h. */
static aper_size_info_32 ali_generic_sizes[7] =
{
	{256, 65536, 6, 10},
	{128, 32768, 5, 9},
	{64, 16384, 4, 8},
	{32, 8192, 3, 7},
	{16, 4096, 2, 6},
	{8, 2048, 1, 4},
	{4, 1024, 0, 3}
};
3364
3365static int __init ali_generic_setup (struct pci_dev *pdev)
3366{
3367	agp_bridge.masks = ali_generic_masks;
3368	agp_bridge.num_of_masks = 1;
3369	agp_bridge.aperture_sizes = (void *) ali_generic_sizes;
3370	agp_bridge.size_type = U32_APER_SIZE;
3371	agp_bridge.num_aperture_sizes = 7;
3372	agp_bridge.dev_private_data = NULL;
3373	agp_bridge.needs_scratch_page = FALSE;
3374	agp_bridge.configure = ali_configure;
3375	agp_bridge.fetch_size = ali_fetch_size;
3376	agp_bridge.cleanup = ali_cleanup;
3377	agp_bridge.tlb_flush = ali_tlbflush;
3378	agp_bridge.mask_memory = ali_mask_memory;
3379	agp_bridge.agp_enable = agp_generic_agp_enable;
3380	agp_bridge.cache_flush = ali_cache_flush;
3381	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
3382	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
3383	agp_bridge.insert_memory = agp_generic_insert_memory;
3384	agp_bridge.remove_memory = agp_generic_remove_memory;
3385	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
3386	agp_bridge.free_by_type = agp_generic_free_by_type;
3387	agp_bridge.agp_alloc_page = ali_alloc_page;
3388	agp_bridge.agp_destroy_page = ali_destroy_page;
3389	agp_bridge.suspend = agp_generic_suspend;
3390	agp_bridge.resume = agp_generic_resume;
3391	agp_bridge.cant_use_aperture = 0;
3392
3393	return 0;
3394
3395	(void) pdev; /* unused */
3396}
3397
3398#endif /* CONFIG_AGP_ALI */
3399
3400#ifdef CONFIG_AGP_SWORKS
/* One GATT page (or page directory): the physical page plus an uncached
 * kernel mapping of it obtained via ioremap_nocache(). */
typedef struct _serverworks_page_map {
	unsigned long *real;		/* kernel virtual address of the page */
	unsigned long *remapped;	/* uncached mapping used for all writes */
} serverworks_page_map;

/* Driver-private state for the ServerWorks bridge. */
static struct _serverworks_private {
	struct pci_dev *svrwrks_dev;	/* device one */
	volatile u8 *registers;		/* ioremapped MMIO register window */
	serverworks_page_map **gatt_pages;	/* NULL-terminated array of GATT pages */
	int num_tables;			/* number of entries in gatt_pages */
	serverworks_page_map scratch_dir;	/* fake directory of scratch entries */

	int gart_addr_ofs;		/* config-space offset of aperture BAR */
	int mm_addr_ofs;		/* config-space offset of MMIO BAR */
} serverworks_private;
3416
3417static int serverworks_create_page_map(serverworks_page_map *page_map)
3418{
3419	int i;
3420	int err = 0;
3421
3422	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
3423	if (page_map->real == NULL) {
3424		return -ENOMEM;
3425	}
3426	SetPageReserved(virt_to_page(page_map->real));
3427#ifdef CONFIG_X86
3428	err = change_page_attr(virt_to_page(page_map->real), 1, PAGE_KERNEL_NOCACHE);
3429#endif
3430	CACHE_FLUSH();
3431	if (!err)
3432	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
3433					    PAGE_SIZE);
3434	if (page_map->remapped == NULL || err) {
3435		ClearPageReserved(virt_to_page(page_map->real));
3436		free_page((unsigned long) page_map->real);
3437		page_map->real = NULL;
3438		return -ENOMEM;
3439	}
3440	CACHE_FLUSH();
3441
3442	for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
3443		page_map->remapped[i] = agp_bridge.scratch_page;
3444	}
3445
3446	return 0;
3447}
3448
/*
 * Release a page created by serverworks_create_page_map(): restore normal
 * caching (x86 only), drop the uncached mapping, unreserve and free the page.
 */
static void serverworks_free_page_map(serverworks_page_map *page_map)
{
#ifdef CONFIG_X86
	change_page_attr(virt_to_page(page_map->real),1,PAGE_KERNEL);
#endif
	iounmap(page_map->remapped);
	ClearPageReserved(virt_to_page(page_map->real));
	free_page((unsigned long) page_map->real);
}
3458
3459static void serverworks_free_gatt_pages(void)
3460{
3461	int i;
3462	serverworks_page_map **tables;
3463	serverworks_page_map *entry;
3464
3465	tables = serverworks_private.gatt_pages;
3466	for(i = 0; i < serverworks_private.num_tables; i++) {
3467		entry = tables[i];
3468		if (entry != NULL) {
3469			if (entry->real != NULL) {
3470				serverworks_free_page_map(entry);
3471			}
3472			kfree(entry);
3473		}
3474	}
3475	kfree(tables);
3476}
3477
3478static int serverworks_create_gatt_pages(int nr_tables)
3479{
3480	serverworks_page_map **tables;
3481	serverworks_page_map *entry;
3482	int retval = 0;
3483	int i;
3484
3485	tables = kmalloc((nr_tables + 1) * sizeof(serverworks_page_map *),
3486			 GFP_KERNEL);
3487	if (tables == NULL) {
3488		return -ENOMEM;
3489	}
3490	memset(tables, 0, sizeof(serverworks_page_map *) * (nr_tables + 1));
3491	for (i = 0; i < nr_tables; i++) {
3492		entry = kmalloc(sizeof(serverworks_page_map), GFP_KERNEL);
3493		if (entry == NULL) {
3494			retval = -ENOMEM;
3495			break;
3496		}
3497		memset(entry, 0, sizeof(serverworks_page_map));
3498		tables[i] = entry;
3499		retval = serverworks_create_page_map(entry);
3500		if (retval != 0) break;
3501	}
3502	serverworks_private.num_tables = nr_tables;
3503	serverworks_private.gatt_pages = tables;
3504
3505	if (retval != 0) serverworks_free_gatt_pages();
3506
3507	return retval;
3508}
3509
/* Map a GART bus address to the uncached kernel view of the GATT page
 * covering it (one GATT page per 4MB of aperture). */
#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)

/* Absolute page-directory slot: which 4MB region the address falls in. */
#ifndef GET_PAGE_DIR_OFF
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#endif

/* Directory slot relative to the start of the aperture. */
#ifndef GET_PAGE_DIR_IDX
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr))
#endif

/* Entry index within one GATT page (address bits 21..12). */
#ifndef GET_GATT_OFF
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#endif
3525
/*
 * Build the two-level ServerWorks GATT: a page directory whose slots point
 * at per-4MB GATT pages, plus a "scratch directory" used to back unused
 * directory slots.  Returns 0 or a negative errno; on failure all pages
 * allocated so far are released.
 */
static int serverworks_create_gatt_table(void)
{
	aper_size_info_lvl2 *value;
	serverworks_page_map page_dir;
	int retval;
	u32 temp;
	int i;

	value = A_SIZE_LVL2(agp_bridge.current_size);
	retval = serverworks_create_page_map(&page_dir);
	if (retval != 0) {
		return retval;
	}
	retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
	if (retval != 0) {
		serverworks_free_page_map(&page_dir);
		return retval;
	}
	/* Create a fake scratch directory */
	/* Every directory slot initially points at the scratch directory,
	 * whose entries all point at the scratch page; bit 0 marks a
	 * directory entry as valid. */
	for(i = 0; i < 1024; i++) {
		serverworks_private.scratch_dir.remapped[i] = (unsigned long) agp_bridge.scratch_page;
		page_dir.remapped[i] =
			virt_to_bus(serverworks_private.scratch_dir.real);
		page_dir.remapped[i] |= 0x00000001;
	}

	/* One GATT page holds 1024 entries. */
	retval = serverworks_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		serverworks_free_page_map(&page_dir);
		serverworks_free_page_map(&serverworks_private.scratch_dir);
		return retval;
	}

	agp_bridge.gatt_table_real = page_dir.real;
	agp_bridge.gatt_table = page_dir.remapped;
	agp_bridge.gatt_bus_addr = virt_to_bus(page_dir.real);

	/* Get the address for the gart region.
	 * This is a bus address even on the alpha, b/c its
	 * used to program the agp master not the cpu
	 */

	pci_read_config_dword(agp_bridge.dev,
			      serverworks_private.gart_addr_ofs,
			      &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* Calculate the agp offset */

	/* Replace the scratch-directory slots that cover the real aperture
	 * with the freshly-allocated GATT pages (bit 0 = valid). */
	for(i = 0; i < value->num_entries / 1024; i++) {
		page_dir.remapped[i] =
			virt_to_bus(serverworks_private.gatt_pages[i]->real);
		page_dir.remapped[i] |= 0x00000001;
	}

	return 0;
}
3583
3584static int serverworks_free_gatt_table(void)
3585{
3586	serverworks_page_map page_dir;
3587
3588	page_dir.real = agp_bridge.gatt_table_real;
3589	page_dir.remapped = agp_bridge.gatt_table;
3590
3591	serverworks_free_gatt_pages();
3592	serverworks_free_page_map(&page_dir);
3593	serverworks_free_page_map(&serverworks_private.scratch_dir);
3594	return 0;
3595}
3596
/*
 * Probe the aperture size: write the all-sizes mask to the aperture BAR,
 * read back which bits stick, restore the original value, then match the
 * result against the size table.  Returns the size in MB, or 0 if no
 * table entry matches.
 */
static int serverworks_fetch_size(void)
{
	int i;
	u32 temp;	/* saved original BAR value */
	u32 temp2;	/* read-back after writing the size mask */
	aper_size_info_lvl2 *values;

	values = A_SIZE_LVL2(agp_bridge.aperture_sizes);
	pci_read_config_dword(agp_bridge.dev,
			      serverworks_private.gart_addr_ofs,
			      &temp);
	pci_write_config_dword(agp_bridge.dev,
			       serverworks_private.gart_addr_ofs,
			       SVWRKS_SIZE_MASK);
	pci_read_config_dword(agp_bridge.dev,
			      serverworks_private.gart_addr_ofs,
			      &temp2);
	/* Restore the BAR before anything else touches the device. */
	pci_write_config_dword(agp_bridge.dev,
			       serverworks_private.gart_addr_ofs,
			       temp);
	temp2 &= SVWRKS_SIZE_MASK;

	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
		if (temp2 == values[i].size_value) {
			agp_bridge.previous_size =
			    agp_bridge.current_size = (void *) (values + i);

			agp_bridge.aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}
3631
3632static int serverworks_configure(void)
3633{
3634	aper_size_info_lvl2 *current_size;
3635	u32 temp;
3636	u8 enable_reg;
3637	u8 cap_ptr;
3638	u32 cap_id;
3639	u16 cap_reg;
3640
3641	current_size = A_SIZE_LVL2(agp_bridge.current_size);
3642
3643	/* Get the memory mapped registers */
3644	pci_read_config_dword(agp_bridge.dev,
3645			      serverworks_private.mm_addr_ofs,
3646			      &temp);
3647	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
3648	serverworks_private.registers = (volatile u8 *) ioremap(temp, 4096);
3649
3650	OUTREG8(serverworks_private.registers, SVWRKS_GART_CACHE, 0x0a);
3651
3652	OUTREG32(serverworks_private.registers, SVWRKS_GATTBASE,
3653		 agp_bridge.gatt_bus_addr);
3654
3655	cap_reg = INREG16(serverworks_private.registers, SVWRKS_COMMAND);
3656	cap_reg &= ~0x0007;
3657	cap_reg |= 0x4;
3658	OUTREG16(serverworks_private.registers, SVWRKS_COMMAND, cap_reg);
3659
3660	pci_read_config_byte(serverworks_private.svrwrks_dev,
3661			     SVWRKS_AGP_ENABLE, &enable_reg);
3662	enable_reg |= 0x1; /* Agp Enable bit */
3663	pci_write_config_byte(serverworks_private.svrwrks_dev,
3664			      SVWRKS_AGP_ENABLE, enable_reg);
3665	agp_bridge.tlb_flush(NULL);
3666
3667	pci_read_config_byte(serverworks_private.svrwrks_dev, 0x34, &cap_ptr);
3668	if (cap_ptr != 0x00) {
3669		do {
3670			pci_read_config_dword(serverworks_private.svrwrks_dev,
3671					      cap_ptr, &cap_id);
3672
3673			if ((cap_id & 0xff) != 0x02)
3674				cap_ptr = (cap_id >> 8) & 0xff;
3675		}
3676		while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
3677	}
3678	agp_bridge.capndx = cap_ptr;
3679
3680	/* Fill in the mode register */
3681	pci_read_config_dword(serverworks_private.svrwrks_dev,
3682			      agp_bridge.capndx + 4,
3683			      &agp_bridge.mode);
3684
3685	pci_read_config_byte(agp_bridge.dev,
3686			     SVWRKS_CACHING,
3687			     &enable_reg);
3688	enable_reg &= ~0x3;
3689	pci_write_config_byte(agp_bridge.dev,
3690			      SVWRKS_CACHING,
3691			      enable_reg);
3692
3693	pci_read_config_byte(agp_bridge.dev,
3694			     SVWRKS_FEATURE,
3695			     &enable_reg);
3696	enable_reg |= (1<<6);
3697	pci_write_config_byte(agp_bridge.dev,
3698			      SVWRKS_FEATURE,
3699			      enable_reg);
3700
3701	return 0;
3702}
3703
/* Drop the MMIO register mapping created by serverworks_configure(). */
static void serverworks_cleanup(void)
{
	iounmap((void *) serverworks_private.registers);
}
3708
3709/*
3710 * This routine could be implemented by taking the addresses
3711 * written to the GATT, and flushing them individually.  However
3712 * currently it just flushes the whole table.  Which is probably
3713 * more efficent, since agp_memory blocks can be a large number of
3714 * entries.
3715 */
3716
3717static void serverworks_tlbflush(agp_memory * temp)
3718{
3719	unsigned long end;
3720
3721	OUTREG8(serverworks_private.registers, SVWRKS_POSTFLUSH, 0x01);
3722	end = jiffies + 3*HZ;
3723	while(INREG8(serverworks_private.registers,
3724		     SVWRKS_POSTFLUSH) == 0x01) {
3725		if((signed)(end - jiffies) <= 0) {
3726			printk(KERN_ERR "Posted write buffer flush took more"
3727			       "then 3 seconds\n");
3728		}
3729	}
3730	OUTREG32(serverworks_private.registers, SVWRKS_DIRFLUSH, 0x00000001);
3731	end = jiffies + 3*HZ;
3732	while(INREG32(serverworks_private.registers,
3733		     SVWRKS_DIRFLUSH) == 0x00000001) {
3734		if((signed)(end - jiffies) <= 0) {
3735			printk(KERN_ERR "TLB flush took more"
3736			       "then 3 seconds\n");
3737		}
3738	}
3739}
3740
3741static unsigned long serverworks_mask_memory(unsigned long addr, int type)
3742{
3743	/* Only type 0 is supported by the serverworks chipsets */
3744
3745	return addr | agp_bridge.masks[0].mask;
3746}
3747
3748static int serverworks_insert_memory(agp_memory * mem,
3749			     off_t pg_start, int type)
3750{
3751	int i, j, num_entries;
3752	unsigned long *cur_gatt;
3753	unsigned long addr;
3754
3755	num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries;
3756
3757	if (type != 0 || mem->type != 0) {
3758		return -EINVAL;
3759	}
3760	if ((pg_start + mem->page_count) > num_entries) {
3761		return -EINVAL;
3762	}
3763
3764	j = pg_start;
3765	while (j < (pg_start + mem->page_count)) {
3766		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
3767		cur_gatt = SVRWRKS_GET_GATT(addr);
3768		if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) {
3769			return -EBUSY;
3770		}
3771		j++;
3772	}
3773
3774	if (mem->is_flushed == FALSE) {
3775		CACHE_FLUSH();
3776		mem->is_flushed = TRUE;
3777	}
3778
3779	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
3780		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
3781		cur_gatt = SVRWRKS_GET_GATT(addr);
3782		cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i];
3783	}
3784	agp_bridge.tlb_flush(mem);
3785	return 0;
3786}
3787
3788static int serverworks_remove_memory(agp_memory * mem, off_t pg_start,
3789			     int type)
3790{
3791	int i;
3792	unsigned long *cur_gatt;
3793	unsigned long addr;
3794
3795	if (type != 0 || mem->type != 0) {
3796		return -EINVAL;
3797	}
3798
3799	CACHE_FLUSH();
3800	agp_bridge.tlb_flush(mem);
3801
3802	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
3803		addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr;
3804		cur_gatt = SVRWRKS_GET_GATT(addr);
3805		cur_gatt[GET_GATT_OFF(addr)] =
3806			(unsigned long) agp_bridge.scratch_page;
3807	}
3808
3809	agp_bridge.tlb_flush(mem);
3810	return 0;
3811}
3812
/* GATT entry mask: bit 0 is the valid bit. */
static gatt_mask serverworks_masks[] =
{
	{0x00000001, 0}
};
3817
/* Aperture size table; size_value is matched against the BAR read-back in
 * serverworks_fetch_size().  NOTE(review): field order assumed to be
 * {size MB, num entries, size_value} — confirm against agp.h. */
static aper_size_info_lvl2 serverworks_sizes[7] =
{
	{2048, 524288, 0x80000000},
	{1024, 262144, 0xc0000000},
	{512, 131072, 0xe0000000},
	{256, 65536, 0xf0000000},
	{128, 32768, 0xf8000000},
	{64, 16384, 0xfc000000},
	{32, 8192, 0xfe000000}
};
3828
/*
 * Negotiate and enable AGP for the ServerWorks target and every AGP master:
 * intersect the requested mode with what the target and each device report,
 * pick the highest common speed, then write the agreed command word to the
 * target and to every device's AGP command register.
 */
static void serverworks_agp_enable(u32 mode)
{
	struct pci_dev *device = NULL;
	u32 command, scratch, cap_id;
	u8 cap_ptr;

	/* Start from the target's AGP status register (capability + 4). */
	pci_read_config_dword(serverworks_private.svrwrks_dev,
			      agp_bridge.capndx + 4,
			      &command);

	/*
	 * PASS1: go through all devices that claim to be
	 *        AGP devices and collect their data.
	 */


	pci_for_each_dev(device) {
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr != 0x00) {
			/* Re-walk the list by hand to confirm the AGP cap. */
			do {
				pci_read_config_dword(device,
						      cap_ptr, &cap_id);

				if ((cap_id & 0xff) != 0x02)
					cap_ptr = (cap_id >> 8) & 0xff;
			}
			while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
		}
		if (cap_ptr != 0x00) {
			/*
			 * Ok, here we have a AGP device. Disable impossible
			 * settings, and adjust the readqueue to the minimum.
			 */

			pci_read_config_dword(device, cap_ptr + 4, &scratch);

			/* adjust RQ depth */
			command =
			    ((command & ~0xff000000) |
			     min_t(u32, (mode & 0xff000000),
				 min_t(u32, (command & 0xff000000),
				     (scratch & 0xff000000))));

			/* disable SBA if it's not supported */
			if (!((command & 0x00000200) &&
			      (scratch & 0x00000200) &&
			      (mode & 0x00000200)))
				command &= ~0x00000200;

			/* disable FW */
			command &= ~0x00000010;

			/* disable >x4 rates unconditionally */
			command &= ~0x00000008;

			/* keep each rate bit only if all three agree */
			if (!((command & 4) &&
			      (scratch & 4) &&
			      (mode & 4)))
				command &= ~0x00000004;

			if (!((command & 2) &&
			      (scratch & 2) &&
			      (mode & 2)))
				command &= ~0x00000002;

			if (!((command & 1) &&
			      (scratch & 1) &&
			      (mode & 1)))
				command &= ~0x00000001;
		}
	}
	/*
	 * PASS2: Figure out the 4X/2X/1X setting and enable the
	 *        target (our motherboard chipset).
	 */

	if (command & 4) {
		command &= ~3;	/* 4X */
	}
	if (command & 2) {
		command &= ~5;	/* 2X */
	}
	if (command & 1) {
		command &= ~6;	/* 1X */
	}
	command |= 0x00000100;	/* AGP enable bit */

	pci_write_config_dword(serverworks_private.svrwrks_dev,
			       agp_bridge.capndx + 8,
			       command);

	/*
	 * PASS3: Go through all AGP devices and update the
	 *        command registers.
	 */

	pci_for_each_dev(device) {
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr != 0x00)
			pci_write_config_dword(device, cap_ptr + 8, command);
	}
}
3930
3931static int __init serverworks_setup (struct pci_dev *pdev)
3932{
3933	u32 temp;
3934	u32 temp2;
3935
3936	serverworks_private.svrwrks_dev = pdev;
3937
3938	agp_bridge.masks = serverworks_masks;
3939	agp_bridge.num_of_masks = 1;
3940	agp_bridge.aperture_sizes = (void *) serverworks_sizes;
3941	agp_bridge.size_type = LVL2_APER_SIZE;
3942	agp_bridge.num_aperture_sizes = 7;
3943	agp_bridge.dev_private_data = (void *) &serverworks_private;
3944	agp_bridge.needs_scratch_page = TRUE;
3945	agp_bridge.configure = serverworks_configure;
3946	agp_bridge.fetch_size = serverworks_fetch_size;
3947	agp_bridge.cleanup = serverworks_cleanup;
3948	agp_bridge.tlb_flush = serverworks_tlbflush;
3949	agp_bridge.mask_memory = serverworks_mask_memory;
3950	agp_bridge.agp_enable = serverworks_agp_enable;
3951	agp_bridge.cache_flush = global_cache_flush;
3952	agp_bridge.create_gatt_table = serverworks_create_gatt_table;
3953	agp_bridge.free_gatt_table = serverworks_free_gatt_table;
3954	agp_bridge.insert_memory = serverworks_insert_memory;
3955	agp_bridge.remove_memory = serverworks_remove_memory;
3956	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
3957	agp_bridge.free_by_type = agp_generic_free_by_type;
3958	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
3959	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
3960	agp_bridge.suspend = agp_generic_suspend;
3961	agp_bridge.resume = agp_generic_resume;
3962	agp_bridge.cant_use_aperture = 0;
3963
3964	pci_read_config_dword(agp_bridge.dev,
3965			      SVWRKS_APSIZE,
3966			      &temp);
3967
3968	serverworks_private.gart_addr_ofs = 0x10;
3969
3970	if(temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
3971		pci_read_config_dword(agp_bridge.dev,
3972				      SVWRKS_APSIZE + 4,
3973				      &temp2);
3974		if(temp2 != 0) {
3975			printk("Detected 64 bit aperture address, but top "
3976			       "bits are not zero.  Disabling agp\n");
3977			return -ENODEV;
3978		}
3979		serverworks_private.mm_addr_ofs = 0x18;
3980	} else {
3981		serverworks_private.mm_addr_ofs = 0x14;
3982	}
3983
3984	pci_read_config_dword(agp_bridge.dev,
3985			      serverworks_private.mm_addr_ofs,
3986			      &temp);
3987	if(temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
3988		pci_read_config_dword(agp_bridge.dev,
3989				      serverworks_private.mm_addr_ofs + 4,
3990				      &temp2);
3991		if(temp2 != 0) {
3992			printk("Detected 64 bit MMIO address, but top "
3993			       "bits are not zero.  Disabling agp\n");
3994			return -ENODEV;
3995		}
3996	}
3997
3998	return 0;
3999}
4000
4001#endif /* CONFIG_AGP_SWORKS */
4002
4003#ifdef CONFIG_AGP_HP_ZX1
4004
/* log2 of a power of two, via ffz of the complement. */
#ifndef log2
#define log2(x)		ffz(~(x))
#endif

/* Fixed 1GB IOVA window; the GART occupies half of it. */
#define HP_ZX1_IOVA_BASE	GB(1UL)
#define HP_ZX1_IOVA_SIZE	GB(1UL)
#define HP_ZX1_GART_SIZE	(HP_ZX1_IOVA_SIZE / 2)
/* Marker sba_iommu leaves in the first reserved GART PDIR entry. */
#define HP_ZX1_SBA_IOMMU_COOKIE	0x0000badbadc0ffeeUL

/* High bit marks a PDIR entry as valid. */
#define HP_ZX1_PDIR_VALID_BIT	0x8000000000000000UL
/* Convert an IOVA to its slot index in the PDIR. */
#define HP_ZX1_IOVA_TO_PDIR(va)	((va - hp_private.iova_base) >> \
					hp_private.io_tlb_shift)
4017
/* Single fixed aperture size entry, populated at run time. */
static aper_size_info_fixed hp_zx1_sizes[] =
{
	{0, 0, 0},		/* filled in by hp_zx1_fetch_size() */
};
4022
/* GATT entry mask: the PDIR valid bit. */
static gatt_mask hp_zx1_masks[] =
{
	{HP_ZX1_PDIR_VALID_BIT, 0}
};
4027
/* Driver-private state for the HP ZX1 IOC/GART. */
static struct _hp_private {
	struct pci_dev *ioc;		/* the IOC device */
	volatile u8 *registers;		/* ioremapped IOC CSRs */
	u64 *io_pdir;		// PDIR for entire IOVA
	u64 *gatt;		// PDIR just for GART (subset of above)
	u64 gatt_entries;	/* number of GART PDIR slots */
	u64 iova_base;		/* start of the IOVA window */
	u64 gart_base;		/* start of the GART within the window */
	u64 gart_size;
	u64 io_pdir_size;	/* bytes, whole-IOVA PDIR */
	int io_pdir_owner;	// do we own it, or share it with sba_iommu?
	int io_page_size;	/* 1 << io_tlb_shift */
	int io_tlb_shift;
	int io_tlb_ps;		// IOC ps config
	int io_pages_per_kpage;	/* IO pages per kernel page */
} hp_private;
4044
/*
 * Adopt an IOC already configured by sba_iommu: read its page size, IOVA
 * base, and PDIR location from the hardware and use the upper half of the
 * window as our GART.  Returns 0, or -ENODEV if the page-size config is
 * invalid or sba_iommu did not leave its reservation cookie for us.
 */
static int __init hp_zx1_ioc_shared(void)
{
	struct _hp_private *hp = &hp_private;

	printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n");

	/*
	 * IOC already configured by sba_iommu module; just use
	 * its setup.  We assume:
	 * 	- IOVA space is 1Gb in size
	 * 	- first 512Mb is IOMMU, second 512Mb is GART
	 */
	hp->io_tlb_ps = INREG64(hp->registers, HP_ZX1_TCNFG);
	switch (hp->io_tlb_ps) {
		case 0: hp->io_tlb_shift = 12; break;
		case 1: hp->io_tlb_shift = 13; break;
		case 2: hp->io_tlb_shift = 14; break;
		case 3: hp->io_tlb_shift = 16; break;
		default:
			printk(KERN_ERR PFX "Invalid IOTLB page size "
			       "configuration 0x%x\n", hp->io_tlb_ps);
			hp->gatt = 0;
			hp->gatt_entries = 0;
			return -ENODEV;
	}
	hp->io_page_size = 1 << hp->io_tlb_shift;
	hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;

	/* Low bit of IBASE is the enable flag, not part of the address. */
	hp->iova_base = INREG64(hp->registers, HP_ZX1_IBASE) & ~0x1;
	hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;

	hp->gart_size = HP_ZX1_GART_SIZE;
	hp->gatt_entries = hp->gart_size / hp->io_page_size;

	hp->io_pdir = phys_to_virt(INREG64(hp->registers, HP_ZX1_PDIR_BASE));
	hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];

	/* sba_iommu marks the GART region it reserved with this cookie. */
	if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
	    	hp->gatt = 0;
		hp->gatt_entries = 0;
		printk(KERN_ERR PFX "No reserved IO PDIR entry found; "
		       "GART disabled\n");
		return -ENODEV;
	}

	return 0;
}
4092
4093static int __init hp_zx1_ioc_owner(u8 ioc_rev)
4094{
4095	struct _hp_private *hp = &hp_private;
4096
4097	printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n");
4098
4099	/*
4100	 * Select an IOV page size no larger than system page size.
4101	 */
4102	if (PAGE_SIZE >= KB(64)) {
4103		hp->io_tlb_shift = 16;
4104		hp->io_tlb_ps = 3;
4105	} else if (PAGE_SIZE >= KB(16)) {
4106		hp->io_tlb_shift = 14;
4107		hp->io_tlb_ps = 2;
4108	} else if (PAGE_SIZE >= KB(8)) {
4109		hp->io_tlb_shift = 13;
4110		hp->io_tlb_ps = 1;
4111	} else {
4112		hp->io_tlb_shift = 12;
4113		hp->io_tlb_ps = 0;
4114	}
4115	hp->io_page_size = 1 << hp->io_tlb_shift;
4116	hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
4117
4118	hp->iova_base = HP_ZX1_IOVA_BASE;
4119	hp->gart_size = HP_ZX1_GART_SIZE;
4120	hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size;
4121
4122	hp->gatt_entries = hp->gart_size / hp->io_page_size;
4123	hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64);
4124
4125	return 0;
4126}
4127
/*
 * Locate the ZX1 IOC, map its CSRs, and decide whether we own the IOTLB
 * (it is currently disabled) or must share it with sba_iommu; dispatch to
 * the matching init routine.  Returns 0 or -ENODEV.
 */
static int __init hp_zx1_ioc_init(void)
{
	struct _hp_private *hp = &hp_private;
	struct pci_dev *ioc;
	int i;
	u8 ioc_rev;

	ioc = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_IOC, NULL);
	if (!ioc) {
		printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no IOC\n");
		return -ENODEV;
	}
	hp->ioc = ioc;

	pci_read_config_byte(ioc, PCI_REVISION_ID, &ioc_rev);

	/* Map the first memory BAR we find as the CSR window. */
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pci_resource_flags(ioc, i) == IORESOURCE_MEM) {
			hp->registers = (u8 *) ioremap(pci_resource_start(ioc,
									    i),
						    pci_resource_len(ioc, i));
			break;
		}
	}
	if (!hp->registers) {
		printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no CSRs\n");

		return -ENODEV;
	}

	/*
	 * If the IOTLB is currently disabled, we can take it over.
	 * Otherwise, we have to share with sba_iommu.
	 */
	hp->io_pdir_owner = (INREG64(hp->registers, HP_ZX1_IBASE) & 0x1) == 0;

	if (hp->io_pdir_owner)
		return hp_zx1_ioc_owner(ioc_rev);

	return hp_zx1_ioc_shared();
}
4169
4170static int hp_zx1_fetch_size(void)
4171{
4172	int size;
4173
4174	size = hp_private.gart_size / MB(1);
4175	hp_zx1_sizes[0].size = size;
4176	agp_bridge.current_size = (void *) &hp_zx1_sizes[0];
4177	return size;
4178}
4179
/*
 * Program the bridge: record the GART bus address and AGP mode, and — only
 * if we own the IOTLB — write the PDIR base, page size, IOVA mask/base,
 * and purge the whole window.  Always returns 0.
 */
static int hp_zx1_configure(void)
{
	struct _hp_private *hp = &hp_private;

	agp_bridge.gart_bus_addr = hp->gart_base;
	agp_bridge.capndx = pci_find_capability(agp_bridge.dev, PCI_CAP_ID_AGP);
	pci_read_config_dword(agp_bridge.dev,
		agp_bridge.capndx + PCI_AGP_STATUS, &agp_bridge.mode);

	if (hp->io_pdir_owner) {
		OUTREG64(hp->registers, HP_ZX1_PDIR_BASE,
			virt_to_phys(hp->io_pdir));
		OUTREG64(hp->registers, HP_ZX1_TCNFG, hp->io_tlb_ps);
		OUTREG64(hp->registers, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1));
		/* Low bit of IBASE enables the IOTLB. */
		OUTREG64(hp->registers, HP_ZX1_IBASE, hp->iova_base | 0x1);
		OUTREG64(hp->registers, HP_ZX1_PCOM,
			hp->iova_base | log2(HP_ZX1_IOVA_SIZE));
		/* Read back to flush the posted purge command. */
		INREG64(hp->registers, HP_ZX1_PCOM);
	}

	return 0;
}
4202
4203static void hp_zx1_cleanup(void)
4204{
4205	struct _hp_private *hp = &hp_private;
4206
4207	if (hp->io_pdir_owner)
4208		OUTREG64(hp->registers, HP_ZX1_IBASE, 0);
4209	iounmap((void *) hp->registers);
4210}
4211
4212static void hp_zx1_tlbflush(agp_memory * mem)
4213{
4214	struct _hp_private *hp = &hp_private;
4215
4216	OUTREG64(hp->registers, HP_ZX1_PCOM,
4217		hp->gart_base | log2(hp->gart_size));
4218	INREG64(hp->registers, HP_ZX1_PCOM);
4219}
4220
/*
 * Allocate the I/O PDIR (only when we own it; when shared, sba_iommu
 * already provided it) and initialize every GART entry to the scratch
 * page.  Returns 0 or -ENOMEM.
 */
static int hp_zx1_create_gatt_table(void)
{
	struct _hp_private *hp = &hp_private;
	int i;

	if (hp->io_pdir_owner) {
		/* PDIR must be physically contiguous. */
		hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
						get_order(hp->io_pdir_size));
		if (!hp->io_pdir) {
			printk(KERN_ERR PFX "Couldn't allocate contiguous "
				"memory for I/O PDIR\n");
			hp->gatt = 0;
			hp->gatt_entries = 0;
			return -ENOMEM;
		}
		memset(hp->io_pdir, 0, hp->io_pdir_size);

		/* GART is the tail portion of the PDIR. */
		hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
	}

	for (i = 0; i < hp->gatt_entries; i++) {
		hp->gatt[i] = (unsigned long) agp_bridge.scratch_page;
	}

	return 0;
}
4247
4248static int hp_zx1_free_gatt_table(void)
4249{
4250	struct _hp_private *hp = &hp_private;
4251
4252	if (hp->io_pdir_owner)
4253		free_pages((unsigned long) hp->io_pdir,
4254			    get_order(hp->io_pdir_size));
4255	else
4256		hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
4257	return 0;
4258}
4259
/*
 * Bind an agp_memory block into the GART.  Each kernel page may map to
 * several IO pages (io_pages_per_kpage), so one GATT entry is written per
 * IO page.  Returns 0, -EINVAL on bad type/range, or -EBUSY if any target
 * entry is already in use.
 */
static int hp_zx1_insert_memory(agp_memory * mem, off_t pg_start, int type)
{
	struct _hp_private *hp = &hp_private;
	int i, k;
	off_t j, io_pg_start;
	int io_pg_count;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}

	/* Convert kernel-page units to IO-page units. */
	io_pg_start = hp->io_pages_per_kpage * pg_start;
	io_pg_count = hp->io_pages_per_kpage * mem->page_count;
	if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
		return -EINVAL;
	}

	/* First pass: every destination entry must be free. */
	j = io_pg_start;
	while (j < (io_pg_start + io_pg_count)) {
		if (hp->gatt[j]) {
			return -EBUSY;
		}
		j++;
	}

	if (mem->is_flushed == FALSE) {
		CACHE_FLUSH();
		mem->is_flushed = TRUE;
	}

	/* Second pass: write io_pages_per_kpage entries per kernel page. */
	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
		unsigned long paddr;

		paddr = mem->memory[i];
		for (k = 0;
		     k < hp->io_pages_per_kpage;
		     k++, j++, paddr += hp->io_page_size) {
			hp->gatt[j] = agp_bridge.mask_memory(paddr, type);
		}
	}

	agp_bridge.tlb_flush(mem);
	return 0;
}
4304
4305static int hp_zx1_remove_memory(agp_memory * mem, off_t pg_start, int type)
4306{
4307	struct _hp_private *hp = &hp_private;
4308	int i, io_pg_start, io_pg_count;
4309
4310	if (type != 0 || mem->type != 0) {
4311		return -EINVAL;
4312	}
4313
4314	io_pg_start = hp->io_pages_per_kpage * pg_start;
4315	io_pg_count = hp->io_pages_per_kpage * mem->page_count;
4316	for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
4317		hp->gatt[i] = agp_bridge.scratch_page;
4318	}
4319
4320	agp_bridge.tlb_flush(mem);
4321	return 0;
4322}
4323
4324static unsigned long hp_zx1_mask_memory(unsigned long addr, int type)
4325{
4326	return HP_ZX1_PDIR_VALID_BIT | addr;
4327}
4328
4329static unsigned long hp_zx1_unmask_memory(unsigned long addr)
4330{
4331	return addr & ~(HP_ZX1_PDIR_VALID_BIT);
4332}
4333
4334static int __init hp_zx1_setup (struct pci_dev *pdev)
4335{
4336	agp_bridge.masks = hp_zx1_masks;
4337	agp_bridge.num_of_masks = 1;
4338	agp_bridge.dev_private_data = NULL;
4339	agp_bridge.size_type = FIXED_APER_SIZE;
4340	agp_bridge.needs_scratch_page = FALSE;
4341	agp_bridge.configure = hp_zx1_configure;
4342	agp_bridge.fetch_size = hp_zx1_fetch_size;
4343	agp_bridge.cleanup = hp_zx1_cleanup;
4344	agp_bridge.tlb_flush = hp_zx1_tlbflush;
4345	agp_bridge.mask_memory = hp_zx1_mask_memory;
4346	agp_bridge.unmask_memory = hp_zx1_unmask_memory;
4347	agp_bridge.agp_enable = agp_generic_agp_enable;
4348	agp_bridge.cache_flush = global_cache_flush;
4349	agp_bridge.create_gatt_table = hp_zx1_create_gatt_table;
4350	agp_bridge.free_gatt_table = hp_zx1_free_gatt_table;
4351	agp_bridge.insert_memory = hp_zx1_insert_memory;
4352	agp_bridge.remove_memory = hp_zx1_remove_memory;
4353	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
4354	agp_bridge.free_by_type = agp_generic_free_by_type;
4355	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
4356	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
4357	agp_bridge.cant_use_aperture = 1;
4358
4359	return hp_zx1_ioc_init();
4360
4361	(void) pdev; /* unused */
4362}
4363
4364#endif /* CONFIG_AGP_HP_ZX1 */
4365
4366/* per-chipset initialization data.
4367 * note -- all chipsets for a single vendor MUST be grouped together
4368 */
4369static struct {
4370	unsigned short device_id; /* first, to make table easier to read */
4371	unsigned short vendor_id;
4372	enum chipset_type chipset;
4373	const char *vendor_name;
4374	const char *chipset_name;
4375	int (*chipset_setup) (struct pci_dev *pdev);
4376} agp_bridge_info[] __initdata = {
4377
4378#ifdef CONFIG_AGP_ALI
4379	{ PCI_DEVICE_ID_AL_M1541_0,
4380		PCI_VENDOR_ID_AL,
4381		ALI_M1541,
4382		"Ali",
4383		"M1541",
4384		ali_generic_setup },
4385	{ PCI_DEVICE_ID_AL_M1621_0,
4386		PCI_VENDOR_ID_AL,
4387		ALI_M1621,
4388		"Ali",
4389		"M1621",
4390		ali_generic_setup },
4391	{ PCI_DEVICE_ID_AL_M1631_0,
4392		PCI_VENDOR_ID_AL,
4393		ALI_M1631,
4394		"Ali",
4395		"M1631",
4396		ali_generic_setup },
4397	{ PCI_DEVICE_ID_AL_M1632_0,
4398		PCI_VENDOR_ID_AL,
4399		ALI_M1632,
4400		"Ali",
4401		"M1632",
4402		ali_generic_setup },
4403	{ PCI_DEVICE_ID_AL_M1641_0,
4404		PCI_VENDOR_ID_AL,
4405		ALI_M1641,
4406		"Ali",
4407		"M1641",
4408		ali_generic_setup },
4409	{ PCI_DEVICE_ID_AL_M1644_0,
4410		PCI_VENDOR_ID_AL,
4411		ALI_M1644,
4412		"Ali",
4413		"M1644",
4414		ali_generic_setup },
4415	{ PCI_DEVICE_ID_AL_M1647_0,
4416		PCI_VENDOR_ID_AL,
4417		ALI_M1647,
4418		"Ali",
4419		"M1647",
4420		ali_generic_setup },
4421	{ PCI_DEVICE_ID_AL_M1651_0,
4422		PCI_VENDOR_ID_AL,
4423		ALI_M1651,
4424		"Ali",
4425		"M1651",
4426		ali_generic_setup },
4427	{ PCI_DEVICE_ID_AL_M1671_0,
4428		PCI_VENDOR_ID_AL,
4429		ALI_M1671,
4430		"Ali",
4431		"M1671",
4432		ali_generic_setup },
4433	{ 0,
4434		PCI_VENDOR_ID_AL,
4435		ALI_GENERIC,
4436		"Ali",
4437		"Generic",
4438		ali_generic_setup },
4439#endif /* CONFIG_AGP_ALI */
4440
4441#ifdef CONFIG_AGP_AMD
4442	{ PCI_DEVICE_ID_AMD_IRONGATE_0,
4443		PCI_VENDOR_ID_AMD,
4444		AMD_IRONGATE,
4445		"AMD",
4446		"Irongate",
4447		amd_irongate_setup },
4448	{ PCI_DEVICE_ID_AMD_762_0,
4449		PCI_VENDOR_ID_AMD,
4450		AMD_IRONGATE,
4451		"AMD",
4452		"760MP",
4453		amd_irongate_setup },
4454	{ PCI_DEVICE_ID_AMD_761_0,
4455		PCI_VENDOR_ID_AMD,
4456		AMD_761,
4457		"AMD",
4458		"761",
4459		amd_irongate_setup },
4460	{ PCI_DEVICE_ID_AMD_762_0,
4461		PCI_VENDOR_ID_AMD,
4462		AMD_762,
4463		"AMD",
4464		"760MP",
4465		amd_irongate_setup },
4466	{ 0,
4467		PCI_VENDOR_ID_AMD,
4468		AMD_GENERIC,
4469		"AMD",
4470		"Generic",
4471		amd_irongate_setup },
4472#endif /* CONFIG_AGP_AMD */
4473
4474#ifdef CONFIG_AGP_AMD_8151
4475	{ PCI_DEVICE_ID_AMD_8151_0,
4476		PCI_VENDOR_ID_AMD,
4477		AMD_8151,
4478		"AMD",
4479		"8151",
4480		amd_8151_setup },
4481#endif /* CONFIG_AGP_AMD */
4482
4483#ifdef CONFIG_AGP_INTEL
4484	{ PCI_DEVICE_ID_INTEL_82443LX_0,
4485		PCI_VENDOR_ID_INTEL,
4486		INTEL_LX,
4487		"Intel",
4488		"440LX",
4489		intel_generic_setup },
4490	{ PCI_DEVICE_ID_INTEL_82443BX_0,
4491		PCI_VENDOR_ID_INTEL,
4492		INTEL_BX,
4493		"Intel",
4494		"440BX",
4495		intel_generic_setup },
4496	{ PCI_DEVICE_ID_INTEL_82443GX_0,
4497		PCI_VENDOR_ID_INTEL,
4498		INTEL_GX,
4499		"Intel",
4500		"440GX",
4501		intel_generic_setup },
4502	{ PCI_DEVICE_ID_INTEL_815_0,
4503		PCI_VENDOR_ID_INTEL,
4504		INTEL_I815,
4505		"Intel",
4506		"i815",
4507		intel_815_setup },
4508	{ PCI_DEVICE_ID_INTEL_820_0,
4509		PCI_VENDOR_ID_INTEL,
4510		INTEL_I820,
4511		"Intel",
4512		"i820",
4513		intel_820_setup },
4514        { PCI_DEVICE_ID_INTEL_820_UP_0,
4515                PCI_VENDOR_ID_INTEL,
4516                INTEL_I820,
4517                "Intel",
4518                 "i820",
4519                 intel_820_setup },
4520	{ PCI_DEVICE_ID_INTEL_830_M_0,
4521		PCI_VENDOR_ID_INTEL,
4522		INTEL_I830_M,
4523		"Intel",
4524		"i830M",
4525		intel_830mp_setup },
4526    { PCI_DEVICE_ID_INTEL_845_G_0,
4527		 PCI_VENDOR_ID_INTEL,
4528		 INTEL_I845_G,
4529		 "Intel",
4530		 "i845G",
4531		 intel_830mp_setup },
4532	{ PCI_DEVICE_ID_INTEL_840_0,
4533		PCI_VENDOR_ID_INTEL,
4534		INTEL_I840,
4535		"Intel",
4536		"i840",
4537		intel_840_setup },
4538	{ PCI_DEVICE_ID_INTEL_845_0,
4539		PCI_VENDOR_ID_INTEL,
4540		INTEL_I845,
4541		"Intel",
4542		"i845",
4543		intel_845_setup },
4544	{ PCI_DEVICE_ID_INTEL_850_0,
4545	        PCI_VENDOR_ID_INTEL,
4546	        INTEL_I850,
4547	        "Intel",
4548	        "i850",
4549	        intel_850_setup },
4550	{ PCI_DEVICE_ID_INTEL_860_0,
4551		PCI_VENDOR_ID_INTEL,
4552		INTEL_I860,
4553		"Intel",
4554		"i860",
4555		intel_860_setup },
4556	{ 0,
4557		PCI_VENDOR_ID_INTEL,
4558		INTEL_GENERIC,
4559		"Intel",
4560		"Generic",
4561		intel_generic_setup },
4562
4563#endif /* CONFIG_AGP_INTEL */
4564
4565#ifdef CONFIG_AGP_SIS
4566	{ PCI_DEVICE_ID_SI_740,
4567		PCI_VENDOR_ID_SI,
4568		SIS_GENERIC,
4569		"SiS",
4570		"740",
4571		sis_generic_setup },
4572	{ PCI_DEVICE_ID_SI_650,
4573		PCI_VENDOR_ID_SI,
4574		SIS_GENERIC,
4575		"SiS",
4576		"650",
4577		sis_generic_setup },
4578	{ PCI_DEVICE_ID_SI_645,
4579		PCI_VENDOR_ID_SI,
4580		SIS_GENERIC,
4581		"SiS",
4582		"645",
4583		sis_generic_setup },
4584	{ PCI_DEVICE_ID_SI_646,
4585		PCI_VENDOR_ID_SI,
4586		SIS_GENERIC,
4587		"SiS",
4588		"646",
4589		sis_generic_setup },
4590	{ PCI_DEVICE_ID_SI_735,
4591		PCI_VENDOR_ID_SI,
4592		SIS_GENERIC,
4593		"SiS",
4594		"735",
4595		sis_generic_setup },
4596	{ PCI_DEVICE_ID_SI_745,
4597		PCI_VENDOR_ID_SI,
4598		SIS_GENERIC,
4599		"SiS",
4600		"745",
4601		sis_generic_setup },
4602	{ PCI_DEVICE_ID_SI_730,
4603		PCI_VENDOR_ID_SI,
4604		SIS_GENERIC,
4605		"SiS",
4606		"730",
4607		sis_generic_setup },
4608	{ PCI_DEVICE_ID_SI_630,
4609		PCI_VENDOR_ID_SI,
4610		SIS_GENERIC,
4611		"SiS",
4612		"630",
4613		sis_generic_setup },
4614	{ PCI_DEVICE_ID_SI_540,
4615		PCI_VENDOR_ID_SI,
4616		SIS_GENERIC,
4617		"SiS",
4618		"540",
4619		sis_generic_setup },
4620	{ PCI_DEVICE_ID_SI_620,
4621		PCI_VENDOR_ID_SI,
4622		SIS_GENERIC,
4623		"SiS",
4624		"620",
4625		sis_generic_setup },
4626	{ PCI_DEVICE_ID_SI_530,
4627		PCI_VENDOR_ID_SI,
4628		SIS_GENERIC,
4629		"SiS",
4630		"530",
4631		sis_generic_setup },
4632        { PCI_DEVICE_ID_SI_550,
4633		PCI_VENDOR_ID_SI,
4634		SIS_GENERIC,
4635		"SiS",
4636                "550",
4637		sis_generic_setup },
4638	{ 0,
4639		PCI_VENDOR_ID_SI,
4640		SIS_GENERIC,
4641		"SiS",
4642		"Generic",
4643		sis_generic_setup },
4644#endif /* CONFIG_AGP_SIS */
4645
4646#ifdef CONFIG_AGP_VIA
4647	{ PCI_DEVICE_ID_VIA_8501_0,
4648		PCI_VENDOR_ID_VIA,
4649		VIA_MVP4,
4650		"Via",
4651		"MVP4",
4652		via_generic_setup },
4653	{ PCI_DEVICE_ID_VIA_82C597_0,
4654		PCI_VENDOR_ID_VIA,
4655		VIA_VP3,
4656		"Via",
4657		"VP3",
4658		via_generic_setup },
4659	{ PCI_DEVICE_ID_VIA_82C598_0,
4660		PCI_VENDOR_ID_VIA,
4661		VIA_MVP3,
4662		"Via",
4663		"MVP3",
4664		via_generic_setup },
4665	{ PCI_DEVICE_ID_VIA_82C691_0,
4666		PCI_VENDOR_ID_VIA,
4667		VIA_APOLLO_PRO,
4668		"Via",
4669		"Apollo Pro",
4670		via_generic_setup },
4671	{ PCI_DEVICE_ID_VIA_8371_0,
4672		PCI_VENDOR_ID_VIA,
4673		VIA_APOLLO_KX133,
4674		"Via",
4675		"Apollo Pro KX133",
4676		via_generic_setup },
4677	{ PCI_DEVICE_ID_VIA_8363_0,
4678		PCI_VENDOR_ID_VIA,
4679		VIA_APOLLO_KT133,
4680		"Via",
4681		"Apollo Pro KT133",
4682		via_generic_setup },
4683	{ PCI_DEVICE_ID_VIA_8367_0,
4684		PCI_VENDOR_ID_VIA,
4685		VIA_APOLLO_KT133,
4686		"Via",
4687		"Apollo Pro KT266",
4688		via_generic_setup },
4689	{ 0,
4690		PCI_VENDOR_ID_VIA,
4691		VIA_GENERIC,
4692		"Via",
4693		"Generic",
4694		via_generic_setup },
4695#endif /* CONFIG_AGP_VIA */
4696
4697#ifdef CONFIG_AGP_HP_ZX1
4698	{ PCI_DEVICE_ID_HP_ZX1_LBA,
4699		PCI_VENDOR_ID_HP,
4700		HP_ZX1,
4701		"HP",
4702		"ZX1",
4703		hp_zx1_setup },
4704#endif
4705
4706	{ 0, }, /* dummy final entry, always present */
4707};
4708
4709
/*
 * Scan agp_bridge_info[] (above) for an entry matching pdev and invoke
 * its chipset_setup hook.  Entries are grouped by vendor; the last
 * entry of each vendor group has device_id == 0 and serves as a
 * catch-all used only when agp_try_unsupported is set.
 * Returns the setup hook's result, or -ENODEV if nothing matched.
 */
static int __init agp_lookup_host_bridge (struct pci_dev *pdev)
{
	int i;

	/* find the start of this vendor's group in the table */
	for (i = 0; i < ARRAY_SIZE (agp_bridge_info); i++)
		if (pdev->vendor == agp_bridge_info[i].vendor_id)
			break;

	if (i >= ARRAY_SIZE (agp_bridge_info)) {
		printk (KERN_DEBUG PFX "unsupported bridge\n");
		return -ENODEV;
	}

	/* walk the vendor's entries looking for an exact device match */
	while ((i < ARRAY_SIZE (agp_bridge_info)) &&
	       (agp_bridge_info[i].vendor_id == pdev->vendor)) {
		if (pdev->device == agp_bridge_info[i].device_id) {
#ifdef CONFIG_AGP_ALI
			/* the M1621 id covers a family of chips; the
			 * actual model is read from config byte 0xFB */
			if (pdev->device == PCI_DEVICE_ID_AL_M1621_0) {
				u8 hidden_1621_id;

				pci_read_config_byte(pdev, 0xFB, &hidden_1621_id);
				switch (hidden_1621_id) {
				case 0x31:
					agp_bridge_info[i].chipset_name="M1631";
					break;
				case 0x32:
					agp_bridge_info[i].chipset_name="M1632";
					break;
				case 0x41:
					agp_bridge_info[i].chipset_name="M1641";
					break;
				case 0x43:
					break;
				case 0x47:
					agp_bridge_info[i].chipset_name="M1647";
					break;
				case 0x51:
					agp_bridge_info[i].chipset_name="M1651";
					break;
				default:
					break;
				}
			}
#endif

			printk (KERN_INFO PFX "Detected %s %s chipset\n",
				agp_bridge_info[i].vendor_name,
				agp_bridge_info[i].chipset_name);
			agp_bridge.type = agp_bridge_info[i].chipset;
			return agp_bridge_info[i].chipset_setup (pdev);
		}

		i++;
	}

	i--; /* point to vendor generic entry (device_id == 0) */

	/* try init anyway, if user requests it AND
	 * there is a 'generic' bridge entry for this vendor */
	if (agp_try_unsupported && agp_bridge_info[i].device_id == 0) {
		printk(KERN_WARNING PFX "Trying generic %s routines"
		       " for device id: %04x\n",
		       agp_bridge_info[i].vendor_name, pdev->device);
		agp_bridge.type = agp_bridge_info[i].chipset;
		return agp_bridge_info[i].chipset_setup (pdev);
	}

	printk(KERN_ERR PFX "Unsupported %s chipset (device id: %04x),"
	       " you might want to try agp_try_unsupported=1.\n",
	       agp_bridge_info[i].vendor_name, pdev->device);
	return -ENODEV;
}
4783
4784
/*
 * Probe the first host bridge for a supported AGP chipset.  Integrated
 * graphics chipsets (Intel i810 family), Serverworks and HP ZX1 need
 * dedicated handling and are checked explicitly; anything else falls
 * through to the agp_bridge_info[] table via agp_lookup_host_bridge().
 */
static int __init agp_find_supported_device(void)
{
	struct pci_dev *dev = NULL;
	u8 cap_ptr = 0x00;

	if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) == NULL)
		return -ENODEV;

	agp_bridge.dev = dev;

	/* Need to test for I810 here: the i810 family exposes the GART on
	 * the secondary (graphics) device rather than the host bridge */
#ifdef CONFIG_AGP_I810
	if (dev->vendor == PCI_VENDOR_ID_INTEL) {
		struct pci_dev *i810_dev;

		switch (dev->device) {
		case PCI_DEVICE_ID_INTEL_810_0:
			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
					       PCI_DEVICE_ID_INTEL_810_1,
						   NULL);
			if (i810_dev == NULL) {
				printk(KERN_ERR PFX "Detected an Intel i810,"
				       " but could not find the secondary"
				       " device.\n");
				return -ENODEV;
			}
			printk(KERN_INFO PFX "Detected an Intel "
			       "i810 Chipset.\n");
			agp_bridge.type = INTEL_I810;
			return intel_i810_setup (i810_dev);

		case PCI_DEVICE_ID_INTEL_810_DC100_0:
			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
					 PCI_DEVICE_ID_INTEL_810_DC100_1,
						   NULL);
			if (i810_dev == NULL) {
				printk(KERN_ERR PFX "Detected an Intel i810 "
				       "DC100, but could not find the "
				       "secondary device.\n");
				return -ENODEV;
			}
			printk(KERN_INFO PFX "Detected an Intel i810 "
			       "DC100 Chipset.\n");
			agp_bridge.type = INTEL_I810;
			return intel_i810_setup(i810_dev);

		case PCI_DEVICE_ID_INTEL_810_E_0:
			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
					     PCI_DEVICE_ID_INTEL_810_E_1,
						   NULL);
			if (i810_dev == NULL) {
				printk(KERN_ERR PFX "Detected an Intel i810 E"
				    ", but could not find the secondary "
				       "device.\n");
				return -ENODEV;
			}
			printk(KERN_INFO PFX "Detected an Intel i810 E "
			       "Chipset.\n");
			agp_bridge.type = INTEL_I810;
			return intel_i810_setup(i810_dev);

		 case PCI_DEVICE_ID_INTEL_815_0:
			/* The i815 can operate either as an i810 style
			 * integrated device, or as an AGP4X motherboard.
			 * This only addresses the first mode; a missing
			 * secondary device falls through to the table scan.
			 */
			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
						   PCI_DEVICE_ID_INTEL_815_1,
						   NULL);
			if (i810_dev == NULL) {
				printk(KERN_ERR PFX "agpgart: Detected an "
				       "Intel i815, but could not find the"
				       " secondary device. Assuming a "
				       "non-integrated video card.\n");
				break;
			}
			printk(KERN_INFO PFX "agpgart: Detected an Intel i815 "
			       "Chipset.\n");
			agp_bridge.type = INTEL_I810;
			return intel_i810_setup(i810_dev);

		case PCI_DEVICE_ID_INTEL_845_G_0:
			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
					PCI_DEVICE_ID_INTEL_845_G_1, NULL);
			/* skip non-zero functions and look again */
			if(i810_dev && PCI_FUNC(i810_dev->devfn) != 0) {
				i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
					PCI_DEVICE_ID_INTEL_845_G_1, i810_dev);
			}

			if (i810_dev == NULL) {
				/*
				 * We probably have a I845 G chipset
				 * with an external graphics
				 * card. It will be initialized later
				 */
				agp_bridge.type = INTEL_I845_G;
				break;
			}
			printk(KERN_INFO PFX "Detected an Intel "
				   "845G Chipset.\n");
			agp_bridge.type = INTEL_I810;
			return intel_i830_setup(i810_dev);

		case PCI_DEVICE_ID_INTEL_830_M_0:
			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
					PCI_DEVICE_ID_INTEL_830_M_1, NULL);
			/* skip non-zero functions and look again */
			if(i810_dev && PCI_FUNC(i810_dev->devfn) != 0) {
				i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
					PCI_DEVICE_ID_INTEL_830_M_1, i810_dev);
			}

			if (i810_dev == NULL) {
				/*
				 * We probably have a I830MP chipset
				 * with an external graphics
				 * card. It will be initialized later
				 */
				agp_bridge.type = INTEL_I830_M;
				break;
			}
			printk(KERN_INFO PFX "Detected an Intel "
				   "830M Chipset.\n");
			agp_bridge.type = INTEL_I810;
			return intel_i830_setup(i810_dev);
		default:
			break;
		}
	}
#endif /* CONFIG_AGP_I810 */

#ifdef CONFIG_AGP_SWORKS
	/* Everything is on func 1 here so we are hardcoding function one */
	if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS) {
		struct pci_dev *bridge_dev;

		bridge_dev = pci_find_slot ((unsigned int)dev->bus->number,
					    PCI_DEVFN(0, 1));
		if(bridge_dev == NULL) {
			printk(KERN_INFO PFX "agpgart: Detected a Serverworks "
			       "Chipset, but could not find the secondary "
			       "device.\n");
			return -ENODEV;
		}

		switch (dev->device) {
		case PCI_DEVICE_ID_SERVERWORKS_HE:
			agp_bridge.type = SVWRKS_HE;
			return serverworks_setup(bridge_dev);

		case PCI_DEVICE_ID_SERVERWORKS_LE:
		case 0x0007:
			agp_bridge.type = SVWRKS_LE;
			return serverworks_setup(bridge_dev);

		default:
			if(agp_try_unsupported) {
				agp_bridge.type = SVWRKS_GENERIC;
				return serverworks_setup(bridge_dev);
			}
			break;
		}
	}

#endif	/* CONFIG_AGP_SWORKS */

#ifdef CONFIG_AGP_HP_ZX1
	if (dev->vendor == PCI_VENDOR_ID_HP) {
		do {
			/* ZX1 LBAs can be either PCI or AGP bridges */
			if (pci_find_capability(dev, PCI_CAP_ID_AGP)) {
				printk(KERN_INFO PFX "Detected HP ZX1 AGP "
				       "chipset at %s\n", dev->slot_name);
				agp_bridge.type = HP_ZX1;
				agp_bridge.dev = dev;
				return hp_zx1_setup(dev);
			}
			dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, dev);
		} while (dev);
		return -ENODEV;
	}
#endif	/* CONFIG_AGP_HP_ZX1 */

	/* find capndx: offset of the AGP capability in config space */
	cap_ptr = pci_find_capability(dev, PCI_CAP_ID_AGP);
	if (cap_ptr == 0x00)
		return -ENODEV;
	agp_bridge.capndx = cap_ptr;

	/* Fill in the mode register (AGP status at capability + 4) */
	pci_read_config_dword(agp_bridge.dev,
			      agp_bridge.capndx + 4,
			      &agp_bridge.mode);

	/* probe for known chipsets */
	return agp_lookup_host_bridge (dev);
}
4984
/* One row of the interpolation table used by agp_find_max(). */
struct agp_max_table {
	int mem;	/* system memory, in MB */
	int agp;	/* suggested max AGP memory for that size, in MB */
};
4989
/* {system MB, max agp MB} pairs; rows must stay sorted by .mem
 * ascending — agp_find_max() interpolates between adjacent rows. */
static struct agp_max_table maxes_table[9] __initdata =
{
	{0, 0},
	{32, 4},
	{64, 28},
	{128, 96},
	{256, 204},
	{512, 440},
	{1024, 942},
	{2048, 1920},
	{4096, 3932}
};
5002
5003static int __init agp_find_max (void)
5004{
5005	long memory, index, result;
5006
5007	memory = (num_physpages << PAGE_SHIFT) >> 20;
5008	index = 1;
5009
5010	while ((memory > maxes_table[index].mem) &&
5011	       (index < 8)) {
5012		index++;
5013	}
5014
5015	result = maxes_table[index - 1].agp +
5016	   ( (memory - maxes_table[index - 1].mem)  *
5017	     (maxes_table[index].agp - maxes_table[index - 1].agp)) /
5018	   (maxes_table[index].mem - maxes_table[index - 1].mem);
5019
5020	printk(KERN_INFO PFX "Maximum main memory to use "
5021	       "for agp memory: %ldM\n", result);
5022	result = result << (20 - PAGE_SHIFT);
5023        return result;
5024}
5025
/* Interface version reported to userspace via the frontend. */
#define AGPGART_VERSION_MAJOR 0
#define AGPGART_VERSION_MINOR 99

static agp_version agp_current_version =
{
	AGPGART_VERSION_MAJOR,
	AGPGART_VERSION_MINOR
};
5034
/*
 * Detect and configure the AGP bridge: find a supported chipset,
 * allocate the scratch page (if the chipset needs one), the GATT and
 * the key list, then program the chipset.  On failure, everything
 * acquired so far is released via the err_out path before returning
 * a negative errno.
 */
static int __init agp_backend_initialize(void)
{
	int size_value, rc, got_gatt=0, got_keylist=0;

	memset(&agp_bridge, 0, sizeof(struct agp_bridge_data));
	agp_bridge.type = NOT_SUPPORTED;
	agp_bridge.max_memory_agp = agp_find_max();
	agp_bridge.version = &agp_current_version;

	rc = agp_find_supported_device();
	if (rc) {
		/* not KERN_ERR because error msg should have already printed */
		printk(KERN_DEBUG PFX "no supported devices found.\n");
		return rc;
	}

	if (agp_bridge.needs_scratch_page == TRUE) {
		agp_bridge.scratch_page = agp_bridge.agp_alloc_page();

		if (agp_bridge.scratch_page == 0) {
			printk(KERN_ERR PFX "unable to get memory for "
			       "scratch page.\n");
			return -ENOMEM;
		}
		/* store the scratch page pre-masked, ready to drop
		 * straight into GATT entries */
		agp_bridge.scratch_page =
		    virt_to_phys((void *) agp_bridge.scratch_page);
		agp_bridge.scratch_page =
		    agp_bridge.mask_memory(agp_bridge.scratch_page, 0);
	}

	size_value = agp_bridge.fetch_size();

	if (size_value == 0) {
		printk(KERN_ERR PFX "unable to determine aperture size.\n");
		rc = -EINVAL;
		goto err_out;
	}
	if (agp_bridge.create_gatt_table()) {
		printk(KERN_ERR PFX "unable to get memory for graphics "
		       "translation table.\n");
		rc = -ENOMEM;
		goto err_out;
	}
	got_gatt = 1;

	agp_bridge.key_list = vmalloc(PAGE_SIZE * 4);
	if (agp_bridge.key_list == NULL) {
		printk(KERN_ERR PFX "error allocating memory for key lists.\n");
		rc = -ENOMEM;
		goto err_out;
	}
	got_keylist = 1;

	memset(agp_bridge.key_list, 0, PAGE_SIZE * 4);

	if (agp_bridge.configure()) {
		printk(KERN_ERR PFX "error configuring host chipset.\n");
		rc = -EINVAL;
		goto err_out;
	}

	printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n",
	       size_value, agp_bridge.gart_bus_addr);

	return 0;

err_out:
	if (agp_bridge.needs_scratch_page == TRUE) {
		/* strip the low mask bits to recover the page address */
		agp_bridge.scratch_page &= ~(0x00000fff);
		agp_bridge.agp_destroy_page((unsigned long)
				 phys_to_virt(agp_bridge.scratch_page));
	}
	if (got_gatt)
		agp_bridge.free_gatt_table();
	if (got_keylist)
		vfree(agp_bridge.key_list);
	return rc;
}
5113
5114
/* Tear down everything agp_backend_initialize() set up.
 * Cannot be __exit because it is also called from __init code
 * (agp_init's error path). */
static void agp_backend_cleanup(void)
{
	agp_bridge.cleanup();
	agp_bridge.free_gatt_table();
	vfree(agp_bridge.key_list);

	if (agp_bridge.needs_scratch_page == TRUE) {
		/* strip the low mask bits to recover the page address */
		agp_bridge.scratch_page &= ~(0x00000fff);
		agp_bridge.agp_destroy_page((unsigned long)
				 phys_to_virt(agp_bridge.scratch_page));
	}
}
5128
5129static int agp_power(struct pm_dev *dev, pm_request_t rq, void *data)
5130{
5131	switch(rq)
5132	{
5133		case PM_SUSPEND:
5134			return agp_bridge.suspend();
5135		case PM_RESUME:
5136			agp_bridge.resume();
5137			return 0;
5138	}
5139	return 0;
5140}
5141
5142extern int agp_frontend_initialize(void);
5143extern void agp_frontend_cleanup(void);
5144
/* Hook table exported to the DRM via inter_module_register();
 * member order must match the drm_agp_t definition. */
static const drm_agp_t drm_agp = {
	&agp_free_memory,
	&agp_allocate_memory,
	&agp_bind_memory,
	&agp_unbind_memory,
	&agp_enable,
	&agp_backend_acquire,
	&agp_backend_release,
	&agp_copy_info
};
5155
/*
 * Module entry point: bring up the backend (chipset detection and
 * configuration) and the /dev/agpgart frontend, export the drm_agp
 * hook table to the DRM, and register for power-management events.
 */
int __init agp_init(void)
{
	int ret_val;

	printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Jeff Hartmann\n",
	       AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);

	ret_val = agp_backend_initialize();
	if (ret_val) {
		agp_bridge.type = NOT_SUPPORTED;
		return ret_val;
	}
	ret_val = agp_frontend_initialize();
	if (ret_val) {
		agp_bridge.type = NOT_SUPPORTED;
		/* undo agp_backend_initialize() */
		agp_backend_cleanup();
		return ret_val;
	}

	inter_module_register("drm_agp", THIS_MODULE, &drm_agp);

	pm_register(PM_PCI_DEV, PM_PCI_ID(agp_bridge.dev), agp_power);
	return 0;
}
5180
/* Module exit: unwind agp_init() in reverse order. */
static void __exit agp_cleanup(void)
{
	pm_unregister_all(agp_power);
	agp_frontend_cleanup();
	agp_backend_cleanup();
	inter_module_unregister("drm_agp");
}
5188
/* NOTE(review): when CONFIG_GART_IOMMU is set, init/exit appear to be
 * driven from elsewhere rather than via the module hooks — confirm
 * against the GART IOMMU code before relying on this. */
#ifndef CONFIG_GART_IOMMU
module_init(agp_init);
module_exit(agp_cleanup);
#endif
5193