agp.c (r173203 -> r173573)
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/dev/agp/agp.c 173203 2007-10-30 22:09:16Z jhb $");
+__FBSDID("$FreeBSD: head/sys/dev/agp/agp.c 173573 2007-11-12 21:51:38Z jhb $");

#include "opt_bus.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/agpio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

+#include <dev/agp/agppriv.h>
+#include <dev/agp/agpvar.h>
+#include <dev/agp/agpreg.h>
 #include <dev/pci/pcivar.h>
 #include <dev/pci/pcireg.h>
-#include <pci/agppriv.h>
-#include <pci/agpvar.h>
-#include <pci/agpreg.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>

#include <machine/md_var.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

MODULE_VERSION(agp, 1);

MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");

				/* agp_drv.c */
static d_open_t agp_open;
static d_close_t agp_close;
static d_ioctl_t agp_ioctl;
static d_mmap_t agp_mmap;

static struct cdevsw agp_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	agp_open,
	.d_close =	agp_close,
	.d_ioctl =	agp_ioctl,
	.d_mmap =	agp_mmap,
	.d_name =	"agp",
};

static devclass_t agp_devclass;
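/*
 * The userland node is created with the agp unit number as its minor
 * (see make_dev() in agp_generic_attach() below), so KDEV2DEV() maps
 * an open cdev straight back to the owning device_t.
 */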
#define KDEV2DEV(kdev)	devclass_get_device(agp_devclass, minor(kdev))

/* Helper functions for implementing chipset mini drivers. */

void
agp_flush_cache()
{
#if defined(__i386__) || defined(__amd64__)
	wbinvd();
#endif
}

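/*
 * Return the config-space offset of the device's AGP capability block,
 * or 0 if the device has no AGP capability.  Callers add a register
 * offset to this value, e.g.:
 *
 *	pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
 */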
u_int8_t
agp_find_caps(device_t dev)
{
	int capreg;

	if (pci_find_extcap(dev, PCIY_AGP, &capreg) != 0)
		capreg = 0;
	return (capreg);
}

/*
 * Find an AGP display device (if any).
 */
static device_t
agp_find_display(void)
{
	devclass_t pci = devclass_find("pci");
	device_t bus, dev = 0;
	device_t *kids;
	int busnum, numkids, i;

	for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
		bus = devclass_get_device(pci, busnum);
		if (!bus)
			continue;
		device_get_children(bus, &kids, &numkids);
		for (i = 0; i < numkids; i++) {
			dev = kids[i];
			if (pci_get_class(dev) == PCIC_DISPLAY
			    && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
				if (agp_find_caps(dev)) {
					free(kids, M_TEMP);
					return dev;
				}
		}
		free(kids, M_TEMP);
	}

	return 0;
}

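/*
 * Sizing example (illustrative, assuming the usual 4KB AGP page, i.e.
 * AGP_PAGE_SHIFT == 12): a 64MB aperture needs 64MB >> 12 = 16384
 * GATT entries, so the allocation below claims 16384 * 4 = 64KB of
 * physically contiguous memory.
 */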
struct agp_gatt *
agp_alloc_gatt(device_t dev)
{
	u_int32_t apsize = AGP_GET_APERTURE(dev);
	u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
	struct agp_gatt *gatt;

	if (bootverbose)
		device_printf(dev,
		    "allocating GATT for aperture of size %dM\n",
		    apsize / (1024*1024));

	if (entries == 0) {
		device_printf(dev, "bad aperture size\n");
		return NULL;
	}

	gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
	if (!gatt)
		return 0;

	gatt->ag_entries = entries;
	gatt->ag_virtual = contigmalloc(entries * sizeof(u_int32_t), M_AGP, 0,
	    0, ~0, PAGE_SIZE, 0);
	if (!gatt->ag_virtual) {
		if (bootverbose)
			device_printf(dev, "contiguous allocation failed\n");
		free(gatt, M_AGP);
		return 0;
	}
	bzero(gatt->ag_virtual, entries * sizeof(u_int32_t));
	gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);
	agp_flush_cache();

	return gatt;
}

void
agp_free_gatt(struct agp_gatt *gatt)
{
	contigfree(gatt->ag_virtual,
	    gatt->ag_entries * sizeof(u_int32_t), M_AGP);
	free(gatt, M_AGP);
}

static u_int agp_max[][2] = {
	{0,	0},
	{32,	4},
	{64,	28},
	{128,	96},
	{256,	204},
	{512,	440},
	{1024,	942},
	{2048,	1920},
	{4096,	3932}
};
#define agp_max_size	(sizeof(agp_max) / sizeof(agp_max[0]))
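/*
 * Worked example: on a machine with 100MB of RAM the scan in
 * agp_generic_attach() stops at the {128, 96} row, so at most 96MB of
 * AGP memory may be allocated.
 */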

/**
 * Sets the PCI resource which represents the AGP aperture.
 *
 * If not called, the default AGP aperture resource of AGP_APBASE will
 * be used.  Must be called before agp_generic_attach().
 */
void
agp_set_aperture_resource(device_t dev, int rid)
{
	struct agp_softc *sc = device_get_softc(dev);

	sc->as_aperture_rid = rid;
}

int
agp_generic_attach(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);
	int i;
	u_int memsize;

	/*
	 * Find and map the aperture, RF_SHAREABLE for DRM but not RF_ACTIVE
	 * because the kernel doesn't need to map it.
	 */
	if (sc->as_aperture_rid == 0)
		sc->as_aperture_rid = AGP_APBASE;

	sc->as_aperture = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->as_aperture_rid, RF_SHAREABLE);
	if (!sc->as_aperture)
		return ENOMEM;

	/*
	 * Work out an upper bound for agp memory allocation.  This
	 * uses a heuristic table from the Linux driver.
	 */
	memsize = ptoa(Maxmem) >> 20;
	for (i = 0; i < agp_max_size; i++) {
		if (memsize <= agp_max[i][0])
			break;
	}
	if (i == agp_max_size)
		i = agp_max_size - 1;
	sc->as_maxmem = agp_max[i][1] << 20U;

	/*
	 * The lock is used to prevent re-entry to
	 * agp_generic_bind_memory() since that function can sleep.
	 */
	mtx_init(&sc->as_lock, "agp lock", NULL, MTX_DEF);

	/*
	 * Initialise stuff for the userland device.
	 */
	agp_devclass = devclass_find("agp");
	TAILQ_INIT(&sc->as_memory);
	sc->as_nextid = 1;

	sc->as_devnode = make_dev(&agp_cdevsw,
	    device_get_unit(dev),
	    UID_ROOT,
	    GID_WHEEL,
	    0600,
	    "agpgart");

	return 0;
}

void
agp_free_cdev(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	destroy_dev(sc->as_devnode);
}

void
agp_free_res(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	bus_release_resource(dev, SYS_RES_MEMORY, sc->as_aperture_rid,
	    sc->as_aperture);
	mtx_destroy(&sc->as_lock);
	agp_flush_cache();
}

int
agp_generic_detach(device_t dev)
{

	agp_free_cdev(dev);
	agp_free_res(dev);
	return 0;
}

/**
 * Default AGP aperture size detection which simply returns the size of
 * the aperture's PCI resource.
 */
int
agp_generic_get_aperture(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	return rman_get_size(sc->as_aperture);
}

/**
 * Default AGP aperture size setting function, which simply doesn't allow
 * changes to resource size.
 */
int
agp_generic_set_aperture(device_t dev, u_int32_t aperture)
{
	u_int32_t current_aperture;

	current_aperture = AGP_GET_APERTURE(dev);
	if (current_aperture != aperture)
		return EINVAL;
	else
		return 0;
}

/*
 * This does the enable logic for v3, with the same topology
 * restrictions as in place for v2 -- one bus, one device on the bus.
 */
static int
agp_v3_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate, arqsz, cal;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/*
	 * ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values.
	 */
	arqsz = AGP_MODE_GET_ARQSZ(mode);
	if (AGP_MODE_GET_ARQSZ(tstatus) > arqsz)
		arqsz = AGP_MODE_GET_ARQSZ(tstatus);
	if (AGP_MODE_GET_ARQSZ(mstatus) > arqsz)
		arqsz = AGP_MODE_GET_ARQSZ(mstatus);

	/* Calibration cycle - don't allow override by mode register */
	cal = AGP_MODE_GET_CAL(tstatus);
	if (AGP_MODE_GET_CAL(mstatus) < cal)
		cal = AGP_MODE_GET_CAL(mstatus);

	/* SBA must be supported for AGP v3. */
	sba = 1;

	/* Set FW if all three support it. */
	fw = (AGP_MODE_GET_FW(tstatus)
	    & AGP_MODE_GET_FW(mstatus)
	    & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
	    & AGP_MODE_GET_RATE(mstatus)
	    & AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V3_RATE_8x)
		rate = AGP_MODE_V3_RATE_8x;
	else
		rate = AGP_MODE_V3_RATE_4x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v3 mode %d\n", rate * 4);

	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, 0, 4);

	/* Construct the new mode word and tell the hardware */
	command = 0;
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_ARQSZ(command, arqsz);
	command = AGP_MODE_SET_CAL(command, cal);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_MODE_3(command, 1);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}

static int
agp_v2_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/* Set SBA if all three can deal with SBA */
	sba = (AGP_MODE_GET_SBA(tstatus)
	    & AGP_MODE_GET_SBA(mstatus)
	    & AGP_MODE_GET_SBA(mode));

	/* Similar for FW */
	fw = (AGP_MODE_GET_FW(tstatus)
	    & AGP_MODE_GET_FW(mstatus)
	    & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
	    & AGP_MODE_GET_RATE(mstatus)
	    & AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V2_RATE_4x)
		rate = AGP_MODE_V2_RATE_4x;
	else if (rate & AGP_MODE_V2_RATE_2x)
		rate = AGP_MODE_V2_RATE_2x;
	else
		rate = AGP_MODE_V2_RATE_1x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v2 mode %d\n", rate);

	/* Construct the new mode word and tell the hardware */
	command = 0;
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}

int
agp_generic_enable(device_t dev, u_int32_t mode)
{
	device_t mdev = agp_find_display();
	u_int32_t tstatus, mstatus;

	if (!mdev) {
		AGP_DPF("can't find display\n");
		return ENXIO;
	}

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/*
	 * Check display and bridge for AGP v3 support.  AGP v3 allows
	 * more variety in topology than v2, e.g. multiple AGP devices
	 * attached to one bridge, or multiple AGP bridges in one
	 * system.  This doesn't attempt to address those situations,
	 * but should work fine for a classic single AGP slot system
	 * with AGP v3.
	 */
	if (AGP_MODE_GET_MODE_3(mode) &&
	    AGP_MODE_GET_MODE_3(tstatus) &&
	    AGP_MODE_GET_MODE_3(mstatus))
		return (agp_v3_enable(dev, mdev, mode));
	else
		return (agp_v2_enable(dev, mdev, mode));
}

struct agp_memory *
agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0)
		return 0;

	if (sc->as_allocated + size > sc->as_maxmem)
		return 0;

	if (type != 0) {
		printf("agp_generic_alloc_memory: unsupported type %d\n",
		    type);
		return 0;
	}

	mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
	mem->am_id = sc->as_nextid++;
	mem->am_size = size;
	mem->am_type = 0;
	mem->am_obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(size)));
	mem->am_physical = 0;
	mem->am_offset = 0;
	mem->am_is_bound = 0;
	TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
	sc->as_allocated += size;

	return mem;
}

int
agp_generic_free_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (mem->am_is_bound)
		return EBUSY;

	sc->as_allocated -= mem->am_size;
	TAILQ_REMOVE(&sc->as_memory, mem, am_link);
	vm_object_deallocate(mem->am_obj);
	free(mem, M_AGP);
	return 0;
}

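/*
 * Illustrative sizing (assuming PAGE_SIZE == AGP_PAGE_SIZE == 4KB, as
 * on i386/amd64): binding a 64KB block wires 16 vm pages and installs
 * one GATT entry per page; were AGP_PAGE_SIZE smaller than PAGE_SIZE,
 * the inner loop below would install several GATT entries per vm page.
 */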
int
agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
    vm_offset_t offset)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_offset_t i, j, k;
	vm_page_t m;
	int error;

	/* Do some sanity checks first. */
	if (offset < 0 || (offset & (AGP_PAGE_SIZE - 1)) != 0 ||
	    offset + mem->am_size > AGP_GET_APERTURE(dev)) {
		device_printf(dev, "binding memory at bad offset %#x\n",
		    (int)offset);
		return EINVAL;
	}

	/*
	 * Allocate the pages early, before acquiring the lock,
	 * because vm_page_grab() used with VM_ALLOC_RETRY may
	 * block and we can't hold a mutex while blocking.
	 */
	VM_OBJECT_LOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		/*
		 * Find a page from the object and wire it
		 * down.  This page will be mapped using one or more
		 * entries in the GATT (assuming that PAGE_SIZE >=
		 * AGP_PAGE_SIZE).  If this is the first call to bind,
		 * the pages will be allocated and zeroed.
		 */
		m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
		    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));
	}
	VM_OBJECT_UNLOCK(mem->am_obj);

	mtx_lock(&sc->as_lock);

	if (mem->am_is_bound) {
		device_printf(dev, "memory already bound\n");
		error = EINVAL;
		VM_OBJECT_LOCK(mem->am_obj);
		goto bad;
	}

	/*
	 * Bind the individual pages and flush the chipset's
	 * TLB.
	 */
	VM_OBJECT_LOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));

		/*
		 * Install entries in the GATT, making sure that if
		 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
		 * aligned to PAGE_SIZE, we don't modify too many GATT
		 * entries.
		 */
		for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
		    j += AGP_PAGE_SIZE) {
			vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
			AGP_DPF("binding offset %#x to pa %#x\n",
			    offset + i + j, pa);
			error = AGP_BIND_PAGE(dev, offset + i + j, pa);
			if (error) {
				/*
				 * Bail out.  Reverse all the mappings
				 * and unwire the pages.
				 */
				vm_page_wakeup(m);
				for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
					AGP_UNBIND_PAGE(dev, offset + k);
				goto bad;
			}
		}
		vm_page_wakeup(m);
	}
	VM_OBJECT_UNLOCK(mem->am_obj);

	/*
	 * Flush the cpu cache since we are providing a new mapping
	 * for these pages.
	 */
	agp_flush_cache();

	/*
	 * Make sure the chipset gets the new mappings.
	 */
	AGP_FLUSH_TLB(dev);

	mem->am_offset = offset;
	mem->am_is_bound = 1;

	mtx_unlock(&sc->as_lock);

	return 0;
bad:
	mtx_unlock(&sc->as_lock);
	VM_OBJECT_LOCK_ASSERT(mem->am_obj, MA_OWNED);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));
		vm_page_lock_queues();
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(mem->am_obj);

	return error;
}

int
agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_page_t m;
	int i;

	mtx_lock(&sc->as_lock);

	if (!mem->am_is_bound) {
		device_printf(dev, "memory is not bound\n");
		mtx_unlock(&sc->as_lock);
		return EINVAL;
	}

	/*
	 * Unbind the individual pages and flush the chipset's
	 * TLB.  Unwire the pages so they can be swapped.
	 */
	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
		AGP_UNBIND_PAGE(dev, mem->am_offset + i);
	VM_OBJECT_LOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, atop(i));
		vm_page_lock_queues();
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(mem->am_obj);

	agp_flush_cache();
	AGP_FLUSH_TLB(dev);

	mem->am_offset = 0;
	mem->am_is_bound = 0;

	mtx_unlock(&sc->as_lock);

	return 0;
}

/* Helper functions for implementing user/kernel api */

static int
agp_acquire_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state != AGP_ACQUIRE_FREE)
		return EBUSY;
	sc->as_state = state;

	return 0;
}

static int
agp_release_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state == AGP_ACQUIRE_FREE)
		return 0;

	if (sc->as_state != state)
		return EBUSY;

	sc->as_state = AGP_ACQUIRE_FREE;
	return 0;
}

static struct agp_memory *
agp_find_memory(device_t dev, int id)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	AGP_DPF("searching for memory block %d\n", id);
	TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
		AGP_DPF("considering memory block %d\n", mem->am_id);
		if (mem->am_id == id)
			return mem;
	}
	return 0;
}

/* Implementation of the userland ioctl api */

static int
agp_info_user(device_t dev, agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	bzero(info, sizeof *info);
	info->bridge_id = pci_get_devid(dev);
	info->agp_mode =
	    pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	info->aper_base = rman_get_start(sc->as_aperture);
	info->aper_size = AGP_GET_APERTURE(dev) >> 20;
	info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
	info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;

	return 0;
}

static int
agp_setup_user(device_t dev, agp_setup *setup)
{
	return AGP_ENABLE(dev, setup->agp_mode);
}

static int
agp_allocate_user(device_t dev, agp_allocate *alloc)
{
	struct agp_memory *mem;

	mem = AGP_ALLOC_MEMORY(dev,
	    alloc->type,
	    alloc->pg_count << AGP_PAGE_SHIFT);
	if (mem) {
		alloc->key = mem->am_id;
		alloc->physical = mem->am_physical;
		return 0;
	} else {
		return ENOMEM;
	}
}

static int
agp_deallocate_user(device_t dev, int id)
{
	struct agp_memory *mem = agp_find_memory(dev, id);

	if (mem) {
		AGP_FREE_MEMORY(dev, mem);
		return 0;
	} else {
		return ENOENT;
	}
}

static int
agp_bind_user(device_t dev, agp_bind *bind)
{
	struct agp_memory *mem = agp_find_memory(dev, bind->key);

	if (!mem)
		return ENOENT;

	return AGP_BIND_MEMORY(dev, mem, bind->pg_start << AGP_PAGE_SHIFT);
}

static int
agp_unbind_user(device_t dev, agp_unbind *unbind)
{
	struct agp_memory *mem = agp_find_memory(dev, unbind->key);

	if (!mem)
		return ENOENT;

	return AGP_UNBIND_MEMORY(dev, mem);
}

static int
agp_open(struct cdev *kdev, int oflags, int devtype, struct thread *td)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);

	if (!sc->as_isopen) {
		sc->as_isopen = 1;
		device_busy(dev);
	}

	return 0;
}

static int
agp_close(struct cdev *kdev, int fflag, int devtype, struct thread *td)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	/*
	 * Clear the GATT and force release on last close
	 */
	while ((mem = TAILQ_FIRST(&sc->as_memory)) != 0) {
		if (mem->am_is_bound)
			AGP_UNBIND_MEMORY(dev, mem);
		AGP_FREE_MEMORY(dev, mem);
	}
	if (sc->as_state == AGP_ACQUIRE_USER)
		agp_release_helper(dev, AGP_ACQUIRE_USER);
	sc->as_isopen = 0;
	device_unbusy(dev);

	return 0;
}

static int
agp_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	device_t dev = KDEV2DEV(kdev);

	switch (cmd) {
	case AGPIOC_INFO:
		return agp_info_user(dev, (agp_info *) data);

	case AGPIOC_ACQUIRE:
		return agp_acquire_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_RELEASE:
		return agp_release_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_SETUP:
		return agp_setup_user(dev, (agp_setup *)data);

	case AGPIOC_ALLOCATE:
		return agp_allocate_user(dev, (agp_allocate *)data);

	case AGPIOC_DEALLOCATE:
		return agp_deallocate_user(dev, *(int *) data);

	case AGPIOC_BIND:
		return agp_bind_user(dev, (agp_bind *)data);

	case AGPIOC_UNBIND:
		return agp_unbind_user(dev, (agp_unbind *)data);
	}

	return EINVAL;
}

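/*
 * mmap() on /dev/agpgart maps the aperture itself: file offset N
 * resolves to the physical address (aperture base + N), which the VM
 * system then maps into the process.
 */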
static int
agp_mmap(struct cdev *kdev, vm_offset_t offset, vm_paddr_t *paddr, int prot)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);

	if (offset > AGP_GET_APERTURE(dev))
		return -1;
	*paddr = rman_get_start(sc->as_aperture) + offset;
	return 0;
}

/* Implementation of the kernel api */

device_t
agp_find_device()
{
	device_t *children, child;
	int i, count;

	if (!agp_devclass)
		return NULL;
	if (devclass_get_devices(agp_devclass, &children, &count) != 0)
		return NULL;
	child = NULL;
	for (i = 0; i < count; i++) {
		if (device_is_attached(children[i])) {
			child = children[i];
			break;
		}
	}
	free(children, M_TEMP);
	return child;
}

enum agp_acquire_state
agp_state(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);
	return sc->as_state;
}

void
agp_get_info(device_t dev, struct agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	info->ai_mode =
	    pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	info->ai_aperture_base = rman_get_start(sc->as_aperture);
	info->ai_aperture_size = rman_get_size(sc->as_aperture);
	info->ai_memory_allowed = sc->as_maxmem;
	info->ai_memory_used = sc->as_allocated;
}

int
agp_acquire(device_t dev)
{
	return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_release(device_t dev)
{
	return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_enable(device_t dev, u_int32_t mode)
{
	return AGP_ENABLE(dev, mode);
}

void *
agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
{
	return (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
}

void
agp_free_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	AGP_FREE_MEMORY(dev, mem);
}

int
agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	return AGP_BIND_MEMORY(dev, mem, offset);
}

int
agp_unbind_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	return AGP_UNBIND_MEMORY(dev, mem);
}

void
agp_memory_info(device_t dev, void *handle, struct agp_memory_info *mi)
{
	struct agp_memory *mem = (struct agp_memory *) handle;

	mi->ami_size = mem->am_size;
	mi->ami_physical = mem->am_physical;
	mi->ami_offset = mem->am_offset;
	mi->ami_is_bound = mem->am_is_bound;
}
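
/*
 * Typical use of the kernel api above (a sketch; error handling and
 * the chipset-specific mode value are omitted or assumed):
 *
 *	device_t agpdev = agp_find_device();
 *	void *handle;
 *
 *	if (agpdev != NULL && agp_acquire(agpdev) == 0) {
 *		agp_enable(agpdev, mode);
 *		handle = agp_alloc_memory(agpdev, 0, 4 * 1024 * 1024);
 *		agp_bind_memory(agpdev, handle, 0);
 *		... use the aperture ...
 *		agp_unbind_memory(agpdev, handle);
 *		agp_free_memory(agpdev, handle);
 *		agp_release(agpdev);
 *	}
 */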