--- head/sys/dev/agp/agp.c	(revision 111462)
+++ head/sys/dev/agp/agp.c	(revision 111815)
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
- * $FreeBSD: head/sys/dev/agp/agp.c 111462 2003-02-25 03:21:22Z mux $
+ * $FreeBSD: head/sys/dev/agp/agp.c 111815 2003-03-03 12:15:54Z phk $
 */

#include "opt_bus.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/agpio.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>

#include <pci/pcivar.h>
#include <pci/pcireg.h>
#include <pci/agppriv.h>
#include <pci/agpvar.h>
#include <pci/agpreg.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>

#include <machine/md_var.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

MODULE_VERSION(agp, 1);

MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");

#define CDEV_MAJOR      148
                        /* agp_drv.c */
static d_open_t agp_open;
static d_close_t agp_close;
static d_ioctl_t agp_ioctl;
static d_mmap_t agp_mmap;

static struct cdevsw agp_cdevsw = {
-       /* open */      agp_open,
-       /* close */     agp_close,
-       /* read */      noread,
-       /* write */     nowrite,
-       /* ioctl */     agp_ioctl,
-       /* poll */      nopoll,
-       /* mmap */      agp_mmap,
-       /* strategy */  nostrategy,
-       /* name */      "agp",
-       /* maj */       CDEV_MAJOR,
-       /* dump */      nodump,
-       /* psize */     nopsize,
-       /* flags */     D_TTY,
+       .d_open =       agp_open,
+       .d_close =      agp_close,
+       .d_ioctl =      agp_ioctl,
+       .d_mmap =       agp_mmap,
+       .d_name =       "agp",
+       .d_maj =        CDEV_MAJOR,
+       .d_flags =      D_TTY,
};
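
/*
 * Editorial note on the hunk above: revision 111815 rewrites the table
 * with C99 designated initializers. Any cdevsw member not named (read,
 * write, poll, strategy, dump, psize) is implicitly zero-initialized, so
 * the explicit no*() placeholders and the strict positional ordering of
 * the old form are no longer required.
 */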

static devclass_t agp_devclass;
#define KDEV2DEV(kdev)  devclass_get_device(agp_devclass, minor(kdev))

/* Helper functions for implementing chipset mini drivers. */

void
agp_flush_cache()
{
#ifdef __i386__
        wbinvd();
#endif
#ifdef __alpha__
        /* FIXME: This is most likely not correct as it doesn't flush CPU
         * write caches, but we don't have a facility to do that and
         * this is all linux does, too */
        alpha_mb();
#endif
}

u_int8_t
agp_find_caps(device_t dev)
{
        u_int32_t status;
        u_int8_t ptr, next;

        /*
         * Check the CAP_LIST bit of the PCI status register first.
         */
        status = pci_read_config(dev, PCIR_STATUS, 2);
        if (!(status & 0x10))
                return 0;

        /*
         * Traverse the capabilities list.
         */
        for (ptr = pci_read_config(dev, AGP_CAPPTR, 1);
             ptr != 0;
             ptr = next) {
                u_int32_t capid = pci_read_config(dev, ptr, 4);
                next = AGP_CAPID_GET_NEXT_PTR(capid);

                /*
                 * If this capability entry ID is 2, then we are done.
                 */
                if (AGP_CAPID_GET_CAP_ID(capid) == 2)
                        return ptr;
        }

        return 0;
}

/*
 * Find an AGP display device (if any).
 */
static device_t
agp_find_display(void)
{
        devclass_t pci = devclass_find("pci");
        device_t bus, dev = 0;
        device_t *kids;
        int busnum, numkids, i;

        for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
                bus = devclass_get_device(pci, busnum);
                if (!bus)
                        continue;
                device_get_children(bus, &kids, &numkids);
                for (i = 0; i < numkids; i++) {
                        dev = kids[i];
                        if (pci_get_class(dev) == PCIC_DISPLAY
                            && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
                                if (agp_find_caps(dev)) {
                                        free(kids, M_TEMP);
                                        return dev;
                                }
                }
                free(kids, M_TEMP);
        }

        return 0;
}

struct agp_gatt *
agp_alloc_gatt(device_t dev)
{
        u_int32_t apsize = AGP_GET_APERTURE(dev);
        u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
        struct agp_gatt *gatt;

        if (bootverbose)
                device_printf(dev,
                    "allocating GATT for aperture of size %dM\n",
                    apsize / (1024*1024));

        gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
        if (!gatt)
                return 0;

        gatt->ag_entries = entries;
        gatt->ag_virtual = contigmalloc(entries * sizeof(u_int32_t), M_AGP, 0,
            0, ~0, PAGE_SIZE, 0);
        if (!gatt->ag_virtual) {
                if (bootverbose)
                        device_printf(dev, "contiguous allocation failed\n");
                free(gatt, M_AGP);
                return 0;
        }
        bzero(gatt->ag_virtual, entries * sizeof(u_int32_t));
        gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);
        agp_flush_cache();

        return gatt;
}

void
agp_free_gatt(struct agp_gatt *gatt)
{
        contigfree(gatt->ag_virtual,
            gatt->ag_entries * sizeof(u_int32_t), M_AGP);
        free(gatt, M_AGP);
}

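/*
 * Upper-bound table for AGP memory, used by agp_generic_attach() below:
 * column 0 is installed system memory in MB, column 1 the maximum AGP
 * memory in MB allowed for that much RAM (the heuristic borrowed from the
 * Linux driver); e.g. a 256MB machine may use at most 204MB for AGP.
 */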
static int agp_max[][2] = {
        {0,     0},
        {32,    4},
        {64,    28},
        {128,   96},
        {256,   204},
        {512,   440},
        {1024,  942},
        {2048,  1920},
        {4096,  3932}
};
#define agp_max_size    (sizeof(agp_max) / sizeof(agp_max[0]))

int
agp_generic_attach(device_t dev)
{
        struct agp_softc *sc = device_get_softc(dev);
        int rid, memsize, i;

        /*
         * Find and map the aperture.
         */
        rid = AGP_APBASE;
        sc->as_aperture = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
            0, ~0, 1, RF_ACTIVE);
        if (!sc->as_aperture)
                return ENOMEM;

        /*
         * Work out an upper bound for agp memory allocation. This
         * uses a heuristic table from the Linux driver.
         */
        memsize = ptoa(Maxmem) >> 20;
        for (i = 0; i < agp_max_size; i++) {
                if (memsize <= agp_max[i][0])
                        break;
        }
        if (i == agp_max_size)
                i = agp_max_size - 1;
        sc->as_maxmem = agp_max[i][1] << 20U;

        /*
         * The lock is used to prevent re-entry to
         * agp_generic_bind_memory() since that function can sleep.
         */
        lockinit(&sc->as_lock, PZERO|PCATCH, "agplk", 0, 0);

        /*
         * Initialise stuff for the userland device.
         */
        agp_devclass = devclass_find("agp");
        TAILQ_INIT(&sc->as_memory);
        sc->as_nextid = 1;

        sc->as_devnode = make_dev(&agp_cdevsw,
                                  device_get_unit(dev),
                                  UID_ROOT,
                                  GID_WHEEL,
                                  0600,
                                  "agpgart");

        return 0;
}

int
agp_generic_detach(device_t dev)
{
        struct agp_softc *sc = device_get_softc(dev);
        bus_release_resource(dev, SYS_RES_MEMORY, AGP_APBASE, sc->as_aperture);
        lockmgr(&sc->as_lock, LK_DRAIN, 0, curthread);
        lockdestroy(&sc->as_lock);
        destroy_dev(sc->as_devnode);
        agp_flush_cache();
        return 0;
}

int
agp_generic_enable(device_t dev, u_int32_t mode)
{
        device_t mdev = agp_find_display();
        u_int32_t tstatus, mstatus;
        u_int32_t command;
        int rq, sba, fw, rate;

        if (!mdev) {
                AGP_DPF("can't find display\n");
                return ENXIO;
        }

        tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
        mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

        /* Set RQ to the min of mode, tstatus and mstatus */
        rq = AGP_MODE_GET_RQ(mode);
        if (AGP_MODE_GET_RQ(tstatus) < rq)
                rq = AGP_MODE_GET_RQ(tstatus);
        if (AGP_MODE_GET_RQ(mstatus) < rq)
                rq = AGP_MODE_GET_RQ(mstatus);

        /* Set SBA if all three can deal with SBA */
        sba = (AGP_MODE_GET_SBA(tstatus)
               & AGP_MODE_GET_SBA(mstatus)
               & AGP_MODE_GET_SBA(mode));

        /* Similar for FW */
        fw = (AGP_MODE_GET_FW(tstatus)
              & AGP_MODE_GET_FW(mstatus)
              & AGP_MODE_GET_FW(mode));

        /* Figure out the max rate */
        rate = (AGP_MODE_GET_RATE(tstatus)
                & AGP_MODE_GET_RATE(mstatus)
                & AGP_MODE_GET_RATE(mode));
        if (rate & AGP_MODE_RATE_4x)
                rate = AGP_MODE_RATE_4x;
        else if (rate & AGP_MODE_RATE_2x)
                rate = AGP_MODE_RATE_2x;
        else
                rate = AGP_MODE_RATE_1x;

        /* Construct the new mode word and tell the hardware */
        command = AGP_MODE_SET_RQ(0, rq);
        command = AGP_MODE_SET_SBA(command, sba);
        command = AGP_MODE_SET_FW(command, fw);
        command = AGP_MODE_SET_RATE(command, rate);
        command = AGP_MODE_SET_AGP(command, 1);
        pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
        pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

        return 0;
}
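
/*
 * Worked example (editorial, hypothetical values): if the target bridge's
 * status reports rates 1x|2x|4x with sideband addressing, but the display
 * master reports only 1x|2x and no SBA, the negotiation above yields
 * rate = 2x and sba = 0, and the same command word is written to both
 * ends of the link.
 */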

struct agp_memory *
agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
{
        struct agp_softc *sc = device_get_softc(dev);
        struct agp_memory *mem;

        if ((size & (AGP_PAGE_SIZE - 1)) != 0)
                return 0;

        if (sc->as_allocated + size > sc->as_maxmem)
                return 0;

        if (type != 0) {
                printf("agp_generic_alloc_memory: unsupported type %d\n",
                       type);
                return 0;
        }

        mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
        mem->am_id = sc->as_nextid++;
        mem->am_size = size;
        mem->am_type = 0;
        mem->am_obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(size)));
        mem->am_physical = 0;
        mem->am_offset = 0;
        mem->am_is_bound = 0;
        TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
        sc->as_allocated += size;

        return mem;
}

int
agp_generic_free_memory(device_t dev, struct agp_memory *mem)
{
        struct agp_softc *sc = device_get_softc(dev);

        if (mem->am_is_bound)
                return EBUSY;

        sc->as_allocated -= mem->am_size;
        TAILQ_REMOVE(&sc->as_memory, mem, am_link);
        vm_object_deallocate(mem->am_obj);
        free(mem, M_AGP);
        return 0;
}

int
agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
                        vm_offset_t offset)
{
        struct agp_softc *sc = device_get_softc(dev);
        vm_offset_t i, j, k;
        vm_page_t m;
        int error;

        lockmgr(&sc->as_lock, LK_EXCLUSIVE, 0, curthread);

        if (mem->am_is_bound) {
                device_printf(dev, "memory already bound\n");
                lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
                return EINVAL;
        }

        if (offset < 0
            || (offset & (AGP_PAGE_SIZE - 1)) != 0
            || offset + mem->am_size > AGP_GET_APERTURE(dev)) {
                device_printf(dev, "binding memory at bad offset %#x\n",
                    (int) offset);
                lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
                return EINVAL;
        }

        /*
         * Bind the individual pages and flush the chipset's
         * TLB.
         *
         * XXX Presumably, this needs to be the pci address on alpha
         * (i.e. use alpha_XXX_dmamap()). I don't have access to any
         * alpha AGP hardware to check.
         */
        for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
                /*
                 * Find a page from the object and wire it
                 * down. This page will be mapped using one or more
                 * entries in the GATT (assuming that PAGE_SIZE >=
                 * AGP_PAGE_SIZE). If this is the first call to bind,
                 * the pages will be allocated and zeroed.
                 */
                m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
                    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
                if ((m->flags & PG_ZERO) == 0)
                        pmap_zero_page(m);
                AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));

                /*
                 * Install entries in the GATT, making sure that if
                 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
                 * aligned to PAGE_SIZE, we don't modify too many GATT
                 * entries.
                 */
                for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
                     j += AGP_PAGE_SIZE) {
                        vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
                        AGP_DPF("binding offset %#x to pa %#x\n",
                            offset + i + j, pa);
                        error = AGP_BIND_PAGE(dev, offset + i + j, pa);
                        if (error) {
                                /*
                                 * Bail out. Reverse all the mappings
                                 * and unwire the pages.
                                 */
                                vm_page_lock_queues();
                                vm_page_wakeup(m);
                                vm_page_unlock_queues();
                                for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
                                        AGP_UNBIND_PAGE(dev, offset + k);
                                for (k = 0; k <= i; k += PAGE_SIZE) {
                                        m = vm_page_lookup(mem->am_obj,
                                            OFF_TO_IDX(k));
                                        vm_page_lock_queues();
                                        vm_page_unwire(m, 0);
                                        vm_page_unlock_queues();
                                }
                                lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
                                return error;
                        }
                }
                vm_page_lock_queues();
                vm_page_wakeup(m);
                vm_page_unlock_queues();
        }

        /*
         * Flush the cpu cache since we are providing a new mapping
         * for these pages.
         */
        agp_flush_cache();

        /*
         * Make sure the chipset gets the new mappings.
         */
        AGP_FLUSH_TLB(dev);

        mem->am_offset = offset;
        mem->am_is_bound = 1;

        lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);

        return 0;
}
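
/*
 * Editorial note: AGP_PAGE_SIZE is 4096, so on a platform where PAGE_SIZE
 * is 8192 (e.g. alpha) each page grabbed in the loop above fills two
 * consecutive GATT entries; with 4096-byte pages the inner loop runs
 * exactly once per page.
 */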

int
agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
{
        struct agp_softc *sc = device_get_softc(dev);
        vm_page_t m;
        int i;

        lockmgr(&sc->as_lock, LK_EXCLUSIVE, 0, curthread);

        if (!mem->am_is_bound) {
                device_printf(dev, "memory is not bound\n");
                lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
                return EINVAL;
        }

        /*
         * Unbind the individual pages and flush the chipset's
         * TLB. Unwire the pages so they can be swapped.
         */
        for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
                AGP_UNBIND_PAGE(dev, mem->am_offset + i);
        for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
                m = vm_page_lookup(mem->am_obj, atop(i));
                vm_page_lock_queues();
                vm_page_unwire(m, 0);
                vm_page_unlock_queues();
        }

        agp_flush_cache();
        AGP_FLUSH_TLB(dev);

        mem->am_offset = 0;
        mem->am_is_bound = 0;

        lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);

        return 0;
}

/* Helper functions for implementing user/kernel api */

static int
agp_acquire_helper(device_t dev, enum agp_acquire_state state)
{
        struct agp_softc *sc = device_get_softc(dev);

        if (sc->as_state != AGP_ACQUIRE_FREE)
                return EBUSY;
        sc->as_state = state;

        return 0;
}

static int
agp_release_helper(device_t dev, enum agp_acquire_state state)
{
        struct agp_softc *sc = device_get_softc(dev);

        if (sc->as_state == AGP_ACQUIRE_FREE)
                return 0;

        if (sc->as_state != state)
                return EBUSY;

        sc->as_state = AGP_ACQUIRE_FREE;
        return 0;
}

static struct agp_memory *
agp_find_memory(device_t dev, int id)
{
        struct agp_softc *sc = device_get_softc(dev);
        struct agp_memory *mem;

        AGP_DPF("searching for memory block %d\n", id);
        TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
                AGP_DPF("considering memory block %d\n", mem->am_id);
                if (mem->am_id == id)
                        return mem;
        }
        return 0;
}

/* Implementation of the userland ioctl api */

static int
agp_info_user(device_t dev, agp_info *info)
{
        struct agp_softc *sc = device_get_softc(dev);

        bzero(info, sizeof *info);
        info->bridge_id = pci_get_devid(dev);
        info->agp_mode =
            pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
        info->aper_base = rman_get_start(sc->as_aperture);
        info->aper_size = AGP_GET_APERTURE(dev) >> 20;
        info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
        info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;

        return 0;
}

static int
agp_setup_user(device_t dev, agp_setup *setup)
{
        return AGP_ENABLE(dev, setup->agp_mode);
}

static int
agp_allocate_user(device_t dev, agp_allocate *alloc)
{
        struct agp_memory *mem;

        mem = AGP_ALLOC_MEMORY(dev,
                               alloc->type,
                               alloc->pg_count << AGP_PAGE_SHIFT);
        if (mem) {
                alloc->key = mem->am_id;
                alloc->physical = mem->am_physical;
                return 0;
        } else {
                return ENOMEM;
        }
}

static int
agp_deallocate_user(device_t dev, int id)
{
        struct agp_memory *mem = agp_find_memory(dev, id);

        if (mem) {
                AGP_FREE_MEMORY(dev, mem);
                return 0;
        } else {
                return ENOENT;
        }
}

static int
agp_bind_user(device_t dev, agp_bind *bind)
{
        struct agp_memory *mem = agp_find_memory(dev, bind->key);

        if (!mem)
                return ENOENT;

        return AGP_BIND_MEMORY(dev, mem, bind->pg_start << AGP_PAGE_SHIFT);
}

static int
agp_unbind_user(device_t dev, agp_unbind *unbind)
{
        struct agp_memory *mem = agp_find_memory(dev, unbind->key);

        if (!mem)
                return ENOENT;

        return AGP_UNBIND_MEMORY(dev, mem);
}

static int
agp_open(dev_t kdev, int oflags, int devtype, struct thread *td)
{
        device_t dev = KDEV2DEV(kdev);
        struct agp_softc *sc = device_get_softc(dev);

        if (!sc->as_isopen) {
                sc->as_isopen = 1;
                device_busy(dev);
        }

        return 0;
}

static int
agp_close(dev_t kdev, int fflag, int devtype, struct thread *td)
{
        device_t dev = KDEV2DEV(kdev);
        struct agp_softc *sc = device_get_softc(dev);
        struct agp_memory *mem;

        /*
         * Clear the GATT and force release on last close
         */
        while ((mem = TAILQ_FIRST(&sc->as_memory)) != 0) {
                if (mem->am_is_bound)
                        AGP_UNBIND_MEMORY(dev, mem);
                AGP_FREE_MEMORY(dev, mem);
        }
        if (sc->as_state == AGP_ACQUIRE_USER)
                agp_release_helper(dev, AGP_ACQUIRE_USER);
        sc->as_isopen = 0;
        device_unbusy(dev);

        return 0;
}

static int
agp_ioctl(dev_t kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
        device_t dev = KDEV2DEV(kdev);

        switch (cmd) {
        case AGPIOC_INFO:
                return agp_info_user(dev, (agp_info *) data);

        case AGPIOC_ACQUIRE:
                return agp_acquire_helper(dev, AGP_ACQUIRE_USER);

        case AGPIOC_RELEASE:
                return agp_release_helper(dev, AGP_ACQUIRE_USER);

        case AGPIOC_SETUP:
                return agp_setup_user(dev, (agp_setup *)data);

        case AGPIOC_ALLOCATE:
                return agp_allocate_user(dev, (agp_allocate *)data);

        case AGPIOC_DEALLOCATE:
                return agp_deallocate_user(dev, *(int *) data);

        case AGPIOC_BIND:
                return agp_bind_user(dev, (agp_bind *)data);

        case AGPIOC_UNBIND:
                return agp_unbind_user(dev, (agp_unbind *)data);

        }

        return EINVAL;
}

static int
agp_mmap(dev_t kdev, vm_offset_t offset, vm_offset_t *paddr, int prot)
{
        device_t dev = KDEV2DEV(kdev);
        struct agp_softc *sc = device_get_softc(dev);

        if (offset > AGP_GET_APERTURE(dev))
                return -1;
        *paddr = rman_get_start(sc->as_aperture) + offset;
        return 0;
}

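/*
 * Editorial sketch (not part of the original file): a minimal userland
 * consumer of the ioctl interface above. It relies only on the AGPIOC_*
 * commands and the agp_info structure from <sys/agpio.h> that agp_ioctl()
 * already handles; error handling is abbreviated.
 *
 *      #include <sys/types.h>
 *      #include <sys/agpio.h>
 *      #include <sys/ioctl.h>
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <unistd.h>
 *
 *      int
 *      main(void)
 *      {
 *              agp_info info;
 *              int fd = open("/dev/agpgart", O_RDWR);
 *
 *              if (fd < 0)
 *                      return 1;
 *              if (ioctl(fd, AGPIOC_ACQUIRE) == 0 &&
 *                  ioctl(fd, AGPIOC_INFO, &info) == 0)
 *                      printf("aperture at %#lx, %luMB\n",
 *                          (u_long)info.aper_base, (u_long)info.aper_size);
 *              ioctl(fd, AGPIOC_RELEASE);
 *              close(fd);
 *              return 0;
 *      }
 */
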
/* Implementation of the kernel api */

device_t
agp_find_device()
{
        if (!agp_devclass)
                return 0;
        return devclass_get_device(agp_devclass, 0);
}

enum agp_acquire_state
agp_state(device_t dev)
{
        struct agp_softc *sc = device_get_softc(dev);
        return sc->as_state;
}

void
agp_get_info(device_t dev, struct agp_info *info)
{
        struct agp_softc *sc = device_get_softc(dev);

        info->ai_mode =
            pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
        info->ai_aperture_base = rman_get_start(sc->as_aperture);
        info->ai_aperture_size = rman_get_size(sc->as_aperture);
        info->ai_aperture_va = (vm_offset_t) rman_get_virtual(sc->as_aperture);
        info->ai_memory_allowed = sc->as_maxmem;
        info->ai_memory_used = sc->as_allocated;
}

int
agp_acquire(device_t dev)
{
        return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_release(device_t dev)
{
        return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_enable(device_t dev, u_int32_t mode)
{
        return AGP_ENABLE(dev, mode);
}

void *
agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
{
        return (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
}

void
agp_free_memory(device_t dev, void *handle)
{
        struct agp_memory *mem = (struct agp_memory *) handle;
        AGP_FREE_MEMORY(dev, mem);
}

int
agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
{
        struct agp_memory *mem = (struct agp_memory *) handle;
        return AGP_BIND_MEMORY(dev, mem, offset);
}

int
agp_unbind_memory(device_t dev, void *handle)
{
        struct agp_memory *mem = (struct agp_memory *) handle;
        return AGP_UNBIND_MEMORY(dev, mem);
}

void
agp_memory_info(device_t dev, void *handle, struct agp_memory_info *mi)
{
        struct agp_memory *mem = (struct agp_memory *) handle;

        mi->ami_size = mem->am_size;
        mi->ami_physical = mem->am_physical;
        mi->ami_offset = mem->am_offset;
        mi->ami_is_bound = mem->am_is_bound;
}
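
/*
 * Editorial sketch (not part of the original file): how a kernel-side
 * client might drive the api above. The function name my_dev_attach and
 * the 64KB allocation are hypothetical; error handling is abbreviated.
 *
 *      static int
 *      my_dev_attach(void)
 *      {
 *              struct agp_info info;
 *              device_t agp = agp_find_device();
 *              void *handle;
 *
 *              if (!agp || agp_acquire(agp))
 *                      return ENXIO;
 *              agp_get_info(agp, &info);
 *              agp_enable(agp, info.ai_mode);
 *              handle = agp_alloc_memory(agp, 0, 64 * 1024);
 *              if (!handle) {
 *                      agp_release(agp);
 *                      return ENOMEM;
 *              }
 *              agp_bind_memory(agp, handle, 0);
 *              ... use the aperture ...
 *              agp_unbind_memory(agp, handle);
 *              agp_free_memory(agp, handle);
 *              agp_release(agp);
 *              return 0;
 *      }
 */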