Deleted Added
sdiff udiff text old ( 129567 ) new ( 129579 )
full compact
1/*-
2 * Copyright (c) 2000 Doug Rabson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/dev/agp/agp.c 129579 2004-05-22 13:06:38Z mux $");
29
30#include "opt_bus.h"
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/malloc.h>
35#include <sys/kernel.h>
36#include <sys/bus.h>
37#include <sys/conf.h>
38#include <sys/ioccom.h>
39#include <sys/agpio.h>
40#include <sys/lock.h>
41#include <sys/mutex.h>
42#include <sys/proc.h>
43
44#include <dev/pci/pcivar.h>
45#include <dev/pci/pcireg.h>
46#include <pci/agppriv.h>
47#include <pci/agpvar.h>
48#include <pci/agpreg.h>
49
50#include <vm/vm.h>
51#include <vm/vm_object.h>
52#include <vm/vm_page.h>
53#include <vm/vm_pageout.h>
54#include <vm/pmap.h>
55
56#include <machine/md_var.h>
57#include <machine/bus.h>
58#include <machine/resource.h>
59#include <sys/rman.h>
60
/* Advertise version 1 of the agp kernel module interface. */
MODULE_VERSION(agp, 1);

/* malloc(9) type tag used for all AGP driver allocations. */
MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");
64
 /* agp_drv.c */
/* Entry points for the /dev/agpgart character device. */
static d_open_t agp_open;
static d_close_t agp_close;
static d_ioctl_t agp_ioctl;
static d_mmap_t agp_mmap;

static struct cdevsw agp_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,	/* legacy driver: runs under Giant */
	.d_open =	agp_open,
	.d_close =	agp_close,
	.d_ioctl =	agp_ioctl,
	.d_mmap =	agp_mmap,
	.d_name =	"agp",
};

static devclass_t agp_devclass;
/* Map a character device's minor number back to its agp device_t. */
#define KDEV2DEV(kdev)	devclass_get_device(agp_devclass, minor(kdev))
83
84/* Helper functions for implementing chipset mini drivers. */
85
/*
 * Flush CPU caches so the chipset observes up-to-date GATT contents
 * after we have written new page-table entries.  No-op on platforms
 * without an explicit flush primitive.
 */
void
agp_flush_cache(void)
{
#ifdef __i386__
	wbinvd();
#endif
#ifdef __alpha__
	/* FIXME: This is most likely not correct as it doesn't flush CPU
	 * write caches, but we don't have a facility to do that and
	 * this is all linux does, too */
	alpha_mb();
#endif
}
99
100u_int8_t
101agp_find_caps(device_t dev)
102{
103 u_int32_t status;
104 u_int8_t ptr, next;
105
106 /*
107 * Check the CAP_LIST bit of the PCI status register first.
108 */
109 status = pci_read_config(dev, PCIR_STATUS, 2);
110 if (!(status & 0x10))
111 return 0;
112
113 /*
114 * Traverse the capabilities list.
115 */
116 for (ptr = pci_read_config(dev, AGP_CAPPTR, 1);
117 ptr != 0;
118 ptr = next) {
119 u_int32_t capid = pci_read_config(dev, ptr, 4);
120 next = AGP_CAPID_GET_NEXT_PTR(capid);
121
122 /*
123 * If this capability entry ID is 2, then we are done.
124 */
125 if (AGP_CAPID_GET_CAP_ID(capid) == 2)
126 return ptr;
127 }
128
129 return 0;
130}
131
132/*
133 * Find an AGP display device (if any).
134 */
135static device_t
136agp_find_display(void)
137{
138 devclass_t pci = devclass_find("pci");
139 device_t bus, dev = 0;
140 device_t *kids;
141 int busnum, numkids, i;
142
143 for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
144 bus = devclass_get_device(pci, busnum);
145 if (!bus)
146 continue;
147 device_get_children(bus, &kids, &numkids);
148 for (i = 0; i < numkids; i++) {
149 dev = kids[i];
150 if (pci_get_class(dev) == PCIC_DISPLAY
151 && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
152 if (agp_find_caps(dev)) {
153 free(kids, M_TEMP);
154 return dev;
155 }
156
157 }
158 free(kids, M_TEMP);
159 }
160
161 return 0;
162}
163
164struct agp_gatt *
165agp_alloc_gatt(device_t dev)
166{
167 u_int32_t apsize = AGP_GET_APERTURE(dev);
168 u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
169 struct agp_gatt *gatt;
170
171 if (bootverbose)
172 device_printf(dev,
173 "allocating GATT for aperture of size %dM\n",
174 apsize / (1024*1024));
175
176 if (entries == 0) {
177 device_printf(dev, "bad aperture size\n");
178 return NULL;
179 }
180
181 gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
182 if (!gatt)
183 return 0;
184
185 gatt->ag_entries = entries;
186 gatt->ag_virtual = contigmalloc(entries * sizeof(u_int32_t), M_AGP, 0,
187 0, ~0, PAGE_SIZE, 0);
188 if (!gatt->ag_virtual) {
189 if (bootverbose)
190 device_printf(dev, "contiguous allocation failed\n");
191 free(gatt, M_AGP);
192 return 0;
193 }
194 bzero(gatt->ag_virtual, entries * sizeof(u_int32_t));
195 gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);
196 agp_flush_cache();
197
198 return gatt;
199}
200
/*
 * Release a GATT previously obtained from agp_alloc_gatt().
 */
void
agp_free_gatt(struct agp_gatt *gatt)
{
	/* The table came from contigmalloc(); free it the same way. */
	contigfree(gatt->ag_virtual,
		   gatt->ag_entries * sizeof(u_int32_t), M_AGP);
	free(gatt, M_AGP);
}
208
/*
 * Heuristic table (borrowed from the Linux agpgart driver) mapping
 * total system memory in MB (first column) to the maximum amount of
 * AGP memory in MB that clients may allocate (second column).
 */
static int agp_max[][2] = {
	{0,	0},
	{32,	4},
	{64,	28},
	{128,	96},
	{256,	204},
	{512,	440},
	{1024,	942},
	{2048,	1920},
	{4096,	3932}
};
#define agp_max_size	(sizeof(agp_max) / sizeof(agp_max[0]))
221
222int
223agp_generic_attach(device_t dev)
224{
225 struct agp_softc *sc = device_get_softc(dev);
226 int rid, memsize, i;
227
228 /*
229 * Find and map the aperture.
230 */
231 rid = AGP_APBASE;
232 sc->as_aperture = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
233 RF_ACTIVE);
234 if (!sc->as_aperture)
235 return ENOMEM;
236
237 /*
238 * Work out an upper bound for agp memory allocation. This
239 * uses a heurisitc table from the Linux driver.
240 */
241 memsize = ptoa(Maxmem) >> 20;
242 for (i = 0; i < agp_max_size; i++) {
243 if (memsize <= agp_max[i][0])
244 break;
245 }
246 if (i == agp_max_size) i = agp_max_size - 1;
247 sc->as_maxmem = agp_max[i][1] << 20U;
248
249 /*
250 * The lock is used to prevent re-entry to
251 * agp_generic_bind_memory() since that function can sleep.
252 */
253 mtx_init(&sc->as_lock, "agp lock", NULL, MTX_DEF);
254
255 /*
256 * Initialise stuff for the userland device.
257 */
258 agp_devclass = devclass_find("agp");
259 TAILQ_INIT(&sc->as_memory);
260 sc->as_nextid = 1;
261
262 sc->as_devnode = make_dev(&agp_cdevsw,
263 device_get_unit(dev),
264 UID_ROOT,
265 GID_WHEEL,
266 0600,
267 "agpgart");
268
269 return 0;
270}
271
/*
 * Generic detach: tear down everything agp_generic_attach() set up.
 */
int
agp_generic_detach(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);
	bus_release_resource(dev, SYS_RES_MEMORY, AGP_APBASE, sc->as_aperture);
	mtx_destroy(&sc->as_lock);
	destroy_dev(sc->as_devnode);
	/* Leave caches consistent for whatever driver claims the hw next. */
	agp_flush_cache();
	return 0;
}
282
283/*
284 * This does the enable logic for v3, with the same topology
285 * restrictions as in place for v2 -- one bus, one device on the bus.
286 */
287static int
288agp_v3_enable(device_t dev, device_t mdev, u_int32_t mode)
289{
290 u_int32_t tstatus, mstatus;
291 u_int32_t command;
292 int rq, sba, fw, rate, arqsz, cal;
293
294 tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
295 mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);
296
297 /* Set RQ to the min of mode, tstatus and mstatus */
298 rq = AGP_MODE_GET_RQ(mode);
299 if (AGP_MODE_GET_RQ(tstatus) < rq)
300 rq = AGP_MODE_GET_RQ(tstatus);
301 if (AGP_MODE_GET_RQ(mstatus) < rq)
302 rq = AGP_MODE_GET_RQ(mstatus);
303
304 /*
305 * ARQSZ - Set the value to the maximum one.
306 * Don't allow the mode register to override values.
307 */
308 arqsz = AGP_MODE_GET_ARQSZ(mode);
309 if (AGP_MODE_GET_ARQSZ(tstatus) > rq)
310 rq = AGP_MODE_GET_ARQSZ(tstatus);
311 if (AGP_MODE_GET_ARQSZ(mstatus) > rq)
312 rq = AGP_MODE_GET_ARQSZ(mstatus);
313
314 /* Calibration cycle - don't allow override by mode register */
315 cal = AGP_MODE_GET_CAL(tstatus);
316 if (AGP_MODE_GET_CAL(mstatus) < cal)
317 cal = AGP_MODE_GET_CAL(mstatus);
318
319 /* SBA must be supported for AGP v3. */
320 sba = 1;
321
322 /* Set FW if all three support it. */
323 fw = (AGP_MODE_GET_FW(tstatus)
324 & AGP_MODE_GET_FW(mstatus)
325 & AGP_MODE_GET_FW(mode));
326
327 /* Figure out the max rate */
328 rate = (AGP_MODE_GET_RATE(tstatus)
329 & AGP_MODE_GET_RATE(mstatus)
330 & AGP_MODE_GET_RATE(mode));
331 if (rate & AGP_MODE_V3_RATE_8x)
332 rate = AGP_MODE_V3_RATE_8x;
333 else
334 rate = AGP_MODE_V3_RATE_4x;
335 if (bootverbose)
336 device_printf(dev, "Setting AGP v3 mode %d\n", rate * 4);
337
338 pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, 0, 4);
339
340 /* Construct the new mode word and tell the hardware */
341 command = AGP_MODE_SET_RQ(0, rq);
342 command = AGP_MODE_SET_ARQSZ(command, arqsz);
343 command = AGP_MODE_SET_CAL(command, cal);
344 command = AGP_MODE_SET_SBA(command, sba);
345 command = AGP_MODE_SET_FW(command, fw);
346 command = AGP_MODE_SET_RATE(command, rate);
347 command = AGP_MODE_SET_AGP(command, 1);
348 pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
349 pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);
350
351 return 0;
352}
353
354static int
355agp_v2_enable(device_t dev, device_t mdev, u_int32_t mode)
356{
357 u_int32_t tstatus, mstatus;
358 u_int32_t command;
359 int rq, sba, fw, rate;
360
361 tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
362 mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);
363
364 /* Set RQ to the min of mode, tstatus and mstatus */
365 rq = AGP_MODE_GET_RQ(mode);
366 if (AGP_MODE_GET_RQ(tstatus) < rq)
367 rq = AGP_MODE_GET_RQ(tstatus);
368 if (AGP_MODE_GET_RQ(mstatus) < rq)
369 rq = AGP_MODE_GET_RQ(mstatus);
370
371 /* Set SBA if all three can deal with SBA */
372 sba = (AGP_MODE_GET_SBA(tstatus)
373 & AGP_MODE_GET_SBA(mstatus)
374 & AGP_MODE_GET_SBA(mode));
375
376 /* Similar for FW */
377 fw = (AGP_MODE_GET_FW(tstatus)
378 & AGP_MODE_GET_FW(mstatus)
379 & AGP_MODE_GET_FW(mode));
380
381 /* Figure out the max rate */
382 rate = (AGP_MODE_GET_RATE(tstatus)
383 & AGP_MODE_GET_RATE(mstatus)
384 & AGP_MODE_GET_RATE(mode));
385 if (rate & AGP_MODE_V2_RATE_4x)
386 rate = AGP_MODE_V2_RATE_4x;
387 else if (rate & AGP_MODE_V2_RATE_2x)
388 rate = AGP_MODE_V2_RATE_2x;
389 else
390 rate = AGP_MODE_V2_RATE_1x;
391 if (bootverbose)
392 device_printf(dev, "Setting AGP v2 mode %d\n", rate);
393
394 /* Construct the new mode word and tell the hardware */
395 command = AGP_MODE_SET_RQ(0, rq);
396 command = AGP_MODE_SET_SBA(command, sba);
397 command = AGP_MODE_SET_FW(command, fw);
398 command = AGP_MODE_SET_RATE(command, rate);
399 command = AGP_MODE_SET_AGP(command, 1);
400 pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
401 pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);
402
403 return 0;
404}
405
406int
407agp_generic_enable(device_t dev, u_int32_t mode)
408{
409 device_t mdev = agp_find_display();
410 u_int32_t tstatus, mstatus;
411
412 if (!mdev) {
413 AGP_DPF("can't find display\n");
414 return ENXIO;
415 }
416
417 tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
418 mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);
419
420 /*
421 * Check display and bridge for AGP v3 support. AGP v3 allows
422 * more variety in topology than v2, e.g. multiple AGP devices
423 * attached to one bridge, or multiple AGP bridges in one
424 * system. This doesn't attempt to address those situations,
425 * but should work fine for a classic single AGP slot system
426 * with AGP v3.
427 */
428 if (AGP_MODE_GET_MODE_3(tstatus) && AGP_MODE_GET_MODE_3(mstatus))
429 return (agp_v3_enable(dev, mdev, mode));
430 else
431 return (agp_v2_enable(dev, mdev, mode));
432}
433
/*
 * Default AGP_ALLOC_MEMORY method: create a VM-object-backed memory
 * block of 'size' bytes.  Returns NULL (0) if the size is not
 * AGP-page aligned, the allocation limit would be exceeded, or the
 * type is unsupported.
 */
struct agp_memory *
agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	/* Size must be a multiple of the AGP page size. */
	if ((size & (AGP_PAGE_SIZE - 1)) != 0)
		return 0;

	/* Enforce the system-wide allocation limit set at attach time. */
	if (sc->as_allocated + size > sc->as_maxmem)
		return 0;

	/* Only the default memory type is implemented here. */
	if (type != 0) {
		printf("agp_generic_alloc_memory: unsupported type %d\n",
		       type);
		return 0;
	}

	mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
	mem->am_id = sc->as_nextid++;
	mem->am_size = size;
	mem->am_type = 0;
	/* Back the block with anonymous memory; pages are wired at bind. */
	mem->am_obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(size)));
	mem->am_physical = 0;
	mem->am_offset = 0;
	mem->am_is_bound = 0;
	TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
	sc->as_allocated += size;

	return mem;
}
465
/*
 * Default AGP_FREE_MEMORY method: release an unbound block and its
 * backing VM object.  Bound blocks must be unbound first (EBUSY).
 */
int
agp_generic_free_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (mem->am_is_bound)
		return EBUSY;

	sc->as_allocated -= mem->am_size;
	TAILQ_REMOVE(&sc->as_memory, mem, am_link);
	vm_object_deallocate(mem->am_obj);
	free(mem, M_AGP);
	return 0;
}
480
/*
 * Default AGP_BIND_MEMORY method: wire the block's pages into
 * physical memory and enter them into the GATT at 'offset' within
 * the aperture.  Serialized by sc->as_lock because the page wiring
 * below can sleep.  Returns 0 or an errno value.
 */
int
agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
			vm_offset_t offset)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_offset_t i, j, k;
	vm_page_t m;
	int error;

	mtx_lock(&sc->as_lock);

	if (mem->am_is_bound) {
		device_printf(dev, "memory already bound\n");
		mtx_unlock(&sc->as_lock);
		return EINVAL;
	}

	/*
	 * NOTE(review): 'offset' is unsigned, so the 'offset < 0' test
	 * below can never fire; the alignment and range checks are
	 * what actually protect the aperture.
	 */
	if (offset < 0
	    || (offset & (AGP_PAGE_SIZE - 1)) != 0
	    || offset + mem->am_size > AGP_GET_APERTURE(dev)) {
		device_printf(dev, "binding memory at bad offset %#x\n",
			      (int) offset);
		mtx_unlock(&sc->as_lock);
		return EINVAL;
	}

	/*
	 * Bind the individual pages and flush the chipset's
	 * TLB.
	 *
	 * XXX Presumably, this needs to be the pci address on alpha
	 * (i.e. use alpha_XXX_dmamap()). I don't have access to any
	 * alpha AGP hardware to check.
	 */
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		/*
		 * Find a page from the object and wire it
		 * down. This page will be mapped using one or more
		 * entries in the GATT (assuming that PAGE_SIZE >=
		 * AGP_PAGE_SIZE. If this is the first call to bind,
		 * the pages will be allocated and zeroed.
		 */
		VM_OBJECT_LOCK(mem->am_obj);
		m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
		    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(mem->am_obj);
		AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));

		/*
		 * Install entries in the GATT, making sure that if
		 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
		 * aligned to PAGE_SIZE, we don't modify too many GATT
		 * entries.
		 */
		for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
		     j += AGP_PAGE_SIZE) {
			vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
			AGP_DPF("binding offset %#x to pa %#x\n",
				offset + i + j, pa);
			error = AGP_BIND_PAGE(dev, offset + i + j, pa);
			if (error) {
				/*
				 * Bail out. Reverse all the mappings
				 * and unwire the pages.
				 */
				vm_page_lock_queues();
				vm_page_wakeup(m);
				vm_page_unlock_queues();
				/* Remove every GATT entry installed so far. */
				for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
					AGP_UNBIND_PAGE(dev, offset + k);
				/* Unwire all pages grabbed so far (0..i). */
				VM_OBJECT_LOCK(mem->am_obj);
				for (k = 0; k <= i; k += PAGE_SIZE) {
					m = vm_page_lookup(mem->am_obj,
							   OFF_TO_IDX(k));
					vm_page_lock_queues();
					vm_page_unwire(m, 0);
					vm_page_unlock_queues();
				}
				VM_OBJECT_UNLOCK(mem->am_obj);
				mtx_unlock(&sc->as_lock);
				return error;
			}
		}
		/* Done with this page; wake it for other consumers. */
		vm_page_lock_queues();
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}

	/*
	 * Flush the cpu cache since we are providing a new mapping
	 * for these pages.
	 */
	agp_flush_cache();

	/*
	 * Make sure the chipset gets the new mappings.
	 */
	AGP_FLUSH_TLB(dev);

	mem->am_offset = offset;
	mem->am_is_bound = 1;

	mtx_unlock(&sc->as_lock);

	return 0;
}
587
588int
589agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
590{
591 struct agp_softc *sc = device_get_softc(dev);
592 vm_page_t m;
593 int i;
594
595 mtx_lock(&sc->as_lock);
596
597 if (!mem->am_is_bound) {
598 device_printf(dev, "memory is not bound\n");
599 mtx_unlock(&sc->as_lock);
600 return EINVAL;
601 }
602
603
604 /*
605 * Unbind the individual pages and flush the chipset's
606 * TLB. Unwire the pages so they can be swapped.
607 */
608 for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
609 AGP_UNBIND_PAGE(dev, mem->am_offset + i);
610 VM_OBJECT_LOCK(mem->am_obj);
611 for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
612 m = vm_page_lookup(mem->am_obj, atop(i));
613 vm_page_lock_queues();
614 vm_page_unwire(m, 0);
615 vm_page_unlock_queues();
616 }
617 VM_OBJECT_UNLOCK(mem->am_obj);
618
619 agp_flush_cache();
620 AGP_FLUSH_TLB(dev);
621
622 mem->am_offset = 0;
623 mem->am_is_bound = 0;
624
625 mtx_unlock(&sc->as_lock);
626
627 return 0;
628}
629
630/* Helper functions for implementing user/kernel api */
631
632static int
633agp_acquire_helper(device_t dev, enum agp_acquire_state state)
634{
635 struct agp_softc *sc = device_get_softc(dev);
636
637 if (sc->as_state != AGP_ACQUIRE_FREE)
638 return EBUSY;
639 sc->as_state = state;
640
641 return 0;
642}
643
644static int
645agp_release_helper(device_t dev, enum agp_acquire_state state)
646{
647 struct agp_softc *sc = device_get_softc(dev);
648
649 if (sc->as_state == AGP_ACQUIRE_FREE)
650 return 0;
651
652 if (sc->as_state != state)
653 return EBUSY;
654
655 sc->as_state = AGP_ACQUIRE_FREE;
656 return 0;
657}
658
659static struct agp_memory *
660agp_find_memory(device_t dev, int id)
661{
662 struct agp_softc *sc = device_get_softc(dev);
663 struct agp_memory *mem;
664
665 AGP_DPF("searching for memory block %d\n", id);
666 TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
667 AGP_DPF("considering memory block %d\n", mem->am_id);
668 if (mem->am_id == id)
669 return mem;
670 }
671 return 0;
672}
673
674/* Implementation of the userland ioctl api */
675
676static int
677agp_info_user(device_t dev, agp_info *info)
678{
679 struct agp_softc *sc = device_get_softc(dev);
680
681 bzero(info, sizeof *info);
682 info->bridge_id = pci_get_devid(dev);
683 info->agp_mode =
684 pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
685 info->aper_base = rman_get_start(sc->as_aperture);
686 info->aper_size = AGP_GET_APERTURE(dev) >> 20;
687 info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
688 info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;
689
690 return 0;
691}
692
/*
 * AGPIOC_SETUP handler: program the requested AGP mode.
 */
static int
agp_setup_user(device_t dev, agp_setup *setup)
{
	return AGP_ENABLE(dev, setup->agp_mode);
}
698
699static int
700agp_allocate_user(device_t dev, agp_allocate *alloc)
701{
702 struct agp_memory *mem;
703
704 mem = AGP_ALLOC_MEMORY(dev,
705 alloc->type,
706 alloc->pg_count << AGP_PAGE_SHIFT);
707 if (mem) {
708 alloc->key = mem->am_id;
709 alloc->physical = mem->am_physical;
710 return 0;
711 } else {
712 return ENOMEM;
713 }
714}
715
716static int
717agp_deallocate_user(device_t dev, int id)
718{
719 struct agp_memory *mem = agp_find_memory(dev, id);;
720
721 if (mem) {
722 AGP_FREE_MEMORY(dev, mem);
723 return 0;
724 } else {
725 return ENOENT;
726 }
727}
728
729static int
730agp_bind_user(device_t dev, agp_bind *bind)
731{
732 struct agp_memory *mem = agp_find_memory(dev, bind->key);
733
734 if (!mem)
735 return ENOENT;
736
737 return AGP_BIND_MEMORY(dev, mem, bind->pg_start << AGP_PAGE_SHIFT);
738}
739
740static int
741agp_unbind_user(device_t dev, agp_unbind *unbind)
742{
743 struct agp_memory *mem = agp_find_memory(dev, unbind->key);
744
745 if (!mem)
746 return ENOENT;
747
748 return AGP_UNBIND_MEMORY(dev, mem);
749}
750
751static int
752agp_open(dev_t kdev, int oflags, int devtype, struct thread *td)
753{
754 device_t dev = KDEV2DEV(kdev);
755 struct agp_softc *sc = device_get_softc(dev);
756
757 if (!sc->as_isopen) {
758 sc->as_isopen = 1;
759 device_busy(dev);
760 }
761
762 return 0;
763}
764
765static int
766agp_close(dev_t kdev, int fflag, int devtype, struct thread *td)
767{
768 device_t dev = KDEV2DEV(kdev);
769 struct agp_softc *sc = device_get_softc(dev);
770 struct agp_memory *mem;
771
772 /*
773 * Clear the GATT and force release on last close
774 */
775 while ((mem = TAILQ_FIRST(&sc->as_memory)) != 0) {
776 if (mem->am_is_bound)
777 AGP_UNBIND_MEMORY(dev, mem);
778 AGP_FREE_MEMORY(dev, mem);
779 }
780 if (sc->as_state == AGP_ACQUIRE_USER)
781 agp_release_helper(dev, AGP_ACQUIRE_USER);
782 sc->as_isopen = 0;
783 device_unbusy(dev);
784
785 return 0;
786}
787
788static int
789agp_ioctl(dev_t kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
790{
791 device_t dev = KDEV2DEV(kdev);
792
793 switch (cmd) {
794 case AGPIOC_INFO:
795 return agp_info_user(dev, (agp_info *) data);
796
797 case AGPIOC_ACQUIRE:
798 return agp_acquire_helper(dev, AGP_ACQUIRE_USER);
799
800 case AGPIOC_RELEASE:
801 return agp_release_helper(dev, AGP_ACQUIRE_USER);
802
803 case AGPIOC_SETUP:
804 return agp_setup_user(dev, (agp_setup *)data);
805
806 case AGPIOC_ALLOCATE:
807 return agp_allocate_user(dev, (agp_allocate *)data);
808
809 case AGPIOC_DEALLOCATE:
810 return agp_deallocate_user(dev, *(int *) data);
811
812 case AGPIOC_BIND:
813 return agp_bind_user(dev, (agp_bind *)data);
814
815 case AGPIOC_UNBIND:
816 return agp_unbind_user(dev, (agp_unbind *)data);
817
818 }
819
820 return EINVAL;
821}
822
823static int
824agp_mmap(dev_t kdev, vm_offset_t offset, vm_paddr_t *paddr, int prot)
825{
826 device_t dev = KDEV2DEV(kdev);
827 struct agp_softc *sc = device_get_softc(dev);
828
829 if (offset > AGP_GET_APERTURE(dev))
830 return -1;
831 *paddr = rman_get_start(sc->as_aperture) + offset;
832 return 0;
833}
834
835/* Implementation of the kernel api */
836
837device_t
838agp_find_device()
839{
840 if (!agp_devclass)
841 return 0;
842 return devclass_get_device(agp_devclass, 0);
843}
844
/*
 * Report who (if anyone) currently has the AGP device acquired.
 */
enum agp_acquire_state
agp_state(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);
	return sc->as_state;
}
851
/*
 * Kernel API: describe the aperture and the allocation accounting
 * for in-kernel AGP clients (e.g. DRM).
 */
void
agp_get_info(device_t dev, struct agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	info->ai_mode =
		pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	info->ai_aperture_base = rman_get_start(sc->as_aperture);
	info->ai_aperture_size = rman_get_size(sc->as_aperture);
	info->ai_aperture_va = (vm_offset_t) rman_get_virtual(sc->as_aperture);
	info->ai_memory_allowed = sc->as_maxmem;
	info->ai_memory_used = sc->as_allocated;
}
865
/*
 * Kernel API: acquire the AGP device for in-kernel use.
 */
int
agp_acquire(device_t dev)
{
	return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
}
871
/*
 * Kernel API: release a kernel-mode acquisition of the AGP device.
 */
int
agp_release(device_t dev)
{
	return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
}
877
/*
 * Kernel API: program the AGP mode (wrapper around the chipset's
 * AGP_ENABLE method).
 */
int
agp_enable(device_t dev, u_int32_t mode)
{
	return AGP_ENABLE(dev, mode);
}
883
/*
 * Kernel API: allocate 'bytes' of AGP memory; returns an opaque
 * handle (really a struct agp_memory *) or NULL on failure.
 */
void *agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
{
	return (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
}
888
/*
 * Kernel API: free a handle returned by agp_alloc_memory().
 */
void agp_free_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	AGP_FREE_MEMORY(dev, mem);
}
894
/*
 * Kernel API: bind a memory handle at the given aperture offset.
 */
int agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	return AGP_BIND_MEMORY(dev, mem, offset);
}
900
/*
 * Kernel API: unbind a previously bound memory handle.
 */
int agp_unbind_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	return AGP_UNBIND_MEMORY(dev, mem);
}
906
/*
 * Kernel API: report a memory handle's size, physical address,
 * aperture offset and bound state.
 */
void agp_memory_info(device_t dev, void *handle, struct
		     agp_memory_info *mi)
{
	struct agp_memory *mem = (struct agp_memory *) handle;

	mi->ami_size = mem->am_size;
	mi->ami_physical = mem->am_physical;
	mi->ami_offset = mem->am_offset;
	mi->ami_is_bound = mem->am_is_bound;
}
916}