agp.c (189578) -> agp.c (190169)
1/*-
2 * Copyright (c) 2000 Doug Rabson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/dev/agp/agp.c 189578 2009-03-09 13:27:33Z imp $");
28__FBSDID("$FreeBSD: head/sys/dev/agp/agp.c 190169 2009-03-20 18:30:20Z rnoland $");
29
30#include "opt_agp.h"
31#include "opt_bus.h"
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/malloc.h>
36#include <sys/kernel.h>
37#include <sys/module.h>
38#include <sys/bus.h>
39#include <sys/conf.h>
40#include <sys/ioccom.h>
41#include <sys/agpio.h>
42#include <sys/lock.h>
43#include <sys/mutex.h>
44#include <sys/proc.h>
45
46#include <dev/agp/agppriv.h>
47#include <dev/agp/agpvar.h>
48#include <dev/agp/agpreg.h>
49#include <dev/pci/pcivar.h>
50#include <dev/pci/pcireg.h>
51
52#include <vm/vm.h>
53#include <vm/vm_object.h>
54#include <vm/vm_page.h>
55#include <vm/vm_pageout.h>
56#include <vm/pmap.h>
57
58#include <machine/md_var.h>
59#include <machine/bus.h>
60#include <machine/resource.h>
61#include <sys/rman.h>
62
63MODULE_VERSION(agp, 1);
64
65MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");
66
67 /* agp_drv.c */
68static d_open_t agp_open;
69static d_close_t agp_close;
70static d_ioctl_t agp_ioctl;
71static d_mmap_t agp_mmap;
72
73static struct cdevsw agp_cdevsw = {
74 .d_version = D_VERSION,
75 .d_flags = D_NEEDGIANT,
76 .d_open = agp_open,
77 .d_close = agp_close,
78 .d_ioctl = agp_ioctl,
79 .d_mmap = agp_mmap,
80 .d_name = "agp",
81};
82
83static devclass_t agp_devclass;
84#define KDEV2DEV(kdev) devclass_get_device(agp_devclass, dev2unit(kdev))
85
86/* Helper functions for implementing chipset mini drivers. */
87
88void
89agp_flush_cache()
90{
91#if defined(__i386__) || defined(__amd64__)
92 wbinvd();
93#endif
94}
95
96u_int8_t
97agp_find_caps(device_t dev)
98{
99 int capreg;
100
101
102 if (pci_find_extcap(dev, PCIY_AGP, &capreg) != 0)
103 capreg = 0;
104 return (capreg);
105}
106
107/*
108 * Find an AGP display device (if any).
109 */
110static device_t
111agp_find_display(void)
112{
113 devclass_t pci = devclass_find("pci");
114 device_t bus, dev = 0;
115 device_t *kids;
116 int busnum, numkids, i;
117
118 for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
119 bus = devclass_get_device(pci, busnum);
120 if (!bus)
121 continue;
122 if (device_get_children(bus, &kids, &numkids) != 0)
123 continue;
124 for (i = 0; i < numkids; i++) {
125 dev = kids[i];
126 if (pci_get_class(dev) == PCIC_DISPLAY
127 && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
128 if (agp_find_caps(dev)) {
129 free(kids, M_TEMP);
130 return dev;
131 }
132
133 }
134 free(kids, M_TEMP);
135 }
136
137 return 0;
138}
139
140struct agp_gatt *
141agp_alloc_gatt(device_t dev)
142{
143 u_int32_t apsize = AGP_GET_APERTURE(dev);
144 u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
145 struct agp_gatt *gatt;
146
147 if (bootverbose)
148 device_printf(dev,
149 "allocating GATT for aperture of size %dM\n",
150 apsize / (1024*1024));
151
152 if (entries == 0) {
153 device_printf(dev, "bad aperture size\n");
154 return NULL;
155 }
156
157 gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
158 if (!gatt)
159 return 0;
160
161 gatt->ag_entries = entries;
162 gatt->ag_virtual = contigmalloc(entries * sizeof(u_int32_t), M_AGP, 0,
163 0, ~0, PAGE_SIZE, 0);
164 if (!gatt->ag_virtual) {
165 if (bootverbose)
166 device_printf(dev, "contiguous allocation failed\n");
167 free(gatt, M_AGP);
168 return 0;
169 }
170 bzero(gatt->ag_virtual, entries * sizeof(u_int32_t));
171 gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);
172 agp_flush_cache();
173
174 return gatt;
175}
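
For a sense of scale: the GATT holds one 32-bit physical address per AGP page of aperture. A minimal standalone sketch of the arithmetic, assuming AGP_PAGE_SHIFT is 12 (4KB AGP pages, the usual value):

#include <stdio.h>
#include <stdint.h>

/* GATT footprint for a given aperture size, assuming 4KB AGP pages. */
static uint32_t
gatt_bytes(uint32_t apsize)
{
	uint32_t entries = apsize >> 12;	/* one entry per 4KB AGP page */

	return (entries * (uint32_t)sizeof(uint32_t));
}

int
main(void)
{
	/* 64MB aperture -> 16384 entries -> 64KB of contiguous memory,
	 * which agp_alloc_gatt() obtains via contigmalloc(). */
	printf("%u bytes\n", gatt_bytes(64U * 1024 * 1024));
	return (0);
}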
176
177void
178agp_free_gatt(struct agp_gatt *gatt)
179{
180 contigfree(gatt->ag_virtual,
181 gatt->ag_entries * sizeof(u_int32_t), M_AGP);
182 free(gatt, M_AGP);
183}
184
185static u_int agp_max[][2] = {
186 {0, 0},
187 {32, 4},
188 {64, 28},
189 {128, 96},
190 {256, 204},
191 {512, 440},
192 {1024, 942},
193 {2048, 1920},
194 {4096, 3932}
195};
196#define agp_max_size (sizeof(agp_max) / sizeof(agp_max[0]))
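
The table is consumed by agp_generic_attach() below: the first column is system memory in MB, the second the corresponding ceiling on AGP-managed memory. A standalone sketch of that lookup (the 512MB figure is just an example input):

#include <stdio.h>

static unsigned agp_max[][2] = {
	{0, 0}, {32, 4}, {64, 28}, {128, 96}, {256, 204},
	{512, 440}, {1024, 942}, {2048, 1920}, {4096, 3932}
};
#define agp_max_size (sizeof(agp_max) / sizeof(agp_max[0]))

int
main(void)
{
	unsigned memsize = 512;		/* example: 512MB of system RAM */
	unsigned i;

	/* The first row whose memory column is >= memsize wins. */
	for (i = 0; i < agp_max_size; i++)
		if (memsize <= agp_max[i][0])
			break;
	if (i == agp_max_size)		/* more RAM than the table: clamp */
		i = agp_max_size - 1;
	printf("AGP memory ceiling: %uMB\n", agp_max[i][1]);	/* 440 */
	return (0);
}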
197
198/**
199 * Sets the PCI resource which represents the AGP aperture.
200 *
201 * If not called, the default AGP aperture resource of AGP_APBASE will
202 * be used. Must be called before agp_generic_attach().
203 */
204void
205agp_set_aperture_resource(device_t dev, int rid)
206{
207 struct agp_softc *sc = device_get_softc(dev);
208
209 sc->as_aperture_rid = rid;
210}
211
212int
213agp_generic_attach(device_t dev)
214{
215 struct agp_softc *sc = device_get_softc(dev);
216 int i;
217 u_int memsize;
218
219 /*
220 * Find and map the aperture, RF_SHAREABLE for DRM but not RF_ACTIVE
221 * because the kernel doesn't need to map it.
222 */
223 if (sc->as_aperture_rid == 0)
224 sc->as_aperture_rid = AGP_APBASE;
225
226 sc->as_aperture = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
227 &sc->as_aperture_rid, RF_SHAREABLE);
228 if (!sc->as_aperture)
229 return ENOMEM;
230
231 /*
232 * Work out an upper bound for agp memory allocation. This
 233	 * uses a heuristic table from the Linux driver.
234 */
235 memsize = ptoa(Maxmem) >> 20;
236 for (i = 0; i < agp_max_size; i++) {
237 if (memsize <= agp_max[i][0])
238 break;
239 }
240 if (i == agp_max_size) i = agp_max_size - 1;
241 sc->as_maxmem = agp_max[i][1] << 20U;
242
243 /*
244 * The lock is used to prevent re-entry to
245 * agp_generic_bind_memory() since that function can sleep.
246 */
247 mtx_init(&sc->as_lock, "agp lock", NULL, MTX_DEF);
248
249 /*
250 * Initialise stuff for the userland device.
251 */
252 agp_devclass = devclass_find("agp");
253 TAILQ_INIT(&sc->as_memory);
254 sc->as_nextid = 1;
255
256 sc->as_devnode = make_dev(&agp_cdevsw,
257 device_get_unit(dev),
258 UID_ROOT,
259 GID_WHEEL,
260 0600,
261 "agpgart");
262
263 return 0;
264}
265
266void
267agp_free_cdev(device_t dev)
268{
269 struct agp_softc *sc = device_get_softc(dev);
270
271 destroy_dev(sc->as_devnode);
272}
273
274void
275agp_free_res(device_t dev)
276{
277 struct agp_softc *sc = device_get_softc(dev);
278
279 bus_release_resource(dev, SYS_RES_MEMORY, sc->as_aperture_rid,
280 sc->as_aperture);
281 mtx_destroy(&sc->as_lock);
282 agp_flush_cache();
283}
284
285int
286agp_generic_detach(device_t dev)
287{
288
289 agp_free_cdev(dev);
290 agp_free_res(dev);
291 return 0;
292}
293
294/**
295 * Default AGP aperture size detection which simply returns the size of
296 * the aperture's PCI resource.
297 */
298u_int32_t
299agp_generic_get_aperture(device_t dev)
300{
301 struct agp_softc *sc = device_get_softc(dev);
302
303 return rman_get_size(sc->as_aperture);
304}
305
306/**
307 * Default AGP aperture size setting function, which simply doesn't allow
308 * changes to resource size.
309 */
310int
311agp_generic_set_aperture(device_t dev, u_int32_t aperture)
312{
313 u_int32_t current_aperture;
314
315 current_aperture = AGP_GET_APERTURE(dev);
316 if (current_aperture != aperture)
317 return EINVAL;
318 else
319 return 0;
320}
321
322/*
323 * This does the enable logic for v3, with the same topology
324 * restrictions as in place for v2 -- one bus, one device on the bus.
325 */
326static int
327agp_v3_enable(device_t dev, device_t mdev, u_int32_t mode)
328{
329 u_int32_t tstatus, mstatus;
330 u_int32_t command;
331 int rq, sba, fw, rate, arqsz, cal;
332
333 tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
334 mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);
335
336 /* Set RQ to the min of mode, tstatus and mstatus */
337 rq = AGP_MODE_GET_RQ(mode);
338 if (AGP_MODE_GET_RQ(tstatus) < rq)
339 rq = AGP_MODE_GET_RQ(tstatus);
340 if (AGP_MODE_GET_RQ(mstatus) < rq)
341 rq = AGP_MODE_GET_RQ(mstatus);
342
343 /*
344 * ARQSZ - Set the value to the maximum one.
345 * Don't allow the mode register to override values.
346 */
347 arqsz = AGP_MODE_GET_ARQSZ(mode);
 348	if (AGP_MODE_GET_ARQSZ(tstatus) > arqsz)
 349		arqsz = AGP_MODE_GET_ARQSZ(tstatus);
 350	if (AGP_MODE_GET_ARQSZ(mstatus) > arqsz)
 351		arqsz = AGP_MODE_GET_ARQSZ(mstatus);
352
353 /* Calibration cycle - don't allow override by mode register */
354 cal = AGP_MODE_GET_CAL(tstatus);
355 if (AGP_MODE_GET_CAL(mstatus) < cal)
356 cal = AGP_MODE_GET_CAL(mstatus);
357
358 /* SBA must be supported for AGP v3. */
359 sba = 1;
360
361 /* Set FW if all three support it. */
362 fw = (AGP_MODE_GET_FW(tstatus)
363 & AGP_MODE_GET_FW(mstatus)
364 & AGP_MODE_GET_FW(mode));
365
366 /* Figure out the max rate */
367 rate = (AGP_MODE_GET_RATE(tstatus)
368 & AGP_MODE_GET_RATE(mstatus)
369 & AGP_MODE_GET_RATE(mode));
370 if (rate & AGP_MODE_V3_RATE_8x)
371 rate = AGP_MODE_V3_RATE_8x;
372 else
373 rate = AGP_MODE_V3_RATE_4x;
374 if (bootverbose)
375 device_printf(dev, "Setting AGP v3 mode %d\n", rate * 4);
376
377 pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, 0, 4);
378
379 /* Construct the new mode word and tell the hardware */
380 command = 0;
381 command = AGP_MODE_SET_RQ(0, rq);
382 command = AGP_MODE_SET_ARQSZ(command, arqsz);
383 command = AGP_MODE_SET_CAL(command, cal);
384 command = AGP_MODE_SET_SBA(command, sba);
385 command = AGP_MODE_SET_FW(command, fw);
386 command = AGP_MODE_SET_RATE(command, rate);
387 command = AGP_MODE_SET_MODE_3(command, 1);
388 command = AGP_MODE_SET_AGP(command, 1);
389 pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
390 pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);
391
392 return 0;
393}
394
395static int
396agp_v2_enable(device_t dev, device_t mdev, u_int32_t mode)
397{
398 u_int32_t tstatus, mstatus;
399 u_int32_t command;
400 int rq, sba, fw, rate;
401
402 tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
403 mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);
404
405 /* Set RQ to the min of mode, tstatus and mstatus */
406 rq = AGP_MODE_GET_RQ(mode);
407 if (AGP_MODE_GET_RQ(tstatus) < rq)
408 rq = AGP_MODE_GET_RQ(tstatus);
409 if (AGP_MODE_GET_RQ(mstatus) < rq)
410 rq = AGP_MODE_GET_RQ(mstatus);
411
412 /* Set SBA if all three can deal with SBA */
413 sba = (AGP_MODE_GET_SBA(tstatus)
414 & AGP_MODE_GET_SBA(mstatus)
415 & AGP_MODE_GET_SBA(mode));
416
417 /* Similar for FW */
418 fw = (AGP_MODE_GET_FW(tstatus)
419 & AGP_MODE_GET_FW(mstatus)
420 & AGP_MODE_GET_FW(mode));
421
422 /* Figure out the max rate */
423 rate = (AGP_MODE_GET_RATE(tstatus)
424 & AGP_MODE_GET_RATE(mstatus)
425 & AGP_MODE_GET_RATE(mode));
426 if (rate & AGP_MODE_V2_RATE_4x)
427 rate = AGP_MODE_V2_RATE_4x;
428 else if (rate & AGP_MODE_V2_RATE_2x)
429 rate = AGP_MODE_V2_RATE_2x;
430 else
431 rate = AGP_MODE_V2_RATE_1x;
432 if (bootverbose)
433 device_printf(dev, "Setting AGP v2 mode %d\n", rate);
434
435 /* Construct the new mode word and tell the hardware */
436 command = 0;
437 command = AGP_MODE_SET_RQ(0, rq);
438 command = AGP_MODE_SET_SBA(command, sba);
439 command = AGP_MODE_SET_FW(command, fw);
440 command = AGP_MODE_SET_RATE(command, rate);
441 command = AGP_MODE_SET_AGP(command, 1);
442 pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
443 pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);
444
445 return 0;
446}
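
Both enable paths follow the same pattern: clamp RQ to the minimum of the three words, AND the single-bit features together, and pick the highest common rate. A userland sketch with simplified stand-ins for the AGP_MODE_* accessors (bit positions per the AGP 2.0 status-register layout; the sample status values are made up):

#include <stdint.h>
#include <stdio.h>

#define GET_RATE(x)	((x) & 0x7)		/* bits 0-2 */
#define GET_FW(x)	(((x) >> 4) & 1)	/* bit 4 */
#define GET_SBA(x)	(((x) >> 9) & 1)	/* bit 9 */
#define GET_RQ(x)	(((x) >> 24) & 0xff)	/* bits 24-31 */

int
main(void)
{
	uint32_t tstatus = 0x1f000217;	/* made-up bridge status */
	uint32_t mstatus = 0x0f000213;	/* made-up display status */
	uint32_t mode = 0xff000fff;	/* caller requests the maximum */
	uint32_t rq, sba, fw, rate;

	rq = GET_RQ(mode);		/* minimum of the request depths */
	if (GET_RQ(tstatus) < rq)
		rq = GET_RQ(tstatus);
	if (GET_RQ(mstatus) < rq)
		rq = GET_RQ(mstatus);
	sba = GET_SBA(tstatus) & GET_SBA(mstatus) & GET_SBA(mode);
	fw = GET_FW(tstatus) & GET_FW(mstatus) & GET_FW(mode);
	rate = GET_RATE(tstatus) & GET_RATE(mstatus) & GET_RATE(mode);
	if (rate & 4)			/* AGP_MODE_V2_RATE_4x */
		rate = 4;
	else if (rate & 2)		/* AGP_MODE_V2_RATE_2x */
		rate = 2;
	else				/* AGP_MODE_V2_RATE_1x */
		rate = 1;

	/* Prints: negotiated: rq=15 sba=1 fw=1 rate=2x */
	printf("negotiated: rq=%u sba=%u fw=%u rate=%ux\n", rq, sba, fw, rate);
	return (0);
}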
447
448int
449agp_generic_enable(device_t dev, u_int32_t mode)
450{
451 device_t mdev = agp_find_display();
452 u_int32_t tstatus, mstatus;
453
454 if (!mdev) {
455 AGP_DPF("can't find display\n");
456 return ENXIO;
457 }
458
459 tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
460 mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);
461
462 /*
463 * Check display and bridge for AGP v3 support. AGP v3 allows
464 * more variety in topology than v2, e.g. multiple AGP devices
465 * attached to one bridge, or multiple AGP bridges in one
466 * system. This doesn't attempt to address those situations,
467 * but should work fine for a classic single AGP slot system
468 * with AGP v3.
469 */
470 if (AGP_MODE_GET_MODE_3(mode) &&
471 AGP_MODE_GET_MODE_3(tstatus) &&
472 AGP_MODE_GET_MODE_3(mstatus))
473 return (agp_v3_enable(dev, mdev, mode));
474 else
475 return (agp_v2_enable(dev, mdev, mode));
476}
477
478struct agp_memory *
479agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
480{
481 struct agp_softc *sc = device_get_softc(dev);
482 struct agp_memory *mem;
483
484 if ((size & (AGP_PAGE_SIZE - 1)) != 0)
485 return 0;
486
487 if (sc->as_allocated + size > sc->as_maxmem)
488 return 0;
489
490 if (type != 0) {
491 printf("agp_generic_alloc_memory: unsupported type %d\n",
492 type);
493 return 0;
494 }
495
496 mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
497 mem->am_id = sc->as_nextid++;
498 mem->am_size = size;
499 mem->am_type = 0;
500 mem->am_obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(size)));
501 mem->am_physical = 0;
502 mem->am_offset = 0;
503 mem->am_is_bound = 0;
504 TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
505 sc->as_allocated += size;
506
507 return mem;
508}
509
510int
511agp_generic_free_memory(device_t dev, struct agp_memory *mem)
512{
513 struct agp_softc *sc = device_get_softc(dev);
514
515 if (mem->am_is_bound)
516 return EBUSY;
517
518 sc->as_allocated -= mem->am_size;
519 TAILQ_REMOVE(&sc->as_memory, mem, am_link);
520 vm_object_deallocate(mem->am_obj);
521 free(mem, M_AGP);
522 return 0;
523}
524
525int
526agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
527 vm_offset_t offset)
528{
529 struct agp_softc *sc = device_get_softc(dev);
530 vm_offset_t i, j, k;
531 vm_page_t m;
532 int error;
533
534 /* Do some sanity checks first. */
 535 -	if (offset < 0 || (offset & (AGP_PAGE_SIZE - 1)) != 0 ||
 535 +	if ((offset & (AGP_PAGE_SIZE - 1)) != 0 ||
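
The lone functional change in r190169 is dropping the offset < 0 test: vm_offset_t is an unsigned integer type on FreeBSD (e.g. unsigned long on 64-bit platforms), so the comparison could never be true. The check was dead code, the kind of tautology compilers flag (GCC's -Wtype-limits, for instance). A standalone illustration:

#include <stdio.h>

typedef unsigned long vm_offset_t;	/* unsigned, as in the kernel */

int
main(void)
{
	vm_offset_t offset = (vm_offset_t)-1;	/* "negative" wraps to huge */

	/* Always false for an unsigned type; this is the removed branch. */
	if (offset < 0)
		printf("unreachable\n");
	else
		printf("offset = %#lx, never negative\n", offset);
	return (0);
}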
536 offset + mem->am_size > AGP_GET_APERTURE(dev)) {
537 device_printf(dev, "binding memory at bad offset %#x\n",
538 (int)offset);
539 return EINVAL;
540 }
541
542 /*
543 * Allocate the pages early, before acquiring the lock,
544 * because vm_page_grab() used with VM_ALLOC_RETRY may
545 * block and we can't hold a mutex while blocking.
546 */
547 VM_OBJECT_LOCK(mem->am_obj);
548 for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
549 /*
550 * Find a page from the object and wire it
551 * down. This page will be mapped using one or more
552 * entries in the GATT (assuming that PAGE_SIZE >=
 553	 * AGP_PAGE_SIZE). If this is the first call to bind,
554 * the pages will be allocated and zeroed.
555 */
556 m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
557 VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
558 AGP_DPF("found page pa=%#jx\n", (uintmax_t)VM_PAGE_TO_PHYS(m));
559 }
560 VM_OBJECT_UNLOCK(mem->am_obj);
561
562 mtx_lock(&sc->as_lock);
563
564 if (mem->am_is_bound) {
565 device_printf(dev, "memory already bound\n");
566 error = EINVAL;
567 VM_OBJECT_LOCK(mem->am_obj);
568 i = 0;
569 goto bad;
570 }
571
572 /*
573 * Bind the individual pages and flush the chipset's
574 * TLB.
575 */
576 VM_OBJECT_LOCK(mem->am_obj);
577 for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
578 m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));
579
580 /*
581 * Install entries in the GATT, making sure that if
582 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
583 * aligned to PAGE_SIZE, we don't modify too many GATT
584 * entries.
585 */
586 for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
587 j += AGP_PAGE_SIZE) {
588 vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
589 AGP_DPF("binding offset %#jx to pa %#jx\n",
590 (uintmax_t)offset + i + j, (uintmax_t)pa);
591 error = AGP_BIND_PAGE(dev, offset + i + j, pa);
592 if (error) {
593 /*
594 * Bail out. Reverse all the mappings
595 * and unwire the pages.
596 */
597 for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
598 AGP_UNBIND_PAGE(dev, offset + k);
599 goto bad;
600 }
601 }
602 vm_page_wakeup(m);
603 }
604 VM_OBJECT_UNLOCK(mem->am_obj);
605
606 /*
607 * Flush the cpu cache since we are providing a new mapping
608 * for these pages.
609 */
610 agp_flush_cache();
611
612 /*
613 * Make sure the chipset gets the new mappings.
614 */
615 AGP_FLUSH_TLB(dev);
616
617 mem->am_offset = offset;
618 mem->am_is_bound = 1;
619
620 mtx_unlock(&sc->as_lock);
621
622 return 0;
623bad:
624 mtx_unlock(&sc->as_lock);
625 VM_OBJECT_LOCK_ASSERT(mem->am_obj, MA_OWNED);
626 for (k = 0; k < mem->am_size; k += PAGE_SIZE) {
627 m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(k));
628 if (k >= i)
629 vm_page_wakeup(m);
630 vm_page_lock_queues();
631 vm_page_unwire(m, 0);
632 vm_page_unlock_queues();
633 }
634 VM_OBJECT_UNLOCK(mem->am_obj);
635
636 return error;
637}
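
The inner j loop matters on platforms where the VM page size exceeds the AGP page size: each wired page then fills several GATT slots, and the i + j < mem->am_size guard stops short when am_size is not a multiple of PAGE_SIZE. A toy standalone version (the 8KB VM page size is hypothetical, chosen only to make the split visible):

#include <stdio.h>

#define PAGE_SIZE	8192	/* hypothetical VM page size */
#define AGP_PAGE_SIZE	4096

int
main(void)
{
	unsigned am_size = 3 * AGP_PAGE_SIZE;	/* 12KB: 1.5 VM pages */
	unsigned i, j;

	/* Prints three slots; the guard suppresses a fourth at 0x3000. */
	for (i = 0; i < am_size; i += PAGE_SIZE)
		for (j = 0; j < PAGE_SIZE && i + j < am_size;
		    j += AGP_PAGE_SIZE)
			printf("GATT slot at offset %#x <- page %#x + %#x\n",
			    i + j, i, j);
	return (0);
}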
638
639int
640agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
641{
642 struct agp_softc *sc = device_get_softc(dev);
643 vm_page_t m;
644 int i;
645
646 mtx_lock(&sc->as_lock);
647
648 if (!mem->am_is_bound) {
649 device_printf(dev, "memory is not bound\n");
650 mtx_unlock(&sc->as_lock);
651 return EINVAL;
652 }
653
654
655 /*
656 * Unbind the individual pages and flush the chipset's
657 * TLB. Unwire the pages so they can be swapped.
658 */
659 for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
660 AGP_UNBIND_PAGE(dev, mem->am_offset + i);
661 VM_OBJECT_LOCK(mem->am_obj);
662 for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
663 m = vm_page_lookup(mem->am_obj, atop(i));
664 vm_page_lock_queues();
665 vm_page_unwire(m, 0);
666 vm_page_unlock_queues();
667 }
668 VM_OBJECT_UNLOCK(mem->am_obj);
669
670 agp_flush_cache();
671 AGP_FLUSH_TLB(dev);
672
673 mem->am_offset = 0;
674 mem->am_is_bound = 0;
675
676 mtx_unlock(&sc->as_lock);
677
678 return 0;
679}
680
681/* Helper functions for implementing user/kernel api */
682
683static int
684agp_acquire_helper(device_t dev, enum agp_acquire_state state)
685{
686 struct agp_softc *sc = device_get_softc(dev);
687
688 if (sc->as_state != AGP_ACQUIRE_FREE)
689 return EBUSY;
690 sc->as_state = state;
691
692 return 0;
693}
694
695static int
696agp_release_helper(device_t dev, enum agp_acquire_state state)
697{
698 struct agp_softc *sc = device_get_softc(dev);
699
700 if (sc->as_state == AGP_ACQUIRE_FREE)
701 return 0;
702
703 if (sc->as_state != state)
704 return EBUSY;
705
706 sc->as_state = AGP_ACQUIRE_FREE;
707 return 0;
708}
709
710static struct agp_memory *
711agp_find_memory(device_t dev, int id)
712{
713 struct agp_softc *sc = device_get_softc(dev);
714 struct agp_memory *mem;
715
716 AGP_DPF("searching for memory block %d\n", id);
717 TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
718 AGP_DPF("considering memory block %d\n", mem->am_id);
719 if (mem->am_id == id)
720 return mem;
721 }
722 return 0;
723}
724
725/* Implementation of the userland ioctl api */
726
727static int
728agp_info_user(device_t dev, agp_info *info)
729{
730 struct agp_softc *sc = device_get_softc(dev);
731
732 bzero(info, sizeof *info);
733 info->bridge_id = pci_get_devid(dev);
734 info->agp_mode =
735 pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
736 info->aper_base = rman_get_start(sc->as_aperture);
737 info->aper_size = AGP_GET_APERTURE(dev) >> 20;
738 info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
739 info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;
740
741 return 0;
742}
743
744static int
745agp_setup_user(device_t dev, agp_setup *setup)
746{
747 return AGP_ENABLE(dev, setup->agp_mode);
748}
749
750static int
751agp_allocate_user(device_t dev, agp_allocate *alloc)
752{
753 struct agp_memory *mem;
754
755 mem = AGP_ALLOC_MEMORY(dev,
756 alloc->type,
757 alloc->pg_count << AGP_PAGE_SHIFT);
758 if (mem) {
759 alloc->key = mem->am_id;
760 alloc->physical = mem->am_physical;
761 return 0;
762 } else {
763 return ENOMEM;
764 }
765}
766
767static int
768agp_deallocate_user(device_t dev, int id)
769{
 770	struct agp_memory *mem = agp_find_memory(dev, id);
771
772 if (mem) {
773 AGP_FREE_MEMORY(dev, mem);
774 return 0;
775 } else {
776 return ENOENT;
777 }
778}
779
780static int
781agp_bind_user(device_t dev, agp_bind *bind)
782{
783 struct agp_memory *mem = agp_find_memory(dev, bind->key);
784
785 if (!mem)
786 return ENOENT;
787
788 return AGP_BIND_MEMORY(dev, mem, bind->pg_start << AGP_PAGE_SHIFT);
789}
790
791static int
792agp_unbind_user(device_t dev, agp_unbind *unbind)
793{
794 struct agp_memory *mem = agp_find_memory(dev, unbind->key);
795
796 if (!mem)
797 return ENOENT;
798
799 return AGP_UNBIND_MEMORY(dev, mem);
800}
801
802static int
803agp_open(struct cdev *kdev, int oflags, int devtype, struct thread *td)
804{
805 device_t dev = KDEV2DEV(kdev);
806 struct agp_softc *sc = device_get_softc(dev);
807
808 if (!sc->as_isopen) {
809 sc->as_isopen = 1;
810 device_busy(dev);
811 }
812
813 return 0;
814}
815
816static int
817agp_close(struct cdev *kdev, int fflag, int devtype, struct thread *td)
818{
819 device_t dev = KDEV2DEV(kdev);
820 struct agp_softc *sc = device_get_softc(dev);
821 struct agp_memory *mem;
822
823 /*
824 * Clear the GATT and force release on last close
825 */
826 while ((mem = TAILQ_FIRST(&sc->as_memory)) != 0) {
827 if (mem->am_is_bound)
828 AGP_UNBIND_MEMORY(dev, mem);
829 AGP_FREE_MEMORY(dev, mem);
830 }
831 if (sc->as_state == AGP_ACQUIRE_USER)
832 agp_release_helper(dev, AGP_ACQUIRE_USER);
833 sc->as_isopen = 0;
834 device_unbusy(dev);
835
836 return 0;
837}
838
839static int
840agp_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
841{
842 device_t dev = KDEV2DEV(kdev);
843
844 switch (cmd) {
845 case AGPIOC_INFO:
846 return agp_info_user(dev, (agp_info *) data);
847
848 case AGPIOC_ACQUIRE:
849 return agp_acquire_helper(dev, AGP_ACQUIRE_USER);
850
851 case AGPIOC_RELEASE:
852 return agp_release_helper(dev, AGP_ACQUIRE_USER);
853
854 case AGPIOC_SETUP:
855 return agp_setup_user(dev, (agp_setup *)data);
856
857 case AGPIOC_ALLOCATE:
858 return agp_allocate_user(dev, (agp_allocate *)data);
859
860 case AGPIOC_DEALLOCATE:
861 return agp_deallocate_user(dev, *(int *) data);
862
863 case AGPIOC_BIND:
864 return agp_bind_user(dev, (agp_bind *)data);
865
866 case AGPIOC_UNBIND:
867 return agp_unbind_user(dev, (agp_unbind *)data);
868
869 }
870
871 return EINVAL;
872}
873
874static int
875agp_mmap(struct cdev *kdev, vm_offset_t offset, vm_paddr_t *paddr, int prot)
876{
877 device_t dev = KDEV2DEV(kdev);
878 struct agp_softc *sc = device_get_softc(dev);
879
880 if (offset > AGP_GET_APERTURE(dev))
881 return -1;
882 *paddr = rman_get_start(sc->as_aperture) + offset;
883 return 0;
884}
885
886/* Implementation of the kernel api */
887
888device_t
889agp_find_device()
890{
891 device_t *children, child;
892 int i, count;
893
894 if (!agp_devclass)
895 return NULL;
896 if (devclass_get_devices(agp_devclass, &children, &count) != 0)
897 return NULL;
898 child = NULL;
899 for (i = 0; i < count; i++) {
900 if (device_is_attached(children[i])) {
901 child = children[i];
902 break;
903 }
904 }
905 free(children, M_TEMP);
906 return child;
907}
908
909enum agp_acquire_state
910agp_state(device_t dev)
911{
912 struct agp_softc *sc = device_get_softc(dev);
913 return sc->as_state;
914}
915
916void
917agp_get_info(device_t dev, struct agp_info *info)
918{
919 struct agp_softc *sc = device_get_softc(dev);
920
921 info->ai_mode =
922 pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
923 info->ai_aperture_base = rman_get_start(sc->as_aperture);
924 info->ai_aperture_size = rman_get_size(sc->as_aperture);
925 info->ai_memory_allowed = sc->as_maxmem;
926 info->ai_memory_used = sc->as_allocated;
927}
928
929int
930agp_acquire(device_t dev)
931{
932 return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
933}
934
935int
936agp_release(device_t dev)
937{
938 return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
939}
940
941int
942agp_enable(device_t dev, u_int32_t mode)
943{
944 return AGP_ENABLE(dev, mode);
945}
946
947void *agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
948{
949 return (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
950}
951
952void agp_free_memory(device_t dev, void *handle)
953{
954 struct agp_memory *mem = (struct agp_memory *) handle;
955 AGP_FREE_MEMORY(dev, mem);
956}
957
958int agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
959{
960 struct agp_memory *mem = (struct agp_memory *) handle;
961 return AGP_BIND_MEMORY(dev, mem, offset);
962}
963
964int agp_unbind_memory(device_t dev, void *handle)
965{
966 struct agp_memory *mem = (struct agp_memory *) handle;
967 return AGP_UNBIND_MEMORY(dev, mem);
968}
969
970void agp_memory_info(device_t dev, void *handle, struct
971 agp_memory_info *mi)
972{
973 struct agp_memory *mem = (struct agp_memory *) handle;
974
975 mi->ami_size = mem->am_size;
976 mi->ami_physical = mem->am_physical;
977 mi->ami_offset = mem->am_offset;
978 mi->ami_is_bound = mem->am_is_bound;
979}
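
Taken together, these last entry points form the in-kernel consumer API. A hedged sketch of the call sequence a client such as a DRM driver might follow; the function name agp_example_client is made up, error handling is pared down, and the header list is approximate:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/agpio.h>
#include <dev/agp/agpvar.h>

static int
agp_example_client(void)
{
	device_t agpdev;
	void *handle;
	int error;

	if ((agpdev = agp_find_device()) == NULL)
		return (ENXIO);			/* no attached agp device */
	if ((error = agp_acquire(agpdev)) != 0)
		return (error);			/* someone else owns it */
	agp_enable(agpdev, AGP_MODE_V2_RATE_4x);	/* negotiate a mode */

	/* type 0 memory, 16 pages; handle is really a struct agp_memory *. */
	handle = agp_alloc_memory(agpdev, 0, 16 * PAGE_SIZE);
	if (handle != NULL) {
		agp_bind_memory(agpdev, handle, 0);	/* aperture offset 0 */
		/* ... GART-backed memory is now usable ... */
		agp_unbind_memory(agpdev, handle);
		agp_free_memory(agpdev, handle);
	}
	agp_release(agpdev);
	return (0);
}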