agp.c (r219902 → r235782)
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/agp/agp.c 235782 2012-05-22 10:59:26Z kib $");

#include "opt_agp.h"
#include "opt_bus.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/agpio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

#include <dev/agp/agppriv.h>
#include <dev/agp/agpvar.h>
#include <dev/agp/agpreg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>

#include <machine/md_var.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

MODULE_VERSION(agp, 1);

MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");

/* agp_drv.c */
static d_open_t agp_open;
static d_close_t agp_close;
static d_ioctl_t agp_ioctl;
static d_mmap_t agp_mmap;

static struct cdevsw agp_cdevsw = {
        .d_version = D_VERSION,
        .d_flags = D_NEEDGIANT,
        .d_open = agp_open,
        .d_close = agp_close,
        .d_ioctl = agp_ioctl,
        .d_mmap = agp_mmap,
        .d_name = "agp",
};

static devclass_t agp_devclass;

/* Helper functions for implementing chipset mini drivers. */

void
agp_flush_cache()
{
#if defined(__i386__) || defined(__amd64__)
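        /* WBINVD: write back, then invalidate, all CPU caches. */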
        wbinvd();
#endif
}

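/*
 * Return the offset of the device's AGP capability in PCI config
 * space, or 0 if it has no AGP capability.
 */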
u_int8_t
agp_find_caps(device_t dev)
{
        int capreg;

        if (pci_find_cap(dev, PCIY_AGP, &capreg) != 0)
                capreg = 0;
        return (capreg);
}

/*
 * Find an AGP display device (if any).
 */
static device_t
agp_find_display(void)
{
        devclass_t pci = devclass_find("pci");
        device_t bus, dev = 0;
        device_t *kids;
        int busnum, numkids, i;

        for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
                bus = devclass_get_device(pci, busnum);
                if (!bus)
                        continue;
                if (device_get_children(bus, &kids, &numkids) != 0)
                        continue;
                for (i = 0; i < numkids; i++) {
                        dev = kids[i];
                        if (pci_get_class(dev) == PCIC_DISPLAY
                            && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
                                if (agp_find_caps(dev)) {
                                        free(kids, M_TEMP);
                                        return dev;
                                }
                }
                free(kids, M_TEMP);
        }

        return 0;
}

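/*
 * The GATT (graphics address translation table) holds one 32-bit
 * physical-address entry per AGP page in the aperture.  The chipset
 * reads it by physical address, so the table itself must be
 * physically contiguous, hence the contigmalloc() below.
 */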
struct agp_gatt *
agp_alloc_gatt(device_t dev)
{
        u_int32_t apsize = AGP_GET_APERTURE(dev);
        u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
        struct agp_gatt *gatt;

        if (bootverbose)
                device_printf(dev,
                    "allocating GATT for aperture of size %dM\n",
                    apsize / (1024*1024));

        if (entries == 0) {
                device_printf(dev, "bad aperture size\n");
                return NULL;
        }

        gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
        if (!gatt)
                return 0;

        gatt->ag_entries = entries;
        gatt->ag_virtual = contigmalloc(entries * sizeof(u_int32_t), M_AGP, 0,
            0, ~0, PAGE_SIZE, 0);
        if (!gatt->ag_virtual) {
                if (bootverbose)
                        device_printf(dev, "contiguous allocation failed\n");
                free(gatt, M_AGP);
                return 0;
        }
        bzero(gatt->ag_virtual, entries * sizeof(u_int32_t));
        gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);
        agp_flush_cache();

        return gatt;
}

void
agp_free_gatt(struct agp_gatt *gatt)
{
        contigfree(gatt->ag_virtual,
            gatt->ag_entries * sizeof(u_int32_t), M_AGP);
        free(gatt, M_AGP);
}

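/*
 * Heuristic table, taken from the Linux driver, bounding how much AGP
 * memory may be allocated for a given amount of installed system RAM:
 * {RAM in MB, max AGP memory in MB}.
 */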
static u_int agp_max[][2] = {
        {0,     0},
        {32,    4},
        {64,    28},
        {128,   96},
        {256,   204},
        {512,   440},
        {1024,  942},
        {2048,  1920},
        {4096,  3932}
};
#define agp_max_size (sizeof(agp_max) / sizeof(agp_max[0]))

/**
 * Sets the PCI resource which represents the AGP aperture.
 *
 * If not called, the default AGP aperture resource of AGP_APBASE will
 * be used.  Must be called before agp_generic_attach().
 */
void
agp_set_aperture_resource(device_t dev, int rid)
{
        struct agp_softc *sc = device_get_softc(dev);

        sc->as_aperture_rid = rid;
}

int
agp_generic_attach(device_t dev)
{
        struct agp_softc *sc = device_get_softc(dev);
        int i;
        u_int memsize;

        /*
         * Find and map the aperture, RF_SHAREABLE for DRM but not RF_ACTIVE
         * because the kernel doesn't need to map it.
         */

        if (sc->as_aperture_rid != -1) {
                if (sc->as_aperture_rid == 0)
                        sc->as_aperture_rid = AGP_APBASE;

                sc->as_aperture = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                    &sc->as_aperture_rid, RF_SHAREABLE);
                if (!sc->as_aperture)
                        return ENOMEM;
        }

        /*
         * Work out an upper bound for agp memory allocation.  This
         * uses a heuristic table from the Linux driver.
         */
        memsize = ptoa(Maxmem) >> 20;
        for (i = 0; i < agp_max_size; i++) {
                if (memsize <= agp_max[i][0])
                        break;
        }
        if (i == agp_max_size)
                i = agp_max_size - 1;
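        /*
         * Example: with 512MB of RAM the loop above stops at the
         * {512, 440} entry, so as_maxmem below becomes 440MB.
         */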
        sc->as_maxmem = agp_max[i][1] << 20U;

        /*
         * The lock is used to prevent re-entry to
         * agp_generic_bind_memory() since that function can sleep.
         */
        mtx_init(&sc->as_lock, "agp lock", NULL, MTX_DEF);

        /*
         * Initialise stuff for the userland device.
         */
        agp_devclass = devclass_find("agp");
        TAILQ_INIT(&sc->as_memory);
        sc->as_nextid = 1;

        sc->as_devnode = make_dev(&agp_cdevsw,
            0, UID_ROOT, GID_WHEEL, 0600, "agpgart");
        sc->as_devnode->si_drv1 = dev;

        return 0;
}

void
agp_free_cdev(device_t dev)
{
        struct agp_softc *sc = device_get_softc(dev);

        destroy_dev(sc->as_devnode);
}

void
agp_free_res(device_t dev)
{
        struct agp_softc *sc = device_get_softc(dev);

        if (sc->as_aperture != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->as_aperture_rid,
                    sc->as_aperture);
        mtx_destroy(&sc->as_lock);
        agp_flush_cache();
}

int
agp_generic_detach(device_t dev)
{

        agp_free_cdev(dev);
        agp_free_res(dev);
        return 0;
}

/**
 * Default AGP aperture size detection which simply returns the size of
 * the aperture's PCI resource.
 */
u_int32_t
agp_generic_get_aperture(device_t dev)
{
        struct agp_softc *sc = device_get_softc(dev);

        return rman_get_size(sc->as_aperture);
}

/**
 * Default AGP aperture size setting function, which simply doesn't allow
 * changes to resource size.
 */
int
agp_generic_set_aperture(device_t dev, u_int32_t aperture)
{
        u_int32_t current_aperture;

        current_aperture = AGP_GET_APERTURE(dev);
        if (current_aperture != aperture)
                return EINVAL;
        else
                return 0;
}

/*
 * This does the enable logic for v3, with the same topology
 * restrictions as in place for v2 -- one bus, one device on the bus.
 */
static int
agp_v3_enable(device_t dev, device_t mdev, u_int32_t mode)
{
        u_int32_t tstatus, mstatus;
        u_int32_t command;
        int rq, sba, fw, rate, arqsz, cal;

        tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
        mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

        /* Set RQ to the min of mode, tstatus and mstatus */
        rq = AGP_MODE_GET_RQ(mode);
        if (AGP_MODE_GET_RQ(tstatus) < rq)
                rq = AGP_MODE_GET_RQ(tstatus);
        if (AGP_MODE_GET_RQ(mstatus) < rq)
                rq = AGP_MODE_GET_RQ(mstatus);

        /*
         * ARQSZ - Set the value to the maximum one.
         * Don't allow the mode register to override values.
         */
        arqsz = AGP_MODE_GET_ARQSZ(mode);
        if (AGP_MODE_GET_ARQSZ(tstatus) > arqsz)
                arqsz = AGP_MODE_GET_ARQSZ(tstatus);
        if (AGP_MODE_GET_ARQSZ(mstatus) > arqsz)
                arqsz = AGP_MODE_GET_ARQSZ(mstatus);

        /* Calibration cycle - don't allow override by mode register */
        cal = AGP_MODE_GET_CAL(tstatus);
        if (AGP_MODE_GET_CAL(mstatus) < cal)
                cal = AGP_MODE_GET_CAL(mstatus);

        /* SBA must be supported for AGP v3. */
        sba = 1;

        /* Set FW if all three support it. */
        fw = (AGP_MODE_GET_FW(tstatus)
            & AGP_MODE_GET_FW(mstatus)
            & AGP_MODE_GET_FW(mode));

        /* Figure out the max rate */
        rate = (AGP_MODE_GET_RATE(tstatus)
            & AGP_MODE_GET_RATE(mstatus)
            & AGP_MODE_GET_RATE(mode));
        if (rate & AGP_MODE_V3_RATE_8x)
                rate = AGP_MODE_V3_RATE_8x;
        else
                rate = AGP_MODE_V3_RATE_4x;
        if (bootverbose)
                device_printf(dev, "Setting AGP v3 mode %d\n", rate * 4);

        pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, 0, 4);

        /* Construct the new mode word and tell the hardware */
        command = 0;
        command = AGP_MODE_SET_RQ(0, rq);
        command = AGP_MODE_SET_ARQSZ(command, arqsz);
        command = AGP_MODE_SET_CAL(command, cal);
        command = AGP_MODE_SET_SBA(command, sba);
        command = AGP_MODE_SET_FW(command, fw);
        command = AGP_MODE_SET_RATE(command, rate);
        command = AGP_MODE_SET_MODE_3(command, 1);
        command = AGP_MODE_SET_AGP(command, 1);
        pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
        pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

        return 0;
}

static int
agp_v2_enable(device_t dev, device_t mdev, u_int32_t mode)
{
        u_int32_t tstatus, mstatus;
        u_int32_t command;
        int rq, sba, fw, rate;

        tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
        mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

        /* Set RQ to the min of mode, tstatus and mstatus */
        rq = AGP_MODE_GET_RQ(mode);
        if (AGP_MODE_GET_RQ(tstatus) < rq)
                rq = AGP_MODE_GET_RQ(tstatus);
        if (AGP_MODE_GET_RQ(mstatus) < rq)
                rq = AGP_MODE_GET_RQ(mstatus);

        /* Set SBA if all three can deal with SBA */
        sba = (AGP_MODE_GET_SBA(tstatus)
            & AGP_MODE_GET_SBA(mstatus)
            & AGP_MODE_GET_SBA(mode));

        /* Similar for FW */
        fw = (AGP_MODE_GET_FW(tstatus)
            & AGP_MODE_GET_FW(mstatus)
            & AGP_MODE_GET_FW(mode));

        /* Figure out the max rate */
        rate = (AGP_MODE_GET_RATE(tstatus)
            & AGP_MODE_GET_RATE(mstatus)
            & AGP_MODE_GET_RATE(mode));
        if (rate & AGP_MODE_V2_RATE_4x)
                rate = AGP_MODE_V2_RATE_4x;
        else if (rate & AGP_MODE_V2_RATE_2x)
                rate = AGP_MODE_V2_RATE_2x;
        else
                rate = AGP_MODE_V2_RATE_1x;
        if (bootverbose)
                device_printf(dev, "Setting AGP v2 mode %d\n", rate);

        /* Construct the new mode word and tell the hardware */
        command = 0;
        command = AGP_MODE_SET_RQ(0, rq);
        command = AGP_MODE_SET_SBA(command, sba);
        command = AGP_MODE_SET_FW(command, fw);
        command = AGP_MODE_SET_RATE(command, rate);
        command = AGP_MODE_SET_AGP(command, 1);
        pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
        pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

        return 0;
}

int
agp_generic_enable(device_t dev, u_int32_t mode)
{
        device_t mdev = agp_find_display();
        u_int32_t tstatus, mstatus;

        if (!mdev) {
                AGP_DPF("can't find display\n");
                return ENXIO;
        }

        tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
        mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

        /*
         * Check display and bridge for AGP v3 support.  AGP v3 allows
         * more variety in topology than v2, e.g. multiple AGP devices
         * attached to one bridge, or multiple AGP bridges in one
         * system.  This doesn't attempt to address those situations,
         * but should work fine for a classic single AGP slot system
         * with AGP v3.
         */
        if (AGP_MODE_GET_MODE_3(mode) &&
            AGP_MODE_GET_MODE_3(tstatus) &&
            AGP_MODE_GET_MODE_3(mstatus))
                return (agp_v3_enable(dev, mdev, mode));
        else
                return (agp_v2_enable(dev, mdev, mode));
}

struct agp_memory *
agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
{
        struct agp_softc *sc = device_get_softc(dev);
        struct agp_memory *mem;

        if ((size & (AGP_PAGE_SIZE - 1)) != 0)
                return 0;

        if (sc->as_allocated + size > sc->as_maxmem)
                return 0;

        if (type != 0) {
                printf("agp_generic_alloc_memory: unsupported type %d\n",
                    type);
                return 0;
        }

        mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
        mem->am_id = sc->as_nextid++;
        mem->am_size = size;
        mem->am_type = 0;
        mem->am_obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(size)));
        mem->am_physical = 0;
        mem->am_offset = 0;
        mem->am_is_bound = 0;
        TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
        sc->as_allocated += size;

        return mem;
}

int
agp_generic_free_memory(device_t dev, struct agp_memory *mem)
{
        struct agp_softc *sc = device_get_softc(dev);

        if (mem->am_is_bound)
                return EBUSY;

        sc->as_allocated -= mem->am_size;
        TAILQ_REMOVE(&sc->as_memory, mem, am_link);
        vm_object_deallocate(mem->am_obj);
        free(mem, M_AGP);
        return 0;
}

int
agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
    vm_offset_t offset)
{
        struct agp_softc *sc = device_get_softc(dev);
        vm_offset_t i, j, k;
        vm_page_t m;
        int error;

        /* Do some sanity checks first. */
        if ((offset & (AGP_PAGE_SIZE - 1)) != 0 ||
            offset + mem->am_size > AGP_GET_APERTURE(dev)) {
                device_printf(dev, "binding memory at bad offset %#x\n",
                    (int)offset);
                return EINVAL;
        }

        /*
         * Allocate the pages early, before acquiring the lock,
         * because vm_page_grab() may sleep and we can't hold a mutex
         * while sleeping.
         */
        VM_OBJECT_LOCK(mem->am_obj);
        for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
                /*
                 * Find a page from the object and wire it
                 * down.  This page will be mapped using one or more
                 * entries in the GATT (assuming that PAGE_SIZE >=
                 * AGP_PAGE_SIZE).  If this is the first call to bind,
                 * the pages will be allocated and zeroed.
                 */
                m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
                    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
                AGP_DPF("found page pa=%#jx\n", (uintmax_t)VM_PAGE_TO_PHYS(m));
        }
        VM_OBJECT_UNLOCK(mem->am_obj);

        mtx_lock(&sc->as_lock);

        if (mem->am_is_bound) {
                device_printf(dev, "memory already bound\n");
                error = EINVAL;
                VM_OBJECT_LOCK(mem->am_obj);
                i = 0;
                goto bad;
        }

        /*
         * Bind the individual pages and flush the chipset's
         * TLB.
         */
        VM_OBJECT_LOCK(mem->am_obj);
        for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
                m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));

                /*
                 * Install entries in the GATT, making sure that if
                 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
                 * aligned to PAGE_SIZE, we don't modify too many GATT
                 * entries.
                 */
                for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
                     j += AGP_PAGE_SIZE) {
                        vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
                        AGP_DPF("binding offset %#jx to pa %#jx\n",
                            (uintmax_t)offset + i + j, (uintmax_t)pa);
                        error = AGP_BIND_PAGE(dev, offset + i + j, pa);
                        if (error) {
                                /*
                                 * Bail out.  Reverse all the mappings
                                 * and unwire the pages.
                                 */
                                for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
                                        AGP_UNBIND_PAGE(dev, offset + k);
                                goto bad;
                        }
                }
                vm_page_wakeup(m);
        }
        VM_OBJECT_UNLOCK(mem->am_obj);

        /*
         * Flush the cpu cache since we are providing a new mapping
         * for these pages.
         */
        agp_flush_cache();

        /*
         * Make sure the chipset gets the new mappings.
         */
        AGP_FLUSH_TLB(dev);

        mem->am_offset = offset;
        mem->am_is_bound = 1;

        mtx_unlock(&sc->as_lock);

        return 0;
bad:
        mtx_unlock(&sc->as_lock);
        VM_OBJECT_LOCK_ASSERT(mem->am_obj, MA_OWNED);
        for (k = 0; k < mem->am_size; k += PAGE_SIZE) {
                m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(k));
                if (k >= i)
                        vm_page_wakeup(m);
                vm_page_lock(m);
                vm_page_unwire(m, 0);
                vm_page_unlock(m);
        }
        VM_OBJECT_UNLOCK(mem->am_obj);

        return error;
}

int
agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
{
        struct agp_softc *sc = device_get_softc(dev);
        vm_page_t m;
        int i;

        mtx_lock(&sc->as_lock);

        if (!mem->am_is_bound) {
                device_printf(dev, "memory is not bound\n");
                mtx_unlock(&sc->as_lock);
                return EINVAL;
        }

        /*
         * Unbind the individual pages and flush the chipset's
         * TLB.  Unwire the pages so they can be swapped.
         */
        for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
                AGP_UNBIND_PAGE(dev, mem->am_offset + i);
        VM_OBJECT_LOCK(mem->am_obj);
        for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
                m = vm_page_lookup(mem->am_obj, atop(i));
                vm_page_lock(m);
                vm_page_unwire(m, 0);
                vm_page_unlock(m);
        }
        VM_OBJECT_UNLOCK(mem->am_obj);

        agp_flush_cache();
        AGP_FLUSH_TLB(dev);

        mem->am_offset = 0;
        mem->am_is_bound = 0;

        mtx_unlock(&sc->as_lock);

        return 0;
}

/* Helper functions for implementing user/kernel api */

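/*
 * The bridge is an exclusive resource: it is acquired either by
 * userland (through /dev/agpgart) or by the kernel (e.g. a DRM
 * driver), and must be released before the other side can acquire it.
 */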
static int
agp_acquire_helper(device_t dev, enum agp_acquire_state state)
{
        struct agp_softc *sc = device_get_softc(dev);

        if (sc->as_state != AGP_ACQUIRE_FREE)
                return EBUSY;
        sc->as_state = state;

        return 0;
}

static int
agp_release_helper(device_t dev, enum agp_acquire_state state)
{
        struct agp_softc *sc = device_get_softc(dev);

        if (sc->as_state == AGP_ACQUIRE_FREE)
                return 0;

        if (sc->as_state != state)
                return EBUSY;

        sc->as_state = AGP_ACQUIRE_FREE;
        return 0;
}

static struct agp_memory *
agp_find_memory(device_t dev, int id)
{
        struct agp_softc *sc = device_get_softc(dev);
        struct agp_memory *mem;

        AGP_DPF("searching for memory block %d\n", id);
        TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
                AGP_DPF("considering memory block %d\n", mem->am_id);
                if (mem->am_id == id)
                        return mem;
        }
        return 0;
}

/* Implementation of the userland ioctl api */

static int
agp_info_user(device_t dev, agp_info *info)
{
        struct agp_softc *sc = device_get_softc(dev);

        bzero(info, sizeof *info);
        info->bridge_id = pci_get_devid(dev);
        info->agp_mode =
            pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
        if (sc->as_aperture)
                info->aper_base = rman_get_start(sc->as_aperture);
        else
                info->aper_base = 0;
        info->aper_size = AGP_GET_APERTURE(dev) >> 20;
        info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
        info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;

        return 0;
}

static int
agp_setup_user(device_t dev, agp_setup *setup)
{
        return AGP_ENABLE(dev, setup->agp_mode);
}

static int
agp_allocate_user(device_t dev, agp_allocate *alloc)
{
        struct agp_memory *mem;

        mem = AGP_ALLOC_MEMORY(dev,
            alloc->type,
            alloc->pg_count << AGP_PAGE_SHIFT);
        if (mem) {
                alloc->key = mem->am_id;
                alloc->physical = mem->am_physical;
                return 0;
        } else {
                return ENOMEM;
        }
}

static int
agp_deallocate_user(device_t dev, int id)
{
        struct agp_memory *mem = agp_find_memory(dev, id);

        if (mem) {
                AGP_FREE_MEMORY(dev, mem);
                return 0;
        } else {
                return ENOENT;
        }
}

static int
agp_bind_user(device_t dev, agp_bind *bind)
{
        struct agp_memory *mem = agp_find_memory(dev, bind->key);

        if (!mem)
                return ENOENT;

        return AGP_BIND_MEMORY(dev, mem, bind->pg_start << AGP_PAGE_SHIFT);
}

static int
agp_unbind_user(device_t dev, agp_unbind *unbind)
{
        struct agp_memory *mem = agp_find_memory(dev, unbind->key);

        if (!mem)
                return ENOENT;

        return AGP_UNBIND_MEMORY(dev, mem);
}

static int
agp_chipset_flush(device_t dev)
{

        return (AGP_CHIPSET_FLUSH(dev));
}

static int
agp_open(struct cdev *kdev, int oflags, int devtype, struct thread *td)
{
        device_t dev = kdev->si_drv1;
        struct agp_softc *sc = device_get_softc(dev);

        if (!sc->as_isopen) {
                sc->as_isopen = 1;
                device_busy(dev);
        }

        return 0;
}

static int
agp_close(struct cdev *kdev, int fflag, int devtype, struct thread *td)
{
        device_t dev = kdev->si_drv1;
        struct agp_softc *sc = device_get_softc(dev);
        struct agp_memory *mem;

        /*
         * Clear the GATT and force release on last close
         */
        while ((mem = TAILQ_FIRST(&sc->as_memory)) != 0) {
                if (mem->am_is_bound)
                        AGP_UNBIND_MEMORY(dev, mem);
                AGP_FREE_MEMORY(dev, mem);
        }
        if (sc->as_state == AGP_ACQUIRE_USER)
                agp_release_helper(dev, AGP_ACQUIRE_USER);
        sc->as_isopen = 0;
        device_unbusy(dev);

        return 0;
}

static int
agp_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
        device_t dev = kdev->si_drv1;

        switch (cmd) {
        case AGPIOC_INFO:
                return agp_info_user(dev, (agp_info *) data);

        case AGPIOC_ACQUIRE:
                return agp_acquire_helper(dev, AGP_ACQUIRE_USER);

        case AGPIOC_RELEASE:
                return agp_release_helper(dev, AGP_ACQUIRE_USER);

        case AGPIOC_SETUP:
                return agp_setup_user(dev, (agp_setup *)data);

        case AGPIOC_ALLOCATE:
                return agp_allocate_user(dev, (agp_allocate *)data);

        case AGPIOC_DEALLOCATE:
                return agp_deallocate_user(dev, *(int *) data);

        case AGPIOC_BIND:
                return agp_bind_user(dev, (agp_bind *)data);

        case AGPIOC_UNBIND:
                return agp_unbind_user(dev, (agp_unbind *)data);

        case AGPIOC_CHIPSET_FLUSH:
                return agp_chipset_flush(dev);
        }

        return EINVAL;
}
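
/*
 * Sketch of the call sequence a userland client of /dev/agpgart is
 * expected to make (illustrative only; the field values are examples):
 *
 *	int fd = open("/dev/agpgart", O_RDWR);
 *	agp_info info;
 *	ioctl(fd, AGPIOC_INFO, &info);
 *	ioctl(fd, AGPIOC_ACQUIRE, 0);
 *	agp_setup setup = { .agp_mode = info.agp_mode };
 *	ioctl(fd, AGPIOC_SETUP, &setup);
 *	agp_allocate alloc = { .type = 0, .pg_count = 16 };
 *	ioctl(fd, AGPIOC_ALLOCATE, &alloc);
 *	agp_bind bind = { .key = alloc.key, .pg_start = 0 };
 *	ioctl(fd, AGPIOC_BIND, &bind);
 *	...
 *	ioctl(fd, AGPIOC_RELEASE, 0);
 *	close(fd);
 */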

static int
agp_mmap(struct cdev *kdev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot, vm_memattr_t *memattr)
{
        device_t dev = kdev->si_drv1;
        struct agp_softc *sc = device_get_softc(dev);

        if (offset > AGP_GET_APERTURE(dev))
                return -1;
        if (sc->as_aperture == NULL)
                return -1;
        *paddr = rman_get_start(sc->as_aperture) + offset;
        return 0;
}

/* Implementation of the kernel api */

device_t
agp_find_device()
{
        device_t *children, child;
        int i, count;

        if (!agp_devclass)
                return NULL;
        if (devclass_get_devices(agp_devclass, &children, &count) != 0)
                return NULL;
        child = NULL;
        for (i = 0; i < count; i++) {
                if (device_is_attached(children[i])) {
                        child = children[i];
                        break;
                }
        }
        free(children, M_TEMP);
        return child;
}

enum agp_acquire_state
agp_state(device_t dev)
{
        struct agp_softc *sc = device_get_softc(dev);
        return sc->as_state;
}

void
agp_get_info(device_t dev, struct agp_info *info)
{
        struct agp_softc *sc = device_get_softc(dev);

        info->ai_mode =
            pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
        if (sc->as_aperture != NULL)
                info->ai_aperture_base = rman_get_start(sc->as_aperture);
        else
                info->ai_aperture_base = 0;
        info->ai_aperture_size = AGP_GET_APERTURE(dev);
        info->ai_memory_allowed = sc->as_maxmem;
        info->ai_memory_used = sc->as_allocated;
}

int
agp_acquire(device_t dev)
{
        return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_release(device_t dev)
{
        return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_enable(device_t dev, u_int32_t mode)
{
        return AGP_ENABLE(dev, mode);
}

void *agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
{
        return (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
}

void agp_free_memory(device_t dev, void *handle)
{
        struct agp_memory *mem = (struct agp_memory *) handle;
        AGP_FREE_MEMORY(dev, mem);
}

int agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
{
        struct agp_memory *mem = (struct agp_memory *) handle;
        return AGP_BIND_MEMORY(dev, mem, offset);
}

int agp_unbind_memory(device_t dev, void *handle)
{
        struct agp_memory *mem = (struct agp_memory *) handle;
        return AGP_UNBIND_MEMORY(dev, mem);
}

void agp_memory_info(device_t dev, void *handle, struct
    agp_memory_info *mi)
{
        struct agp_memory *mem = (struct agp_memory *) handle;

        mi->ami_size = mem->am_size;
        mi->ami_physical = mem->am_physical;
        mi->ami_offset = mem->am_offset;
        mi->ami_is_bound = mem->am_is_bound;
}
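
/*
 * Sketch (illustrative only, not part of this file): how an in-kernel
 * consumer such as a DRM driver might use the kernel api above.
 */
#if 0
static int
agp_kernel_example(void)
{
        struct agp_memory_info mi;
        device_t agp;
        void *handle;
        int error;

        agp = agp_find_device();
        if (agp == NULL)
                return ENXIO;
        if ((error = agp_acquire(agp)) != 0)
                return error;
        agp_enable(agp, AGP_MODE_V2_RATE_4x);   /* request up to 4x */
        handle = agp_alloc_memory(agp, 0, 16 * AGP_PAGE_SIZE);
        if (handle == NULL) {
                agp_release(agp);
                return ENOMEM;
        }
        error = agp_bind_memory(agp, handle, 0);
        if (error == 0) {
                agp_memory_info(agp, handle, &mi);
                /* ... use the mapping at mi.ami_offset in the aperture ... */
                agp_unbind_memory(agp, handle);
        }
        agp_free_memory(agp, handle);
        agp_release(agp);
        return error;
}
#endif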