--- agp.c	(129567)
+++ agp.c	(129579)
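This revision switches the AGP driver's softc lock from a lockmgr lock to a sleep mutex: the <sys/lockmgr.h> include goes away, lockinit()/lockdestroy() become mtx_init()/mtx_destroy(), and every LK_EXCLUSIVE/LK_RELEASE pair becomes mtx_lock()/mtx_unlock(). Deleted lines are marked with - and added lines with + below.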
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/dev/agp/agp.c 129567 2004-05-22 00:44:08Z mux $");
+__FBSDID("$FreeBSD: head/sys/dev/agp/agp.c 129579 2004-05-22 13:06:38Z mux $");

#include "opt_bus.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/agpio.h>
#include <sys/lock.h>
-#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <pci/agppriv.h>
#include <pci/agpvar.h>
#include <pci/agpreg.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>

#include <machine/md_var.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

MODULE_VERSION(agp, 1);

MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");

/* agp_drv.c */
static d_open_t agp_open;
static d_close_t agp_close;
static d_ioctl_t agp_ioctl;
static d_mmap_t agp_mmap;

static struct cdevsw agp_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = D_NEEDGIANT,
	.d_open = agp_open,
	.d_close = agp_close,
	.d_ioctl = agp_ioctl,
	.d_mmap = agp_mmap,
	.d_name = "agp",
};

static devclass_t agp_devclass;
#define KDEV2DEV(kdev)	devclass_get_device(agp_devclass, minor(kdev))
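/*
 * The device node for each unit is created with the unit number as its
 * minor (see agp_generic_attach() below), so KDEV2DEV() can map the
 * dev_t handed to the cdevsw entry points back to the device_t.
 */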

/* Helper functions for implementing chipset mini drivers. */

void
agp_flush_cache()
{
#ifdef __i386__
	wbinvd();
#endif
#ifdef __alpha__
	/* FIXME: This is most likely not correct as it doesn't flush CPU
	 * write caches, but we don't have a facility to do that and
	 * this is all linux does, too */
	alpha_mb();
#endif
}

u_int8_t
agp_find_caps(device_t dev)
{
	u_int32_t status;
	u_int8_t ptr, next;

	/*
	 * Check the CAP_LIST bit of the PCI status register first.
	 */
	status = pci_read_config(dev, PCIR_STATUS, 2);
	if (!(status & 0x10))
		return 0;

	/*
	 * Traverse the capabilities list.
	 */
	for (ptr = pci_read_config(dev, AGP_CAPPTR, 1);
	     ptr != 0;
	     ptr = next) {
		u_int32_t capid = pci_read_config(dev, ptr, 4);
		next = AGP_CAPID_GET_NEXT_PTR(capid);

		/*
		 * If this capability entry ID is 2, then we are done.
		 */
		if (AGP_CAPID_GET_CAP_ID(capid) == 2)
			return ptr;
	}

	return 0;
}

/*
 * Find an AGP display device (if any).
 */
static device_t
agp_find_display(void)
{
	devclass_t pci = devclass_find("pci");
	device_t bus, dev = 0;
	device_t *kids;
	int busnum, numkids, i;

	for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
		bus = devclass_get_device(pci, busnum);
		if (!bus)
			continue;
		device_get_children(bus, &kids, &numkids);
		for (i = 0; i < numkids; i++) {
			dev = kids[i];
			if (pci_get_class(dev) == PCIC_DISPLAY
			    && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
				if (agp_find_caps(dev)) {
					free(kids, M_TEMP);
					return dev;
				}
		}
		free(kids, M_TEMP);
	}

	return 0;
}

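/*
 * The GATT (Graphics Address Translation Table) is the bridge's page
 * table: one 32-bit entry per AGP page of aperture, allocated
 * physically contiguous so the chipset can walk it directly.
 */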
struct agp_gatt *
agp_alloc_gatt(device_t dev)
{
	u_int32_t apsize = AGP_GET_APERTURE(dev);
	u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
	struct agp_gatt *gatt;

	if (bootverbose)
		device_printf(dev,
		    "allocating GATT for aperture of size %dM\n",
		    apsize / (1024*1024));

	if (entries == 0) {
		device_printf(dev, "bad aperture size\n");
		return NULL;
	}

	gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
	if (!gatt)
		return 0;

	gatt->ag_entries = entries;
	gatt->ag_virtual = contigmalloc(entries * sizeof(u_int32_t), M_AGP, 0,
	    0, ~0, PAGE_SIZE, 0);
	if (!gatt->ag_virtual) {
		if (bootverbose)
			device_printf(dev, "contiguous allocation failed\n");
		free(gatt, M_AGP);
		return 0;
	}
	bzero(gatt->ag_virtual, entries * sizeof(u_int32_t));
	gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);
	agp_flush_cache();

	return gatt;
}

void
agp_free_gatt(struct agp_gatt *gatt)
{
	contigfree(gatt->ag_virtual,
	    gatt->ag_entries * sizeof(u_int32_t), M_AGP);
	free(gatt, M_AGP);
}

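/*
 * agp_max[i] maps total system RAM in MB (first column) to the maximum
 * amount of AGP memory to allow in MB (second column); see the
 * heuristic sizing loop in agp_generic_attach() below.
 */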
static int agp_max[][2] = {
	{0,	0},
	{32,	4},
	{64,	28},
	{128,	96},
	{256,	204},
	{512,	440},
	{1024,	942},
	{2048,	1920},
	{4096,	3932}
};
#define agp_max_size	(sizeof(agp_max) / sizeof(agp_max[0]))

int
agp_generic_attach(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);
	int rid, memsize, i;

	/*
	 * Find and map the aperture.
	 */
	rid = AGP_APBASE;
	sc->as_aperture = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->as_aperture)
		return ENOMEM;

	/*
	 * Work out an upper bound for agp memory allocation. This
	 * uses a heuristic table from the Linux driver.
	 */
	memsize = ptoa(Maxmem) >> 20;
	for (i = 0; i < agp_max_size; i++) {
		if (memsize <= agp_max[i][0])
			break;
	}
	if (i == agp_max_size)
		i = agp_max_size - 1;
	sc->as_maxmem = agp_max[i][1] << 20U;

	/*
	 * The lock is used to prevent re-entry to
	 * agp_generic_bind_memory() since that function can sleep.
	 */
-	lockinit(&sc->as_lock, PZERO|PCATCH, "agplk", 0, 0);
+	mtx_init(&sc->as_lock, "agp lock", NULL, MTX_DEF);
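/*
 * mtx_init(9) takes the mutex, a display name, an optional type string
 * for witness(4) and option flags; MTX_DEF requests an ordinary sleep
 * mutex. This presumes as_lock in struct agp_softc was correspondingly
 * changed from a struct lock to a struct mtx in agpvar.h.
 */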

	/*
	 * Initialise stuff for the userland device.
	 */
	agp_devclass = devclass_find("agp");
	TAILQ_INIT(&sc->as_memory);
	sc->as_nextid = 1;

	sc->as_devnode = make_dev(&agp_cdevsw,
	    device_get_unit(dev),
	    UID_ROOT,
	    GID_WHEEL,
	    0600,
	    "agpgart");

	return 0;
}

int
agp_generic_detach(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);
	bus_release_resource(dev, SYS_RES_MEMORY, AGP_APBASE, sc->as_aperture);
-	lockmgr(&sc->as_lock, LK_DRAIN, 0, curthread);
-	lockdestroy(&sc->as_lock);
+	mtx_destroy(&sc->as_lock);
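/*
 * A single mtx_destroy() replaces the LK_DRAIN/lockdestroy() pair;
 * mutexes have no drain step, but must be unowned when destroyed.
 */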
	destroy_dev(sc->as_devnode);
	agp_flush_cache();
	return 0;
}

/*
 * This does the enable logic for v3, with the same topology
 * restrictions as in place for v2 -- one bus, one device on the bus.
 */
static int
agp_v3_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate, arqsz, cal;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/*
	 * ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values.
	 */
	arqsz = AGP_MODE_GET_ARQSZ(mode);
	if (AGP_MODE_GET_ARQSZ(tstatus) > rq)
		rq = AGP_MODE_GET_ARQSZ(tstatus);
	if (AGP_MODE_GET_ARQSZ(mstatus) > rq)
		rq = AGP_MODE_GET_ARQSZ(mstatus);

	/* Calibration cycle - don't allow override by mode register */
	cal = AGP_MODE_GET_CAL(tstatus);
	if (AGP_MODE_GET_CAL(mstatus) < cal)
		cal = AGP_MODE_GET_CAL(mstatus);

	/* SBA must be supported for AGP v3. */
	sba = 1;

	/* Set FW if all three support it. */
	fw = (AGP_MODE_GET_FW(tstatus)
	    & AGP_MODE_GET_FW(mstatus)
	    & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
	    & AGP_MODE_GET_RATE(mstatus)
	    & AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V3_RATE_8x)
		rate = AGP_MODE_V3_RATE_8x;
	else
		rate = AGP_MODE_V3_RATE_4x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v3 mode %d\n", rate * 4);

	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, 0, 4);

	/* Construct the new mode word and tell the hardware */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_ARQSZ(command, arqsz);
	command = AGP_MODE_SET_CAL(command, cal);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}

static int
agp_v2_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/* Set SBA if all three can deal with SBA */
	sba = (AGP_MODE_GET_SBA(tstatus)
	    & AGP_MODE_GET_SBA(mstatus)
	    & AGP_MODE_GET_SBA(mode));

	/* Similar for FW */
	fw = (AGP_MODE_GET_FW(tstatus)
	    & AGP_MODE_GET_FW(mstatus)
	    & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
	    & AGP_MODE_GET_RATE(mstatus)
	    & AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V2_RATE_4x)
		rate = AGP_MODE_V2_RATE_4x;
	else if (rate & AGP_MODE_V2_RATE_2x)
		rate = AGP_MODE_V2_RATE_2x;
	else
		rate = AGP_MODE_V2_RATE_1x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v2 mode %d\n", rate);

	/* Construct the new mode word and tell the hardware */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}

int
agp_generic_enable(device_t dev, u_int32_t mode)
{
	device_t mdev = agp_find_display();
	u_int32_t tstatus, mstatus;

	if (!mdev) {
		AGP_DPF("can't find display\n");
		return ENXIO;
	}

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/*
	 * Check display and bridge for AGP v3 support. AGP v3 allows
	 * more variety in topology than v2, e.g. multiple AGP devices
	 * attached to one bridge, or multiple AGP bridges in one
	 * system. This doesn't attempt to address those situations,
	 * but should work fine for a classic single AGP slot system
	 * with AGP v3.
	 */
	if (AGP_MODE_GET_MODE_3(tstatus) && AGP_MODE_GET_MODE_3(mstatus))
		return (agp_v3_enable(dev, mdev, mode));
	else
		return (agp_v2_enable(dev, mdev, mode));
}

struct agp_memory *
agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0)
		return 0;

	if (sc->as_allocated + size > sc->as_maxmem)
		return 0;

	if (type != 0) {
		printf("agp_generic_alloc_memory: unsupported type %d\n",
		    type);
		return 0;
	}

	mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
	mem->am_id = sc->as_nextid++;
	mem->am_size = size;
	mem->am_type = 0;
	mem->am_obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(size)));
	mem->am_physical = 0;
	mem->am_offset = 0;
	mem->am_is_bound = 0;
	TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
	sc->as_allocated += size;

	return mem;
}

int
agp_generic_free_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (mem->am_is_bound)
		return EBUSY;

	sc->as_allocated -= mem->am_size;
	TAILQ_REMOVE(&sc->as_memory, mem, am_link);
	vm_object_deallocate(mem->am_obj);
	free(mem, M_AGP);
	return 0;
}

int
agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
    vm_offset_t offset)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_offset_t i, j, k;
	vm_page_t m;
	int error;

-	lockmgr(&sc->as_lock, LK_EXCLUSIVE, 0, curthread);
+	mtx_lock(&sc->as_lock);
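/*
 * Each LK_EXCLUSIVE acquisition and its matching LK_RELEASE, including
 * the early error returns below, map one-for-one onto
 * mtx_lock()/mtx_unlock() on the converted as_lock.
 */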

	if (mem->am_is_bound) {
		device_printf(dev, "memory already bound\n");
-		lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
+		mtx_unlock(&sc->as_lock);
		return EINVAL;
	}

	if (offset < 0
	    || (offset & (AGP_PAGE_SIZE - 1)) != 0
	    || offset + mem->am_size > AGP_GET_APERTURE(dev)) {
		device_printf(dev, "binding memory at bad offset %#x\n",
		    (int) offset);
-		lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
+		mtx_unlock(&sc->as_lock);
		return EINVAL;
	}

	/*
	 * Bind the individual pages and flush the chipset's
	 * TLB.
	 *
	 * XXX Presumably, this needs to be the pci address on alpha
	 * (i.e. use alpha_XXX_dmamap()). I don't have access to any
	 * alpha AGP hardware to check.
	 */
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		/*
		 * Find a page from the object and wire it
		 * down. This page will be mapped using one or more
		 * entries in the GATT (assuming that PAGE_SIZE >=
		 * AGP_PAGE_SIZE). If this is the first call to bind,
		 * the pages will be allocated and zeroed.
		 */
		VM_OBJECT_LOCK(mem->am_obj);
		m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
		    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(mem->am_obj);
		AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));

		/*
		 * Install entries in the GATT, making sure that if
		 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
		 * aligned to PAGE_SIZE, we don't modify too many GATT
		 * entries.
		 */
		for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
		     j += AGP_PAGE_SIZE) {
			vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
			AGP_DPF("binding offset %#x to pa %#x\n",
			    offset + i + j, pa);
			error = AGP_BIND_PAGE(dev, offset + i + j, pa);
			if (error) {
				/*
				 * Bail out. Reverse all the mappings
				 * and unwire the pages.
				 */
				vm_page_lock_queues();
				vm_page_wakeup(m);
				vm_page_unlock_queues();
				for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
					AGP_UNBIND_PAGE(dev, offset + k);
				VM_OBJECT_LOCK(mem->am_obj);
				for (k = 0; k <= i; k += PAGE_SIZE) {
					m = vm_page_lookup(mem->am_obj,
					    OFF_TO_IDX(k));
					vm_page_lock_queues();
					vm_page_unwire(m, 0);
					vm_page_unlock_queues();
				}
				VM_OBJECT_UNLOCK(mem->am_obj);
-				lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
+				mtx_unlock(&sc->as_lock);
				return error;
			}
		}
		vm_page_lock_queues();
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}

	/*
	 * Flush the cpu cache since we are providing a new mapping
	 * for these pages.
	 */
	agp_flush_cache();

	/*
	 * Make sure the chipset gets the new mappings.
	 */
	AGP_FLUSH_TLB(dev);

	mem->am_offset = offset;
	mem->am_is_bound = 1;

-	lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
+	mtx_unlock(&sc->as_lock);

	return 0;
}

int
agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_page_t m;
	int i;

-	lockmgr(&sc->as_lock, LK_EXCLUSIVE, 0, curthread);
+	mtx_lock(&sc->as_lock);

	if (!mem->am_is_bound) {
		device_printf(dev, "memory is not bound\n");
-		lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
+		mtx_unlock(&sc->as_lock);
		return EINVAL;
	}

	/*
	 * Unbind the individual pages and flush the chipset's
	 * TLB. Unwire the pages so they can be swapped.
	 */
	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
		AGP_UNBIND_PAGE(dev, mem->am_offset + i);
	VM_OBJECT_LOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, atop(i));
		vm_page_lock_queues();
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(mem->am_obj);

	agp_flush_cache();
	AGP_FLUSH_TLB(dev);

	mem->am_offset = 0;
	mem->am_is_bound = 0;

-	lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
+	mtx_unlock(&sc->as_lock);

	return 0;
}

/* Helper functions for implementing user/kernel api */

static int
agp_acquire_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state != AGP_ACQUIRE_FREE)
		return EBUSY;
	sc->as_state = state;

	return 0;
}

static int
agp_release_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state == AGP_ACQUIRE_FREE)
		return 0;

	if (sc->as_state != state)
		return EBUSY;

	sc->as_state = AGP_ACQUIRE_FREE;
	return 0;
}

static struct agp_memory *
agp_find_memory(device_t dev, int id)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	AGP_DPF("searching for memory block %d\n", id);
	TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
		AGP_DPF("considering memory block %d\n", mem->am_id);
		if (mem->am_id == id)
			return mem;
	}
	return 0;
}

/* Implementation of the userland ioctl api */

static int
agp_info_user(device_t dev, agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	bzero(info, sizeof *info);
	info->bridge_id = pci_get_devid(dev);
	info->agp_mode =
	    pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	info->aper_base = rman_get_start(sc->as_aperture);
	info->aper_size = AGP_GET_APERTURE(dev) >> 20;
	info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
	info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;

	return 0;
}

static int
agp_setup_user(device_t dev, agp_setup *setup)
{
	return AGP_ENABLE(dev, setup->agp_mode);
}

static int
agp_allocate_user(device_t dev, agp_allocate *alloc)
{
	struct agp_memory *mem;

	mem = AGP_ALLOC_MEMORY(dev,
	    alloc->type,
	    alloc->pg_count << AGP_PAGE_SHIFT);
	if (mem) {
		alloc->key = mem->am_id;
		alloc->physical = mem->am_physical;
		return 0;
	} else {
		return ENOMEM;
	}
}

static int
agp_deallocate_user(device_t dev, int id)
{
	struct agp_memory *mem = agp_find_memory(dev, id);

	if (mem) {
		AGP_FREE_MEMORY(dev, mem);
		return 0;
	} else {
		return ENOENT;
	}
}

static int
agp_bind_user(device_t dev, agp_bind *bind)
{
	struct agp_memory *mem = agp_find_memory(dev, bind->key);

	if (!mem)
		return ENOENT;

	return AGP_BIND_MEMORY(dev, mem, bind->pg_start << AGP_PAGE_SHIFT);
}

static int
agp_unbind_user(device_t dev, agp_unbind *unbind)
{
	struct agp_memory *mem = agp_find_memory(dev, unbind->key);

	if (!mem)
		return ENOENT;

	return AGP_UNBIND_MEMORY(dev, mem);
}

static int
agp_open(dev_t kdev, int oflags, int devtype, struct thread *td)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);

	if (!sc->as_isopen) {
		sc->as_isopen = 1;
		device_busy(dev);
	}

	return 0;
}

static int
agp_close(dev_t kdev, int fflag, int devtype, struct thread *td)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	/*
	 * Clear the GATT and force release on last close
	 */
	while ((mem = TAILQ_FIRST(&sc->as_memory)) != 0) {
		if (mem->am_is_bound)
			AGP_UNBIND_MEMORY(dev, mem);
		AGP_FREE_MEMORY(dev, mem);
	}
	if (sc->as_state == AGP_ACQUIRE_USER)
		agp_release_helper(dev, AGP_ACQUIRE_USER);
	sc->as_isopen = 0;
	device_unbusy(dev);

	return 0;
}

static int
agp_ioctl(dev_t kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	device_t dev = KDEV2DEV(kdev);

	switch (cmd) {
	case AGPIOC_INFO:
		return agp_info_user(dev, (agp_info *) data);

	case AGPIOC_ACQUIRE:
		return agp_acquire_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_RELEASE:
		return agp_release_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_SETUP:
		return agp_setup_user(dev, (agp_setup *)data);

	case AGPIOC_ALLOCATE:
		return agp_allocate_user(dev, (agp_allocate *)data);

	case AGPIOC_DEALLOCATE:
		return agp_deallocate_user(dev, *(int *) data);

	case AGPIOC_BIND:
		return agp_bind_user(dev, (agp_bind *)data);

	case AGPIOC_UNBIND:
		return agp_unbind_user(dev, (agp_unbind *)data);

	}

	return EINVAL;
}

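/*
 * d_mmap handler: faults on a user mapping of /dev/agpgart resolve to
 * the physical address of the corresponding byte of the aperture.
 */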
static int
agp_mmap(dev_t kdev, vm_offset_t offset, vm_paddr_t *paddr, int prot)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);

	if (offset > AGP_GET_APERTURE(dev))
		return -1;
	*paddr = rman_get_start(sc->as_aperture) + offset;
	return 0;
}

/* Implementation of the kernel api */

device_t
agp_find_device()
{
	if (!agp_devclass)
		return 0;
	return devclass_get_device(agp_devclass, 0);
}

enum agp_acquire_state
agp_state(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);
	return sc->as_state;
}

void
agp_get_info(device_t dev, struct agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	info->ai_mode =
	    pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	info->ai_aperture_base = rman_get_start(sc->as_aperture);
	info->ai_aperture_size = rman_get_size(sc->as_aperture);
	info->ai_aperture_va = (vm_offset_t) rman_get_virtual(sc->as_aperture);
	info->ai_memory_allowed = sc->as_maxmem;
	info->ai_memory_used = sc->as_allocated;
}

int
agp_acquire(device_t dev)
{
	return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_release(device_t dev)
{
	return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_enable(device_t dev, u_int32_t mode)
{
	return AGP_ENABLE(dev, mode);
}

void *agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
{
	return (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
}

void agp_free_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	AGP_FREE_MEMORY(dev, mem);
}

int agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	return AGP_BIND_MEMORY(dev, mem, offset);
}

int agp_unbind_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	return AGP_UNBIND_MEMORY(dev, mem);
}

void agp_memory_info(device_t dev, void *handle, struct
    agp_memory_info *mi)
{
	struct agp_memory *mem = (struct agp_memory *) handle;

	mi->ami_size = mem->am_size;
	mi->ami_physical = mem->am_physical;
	mi->ami_offset = mem->am_offset;
	mi->ami_is_bound = mem->am_is_bound;
}