/*
 * Diff view (full/compact rendering) of ixp425_qmgr.c:
 * FreeBSD r182932 (old, "Deleted" column) vs r186352 (new, "Added" column).
 */
1/*-
2 * Copyright (c) 2006 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30/*-
31 * Copyright (c) 2001-2005, Intel Corporation.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. Neither the name of the Intel Corporation nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
45 *
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
48 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58*/
59#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2006 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30/*-
31 * Copyright (c) 2001-2005, Intel Corporation.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. Neither the name of the Intel Corporation nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
45 *
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
48 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58*/
59#include <sys/cdefs.h>
60__FBSDID("$FreeBSD: head/sys/arm/xscale/ixp425/ixp425_qmgr.c 182932 2008-09-11 12:17:21Z raj $");
60__FBSDID("$FreeBSD: head/sys/arm/xscale/ixp425/ixp425_qmgr.c 186352 2008-12-20 03:26:09Z sam $");
61
62/*
63 * Intel XScale Queue Manager support.
64 *
65 * Each IXP4XXX device has a hardware block that implements a priority
66 * queue manager that is shared between the XScale cpu and the backend
67 * devices (such as the NPE). Queues are accessed by reading/writing
68 * special memory locations. The queue contents are mapped into a shared
69 * SRAM region with entries managed in a circular buffer. The XScale
70 * processor can receive interrupts based on queue contents (a condition
71 * code determines when interrupts should be delivered).
72 *
73 * The code here basically replaces the qmgr class in the Intel Access
74 * Library (IAL).
75 */
76#include <sys/param.h>
77#include <sys/systm.h>
78#include <sys/kernel.h>
79#include <sys/module.h>
80#include <sys/time.h>
81#include <sys/bus.h>
82#include <sys/resource.h>
83#include <sys/rman.h>
84#include <sys/sysctl.h>
85
86#include <machine/bus.h>
87#include <machine/cpu.h>
88#include <machine/cpufunc.h>
89#include <machine/resource.h>
90#include <machine/intr.h>
91#include <arm/xscale/ixp425/ixp425reg.h>
92#include <arm/xscale/ixp425/ixp425var.h>
93
94#include <arm/xscale/ixp425/ixp425_qmgr.h>
95
96/*
97 * State per AQM hw queue.
98 * This structure holds q configuration and dispatch state.
99 */
100struct qmgrInfo {
101 int qSizeInWords; /* queue size in words */
102
103 uint32_t qOflowStatBitMask; /* overflow status mask */
104 int qWriteCount; /* queue write count */
105
106 bus_size_t qAccRegAddr; /* access register */
107 bus_size_t qUOStatRegAddr; /* status register */
108 bus_size_t qConfigRegAddr; /* config register */
109 int qSizeInEntries; /* queue size in entries */
110
111 uint32_t qUflowStatBitMask; /* underflow status mask */
112 int qReadCount; /* queue read count */
113
114 /* XXX union */
115 uint32_t qStatRegAddr;
116 uint32_t qStatBitsOffset;
117 uint32_t qStat0BitMask;
118 uint32_t qStat1BitMask;
119
120 uint32_t intRegCheckMask; /* interrupt reg check mask */
121 void (*cb)(int, void *); /* callback function */
122 void *cbarg; /* callback argument */
123 int priority; /* dispatch priority */
124#if 0
125 /* NB: needed only for A0 parts */
126 u_int statusWordOffset; /* status word offset */
127 uint32_t statusMask; /* status mask */
128 uint32_t statusCheckValue; /* status check value */
129#endif
130};
131
132struct ixpqmgr_softc {
133 device_t sc_dev;
134 bus_space_tag_t sc_iot;
135 bus_space_handle_t sc_ioh;
136
137 struct resource *sc_irq1; /* IRQ resource */
138 void *sc_ih1; /* interrupt handler */
139 int sc_rid1; /* resource id for irq */
140
141 struct resource *sc_irq2;
142 void *sc_ih2;
143 int sc_rid2;
144
145 struct qmgrInfo qinfo[IX_QMGR_MAX_NUM_QUEUES];
146 /*
147 * This array contains a list of queue identifiers ordered by
148 * priority. The table is split logically between queue
149 * identifiers 0-31 and 32-63. To optimize lookups bit masks
150 * are kept for the first-32 and last-32 q's. When the
151 * table needs to be rebuilt mark rebuildTable and it'll
152 * happen after the next interrupt.
153 */
154 int priorityTable[IX_QMGR_MAX_NUM_QUEUES];
155 uint32_t lowPriorityTableFirstHalfMask;
156 uint32_t uppPriorityTableFirstHalfMask;
157 int rebuildTable; /* rebuild priorityTable */
158
159 uint32_t aqmFreeSramAddress; /* SRAM free space */
160};
161
162static int qmgr_debug = 0;
163SYSCTL_INT(_debug, OID_AUTO, qmgr, CTLFLAG_RW, &qmgr_debug,
61
62/*
63 * Intel XScale Queue Manager support.
64 *
65 * Each IXP4XXX device has a hardware block that implements a priority
66 * queue manager that is shared between the XScale cpu and the backend
67 * devices (such as the NPE). Queues are accessed by reading/writing
68 * special memory locations. The queue contents are mapped into a shared
69 * SRAM region with entries managed in a circular buffer. The XScale
70 * processor can receive interrupts based on queue contents (a condition
71 * code determines when interrupts should be delivered).
72 *
73 * The code here basically replaces the qmgr class in the Intel Access
74 * Library (IAL).
75 */
76#include <sys/param.h>
77#include <sys/systm.h>
78#include <sys/kernel.h>
79#include <sys/module.h>
80#include <sys/time.h>
81#include <sys/bus.h>
82#include <sys/resource.h>
83#include <sys/rman.h>
84#include <sys/sysctl.h>
85
86#include <machine/bus.h>
87#include <machine/cpu.h>
88#include <machine/cpufunc.h>
89#include <machine/resource.h>
90#include <machine/intr.h>
91#include <arm/xscale/ixp425/ixp425reg.h>
92#include <arm/xscale/ixp425/ixp425var.h>
93
94#include <arm/xscale/ixp425/ixp425_qmgr.h>
95
96/*
97 * State per AQM hw queue.
98 * This structure holds q configuration and dispatch state.
99 */
100struct qmgrInfo {
101 int qSizeInWords; /* queue size in words */
102
103 uint32_t qOflowStatBitMask; /* overflow status mask */
104 int qWriteCount; /* queue write count */
105
106 bus_size_t qAccRegAddr; /* access register */
107 bus_size_t qUOStatRegAddr; /* status register */
108 bus_size_t qConfigRegAddr; /* config register */
109 int qSizeInEntries; /* queue size in entries */
110
111 uint32_t qUflowStatBitMask; /* underflow status mask */
112 int qReadCount; /* queue read count */
113
114 /* XXX union */
115 uint32_t qStatRegAddr;
116 uint32_t qStatBitsOffset;
117 uint32_t qStat0BitMask;
118 uint32_t qStat1BitMask;
119
120 uint32_t intRegCheckMask; /* interrupt reg check mask */
121 void (*cb)(int, void *); /* callback function */
122 void *cbarg; /* callback argument */
123 int priority; /* dispatch priority */
124#if 0
125 /* NB: needed only for A0 parts */
126 u_int statusWordOffset; /* status word offset */
127 uint32_t statusMask; /* status mask */
128 uint32_t statusCheckValue; /* status check value */
129#endif
130};
131
132struct ixpqmgr_softc {
133 device_t sc_dev;
134 bus_space_tag_t sc_iot;
135 bus_space_handle_t sc_ioh;
136
137 struct resource *sc_irq1; /* IRQ resource */
138 void *sc_ih1; /* interrupt handler */
139 int sc_rid1; /* resource id for irq */
140
141 struct resource *sc_irq2;
142 void *sc_ih2;
143 int sc_rid2;
144
145 struct qmgrInfo qinfo[IX_QMGR_MAX_NUM_QUEUES];
146 /*
147 * This array contains a list of queue identifiers ordered by
148 * priority. The table is split logically between queue
149 * identifiers 0-31 and 32-63. To optimize lookups bit masks
150 * are kept for the first-32 and last-32 q's. When the
151 * table needs to be rebuilt mark rebuildTable and it'll
152 * happen after the next interrupt.
153 */
154 int priorityTable[IX_QMGR_MAX_NUM_QUEUES];
155 uint32_t lowPriorityTableFirstHalfMask;
156 uint32_t uppPriorityTableFirstHalfMask;
157 int rebuildTable; /* rebuild priorityTable */
158
159 uint32_t aqmFreeSramAddress; /* SRAM free space */
160};
161
162static int qmgr_debug = 0;
163SYSCTL_INT(_debug, OID_AUTO, qmgr, CTLFLAG_RW, &qmgr_debug,
164 0, "IXP425 Q-Manager debug msgs");
164 0, "IXP4XX Q-Manager debug msgs");
165TUNABLE_INT("debug.qmgr", &qmgr_debug);
166#define DPRINTF(dev, fmt, ...) do { \
167 if (qmgr_debug) printf(fmt, __VA_ARGS__); \
168} while (0)
169#define DPRINTFn(n, dev, fmt, ...) do { \
170 if (qmgr_debug >= n) printf(fmt, __VA_ARGS__); \
171} while (0)
172
173static struct ixpqmgr_softc *ixpqmgr_sc = NULL;
174
175static void ixpqmgr_rebuild(struct ixpqmgr_softc *);
176static void ixpqmgr_intr(void *);
177
178static void aqm_int_enable(struct ixpqmgr_softc *sc, int qId);
179static void aqm_int_disable(struct ixpqmgr_softc *sc, int qId);
180static void aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf);
181static void aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId);
182static void aqm_reset(struct ixpqmgr_softc *sc);
183
184static void
185dummyCallback(int qId, void *arg)
186{
187 /* XXX complain */
188}
189
190static uint32_t
191aqm_reg_read(struct ixpqmgr_softc *sc, bus_size_t off)
192{
193 DPRINTFn(9, sc->sc_dev, "%s(0x%x)\n", __func__, (int)off);
194 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
195}
196
197static void
198aqm_reg_write(struct ixpqmgr_softc *sc, bus_size_t off, uint32_t val)
199{
200 DPRINTFn(9, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, (int)off, val);
201 bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
202}
203
204static int
205ixpqmgr_probe(device_t dev)
206{
165TUNABLE_INT("debug.qmgr", &qmgr_debug);
166#define DPRINTF(dev, fmt, ...) do { \
167 if (qmgr_debug) printf(fmt, __VA_ARGS__); \
168} while (0)
169#define DPRINTFn(n, dev, fmt, ...) do { \
170 if (qmgr_debug >= n) printf(fmt, __VA_ARGS__); \
171} while (0)
172
173static struct ixpqmgr_softc *ixpqmgr_sc = NULL;
174
175static void ixpqmgr_rebuild(struct ixpqmgr_softc *);
176static void ixpqmgr_intr(void *);
177
178static void aqm_int_enable(struct ixpqmgr_softc *sc, int qId);
179static void aqm_int_disable(struct ixpqmgr_softc *sc, int qId);
180static void aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf);
181static void aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId);
182static void aqm_reset(struct ixpqmgr_softc *sc);
183
/*
 * Default queue callback, installed whenever no handler is
 * registered; intentionally a no-op.  XXX could log the unexpected
 * dispatch.
 */
static void
dummyCallback(int qId, void *arg)
{
}
189
190static uint32_t
191aqm_reg_read(struct ixpqmgr_softc *sc, bus_size_t off)
192{
193 DPRINTFn(9, sc->sc_dev, "%s(0x%x)\n", __func__, (int)off);
194 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
195}
196
197static void
198aqm_reg_write(struct ixpqmgr_softc *sc, bus_size_t off, uint32_t val)
199{
200 DPRINTFn(9, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, (int)off, val);
201 bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
202}
203
204static int
205ixpqmgr_probe(device_t dev)
206{
207 device_set_desc(dev, "IXP425 Q-Manager");
207 device_set_desc(dev, "IXP4XX Q-Manager");
208 return 0;
209}
210
211static int
212ixpqmgr_attach(device_t dev)
213{
214 struct ixpqmgr_softc *sc = device_get_softc(dev);
215 struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
216 int i, err;
217
218 ixpqmgr_sc = sc;
219
220 sc->sc_dev = dev;
221 sc->sc_iot = sa->sc_iot;
222 if (bus_space_map(sc->sc_iot, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE,
223 0, &sc->sc_ioh))
224 panic("%s: Cannot map registers", device_get_name(dev));
225
226 /* NB: we only use the lower 32 q's */
227
228 /* Set up QMGR interrupts */
229 sc->sc_rid1 = 0;
230 sc->sc_irq1 = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid1,
231 IXP425_INT_QUE1_32, IXP425_INT_QUE1_32, 1, RF_ACTIVE);
232 sc->sc_rid2 = 1;
233 sc->sc_irq2 = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid2,
234 IXP425_INT_QUE33_64, IXP425_INT_QUE33_64, 1, RF_ACTIVE);
235
236 if (sc->sc_irq1 == NULL || sc->sc_irq2 == NULL)
237 panic("Unable to allocate the qmgr irqs.\n");
238
239 err = bus_setup_intr(dev, sc->sc_irq1, INTR_TYPE_NET | INTR_MPSAFE,
240 NULL, ixpqmgr_intr, NULL, &sc->sc_ih1);
241 if (err) {
242 device_printf(dev, "failed to set up qmgr irq=%d\n",
243 IXP425_INT_QUE1_32);
244 return (ENXIO);
245 }
246 err = bus_setup_intr(dev, sc->sc_irq2, INTR_TYPE_NET | INTR_MPSAFE,
247 NULL, ixpqmgr_intr, NULL, &sc->sc_ih2);
248 if (err) {
249 device_printf(dev, "failed to set up qmgr irq=%d\n",
250 IXP425_INT_QUE33_64);
251 return (ENXIO);
252 }
253
254 /* NB: softc is pre-zero'd */
255 for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++) {
256 struct qmgrInfo *qi = &sc->qinfo[i];
257
258 qi->cb = dummyCallback;
259 qi->priority = IX_QMGR_Q_PRIORITY_0; /* default priority */
260 /*
261 * There are two interrupt registers, 32 bits each. One
262 * for the lower queues(0-31) and one for the upper
263 * queues(32-63). Therefore need to mod by 32 i.e the
264 * min upper queue identifier.
265 */
266 qi->intRegCheckMask = (1<<(i%(IX_QMGR_MIN_QUEUPP_QID)));
267
268 /*
269 * Register addresses and bit masks are calculated and
270 * stored here to optimize QRead, QWrite and QStatusGet
271 * functions.
272 */
273
274 /* AQM Queue access reg addresses, per queue */
275 qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i);
276 qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i);
277 qi->qConfigRegAddr = IX_QMGR_Q_CONFIG_ADDR_GET(i);
278
279 /* AQM Queue lower-group (0-31), only */
280 if (i < IX_QMGR_MIN_QUEUPP_QID) {
281 /* AQM Q underflow/overflow status reg address, per queue */
282 qi->qUOStatRegAddr = IX_QMGR_QUEUOSTAT0_OFFSET +
283 ((i / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD) *
284 sizeof(uint32_t));
285
286 /* AQM Q underflow status bit masks for status reg per queue */
287 qi->qUflowStatBitMask =
288 (IX_QMGR_UNDERFLOW_BIT_OFFSET + 1) <<
289 ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
290 (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));
291
292 /* AQM Q overflow status bit masks for status reg, per queue */
293 qi->qOflowStatBitMask =
294 (IX_QMGR_OVERFLOW_BIT_OFFSET + 1) <<
295 ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
296 (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));
297
298 /* AQM Q lower-group (0-31) status reg addresses, per queue */
299 qi->qStatRegAddr = IX_QMGR_QUELOWSTAT0_OFFSET +
300 ((i / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
301 sizeof(uint32_t));
302
303 /* AQM Q lower-group (0-31) status register bit offset */
304 qi->qStatBitsOffset =
305 (i & (IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD - 1)) *
306 (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD);
307 } else { /* AQM Q upper-group (32-63), only */
308 qi->qUOStatRegAddr = 0; /* XXX */
309
310 /* AQM Q upper-group (32-63) Nearly Empty status reg bitmasks */
311 qi->qStat0BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));
312
313 /* AQM Q upper-group (32-63) Full status register bitmasks */
314 qi->qStat1BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));
315 }
316 }
317
318 sc->aqmFreeSramAddress = 0x100; /* Q buffer space starts at 0x2100 */
319
320 ixpqmgr_rebuild(sc); /* build inital priority table */
321 aqm_reset(sc); /* reset h/w */
322 return (0);
323}
324
325static int
326ixpqmgr_detach(device_t dev)
327{
328 struct ixpqmgr_softc *sc = device_get_softc(dev);
329
330 aqm_reset(sc); /* disable interrupts */
331 bus_teardown_intr(dev, sc->sc_irq1, sc->sc_ih1);
332 bus_teardown_intr(dev, sc->sc_irq2, sc->sc_ih2);
333 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid1, sc->sc_irq1);
334 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid2, sc->sc_irq2);
335 bus_space_unmap(sc->sc_iot, sc->sc_ioh, IXP425_QMGR_SIZE);
336 return (0);
337}
338
339int
340ixpqmgr_qconfig(int qId, int qEntries, int ne, int nf, int srcSel,
341 void (*cb)(int, void *), void *cbarg)
342{
343 struct ixpqmgr_softc *sc = ixpqmgr_sc;
344 struct qmgrInfo *qi = &sc->qinfo[qId];
345
346 DPRINTF(sc->sc_dev, "%s(%u, %u, %u, %u, %u, %p, %p)\n",
347 __func__, qId, qEntries, ne, nf, srcSel, cb, cbarg);
348
349 /* NB: entry size is always 1 */
350 qi->qSizeInWords = qEntries;
351
352 qi->qReadCount = 0;
353 qi->qWriteCount = 0;
354 qi->qSizeInEntries = qEntries; /* XXX kept for code clarity */
355
356 if (cb == NULL) {
357 /* Reset to dummy callback */
358 qi->cb = dummyCallback;
359 qi->cbarg = 0;
360 } else {
361 qi->cb = cb;
362 qi->cbarg = cbarg;
363 }
364
365 /* Write the config register; NB must be AFTER qinfo setup */
366 aqm_qcfg(sc, qId, ne, nf);
367 /*
368 * Account for space just allocated to queue.
369 */
370 sc->aqmFreeSramAddress += (qi->qSizeInWords * sizeof(uint32_t));
371
372 /* Set the interrupt source if this queue is in the range 0-31 */
373 if (qId < IX_QMGR_MIN_QUEUPP_QID)
374 aqm_srcsel_write(sc, qId, srcSel);
375
376 if (cb != NULL) /* Enable the interrupt */
377 aqm_int_enable(sc, qId);
378
379 sc->rebuildTable = TRUE;
380
381 return 0; /* XXX */
382}
383
384int
385ixpqmgr_qwrite(int qId, uint32_t entry)
386{
387 struct ixpqmgr_softc *sc = ixpqmgr_sc;
388 struct qmgrInfo *qi = &sc->qinfo[qId];
389
390 DPRINTFn(3, sc->sc_dev, "%s(%u, 0x%x) writeCount %u size %u\n",
391 __func__, qId, entry, qi->qWriteCount, qi->qSizeInEntries);
392
393 /* write the entry */
394 aqm_reg_write(sc, qi->qAccRegAddr, entry);
395
396 /* NB: overflow is available for lower queues only */
397 if (qId < IX_QMGR_MIN_QUEUPP_QID) {
398 int qSize = qi->qSizeInEntries;
399 /*
400 * Increment the current number of entries in the queue
401 * and check for overflow .
402 */
403 if (qi->qWriteCount++ == qSize) { /* check for overflow */
404 uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
405 int qPtrs;
406
407 /*
408 * Read the status twice because the status may
409 * not be immediately ready after the write operation
410 */
411 if ((status & qi->qOflowStatBitMask) ||
412 ((status = aqm_reg_read(sc, qi->qUOStatRegAddr)) & qi->qOflowStatBitMask)) {
413 /*
414 * The queue is full, clear the overflow status bit if set.
415 */
416 aqm_reg_write(sc, qi->qUOStatRegAddr,
417 status & ~qi->qOflowStatBitMask);
418 qi->qWriteCount = qSize;
419 DPRINTFn(5, sc->sc_dev,
420 "%s(%u, 0x%x) Q full, overflow status cleared\n",
421 __func__, qId, entry);
422 return ENOSPC;
423 }
424 /*
425 * No overflow occured : someone is draining the queue
426 * and the current counter needs to be
427 * updated from the current number of entries in the queue
428 */
429
430 /* calculate number of words in q */
431 qPtrs = aqm_reg_read(sc, qi->qConfigRegAddr);
432 DPRINTFn(2, sc->sc_dev,
433 "%s(%u, 0x%x) Q full, no overflow status, qConfig 0x%x\n",
434 __func__, qId, entry, qPtrs);
435 qPtrs = (qPtrs - (qPtrs >> 7)) & 0x7f;
436
437 if (qPtrs == 0) {
438 /*
439 * The queue may be full at the time of the
440 * snapshot. Next access will check
441 * the overflow status again.
442 */
443 qi->qWriteCount = qSize;
444 } else {
445 /* convert the number of words to a number of entries */
446 qi->qWriteCount = qPtrs & (qSize - 1);
447 }
448 }
449 }
450 return 0;
451}
452
453int
454ixpqmgr_qread(int qId, uint32_t *entry)
455{
456 struct ixpqmgr_softc *sc = ixpqmgr_sc;
457 struct qmgrInfo *qi = &sc->qinfo[qId];
458 bus_size_t off = qi->qAccRegAddr;
459
460 *entry = aqm_reg_read(sc, off);
461
462 /*
463 * Reset the current read count : next access to the read function
464 * will force a underflow status check.
465 */
466 qi->qReadCount = 0;
467
468 /* Check if underflow occurred on the read */
469 if (*entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
470 /* get the queue status */
471 uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
472
473 if (status & qi->qUflowStatBitMask) { /* clear underflow status */
474 aqm_reg_write(sc, qi->qUOStatRegAddr,
475 status &~ qi->qUflowStatBitMask);
476 return ENOSPC;
477 }
478 }
479 return 0;
480}
481
482int
483ixpqmgr_qreadm(int qId, uint32_t n, uint32_t *p)
484{
485 struct ixpqmgr_softc *sc = ixpqmgr_sc;
486 struct qmgrInfo *qi = &sc->qinfo[qId];
487 uint32_t entry;
488 bus_size_t off = qi->qAccRegAddr;
489
490 entry = aqm_reg_read(sc, off);
491 while (--n) {
492 if (entry == 0) {
493 /* if we read a NULL entry, stop. We have underflowed */
494 break;
495 }
496 *p++ = entry; /* store */
497 entry = aqm_reg_read(sc, off);
498 }
499 *p = entry;
500
501 /*
502 * Reset the current read count : next access to the read function
503 * will force a underflow status check.
504 */
505 qi->qReadCount = 0;
506
507 /* Check if underflow occurred on the read */
508 if (entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
509 /* get the queue status */
510 uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
511
512 if (status & qi->qUflowStatBitMask) { /* clear underflow status */
513 aqm_reg_write(sc, qi->qUOStatRegAddr,
514 status &~ qi->qUflowStatBitMask);
515 return ENOSPC;
516 }
517 }
518 return 0;
519}
520
521uint32_t
522ixpqmgr_getqstatus(int qId)
523{
524#define QLOWSTATMASK \
525 ((1 << (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD)) - 1)
526 struct ixpqmgr_softc *sc = ixpqmgr_sc;
527 const struct qmgrInfo *qi = &sc->qinfo[qId];
528 uint32_t status;
529
530 if (qId < IX_QMGR_MIN_QUEUPP_QID) {
531 /* read the status of a queue in the range 0-31 */
532 status = aqm_reg_read(sc, qi->qStatRegAddr);
533
534 /* mask out the status bits relevant only to this queue */
535 status = (status >> qi->qStatBitsOffset) & QLOWSTATMASK;
536 } else { /* read status of a queue in the range 32-63 */
537 status = 0;
538 if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT0_OFFSET)&qi->qStat0BitMask)
539 status |= IX_QMGR_Q_STATUS_NE_BIT_MASK; /* nearly empty */
540 if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT1_OFFSET)&qi->qStat1BitMask)
541 status |= IX_QMGR_Q_STATUS_F_BIT_MASK; /* full */
542 }
543 return status;
544#undef QLOWSTATMASK
545}
546
547uint32_t
548ixpqmgr_getqconfig(int qId)
549{
550 struct ixpqmgr_softc *sc = ixpqmgr_sc;
551
552 return aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId));
553}
554
555void
556ixpqmgr_dump(void)
557{
558 struct ixpqmgr_softc *sc = ixpqmgr_sc;
559 int i, a;
560
561 /* status registers */
562 printf("0x%04x: %08x %08x %08x %08x\n"
563 , 0x400
564 , aqm_reg_read(sc, 0x400)
565 , aqm_reg_read(sc, 0x400+4)
566 , aqm_reg_read(sc, 0x400+8)
567 , aqm_reg_read(sc, 0x400+12)
568 );
569 printf("0x%04x: %08x %08x %08x %08x\n"
570 , 0x410
571 , aqm_reg_read(sc, 0x410)
572 , aqm_reg_read(sc, 0x410+4)
573 , aqm_reg_read(sc, 0x410+8)
574 , aqm_reg_read(sc, 0x410+12)
575 );
576 printf("0x%04x: %08x %08x %08x %08x\n"
577 , 0x420
578 , aqm_reg_read(sc, 0x420)
579 , aqm_reg_read(sc, 0x420+4)
580 , aqm_reg_read(sc, 0x420+8)
581 , aqm_reg_read(sc, 0x420+12)
582 );
583 printf("0x%04x: %08x %08x %08x %08x\n"
584 , 0x430
585 , aqm_reg_read(sc, 0x430)
586 , aqm_reg_read(sc, 0x430+4)
587 , aqm_reg_read(sc, 0x430+8)
588 , aqm_reg_read(sc, 0x430+12)
589 );
590 /* q configuration registers */
591 for (a = 0x2000; a < 0x20ff; a += 32)
592 printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
593 , a
594 , aqm_reg_read(sc, a)
595 , aqm_reg_read(sc, a+4)
596 , aqm_reg_read(sc, a+8)
597 , aqm_reg_read(sc, a+12)
598 , aqm_reg_read(sc, a+16)
599 , aqm_reg_read(sc, a+20)
600 , aqm_reg_read(sc, a+24)
601 , aqm_reg_read(sc, a+28)
602 );
603 /* allocated SRAM */
604 for (i = 0x100; i < sc->aqmFreeSramAddress; i += 32) {
605 a = 0x2000 + i;
606 printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
607 , a
608 , aqm_reg_read(sc, a)
609 , aqm_reg_read(sc, a+4)
610 , aqm_reg_read(sc, a+8)
611 , aqm_reg_read(sc, a+12)
612 , aqm_reg_read(sc, a+16)
613 , aqm_reg_read(sc, a+20)
614 , aqm_reg_read(sc, a+24)
615 , aqm_reg_read(sc, a+28)
616 );
617 }
618 for (i = 0; i < 16; i++) {
619 printf("Q[%2d] config 0x%08x status 0x%02x "
620 "Q[%2d] config 0x%08x status 0x%02x\n"
621 , i, ixpqmgr_getqconfig(i), ixpqmgr_getqstatus(i)
622 , i+16, ixpqmgr_getqconfig(i+16), ixpqmgr_getqstatus(i+16)
623 );
624 }
625}
626
627void
628ixpqmgr_notify_enable(int qId, int srcSel)
629{
630 struct ixpqmgr_softc *sc = ixpqmgr_sc;
631#if 0
632 /* Calculate the checkMask and checkValue for this q */
633 aqm_calc_statuscheck(sc, qId, srcSel);
634#endif
635 /* Set the interrupt source if this queue is in the range 0-31 */
636 if (qId < IX_QMGR_MIN_QUEUPP_QID)
637 aqm_srcsel_write(sc, qId, srcSel);
638
639 /* Enable the interrupt */
640 aqm_int_enable(sc, qId);
641}
642
643void
644ixpqmgr_notify_disable(int qId)
645{
646 struct ixpqmgr_softc *sc = ixpqmgr_sc;
647
648 aqm_int_disable(sc, qId);
649}
650
651/*
652 * Rebuild the priority table used by the dispatcher.
653 */
654static void
655ixpqmgr_rebuild(struct ixpqmgr_softc *sc)
656{
657 int q, pri;
658 int lowQuePriorityTableIndex, uppQuePriorityTableIndex;
659 struct qmgrInfo *qi;
660
661 sc->lowPriorityTableFirstHalfMask = 0;
662 sc->uppPriorityTableFirstHalfMask = 0;
663
664 lowQuePriorityTableIndex = 0;
665 uppQuePriorityTableIndex = 32;
666 for (pri = 0; pri < IX_QMGR_NUM_PRIORITY_LEVELS; pri++) {
667 /* low priority q's */
668 for (q = 0; q < IX_QMGR_MIN_QUEUPP_QID; q++) {
669 qi = &sc->qinfo[q];
670 if (qi->priority == pri) {
671 /*
672 * Build the priority table bitmask which match the
673 * queues of the first half of the priority table.
674 */
675 if (lowQuePriorityTableIndex < 16) {
676 sc->lowPriorityTableFirstHalfMask |=
677 qi->intRegCheckMask;
678 }
679 sc->priorityTable[lowQuePriorityTableIndex++] = q;
680 }
681 }
682 /* high priority q's */
683 for (; q < IX_QMGR_MAX_NUM_QUEUES; q++) {
684 qi = &sc->qinfo[q];
685 if (qi->priority == pri) {
686 /*
687 * Build the priority table bitmask which match the
688 * queues of the first half of the priority table .
689 */
690 if (uppQuePriorityTableIndex < 48) {
691 sc->uppPriorityTableFirstHalfMask |=
692 qi->intRegCheckMask;
693 }
694 sc->priorityTable[uppQuePriorityTableIndex++] = q;
695 }
696 }
697 }
698 sc->rebuildTable = FALSE;
699}
700
/*
 * Count the number of leading zero bits in a 32-bit word, returning
 * the same value as the ARM CLZ instruction:
 *
 *	word (in)	return value (out)
 *	0x80000000	0
 *	0x40000000	1
 *	,,,		,,,
 *	0x00000002	30
 *	0x00000001	31
 *	0x00000000	32
 *
 * This C fallback is used when the CLZ instruction is unavailable.
 * It narrows the candidate range by halves (16/8/4/2/1) rather than
 * shifting one bit at a time, so it runs in a constant five steps.
 */
static unsigned int
_lzcount(uint32_t word)
{
	unsigned int n = 0;

	if (word == 0)
		return 32;
	if ((word & 0xffff0000) == 0) { n += 16; word <<= 16; }
	if ((word & 0xff000000) == 0) { n += 8;  word <<= 8; }
	if ((word & 0xf0000000) == 0) { n += 4;  word <<= 4; }
	if ((word & 0xc0000000) == 0) { n += 2;  word <<= 2; }
	if ((word & 0x80000000) == 0) { n += 1; }
	return n;
}
734
/*
 * Queue manager interrupt handler.  Reads and acks QINTREG0 (queues
 * 0-31), then dispatches the per-queue callback for every queue whose
 * bit is set, walking the software priority table so higher-priority
 * queues are serviced first.  Rebuilds the priority table afterwards
 * if a config change marked it stale.
 */
static void
ixpqmgr_intr(void *arg)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	uint32_t intRegVal;		/* Interrupt reg val */
	struct qmgrInfo *qi;
	int priorityTableIndex;		/* Priority table index */
	int qIndex;			/* Current queue being processed */

	/* Read the interrupt register */
	intRegVal = aqm_reg_read(sc, IX_QMGR_QINTREG0_OFFSET);
	/* Write back to clear interrupt */
	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, intRegVal);

	DPRINTFn(5, sc->sc_dev, "%s: ISR0 0x%x ISR1 0x%x\n",
	    __func__, intRegVal, aqm_reg_read(sc, IX_QMGR_QINTREG1_OFFSET));

	/* Only dispatch if at least one queue raised an interrupt */
	if (intRegVal != 0) {
		/* get the first queue Id from the interrupt register value */
		qIndex = (32 - 1) - _lzcount(intRegVal);

		DPRINTFn(2, sc->sc_dev, "%s: ISR0 0x%x qIndex %u\n",
		    __func__, intRegVal, qIndex);

		/*
		 * Optimize for single callback case.
		 */
		qi = &sc->qinfo[qIndex];
		if (intRegVal == qi->intRegCheckMask) {
			/*
			 * Only 1 queue event triggered a notification.
			 * Call the callback function for this queue
			 */
			qi->cb(qIndex, qi->cbarg);
		} else {
			/*
			 * The event is triggered by more than 1 queue,
			 * the queue search will start from the beginning
			 * or the middle of the priority table.
			 *
			 * The search will end when all the bits of the
			 * interrupt register are cleared. There is no need
			 * to maintain a separate count and test it at each
			 * iteration.
			 */
			if (intRegVal & sc->lowPriorityTableFirstHalfMask) {
				priorityTableIndex = 0;
			} else {
				priorityTableIndex = 16;
			}
			/*
			 * Iterate over the priority table until all the bits
			 * of the interrupt register are cleared.
			 */
			do {
				qIndex = sc->priorityTable[priorityTableIndex++];
				qi = &sc->qinfo[qIndex];

				/* If this queue caused this interrupt to be raised */
				if (intRegVal & qi->intRegCheckMask) {
					/* Call the callback function for this queue */
					qi->cb(qIndex, qi->cbarg);
					/* Clear the interrupt register bit */
					intRegVal &= ~qi->intRegCheckMask;
				}
			} while (intRegVal);
		}
	}

	/* Rebuild the priority table if needed */
	if (sc->rebuildTable)
		ixpqmgr_rebuild(sc);
}
808
#if 0
/*
 * Generate the parameters used to check if a Q's status matches
 * the specified source select. We calculate which status word
 * to check (statusWordOffset), the value to check the status
 * against (statusCheckValue) and the mask (statusMask) to mask
 * out all but the bits to check in the status word.
 *
 * NOTE(review): this code is disabled; it references a file-scope
 * `qinfo[]` rather than the softc, and the (also-disabled) call site
 * in ixpqmgr_notify_enable() passes three arguments while this takes
 * two -- both need fixing before it can be re-enabled.
 */
static void
aqm_calc_statuscheck(int qId, IxQMgrSourceId srcSel)
{
	struct qmgrInfo *qi = &qinfo[qId];
	uint32_t shiftVal;

	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		switch (srcSel) {
		case IX_QMGR_Q_SOURCE_ID_E:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_E_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NE:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NF:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_F:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_F_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_E:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_NE:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_NF:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_F:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
			break;
		default:
			/* Should never hit */
			IX_OSAL_ASSERT(0);
			break;
		}

		/* One nibble of status per queue so need to shift the
		 * check value and mask out to the correct position.
		 */
		shiftVal = (qId % IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
		    IX_QMGR_QUELOWSTAT_BITS_PER_Q;

		/* Calculate the which status word to check from the qId,
		 * 8 Qs status per word
		 */
		qi->statusWordOffset = qId / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD;

		qi->statusCheckValue <<= shiftVal;
		qi->statusMask <<= shiftVal;
	} else {
		/* One status word */
		qi->statusWordOffset = 0;
		/* Single bits per queue and int source bit hardwired NE,
		 * Qs start at 32.
		 */
		qi->statusMask = 1 << (qId - IX_QMGR_MIN_QUEUPP_QID);
		qi->statusCheckValue = qi->statusMask;
	}
}
#endif
887
/*
 * Enable interrupt delivery for queue qId by setting its bit in the
 * matching interrupt-enable register (QUEIEREG0 covers queues 0-31,
 * QUEIEREG1 covers queues 32-63).
 */
static void
aqm_int_enable(struct ixpqmgr_softc *sc, int qId)
{
	bus_size_t reg;
	uint32_t v;

	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		reg = IX_QMGR_QUEIEREG0_OFFSET;
	else
		reg = IX_QMGR_QUEIEREG1_OFFSET;
	v = aqm_reg_read(sc, reg);
	/* qId mod 32 selects the bit position within either register */
	aqm_reg_write(sc, reg, v | (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));

	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
}
904
/*
 * Disable interrupt delivery for queue qId by clearing its bit in the
 * matching interrupt-enable register (QUEIEREG0 covers queues 0-31,
 * QUEIEREG1 covers queues 32-63).
 */
static void
aqm_int_disable(struct ixpqmgr_softc *sc, int qId)
{
	bus_size_t reg;
	uint32_t v;

	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		reg = IX_QMGR_QUEIEREG0_OFFSET;
	else
		reg = IX_QMGR_QUEIEREG1_OFFSET;
	v = aqm_reg_read(sc, reg);
	/* qId mod 32 selects the bit position within either register */
	aqm_reg_write(sc, reg, v &~ (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));

	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
}
921
/*
 * Integer floor(log2(n)).
 * N.B. returns 0 when supplied 0 (as well as for n == 1).
 */
static unsigned
log2(unsigned n)
{
	unsigned bits = 0;

	while (n >>= 1)
		bits++;
	return bits;
}
933
/*
 * Map an entry size in words to the 2-bit AQM config encoding:
 * entrySize 1("00"), 2("01"), 4("10").
 */
static __inline unsigned
toAqmEntrySize(int entrySize)
{
	return log2(entrySize);
}
940
/*
 * Map a queue buffer size in words to the 2-bit AQM config encoding:
 * bufferSize 16("00"), 32("01"), 64("10"), 128("11").
 */
static __inline unsigned
toAqmBufferSize(unsigned bufferSizeInWords)
{
	return log2(bufferSizeInWords / IX_QMGR_MIN_BUFFER_SIZE);
}
947
/*
 * Map a watermark level to the 3-bit AQM config encoding:
 * 0("000"), 1("001"), 2("010"), 4("011"),
 * 8("100"), 16("101"), 32("110"), 64("111").
 * The 2* compensates for log2(0) and log2(1) both being 0.
 */
static __inline unsigned
toAqmWatermark(int watermark)
{
	return log2(2 * watermark);
}
957
/*
 * Program the QUECONFIG register for queue qId: entry size (always one
 * word), buffer size, SRAM base address and the nearly-empty (ne) /
 * nearly-full (nf) watermarks.  The SRAM base is taken from the
 * running aqmFreeSramAddress allocator; the caller accounts for the
 * space consumed.
 */
static void
aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf)
{
	const struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t qCfg;
	uint32_t baseAddress;

	/* Build config register */
	qCfg = ((toAqmEntrySize(1) & IX_QMGR_ENTRY_SIZE_MASK) <<
	    IX_QMGR_Q_CONFIG_ESIZE_OFFSET)
	    | ((toAqmBufferSize(qi->qSizeInWords) & IX_QMGR_SIZE_MASK) <<
	    IX_QMGR_Q_CONFIG_BSIZE_OFFSET);

	/* baseAddress, calculated relative to start address */
	baseAddress = sc->aqmFreeSramAddress;

	/* base address must be word-aligned */
	KASSERT((baseAddress % IX_QMGR_BASE_ADDR_16_WORD_ALIGN) == 0,
	    ("address not word-aligned"));

	/* Now convert to a 16 word pointer as required by QUECONFIG register */
	baseAddress >>= IX_QMGR_BASE_ADDR_16_WORD_SHIFT;
	qCfg |= baseAddress << IX_QMGR_Q_CONFIG_BADDR_OFFSET;

	/* set watermarks */
	qCfg |= (toAqmWatermark(ne) << IX_QMGR_Q_CONFIG_NE_OFFSET)
	    | (toAqmWatermark(nf) << IX_QMGR_Q_CONFIG_NF_OFFSET);

	DPRINTF(sc->sc_dev, "%s(%u, %u, %u) 0x%x => 0x%x @ 0x%x\n",
	    __func__, qId, ne, nf,
	    aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId)),
	    qCfg, IX_QMGR_Q_CONFIG_ADDR_GET(qId));

	aqm_reg_write(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId), qCfg);
}
993
/*
 * Program the interrupt source-select field for queue qId (queues
 * 0-31 only): each INT0SRCSELREG word holds the source-select fields
 * of several queues, so read-modify-write just this queue's field.
 */
static void
aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId)
{
	bus_size_t off;
	uint32_t v;

	/*
	 * Calculate the register offset; multiple queues split across registers
	 */
	off = IX_QMGR_INT0SRCSELREG0_OFFSET +
	    ((qId / IX_QMGR_INTSRC_NUM_QUE_PER_WORD) * sizeof(uint32_t));

	v = aqm_reg_read(sc, off);
	if (off == IX_QMGR_INT0SRCSELREG0_OFFSET && qId == 0) {
		/* Queue 0 at INT0SRCSELREG should not corrupt the value bit-3 */
		v |= 0x7;
	} else {
		const uint32_t bpq = 32 / IX_QMGR_INTSRC_NUM_QUE_PER_WORD;
		uint32_t mask;
		int qshift;

		/* bit offset of this queue's field within the register */
		qshift = (qId & (IX_QMGR_INTSRC_NUM_QUE_PER_WORD-1)) * bpq;
		mask = ((1 << bpq) - 1) << qshift;	/* q's status mask */

		/* merge sourceId */
		v = (v &~ mask) | ((sourceId << qshift) & mask);
	}

	DPRINTF(sc->sc_dev, "%s(%u, %u) 0x%x => 0x%x @ 0x%lx\n",
	    __func__, qId, sourceId, aqm_reg_read(sc, off), v, off);
	aqm_reg_write(sc, off, v);
}
1026
/*
 * Reset AQM registers to default values: clear all queue status,
 * underflow/overflow, source-select, interrupt-enable and pending
 * interrupt state, reset every queue's config word, and zero the
 * queue SRAM.  NB: also serves to disable all queue interrupts.
 */
static void
aqm_reset(struct ixpqmgr_softc *sc)
{
	int i;

	/* Reset queues 0..31 status registers 0..3 */
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT0_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT1_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT2_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT3_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);

	/* Reset underflow/overflow status registers 0..1 */
	aqm_reg_write(sc, IX_QMGR_QUEUOSTAT0_OFFSET,
	    IX_QMGR_QUEUOSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUEUOSTAT1_OFFSET,
	    IX_QMGR_QUEUOSTAT_RESET_VALUE);

	/* Reset queues 32..63 nearly empty status registers */
	aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT0_OFFSET,
	    IX_QMGR_QUEUPPSTAT0_RESET_VALUE);

	/* Reset queues 32..63 full status registers */
	aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT1_OFFSET,
	    IX_QMGR_QUEUPPSTAT1_RESET_VALUE);

	/* Reset int0 status flag source select registers 0..3 */
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG0_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG1_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG2_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG3_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);

	/* Reset queue interrupt enable register 0..1 */
	aqm_reg_write(sc, IX_QMGR_QUEIEREG0_OFFSET,
	    IX_QMGR_QUEIEREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUEIEREG1_OFFSET,
	    IX_QMGR_QUEIEREG_RESET_VALUE);

	/* Reset queue interrupt register 0..1 */
	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QINTREG1_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);

	/* Reset queue configuration words 0..63 */
	for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++)
		aqm_reg_write(sc, sc->qinfo[i].qConfigRegAddr,
		    IX_QMGR_QUECONFIG_RESET_VALUE);

	/* XXX zero SRAM to simplify debugging */
	for (i = IX_QMGR_QUEBUFFER_SPACE_OFFSET;
	    i < IX_QMGR_AQM_SRAM_SIZE_IN_BYTES; i += sizeof(uint32_t))
		aqm_reg_write(sc, i, 0);
}
1089
/* newbus glue: device methods, driver declaration and registration */
static device_method_t ixpqmgr_methods[] = {
	DEVMETHOD(device_probe,		ixpqmgr_probe),
	DEVMETHOD(device_attach,	ixpqmgr_attach),
	DEVMETHOD(device_detach,	ixpqmgr_detach),

	{ 0, 0 }
};

static driver_t ixpqmgr_driver = {
	"ixpqmgr",
	ixpqmgr_methods,
	sizeof(struct ixpqmgr_softc),
};
static devclass_t ixpqmgr_devclass;

DRIVER_MODULE(ixpqmgr, ixp, ixpqmgr_driver, ixpqmgr_devclass, 0, 0);
208 return 0;
209}
210
211static int
212ixpqmgr_attach(device_t dev)
213{
214 struct ixpqmgr_softc *sc = device_get_softc(dev);
215 struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
216 int i, err;
217
218 ixpqmgr_sc = sc;
219
220 sc->sc_dev = dev;
221 sc->sc_iot = sa->sc_iot;
222 if (bus_space_map(sc->sc_iot, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE,
223 0, &sc->sc_ioh))
224 panic("%s: Cannot map registers", device_get_name(dev));
225
226 /* NB: we only use the lower 32 q's */
227
228 /* Set up QMGR interrupts */
229 sc->sc_rid1 = 0;
230 sc->sc_irq1 = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid1,
231 IXP425_INT_QUE1_32, IXP425_INT_QUE1_32, 1, RF_ACTIVE);
232 sc->sc_rid2 = 1;
233 sc->sc_irq2 = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid2,
234 IXP425_INT_QUE33_64, IXP425_INT_QUE33_64, 1, RF_ACTIVE);
235
236 if (sc->sc_irq1 == NULL || sc->sc_irq2 == NULL)
237 panic("Unable to allocate the qmgr irqs.\n");
238
239 err = bus_setup_intr(dev, sc->sc_irq1, INTR_TYPE_NET | INTR_MPSAFE,
240 NULL, ixpqmgr_intr, NULL, &sc->sc_ih1);
241 if (err) {
242 device_printf(dev, "failed to set up qmgr irq=%d\n",
243 IXP425_INT_QUE1_32);
244 return (ENXIO);
245 }
246 err = bus_setup_intr(dev, sc->sc_irq2, INTR_TYPE_NET | INTR_MPSAFE,
247 NULL, ixpqmgr_intr, NULL, &sc->sc_ih2);
248 if (err) {
249 device_printf(dev, "failed to set up qmgr irq=%d\n",
250 IXP425_INT_QUE33_64);
251 return (ENXIO);
252 }
253
254 /* NB: softc is pre-zero'd */
255 for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++) {
256 struct qmgrInfo *qi = &sc->qinfo[i];
257
258 qi->cb = dummyCallback;
259 qi->priority = IX_QMGR_Q_PRIORITY_0; /* default priority */
260 /*
261 * There are two interrupt registers, 32 bits each. One
262 * for the lower queues(0-31) and one for the upper
263 * queues(32-63). Therefore need to mod by 32 i.e the
264 * min upper queue identifier.
265 */
266 qi->intRegCheckMask = (1<<(i%(IX_QMGR_MIN_QUEUPP_QID)));
267
268 /*
269 * Register addresses and bit masks are calculated and
270 * stored here to optimize QRead, QWrite and QStatusGet
271 * functions.
272 */
273
274 /* AQM Queue access reg addresses, per queue */
275 qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i);
276 qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i);
277 qi->qConfigRegAddr = IX_QMGR_Q_CONFIG_ADDR_GET(i);
278
279 /* AQM Queue lower-group (0-31), only */
280 if (i < IX_QMGR_MIN_QUEUPP_QID) {
281 /* AQM Q underflow/overflow status reg address, per queue */
282 qi->qUOStatRegAddr = IX_QMGR_QUEUOSTAT0_OFFSET +
283 ((i / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD) *
284 sizeof(uint32_t));
285
286 /* AQM Q underflow status bit masks for status reg per queue */
287 qi->qUflowStatBitMask =
288 (IX_QMGR_UNDERFLOW_BIT_OFFSET + 1) <<
289 ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
290 (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));
291
292 /* AQM Q overflow status bit masks for status reg, per queue */
293 qi->qOflowStatBitMask =
294 (IX_QMGR_OVERFLOW_BIT_OFFSET + 1) <<
295 ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
296 (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));
297
298 /* AQM Q lower-group (0-31) status reg addresses, per queue */
299 qi->qStatRegAddr = IX_QMGR_QUELOWSTAT0_OFFSET +
300 ((i / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
301 sizeof(uint32_t));
302
303 /* AQM Q lower-group (0-31) status register bit offset */
304 qi->qStatBitsOffset =
305 (i & (IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD - 1)) *
306 (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD);
307 } else { /* AQM Q upper-group (32-63), only */
308 qi->qUOStatRegAddr = 0; /* XXX */
309
310 /* AQM Q upper-group (32-63) Nearly Empty status reg bitmasks */
311 qi->qStat0BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));
312
313 /* AQM Q upper-group (32-63) Full status register bitmasks */
314 qi->qStat1BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));
315 }
316 }
317
318 sc->aqmFreeSramAddress = 0x100; /* Q buffer space starts at 0x2100 */
319
320 ixpqmgr_rebuild(sc); /* build inital priority table */
321 aqm_reset(sc); /* reset h/w */
322 return (0);
323}
324
/*
 * Detach: quiesce the hardware (aqm_reset also disables all queue
 * interrupts), then tear down the interrupt handlers, release the IRQ
 * resources and unmap the register window.
 */
static int
ixpqmgr_detach(device_t dev)
{
	struct ixpqmgr_softc *sc = device_get_softc(dev);

	aqm_reset(sc);		/* disable interrupts */
	bus_teardown_intr(dev, sc->sc_irq1, sc->sc_ih1);
	bus_teardown_intr(dev, sc->sc_irq2, sc->sc_ih2);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid1, sc->sc_irq1);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid2, sc->sc_irq2);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, IXP425_QMGR_SIZE);
	return (0);
}
338
/*
 * Configure queue qId: qEntries one-word entries, nearly-empty (ne)
 * and nearly-full (nf) watermarks, interrupt source select srcSel,
 * and an optional notification callback cb(qId, cbarg).  A NULL cb
 * installs a no-op callback and leaves the queue interrupt disabled.
 * SRAM for the queue is carved from a simple bump allocator; there is
 * no check against SRAM exhaustion or re-configuration of the same
 * queue (NOTE(review): callers must not over-commit).  Always
 * returns 0.
 */
int
ixpqmgr_qconfig(int qId, int qEntries, int ne, int nf, int srcSel,
    void (*cb)(int, void *), void *cbarg)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];

	DPRINTF(sc->sc_dev, "%s(%u, %u, %u, %u, %u, %p, %p)\n",
	    __func__, qId, qEntries, ne, nf, srcSel, cb, cbarg);

	/* NB: entry size is always 1 */
	qi->qSizeInWords = qEntries;

	qi->qReadCount = 0;
	qi->qWriteCount = 0;
	qi->qSizeInEntries = qEntries;	/* XXX kept for code clarity */

	if (cb == NULL) {
		/* Reset to dummy callback */
		qi->cb = dummyCallback;
		qi->cbarg = 0;
	} else {
		qi->cb = cb;
		qi->cbarg = cbarg;
	}

	/* Write the config register; NB must be AFTER qinfo setup */
	aqm_qcfg(sc, qId, ne, nf);
	/*
	 * Account for space just allocated to queue.
	 */
	sc->aqmFreeSramAddress += (qi->qSizeInWords * sizeof(uint32_t));

	/* Set the interrupt source if this queue is in the range 0-31 */
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		aqm_srcsel_write(sc, qId, srcSel);

	if (cb != NULL)		/* Enable the interrupt */
		aqm_int_enable(sc, qId);

	/* Priority table must be regenerated on the next interrupt */
	sc->rebuildTable = TRUE;

	return 0;		/* XXX */
}
383
/*
 * Write one entry to queue qId.  Returns 0 on success or ENOSPC if
 * the queue overflowed.  Overflow detection exists only for the lower
 * queues (0-31); writes to upper queues are never flagged.
 */
int
ixpqmgr_qwrite(int qId, uint32_t entry)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];

	DPRINTFn(3, sc->sc_dev, "%s(%u, 0x%x) writeCount %u size %u\n",
	    __func__, qId, entry, qi->qWriteCount, qi->qSizeInEntries);

	/* write the entry */
	aqm_reg_write(sc, qi->qAccRegAddr, entry);

	/* NB: overflow is available for lower queues only */
	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		int qSize = qi->qSizeInEntries;
		/*
		 * Increment the current number of entries in the queue
		 * and check for overflow.
		 */
		if (qi->qWriteCount++ == qSize) {	/* check for overflow */
			uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
			int qPtrs;

			/*
			 * Read the status twice because the status may
			 * not be immediately ready after the write operation
			 */
			if ((status & qi->qOflowStatBitMask) ||
			    ((status = aqm_reg_read(sc, qi->qUOStatRegAddr)) & qi->qOflowStatBitMask)) {
				/*
				 * The queue is full, clear the overflow status bit if set.
				 */
				aqm_reg_write(sc, qi->qUOStatRegAddr,
				    status & ~qi->qOflowStatBitMask);
				qi->qWriteCount = qSize;
				DPRINTFn(5, sc->sc_dev,
				    "%s(%u, 0x%x) Q full, overflow status cleared\n",
				    __func__, qId, entry);
				return ENOSPC;
			}
			/*
			 * No overflow occured: someone is draining the queue,
			 * so resync the shadow write counter from the h/w's
			 * current number of entries in the queue.
			 */

			/*
			 * Recover the occupancy from the config register.
			 * NOTE(review): the (qPtrs - (qPtrs >> 7)) & 0x7f
			 * arithmetic presumably subtracts the read pointer
			 * from the write pointer modulo the buffer size --
			 * confirm against the QUECONFIG register layout.
			 */
			qPtrs = aqm_reg_read(sc, qi->qConfigRegAddr);
			DPRINTFn(2, sc->sc_dev,
			    "%s(%u, 0x%x) Q full, no overflow status, qConfig 0x%x\n",
			    __func__, qId, entry, qPtrs);
			qPtrs = (qPtrs - (qPtrs >> 7)) & 0x7f;

			if (qPtrs == 0) {
				/*
				 * The queue may be full at the time of the
				 * snapshot. Next access will check
				 * the overflow status again.
				 */
				qi->qWriteCount = qSize;
			} else {
				/* convert the number of words to a number of entries */
				qi->qWriteCount = qPtrs & (qSize - 1);
			}
		}
	}
	return 0;
}
452
/*
 * Read one entry from queue qId into *entry.  Returns 0 on success,
 * or ENOSPC if the hardware reported underflow (lower queues only --
 * a zero entry from a lower queue triggers the status check, since
 * the access register reads 0 on an empty queue).
 */
int
ixpqmgr_qread(int qId, uint32_t *entry)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];
	bus_size_t off = qi->qAccRegAddr;

	*entry = aqm_reg_read(sc, off);

	/*
	 * Reset the current read count: next access to the read function
	 * will force an underflow status check.
	 */
	qi->qReadCount = 0;

	/* Check if underflow occurred on the read */
	if (*entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* get the queue status */
		uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);

		if (status & qi->qUflowStatBitMask) {	/* clear underflow status */
			aqm_reg_write(sc, qi->qUOStatRegAddr,
			    status &~ qi->qUflowStatBitMask);
			return ENOSPC;
		}
	}
	return 0;
}
481
482int
483ixpqmgr_qreadm(int qId, uint32_t n, uint32_t *p)
484{
485 struct ixpqmgr_softc *sc = ixpqmgr_sc;
486 struct qmgrInfo *qi = &sc->qinfo[qId];
487 uint32_t entry;
488 bus_size_t off = qi->qAccRegAddr;
489
490 entry = aqm_reg_read(sc, off);
491 while (--n) {
492 if (entry == 0) {
493 /* if we read a NULL entry, stop. We have underflowed */
494 break;
495 }
496 *p++ = entry; /* store */
497 entry = aqm_reg_read(sc, off);
498 }
499 *p = entry;
500
501 /*
502 * Reset the current read count : next access to the read function
503 * will force a underflow status check.
504 */
505 qi->qReadCount = 0;
506
507 /* Check if underflow occurred on the read */
508 if (entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
509 /* get the queue status */
510 uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
511
512 if (status & qi->qUflowStatBitMask) { /* clear underflow status */
513 aqm_reg_write(sc, qi->qUOStatRegAddr,
514 status &~ qi->qUflowStatBitMask);
515 return ENOSPC;
516 }
517 }
518 return 0;
519}
520
/*
 * Return the status flags for queue qId.  Lower queues (0-31) return
 * their per-queue nibble from the QUELOWSTAT registers; upper queues
 * (32-63) synthesize NE/F bits from the two upper-group status words.
 */
uint32_t
ixpqmgr_getqstatus(int qId)
{
#define	QLOWSTATMASK \
    ((1 << (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD)) - 1)
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	const struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t status;

	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* read the status of a queue in the range 0-31 */
		status = aqm_reg_read(sc, qi->qStatRegAddr);

		/* mask out the status bits relevant only to this queue */
		status = (status >> qi->qStatBitsOffset) & QLOWSTATMASK;
	} else {	/* read status of a queue in the range 32-63 */
		status = 0;
		if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT0_OFFSET)&qi->qStat0BitMask)
			status |= IX_QMGR_Q_STATUS_NE_BIT_MASK;	/* nearly empty */
		if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT1_OFFSET)&qi->qStat1BitMask)
			status |= IX_QMGR_Q_STATUS_F_BIT_MASK;	/* full */
	}
	return status;
#undef QLOWSTATMASK
}
546
/* Return the raw QUECONFIG register word for queue qId. */
uint32_t
ixpqmgr_getqconfig(int qId)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;

	return aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId));
}
554
555void
556ixpqmgr_dump(void)
557{
558 struct ixpqmgr_softc *sc = ixpqmgr_sc;
559 int i, a;
560
561 /* status registers */
562 printf("0x%04x: %08x %08x %08x %08x\n"
563 , 0x400
564 , aqm_reg_read(sc, 0x400)
565 , aqm_reg_read(sc, 0x400+4)
566 , aqm_reg_read(sc, 0x400+8)
567 , aqm_reg_read(sc, 0x400+12)
568 );
569 printf("0x%04x: %08x %08x %08x %08x\n"
570 , 0x410
571 , aqm_reg_read(sc, 0x410)
572 , aqm_reg_read(sc, 0x410+4)
573 , aqm_reg_read(sc, 0x410+8)
574 , aqm_reg_read(sc, 0x410+12)
575 );
576 printf("0x%04x: %08x %08x %08x %08x\n"
577 , 0x420
578 , aqm_reg_read(sc, 0x420)
579 , aqm_reg_read(sc, 0x420+4)
580 , aqm_reg_read(sc, 0x420+8)
581 , aqm_reg_read(sc, 0x420+12)
582 );
583 printf("0x%04x: %08x %08x %08x %08x\n"
584 , 0x430
585 , aqm_reg_read(sc, 0x430)
586 , aqm_reg_read(sc, 0x430+4)
587 , aqm_reg_read(sc, 0x430+8)
588 , aqm_reg_read(sc, 0x430+12)
589 );
590 /* q configuration registers */
591 for (a = 0x2000; a < 0x20ff; a += 32)
592 printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
593 , a
594 , aqm_reg_read(sc, a)
595 , aqm_reg_read(sc, a+4)
596 , aqm_reg_read(sc, a+8)
597 , aqm_reg_read(sc, a+12)
598 , aqm_reg_read(sc, a+16)
599 , aqm_reg_read(sc, a+20)
600 , aqm_reg_read(sc, a+24)
601 , aqm_reg_read(sc, a+28)
602 );
603 /* allocated SRAM */
604 for (i = 0x100; i < sc->aqmFreeSramAddress; i += 32) {
605 a = 0x2000 + i;
606 printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
607 , a
608 , aqm_reg_read(sc, a)
609 , aqm_reg_read(sc, a+4)
610 , aqm_reg_read(sc, a+8)
611 , aqm_reg_read(sc, a+12)
612 , aqm_reg_read(sc, a+16)
613 , aqm_reg_read(sc, a+20)
614 , aqm_reg_read(sc, a+24)
615 , aqm_reg_read(sc, a+28)
616 );
617 }
618 for (i = 0; i < 16; i++) {
619 printf("Q[%2d] config 0x%08x status 0x%02x "
620 "Q[%2d] config 0x%08x status 0x%02x\n"
621 , i, ixpqmgr_getqconfig(i), ixpqmgr_getqstatus(i)
622 , i+16, ixpqmgr_getqconfig(i+16), ixpqmgr_getqstatus(i+16)
623 );
624 }
625}
626
/*
 * (Re-)enable notifications for queue qId: program its interrupt
 * source select (lower queues only) and set its interrupt-enable bit.
 */
void
ixpqmgr_notify_enable(int qId, int srcSel)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
#if 0
	/* Calculate the checkMask and checkValue for this q */
	aqm_calc_statuscheck(sc, qId, srcSel);
#endif
	/* Set the interrupt source if this queue is in the range 0-31 */
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		aqm_srcsel_write(sc, qId, srcSel);

	/* Enable the interrupt */
	aqm_int_enable(sc, qId);
}
642
/* Disable notifications (the queue interrupt) for queue qId. */
void
ixpqmgr_notify_disable(int qId)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;

	aqm_int_disable(sc, qId);
}
650
/*
 * Rebuild the priority table used by the dispatcher: queues are
 * listed in ascending priority order, lower queues (0-31) in slots
 * 0-31 and upper queues (32-63) in slots 32-63.  Also rebuilds the
 * "first half" bitmasks the ISR uses to decide where to start its
 * table walk.
 */
static void
ixpqmgr_rebuild(struct ixpqmgr_softc *sc)
{
	int q, pri;
	int lowQuePriorityTableIndex, uppQuePriorityTableIndex;
	struct qmgrInfo *qi;

	sc->lowPriorityTableFirstHalfMask = 0;
	sc->uppPriorityTableFirstHalfMask = 0;

	lowQuePriorityTableIndex = 0;
	uppQuePriorityTableIndex = 32;
	for (pri = 0; pri < IX_QMGR_NUM_PRIORITY_LEVELS; pri++) {
		/* low priority q's */
		for (q = 0; q < IX_QMGR_MIN_QUEUPP_QID; q++) {
			qi = &sc->qinfo[q];
			if (qi->priority == pri) {
				/*
				 * Accumulate the mask of queues landing in
				 * the first half (slots 0-15) of the lower
				 * portion of the priority table.
				 */
				if (lowQuePriorityTableIndex < 16) {
					sc->lowPriorityTableFirstHalfMask |=
					    qi->intRegCheckMask;
				}
				sc->priorityTable[lowQuePriorityTableIndex++] = q;
			}
		}
		/* high priority q's */
		for (; q < IX_QMGR_MAX_NUM_QUEUES; q++) {
			qi = &sc->qinfo[q];
			if (qi->priority == pri) {
				/*
				 * Accumulate the mask of queues landing in
				 * the first half (slots 32-47) of the upper
				 * portion of the priority table.
				 */
				if (uppQuePriorityTableIndex < 48) {
					sc->uppPriorityTableFirstHalfMask |=
					    qi->intRegCheckMask;
				}
				sc->priorityTable[uppQuePriorityTableIndex++] = q;
			}
		}
	}
	sc->rebuildTable = FALSE;
}
700
701/*
702 * Count the number of leading zero bits in a word,
703 * and return the same value than the CLZ instruction.
704 * Note this is similar to the standard ffs function but
705 * it counts zero's from the MSB instead of the LSB.
706 *
707 * word (in) return value (out)
708 * 0x80000000 0
709 * 0x40000000 1
710 * ,,, ,,,
711 * 0x00000002 30
712 * 0x00000001 31
713 * 0x00000000 32
714 *
715 * The C version of this function is used as a replacement
716 * for system not providing the equivalent of the CLZ
717 * assembly language instruction.
718 *
719 * Note that this version is big-endian
720 */
721static unsigned int
722_lzcount(uint32_t word)
723{
724 unsigned int lzcount = 0;
725
726 if (word == 0)
727 return 32;
728 while ((word & 0x80000000) == 0) {
729 word <<= 1;
730 lzcount++;
731 }
732 return lzcount;
733}
734
/*
 * Queue manager interrupt handler.  Reads and acks QINTREG0 (queues
 * 0-31), then dispatches the per-queue callback for every queue whose
 * bit is set, walking the software priority table so higher-priority
 * queues are serviced first.  Rebuilds the priority table afterwards
 * if a config change marked it stale.
 */
static void
ixpqmgr_intr(void *arg)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	uint32_t intRegVal;		/* Interrupt reg val */
	struct qmgrInfo *qi;
	int priorityTableIndex;		/* Priority table index */
	int qIndex;			/* Current queue being processed */

	/* Read the interrupt register */
	intRegVal = aqm_reg_read(sc, IX_QMGR_QINTREG0_OFFSET);
	/* Write back to clear interrupt */
	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, intRegVal);

	DPRINTFn(5, sc->sc_dev, "%s: ISR0 0x%x ISR1 0x%x\n",
	    __func__, intRegVal, aqm_reg_read(sc, IX_QMGR_QINTREG1_OFFSET));

	/* Only dispatch if at least one queue raised an interrupt */
	if (intRegVal != 0) {
		/* get the first queue Id from the interrupt register value */
		qIndex = (32 - 1) - _lzcount(intRegVal);

		DPRINTFn(2, sc->sc_dev, "%s: ISR0 0x%x qIndex %u\n",
		    __func__, intRegVal, qIndex);

		/*
		 * Optimize for single callback case.
		 */
		qi = &sc->qinfo[qIndex];
		if (intRegVal == qi->intRegCheckMask) {
			/*
			 * Only 1 queue event triggered a notification.
			 * Call the callback function for this queue
			 */
			qi->cb(qIndex, qi->cbarg);
		} else {
			/*
			 * The event is triggered by more than 1 queue,
			 * the queue search will start from the beginning
			 * or the middle of the priority table.
			 *
			 * The search will end when all the bits of the
			 * interrupt register are cleared. There is no need
			 * to maintain a separate count and test it at each
			 * iteration.
			 */
			if (intRegVal & sc->lowPriorityTableFirstHalfMask) {
				priorityTableIndex = 0;
			} else {
				priorityTableIndex = 16;
			}
			/*
			 * Iterate over the priority table until all the bits
			 * of the interrupt register are cleared.
			 */
			do {
				qIndex = sc->priorityTable[priorityTableIndex++];
				qi = &sc->qinfo[qIndex];

				/* If this queue caused this interrupt to be raised */
				if (intRegVal & qi->intRegCheckMask) {
					/* Call the callback function for this queue */
					qi->cb(qIndex, qi->cbarg);
					/* Clear the interrupt register bit */
					intRegVal &= ~qi->intRegCheckMask;
				}
			} while (intRegVal);
		}
	}

	/* Rebuild the priority table if needed */
	if (sc->rebuildTable)
		ixpqmgr_rebuild(sc);
}
808
#if 0
/*
 * Generate the parameters used to check if a Q's status matches
 * the specified source select. We calculate which status word
 * to check (statusWordOffset), the value to check the status
 * against (statusCheckValue) and the mask (statusMask) to mask
 * out all but the bits to check in the status word.
 *
 * NOTE(review): this code is disabled; it references a file-scope
 * `qinfo[]` rather than the softc, and the (also-disabled) call site
 * in ixpqmgr_notify_enable() passes three arguments while this takes
 * two -- both need fixing before it can be re-enabled.
 */
static void
aqm_calc_statuscheck(int qId, IxQMgrSourceId srcSel)
{
	struct qmgrInfo *qi = &qinfo[qId];
	uint32_t shiftVal;

	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		switch (srcSel) {
		case IX_QMGR_Q_SOURCE_ID_E:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_E_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NE:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NF:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_F:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_F_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_E:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_NE:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_NF:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_F:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
			break;
		default:
			/* Should never hit */
			IX_OSAL_ASSERT(0);
			break;
		}

		/* One nibble of status per queue so need to shift the
		 * check value and mask out to the correct position.
		 */
		shiftVal = (qId % IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
		    IX_QMGR_QUELOWSTAT_BITS_PER_Q;

		/* Calculate the which status word to check from the qId,
		 * 8 Qs status per word
		 */
		qi->statusWordOffset = qId / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD;

		qi->statusCheckValue <<= shiftVal;
		qi->statusMask <<= shiftVal;
	} else {
		/* One status word */
		qi->statusWordOffset = 0;
		/* Single bits per queue and int source bit hardwired NE,
		 * Qs start at 32.
		 */
		qi->statusMask = 1 << (qId - IX_QMGR_MIN_QUEUPP_QID);
		qi->statusCheckValue = qi->statusMask;
	}
}
#endif
887
/*
 * Enable interrupt delivery for queue qId by setting its bit in the
 * matching interrupt-enable register (QUEIEREG0 covers queues 0-31,
 * QUEIEREG1 covers queues 32-63).
 */
static void
aqm_int_enable(struct ixpqmgr_softc *sc, int qId)
{
	bus_size_t reg;
	uint32_t v;

	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		reg = IX_QMGR_QUEIEREG0_OFFSET;
	else
		reg = IX_QMGR_QUEIEREG1_OFFSET;
	v = aqm_reg_read(sc, reg);
	/* qId mod 32 selects the bit position within either register */
	aqm_reg_write(sc, reg, v | (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));

	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
}
904
/*
 * Disable interrupt delivery for queue qId by clearing its bit in the
 * matching interrupt-enable register (QUEIEREG0 covers queues 0-31,
 * QUEIEREG1 covers queues 32-63).
 */
static void
aqm_int_disable(struct ixpqmgr_softc *sc, int qId)
{
	bus_size_t reg;
	uint32_t v;

	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		reg = IX_QMGR_QUEIEREG0_OFFSET;
	else
		reg = IX_QMGR_QUEIEREG1_OFFSET;
	v = aqm_reg_read(sc, reg);
	/* qId mod 32 selects the bit position within either register */
	aqm_reg_write(sc, reg, v &~ (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));

	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
}
921
static unsigned
log2(unsigned n)
{
	unsigned bit = 0;

	/*
	 * Floor of the base-2 logarithm, computed by counting how
	 * many right shifts reduce n to zero.
	 * N.B. this function will return 0 if supplied 0.
	 */
	while (n >>= 1)
		bit++;
	return bit;
}
933
static __inline unsigned
toAqmEntrySize(int entrySize)
{
	/* entrySize 1("00"),2("01"),4("10") */
	unsigned v = (unsigned)entrySize;
	unsigned code = 0;

	/* encode as floor(log2(entrySize)) */
	while (v >>= 1)
		code++;
	return code;
}
940
941static __inline unsigned
942toAqmBufferSize(unsigned bufferSizeInWords)
943{
944 /* bufferSize 16("00"),32("01),64("10"),128("11") */
945 return log2(bufferSizeInWords / IX_QMGR_MIN_BUFFER_SIZE);
946}
947
static __inline unsigned
toAqmWatermark(int watermark)
{
	/*
	 * Watermarks 0("000"),1("001"),2("010"),4("011"),
	 * 8("100"),16("101"),32("110"),64("111")
	 */
	unsigned v = (unsigned)(2 * watermark);
	unsigned code = 0;

	/* encode as floor(log2(2 * watermark)); 0 maps to 0 */
	while (v >>= 1)
		code++;
	return code;
}
957
958static void
959aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf)
960{
961 const struct qmgrInfo *qi = &sc->qinfo[qId];
962 uint32_t qCfg;
963 uint32_t baseAddress;
964
965 /* Build config register */
966 qCfg = ((toAqmEntrySize(1) & IX_QMGR_ENTRY_SIZE_MASK) <<
967 IX_QMGR_Q_CONFIG_ESIZE_OFFSET)
968 | ((toAqmBufferSize(qi->qSizeInWords) & IX_QMGR_SIZE_MASK) <<
969 IX_QMGR_Q_CONFIG_BSIZE_OFFSET);
970
971 /* baseAddress, calculated relative to start address */
972 baseAddress = sc->aqmFreeSramAddress;
973
974 /* base address must be word-aligned */
975 KASSERT((baseAddress % IX_QMGR_BASE_ADDR_16_WORD_ALIGN) == 0,
976 ("address not word-aligned"));
977
978 /* Now convert to a 16 word pointer as required by QUECONFIG register */
979 baseAddress >>= IX_QMGR_BASE_ADDR_16_WORD_SHIFT;
980 qCfg |= baseAddress << IX_QMGR_Q_CONFIG_BADDR_OFFSET;
981
982 /* set watermarks */
983 qCfg |= (toAqmWatermark(ne) << IX_QMGR_Q_CONFIG_NE_OFFSET)
984 | (toAqmWatermark(nf) << IX_QMGR_Q_CONFIG_NF_OFFSET);
985
986 DPRINTF(sc->sc_dev, "%s(%u, %u, %u) 0x%x => 0x%x @ 0x%x\n",
987 __func__, qId, ne, nf,
988 aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId)),
989 qCfg, IX_QMGR_Q_CONFIG_ADDR_GET(qId));
990
991 aqm_reg_write(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId), qCfg);
992}
993
994static void
995aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId)
996{
997 bus_size_t off;
998 uint32_t v;
999
1000 /*
1001 * Calculate the register offset; multiple queues split across registers
1002 */
1003 off = IX_QMGR_INT0SRCSELREG0_OFFSET +
1004 ((qId / IX_QMGR_INTSRC_NUM_QUE_PER_WORD) * sizeof(uint32_t));
1005
1006 v = aqm_reg_read(sc, off);
1007 if (off == IX_QMGR_INT0SRCSELREG0_OFFSET && qId == 0) {
1008 /* Queue 0 at INT0SRCSELREG should not corrupt the value bit-3 */
1009 v |= 0x7;
1010 } else {
1011 const uint32_t bpq = 32 / IX_QMGR_INTSRC_NUM_QUE_PER_WORD;
1012 uint32_t mask;
1013 int qshift;
1014
1015 qshift = (qId & (IX_QMGR_INTSRC_NUM_QUE_PER_WORD-1)) * bpq;
1016 mask = ((1 << bpq) - 1) << qshift; /* q's status mask */
1017
1018 /* merge sourceId */
1019 v = (v &~ mask) | ((sourceId << qshift) & mask);
1020 }
1021
1022 DPRINTF(sc->sc_dev, "%s(%u, %u) 0x%x => 0x%x @ 0x%lx\n",
1023 __func__, qId, sourceId, aqm_reg_read(sc, off), v, off);
1024 aqm_reg_write(sc, off, v);
1025}
1026
1027/*
1028 * Reset AQM registers to default values.
1029 */
1030static void
1031aqm_reset(struct ixpqmgr_softc *sc)
1032{
1033 int i;
1034
1035 /* Reset queues 0..31 status registers 0..3 */
1036 aqm_reg_write(sc, IX_QMGR_QUELOWSTAT0_OFFSET,
1037 IX_QMGR_QUELOWSTAT_RESET_VALUE);
1038 aqm_reg_write(sc, IX_QMGR_QUELOWSTAT1_OFFSET,
1039 IX_QMGR_QUELOWSTAT_RESET_VALUE);
1040 aqm_reg_write(sc, IX_QMGR_QUELOWSTAT2_OFFSET,
1041 IX_QMGR_QUELOWSTAT_RESET_VALUE);
1042 aqm_reg_write(sc, IX_QMGR_QUELOWSTAT3_OFFSET,
1043 IX_QMGR_QUELOWSTAT_RESET_VALUE);
1044
1045 /* Reset underflow/overflow status registers 0..1 */
1046 aqm_reg_write(sc, IX_QMGR_QUEUOSTAT0_OFFSET,
1047 IX_QMGR_QUEUOSTAT_RESET_VALUE);
1048 aqm_reg_write(sc, IX_QMGR_QUEUOSTAT1_OFFSET,
1049 IX_QMGR_QUEUOSTAT_RESET_VALUE);
1050
1051 /* Reset queues 32..63 nearly empty status registers */
1052 aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT0_OFFSET,
1053 IX_QMGR_QUEUPPSTAT0_RESET_VALUE);
1054
1055 /* Reset queues 32..63 full status registers */
1056 aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT1_OFFSET,
1057 IX_QMGR_QUEUPPSTAT1_RESET_VALUE);
1058
1059 /* Reset int0 status flag source select registers 0..3 */
1060 aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG0_OFFSET,
1061 IX_QMGR_INT0SRCSELREG_RESET_VALUE);
1062 aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG1_OFFSET,
1063 IX_QMGR_INT0SRCSELREG_RESET_VALUE);
1064 aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG2_OFFSET,
1065 IX_QMGR_INT0SRCSELREG_RESET_VALUE);
1066 aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG3_OFFSET,
1067 IX_QMGR_INT0SRCSELREG_RESET_VALUE);
1068
1069 /* Reset queue interrupt enable register 0..1 */
1070 aqm_reg_write(sc, IX_QMGR_QUEIEREG0_OFFSET,
1071 IX_QMGR_QUEIEREG_RESET_VALUE);
1072 aqm_reg_write(sc, IX_QMGR_QUEIEREG1_OFFSET,
1073 IX_QMGR_QUEIEREG_RESET_VALUE);
1074
1075 /* Reset queue interrupt register 0..1 */
1076 aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);
1077 aqm_reg_write(sc, IX_QMGR_QINTREG1_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);
1078
1079 /* Reset queue configuration words 0..63 */
1080 for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++)
1081 aqm_reg_write(sc, sc->qinfo[i].qConfigRegAddr,
1082 IX_QMGR_QUECONFIG_RESET_VALUE);
1083
1084 /* XXX zero SRAM to simplify debugging */
1085 for (i = IX_QMGR_QUEBUFFER_SPACE_OFFSET;
1086 i < IX_QMGR_AQM_SRAM_SIZE_IN_BYTES; i += sizeof(uint32_t))
1087 aqm_reg_write(sc, i, 0);
1088}
1089
/* newbus device interface methods for the queue manager */
static device_method_t ixpqmgr_methods[] = {
	DEVMETHOD(device_probe, ixpqmgr_probe),
	DEVMETHOD(device_attach, ixpqmgr_attach),
	DEVMETHOD(device_detach, ixpqmgr_detach),

	{ 0, 0 }	/* table terminator */
};
1097
/* newbus glue: register the "ixpqmgr" driver as a child of the ixp bus */
static driver_t ixpqmgr_driver = {
	"ixpqmgr",			/* driver name */
	ixpqmgr_methods,		/* device methods */
	sizeof(struct ixpqmgr_softc),	/* per-instance softc size */
};
static devclass_t ixpqmgr_devclass;

DRIVER_MODULE(ixpqmgr, ixp, ixpqmgr_driver, ixpqmgr_devclass, 0, 0);