if_fatm.c (147721) → if_fatm.c (148887); deleted lines are prefixed with '-', added lines with '+'
1/*-
2 * Copyright (c) 2001-2003
3 * Fraunhofer Institute for Open Communication Systems (FhG Fokus).
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * Author: Hartmut Brandt <harti@freebsd.org>
28 *
29 * Fore PCA200E driver for NATM
30 */
31
32#include <sys/cdefs.h>
- 33__FBSDID("$FreeBSD: head/sys/dev/fatm/if_fatm.c 147721 2005-07-01 10:45:02Z harti $");
+ 33__FBSDID("$FreeBSD: head/sys/dev/fatm/if_fatm.c 148887 2005-08-09 10:20:02Z rwatson $");
34
35#include "opt_inet.h"
36#include "opt_natm.h"
37
38#include <sys/types.h>
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/malloc.h>
42#include <sys/kernel.h>
43#include <sys/bus.h>
44#include <sys/errno.h>
45#include <sys/conf.h>
46#include <sys/module.h>
47#include <sys/queue.h>
48#include <sys/syslog.h>
49#include <sys/endian.h>
50#include <sys/sysctl.h>
51#include <sys/condvar.h>
52#include <vm/uma.h>
53
54#include <sys/sockio.h>
55#include <sys/mbuf.h>
56#include <sys/socket.h>
57
58#include <net/if.h>
59#include <net/if_media.h>
60#include <net/if_types.h>
61#include <net/if_atm.h>
62#include <net/route.h>
63#ifdef ENABLE_BPF
64#include <net/bpf.h>
65#endif
66#ifdef INET
67#include <netinet/in.h>
68#include <netinet/if_atm.h>
69#endif
70
71#include <machine/bus.h>
72#include <machine/resource.h>
73#include <sys/bus.h>
74#include <sys/rman.h>
75#include <dev/pci/pcireg.h>
76#include <dev/pci/pcivar.h>
77
78#include <dev/utopia/utopia.h>
79
80#include <dev/fatm/if_fatmreg.h>
81#include <dev/fatm/if_fatmvar.h>
82
83#include <dev/fatm/firmware.h>
84
85devclass_t fatm_devclass;
86
87static const struct {
88 uint16_t vid;
89 uint16_t did;
90 const char *name;
91} fatm_devs[] = {
92 { 0x1127, 0x300,
93 "FORE PCA200E" },
94 { 0, 0, NULL }
95};
96
97static const struct rate {
98 uint32_t ratio;
99 uint32_t cell_rate;
100} rate_table[] = {
101#include <dev/fatm/if_fatm_rate.h>
102};
103#define RATE_TABLE_SIZE (sizeof(rate_table) / sizeof(rate_table[0]))
104
105SYSCTL_DECL(_hw_atm);
106
107MODULE_DEPEND(fatm, utopia, 1, 1, 1);
108
109static int fatm_utopia_readregs(struct ifatm *, u_int, uint8_t *, u_int *);
110static int fatm_utopia_writereg(struct ifatm *, u_int, u_int, u_int);
111
112static const struct utopia_methods fatm_utopia_methods = {
113 fatm_utopia_readregs,
114 fatm_utopia_writereg
115};
116
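/*
 * A VPI/VCI pair is acceptable if the VPI fits into the card's number of
 * VPI bits, and the VCI is non-zero and fits into the card's number of
 * VCI bits (both taken from the interface MIB).
 */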
117#define VC_OK(SC, VPI, VCI) \
118 (((VPI) & ~((1 << IFP2IFATM((SC)->ifp)->mib.vpi_bits) - 1)) == 0 && \
119 (VCI) != 0 && ((VCI) & ~((1 << IFP2IFATM((SC)->ifp)->mib.vci_bits) - 1)) == 0)
120
121static int fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc);
122
123/*
124 * Probing is easy: step through the list of known vendor and device
125 * ids and compare. If one is found - it's ours.
126 */
127static int
128fatm_probe(device_t dev)
129{
130 int i;
131
132 for (i = 0; fatm_devs[i].name; i++)
133 if (pci_get_vendor(dev) == fatm_devs[i].vid &&
134 pci_get_device(dev) == fatm_devs[i].did) {
135 device_set_desc(dev, fatm_devs[i].name);
136 return (BUS_PROBE_DEFAULT);
137 }
138 return (ENXIO);
139}
140
141/*
142 * Function called at completion of a SUNI writeregs/readregs command.
143 * This is called from the interrupt handler while holding the softc lock.
144 * We use the queue entry as the rendezvous point.
145 */
146static void
147fatm_utopia_writeregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
148{
149
150 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
151 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
152 sc->istats.suni_reg_errors++;
153 q->error = EIO;
154 }
155 wakeup(q);
156}
157
158/*
159 * Write a SUNI register. The bits that are 1 in mask are written from val
160 * into register reg. We wait for the command to complete by sleeping on
161 * the register memory.
162 *
163 * We assume that we already hold the softc mutex.
164 */
165static int
166fatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
167{
168 int error;
169 struct cmdqueue *q;
170 struct fatm_softc *sc;
171
172 sc = ifatm->ifp->if_softc;
173 FATM_CHECKLOCK(sc);
- 174	if (!(ifatm->ifp->if_flags & IFF_RUNNING))
+ 174	if (!(ifatm->ifp->if_drv_flags & IFF_DRV_RUNNING))
175 return (EIO);
176
177 /* get queue element and fill it */
178 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
179
180 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
181 if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
182 sc->istats.cmd_queue_full++;
183 return (EIO);
184 }
185 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
186
187 q->error = 0;
188 q->cb = fatm_utopia_writeregs_complete;
189 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
190 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
191
192 WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, 0);
193 BARRIER_W(sc);
194 WRITE4(sc, q->q.card + FATMOC_OP,
195 FATM_MAKE_SETOC3(reg, val, mask) | FATM_OP_INTERRUPT_SEL);
196 BARRIER_W(sc);
197
198 /*
199 * Wait for the command to complete
200 */
201 error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_setreg", hz);
202
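	/*
	 * msleep() returns EWOULDBLOCK when the command timed out and
	 * ERESTART when we were interrupted by a signal; on success the
	 * completion callback has set q->error.
	 */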
203 switch (error) {
204
205 case EWOULDBLOCK:
206 error = EIO;
207 break;
208
209 case ERESTART:
210 error = EINTR;
211 break;
212
213 case 0:
214 error = q->error;
215 break;
216 }
217
218 return (error);
219}
220
221/*
222 * Function called at completion of a SUNI readregs command.
223 * This is called from the interrupt handler while holding the softc lock.
224 * We use reg_mem as the rendezvous point.
225 */
226static void
227fatm_utopia_readregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
228{
229
230 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
231 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
232 sc->istats.suni_reg_errors++;
233 q->error = EIO;
234 }
235 wakeup(&sc->reg_mem);
236}
237
238/*
239 * Read SUNI registers
240 *
241 * We use a preallocated buffer to read the registers. Therefore we need
242 * to protect against multiple threads trying to read registers. We do this
243 * with a condition variable and a flag. We wait for the command to complete
244 * by sleeping on the register memory.
245 *
246 * We assume that we already hold the softc mutex.
247 */
248static int
249fatm_utopia_readregs_internal(struct fatm_softc *sc)
250{
251 int error, i;
252 uint32_t *ptr;
253 struct cmdqueue *q;
254
255 /* get the buffer */
256 for (;;) {
- 257		if (!(sc->ifp->if_flags & IFF_RUNNING))
+ 257		if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
258 return (EIO);
259 if (!(sc->flags & FATM_REGS_INUSE))
260 break;
261 cv_wait(&sc->cv_regs, &sc->mtx);
262 }
263 sc->flags |= FATM_REGS_INUSE;
264
265 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
266
267 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
268 if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
269 sc->istats.cmd_queue_full++;
270 return (EIO);
271 }
272 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
273
274 q->error = 0;
275 q->cb = fatm_utopia_readregs_complete;
276 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
277 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
278
279 bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map, BUS_DMASYNC_PREREAD);
280
281 WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, sc->reg_mem.paddr);
282 BARRIER_W(sc);
283 WRITE4(sc, q->q.card + FATMOC_OP,
284 FATM_OP_OC3_GET_REG | FATM_OP_INTERRUPT_SEL);
285 BARRIER_W(sc);
286
287 /*
288 * Wait for the command to complete
289 */
290 error = msleep(&sc->reg_mem, &sc->mtx, PZERO | PCATCH,
291 "fatm_getreg", hz);
292
293 switch (error) {
294
295 case EWOULDBLOCK:
296 error = EIO;
297 break;
298
299 case ERESTART:
300 error = EINTR;
301 break;
302
303 case 0:
304 bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map,
305 BUS_DMASYNC_POSTREAD);
306 error = q->error;
307 break;
308 }
309
310 if (error != 0) {
311 /* declare buffer to be free */
312 sc->flags &= ~FATM_REGS_INUSE;
313 cv_signal(&sc->cv_regs);
314 return (error);
315 }
316
317 /* convert from little-endian and keep only the 8-bit register value */
318 ptr = (uint32_t *)sc->reg_mem.mem;
319 for (i = 0; i < FATM_NREGS; i++)
320 ptr[i] = le32toh(ptr[i]) & 0xff;
321
322 return (0);
323}
324
325/*
326 * Read SUNI registers. This is the readregs method of the utopia module.
327 *
328 * We assume that we already hold the mutex.
329 */
330static int
331fatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *valp, u_int *np)
332{
333 int err;
334 int i;
335 struct fatm_softc *sc;
336
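	/* clamp the request to the existing register range */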
337 if (reg >= FATM_NREGS)
338 return (EINVAL);
339 if (reg + *np > FATM_NREGS)
340 *np = FATM_NREGS - reg;
341 sc = ifatm->ifp->if_softc;
342 FATM_CHECKLOCK(sc);
343
344 err = fatm_utopia_readregs_internal(sc);
345 if (err != 0)
346 return (err);
347
348 for (i = 0; i < *np; i++)
349 valp[i] = ((uint32_t *)sc->reg_mem.mem)[reg + i];
350
351 /* declare buffer to be free */
352 sc->flags &= ~FATM_REGS_INUSE;
353 cv_signal(&sc->cv_regs);
354
355 return (0);
356}
357
358/*
359 * Check whether the heart is beating. We remember the last heart beat and
360 * compare it to the current one. If it appears stuck for 10 checks, we have
361 * a problem.
362 *
363 * Assume we hold the lock.
364 */
365static void
366fatm_check_heartbeat(struct fatm_softc *sc)
367{
368 uint32_t h;
369
370 FATM_CHECKLOCK(sc);
371
372 h = READ4(sc, FATMO_HEARTBEAT);
373 DBG(sc, BEAT, ("heartbeat %08x", h));
374
375 if (sc->stop_cnt == 10)
376 return;
377
378 if (h == sc->heartbeat) {
379 if (++sc->stop_cnt == 10) {
380 log(LOG_ERR, "i960 stopped???\n");
381 WRITE4(sc, FATMO_HIMR, 1);
382 }
383 return;
384 }
385
386 sc->stop_cnt = 0;
387 sc->heartbeat = h;
388}
389
390/*
391 * Ensure that the heart is still beating.
392 */
393static void
394fatm_watchdog(struct ifnet *ifp)
395{
396 struct fatm_softc *sc = ifp->if_softc;
397
398 FATM_LOCK(sc);
- 399	if (ifp->if_flags & IFF_RUNNING) {
+ 399	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
400 fatm_check_heartbeat(sc);
401 ifp->if_timer = 5;
402 }
403 FATM_UNLOCK(sc);
404}
405
406/*
407 * Hard reset the i960 on the board. This is done by initializing registers,
408 * clearing interrupts and waiting for the selftest to finish. It is not
409 * clear whether all these barriers are actually needed.
410 *
411 * Assumes that we hold the lock.
412 */
413static int
414fatm_reset(struct fatm_softc *sc)
415{
416 int w;
417 uint32_t val;
418
419 FATM_CHECKLOCK(sc);
420
421 WRITE4(sc, FATMO_APP_BASE, FATMO_COMMON_ORIGIN);
422 BARRIER_W(sc);
423
424 WRITE4(sc, FATMO_UART_TO_960, XMIT_READY);
425 BARRIER_W(sc);
426
427 WRITE4(sc, FATMO_UART_TO_HOST, XMIT_READY);
428 BARRIER_W(sc);
429
430 WRITE4(sc, FATMO_BOOT_STATUS, COLD_START);
431 BARRIER_W(sc);
432
433 WRITE1(sc, FATMO_HCR, FATM_HCR_RESET);
434 BARRIER_W(sc);
435
436 DELAY(1000);
437
438 WRITE1(sc, FATMO_HCR, 0);
439 BARRIER_RW(sc);
440
441 DELAY(1000);
442
443 for (w = 100; w; w--) {
444 BARRIER_R(sc);
445 val = READ4(sc, FATMO_BOOT_STATUS);
446 switch (val) {
447 case SELF_TEST_OK:
448 return (0);
449 case SELF_TEST_FAIL:
450 return (EIO);
451 }
452 DELAY(1000);
453 }
454 return (EIO);
455}
456
457/*
458 * Stop the card. Must be called WITH the lock held.
459 * Reset, free transmit and receive buffers. Wake up everybody who may sleep.
460 */
461static void
462fatm_stop(struct fatm_softc *sc)
463{
464 int i;
465 struct cmdqueue *q;
466 struct rbuf *rb;
467 struct txqueue *tx;
468 uint32_t stat;
469
470 FATM_CHECKLOCK(sc);
471
472 /* Stop the board */
473 utopia_stop(&sc->utopia);
474 (void)fatm_reset(sc);
475
476 /* stop watchdog */
477 sc->ifp->if_timer = 0;
478
- 479	if (sc->ifp->if_flags & IFF_RUNNING) {
- 480		sc->ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
+ 479	if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ 480		sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
481 ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
482 sc->utopia.carrier == UTP_CARR_OK);
483
484 /*
485 * Collect transmit mbufs, partial receive mbufs and
486 * supplied mbufs
487 */
488 for (i = 0; i < FATM_TX_QLEN; i++) {
489 tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
490 if (tx->m) {
491 bus_dmamap_unload(sc->tx_tag, tx->map);
492 m_freem(tx->m);
493 tx->m = NULL;
494 }
495 }
496
497 /* Collect supplied mbufs */
498 while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
499 LIST_REMOVE(rb, link);
500 bus_dmamap_unload(sc->rbuf_tag, rb->map);
501 m_free(rb->m);
502 rb->m = NULL;
503 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
504 }
505
506 /* Wake up any waiters */
507 wakeup(&sc->sadi_mem);
508
509 /* wakeup all threads waiting for STAT or REG buffers */
510 cv_broadcast(&sc->cv_stat);
511 cv_broadcast(&sc->cv_regs);
512
513 sc->flags &= ~(FATM_STAT_INUSE | FATM_REGS_INUSE);
514
515 /* wakeup all threads waiting on commands */
516 for (i = 0; i < FATM_CMD_QLEN; i++) {
517 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, i);
518
519 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
520 if ((stat = H_GETSTAT(q->q.statp)) != FATM_STAT_FREE) {
521 H_SETSTAT(q->q.statp, stat | FATM_STAT_ERROR);
522 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
523 wakeup(q);
524 }
525 }
526 utopia_reset_media(&sc->utopia);
527 }
528 sc->small_cnt = sc->large_cnt = 0;
529
530 /* Reset vcc info */
531 if (sc->vccs != NULL) {
532 sc->open_vccs = 0;
533 for (i = 0; i < FORE_MAX_VCC + 1; i++) {
534 if (sc->vccs[i] != NULL) {
535 if ((sc->vccs[i]->vflags & (FATM_VCC_OPEN |
536 FATM_VCC_TRY_OPEN)) == 0) {
537 uma_zfree(sc->vcc_zone, sc->vccs[i]);
538 sc->vccs[i] = NULL;
539 } else {
540 sc->vccs[i]->vflags = 0;
541 sc->open_vccs++;
542 }
543 }
544 }
545 }
546
547}
548
549/*
550 * Load the firmware into the board and save the entry point.
551 */
552static uint32_t
553firmware_load(struct fatm_softc *sc)
554{
555 struct firmware *fw = (struct firmware *)firmware;
556
557 DBG(sc, INIT, ("loading - entry=%x", fw->entry));
558 bus_space_write_region_4(sc->memt, sc->memh, fw->offset, firmware,
559 sizeof(firmware) / sizeof(firmware[0]));
560 BARRIER_RW(sc);
561
562 return (fw->entry);
563}
564
565/*
566 * Read a character from the virtual UART. The availability of a character
567 * is signaled by a non-zero value of the 32 bit register. We signal to
568 * the card that we have consumed the character by setting that register
569 * to zero.
570 */
571static int
572rx_getc(struct fatm_softc *sc)
573{
574 int w = 50;
575 int c;
576
577 while (w--) {
578 c = READ4(sc, FATMO_UART_TO_HOST);
579 BARRIER_RW(sc);
580 if (c != 0) {
581 WRITE4(sc, FATMO_UART_TO_HOST, 0);
582 DBGC(sc, UART, ("%c", c & 0xff));
583 return (c & 0xff);
584 }
585 DELAY(1000);
586 }
587 return (-1);
588}
589
590/*
591 * Eat up characters from the board and stuff them in the bit-bucket.
592 */
593static void
594rx_flush(struct fatm_softc *sc)
595{
596 int w = 10000;
597
598 while (w-- && rx_getc(sc) >= 0)
599 ;
600}
601
602/*
603 * Write a character to the card. The UART is available if the register
604 * is zero.
605 */
606static int
607tx_putc(struct fatm_softc *sc, u_char c)
608{
609 int w = 10;
610 int c1;
611
612 while (w--) {
613 c1 = READ4(sc, FATMO_UART_TO_960);
614 BARRIER_RW(sc);
615 if (c1 == 0) {
616 WRITE4(sc, FATMO_UART_TO_960, c | CHAR_AVAIL);
617 DBGC(sc, UART, ("%c", c & 0xff));
618 return (0);
619 }
620 DELAY(1000);
621 }
622 return (-1);
623}
624
625/*
626 * Start the firmware. This is done by issuing a 'go' command with
627 * the hex entry address of the firmware. Then we wait for the self-test to
628 * succeed.
629 */
630static int
631fatm_start_firmware(struct fatm_softc *sc, uint32_t start)
632{
633 static char hex[] = "0123456789abcdef";
634 u_int w, val;
635
636 DBG(sc, INIT, ("starting"));
637 rx_flush(sc);
638 tx_putc(sc, '\r');
639 DELAY(1000);
640
641 rx_flush(sc);
642
643 tx_putc(sc, 'g');
644 (void)rx_getc(sc);
645 tx_putc(sc, 'o');
646 (void)rx_getc(sc);
647 tx_putc(sc, ' ');
648 (void)rx_getc(sc);
649
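	/* transmit the low 16 bits of the entry address as four hex digits */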
650 tx_putc(sc, hex[(start >> 12) & 0xf]);
651 (void)rx_getc(sc);
652 tx_putc(sc, hex[(start >> 8) & 0xf]);
653 (void)rx_getc(sc);
654 tx_putc(sc, hex[(start >> 4) & 0xf]);
655 (void)rx_getc(sc);
656 tx_putc(sc, hex[(start >> 0) & 0xf]);
657 (void)rx_getc(sc);
658
659 tx_putc(sc, '\r');
660 rx_flush(sc);
661
662 for (w = 100; w; w--) {
663 BARRIER_R(sc);
664 val = READ4(sc, FATMO_BOOT_STATUS);
665 switch (val) {
666 case CP_RUNNING:
667 return (0);
668 case SELF_TEST_FAIL:
669 return (EIO);
670 }
671 DELAY(1000);
672 }
673 return (EIO);
674}
675
676/*
677 * Initialize one card and host queue.
678 */
679static void
680init_card_queue(struct fatm_softc *sc, struct fqueue *queue, int qlen,
681 size_t qel_size, size_t desc_size, cardoff_t off,
682 u_char **statpp, uint32_t *cardstat, u_char *descp, uint32_t carddesc)
683{
684 struct fqelem *el = queue->chunk;
685
686 while (qlen--) {
687 el->card = off;
688 off += 8; /* size of card entry */
689
690 el->statp = (uint32_t *)(*statpp);
691 (*statpp) += sizeof(uint32_t);
692 H_SETSTAT(el->statp, FATM_STAT_FREE);
693 H_SYNCSTAT_PREWRITE(sc, el->statp);
694
695 WRITE4(sc, el->card + FATMOS_STATP, (*cardstat));
696 (*cardstat) += sizeof(uint32_t);
697
698 el->ioblk = descp;
699 descp += desc_size;
700 el->card_ioblk = carddesc;
701 carddesc += desc_size;
702
703 el = (struct fqelem *)((u_char *)el + qel_size);
704 }
705 queue->tail = queue->head = 0;
706}
707
708/*
709 * Issue the initialize operation to the card, wait for completion and
710 * initialize the on-board and host queue structures with offsets and
711 * addresses.
712 */
713static int
714fatm_init_cmd(struct fatm_softc *sc)
715{
716 int w, c;
717 u_char *statp;
718 uint32_t card_stat;
719 u_int cnt;
720 struct fqelem *el;
721 cardoff_t off;
722
723 DBG(sc, INIT, ("command"));
724 WRITE4(sc, FATMO_ISTAT, 0);
725 WRITE4(sc, FATMO_IMASK, 1);
726 WRITE4(sc, FATMO_HLOGGER, 0);
727
728 WRITE4(sc, FATMO_INIT + FATMOI_RECEIVE_TRESHOLD, 0);
729 WRITE4(sc, FATMO_INIT + FATMOI_NUM_CONNECT, FORE_MAX_VCC);
730 WRITE4(sc, FATMO_INIT + FATMOI_CQUEUE_LEN, FATM_CMD_QLEN);
731 WRITE4(sc, FATMO_INIT + FATMOI_TQUEUE_LEN, FATM_TX_QLEN);
732 WRITE4(sc, FATMO_INIT + FATMOI_RQUEUE_LEN, FATM_RX_QLEN);
733 WRITE4(sc, FATMO_INIT + FATMOI_RPD_EXTENSION, RPD_EXTENSIONS);
734 WRITE4(sc, FATMO_INIT + FATMOI_TPD_EXTENSION, TPD_EXTENSIONS);
735
736 /*
737 * initialize buffer descriptors
738 */
739 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_QUEUE_LENGTH,
740 SMALL_SUPPLY_QLEN);
741 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_BUFFER_SIZE,
742 SMALL_BUFFER_LEN);
743 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_POOL_SIZE,
744 SMALL_POOL_SIZE);
745 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_SUPPLY_BLKSIZE,
746 SMALL_SUPPLY_BLKSIZE);
747
748 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_QUEUE_LENGTH,
749 LARGE_SUPPLY_QLEN);
750 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_BUFFER_SIZE,
751 LARGE_BUFFER_LEN);
752 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_POOL_SIZE,
753 LARGE_POOL_SIZE);
754 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_SUPPLY_BLKSIZE,
755 LARGE_SUPPLY_BLKSIZE);
756
757 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_QUEUE_LENGTH, 0);
758 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_BUFFER_SIZE, 0);
759 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_POOL_SIZE, 0);
760 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_SUPPLY_BLKSIZE, 0);
761
762 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_QUEUE_LENGTH, 0);
763 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_BUFFER_SIZE, 0);
764 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_POOL_SIZE, 0);
765 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_SUPPLY_BLKSIZE, 0);
766
767 /*
768 * Start the command
769 */
770 BARRIER_W(sc);
771 WRITE4(sc, FATMO_INIT + FATMOI_STATUS, FATM_STAT_PENDING);
772 BARRIER_W(sc);
773 WRITE4(sc, FATMO_INIT + FATMOI_OP, FATM_OP_INITIALIZE);
774 BARRIER_W(sc);
775
776 /*
777 * Busy wait for completion
778 */
779 w = 100;
780 while (w--) {
781 c = READ4(sc, FATMO_INIT + FATMOI_STATUS);
782 BARRIER_R(sc);
783 if (c & FATM_STAT_COMPLETE)
784 break;
785 DELAY(1000);
786 }
787
788 if (c & FATM_STAT_ERROR)
789 return (EIO);
790
791 /*
792 * Initialize the queues
793 */
794 statp = sc->stat_mem.mem;
795 card_stat = sc->stat_mem.paddr;
796
797 /*
798 * Command queue. This is special in that it's on the card.
799 */
800 el = sc->cmdqueue.chunk;
801 off = READ4(sc, FATMO_COMMAND_QUEUE);
802 DBG(sc, INIT, ("cmd queue=%x", off));
803 for (cnt = 0; cnt < FATM_CMD_QLEN; cnt++) {
804 el = &((struct cmdqueue *)sc->cmdqueue.chunk + cnt)->q;
805
806 el->card = off;
807 off += 32; /* size of card structure */
808
809 el->statp = (uint32_t *)statp;
810 statp += sizeof(uint32_t);
811 H_SETSTAT(el->statp, FATM_STAT_FREE);
812 H_SYNCSTAT_PREWRITE(sc, el->statp);
813
814 WRITE4(sc, el->card + FATMOC_STATP, card_stat);
815 card_stat += sizeof(uint32_t);
816 }
817 sc->cmdqueue.tail = sc->cmdqueue.head = 0;
818
819 /*
820 * Now the other queues. These are in memory
821 */
822 init_card_queue(sc, &sc->txqueue, FATM_TX_QLEN,
823 sizeof(struct txqueue), TPD_SIZE,
824 READ4(sc, FATMO_TRANSMIT_QUEUE),
825 &statp, &card_stat, sc->txq_mem.mem, sc->txq_mem.paddr);
826
827 init_card_queue(sc, &sc->rxqueue, FATM_RX_QLEN,
828 sizeof(struct rxqueue), RPD_SIZE,
829 READ4(sc, FATMO_RECEIVE_QUEUE),
830 &statp, &card_stat, sc->rxq_mem.mem, sc->rxq_mem.paddr);
831
832 init_card_queue(sc, &sc->s1queue, SMALL_SUPPLY_QLEN,
833 sizeof(struct supqueue), BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE),
834 READ4(sc, FATMO_SMALL_B1_QUEUE),
835 &statp, &card_stat, sc->s1q_mem.mem, sc->s1q_mem.paddr);
836
837 init_card_queue(sc, &sc->l1queue, LARGE_SUPPLY_QLEN,
838 sizeof(struct supqueue), BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE),
839 READ4(sc, FATMO_LARGE_B1_QUEUE),
840 &statp, &card_stat, sc->l1q_mem.mem, sc->l1q_mem.paddr);
841
842 sc->txcnt = 0;
843
844 return (0);
845}
846
847/*
848 * Read PROM. Called only from attach code. Here we spin because the interrupt
849 * handler is not yet set up.
850 */
851static int
852fatm_getprom(struct fatm_softc *sc)
853{
854 int i;
855 struct prom *prom;
856 struct cmdqueue *q;
857
858 DBG(sc, INIT, ("reading prom"));
859 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
860 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
861
862 q->error = 0;
863 q->cb = NULL;
864 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
865 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
866
867 bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
868 BUS_DMASYNC_PREREAD);
869
870 WRITE4(sc, q->q.card + FATMOC_GPROM_BUF, sc->prom_mem.paddr);
871 BARRIER_W(sc);
872 WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_GET_PROM_DATA);
873 BARRIER_W(sc);
874
875 for (i = 0; i < 1000; i++) {
876 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
877 if (H_GETSTAT(q->q.statp) &
878 (FATM_STAT_COMPLETE | FATM_STAT_ERROR))
879 break;
880 DELAY(1000);
881 }
882 if (i == 1000) {
883 if_printf(sc->ifp, "getprom timeout\n");
884 return (EIO);
885 }
886 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
887 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
888 if_printf(sc->ifp, "getprom error\n");
889 return (EIO);
890 }
891 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
892 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
893 NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
894
895 bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
896 BUS_DMASYNC_POSTREAD);
897
898
899#ifdef notdef
900 {
901 u_int i;
902
903 printf("PROM: ");
904 u_char *ptr = (u_char *)sc->prom_mem.mem;
905 for (i = 0; i < sizeof(struct prom); i++)
906 printf("%02x ", *ptr++);
907 printf("\n");
908 }
909#endif
910
911 prom = (struct prom *)sc->prom_mem.mem;
912
913 bcopy(prom->mac + 2, IFP2IFATM(sc->ifp)->mib.esi, 6);
914 IFP2IFATM(sc->ifp)->mib.serial = le32toh(prom->serial);
915 IFP2IFATM(sc->ifp)->mib.hw_version = le32toh(prom->version);
916 IFP2IFATM(sc->ifp)->mib.sw_version = READ4(sc, FATMO_FIRMWARE_RELEASE);
917
918 if_printf(sc->ifp, "ESI=%02x:%02x:%02x:%02x:%02x:%02x "
919 "serial=%u hw=0x%x sw=0x%x\n", IFP2IFATM(sc->ifp)->mib.esi[0],
920 IFP2IFATM(sc->ifp)->mib.esi[1], IFP2IFATM(sc->ifp)->mib.esi[2], IFP2IFATM(sc->ifp)->mib.esi[3],
921 IFP2IFATM(sc->ifp)->mib.esi[4], IFP2IFATM(sc->ifp)->mib.esi[5], IFP2IFATM(sc->ifp)->mib.serial,
922 IFP2IFATM(sc->ifp)->mib.hw_version, IFP2IFATM(sc->ifp)->mib.sw_version);
923
924 return (0);
925}
926
927/*
928 * This is the callback function for bus_dmamap_load. We assume that we
929 * have a 32-bit bus and thus always exactly one segment.
930 */
931static void
932dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
933{
934 bus_addr_t *ptr = (bus_addr_t *)arg;
935
936 if (error != 0) {
937 printf("%s: error=%d\n", __func__, error);
938 return;
939 }
940 KASSERT(nsegs == 1, ("too many DMA segments"));
941 KASSERT(segs[0].ds_addr <= 0xffffffff, ("DMA address too large %lx",
942 (u_long)segs[0].ds_addr));
943
944 *ptr = segs[0].ds_addr;
945}
946
947/*
948 * Allocate a chunk of DMA-able memory and map it.
949 */
950static int
951alloc_dma_memory(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
952{
953 int error;
954
955 mem->mem = NULL;
956
957 if (bus_dma_tag_create(sc->parent_dmat, mem->align, 0,
958 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
959 NULL, NULL, mem->size, 1, BUS_SPACE_MAXSIZE_32BIT,
960 BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
961 if_printf(sc->ifp, "could not allocate %s DMA tag\n",
962 nm);
963 return (ENOMEM);
964 }
965
966 error = bus_dmamem_alloc(mem->dmat, &mem->mem, 0, &mem->map);
967 if (error) {
968 if_printf(sc->ifp, "could not allocate %s DMA memory: "
969 "%d\n", nm, error);
970 bus_dma_tag_destroy(mem->dmat);
971 mem->mem = NULL;
972 return (error);
973 }
974
975 error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
976 dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
977 if (error) {
978 if_printf(sc->ifp, "could not load %s DMA memory: "
979 "%d\n", nm, error);
980 bus_dmamem_free(mem->dmat, mem->mem, mem->map);
981 bus_dma_tag_destroy(mem->dmat);
982 mem->mem = NULL;
983 return (error);
984 }
985
986 DBG(sc, DMA, ("DMA %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
987 (u_long)mem->paddr, mem->size, mem->align));
988
989 return (0);
990}
991
992#ifdef TEST_DMA_SYNC
993static int
994alloc_dma_memoryX(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
995{
996 int error;
997
998 mem->mem = NULL;
999
1000 if (bus_dma_tag_create(NULL, mem->align, 0,
1001 BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR,
1002 NULL, NULL, mem->size, 1, mem->size,
1003 BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
1004 if_printf(sc->ifp, "could not allocate %s DMA tag\n",
1005 nm);
1006 return (ENOMEM);
1007 }
1008
1009 mem->mem = contigmalloc(mem->size, M_DEVBUF, M_WAITOK,
1010 BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR_32BIT, mem->align, 0);
1011
1012 error = bus_dmamap_create(mem->dmat, 0, &mem->map);
1013 if (error) {
1014 if_printf(sc->ifp, "could not allocate %s DMA map: "
1015 "%d\n", nm, error);
1016 contigfree(mem->mem, mem->size, M_DEVBUF);
1017 bus_dma_tag_destroy(mem->dmat);
1018 mem->mem = NULL;
1019 return (error);
1020 }
1021
1022 error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
1023 dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
1024 if (error) {
1025 if_printf(sc->ifp, "could not load %s DMA memory: "
1026 "%d\n", nm, error);
1027 bus_dmamap_destroy(mem->dmat, mem->map);
1028 contigfree(mem->mem, mem->size, M_DEVBUF);
1029 bus_dma_tag_destroy(mem->dmat);
1030 mem->mem = NULL;
1031 return (error);
1032 }
1033
1034 DBG(sc, DMA, ("DMAX %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
1035 (u_long)mem->paddr, mem->size, mem->align));
1036
1037 printf("DMAX: %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
1038 (u_long)mem->paddr, mem->size, mem->align);
1039
1040 return (0);
1041}
1042#endif /* TEST_DMA_SYNC */
1043
1044/*
1045 * Destroy all resources of a DMA-able memory chunk
1046 */
1047static void
1048destroy_dma_memory(struct fatm_mem *mem)
1049{
1050 if (mem->mem != NULL) {
1051 bus_dmamap_unload(mem->dmat, mem->map);
1052 bus_dmamem_free(mem->dmat, mem->mem, mem->map);
1053 bus_dma_tag_destroy(mem->dmat);
1054 mem->mem = NULL;
1055 }
1056}
1057#ifdef TEST_DMA_SYNC
1058static void
1059destroy_dma_memoryX(struct fatm_mem *mem)
1060{
1061 if (mem->mem != NULL) {
1062 bus_dmamap_unload(mem->dmat, mem->map);
1063 bus_dmamap_destroy(mem->dmat, mem->map);
1064 contigfree(mem->mem, mem->size, M_DEVBUF);
1065 bus_dma_tag_destroy(mem->dmat);
1066 mem->mem = NULL;
1067 }
1068}
1069#endif /* TEST_DMA_SYNC */
1070
1071/*
1072 * Try to supply buffers to the card if there are free entries in the queues.
1073 */
1074static void
1075fatm_supply_small_buffers(struct fatm_softc *sc)
1076{
1077 int nblocks, nbufs;
1078 struct supqueue *q;
1079 struct rbd *bd;
1080 int i, j, error, cnt;
1081 struct mbuf *m;
1082 struct rbuf *rb;
1083 bus_addr_t phys;
1084
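	/*
	 * Aim for four buffers per open VCC, but at least 32, bounded by
	 * the pool size; supply only what is missing from that target.
	 */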
1085 nbufs = max(4 * sc->open_vccs, 32);
1086 nbufs = min(nbufs, SMALL_POOL_SIZE);
1087 nbufs -= sc->small_cnt;
1088
1089 nblocks = (nbufs + SMALL_SUPPLY_BLKSIZE - 1) / SMALL_SUPPLY_BLKSIZE;
1090 for (cnt = 0; cnt < nblocks; cnt++) {
1091 q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.head);
1092
1093 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1094 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
1095 break;
1096
1097 bd = (struct rbd *)q->q.ioblk;
1098
1099 for (i = 0; i < SMALL_SUPPLY_BLKSIZE; i++) {
1100 if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
1101 if_printf(sc->ifp, "out of rbufs\n");
1102 break;
1103 }
1104 MGETHDR(m, M_DONTWAIT, MT_DATA);
1105 if (m == NULL) {
1106 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1107 break;
1108 }
1109 MH_ALIGN(m, SMALL_BUFFER_LEN);
1110 error = bus_dmamap_load(sc->rbuf_tag, rb->map,
1111 m->m_data, SMALL_BUFFER_LEN, dmaload_helper,
1112 &phys, BUS_DMA_NOWAIT);
1113 if (error) {
1114 if_printf(sc->ifp,
1115 "dmamap_load mbuf failed %d", error);
1116 m_freem(m);
1117 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1118 break;
1119 }
1120 bus_dmamap_sync(sc->rbuf_tag, rb->map,
1121 BUS_DMASYNC_PREREAD);
1122
1123 LIST_REMOVE(rb, link);
1124 LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);
1125
1126 rb->m = m;
1127 bd[i].handle = rb - sc->rbufs;
1128 H_SETDESC(bd[i].buffer, phys);
1129 }
1130
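		/*
		 * The block could not be filled completely - return the
		 * buffers collected so far to the free list and stop.
		 */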
1131 if (i < SMALL_SUPPLY_BLKSIZE) {
1132 for (j = 0; j < i; j++) {
1133 rb = sc->rbufs + bd[j].handle;
1134 bus_dmamap_unload(sc->rbuf_tag, rb->map);
1135 m_free(rb->m);
1136 rb->m = NULL;
1137
1138 LIST_REMOVE(rb, link);
1139 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1140 }
1141 break;
1142 }
1143 H_SYNCQ_PREWRITE(&sc->s1q_mem, bd,
1144 sizeof(struct rbd) * SMALL_SUPPLY_BLKSIZE);
1145
1146 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1147 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1148
1149 WRITE4(sc, q->q.card, q->q.card_ioblk);
1150 BARRIER_W(sc);
1151
1152 sc->small_cnt += SMALL_SUPPLY_BLKSIZE;
1153
1154 NEXT_QUEUE_ENTRY(sc->s1queue.head, SMALL_SUPPLY_QLEN);
1155 }
1156}
1157
1158/*
1159 * Try to supply buffers to the card if there are free entries in the queues.
1160 * We assume that all buffers are within the address space accessible by the
1161 * card (32-bit), so we don't need bounce buffers.
1162 */
1163static void
1164fatm_supply_large_buffers(struct fatm_softc *sc)
1165{
1166 int nbufs, nblocks, cnt;
1167 struct supqueue *q;
1168 struct rbd *bd;
1169 int i, j, error;
1170 struct mbuf *m;
1171 struct rbuf *rb;
1172 bus_addr_t phys;
1173
1174 nbufs = max(4 * sc->open_vccs, 32);
1175 nbufs = min(nbufs, LARGE_POOL_SIZE);
1176 nbufs -= sc->large_cnt;
1177
1178 nblocks = (nbufs + LARGE_SUPPLY_BLKSIZE - 1) / LARGE_SUPPLY_BLKSIZE;
1179
1180 for (cnt = 0; cnt < nblocks; cnt++) {
1181 q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.head);
1182
1183 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1184 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
1185 break;
1186
1187 bd = (struct rbd *)q->q.ioblk;
1188
1189 for (i = 0; i < LARGE_SUPPLY_BLKSIZE; i++) {
1190 if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
1191 if_printf(sc->ifp, "out of rbufs\n");
1192 break;
1193 }
1194 if ((m = m_getcl(M_DONTWAIT, MT_DATA,
1195 M_PKTHDR)) == NULL) {
1196 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1197 break;
1198 }
1199 /* no MEXT_ALIGN macro exists - align the data to the cluster end by hand */
1200 m->m_data += MCLBYTES - LARGE_BUFFER_LEN;
1201 error = bus_dmamap_load(sc->rbuf_tag, rb->map,
1202 m->m_data, LARGE_BUFFER_LEN, dmaload_helper,
1203 &phys, BUS_DMA_NOWAIT);
1204 if (error) {
1205 if_printf(sc->ifp,
1206 "dmamap_load mbuf failed %d", error);
1207 m_freem(m);
1208 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1209 break;
1210 }
1211
1212 bus_dmamap_sync(sc->rbuf_tag, rb->map,
1213 BUS_DMASYNC_PREREAD);
1214
1215 LIST_REMOVE(rb, link);
1216 LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);
1217
1218 rb->m = m;
1219 bd[i].handle = rb - sc->rbufs;
1220 H_SETDESC(bd[i].buffer, phys);
1221 }
1222
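		/* could not fill the whole block - undo the partial supply */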
1223 if (i < LARGE_SUPPLY_BLKSIZE) {
1224 for (j = 0; j < i; j++) {
1225 rb = sc->rbufs + bd[j].handle;
1226 bus_dmamap_unload(sc->rbuf_tag, rb->map);
1227 m_free(rb->m);
1228 rb->m = NULL;
1229
1230 LIST_REMOVE(rb, link);
1231 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1232 }
1233 break;
1234 }
1235 H_SYNCQ_PREWRITE(&sc->l1q_mem, bd,
1236 sizeof(struct rbd) * LARGE_SUPPLY_BLKSIZE);
1237
1238 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1239 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1240 WRITE4(sc, q->q.card, q->q.card_ioblk);
1241 BARRIER_W(sc);
1242
1243 sc->large_cnt += LARGE_SUPPLY_BLKSIZE;
1244
1245 NEXT_QUEUE_ENTRY(sc->l1queue.head, LARGE_SUPPLY_QLEN);
1246 }
1247}
1248
1249
1250/*
1251 * Actually start the card. The lock must be held here.
1252 * Reset, load the firmware, start it, initialize queues, read the PROM
1253 * and supply receive buffers to the card.
1254 */
1255static void
1256fatm_init_locked(struct fatm_softc *sc)
1257{
1258 struct rxqueue *q;
1259 int i, c, error;
1260 uint32_t start;
1261
1262 DBG(sc, INIT, ("initialize"));
481 ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
482 sc->utopia.carrier == UTP_CARR_OK);
483
484 /*
485 * Collect transmit mbufs, partial receive mbufs and
486 * supplied mbufs
487 */
488 for (i = 0; i < FATM_TX_QLEN; i++) {
489 tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
490 if (tx->m) {
491 bus_dmamap_unload(sc->tx_tag, tx->map);
492 m_freem(tx->m);
493 tx->m = NULL;
494 }
495 }
496
497 /* Collect supplied mbufs */
498 while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
499 LIST_REMOVE(rb, link);
500 bus_dmamap_unload(sc->rbuf_tag, rb->map);
501 m_free(rb->m);
502 rb->m = NULL;
503 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
504 }
505
506 /* Unwait any waiters */
507 wakeup(&sc->sadi_mem);
508
509 /* wakeup all threads waiting for STAT or REG buffers */
510 cv_broadcast(&sc->cv_stat);
511 cv_broadcast(&sc->cv_regs);
512
513 sc->flags &= ~(FATM_STAT_INUSE | FATM_REGS_INUSE);
514
515 /* wakeup all threads waiting on commands */
516 for (i = 0; i < FATM_CMD_QLEN; i++) {
517 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, i);
518
519 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
520 if ((stat = H_GETSTAT(q->q.statp)) != FATM_STAT_FREE) {
521 H_SETSTAT(q->q.statp, stat | FATM_STAT_ERROR);
522 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
523 wakeup(q);
524 }
525 }
526 utopia_reset_media(&sc->utopia);
527 }
528 sc->small_cnt = sc->large_cnt = 0;
529
530 /* Reset vcc info */
531 if (sc->vccs != NULL) {
532 sc->open_vccs = 0;
533 for (i = 0; i < FORE_MAX_VCC + 1; i++) {
534 if (sc->vccs[i] != NULL) {
535 if ((sc->vccs[i]->vflags & (FATM_VCC_OPEN |
536 FATM_VCC_TRY_OPEN)) == 0) {
537 uma_zfree(sc->vcc_zone, sc->vccs[i]);
538 sc->vccs[i] = NULL;
539 } else {
540 sc->vccs[i]->vflags = 0;
541 sc->open_vccs++;
542 }
543 }
544 }
545 }
546
547}
548
549/*
550 * Load the firmware into the board and save the entry point.
551 */
552static uint32_t
553firmware_load(struct fatm_softc *sc)
554{
555 struct firmware *fw = (struct firmware *)firmware;
556
557 DBG(sc, INIT, ("loading - entry=%x", fw->entry));
558 bus_space_write_region_4(sc->memt, sc->memh, fw->offset, firmware,
559 sizeof(firmware) / sizeof(firmware[0]));
560 BARRIER_RW(sc);
561
562 return (fw->entry);
563}
564
565/*
566 * Read a character from the virtual UART. The availability of a character
567 * is signaled by a non-null value of the 32 bit register. The eating of
568 * the character by us is signalled to the card by setting that register
569 * to zero.
570 */
571static int
572rx_getc(struct fatm_softc *sc)
573{
574 int w = 50;
575 int c;
576
577 while (w--) {
578 c = READ4(sc, FATMO_UART_TO_HOST);
579 BARRIER_RW(sc);
580 if (c != 0) {
581 WRITE4(sc, FATMO_UART_TO_HOST, 0);
582 DBGC(sc, UART, ("%c", c & 0xff));
583 return (c & 0xff);
584 }
585 DELAY(1000);
586 }
587 return (-1);
588}
589
590/*
591 * Eat up characters from the board and stuff them in the bit-bucket.
592 */
593static void
594rx_flush(struct fatm_softc *sc)
595{
596 int w = 10000;
597
598 while (w-- && rx_getc(sc) >= 0)
599 ;
600}
601
602/*
603 * Write a character to the card. The UART is available if the register
604 * is zero.
605 */
606static int
607tx_putc(struct fatm_softc *sc, u_char c)
608{
609 int w = 10;
610 int c1;
611
612 while (w--) {
613 c1 = READ4(sc, FATMO_UART_TO_960);
614 BARRIER_RW(sc);
615 if (c1 == 0) {
616 WRITE4(sc, FATMO_UART_TO_960, c | CHAR_AVAIL);
617 DBGC(sc, UART, ("%c", c & 0xff));
618 return (0);
619 }
620 DELAY(1000);
621 }
622 return (-1);
623}
624
625/*
626 * Start the firmware. This is doing by issuing a 'go' command with
627 * the hex entry address of the firmware. Then we wait for the self-test to
628 * succeed.
629 */
630static int
631fatm_start_firmware(struct fatm_softc *sc, uint32_t start)
632{
633 static char hex[] = "0123456789abcdef";
634 u_int w, val;
635
636 DBG(sc, INIT, ("starting"));
637 rx_flush(sc);
638 tx_putc(sc, '\r');
639 DELAY(1000);
640
641 rx_flush(sc);
642
643 tx_putc(sc, 'g');
644 (void)rx_getc(sc);
645 tx_putc(sc, 'o');
646 (void)rx_getc(sc);
647 tx_putc(sc, ' ');
648 (void)rx_getc(sc);
649
650 tx_putc(sc, hex[(start >> 12) & 0xf]);
651 (void)rx_getc(sc);
652 tx_putc(sc, hex[(start >> 8) & 0xf]);
653 (void)rx_getc(sc);
654 tx_putc(sc, hex[(start >> 4) & 0xf]);
655 (void)rx_getc(sc);
656 tx_putc(sc, hex[(start >> 0) & 0xf]);
657 (void)rx_getc(sc);
658
659 tx_putc(sc, '\r');
660 rx_flush(sc);
661
662 for (w = 100; w; w--) {
663 BARRIER_R(sc);
664 val = READ4(sc, FATMO_BOOT_STATUS);
665 switch (val) {
666 case CP_RUNNING:
667 return (0);
668 case SELF_TEST_FAIL:
669 return (EIO);
670 }
671 DELAY(1000);
672 }
673 return (EIO);
674}
675
676/*
677 * Initialize one card and host queue.
678 */
679static void
680init_card_queue(struct fatm_softc *sc, struct fqueue *queue, int qlen,
681 size_t qel_size, size_t desc_size, cardoff_t off,
682 u_char **statpp, uint32_t *cardstat, u_char *descp, uint32_t carddesc)
683{
684 struct fqelem *el = queue->chunk;
685
686 while (qlen--) {
687 el->card = off;
688 off += 8; /* size of card entry */
689
690 el->statp = (uint32_t *)(*statpp);
691 (*statpp) += sizeof(uint32_t);
692 H_SETSTAT(el->statp, FATM_STAT_FREE);
693 H_SYNCSTAT_PREWRITE(sc, el->statp);
694
695 WRITE4(sc, el->card + FATMOS_STATP, (*cardstat));
696 (*cardstat) += sizeof(uint32_t);
697
698 el->ioblk = descp;
699 descp += desc_size;
700 el->card_ioblk = carddesc;
701 carddesc += desc_size;
702
703 el = (struct fqelem *)((u_char *)el + qel_size);
704 }
705 queue->tail = queue->head = 0;
706}
707
708/*
709 * Issue the initialize operation to the card, wait for completion and
710 * initialize the on-board and host queue structures with offsets and
711 * addresses.
712 */
713static int
714fatm_init_cmd(struct fatm_softc *sc)
715{
716 int w, c;
717 u_char *statp;
718 uint32_t card_stat;
719 u_int cnt;
720 struct fqelem *el;
721 cardoff_t off;
722
723 DBG(sc, INIT, ("command"));
724 WRITE4(sc, FATMO_ISTAT, 0);
725 WRITE4(sc, FATMO_IMASK, 1);
726 WRITE4(sc, FATMO_HLOGGER, 0);
727
728 WRITE4(sc, FATMO_INIT + FATMOI_RECEIVE_TRESHOLD, 0);
729 WRITE4(sc, FATMO_INIT + FATMOI_NUM_CONNECT, FORE_MAX_VCC);
730 WRITE4(sc, FATMO_INIT + FATMOI_CQUEUE_LEN, FATM_CMD_QLEN);
731 WRITE4(sc, FATMO_INIT + FATMOI_TQUEUE_LEN, FATM_TX_QLEN);
732 WRITE4(sc, FATMO_INIT + FATMOI_RQUEUE_LEN, FATM_RX_QLEN);
733 WRITE4(sc, FATMO_INIT + FATMOI_RPD_EXTENSION, RPD_EXTENSIONS);
734 WRITE4(sc, FATMO_INIT + FATMOI_TPD_EXTENSION, TPD_EXTENSIONS);
735
736 /*
737 * initialize buffer descriptors
738 */
739 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_QUEUE_LENGTH,
740 SMALL_SUPPLY_QLEN);
741 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_BUFFER_SIZE,
742 SMALL_BUFFER_LEN);
743 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_POOL_SIZE,
744 SMALL_POOL_SIZE);
745 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_SUPPLY_BLKSIZE,
746 SMALL_SUPPLY_BLKSIZE);
747
748 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_QUEUE_LENGTH,
749 LARGE_SUPPLY_QLEN);
750 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_BUFFER_SIZE,
751 LARGE_BUFFER_LEN);
752 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_POOL_SIZE,
753 LARGE_POOL_SIZE);
754 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_SUPPLY_BLKSIZE,
755 LARGE_SUPPLY_BLKSIZE);
756
757 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_QUEUE_LENGTH, 0);
758 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_BUFFER_SIZE, 0);
759 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_POOL_SIZE, 0);
760 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_SUPPLY_BLKSIZE, 0);
761
762 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_QUEUE_LENGTH, 0);
763 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_BUFFER_SIZE, 0);
764 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_POOL_SIZE, 0);
765 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_SUPPLY_BLKSIZE, 0);
766
767 /*
768 * Start the command
769 */
770 BARRIER_W(sc);
771 WRITE4(sc, FATMO_INIT + FATMOI_STATUS, FATM_STAT_PENDING);
772 BARRIER_W(sc);
773 WRITE4(sc, FATMO_INIT + FATMOI_OP, FATM_OP_INITIALIZE);
774 BARRIER_W(sc);
775
776 /*
777 * Busy wait for completion
778 */
779 w = 100;
780 while (w--) {
781 c = READ4(sc, FATMO_INIT + FATMOI_STATUS);
782 BARRIER_R(sc);
783 if (c & FATM_STAT_COMPLETE)
784 break;
785 DELAY(1000);
786 }
787
788 if (c & FATM_STAT_ERROR)
789 return (EIO);
790
791 /*
792 * Initialize the queues
793 */
794 statp = sc->stat_mem.mem;
795 card_stat = sc->stat_mem.paddr;
796
797 /*
798 * Command queue. This is special in that it's on the card.
799 */
800 el = sc->cmdqueue.chunk;
801 off = READ4(sc, FATMO_COMMAND_QUEUE);
802 DBG(sc, INIT, ("cmd queue=%x", off));
803 for (cnt = 0; cnt < FATM_CMD_QLEN; cnt++) {
804 el = &((struct cmdqueue *)sc->cmdqueue.chunk + cnt)->q;
805
806 el->card = off;
807 off += 32; /* size of card structure */
808
809 el->statp = (uint32_t *)statp;
810 statp += sizeof(uint32_t);
811 H_SETSTAT(el->statp, FATM_STAT_FREE);
812 H_SYNCSTAT_PREWRITE(sc, el->statp);
813
814 WRITE4(sc, el->card + FATMOC_STATP, card_stat);
815 card_stat += sizeof(uint32_t);
816 }
817 sc->cmdqueue.tail = sc->cmdqueue.head = 0;
818
819 /*
820 * Now the other queues. These are in memory
821 */
822 init_card_queue(sc, &sc->txqueue, FATM_TX_QLEN,
823 sizeof(struct txqueue), TPD_SIZE,
824 READ4(sc, FATMO_TRANSMIT_QUEUE),
825 &statp, &card_stat, sc->txq_mem.mem, sc->txq_mem.paddr);
826
827 init_card_queue(sc, &sc->rxqueue, FATM_RX_QLEN,
828 sizeof(struct rxqueue), RPD_SIZE,
829 READ4(sc, FATMO_RECEIVE_QUEUE),
830 &statp, &card_stat, sc->rxq_mem.mem, sc->rxq_mem.paddr);
831
832 init_card_queue(sc, &sc->s1queue, SMALL_SUPPLY_QLEN,
833 sizeof(struct supqueue), BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE),
834 READ4(sc, FATMO_SMALL_B1_QUEUE),
835 &statp, &card_stat, sc->s1q_mem.mem, sc->s1q_mem.paddr);
836
837 init_card_queue(sc, &sc->l1queue, LARGE_SUPPLY_QLEN,
838 sizeof(struct supqueue), BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE),
839 READ4(sc, FATMO_LARGE_B1_QUEUE),
840 &statp, &card_stat, sc->l1q_mem.mem, sc->l1q_mem.paddr);
841
842 sc->txcnt = 0;
843
844 return (0);
845}
846
847/*
848 * Read PROM. Called only from attach code. Here we spin because the interrupt
849 * handler is not yet set up.
850 */
851static int
852fatm_getprom(struct fatm_softc *sc)
853{
854 int i;
855 struct prom *prom;
856 struct cmdqueue *q;
857
858 DBG(sc, INIT, ("reading prom"));
859 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
860 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
861
862 q->error = 0;
863 q->cb = NULL;
864 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
865 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
866
867 bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
868 BUS_DMASYNC_PREREAD);
869
870 WRITE4(sc, q->q.card + FATMOC_GPROM_BUF, sc->prom_mem.paddr);
871 BARRIER_W(sc);
872 WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_GET_PROM_DATA);
873 BARRIER_W(sc);
874
875 for (i = 0; i < 1000; i++) {
876 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
877 if (H_GETSTAT(q->q.statp) &
878 (FATM_STAT_COMPLETE | FATM_STAT_ERROR))
879 break;
880 DELAY(1000);
881 }
882 if (i == 1000) {
883 if_printf(sc->ifp, "getprom timeout\n");
884 return (EIO);
885 }
886 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
887 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
888 if_printf(sc->ifp, "getprom error\n");
889 return (EIO);
890 }
891 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
892 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
893 NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
894
895 bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
896 BUS_DMASYNC_POSTREAD);
897
898
899#ifdef notdef
900 {
901 u_int i;
902
903 printf("PROM: ");
904 u_char *ptr = (u_char *)sc->prom_mem.mem;
905 for (i = 0; i < sizeof(struct prom); i++)
906 printf("%02x ", *ptr++);
907 printf("\n");
908 }
909#endif
910
911 prom = (struct prom *)sc->prom_mem.mem;
912
913 bcopy(prom->mac + 2, IFP2IFATM(sc->ifp)->mib.esi, 6);
914 IFP2IFATM(sc->ifp)->mib.serial = le32toh(prom->serial);
915 IFP2IFATM(sc->ifp)->mib.hw_version = le32toh(prom->version);
916 IFP2IFATM(sc->ifp)->mib.sw_version = READ4(sc, FATMO_FIRMWARE_RELEASE);
917
918 if_printf(sc->ifp, "ESI=%02x:%02x:%02x:%02x:%02x:%02x "
919 "serial=%u hw=0x%x sw=0x%x\n", IFP2IFATM(sc->ifp)->mib.esi[0],
920 IFP2IFATM(sc->ifp)->mib.esi[1], IFP2IFATM(sc->ifp)->mib.esi[2], IFP2IFATM(sc->ifp)->mib.esi[3],
921 IFP2IFATM(sc->ifp)->mib.esi[4], IFP2IFATM(sc->ifp)->mib.esi[5], IFP2IFATM(sc->ifp)->mib.serial,
922 IFP2IFATM(sc->ifp)->mib.hw_version, IFP2IFATM(sc->ifp)->mib.sw_version);
923
924 return (0);
925}
926
927/*
928 * This is the callback function for bus_dmamap_load. We assume that we
929 * have a 32-bit bus and so always have one segment.
930 */
931static void
932dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
933{
934 bus_addr_t *ptr = (bus_addr_t *)arg;
935
936 if (error != 0) {
937 printf("%s: error=%d\n", __func__, error);
938 return;
939 }
940 KASSERT(nsegs == 1, ("too many DMA segments"));
941 KASSERT(segs[0].ds_addr <= 0xffffffff, ("DMA address too large %lx",
942 (u_long)segs[0].ds_addr));
943
944 *ptr = segs[0].ds_addr;
945}
946
947/*
948 * Allocate a chunk of DMA-able memory and map it.
949 */
950static int
951alloc_dma_memory(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
952{
953 int error;
954
955 mem->mem = NULL;
956
957 if (bus_dma_tag_create(sc->parent_dmat, mem->align, 0,
958 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
959 NULL, NULL, mem->size, 1, BUS_SPACE_MAXSIZE_32BIT,
960 BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
961 if_printf(sc->ifp, "could not allocate %s DMA tag\n",
962 nm);
963 return (ENOMEM);
964 }
965
966 error = bus_dmamem_alloc(mem->dmat, &mem->mem, 0, &mem->map);
967 if (error) {
968 if_printf(sc->ifp, "could not allocate %s DMA memory: "
969 "%d\n", nm, error);
970 bus_dma_tag_destroy(mem->dmat);
971 mem->mem = NULL;
972 return (error);
973 }
974
975 error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
976 dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
977 if (error) {
978 if_printf(sc->ifp, "could not load %s DMA memory: "
979 "%d\n", nm, error);
980 bus_dmamem_free(mem->dmat, mem->mem, mem->map);
981 bus_dma_tag_destroy(mem->dmat);
982 mem->mem = NULL;
983 return (error);
984 }
985
986 DBG(sc, DMA, ("DMA %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
987 (u_long)mem->paddr, mem->size, mem->align));
988
989 return (0);
990}
991
992#ifdef TEST_DMA_SYNC
993static int
994alloc_dma_memoryX(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
995{
996 int error;
997
998 mem->mem = NULL;
999
1000 if (bus_dma_tag_create(NULL, mem->align, 0,
1001 BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR,
1002 NULL, NULL, mem->size, 1, mem->size,
1003 BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
1004 if_printf(sc->ifp, "could not allocate %s DMA tag\n",
1005 nm);
1006 return (ENOMEM);
1007 }
1008
1009 mem->mem = contigmalloc(mem->size, M_DEVBUF, M_WAITOK,
1010 BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR_32BIT, mem->align, 0);
1011
1012 error = bus_dmamap_create(mem->dmat, 0, &mem->map);
1013 if (error) {
1014 if_printf(sc->ifp, "could not allocate %s DMA map: "
1015 "%d\n", nm, error);
1016 contigfree(mem->mem, mem->size, M_DEVBUF);
1017 bus_dma_tag_destroy(mem->dmat);
1018 mem->mem = NULL;
1019 return (error);
1020 }
1021
1022 error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
1023 dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
1024 if (error) {
1025 if_printf(sc->ifp, "could not load %s DMA memory: "
1026 "%d\n", nm, error);
1027 bus_dmamap_destroy(mem->dmat, mem->map);
1028 contigfree(mem->mem, mem->size, M_DEVBUF);
1029 bus_dma_tag_destroy(mem->dmat);
1030 mem->mem = NULL;
1031 return (error);
1032 }
1033
1034 DBG(sc, DMA, ("DMAX %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
1035 (u_long)mem->paddr, mem->size, mem->align));
1036
1037 printf("DMAX: %s V/P/S/Z %p/%lx/%x/%x\n", nm, mem->mem,
1038 (u_long)mem->paddr, mem->size, mem->align);
1039
1040 return (0);
1041}
1042#endif /* TEST_DMA_SYNC */
1043
1044/*
1045 * Destroy all resources of a DMA-able memory chunk
1046 */
1047static void
1048destroy_dma_memory(struct fatm_mem *mem)
1049{
1050 if (mem->mem != NULL) {
1051 bus_dmamap_unload(mem->dmat, mem->map);
1052 bus_dmamem_free(mem->dmat, mem->mem, mem->map);
1053 bus_dma_tag_destroy(mem->dmat);
1054 mem->mem = NULL;
1055 }
1056}
1057#ifdef TEST_DMA_SYNC
1058static void
1059destroy_dma_memoryX(struct fatm_mem *mem)
1060{
1061 if (mem->mem != NULL) {
1062 bus_dmamap_unload(mem->dmat, mem->map);
1063 bus_dmamap_destroy(mem->dmat, mem->map);
1064 contigfree(mem->mem, mem->size, M_DEVBUF);
1065 bus_dma_tag_destroy(mem->dmat);
1066 mem->mem = NULL;
1067 }
1068}
1069#endif /* TEST_DMA_SYNC */
1070
1071/*
1072 * Try to supply buffers to the card if there are free entries in the queues.
1073 */
1074static void
1075fatm_supply_small_buffers(struct fatm_softc *sc)
1076{
1077 int nblocks, nbufs;
1078 struct supqueue *q;
1079 struct rbd *bd;
1080 int i, j, error, cnt;
1081 struct mbuf *m;
1082 struct rbuf *rb;
1083 bus_addr_t phys;
1084
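/*
 * Sizing policy: aim for four buffers per open VCC, at least 32 and at
 * most the pool size; supply only the difference to what is already
 * outstanding, rounded up to whole supply blocks below.
 */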
1085 nbufs = max(4 * sc->open_vccs, 32);
1086 nbufs = min(nbufs, SMALL_POOL_SIZE);
1087 nbufs -= sc->small_cnt;
1088
1089 nblocks = (nbufs + SMALL_SUPPLY_BLKSIZE - 1) / SMALL_SUPPLY_BLKSIZE;
1090 for (cnt = 0; cnt < nblocks; cnt++) {
1091 q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.head);
1092
1093 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1094 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
1095 break;
1096
1097 bd = (struct rbd *)q->q.ioblk;
1098
1099 for (i = 0; i < SMALL_SUPPLY_BLKSIZE; i++) {
1100 if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
1101 if_printf(sc->ifp, "out of rbufs\n");
1102 break;
1103 }
1104 MGETHDR(m, M_DONTWAIT, MT_DATA);
1105 if (m == NULL) {
1106 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1107 break;
1108 }
1109 MH_ALIGN(m, SMALL_BUFFER_LEN);
1110 error = bus_dmamap_load(sc->rbuf_tag, rb->map,
1111 m->m_data, SMALL_BUFFER_LEN, dmaload_helper,
1112 &phys, BUS_DMA_NOWAIT);
1113 if (error) {
1114 if_printf(sc->ifp,
1115 "dmamap_load mbuf failed %d\n", error);
1116 m_freem(m);
1117 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1118 break;
1119 }
1120 bus_dmamap_sync(sc->rbuf_tag, rb->map,
1121 BUS_DMASYNC_PREREAD);
1122
1123 LIST_REMOVE(rb, link);
1124 LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);
1125
1126 rb->m = m;
1127 bd[i].handle = rb - sc->rbufs;
1128 H_SETDESC(bd[i].buffer, phys);
1129 }
1130
1131 if (i < SMALL_SUPPLY_BLKSIZE) {
1132 for (j = 0; j < i; j++) {
1133 rb = sc->rbufs + bd[j].handle;
1134 bus_dmamap_unload(sc->rbuf_tag, rb->map);
1135 m_free(rb->m);
1136 rb->m = NULL;
1137
1138 LIST_REMOVE(rb, link);
1139 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1140 }
1141 break;
1142 }
1143 H_SYNCQ_PREWRITE(&sc->s1q_mem, bd,
1144 sizeof(struct rbd) * SMALL_SUPPLY_BLKSIZE);
1145
1146 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1147 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1148
1149 WRITE4(sc, q->q.card, q->q.card_ioblk);
1150 BARRIER_W(sc);
1151
1152 sc->small_cnt += SMALL_SUPPLY_BLKSIZE;
1153
1154 NEXT_QUEUE_ENTRY(sc->s1queue.head, SMALL_SUPPLY_QLEN);
1155 }
1156}
1157
1158/*
1159 * Try to supply buffers to the card if there are free entries in the queues.
1160 * We assume that all buffers are within the address space accessible by the
1161 * card (32-bit), so we don't need bounce buffers.
1162 */
1163static void
1164fatm_supply_large_buffers(struct fatm_softc *sc)
1165{
1166 int nbufs, nblocks, cnt;
1167 struct supqueue *q;
1168 struct rbd *bd;
1169 int i, j, error;
1170 struct mbuf *m;
1171 struct rbuf *rb;
1172 bus_addr_t phys;
1173
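/* Same supply sizing policy as in fatm_supply_small_buffers() above. */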
1174 nbufs = max(4 * sc->open_vccs, 32);
1175 nbufs = min(nbufs, LARGE_POOL_SIZE);
1176 nbufs -= sc->large_cnt;
1177
1178 nblocks = (nbufs + LARGE_SUPPLY_BLKSIZE - 1) / LARGE_SUPPLY_BLKSIZE;
1179
1180 for (cnt = 0; cnt < nblocks; cnt++) {
1181 q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.head);
1182
1183 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1184 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
1185 break;
1186
1187 bd = (struct rbd *)q->q.ioblk;
1188
1189 for (i = 0; i < LARGE_SUPPLY_BLKSIZE; i++) {
1190 if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
1191 if_printf(sc->ifp, "out of rbufs\n");
1192 break;
1193 }
1194 if ((m = m_getcl(M_DONTWAIT, MT_DATA,
1195 M_PKTHDR)) == NULL) {
1196 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1197 break;
1198 }
1199 /* No MEXT_ALIGN */
1200 m->m_data += MCLBYTES - LARGE_BUFFER_LEN;
1201 error = bus_dmamap_load(sc->rbuf_tag, rb->map,
1202 m->m_data, LARGE_BUFFER_LEN, dmaload_helper,
1203 &phys, BUS_DMA_NOWAIT);
1204 if (error) {
1205 if_printf(sc->ifp,
1206 "dmamap_load mbuf failed %d\n", error);
1207 m_freem(m);
1208 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1209 break;
1210 }
1211
1212 bus_dmamap_sync(sc->rbuf_tag, rb->map,
1213 BUS_DMASYNC_PREREAD);
1214
1215 LIST_REMOVE(rb, link);
1216 LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);
1217
1218 rb->m = m;
1219 bd[i].handle = rb - sc->rbufs;
1220 H_SETDESC(bd[i].buffer, phys);
1221 }
1222
1223 if (i < LARGE_SUPPLY_BLKSIZE) {
1224 for (j = 0; j < i; j++) {
1225 rb = sc->rbufs + bd[j].handle;
1226 bus_dmamap_unload(sc->rbuf_tag, rb->map);
1227 m_free(rb->m);
1228 rb->m = NULL;
1229
1230 LIST_REMOVE(rb, link);
1231 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1232 }
1233 break;
1234 }
1235 H_SYNCQ_PREWRITE(&sc->l1q_mem, bd,
1236 sizeof(struct rbd) * LARGE_SUPPLY_BLKSIZE);
1237
1238 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1239 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1240 WRITE4(sc, q->q.card, q->q.card_ioblk);
1241 BARRIER_W(sc);
1242
1243 sc->large_cnt += LARGE_SUPPLY_BLKSIZE;
1244
1245 NEXT_QUEUE_ENTRY(sc->l1queue.head, LARGE_SUPPLY_QLEN);
1246 }
1247}
1248
1249
1250/*
1251 * Actually start the card. The lock must be held here.
1252 * Reset, load the firmware, start it, initialize the queues, read the PROM
1253 * and supply receive buffers to the card.
1254 */
1255static void
1256fatm_init_locked(struct fatm_softc *sc)
1257{
1258 struct rxqueue *q;
1259 int i, c, error;
1260 uint32_t start;
1261
1262 DBG(sc, INIT, ("initialize"));
1263 if (sc->ifp->if_flags & IFF_RUNNING)
1263 if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING)
1264 fatm_stop(sc);
1265
1266 /*
1267 * Hard reset the board
1268 */
1269 if (fatm_reset(sc))
1270 return;
1271
1272 start = firmware_load(sc);
1273 if (fatm_start_firmware(sc, start) || fatm_init_cmd(sc) ||
1274 fatm_getprom(sc)) {
1275 fatm_reset(sc);
1276 return;
1277 }
1278
1279 /*
1280 * Handle media
1281 */
1282 c = READ4(sc, FATMO_MEDIA_TYPE);
1283 switch (c) {
1284
1285 case FORE_MT_TAXI_100:
1286 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_TAXI_100;
1287 IFP2IFATM(sc->ifp)->mib.pcr = 227273;
1288 break;
1289
1290 case FORE_MT_TAXI_140:
1291 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_TAXI_140;
1292 IFP2IFATM(sc->ifp)->mib.pcr = 318181;
1293 break;
1294
1295 case FORE_MT_UTP_SONET:
1296 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155;
1297 IFP2IFATM(sc->ifp)->mib.pcr = 353207;
1298 break;
1299
1300 case FORE_MT_MM_OC3_ST:
1301 case FORE_MT_MM_OC3_SC:
1302 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155;
1303 IFP2IFATM(sc->ifp)->mib.pcr = 353207;
1304 break;
1305
1306 case FORE_MT_SM_OC3_ST:
1307 case FORE_MT_SM_OC3_SC:
1308 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_155;
1309 IFP2IFATM(sc->ifp)->mib.pcr = 353207;
1310 break;
1311
1312 default:
1313 log(LOG_ERR, "fatm: unknown media type %d\n", c);
1314 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN;
1315 IFP2IFATM(sc->ifp)->mib.pcr = 353207;
1316 break;
1317 }
1318 sc->ifp->if_baudrate = 53 * 8 * IFP2IFATM(sc->ifp)->mib.pcr;
1319 utopia_init_media(&sc->utopia);
1320
1321 /*
1322 * Initialize the RBDs
1323 */
1324 for (i = 0; i < FATM_RX_QLEN; i++) {
1325 q = GET_QUEUE(sc->rxqueue, struct rxqueue, i);
1326 WRITE4(sc, q->q.card + 0, q->q.card_ioblk);
1327 }
1328 BARRIER_W(sc);
1329
1330 /*
1331 * Supply buffers to the card
1332 */
1333 fatm_supply_small_buffers(sc);
1334 fatm_supply_large_buffers(sc);
1335
1336 /*
1337 * Now set the flags to indicate that we are ready
1338 */
1339 sc->ifp->if_flags |= IFF_RUNNING;
1339 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
1340
1341 /*
1342 * Start the watchdog timer
1343 */
1344 sc->ifp->if_timer = 5;
1345
1346 /* start SUNI */
1347 utopia_start(&sc->utopia);
1348
1349 ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
1350 sc->utopia.carrier == UTP_CARR_OK);
1351
1352 /* start all channels */
1353 for (i = 0; i < FORE_MAX_VCC + 1; i++)
1354 if (sc->vccs[i] != NULL) {
1355 sc->vccs[i]->vflags |= FATM_VCC_REOPEN;
1356 error = fatm_load_vc(sc, sc->vccs[i]);
1357 if (error != 0) {
1358 if_printf(sc->ifp, "reopening %u "
1359 "failed: %d\n", i, error);
1360 sc->vccs[i]->vflags &= ~FATM_VCC_REOPEN;
1361 }
1362 }
1363
1364 DBG(sc, INIT, ("done"));
1365}
1366
1367/*
1368 * This is the exported initialisation function.
1369 */
1370static void
1371fatm_init(void *p)
1372{
1373 struct fatm_softc *sc = p;
1374
1375 FATM_LOCK(sc);
1376 fatm_init_locked(sc);
1377 FATM_UNLOCK(sc);
1378}
1379
1380/************************************************************/
1381/*
1382 * The INTERRUPT handling
1383 */
1384/*
1385 * Check the command queue. If a command was completed, call the completion
1386 * function for that command.
1387 */
1388static void
1389fatm_intr_drain_cmd(struct fatm_softc *sc)
1390{
1391 struct cmdqueue *q;
1392 int stat;
1393
1394 /*
1395 * Drain command queue
1396 */
1397 for (;;) {
1398 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.tail);
1399
1400 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1401 stat = H_GETSTAT(q->q.statp);
1402
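/*
 * Any state other than COMPLETE and/or ERROR (i.e. FREE or PENDING)
 * means there are no more finished commands in the queue.
 */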
1403 if (stat != FATM_STAT_COMPLETE &&
1404 stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
1405 stat != FATM_STAT_ERROR)
1406 break;
1407
1408 (*q->cb)(sc, q);
1409
1410 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1411 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1412
1413 NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
1414 }
1415}
1416
1417/*
1418 * Drain the small buffer supply queue.
1419 */
1420static void
1421fatm_intr_drain_small_buffers(struct fatm_softc *sc)
1422{
1423 struct supqueue *q;
1424 int stat;
1425
1426 for (;;) {
1427 q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.tail);
1428
1429 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1430 stat = H_GETSTAT(q->q.statp);
1431
1432 if ((stat & FATM_STAT_COMPLETE) == 0)
1433 break;
1434 if (stat & FATM_STAT_ERROR)
1435 log(LOG_ERR, "%s: status %x\n", __func__, stat);
1436
1437 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1438 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1439
1440 NEXT_QUEUE_ENTRY(sc->s1queue.tail, SMALL_SUPPLY_QLEN);
1441 }
1442}
1443
1444/*
1445 * Drain the large buffer supply queue.
1446 */
1447static void
1448fatm_intr_drain_large_buffers(struct fatm_softc *sc)
1449{
1450 struct supqueue *q;
1451 int stat;
1452
1453 for (;;) {
1454 q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.tail);
1455
1456 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1457 stat = H_GETSTAT(q->q.statp);
1458
1459 if ((stat & FATM_STAT_COMPLETE) == 0)
1460 break;
1461 if (stat & FATM_STAT_ERROR)
1462 log(LOG_ERR, "%s: status %x\n", __func__, stat);
1463
1464 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1465 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1466
1467 NEXT_QUEUE_ENTRY(sc->l1queue.tail, LARGE_SUPPLY_QLEN);
1468 }
1469}
1470
1471/*
1472 * Check the receive queue. Send any received PDU up the protocol stack,
1473 * except when there was an error or the VCI appears to be closed; in
1474 * that case the PDU is discarded.
1475 */
1476static void
1477fatm_intr_drain_rx(struct fatm_softc *sc)
1478{
1479 struct rxqueue *q;
1480 int stat, mlen;
1481 u_int i;
1482 uint32_t h;
1483 struct mbuf *last, *m0;
1484 struct rpd *rpd;
1485 struct rbuf *rb;
1486 u_int vci, vpi, pt;
1487 struct atm_pseudohdr aph;
1488 struct ifnet *ifp;
1489 struct card_vcc *vc;
1490
1491 for (;;) {
1492 q = GET_QUEUE(sc->rxqueue, struct rxqueue, sc->rxqueue.tail);
1493
1494 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1495 stat = H_GETSTAT(q->q.statp);
1496
1497 if ((stat & FATM_STAT_COMPLETE) == 0)
1498 break;
1499
1500 rpd = (struct rpd *)q->q.ioblk;
1501 H_SYNCQ_POSTREAD(&sc->rxq_mem, rpd, RPD_SIZE);
1502
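/*
 * Reassemble the PDU: chain the mbufs of all receive segments together
 * and hand each rbuf back to the free list.
 */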
1503 rpd->nseg = le32toh(rpd->nseg);
1504 mlen = 0;
1505 m0 = last = NULL;
1506 for (i = 0; i < rpd->nseg; i++) {
1507 rb = sc->rbufs + rpd->segment[i].handle;
1508 if (m0 == NULL) {
1509 m0 = last = rb->m;
1510 } else {
1511 last->m_next = rb->m;
1512 last = rb->m;
1513 }
1514 last->m_next = NULL;
1515 if (last->m_flags & M_EXT)
1516 sc->large_cnt--;
1517 else
1518 sc->small_cnt--;
1519 bus_dmamap_sync(sc->rbuf_tag, rb->map,
1520 BUS_DMASYNC_POSTREAD);
1521 bus_dmamap_unload(sc->rbuf_tag, rb->map);
1522 rb->m = NULL;
1523
1524 LIST_REMOVE(rb, link);
1525 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1526
1527 last->m_len = le32toh(rpd->segment[i].length);
1528 mlen += last->m_len;
1529 }
1530
1531 m0->m_pkthdr.len = mlen;
1532 m0->m_pkthdr.rcvif = sc->ifp;
1533
1534 h = le32toh(rpd->atm_header);
1535 vpi = (h >> 20) & 0xff;
1536 vci = (h >> 4 ) & 0xffff;
1537 pt = (h >> 1 ) & 0x7;
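/*
 * The shifts above follow the ATM UNI cell header layout: 4 bits GFC,
 * 8 bits VPI, 16 bits VCI, 3 bits PT and the CLP bit in bit 0.
 */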
1538
1539 /*
1540 * Locate the VCC this packet belongs to
1541 */
1542 if (!VC_OK(sc, vpi, vci))
1543 vc = NULL;
1544 else if ((vc = sc->vccs[vci]) == NULL ||
1545 !(sc->vccs[vci]->vflags & FATM_VCC_OPEN)) {
1546 sc->istats.rx_closed++;
1547 vc = NULL;
1548 }
1549
1550 DBG(sc, RCV, ("RCV: vc=%u.%u pt=%u mlen=%d %s", vpi, vci,
1551 pt, mlen, vc == NULL ? "dropped" : ""));
1552
1553 if (vc == NULL) {
1554 m_freem(m0);
1555 } else {
1556#ifdef ENABLE_BPF
1557 if (!(vc->param.flags & ATMIO_FLAG_NG) &&
1558 vc->param.aal == ATMIO_AAL_5 &&
1559 (vc->param.flags & ATM_PH_LLCSNAP))
1560 BPF_MTAP(sc->ifp, m0);
1561#endif
1562
1563 ATM_PH_FLAGS(&aph) = vc->param.flags;
1564 ATM_PH_VPI(&aph) = vpi;
1565 ATM_PH_SETVCI(&aph, vci);
1566
1567 ifp = sc->ifp;
1568 ifp->if_ipackets++;
1569
1570 vc->ipackets++;
1571 vc->ibytes += m0->m_pkthdr.len;
1572
1573 atm_input(ifp, &aph, m0, vc->rxhand);
1574 }
1575
1576 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1577 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1578
1579 WRITE4(sc, q->q.card, q->q.card_ioblk);
1580 BARRIER_W(sc);
1581
1582 NEXT_QUEUE_ENTRY(sc->rxqueue.tail, FATM_RX_QLEN);
1583 }
1584}
1585
1586/*
1587 * Check the transmit queue. Free the mbuf chains that we were transmitting.
1588 */
1589static void
1590fatm_intr_drain_tx(struct fatm_softc *sc)
1591{
1592 struct txqueue *q;
1593 int stat;
1594
1595 /*
1596 * Drain tx queue
1597 */
1598 for (;;) {
1599 q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.tail);
1600
1601 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1602 stat = H_GETSTAT(q->q.statp);
1603
1604 if (stat != FATM_STAT_COMPLETE &&
1605 stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
1606 stat != FATM_STAT_ERROR)
1607 break;
1608
1609 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1610 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1611
1612 bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_POSTWRITE);
1613 bus_dmamap_unload(sc->tx_tag, q->map);
1614
1615 m_freem(q->m);
1616 q->m = NULL;
1617 sc->txcnt--;
1618
1619 NEXT_QUEUE_ENTRY(sc->txqueue.tail, FATM_TX_QLEN);
1620 }
1621}
1622
1623/*
1624 * Interrupt handler
1625 */
1626static void
1627fatm_intr(void *p)
1628{
1629 struct fatm_softc *sc = (struct fatm_softc *)p;
1630
1631 FATM_LOCK(sc);
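/*
 * If the PSR is clear the interrupt was not raised by this card (the
 * line may be shared with other devices), so pass it on.
 */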
1632 if (!READ4(sc, FATMO_PSR)) {
1633 FATM_UNLOCK(sc);
1634 return;
1635 }
1636 WRITE4(sc, FATMO_HCR, FATM_HCR_CLRIRQ);
1637
1638 if (!(sc->ifp->if_flags & IFF_RUNNING)) {
1638 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1639 FATM_UNLOCK(sc);
1640 return;
1641 }
1642 fatm_intr_drain_cmd(sc);
1643 fatm_intr_drain_rx(sc);
1644 fatm_intr_drain_tx(sc);
1645 fatm_intr_drain_small_buffers(sc);
1646 fatm_intr_drain_large_buffers(sc);
1647 fatm_supply_small_buffers(sc);
1648 fatm_supply_large_buffers(sc);
1649
1650 FATM_UNLOCK(sc);
1651
1652 if (sc->retry_tx && _IF_QLEN(&sc->ifp->if_snd))
1653 (*sc->ifp->if_start)(sc->ifp);
1654}
1655
1656/*
1657 * Get device statistics. This must be called with the softc locked.
1658 * We use a preallocated buffer, so we need to protect this buffer.
1659 * We do this by using a condition variable and a flag. If the flag is set
1660 * the buffer is in use by one thread (one thread is executing a GETSTAT
1661 * card command). In this case all other threads that are trying to get
1662 * statistics block on that condition variable. When the thread finishes
1663 * using the buffer it resets the flag and signals the condition variable. This
1664 * will wake up the next thread that is waiting for the buffer. If the interface
1665 * is stopped the stopping function will broadcast the cv. All threads will
1666 * find that the interface has been stopped and return.
1667 *
1668 * Acquiring the buffer is done by the fatm_getstat() function. The freeing
1669 * must be done by the caller when he has finished using the buffer.
1670 */
1671static void
1672fatm_getstat_complete(struct fatm_softc *sc, struct cmdqueue *q)
1673{
1674
1675 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1676 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
1677 sc->istats.get_stat_errors++;
1678 q->error = EIO;
1679 }
1680 wakeup(&sc->sadi_mem);
1681}
1682static int
1683fatm_getstat(struct fatm_softc *sc)
1684{
1685 int error;
1686 struct cmdqueue *q;
1687
1688 /*
1689 * Wait until either the interface is stopped or we can get the
1690 * statistics buffer
1691 */
1692 for (;;) {
1693 if (!(sc->ifp->if_flags & IFF_RUNNING))
1693 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
1694 return (EIO);
1695 if (!(sc->flags & FATM_STAT_INUSE))
1696 break;
1697 cv_wait(&sc->cv_stat, &sc->mtx);
1698 }
1699 sc->flags |= FATM_STAT_INUSE;
1700
1701 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
1702
1703 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1704 if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
1705 sc->istats.cmd_queue_full++;
1706 return (EIO);
1707 }
1708 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
1709
1710 q->error = 0;
1711 q->cb = fatm_getstat_complete;
1712 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1713 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1714
1715 bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
1716 BUS_DMASYNC_PREREAD);
1717
1718 WRITE4(sc, q->q.card + FATMOC_GSTAT_BUF,
1719 sc->sadi_mem.paddr);
1720 BARRIER_W(sc);
1721 WRITE4(sc, q->q.card + FATMOC_OP,
1722 FATM_OP_REQUEST_STATS | FATM_OP_INTERRUPT_SEL);
1723 BARRIER_W(sc);
1724
1725 /*
1726 * Wait for the command to complete
1727 */
1728 error = msleep(&sc->sadi_mem, &sc->mtx, PZERO | PCATCH,
1729 "fatm_stat", hz);
1730
1731 switch (error) {
1732
1733 case EWOULDBLOCK:
1734 error = EIO;
1735 break;
1736
1737 case ERESTART:
1738 error = EINTR;
1739 break;
1740
1741 case 0:
1742 bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
1743 BUS_DMASYNC_POSTREAD);
1744 error = q->error;
1745 break;
1746 }
1747
1748 /*
1749 * Swap statistics
1750 */
1751 if (q->error == 0) {
1752 u_int i;
1753 uint32_t *p = (uint32_t *)sc->sadi_mem.mem;
1754
1755 for (i = 0; i < sizeof(struct fatm_stats) / sizeof(uint32_t);
1756 i++, p++)
1757 *p = be32toh(*p);
1758 }
1759
1760 return (error);
1761}
1762
1763/*
1764 * Create a copy of a single mbuf. It can have either internal or
1765 * external data, it may have a packet header. External data is really
1766 * copied, so the new buffer is writeable.
1767 */
1768static struct mbuf *
1769copy_mbuf(struct mbuf *m)
1770{
1771 struct mbuf *new;
1772
1773 MGET(new, M_DONTWAIT, MT_DATA);
1774 if (new == NULL)
1775 return (NULL);
1776
1777 if (m->m_flags & M_PKTHDR) {
1778 M_MOVE_PKTHDR(new, m);
1779 if (m->m_len > MHLEN) {
1780 MCLGET(new, M_TRYWAIT);
1781 if ((new->m_flags & M_EXT) == 0) {
1782 m_free(new);
1783 return (NULL);
1784 }
1785 }
1786 } else {
1787 if (m->m_len > MLEN) {
1788 MCLGET(new, M_TRYWAIT);
1789 if ((new->m_flags & M_EXT) == 0) {
1790 m_free(new);
1791 return (NULL);
1792 }
1793 }
1794 }
1795
1796 bcopy(m->m_data, new->m_data, m->m_len);
1797 new->m_len = m->m_len;
1798 new->m_flags &= ~M_RDONLY;
1799
1800 return (new);
1801}
1802
1803/*
1804 * All segments must have a four byte aligned buffer address and a four
1805 * byte aligned length. Step through an mbuf chain and check these conditions.
1806 * If the buffer address is not aligned and this is a normal mbuf, move
1807 * the data down. Else make a copy of the mbuf with aligned data.
1808 * If the buffer length is not aligned, steal data from the next mbuf.
1809 * We don't need to check whether this has more than one external reference,
1810 * because stealing data doesn't change the external cluster.
1811 * If the last mbuf is not aligned, fill with zeroes.
1812 *
1813 * Return packet length (well we should have this in the packet header),
1814 * but be careful not to count the zero fill at the end.
1815 *
1816 * If fixing fails free the chain and zero the pointer.
1817 *
1818 * We assume that aligning the virtual address also aligns the mapped bus
1819 * address.
1820 */
1821static u_int
1822fatm_fix_chain(struct fatm_softc *sc, struct mbuf **mp)
1823{
1824 struct mbuf *m = *mp, *prev = NULL, *next, *new;
1825 u_int mlen = 0, fill = 0;
1826 int first, off;
1827 u_char *d, *cp;
1828
1829 do {
1830 next = m->m_next;
1831
1832 if ((uintptr_t)mtod(m, void *) % 4 != 0 ||
1833 (m->m_len % 4 != 0 && next)) {
1834 /*
1835 * Needs fixing
1836 */
1837 first = (m == *mp);
1838
1839 d = mtod(m, u_char *);
1840 if ((off = (uintptr_t)(void *)d % 4) != 0) {
1841 if (!(m->m_flags & M_EXT) || !MEXT_IS_REF(m)) {
1842 sc->istats.fix_addr_copy++;
1843 bcopy(d, d - off, m->m_len);
1844 m->m_data = (caddr_t)(d - off);
1845 } else {
1846 if ((new = copy_mbuf(m)) == NULL) {
1847 sc->istats.fix_addr_noext++;
1848 goto fail;
1849 }
1850 sc->istats.fix_addr_ext++;
1851 if (prev)
1852 prev->m_next = new;
1853 new->m_next = next;
1854 m_free(m);
1855 m = new;
1856 }
1857 }
1858
1859 if ((off = m->m_len % 4) != 0) {
1860 if ((m->m_flags & M_EXT) && MEXT_IS_REF(m)) {
1861 if ((new = copy_mbuf(m)) == NULL) {
1862 sc->istats.fix_len_noext++;
1863 goto fail;
1864 }
1865 sc->istats.fix_len_copy++;
1866 if (prev)
1867 prev->m_next = new;
1868 new->m_next = next;
1869 m_free(m);
1870 m = new;
1871 } else
1872 sc->istats.fix_len++;
1873 d = mtod(m, u_char *) + m->m_len;
1874 off = 4 - off;
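/*
 * Pad with zeroes if this is the last mbuf; otherwise steal the
 * missing bytes from the following mbufs.
 */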
1875 while (off) {
1876 if (next == NULL) {
1877 *d++ = 0;
1878 fill++;
1879 } else if (next->m_len == 0) {
1880 sc->istats.fix_empty++;
1881 next = m_free(next);
1882 continue;
1883 } else {
1884 cp = mtod(next, u_char *);
1885 *d++ = *cp++;
1886 next->m_len--;
1887 next->m_data = (caddr_t)cp;
1888 }
1889 off--;
1890 m->m_len++;
1891 }
1892 }
1893
1894 if (first)
1895 *mp = m;
1896 }
1897
1898 mlen += m->m_len;
1899 prev = m;
1900 } while ((m = next) != NULL);
1901
1902 return (mlen - fill);
1903
1904 fail:
1905 m_freem(*mp);
1906 *mp = NULL;
1907 return (0);
1908}
1909
1910/*
1911 * The helper function is used to load the computed physical addresses
1912 * into the transmit descriptor.
1913 */
1914static void
1915fatm_tpd_load(void *varg, bus_dma_segment_t *segs, int nsegs,
1916 bus_size_t mapsize, int error)
1917{
1918 struct tpd *tpd = varg;
1919
1920 if (error)
1921 return;
1922
1923 KASSERT(nsegs <= TPD_EXTENSIONS + TXD_FIXED, ("too many segments"));
1924
1925 tpd->spec = 0;
1926 while (nsegs--) {
1927 H_SETDESC(tpd->segment[tpd->spec].buffer, segs->ds_addr);
1928 H_SETDESC(tpd->segment[tpd->spec].length, segs->ds_len);
1929 tpd->spec++;
1930 segs++;
1931 }
1932}
1933
1934/*
1935 * Start output.
1936 *
1937 * Note that we update the internal statistics without the lock here.
1938 */
1939static int
1940fatm_tx(struct fatm_softc *sc, struct mbuf *m, struct card_vcc *vc, u_int mlen)
1941{
1942 struct txqueue *q;
1943 u_int nblks;
1944 int error, aal, nsegs;
1945 struct tpd *tpd;
1946
1947 /*
1948 * Get a queue element.
1949 * If there isn't one - try to drain the transmit queue
1950 * We used to sleep here if that doesn't help, but we
1951 * should not sleep here, because we are called with locks.
1952 */
1953 q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.head);
1954
1955 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1956 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
1957 fatm_intr_drain_tx(sc);
1958 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1959 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
1960 if (sc->retry_tx) {
1961 sc->istats.tx_retry++;
1962 IF_PREPEND(&sc->ifp->if_snd, m);
1963 return (1);
1964 }
1965 sc->istats.tx_queue_full++;
1966 m_freem(m);
1967 return (0);
1968 }
1969 sc->istats.tx_queue_almost_full++;
1970 }
1971
1972 tpd = q->q.ioblk;
1973
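/*
 * The pseudo header was already picked up by fatm_start(); strip it
 * here so that only the payload is mapped for DMA.
 */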
1974 m->m_data += sizeof(struct atm_pseudohdr);
1975 m->m_len -= sizeof(struct atm_pseudohdr);
1976
1977#ifdef ENABLE_BPF
1978 if (!(vc->param.flags & ATMIO_FLAG_NG) &&
1979 vc->param.aal == ATMIO_AAL_5 &&
1980 (vc->param.flags & ATM_PH_LLCSNAP))
1981 BPF_MTAP(sc->ifp, m);
1982#endif
1983
1984 /* map the mbuf */
1985 error = bus_dmamap_load_mbuf(sc->tx_tag, q->map, m,
1986 fatm_tpd_load, tpd, BUS_DMA_NOWAIT);
1987 if (error) {
1988 sc->ifp->if_oerrors++;
1989 if_printf(sc->ifp, "mbuf load error=%d\n", error);
1990 m_freem(m);
1991 return (0);
1992 }
1993 nsegs = tpd->spec;
1994
1995 bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_PREWRITE);
1996
1997 /*
1998 * OK. Now go and do it.
1999 */
2000 aal = (vc->param.aal == ATMIO_AAL_5) ? 5 : 0;
2001
2002 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
2003 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
2004 q->m = m;
2005
2006 /*
2007 * If the transmit queue is almost full, schedule a
2008 * transmit interrupt so that transmit descriptors can
2009 * be recycled.
2010 */
2011 H_SETDESC(tpd->spec, TDX_MKSPEC((sc->txcnt >=
2012 (4 * FATM_TX_QLEN) / 5), aal, nsegs, mlen));
2013 H_SETDESC(tpd->atm_header, TDX_MKHDR(vc->param.vpi,
2014 vc->param.vci, 0, 0));
2015
2016 if (vc->param.traffic == ATMIO_TRAFFIC_UBR)
2017 H_SETDESC(tpd->stream, 0);
2018 else {
2019 u_int i;
2020
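/*
 * The rate table is assumed to be sorted by decreasing cell rate: pick
 * the last entry whose rate is still at least the requested PCR.
 */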
2021 for (i = 0; i < RATE_TABLE_SIZE; i++)
2022 if (rate_table[i].cell_rate < vc->param.tparam.pcr)
2023 break;
2024 if (i > 0)
2025 i--;
2026 H_SETDESC(tpd->stream, rate_table[i].ratio);
2027 }
2028 H_SYNCQ_PREWRITE(&sc->txq_mem, tpd, TPD_SIZE);
2029
2030 nblks = TDX_SEGS2BLKS(nsegs);
2031
2032 DBG(sc, XMIT, ("XMIT: mlen=%d spec=0x%x nsegs=%d blocks=%d",
2033 mlen, le32toh(tpd->spec), nsegs, nblks));
2034
2035 WRITE4(sc, q->q.card + 0, q->q.card_ioblk | nblks);
2036 BARRIER_W(sc);
2037
2038 sc->txcnt++;
2039 sc->ifp->if_opackets++;
2040 vc->obytes += m->m_pkthdr.len;
2041 vc->opackets++;
2042
2043 NEXT_QUEUE_ENTRY(sc->txqueue.head, FATM_TX_QLEN);
2044
2045 return (0);
2046}
2047
2048static void
2049fatm_start(struct ifnet *ifp)
2050{
2051 struct atm_pseudohdr aph;
2052 struct fatm_softc *sc;
2053 struct mbuf *m;
2054 u_int mlen, vpi, vci;
2055 struct card_vcc *vc;
2056
2057 sc = ifp->if_softc;
2058
2059 while (1) {
2060 IF_DEQUEUE(&ifp->if_snd, m);
2061 if (m == NULL)
2062 break;
2063
2064 /*
2065 * Loop through the mbuf chain and compute the total length
2066 * of the packet. Check that all data pointers are
2067 * 4-byte aligned. If they are not, call fatm_fix_chain to
2068 * fix that problem. This comes more or less from the
2069 * en driver.
2070 */
2071 mlen = fatm_fix_chain(sc, &m);
2072 if (m == NULL)
2073 continue;
2074
2075 if (m->m_len < sizeof(struct atm_pseudohdr) &&
2076 (m = m_pullup(m, sizeof(struct atm_pseudohdr))) == NULL)
2077 continue;
2078
2079 aph = *mtod(m, struct atm_pseudohdr *);
2080 mlen -= sizeof(struct atm_pseudohdr);
2081
2082 if (mlen == 0) {
2083 m_freem(m);
2084 continue;
2085 }
2086 if (mlen > FATM_MAXPDU) {
2087 sc->istats.tx_pdu2big++;
2088 m_freem(m);
2089 continue;
2090 }
2091
2092 vci = ATM_PH_VCI(&aph);
2093 vpi = ATM_PH_VPI(&aph);
2094
2095 /*
2096 * From here on we need the softc
2097 */
2098 FATM_LOCK(sc);
2099 if (!(ifp->if_flags & IFF_RUNNING)) {
2099 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2100 FATM_UNLOCK(sc);
2101 m_freem(m);
2102 break;
2103 }
2104 if (!VC_OK(sc, vpi, vci) || (vc = sc->vccs[vci]) == NULL ||
2105 !(vc->vflags & FATM_VCC_OPEN)) {
2106 FATM_UNLOCK(sc);
2107 m_freem(m);
2108 continue;
2109 }
2110 if (fatm_tx(sc, m, vc, mlen)) {
2111 FATM_UNLOCK(sc);
2112 break;
2113 }
2114 FATM_UNLOCK(sc);
2115 }
2116}
2117
2118/*
2119 * VCC management
2120 *
2121 * This may seem complicated. The reason is that we need an
2122 * asynchronous open/close for the NATM VCCs because our ioctl handler
2123 * is called with the radix node head of the routing table locked. Therefore
2124 * we cannot sleep there and wait for the open/close to succeed. For this
2125 * reason we just initiate the operation from the ioctl.
2126 */
2127
2128/*
2129 * Command the card to open/close a VC.
2130 * Return the queue entry for waiting if we are successful.
2131 */
2132static struct cmdqueue *
2133fatm_start_vcc(struct fatm_softc *sc, u_int vpi, u_int vci, uint32_t cmd,
2134 u_int mtu, void (*func)(struct fatm_softc *, struct cmdqueue *))
2135{
2136 struct cmdqueue *q;
2137
2138 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
2139
2140 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2141 if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
2142 sc->istats.cmd_queue_full++;
2143 return (NULL);
2144 }
2145 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
2146
2147 q->error = 0;
2148 q->cb = func;
2149 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
2150 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
2151
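/*
 * The parameters are written first and the OP word last, since writing
 * the OP word is what triggers processing of the command.
 */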
2152 WRITE4(sc, q->q.card + FATMOC_ACTIN_VPVC, MKVPVC(vpi, vci));
2153 BARRIER_W(sc);
2154 WRITE4(sc, q->q.card + FATMOC_ACTIN_MTU, mtu);
2155 BARRIER_W(sc);
2156 WRITE4(sc, q->q.card + FATMOC_OP, cmd);
2157 BARRIER_W(sc);
2158
2159 return (q);
2160}
2161
2162/*
2163 * The VC has been opened/closed and somebody has been waiting for this.
2164 * Wake him up.
2165 */
2166static void
2167fatm_cmd_complete(struct fatm_softc *sc, struct cmdqueue *q)
2168{
2169
2170 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2171 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2172 sc->istats.get_stat_errors++;
2173 q->error = EIO;
2174 }
2175 wakeup(q);
2176}
2177
2178/*
2179 * Open complete
2180 */
2181static void
2182fatm_open_finish(struct fatm_softc *sc, struct card_vcc *vc)
2183{
2184 vc->vflags &= ~FATM_VCC_TRY_OPEN;
2185 vc->vflags |= FATM_VCC_OPEN;
2186
2187 if (vc->vflags & FATM_VCC_REOPEN) {
2188 vc->vflags &= ~FATM_VCC_REOPEN;
2189 return;
2190 }
2191
2192 /* inform management if this is not an NG
2193 * VCC or it's an NG PVC. */
2194 if (!(vc->param.flags & ATMIO_FLAG_NG) ||
2195 (vc->param.flags & ATMIO_FLAG_PVC))
2196 ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), 0, vc->param.vci, 1);
2197}
2198
2199/*
2200 * The VC that we have tried to open asynchronously has been opened.
2201 */
2202static void
2203fatm_open_complete(struct fatm_softc *sc, struct cmdqueue *q)
2204{
2205 u_int vci;
2206 struct card_vcc *vc;
2207
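	/*
	 * Read the VPVC argument back from the command queue entry on
	 * the card to recover which VCI this completion belongs to.
	 */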
2208 vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
2209 vc = sc->vccs[vci];
2210 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2211 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2212 sc->istats.get_stat_errors++;
2213 sc->vccs[vci] = NULL;
2214 uma_zfree(sc->vcc_zone, vc);
2215 if_printf(sc->ifp, "opening VCI %u failed\n", vci);
2216 return;
2217 }
2218 fatm_open_finish(sc, vc);
2219}
2220
2221/*
2222 * Wait on the queue entry until the VCC is opened/closed.
2223 */
2224static int
2225fatm_waitvcc(struct fatm_softc *sc, struct cmdqueue *q)
2226{
2227 int error;
2228
2229 /*
2230 * Wait for the command to complete
2231 */
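	/*
	 * The matching wakeup(q) is issued by the completion callback
	 * (fatm_cmd_complete) once the card reports the command done;
	 * PCATCH makes the sleep interruptible and the hz timeout
	 * guards against a wedged card.
	 */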
2232 error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_vci", hz);
2233
2234 if (error != 0)
2235 return (error);
2236 return (q->error);
2237}
2238
2239/*
2240 * Start to open a VCC. This just initiates the operation.
2241 */
2242static int
2243fatm_open_vcc(struct fatm_softc *sc, struct atmio_openvcc *op)
2244{
2245 int error;
2246 struct card_vcc *vc;
2247
2248 /*
2249 * Check parameters
2250 */
2251 if ((op->param.flags & ATMIO_FLAG_NOTX) &&
2252 (op->param.flags & ATMIO_FLAG_NORX))
2253 return (EINVAL);
2254
2255 if (!VC_OK(sc, op->param.vpi, op->param.vci))
2256 return (EINVAL);
2257 if (op->param.aal != ATMIO_AAL_0 && op->param.aal != ATMIO_AAL_5)
2258 return (EINVAL);
2259
2260 vc = uma_zalloc(sc->vcc_zone, M_NOWAIT | M_ZERO);
2261 if (vc == NULL)
2262 return (ENOMEM);
2263
2264 error = 0;
2265
2266 FATM_LOCK(sc);
2267 if (!(sc->ifp->if_flags & IFF_RUNNING)) {
2267 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2268 error = EIO;
2269 goto done;
2270 }
2271 if (sc->vccs[op->param.vci] != NULL) {
2272 error = EBUSY;
2273 goto done;
2274 }
2275 vc->param = op->param;
2276 vc->rxhand = op->rxhand;
2277
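	/*
	 * Only UBR and CBR traffic are accepted; CBR additionally
	 * requires 0 < pcr <= the link rate from the MIB.
	 */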
2278 switch (op->param.traffic) {
2279
2280 case ATMIO_TRAFFIC_UBR:
2281 break;
2282
2283 case ATMIO_TRAFFIC_CBR:
2284 if (op->param.tparam.pcr == 0 ||
2285 op->param.tparam.pcr > IFP2IFATM(sc->ifp)->mib.pcr) {
2286 error = EINVAL;
2287 goto done;
2288 }
2289 break;
2290
2291 default:
2292 error = EINVAL;
2293 goto done;
2294 }
2295 vc->ibytes = vc->obytes = 0;
2296 vc->ipackets = vc->opackets = 0;
2297
2298 vc->vflags = FATM_VCC_TRY_OPEN;
2299 sc->vccs[op->param.vci] = vc;
2300 sc->open_vccs++;
2301
2302 error = fatm_load_vc(sc, vc);
2303 if (error != 0) {
2304 sc->vccs[op->param.vci] = NULL;
2305 sc->open_vccs--;
2306 goto done;
2307 }
2308
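	/*
	 * On success ownership of the structure has passed to the
	 * vccs[] table, so clear the local pointer to keep the common
	 * exit path below from freeing it.
	 */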
2309 /* don't free below */
2310 vc = NULL;
2311
2312 done:
2313 FATM_UNLOCK(sc);
2314 if (vc != NULL)
2315 uma_zfree(sc->vcc_zone, vc);
2316 return (error);
2317}
2318
2319/*
2320 * Try to initialize the given VC
2321 */
2322static int
2323fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc)
2324{
2325 uint32_t cmd;
2326 struct cmdqueue *q;
2327 int error;
2328
2329 /* Command and buffer strategy */
2330 cmd = FATM_OP_ACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL | (0 << 16);
2331 if (vc->param.aal == ATMIO_AAL_0)
2332 cmd |= (0 << 8);
2333 else
2334 cmd |= (5 << 8);
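	/*
	 * Bits 8-15 of the command select the AAL type (0 for AAL0,
	 * 5 for AAL5); the buffer strategy field in the upper bits
	 * stays zero here.
	 */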
2335
2336 q = fatm_start_vcc(sc, vc->param.vpi, vc->param.vci, cmd, 1,
2337 (vc->param.flags & ATMIO_FLAG_ASYNC) ?
2338 fatm_open_complete : fatm_cmd_complete);
2339 if (q == NULL)
2340 return (EIO);
2341
2342 if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) {
2343 error = fatm_waitvcc(sc, q);
2344 if (error != 0)
2345 return (error);
2346 fatm_open_finish(sc, vc);
2347 }
2348 return (0);
2349}
2350
2351/*
2352 * Finish close
2353 */
2354static void
2355fatm_close_finish(struct fatm_softc *sc, struct card_vcc *vc)
2356{
2357	 /* inform management if this is not an NG
2358 * VCC or it's an NG PVC. */
2359 if (!(vc->param.flags & ATMIO_FLAG_NG) ||
2360 (vc->param.flags & ATMIO_FLAG_PVC))
2361 ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), 0, vc->param.vci, 0);
2362
2363 sc->vccs[vc->param.vci] = NULL;
2364 sc->open_vccs--;
2365
2366 uma_zfree(sc->vcc_zone, vc);
2367}
2368
2369/*
2370 * The VC has been closed.
2371 */
2372static void
2373fatm_close_complete(struct fatm_softc *sc, struct cmdqueue *q)
2374{
2375 u_int vci;
2376 struct card_vcc *vc;
2377
2378 vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
2379 vc = sc->vccs[vci];
2380 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2381 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2382 sc->istats.get_stat_errors++;
2383 /* keep the VCC in that state */
2384 if_printf(sc->ifp, "closing VCI %u failed\n", vci);
2385 return;
2386 }
2387
2388 fatm_close_finish(sc, vc);
2389}
2390
2391/*
2392 * Initiate closing a VCC
2393 */
2394static int
2395fatm_close_vcc(struct fatm_softc *sc, struct atmio_closevcc *cl)
2396{
2397 int error;
2398 struct cmdqueue *q;
2399 struct card_vcc *vc;
2400
2401 if (!VC_OK(sc, cl->vpi, cl->vci))
2402 return (EINVAL);
2403
2404 error = 0;
2405
2406 FATM_LOCK(sc);
2407 if (!(sc->ifp->if_flags & IFF_RUNNING)) {
2407 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2408 error = EIO;
2409 goto done;
2410 }
2411 vc = sc->vccs[cl->vci];
2412 if (vc == NULL || !(vc->vflags & (FATM_VCC_OPEN | FATM_VCC_TRY_OPEN))) {
2413 error = ENOENT;
2414 goto done;
2415 }
2416
2417 q = fatm_start_vcc(sc, cl->vpi, cl->vci,
2418 FATM_OP_DEACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL, 1,
2419 (vc->param.flags & ATMIO_FLAG_ASYNC) ?
2420 fatm_close_complete : fatm_cmd_complete);
2421 if (q == NULL) {
2422 error = EIO;
2423 goto done;
2424 }
2425
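	/*
	 * Take the VCC out of the OPEN state so that concurrent
	 * transmits stop using it while the close is in progress.
	 */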
2426 vc->vflags &= ~(FATM_VCC_OPEN | FATM_VCC_TRY_OPEN);
2427 vc->vflags |= FATM_VCC_TRY_CLOSE;
2428
2429 if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) {
2430 error = fatm_waitvcc(sc, q);
2431 if (error != 0)
2432 goto done;
2433
2434 fatm_close_finish(sc, vc);
2435 }
2436
2437 done:
2438 FATM_UNLOCK(sc);
2439 return (error);
2440}
2441
2442/*
2443 * IOCTL handler
2444 */
2445static int
2446fatm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t arg)
2447{
2448 int error;
2449 struct fatm_softc *sc = ifp->if_softc;
2450 struct ifaddr *ifa = (struct ifaddr *)arg;
2451 struct ifreq *ifr = (struct ifreq *)arg;
2452 struct atmio_closevcc *cl = (struct atmio_closevcc *)arg;
2453 struct atmio_openvcc *op = (struct atmio_openvcc *)arg;
2454 struct atmio_vcctable *vtab;
2455
2456 error = 0;
2457 switch (cmd) {
2458
2459 case SIOCATMOPENVCC: /* kernel internal use */
2460 error = fatm_open_vcc(sc, op);
2461 break;
2462
2463 case SIOCATMCLOSEVCC: /* kernel internal use */
2464 error = fatm_close_vcc(sc, cl);
2465 break;
2466
2467 case SIOCSIFADDR:
2468 FATM_LOCK(sc);
2469 ifp->if_flags |= IFF_UP;
2470 if (!(ifp->if_flags & IFF_RUNNING))
2470 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2471 fatm_init_locked(sc);
2472 switch (ifa->ifa_addr->sa_family) {
2473#ifdef INET
2474 case AF_INET:
2475 case AF_INET6:
2476 ifa->ifa_rtrequest = atm_rtrequest;
2477 break;
2478#endif
2479 default:
2480 break;
2481 }
2482 FATM_UNLOCK(sc);
2483 break;
2484
2485 case SIOCSIFFLAGS:
2486 FATM_LOCK(sc);
2487 if (ifp->if_flags & IFF_UP) {
2488 if (!(ifp->if_flags & IFF_RUNNING)) {
2488 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2489 fatm_init_locked(sc);
2490 }
2491 } else {
2492 if (ifp->if_flags & IFF_RUNNING) {
2492 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2493 fatm_stop(sc);
2494 }
2495 }
2496 FATM_UNLOCK(sc);
2497 break;
2498
2499 case SIOCGIFMEDIA:
2500 case SIOCSIFMEDIA:
2501 if (ifp->if_flags & IFF_RUNNING)
2501 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2502 error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
2503 else
2504 error = EINVAL;
2505 break;
2506
2507 case SIOCATMGVCCS:
2508 /* return vcc table */
2509 vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
2510 FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 1);
2511 error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
2512 vtab->count * sizeof(vtab->vccs[0]));
2513 free(vtab, M_DEVBUF);
2514 break;
2515
2516 case SIOCATMGETVCCS: /* internal netgraph use */
2517 vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
2518 FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 0);
2519 if (vtab == NULL) {
2520 error = ENOMEM;
2521 break;
2522 }
2523 *(void **)arg = vtab;
2524 break;
2525
2526 default:
2527 DBG(sc, IOCTL, ("+++ cmd=%08lx arg=%p", cmd, arg));
2528 error = EINVAL;
2529 break;
2530 }
2531
2532 return (error);
2533}
2534
2535/*
2536 * Detach from the interface and free all resources allocated during
2537 * initialisation and later.
2538 */
2539static int
2540fatm_detach(device_t dev)
2541{
2542 u_int i;
2543 struct rbuf *rb;
2544 struct fatm_softc *sc;
2545 struct txqueue *tx;
2546
2547 sc = device_get_softc(dev);
2548
2549 if (device_is_alive(dev)) {
2550 FATM_LOCK(sc);
2551 fatm_stop(sc);
2552 utopia_detach(&sc->utopia);
2553 FATM_UNLOCK(sc);
2554 atm_ifdetach(sc->ifp); /* XXX race */
2555 }
2556
2557 if (sc->ih != NULL)
2558 bus_teardown_intr(dev, sc->irqres, sc->ih);
2559
2560 while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
2561 if_printf(sc->ifp, "rbuf %p still in use!\n", rb);
2562 bus_dmamap_unload(sc->rbuf_tag, rb->map);
2563 m_freem(rb->m);
2564 LIST_REMOVE(rb, link);
2565 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
2566 }
2567
2568 if (sc->txqueue.chunk != NULL) {
2569 for (i = 0; i < FATM_TX_QLEN; i++) {
2570 tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
2571 bus_dmamap_destroy(sc->tx_tag, tx->map);
2572 }
2573 }
2574
2575 while ((rb = LIST_FIRST(&sc->rbuf_free)) != NULL) {
2576 bus_dmamap_destroy(sc->rbuf_tag, rb->map);
2577 LIST_REMOVE(rb, link);
2578 }
2579
2580 if (sc->rbufs != NULL)
2581 free(sc->rbufs, M_DEVBUF);
2582 if (sc->vccs != NULL) {
2583 for (i = 0; i < FORE_MAX_VCC + 1; i++)
2584 if (sc->vccs[i] != NULL) {
2585 uma_zfree(sc->vcc_zone, sc->vccs[i]);
2586 sc->vccs[i] = NULL;
2587 }
2588 free(sc->vccs, M_DEVBUF);
2589 }
2590 if (sc->vcc_zone != NULL)
2591 uma_zdestroy(sc->vcc_zone);
2592
2593 if (sc->l1queue.chunk != NULL)
2594 free(sc->l1queue.chunk, M_DEVBUF);
2595 if (sc->s1queue.chunk != NULL)
2596 free(sc->s1queue.chunk, M_DEVBUF);
2597 if (sc->rxqueue.chunk != NULL)
2598 free(sc->rxqueue.chunk, M_DEVBUF);
2599 if (sc->txqueue.chunk != NULL)
2600 free(sc->txqueue.chunk, M_DEVBUF);
2601 if (sc->cmdqueue.chunk != NULL)
2602 free(sc->cmdqueue.chunk, M_DEVBUF);
2603
2604 destroy_dma_memory(&sc->reg_mem);
2605 destroy_dma_memory(&sc->sadi_mem);
2606 destroy_dma_memory(&sc->prom_mem);
2607#ifdef TEST_DMA_SYNC
2608 destroy_dma_memoryX(&sc->s1q_mem);
2609 destroy_dma_memoryX(&sc->l1q_mem);
2610 destroy_dma_memoryX(&sc->rxq_mem);
2611 destroy_dma_memoryX(&sc->txq_mem);
2612 destroy_dma_memoryX(&sc->stat_mem);
2613#endif
2614
2615 if (sc->tx_tag != NULL)
2616 if (bus_dma_tag_destroy(sc->tx_tag))
2617 printf("tx DMA tag busy!\n");
2618
2619 if (sc->rbuf_tag != NULL)
2620 if (bus_dma_tag_destroy(sc->rbuf_tag))
2621 printf("rbuf DMA tag busy!\n");
2622
2623 if (sc->parent_dmat != NULL)
2624 if (bus_dma_tag_destroy(sc->parent_dmat))
2625 printf("parent DMA tag busy!\n");
2626
2627 if (sc->irqres != NULL)
2628 bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irqres);
2629
2630 if (sc->memres != NULL)
2631 bus_release_resource(dev, SYS_RES_MEMORY,
2632 sc->memid, sc->memres);
2633
2634 (void)sysctl_ctx_free(&sc->sysctl_ctx);
2635
2636 cv_destroy(&sc->cv_stat);
2637 cv_destroy(&sc->cv_regs);
2638
2639 mtx_destroy(&sc->mtx);
2640
2641 if_free(sc->ifp);
2642
2643 return (0);
2644}
2645
2646/*
2647 * Sysctl handler
2648 */
2649static int
2650fatm_sysctl_istats(SYSCTL_HANDLER_ARGS)
2651{
2652 struct fatm_softc *sc = arg1;
2653 u_long *ret;
2654 int error;
2655
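	/*
	 * Snapshot the counters under the lock; SYSCTL_OUT may fault
	 * on the user buffer, so it operates on the private copy after
	 * the lock is dropped.
	 */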
2656 ret = malloc(sizeof(sc->istats), M_TEMP, M_WAITOK);
2657
2658 FATM_LOCK(sc);
2659 bcopy(&sc->istats, ret, sizeof(sc->istats));
2660 FATM_UNLOCK(sc);
2661
2662 error = SYSCTL_OUT(req, ret, sizeof(sc->istats));
2663 free(ret, M_TEMP);
2664
2665 return (error);
2666}
2667
2668/*
2669 * Sysctl handler for card statistics
2670 * This is disabled because it destroys the PHY statistics.
2671 */
2672static int
2673fatm_sysctl_stats(SYSCTL_HANDLER_ARGS)
2674{
2675 struct fatm_softc *sc = arg1;
2676 int error;
2677 const struct fatm_stats *s;
2678 u_long *ret;
2679 u_int i;
2680
2681 ret = malloc(sizeof(u_long) * FATM_NSTATS, M_TEMP, M_WAITOK);
2682
2683 FATM_LOCK(sc);
2684
2685 if ((error = fatm_getstat(sc)) == 0) {
2686 s = sc->sadi_mem.mem;
2687 i = 0;
2688 ret[i++] = s->phy_4b5b.crc_header_errors;
2689 ret[i++] = s->phy_4b5b.framing_errors;
2690 ret[i++] = s->phy_oc3.section_bip8_errors;
2691 ret[i++] = s->phy_oc3.path_bip8_errors;
2692 ret[i++] = s->phy_oc3.line_bip24_errors;
2693 ret[i++] = s->phy_oc3.line_febe_errors;
2694 ret[i++] = s->phy_oc3.path_febe_errors;
2695 ret[i++] = s->phy_oc3.corr_hcs_errors;
2696 ret[i++] = s->phy_oc3.ucorr_hcs_errors;
2697 ret[i++] = s->atm.cells_transmitted;
2698 ret[i++] = s->atm.cells_received;
2699 ret[i++] = s->atm.vpi_bad_range;
2700 ret[i++] = s->atm.vpi_no_conn;
2701 ret[i++] = s->atm.vci_bad_range;
2702 ret[i++] = s->atm.vci_no_conn;
2703 ret[i++] = s->aal0.cells_transmitted;
2704 ret[i++] = s->aal0.cells_received;
2705 ret[i++] = s->aal0.cells_dropped;
2706 ret[i++] = s->aal4.cells_transmitted;
2707 ret[i++] = s->aal4.cells_received;
2708 ret[i++] = s->aal4.cells_crc_errors;
2709 ret[i++] = s->aal4.cels_protocol_errors;
2710 ret[i++] = s->aal4.cells_dropped;
2711 ret[i++] = s->aal4.cspdus_transmitted;
2712 ret[i++] = s->aal4.cspdus_received;
2713 ret[i++] = s->aal4.cspdus_protocol_errors;
2714 ret[i++] = s->aal4.cspdus_dropped;
2715 ret[i++] = s->aal5.cells_transmitted;
2716 ret[i++] = s->aal5.cells_received;
2717 ret[i++] = s->aal5.congestion_experienced;
2718 ret[i++] = s->aal5.cells_dropped;
2719 ret[i++] = s->aal5.cspdus_transmitted;
2720 ret[i++] = s->aal5.cspdus_received;
2721 ret[i++] = s->aal5.cspdus_crc_errors;
2722 ret[i++] = s->aal5.cspdus_protocol_errors;
2723 ret[i++] = s->aal5.cspdus_dropped;
2724 ret[i++] = s->aux.small_b1_failed;
2725 ret[i++] = s->aux.large_b1_failed;
2726 ret[i++] = s->aux.small_b2_failed;
2727 ret[i++] = s->aux.large_b2_failed;
2728 ret[i++] = s->aux.rpd_alloc_failed;
2729 ret[i++] = s->aux.receive_carrier;
2730 }
2731 /* declare the buffer free */
2732 sc->flags &= ~FATM_STAT_INUSE;
2733 cv_signal(&sc->cv_stat);
2734
2735 FATM_UNLOCK(sc);
2736
2737 if (error == 0)
2738 error = SYSCTL_OUT(req, ret, sizeof(u_long) * FATM_NSTATS);
2739 free(ret, M_TEMP);
2740
2741 return (error);
2742}
2743
2744#define MAXDMASEGS 32 /* maximum number of receive descriptors */
2745
2746/*
2747 * Attach to the device.
2748 *
2749 * We assume that there is a global lock (Giant in this case) that protects
2750 * multiple threads from entering this function. This makes sense, doesn't it?
2751 */
2752static int
2753fatm_attach(device_t dev)
2754{
2755 struct ifnet *ifp;
2756 struct fatm_softc *sc;
2757 int unit;
2758 uint16_t cfg;
2759 int error = 0;
2760 struct rbuf *rb;
2761 u_int i;
2762 struct txqueue *tx;
2763
2764 sc = device_get_softc(dev);
2765 unit = device_get_unit(dev);
2766
2767 ifp = sc->ifp = if_alloc(IFT_ATM);
2768 if (ifp == NULL) {
2769 error = ENOSPC;
2770 goto fail;
2771 }
2772
2773 IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_PCA200E;
2774 IFP2IFATM(sc->ifp)->mib.serial = 0;
2775 IFP2IFATM(sc->ifp)->mib.hw_version = 0;
2776 IFP2IFATM(sc->ifp)->mib.sw_version = 0;
2777 IFP2IFATM(sc->ifp)->mib.vpi_bits = 0;
2778 IFP2IFATM(sc->ifp)->mib.vci_bits = FORE_VCIBITS;
2779 IFP2IFATM(sc->ifp)->mib.max_vpcs = 0;
2780 IFP2IFATM(sc->ifp)->mib.max_vccs = FORE_MAX_VCC;
2781 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN;
2782 IFP2IFATM(sc->ifp)->phy = &sc->utopia;
2783
2784 LIST_INIT(&sc->rbuf_free);
2785 LIST_INIT(&sc->rbuf_used);
2786
2787 /*
2788 * Initialize mutex and condition variables.
2789 */
2790 mtx_init(&sc->mtx, device_get_nameunit(dev),
2791 MTX_NETWORK_LOCK, MTX_DEF);
2792
2793 cv_init(&sc->cv_stat, "fatm_stat");
2794 cv_init(&sc->cv_regs, "fatm_regs");
2795
2796 sysctl_ctx_init(&sc->sysctl_ctx);
2797
2798 /*
2799 * Make the sysctl tree
2800 */
2801 if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
2802 SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
2803 device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
2804 goto fail;
2805
2806 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2807 OID_AUTO, "istats", CTLFLAG_RD, sc, 0, fatm_sysctl_istats,
2808 "LU", "internal statistics") == NULL)
2809 goto fail;
2810
2811 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2812 OID_AUTO, "stats", CTLFLAG_RD, sc, 0, fatm_sysctl_stats,
2813 "LU", "card statistics") == NULL)
2814 goto fail;
2815
2816 if (SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2817 OID_AUTO, "retry_tx", CTLFLAG_RW, &sc->retry_tx, 0,
2818 "retry flag") == NULL)
2819 goto fail;
2820
2821#ifdef FATM_DEBUG
2822 if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2823 OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "debug flags")
2824 == NULL)
2825 goto fail;
2826 sc->debug = FATM_DEBUG;
2827#endif
2828
2829 /*
2830 * Network subsystem stuff
2831 */
2832 ifp->if_softc = sc;
2833 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2834 ifp->if_flags = IFF_SIMPLEX;
2835 ifp->if_ioctl = fatm_ioctl;
2836 ifp->if_start = fatm_start;
2837 ifp->if_watchdog = fatm_watchdog;
2838 ifp->if_init = fatm_init;
2839 ifp->if_linkmib = &IFP2IFATM(sc->ifp)->mib;
2840 ifp->if_linkmiblen = sizeof(IFP2IFATM(sc->ifp)->mib);
2841
2842 /*
2843	 * Enable memory and busmaster
2844 */
2845 cfg = pci_read_config(dev, PCIR_COMMAND, 2);
2846 cfg |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
2847 pci_write_config(dev, PCIR_COMMAND, cfg, 2);
2848
2849 /*
2850 * Map memory
2851 */
2852 cfg = pci_read_config(dev, PCIR_COMMAND, 2);
2853 if (!(cfg & PCIM_CMD_MEMEN)) {
2854 if_printf(ifp, "failed to enable memory mapping\n");
2855 error = ENXIO;
2856 goto fail;
2857 }
2858 sc->memid = 0x10;
2859 sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->memid,
2860 RF_ACTIVE);
2861 if (sc->memres == NULL) {
2862 if_printf(ifp, "could not map memory\n");
2863 error = ENXIO;
2864 goto fail;
2865 }
2866 sc->memh = rman_get_bushandle(sc->memres);
2867 sc->memt = rman_get_bustag(sc->memres);
2868
2869 /*
2870	 * Convert endianness of slave access
2871 */
2872 cfg = pci_read_config(dev, FATM_PCIR_MCTL, 1);
2873 cfg |= FATM_PCIM_SWAB;
2874 pci_write_config(dev, FATM_PCIR_MCTL, cfg, 1);
2875
2876 /*
2877 * Allocate interrupt (activate at the end)
2878 */
2879 sc->irqid = 0;
2880 sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
2881 RF_SHAREABLE | RF_ACTIVE);
2882 if (sc->irqres == NULL) {
2883 if_printf(ifp, "could not allocate irq\n");
2884 error = ENXIO;
2885 goto fail;
2886 }
2887
2888 /*
2889 * Allocate the parent DMA tag. This is used simply to hold overall
2890 * restrictions for the controller (and PCI bus) and is never used
2891 * to do anything.
2892 */
2893 if (bus_dma_tag_create(NULL, 1, 0,
2894 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2895 NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, MAXDMASEGS,
2896 BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
2897 &sc->parent_dmat)) {
2898 if_printf(ifp, "could not allocate parent DMA tag\n");
2899 error = ENOMEM;
2900 goto fail;
2901 }
2902
2903 /*
2904	 * Allocate the receive buffer DMA tag. This tag must map at most
2905	 * an mbuf cluster.
2906 */
2907 if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
2908 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2909 NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
2910 NULL, NULL, &sc->rbuf_tag)) {
2911 if_printf(ifp, "could not allocate rbuf DMA tag\n");
2912 error = ENOMEM;
2913 goto fail;
2914 }
2915
2916 /*
2917	 * Allocate the transmission DMA tag. Must add 1, because the
2918	 * rounded-up PDU will be 65536 bytes long.
2919 */
2920 if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
2921 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2922 NULL, NULL,
2923 FATM_MAXPDU + 1, TPD_EXTENSIONS + TXD_FIXED, MCLBYTES, 0,
2924 NULL, NULL, &sc->tx_tag)) {
2925 if_printf(ifp, "could not allocate tx DMA tag\n");
2926 error = ENOMEM;
2927 goto fail;
2928 }
2929
2930 /*
2931 * Allocate DMAable memory.
2932 */
2933 sc->stat_mem.size = sizeof(uint32_t) * (FATM_CMD_QLEN + FATM_TX_QLEN
2934 + FATM_RX_QLEN + SMALL_SUPPLY_QLEN + LARGE_SUPPLY_QLEN);
2935 sc->stat_mem.align = 4;
2936
2937 sc->txq_mem.size = FATM_TX_QLEN * TPD_SIZE;
2938 sc->txq_mem.align = 32;
2939
2940 sc->rxq_mem.size = FATM_RX_QLEN * RPD_SIZE;
2941 sc->rxq_mem.align = 32;
2942
2943 sc->s1q_mem.size = SMALL_SUPPLY_QLEN *
2944 BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE);
2945 sc->s1q_mem.align = 32;
2946
2947 sc->l1q_mem.size = LARGE_SUPPLY_QLEN *
2948 BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE);
2949 sc->l1q_mem.align = 32;
2950
2951#ifdef TEST_DMA_SYNC
2952 if ((error = alloc_dma_memoryX(sc, "STATUS", &sc->stat_mem)) != 0 ||
2953 (error = alloc_dma_memoryX(sc, "TXQ", &sc->txq_mem)) != 0 ||
2954 (error = alloc_dma_memoryX(sc, "RXQ", &sc->rxq_mem)) != 0 ||
2955 (error = alloc_dma_memoryX(sc, "S1Q", &sc->s1q_mem)) != 0 ||
2956 (error = alloc_dma_memoryX(sc, "L1Q", &sc->l1q_mem)) != 0)
2957 goto fail;
2958#else
2959 if ((error = alloc_dma_memory(sc, "STATUS", &sc->stat_mem)) != 0 ||
2960 (error = alloc_dma_memory(sc, "TXQ", &sc->txq_mem)) != 0 ||
2961 (error = alloc_dma_memory(sc, "RXQ", &sc->rxq_mem)) != 0 ||
2962 (error = alloc_dma_memory(sc, "S1Q", &sc->s1q_mem)) != 0 ||
2963 (error = alloc_dma_memory(sc, "L1Q", &sc->l1q_mem)) != 0)
2964 goto fail;
2965#endif
2966
2967 sc->prom_mem.size = sizeof(struct prom);
2968 sc->prom_mem.align = 32;
2969 if ((error = alloc_dma_memory(sc, "PROM", &sc->prom_mem)) != 0)
2970 goto fail;
2971
2972 sc->sadi_mem.size = sizeof(struct fatm_stats);
2973 sc->sadi_mem.align = 32;
2974 if ((error = alloc_dma_memory(sc, "STATISTICS", &sc->sadi_mem)) != 0)
2975 goto fail;
2976
2977 sc->reg_mem.size = sizeof(uint32_t) * FATM_NREGS;
2978 sc->reg_mem.align = 32;
2979 if ((error = alloc_dma_memory(sc, "REGISTERS", &sc->reg_mem)) != 0)
2980 goto fail;
2981
2982 /*
2983 * Allocate queues
2984 */
2985 sc->cmdqueue.chunk = malloc(FATM_CMD_QLEN * sizeof(struct cmdqueue),
2986 M_DEVBUF, M_ZERO | M_WAITOK);
2987 sc->txqueue.chunk = malloc(FATM_TX_QLEN * sizeof(struct txqueue),
2988 M_DEVBUF, M_ZERO | M_WAITOK);
2989 sc->rxqueue.chunk = malloc(FATM_RX_QLEN * sizeof(struct rxqueue),
2990 M_DEVBUF, M_ZERO | M_WAITOK);
2991 sc->s1queue.chunk = malloc(SMALL_SUPPLY_QLEN * sizeof(struct supqueue),
2992 M_DEVBUF, M_ZERO | M_WAITOK);
2993 sc->l1queue.chunk = malloc(LARGE_SUPPLY_QLEN * sizeof(struct supqueue),
2994 M_DEVBUF, M_ZERO | M_WAITOK);
2995
2996 sc->vccs = malloc((FORE_MAX_VCC + 1) * sizeof(sc->vccs[0]),
2997 M_DEVBUF, M_ZERO | M_WAITOK);
2998 sc->vcc_zone = uma_zcreate("FATM vccs", sizeof(struct card_vcc),
2999 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
3000 if (sc->vcc_zone == NULL) {
3001 error = ENOMEM;
3002 goto fail;
3003 }
3004
3005 /*
3006 * Allocate memory for the receive buffer headers. The total number
3007 * of headers should probably also include the maximum number of
3008 * buffers on the receive queue.
3009 */
3010 sc->rbuf_total = SMALL_POOL_SIZE + LARGE_POOL_SIZE;
3011 sc->rbufs = malloc(sc->rbuf_total * sizeof(struct rbuf),
3012 M_DEVBUF, M_ZERO | M_WAITOK);
3013
3014 /*
3015 * Put all rbuf headers on the free list and create DMA maps.
3016 */
3017 for (rb = sc->rbufs, i = 0; i < sc->rbuf_total; i++, rb++) {
3018 if ((error = bus_dmamap_create(sc->rbuf_tag, 0, &rb->map))) {
3019 if_printf(sc->ifp, "creating rx map: %d\n",
3020 error);
3021 goto fail;
3022 }
3023 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
3024 }
3025
3026 /*
3027 * Create dma maps for transmission. In case of an error, free the
3028 * allocated DMA maps, because on some architectures maps are NULL
3029 * and we cannot distinguish between a failure and a NULL map in
3030 * the detach routine.
3031 */
3032 for (i = 0; i < FATM_TX_QLEN; i++) {
3033 tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
3034 if ((error = bus_dmamap_create(sc->tx_tag, 0, &tx->map))) {
3035 if_printf(sc->ifp, "creating tx map: %d\n",
3036 error);
3037 while (i > 0) {
3038 tx = GET_QUEUE(sc->txqueue, struct txqueue,
3039 i - 1);
3040 bus_dmamap_destroy(sc->tx_tag, tx->map);
3041 i--;
3042 }
3043 goto fail;
3044 }
3045 }
3046
3047 utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->mtx,
3048 &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
3049 &fatm_utopia_methods);
3050 sc->utopia.flags |= UTP_FL_NORESET | UTP_FL_POLL_CARRIER;
3051
3052 /*
3053 * Attach the interface
3054 */
3055 atm_ifattach(ifp);
3056 ifp->if_snd.ifq_maxlen = 512;
3057
3058#ifdef ENABLE_BPF
3059 bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
3060#endif
3061
3062 error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET,
3063 fatm_intr, sc, &sc->ih);
3064 if (error) {
3065 if_printf(ifp, "couldn't setup irq\n");
3066 goto fail;
3067 }
3068
3069 fail:
3070 if (error)
3071 fatm_detach(dev);
3072
3073 return (error);
3074}
3075
3076#if defined(FATM_DEBUG) && 0
3077static void
3078dump_s1_queue(struct fatm_softc *sc)
3079{
3080 int i;
3081 struct supqueue *q;
3082
3083 for(i = 0; i < SMALL_SUPPLY_QLEN; i++) {
3084 q = GET_QUEUE(sc->s1queue, struct supqueue, i);
3085 printf("%2d: card=%x(%x,%x) stat=%x\n", i,
3086 q->q.card,
3087 READ4(sc, q->q.card),
3088 READ4(sc, q->q.card + 4),
3089 *q->q.statp);
3090 }
3091}
3092#endif
3093
3094/*
3095 * Driver infrastructure.
3096 */
3097static device_method_t fatm_methods[] = {
3098 DEVMETHOD(device_probe, fatm_probe),
3099 DEVMETHOD(device_attach, fatm_attach),
3100 DEVMETHOD(device_detach, fatm_detach),
3101 { 0, 0 }
3102};
3103static driver_t fatm_driver = {
3104 "fatm",
3105 fatm_methods,
3106 sizeof(struct fatm_softc),
3107};
3108
3109DRIVER_MODULE(fatm, pci, fatm_driver, fatm_devclass, 0, 0);