if_fatm.c: diff between revisions 118168 (deleted lines) and 118208 (added lines)
1/*
2 * Copyright (c) 2001-2003
3 * Fraunhofer Institute for Open Communication Systems (FhG Fokus).
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * Author: Hartmut Brandt <harti@freebsd.org>
28 *
29 * Fore PCA200E driver for NATM
30 */
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/fatm/if_fatm.c 118168 2003-07-29 14:00:59Z harti $");
32__FBSDID("$FreeBSD: head/sys/dev/fatm/if_fatm.c 118208 2003-07-30 14:20:00Z harti $");
33
34#include "opt_inet.h"
35#include "opt_natm.h"
36
37#include <sys/types.h>
38#include <sys/param.h>
39#include <sys/systm.h>
40#include <sys/malloc.h>
41#include <sys/kernel.h>
42#include <sys/bus.h>
43#include <sys/errno.h>
44#include <sys/conf.h>
45#include <sys/module.h>
46#include <sys/queue.h>
47#include <sys/syslog.h>
48#include <sys/endian.h>
49#include <sys/sysctl.h>
50#include <sys/condvar.h>
51#include <vm/uma.h>
51
52#include <sys/sockio.h>
53#include <sys/mbuf.h>
54#include <sys/socket.h>
55
56#include <net/if.h>
57#include <net/if_media.h>
58#include <net/if_atm.h>
59#include <net/route.h>
60#ifdef INET
61#include <netinet/in.h>
62#include <netinet/if_atm.h>
63#endif
64
65#include <machine/bus.h>
66#include <machine/resource.h>
67#include <sys/bus.h>
68#include <sys/rman.h>
69#include <pci/pcireg.h>
70#include <pci/pcivar.h>
71
72#include <dev/utopia/utopia.h>
73
74#include <dev/fatm/if_fatmreg.h>
75#include <dev/fatm/if_fatmvar.h>
76
77#include <dev/fatm/firmware.h>
78
79devclass_t fatm_devclass;
80
81static const struct {
82 uint16_t vid;
83 uint16_t did;
84 const char *name;
85} fatm_devs[] = {
86 { 0x1127, 0x300,
87 "FORE PCA200E" },
88 { 0, 0, NULL }
89};
90
91static const struct rate {
92 uint32_t ratio;
93 uint32_t cell_rate;
94} rate_table[] = {
95#include <dev/fatm/if_fatm_rate.h>
96};
97#define RATE_TABLE_SIZE (sizeof(rate_table) / sizeof(rate_table[0]))
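/*
 * Illustration only (not part of the driver): one way such a table
 * could be searched for the entry matching a requested peak cell
 * rate, assuming the entries are sorted by decreasing cell_rate.
 */
static __inline const struct rate *
rate_lookup(uint32_t pcr)
{
	u_int i;

	for (i = 0; i < RATE_TABLE_SIZE; i++)
		if (rate_table[i].cell_rate <= pcr)
			return (&rate_table[i]);
	return (NULL);		/* slower than any table entry */
}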
98
99SYSCTL_DECL(_hw_atm);
100
101MODULE_DEPEND(fatm, utopia, 1, 1, 1);
102
103static int fatm_utopia_readregs(struct ifatm *, u_int, uint8_t *, u_int *);
104static int fatm_utopia_writereg(struct ifatm *, u_int, u_int, u_int);
105
106static const struct utopia_methods fatm_utopia_methods = {
107 fatm_utopia_readregs,
108 fatm_utopia_writereg
109};
110
111#define VC_OK(SC, VPI, VCI) \
112 (((VPI) & ~((1 << (SC)->ifatm.mib.vpi_bits) - 1)) == 0 && \
113 (VCI) != 0 && ((VCI) & ~((1 << (SC)->ifatm.mib.vci_bits) - 1)) == 0)
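/*
 * Example (hypothetical MIB values): with vpi_bits == 0 and vci_bits == 10
 * the macro accepts only VPI 0 together with a VCI in 1...1023, so
 * VC_OK(sc, 0, 32) is true while VC_OK(sc, 1, 32) and VC_OK(sc, 0, 0)
 * are both false.
 */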
114
115/*
 116 * Probing is easy: step through the list of known vendor and device
 117 * ids and compare. If one is found, it's ours.
118 */
119static int
120fatm_probe(device_t dev)
121{
122 int i;
123
124 for (i = 0; fatm_devs[i].name; i++)
125 if (pci_get_vendor(dev) == fatm_devs[i].vid &&
126 pci_get_device(dev) == fatm_devs[i].did) {
127 device_set_desc(dev, fatm_devs[i].name);
128 return (0);
129 }
130 return (ENXIO);
131}
132
133/*
134 * Function called at completion of a SUNI writeregs/readregs command.
135 * This is called from the interrupt handler while holding the softc lock.
 136 * We use the queue entry as the rendezvous point.
137 */
138static void
139fatm_utopia_writeregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
140{
141
142 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
 143 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
144 sc->istats.suni_reg_errors++;
145 q->error = EIO;
146 }
147 wakeup(q);
148}
149
150/*
151 * Write a SUNI register. The bits that are 1 in mask are written from val
152 * into register reg. We wait for the command to complete by sleeping on
153 * the register memory.
154 *
 155 * We assume that we already hold the softc mutex.
156 */
157static int
158fatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
159{
160 int error;
161 struct cmdqueue *q;
162 struct fatm_softc *sc;
163
164 sc = ifatm->ifnet.if_softc;
165 FATM_CHECKLOCK(sc);
166 if (!(ifatm->ifnet.if_flags & IFF_RUNNING))
167 return (EIO);
168
169 /* get queue element and fill it */
170 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
171
172 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
173 if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
174 sc->istats.cmd_queue_full++;
175 return (EIO);
176 }
177 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
178
179 q->error = 0;
180 q->cb = fatm_utopia_writeregs_complete;
181 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
182 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
183
184 WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, 0);
185 BARRIER_W(sc);
186 WRITE4(sc, q->q.card + FATMOC_OP,
187 FATM_MAKE_SETOC3(reg, val, mask) | FATM_OP_INTERRUPT_SEL);
188 BARRIER_W(sc);
189
190 /*
191 * Wait for the command to complete
192 */
193 error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_setreg", hz);
194
 195 switch (error) {
196
197 case EWOULDBLOCK:
198 error = EIO;
199 break;
200
201 case ERESTART:
202 error = EINTR;
203 break;
204
205 case 0:
206 error = q->error;
207 break;
208 }
209
210 return (error);
211}
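/*
 * Illustrative call (the register and values are made up): with the
 * softc mutex held, set bit 0 of SUNI register 0x05 while leaving the
 * other bits alone:
 *
 *	error = fatm_utopia_writereg(ifatm, 0x05, 0x01, 0x01);
 */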
212
213/*
214 * Function called at completion of a SUNI readregs command.
215 * This is called from the interrupt handler while holding the softc lock.
 216 * We use reg_mem as the rendezvous point.
217 */
218static void
219fatm_utopia_readregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
220{
221
222 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
223 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
224 sc->istats.suni_reg_errors++;
225 q->error = EIO;
226 }
227 wakeup(&sc->reg_mem);
228}
229
230/*
231 * Read SUNI registers
232 *
 233 * We use a preallocated buffer to read the registers. Therefore we need
 234 * to protect against multiple threads trying to read registers. We do
 235 * this with a condition variable and a flag. We wait for the command to
 236 * complete by sleeping on the register memory.
 237 *
 238 * We assume that we already hold the softc mutex.
239 */
240static int
241fatm_utopia_readregs_internal(struct fatm_softc *sc)
242{
243 int error, i;
244 uint32_t *ptr;
245 struct cmdqueue *q;
246
247 /* get the buffer */
248 for (;;) {
249 if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
250 return (EIO);
251 if (!(sc->flags & FATM_REGS_INUSE))
252 break;
253 cv_wait(&sc->cv_regs, &sc->mtx);
254 }
255 sc->flags |= FATM_REGS_INUSE;
256
257 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
258
259 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
260 if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
261 sc->istats.cmd_queue_full++;
262 return (EIO);
263 }
264 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
265
266 q->error = 0;
267 q->cb = fatm_utopia_readregs_complete;
268 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
269 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
270
271 bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map, BUS_DMASYNC_PREREAD);
272
273 WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, sc->reg_mem.paddr);
274 BARRIER_W(sc);
275 WRITE4(sc, q->q.card + FATMOC_OP,
276 FATM_OP_OC3_GET_REG | FATM_OP_INTERRUPT_SEL);
277 BARRIER_W(sc);
278
279 /*
280 * Wait for the command to complete
281 */
282 error = msleep(&sc->reg_mem, &sc->mtx, PZERO | PCATCH,
283 "fatm_getreg", hz);
284
 285 switch (error) {
286
287 case EWOULDBLOCK:
288 error = EIO;
289 break;
290
291 case ERESTART:
292 error = EINTR;
293 break;
294
295 case 0:
296 bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map,
297 BUS_DMASYNC_POSTREAD);
298 error = q->error;
299 break;
300 }
301
302 if (error != 0) {
303 /* declare buffer to be free */
304 sc->flags &= ~FATM_REGS_INUSE;
305 cv_signal(&sc->cv_regs);
306 return (error);
307 }
308
309 /* swap if needed */
310 ptr = (uint32_t *)sc->reg_mem.mem;
311 for (i = 0; i < FATM_NREGS; i++)
312 ptr[i] = le32toh(ptr[i]) & 0xff;
313
314 return (0);
315}
316
317/*
 318 * Read SUNI registers. This is the method called by the utopia module.
 319 *
 320 * We assume that we already hold the mutex.
321 */
322static int
323fatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *valp, u_int *np)
324{
325 int err;
326 int i;
327 struct fatm_softc *sc;
328
329 if (reg >= FATM_NREGS)
330 return (EINVAL);
331 if (reg + *np > FATM_NREGS)
332 *np = FATM_NREGS - reg;
333 sc = ifatm->ifnet.if_softc;
334 FATM_CHECKLOCK(sc);
335
336 err = fatm_utopia_readregs_internal(sc);
337 if (err != 0)
338 return (err);
339
340 for (i = 0; i < *np; i++)
341 valp[i] = ((uint32_t *)sc->reg_mem.mem)[reg + i];
342
343 /* declare buffer to be free */
344 sc->flags &= ~FATM_REGS_INUSE;
345 cv_signal(&sc->cv_regs);
346
347 return (0);
348}
349
350/*
 351 * Check whether the heart is beating. We remember the last heart beat and
 352 * compare it to the current one. If it appears stuck for 10 checks in a
 353 * row, we have a problem.
354 *
355 * Assume we hold the lock.
356 */
357static void
358fatm_check_heartbeat(struct fatm_softc *sc)
359{
360 uint32_t h;
361
362 FATM_CHECKLOCK(sc);
363
364 h = READ4(sc, FATMO_HEARTBEAT);
365 DBG(sc, BEAT, ("heartbeat %08x", h));
366
367 if (sc->stop_cnt == 10)
368 return;
369
370 if (h == sc->heartbeat) {
371 if (++sc->stop_cnt == 10) {
372 log(LOG_ERR, "i960 stopped???\n");
373 WRITE4(sc, FATMO_HIMR, 1);
374 }
375 return;
376 }
377
378 sc->stop_cnt = 0;
379 sc->heartbeat = h;
380}
381
382/*
383 * Ensure that the heart is still beating.
384 */
385static void
386fatm_watchdog(struct ifnet *ifp)
387{
388 struct fatm_softc *sc = ifp->if_softc;
389
390 FATM_LOCK(sc);
391 if (ifp->if_flags & IFF_RUNNING) {
392 fatm_check_heartbeat(sc);
393 ifp->if_timer = 5;
394 }
395 FATM_UNLOCK(sc);
396}
397
398/*
399 * Hard reset the i960 on the board. This is done by initializing registers,
 400 * clearing interrupts and waiting for the selftest to finish. Not sure
 401 * whether all these barriers are actually needed.
402 *
403 * Assumes that we hold the lock.
404 */
405static int
406fatm_reset(struct fatm_softc *sc)
407{
408 int w;
409 uint32_t val;
410
411 FATM_CHECKLOCK(sc);
412
413 WRITE4(sc, FATMO_APP_BASE, FATMO_COMMON_ORIGIN);
414 BARRIER_W(sc);
415
416 WRITE4(sc, FATMO_UART_TO_960, XMIT_READY);
417 BARRIER_W(sc);
418
419 WRITE4(sc, FATMO_UART_TO_HOST, XMIT_READY);
420 BARRIER_W(sc);
421
422 WRITE4(sc, FATMO_BOOT_STATUS, COLD_START);
423 BARRIER_W(sc);
424
425 WRITE1(sc, FATMO_HCR, FATM_HCR_RESET);
426 BARRIER_W(sc);
427
428 DELAY(1000);
429
430 WRITE1(sc, FATMO_HCR, 0);
431 BARRIER_RW(sc);
432
433 DELAY(1000);
434
435 for (w = 100; w; w--) {
436 BARRIER_R(sc);
437 val = READ4(sc, FATMO_BOOT_STATUS);
438 switch (val) {
439 case SELF_TEST_OK:
440 return (0);
441 case SELF_TEST_FAIL:
442 return (EIO);
443 }
444 DELAY(1000);
445 }
446 return (EIO);
447}
448
449/*
 450 * Stop the card. Must be called WITH the lock held.
 451 * Reset, free transmit and receive buffers. Wake up everybody that may sleep.
452 */
453static void
454fatm_stop(struct fatm_softc *sc)
455{
456 int i;
457 struct cmdqueue *q;
458 struct rbuf *rb;
459 struct txqueue *tx;
460 uint32_t stat;
461
462 FATM_CHECKLOCK(sc);
463
464 /* Stop the board */
465 utopia_stop(&sc->utopia);
466 (void)fatm_reset(sc);
467
468 /* stop watchdog */
469 sc->ifatm.ifnet.if_timer = 0;
470
471 if (sc->ifatm.ifnet.if_flags & IFF_RUNNING) {
472 sc->ifatm.ifnet.if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
473 ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm,
474 sc->utopia.carrier == UTP_CARR_OK);
475
476 /*
477 * Collect transmit mbufs, partial receive mbufs and
478 * supplied mbufs
479 */
480 for (i = 0; i < FATM_TX_QLEN; i++) {
481 tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
482 if (tx->m) {
483 bus_dmamap_unload(sc->tx_tag, tx->map);
484 m_freem(tx->m);
485 tx->m = NULL;
486 }
487 }
488
489 /* Collect supplied mbufs */
490 while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
491 LIST_REMOVE(rb, link);
492 bus_dmamap_unload(sc->rbuf_tag, rb->map);
493 m_free(rb->m);
494 rb->m = NULL;
495 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
496 }
497
 498 /* Wake up any waiters */
499 wakeup(&sc->sadi_mem);
500
501 /* wakeup all threads waiting for STAT or REG buffers */
502 cv_broadcast(&sc->cv_stat);
503 cv_broadcast(&sc->cv_regs);
504
505 sc->flags &= ~(FATM_STAT_INUSE | FATM_REGS_INUSE);
506
507 /* wakeup all threads waiting on commands */
508 for (i = 0; i < FATM_CMD_QLEN; i++) {
509 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, i);
510
511 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
512 if ((stat = H_GETSTAT(q->q.statp)) != FATM_STAT_FREE) {
513 H_SETSTAT(q->q.statp, stat | FATM_STAT_ERROR);
514 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
515 wakeup(q);
516 }
517 }
518 utopia_reset_media(&sc->utopia);
519 }
520 sc->small_cnt = sc->large_cnt = 0;
521
522 /* Reset vcc info */
523 if (sc->vccs != NULL)
524 for (i = 0; i <= FORE_MAX_VCC; i++)
525 sc->vccs[i].flags = 0;
524 if (sc->vccs != NULL) {
525 for (i = 0; i < FORE_MAX_VCC + 1; i++)
526 if (sc->vccs[i] != NULL) {
527 uma_zfree(sc->vcc_zone, sc->vccs[i]);
528 sc->vccs[i] = NULL;
529 }
530 }
526
527 sc->open_vccs = 0;
528}
529
530/*
531 * Load the firmware into the board and save the entry point.
532 */
533static uint32_t
534firmware_load(struct fatm_softc *sc)
535{
536 struct firmware *fw = (struct firmware *)firmware;
537
538 DBG(sc, INIT, ("loading - entry=%x", fw->entry));
539 bus_space_write_region_4(sc->memt, sc->memh, fw->offset, firmware,
540 sizeof(firmware) / sizeof(firmware[0]));
541 BARRIER_RW(sc);
542
543 return (fw->entry);
544}
545
546/*
 547 * Read a character from the virtual UART. The availability of a character
 548 * is signaled by a non-zero value in the 32-bit register. We signal to the
 549 * card that we have consumed the character by setting that register
 550 * to zero.
551 */
552static int
553rx_getc(struct fatm_softc *sc)
554{
555 int w = 50;
556 int c;
557
558 while (w--) {
559 c = READ4(sc, FATMO_UART_TO_HOST);
560 BARRIER_RW(sc);
561 if (c != 0) {
562 WRITE4(sc, FATMO_UART_TO_HOST, 0);
563 DBGC(sc, UART, ("%c", c & 0xff));
564 return (c & 0xff);
565 }
566 DELAY(1000);
567 }
568 return (-1);
569}
570
571/*
572 * Eat up characters from the board and stuff them in the bit-bucket.
573 */
574static void
575rx_flush(struct fatm_softc *sc)
576{
577 int w = 10000;
578
579 while (w-- && rx_getc(sc) >= 0)
580 ;
581}
582
583/*
584 * Write a character to the card. The UART is available if the register
585 * is zero.
586 */
587static int
588tx_putc(struct fatm_softc *sc, u_char c)
589{
590 int w = 10;
591 int c1;
592
593 while (w--) {
594 c1 = READ4(sc, FATMO_UART_TO_960);
595 BARRIER_RW(sc);
596 if (c1 == 0) {
597 WRITE4(sc, FATMO_UART_TO_960, c | CHAR_AVAIL);
598 DBGC(sc, UART, ("%c", c & 0xff));
599 return (0);
600 }
601 DELAY(1000);
602 }
603 return (-1);
604}
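/*
 * Example helper (illustration only, not part of the driver): push a
 * NUL-terminated string through the virtual UART, giving up as soon as
 * the card stops accepting characters.
 */
static __inline void
tx_puts(struct fatm_softc *sc, const char *s)
{

	while (*s != '\0')
		if (tx_putc(sc, *s++) == -1)
			break;
}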
605
606/*
 607 * Start the firmware. This is done by issuing a 'go' command with
608 * the hex entry address of the firmware. Then we wait for the self-test to
609 * succeed.
610 */
611static int
612fatm_start_firmware(struct fatm_softc *sc, uint32_t start)
613{
614 static char hex[] = "0123456789abcdef";
615 u_int w, val;
616
617 DBG(sc, INIT, ("starting"));
618 rx_flush(sc);
619 tx_putc(sc, '\r');
620 DELAY(1000);
621
622 rx_flush(sc);
623
624 tx_putc(sc, 'g');
625 (void)rx_getc(sc);
626 tx_putc(sc, 'o');
627 (void)rx_getc(sc);
628 tx_putc(sc, ' ');
629 (void)rx_getc(sc);
630
631 tx_putc(sc, hex[(start >> 12) & 0xf]);
632 (void)rx_getc(sc);
633 tx_putc(sc, hex[(start >> 8) & 0xf]);
634 (void)rx_getc(sc);
635 tx_putc(sc, hex[(start >> 4) & 0xf]);
636 (void)rx_getc(sc);
637 tx_putc(sc, hex[(start >> 0) & 0xf]);
638 (void)rx_getc(sc);
639
640 tx_putc(sc, '\r');
641 rx_flush(sc);
642
643 for (w = 100; w; w--) {
644 BARRIER_R(sc);
645 val = READ4(sc, FATMO_BOOT_STATUS);
646 switch (val) {
647 case CP_RUNNING:
648 return (0);
649 case SELF_TEST_FAIL:
650 return (EIO);
651 }
652 DELAY(1000);
653 }
654 return (EIO);
655}
656
657/*
658 * Initialize one card and host queue.
659 */
660static void
661init_card_queue(struct fatm_softc *sc, struct fqueue *queue, int qlen,
662 size_t qel_size, size_t desc_size, cardoff_t off,
663 u_char **statpp, uint32_t *cardstat, u_char *descp, uint32_t carddesc)
664{
665 struct fqelem *el = queue->chunk;
666
667 while (qlen--) {
668 el->card = off;
669 off += 8; /* size of card entry */
670
671 el->statp = (uint32_t *)(*statpp);
672 (*statpp) += sizeof(uint32_t);
673 H_SETSTAT(el->statp, FATM_STAT_FREE);
674 H_SYNCSTAT_PREWRITE(sc, el->statp);
675
676 WRITE4(sc, el->card + FATMOS_STATP, (*cardstat));
677 (*cardstat) += sizeof(uint32_t);
678
679 el->ioblk = descp;
680 descp += desc_size;
681 el->card_ioblk = carddesc;
682 carddesc += desc_size;
683
684 el = (struct fqelem *)((u_char *)el + qel_size);
685 }
686 queue->tail = queue->head = 0;
687}
688
689/*
690 * Issue the initialize operation to the card, wait for completion and
691 * initialize the on-board and host queue structures with offsets and
692 * addresses.
693 */
694static int
695fatm_init_cmd(struct fatm_softc *sc)
696{
697 int w, c;
698 u_char *statp;
699 uint32_t card_stat;
700 u_int cnt;
701 struct fqelem *el;
702 cardoff_t off;
703
704 DBG(sc, INIT, ("command"));
705 WRITE4(sc, FATMO_ISTAT, 0);
706 WRITE4(sc, FATMO_IMASK, 1);
707 WRITE4(sc, FATMO_HLOGGER, 0);
708
709 WRITE4(sc, FATMO_INIT + FATMOI_RECEIVE_TRESHOLD, 0);
710 WRITE4(sc, FATMO_INIT + FATMOI_NUM_CONNECT, FORE_MAX_VCC);
711 WRITE4(sc, FATMO_INIT + FATMOI_CQUEUE_LEN, FATM_CMD_QLEN);
712 WRITE4(sc, FATMO_INIT + FATMOI_TQUEUE_LEN, FATM_TX_QLEN);
713 WRITE4(sc, FATMO_INIT + FATMOI_RQUEUE_LEN, FATM_RX_QLEN);
714 WRITE4(sc, FATMO_INIT + FATMOI_RPD_EXTENSION, RPD_EXTENSIONS);
715 WRITE4(sc, FATMO_INIT + FATMOI_TPD_EXTENSION, TPD_EXTENSIONS);
716
717 /*
718 * initialize buffer descriptors
719 */
720 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_QUEUE_LENGTH,
721 SMALL_SUPPLY_QLEN);
722 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_BUFFER_SIZE,
723 SMALL_BUFFER_LEN);
724 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_POOL_SIZE,
725 SMALL_POOL_SIZE);
726 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_SUPPLY_BLKSIZE,
727 SMALL_SUPPLY_BLKSIZE);
728
729 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_QUEUE_LENGTH,
730 LARGE_SUPPLY_QLEN);
731 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_BUFFER_SIZE,
732 LARGE_BUFFER_LEN);
733 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_POOL_SIZE,
734 LARGE_POOL_SIZE);
735 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_SUPPLY_BLKSIZE,
736 LARGE_SUPPLY_BLKSIZE);
737
738 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_QUEUE_LENGTH, 0);
739 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_BUFFER_SIZE, 0);
740 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_POOL_SIZE, 0);
741 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_SUPPLY_BLKSIZE, 0);
742
743 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_QUEUE_LENGTH, 0);
744 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_BUFFER_SIZE, 0);
745 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_POOL_SIZE, 0);
746 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_SUPPLY_BLKSIZE, 0);
747
748 /*
749 * Start the command
750 */
751 BARRIER_W(sc);
752 WRITE4(sc, FATMO_INIT + FATMOI_STATUS, FATM_STAT_PENDING);
753 BARRIER_W(sc);
754 WRITE4(sc, FATMO_INIT + FATMOI_OP, FATM_OP_INITIALIZE);
755 BARRIER_W(sc);
756
757 /*
758 * Busy wait for completion
759 */
760 w = 100;
761 while (w--) {
762 c = READ4(sc, FATMO_INIT + FATMOI_STATUS);
763 BARRIER_R(sc);
764 if (c & FATM_STAT_COMPLETE)
765 break;
766 DELAY(1000);
767 }
768
769 if (c & FATM_STAT_ERROR)
770 return (EIO);
771
772 /*
773 * Initialize the queues
774 */
775 statp = sc->stat_mem.mem;
776 card_stat = sc->stat_mem.paddr;
777
778 /*
779 * Command queue. This is special in that it's on the card.
780 */
781 el = sc->cmdqueue.chunk;
782 off = READ4(sc, FATMO_COMMAND_QUEUE);
783 DBG(sc, INIT, ("cmd queue=%x", off));
784 for (cnt = 0; cnt < FATM_CMD_QLEN; cnt++) {
785 el = &((struct cmdqueue *)sc->cmdqueue.chunk + cnt)->q;
786
787 el->card = off;
788 off += 32; /* size of card structure */
789
790 el->statp = (uint32_t *)statp;
791 statp += sizeof(uint32_t);
792 H_SETSTAT(el->statp, FATM_STAT_FREE);
793 H_SYNCSTAT_PREWRITE(sc, el->statp);
794
795 WRITE4(sc, el->card + FATMOC_STATP, card_stat);
796 card_stat += sizeof(uint32_t);
797 }
798 sc->cmdqueue.tail = sc->cmdqueue.head = 0;
799
800 /*
801 * Now the other queues. These are in memory
802 */
803 init_card_queue(sc, &sc->txqueue, FATM_TX_QLEN,
804 sizeof(struct txqueue), TPD_SIZE,
805 READ4(sc, FATMO_TRANSMIT_QUEUE),
806 &statp, &card_stat, sc->txq_mem.mem, sc->txq_mem.paddr);
807
808 init_card_queue(sc, &sc->rxqueue, FATM_RX_QLEN,
809 sizeof(struct rxqueue), RPD_SIZE,
810 READ4(sc, FATMO_RECEIVE_QUEUE),
811 &statp, &card_stat, sc->rxq_mem.mem, sc->rxq_mem.paddr);
812
813 init_card_queue(sc, &sc->s1queue, SMALL_SUPPLY_QLEN,
814 sizeof(struct supqueue), BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE),
815 READ4(sc, FATMO_SMALL_B1_QUEUE),
816 &statp, &card_stat, sc->s1q_mem.mem, sc->s1q_mem.paddr);
817
818 init_card_queue(sc, &sc->l1queue, LARGE_SUPPLY_QLEN,
819 sizeof(struct supqueue), BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE),
820 READ4(sc, FATMO_LARGE_B1_QUEUE),
821 &statp, &card_stat, sc->l1q_mem.mem, sc->l1q_mem.paddr);
822
823 sc->txcnt = 0;
824
825 return (0);
826}
827
828/*
829 * Read PROM. Called only from attach code. Here we spin because the interrupt
830 * handler is not yet set up.
831 */
832static int
833fatm_getprom(struct fatm_softc *sc)
834{
835 int i;
836 struct prom *prom;
837 struct cmdqueue *q;
838
839 DBG(sc, INIT, ("reading prom"));
840 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
841 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
842
843 q->error = 0;
 844 q->cb = NULL;
845 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
846 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
847
848 bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
849 BUS_DMASYNC_PREREAD);
850
851 WRITE4(sc, q->q.card + FATMOC_GPROM_BUF, sc->prom_mem.paddr);
852 BARRIER_W(sc);
853 WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_GET_PROM_DATA);
854 BARRIER_W(sc);
855
856 for (i = 0; i < 1000; i++) {
857 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
858 if (H_GETSTAT(q->q.statp) &
859 (FATM_STAT_COMPLETE | FATM_STAT_ERROR))
860 break;
861 DELAY(1000);
862 }
863 if (i == 1000) {
864 if_printf(&sc->ifatm.ifnet, "getprom timeout\n");
865 return (EIO);
866 }
867 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
868 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
869 if_printf(&sc->ifatm.ifnet, "getprom error\n");
870 return (EIO);
871 }
872 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
873 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
874 NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
875
876 bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
877 BUS_DMASYNC_POSTREAD);
878
879
880#ifdef notdef
881 {
 882 u_int i;
 883 u_char *ptr = (u_char *)sc->prom_mem.mem;
 884
 885 printf("PROM: ");
886 for (i = 0; i < sizeof(struct prom); i++)
887 printf("%02x ", *ptr++);
888 printf("\n");
889 }
890#endif
891
892 prom = (struct prom *)sc->prom_mem.mem;
893
894 bcopy(prom->mac + 2, sc->ifatm.mib.esi, 6);
895 sc->ifatm.mib.serial = le32toh(prom->serial);
896 sc->ifatm.mib.hw_version = le32toh(prom->version);
897 sc->ifatm.mib.sw_version = READ4(sc, FATMO_FIRMWARE_RELEASE);
898
899 if_printf(&sc->ifatm.ifnet, "ESI=%02x:%02x:%02x:%02x:%02x:%02x "
900 "serial=%u hw=0x%x sw=0x%x\n", sc->ifatm.mib.esi[0],
901 sc->ifatm.mib.esi[1], sc->ifatm.mib.esi[2], sc->ifatm.mib.esi[3],
902 sc->ifatm.mib.esi[4], sc->ifatm.mib.esi[5], sc->ifatm.mib.serial,
903 sc->ifatm.mib.hw_version, sc->ifatm.mib.sw_version);
904
905 return (0);
906}
907
908/*
 909 * This is the callback function for bus_dmamap_load. We assume that we
 910 * have a 32-bit bus and thus always a single segment.
911 */
912static void
913dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
914{
915 bus_addr_t *ptr = (bus_addr_t *)arg;
916
917 if (error != 0) {
918 printf("%s: error=%d\n", __func__, error);
919 return;
920 }
921 KASSERT(nsegs == 1, ("too many DMA segments"));
922 KASSERT(segs[0].ds_addr <= 0xffffffff, ("DMA address too large %lx",
923 (u_long)segs[0].ds_addr));
924
925 *ptr = segs[0].ds_addr;
926}
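/*
 * Usage sketch (this is how the helper is used below): the physical
 * address of a one-segment mapping is written through the arg pointer:
 *
 *	bus_addr_t phys;
 *
 *	error = bus_dmamap_load(tag, map, vaddr, len,
 *	    dmaload_helper, &phys, BUS_DMA_NOWAIT);
 */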
927
928/*
929 * Allocate a chunk of DMA-able memory and map it.
930 */
931static int
932alloc_dma_memory(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
933{
934 int error;
935
936 mem->mem = NULL;
937
938 if (bus_dma_tag_create(sc->parent_dmat, mem->align, 0,
939 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
940 NULL, NULL, mem->size, 1, BUS_SPACE_MAXSIZE_32BIT,
941 BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
942 if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA tag\n",
943 nm);
944 return (ENOMEM);
945 }
946
947 error = bus_dmamem_alloc(mem->dmat, &mem->mem, 0, &mem->map);
948 if (error) {
949 if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA memory: "
950 "%d\n", nm, error);
951 bus_dma_tag_destroy(mem->dmat);
952 mem->mem = NULL;
953 return (error);
954 }
955
956 error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
957 dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
958 if (error) {
959 if_printf(&sc->ifatm.ifnet, "could not load %s DMA memory: "
960 "%d\n", nm, error);
961 bus_dmamem_free(mem->dmat, mem->mem, mem->map);
962 bus_dma_tag_destroy(mem->dmat);
963 mem->mem = NULL;
964 return (error);
965 }
966
967 DBG(sc, DMA, ("DMA %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
968 (u_long)mem->paddr, mem->size, mem->align));
969
970 return (0);
971}
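/*
 * Typical call pattern (sketch; the size and alignment shown here are
 * made up -- the real values are filled in by the attach code):
 *
 *	sc->stat_mem.size = 4096;
 *	sc->stat_mem.align = 4;
 *	if (alloc_dma_memory(sc, "STAT", &sc->stat_mem) != 0)
 *		goto fail;
 */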
972
973#ifdef TEST_DMA_SYNC
974static int
975alloc_dma_memoryX(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
976{
977 int error;
978
979 mem->mem = NULL;
980
981 if (bus_dma_tag_create(NULL, mem->align, 0,
982 BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR,
983 NULL, NULL, mem->size, 1, mem->size,
984 BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
985 if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA tag\n",
986 nm);
987 return (ENOMEM);
988 }
989
990 mem->mem = contigmalloc(mem->size, M_DEVBUF, M_WAITOK,
991 BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR_32BIT, mem->align, 0);
992
993 error = bus_dmamap_create(mem->dmat, 0, &mem->map);
994 if (error) {
995 if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA map: "
996 "%d\n", nm, error);
997 contigfree(mem->mem, mem->size, M_DEVBUF);
998 bus_dma_tag_destroy(mem->dmat);
999 mem->mem = NULL;
1000 return (error);
1001 }
1002
1003 error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
1004 dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
1005 if (error) {
1006 if_printf(&sc->ifatm.ifnet, "could not load %s DMA memory: "
1007 "%d\n", nm, error);
1008 bus_dmamap_destroy(mem->dmat, mem->map);
1009 contigfree(mem->mem, mem->size, M_DEVBUF);
1010 bus_dma_tag_destroy(mem->dmat);
1011 mem->mem = NULL;
1012 return (error);
1013 }
1014
1015 DBG(sc, DMA, ("DMAX %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
1016 (u_long)mem->paddr, mem->size, mem->align));
1017
1018 printf("DMAX: %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
1019 (u_long)mem->paddr, mem->size, mem->align);
1020
1021 return (0);
1022}
1023#endif /* TEST_DMA_SYNC */
1024
1025/*
 1026 * Destroy all resources of a DMA-able memory chunk
1027 */
1028static void
1029destroy_dma_memory(struct fatm_mem *mem)
1030{
1031 if (mem->mem != NULL) {
1032 bus_dmamap_unload(mem->dmat, mem->map);
1033 bus_dmamem_free(mem->dmat, mem->mem, mem->map);
1034 bus_dma_tag_destroy(mem->dmat);
1035 mem->mem = NULL;
1036 }
1037}
1038#ifdef TEST_DMA_SYNC
1039static void
1040destroy_dma_memoryX(struct fatm_mem *mem)
1041{
1042 if (mem->mem != NULL) {
1043 bus_dmamap_unload(mem->dmat, mem->map);
1044 bus_dmamap_destroy(mem->dmat, mem->map);
1045 contigfree(mem->mem, mem->size, M_DEVBUF);
1046 bus_dma_tag_destroy(mem->dmat);
1047 mem->mem = NULL;
1048 }
1049}
1050#endif /* TEST_DMA_SYNC */
1051
1052/*
1053 * Try to supply buffers to the card if there are free entries in the queues
1054 */
1055static void
1056fatm_supply_small_buffers(struct fatm_softc *sc)
1057{
1058 int nblocks, nbufs;
1059 struct supqueue *q;
1060 struct rbd *bd;
1061 int i, j, error, cnt;
1062 struct mbuf *m;
1063 struct rbuf *rb;
1064 bus_addr_t phys;
1065
1066 nbufs = max(4 * sc->open_vccs, 32);
1067 nbufs = min(nbufs, SMALL_POOL_SIZE);
1068 nbufs -= sc->small_cnt;
1069
1070 nblocks = (nbufs + SMALL_SUPPLY_BLKSIZE - 1) / SMALL_SUPPLY_BLKSIZE;
1071 for (cnt = 0; cnt < nblocks; cnt++) {
1072 q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.head);
1073
1074 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1075 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
1076 break;
1077
1078 bd = (struct rbd *)q->q.ioblk;
1079
1080 for (i = 0; i < SMALL_SUPPLY_BLKSIZE; i++) {
1081 if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
1082 if_printf(&sc->ifatm.ifnet, "out of rbufs\n");
1083 break;
1084 }
1085 MGETHDR(m, M_DONTWAIT, MT_DATA);
1086 if (m == NULL) {
1087 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1088 break;
1089 }
1090 MH_ALIGN(m, SMALL_BUFFER_LEN);
1091 error = bus_dmamap_load(sc->rbuf_tag, rb->map,
1092 m->m_data, SMALL_BUFFER_LEN, dmaload_helper,
1093 &phys, BUS_DMA_NOWAIT);
1094 if (error) {
1095 if_printf(&sc->ifatm.ifnet,
1096 "dmamap_load mbuf failed %d", error);
1097 m_freem(m);
1098 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1099 break;
1100 }
1101 bus_dmamap_sync(sc->rbuf_tag, rb->map,
1102 BUS_DMASYNC_PREREAD);
1103
1104 LIST_REMOVE(rb, link);
1105 LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);
1106
1107 rb->m = m;
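			/*
			 * The handle is the rbuf's index in sc->rbufs, so
			 * the buffer can be found again when the card
			 * returns it.
			 */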
1108 bd[i].handle = rb - sc->rbufs;
1109 H_SETDESC(bd[i].buffer, phys);
1110 }
1111
1112 if (i < SMALL_SUPPLY_BLKSIZE) {
1113 for (j = 0; j < i; j++) {
1114 rb = sc->rbufs + bd[j].handle;
1115 bus_dmamap_unload(sc->rbuf_tag, rb->map);
1116 m_free(rb->m);
1117 rb->m = NULL;
1118
1119 LIST_REMOVE(rb, link);
1120 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1121 }
1122 break;
1123 }
1124 H_SYNCQ_PREWRITE(&sc->s1q_mem, bd,
1125 sizeof(struct rbd) * SMALL_SUPPLY_BLKSIZE);
1126
1127 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1128 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1129
1130 WRITE4(sc, q->q.card, q->q.card_ioblk);
1131 BARRIER_W(sc);
1132
1133 sc->small_cnt += SMALL_SUPPLY_BLKSIZE;
1134
1135 NEXT_QUEUE_ENTRY(sc->s1queue.head, SMALL_SUPPLY_QLEN);
1136 }
1137}
1138
1139/*
1140 * Try to supply buffers to the card if there are free entries in the queues
1141 * We assume that all buffers are within the address space accessible by the
1142 * card (32-bit), so we don't need bounce buffers.
1143 */
1144static void
1145fatm_supply_large_buffers(struct fatm_softc *sc)
1146{
1147 int nbufs, nblocks, cnt;
1148 struct supqueue *q;
1149 struct rbd *bd;
1150 int i, j, error;
1151 struct mbuf *m;
1152 struct rbuf *rb;
1153 bus_addr_t phys;
1154
1155 nbufs = max(4 * sc->open_vccs, 32);
1156 nbufs = min(nbufs, LARGE_POOL_SIZE);
1157 nbufs -= sc->large_cnt;
1158
1159 nblocks = (nbufs + LARGE_SUPPLY_BLKSIZE - 1) / LARGE_SUPPLY_BLKSIZE;
1160
1161 for (cnt = 0; cnt < nblocks; cnt++) {
1162 q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.head);
1163
1164 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1165 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
1166 break;
1167
1168 bd = (struct rbd *)q->q.ioblk;
1169
1170 for (i = 0; i < LARGE_SUPPLY_BLKSIZE; i++) {
1171 if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
1172 if_printf(&sc->ifatm.ifnet, "out of rbufs\n");
1173 break;
1174 }
1175 if ((m = m_getcl(M_DONTWAIT, MT_DATA,
1176 M_PKTHDR)) == NULL) {
1177 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1178 break;
1179 }
1180 /* No MEXT_ALIGN */
1181 m->m_data += MCLBYTES - LARGE_BUFFER_LEN;
1182 error = bus_dmamap_load(sc->rbuf_tag, rb->map,
1183 m->m_data, LARGE_BUFFER_LEN, dmaload_helper,
1184 &phys, BUS_DMA_NOWAIT);
1185 if (error) {
1186 if_printf(&sc->ifatm.ifnet,
1187 "dmamap_load mbuf failed %d", error);
1188 m_freem(m);
1189 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1190 break;
1191 }
1192
1193 bus_dmamap_sync(sc->rbuf_tag, rb->map,
1194 BUS_DMASYNC_PREREAD);
1195
1196 LIST_REMOVE(rb, link);
1197 LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);
1198
1199 rb->m = m;
1200 bd[i].handle = rb - sc->rbufs;
1201 H_SETDESC(bd[i].buffer, phys);
1202 }
1203
1204 if (i < LARGE_SUPPLY_BLKSIZE) {
1205 for (j = 0; j < i; j++) {
1206 rb = sc->rbufs + bd[j].handle;
1207 bus_dmamap_unload(sc->rbuf_tag, rb->map);
1208 m_free(rb->m);
1209 rb->m = NULL;
1210
1211 LIST_REMOVE(rb, link);
1212 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1213 }
1214 break;
1215 }
1216 H_SYNCQ_PREWRITE(&sc->l1q_mem, bd,
1217 sizeof(struct rbd) * LARGE_SUPPLY_BLKSIZE);
1218
1219 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1220 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1221 WRITE4(sc, q->q.card, q->q.card_ioblk);
1222 BARRIER_W(sc);
1223
1224 sc->large_cnt += LARGE_SUPPLY_BLKSIZE;
1225
1226 NEXT_QUEUE_ENTRY(sc->l1queue.head, LARGE_SUPPLY_QLEN);
1227 }
1228}
1229
1230
1231/*
1232 * Actually start the card. The lock must be held here.
 1233 * Reset, load the firmware, start it, initialize the queues, read the PROM
1234 * and supply receive buffers to the card.
1235 */
1236static void
1237fatm_init_locked(struct fatm_softc *sc)
1238{
1239 struct rxqueue *q;
1240 int i, c;
1241 uint32_t start;
1242
1243 DBG(sc, INIT, ("initialize"));
1244 if (sc->ifatm.ifnet.if_flags & IFF_RUNNING)
1245 fatm_stop(sc);
1246
1247 /*
1248 * Hard reset the board
1249 */
1250 if (fatm_reset(sc))
1251 return;
1252
1253 start = firmware_load(sc);
1254 if (fatm_start_firmware(sc, start) || fatm_init_cmd(sc) ||
1255 fatm_getprom(sc)) {
1256 fatm_reset(sc);
1257 return;
1258 }
1259
1260 /*
1261 * Handle media
1262 */
1263 c = READ4(sc, FATMO_MEDIA_TYPE);
1264 switch (c) {
1265
1266 case FORE_MT_TAXI_100:
1267 sc->ifatm.mib.media = IFM_ATM_TAXI_100;
1268 sc->ifatm.mib.pcr = 227273;
1269 break;
1270
1271 case FORE_MT_TAXI_140:
1272 sc->ifatm.mib.media = IFM_ATM_TAXI_140;
1273 sc->ifatm.mib.pcr = 318181;
1274 break;
1275
1276 case FORE_MT_UTP_SONET:
1277 sc->ifatm.mib.media = IFM_ATM_UTP_155;
1278 sc->ifatm.mib.pcr = 353207;
1279 break;
1280
1281 case FORE_MT_MM_OC3_ST:
1282 case FORE_MT_MM_OC3_SC:
1283 sc->ifatm.mib.media = IFM_ATM_MM_155;
1284 sc->ifatm.mib.pcr = 353207;
1285 break;
1286
1287 case FORE_MT_SM_OC3_ST:
1288 case FORE_MT_SM_OC3_SC:
1289 sc->ifatm.mib.media = IFM_ATM_SM_155;
1290 sc->ifatm.mib.pcr = 353207;
1291 break;
1292
1293 default:
1294 log(LOG_ERR, "fatm: unknown media type %d\n", c);
1295 sc->ifatm.mib.media = IFM_ATM_UNKNOWN;
1296 sc->ifatm.mib.pcr = 353207;
1297 break;
1298 }
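	/* An ATM cell is 53 bytes, so the line rate is pcr * 53 * 8 bit/s. */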
1299 sc->ifatm.ifnet.if_baudrate = 53 * 8 * sc->ifatm.mib.pcr;
1300 utopia_init_media(&sc->utopia);
1301
1302 /*
1303 * Initialize the RBDs
1304 */
1305 for (i = 0; i < FATM_RX_QLEN; i++) {
1306 q = GET_QUEUE(sc->rxqueue, struct rxqueue, i);
1307 WRITE4(sc, q->q.card + 0, q->q.card_ioblk);
1308 }
1309 BARRIER_W(sc);
1310
1311 /*
1312 * Supply buffers to the card
1313 */
1314 fatm_supply_small_buffers(sc);
1315 fatm_supply_large_buffers(sc);
1316
1317 /*
1318 * Now set flags, that we are ready
1319 */
1320 sc->ifatm.ifnet.if_flags |= IFF_RUNNING;
1321
1322 /*
1323 * Start the watchdog timer
1324 */
1325 sc->ifatm.ifnet.if_timer = 5;
1326
1327 /* start SUNI */
1328 utopia_start(&sc->utopia);
1329
1330 ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm,
1331 sc->utopia.carrier == UTP_CARR_OK);
1332
1333 DBG(sc, INIT, ("done"));
1334}
1335
1336/*
 1337 * This is exported as the initialization function.
1338 */
1339static void
1340fatm_init(void *p)
1341{
1342 struct fatm_softc *sc = p;
1343
1344 FATM_LOCK(sc);
1345 fatm_init_locked(sc);
1346 FATM_UNLOCK(sc);
1347}
1348
1349/************************************************************/
1350/*
1351 * The INTERRUPT handling
1352 */
1353/*
1354 * Check the command queue. If a command was completed, call the completion
1355 * function for that command.
1356 */
1357static void
1358fatm_intr_drain_cmd(struct fatm_softc *sc)
1359{
1360 struct cmdqueue *q;
1361 int stat;
1362
1363 /*
1364 * Drain command queue
1365 */
1366 for (;;) {
1367 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.tail);
1368
1369 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1370 stat = H_GETSTAT(q->q.statp);
1371
1372 if (stat != FATM_STAT_COMPLETE &&
1373 stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
1374 stat != FATM_STAT_ERROR)
1375 break;
1376
1377 (*q->cb)(sc, q);
1378
1379 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1380 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1381
1382 NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
1383 }
1384}
1385
1386/*
1387 * Drain the small buffer supply queue.
1388 */
1389static void
1390fatm_intr_drain_small_buffers(struct fatm_softc *sc)
1391{
1392 struct supqueue *q;
1393 int stat;
1394
1395 for (;;) {
1396 q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.tail);
1397
1398 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1399 stat = H_GETSTAT(q->q.statp);
1400
1401 if ((stat & FATM_STAT_COMPLETE) == 0)
1402 break;
1403 if (stat & FATM_STAT_ERROR)
1404 log(LOG_ERR, "%s: status %x\n", __func__, stat);
1405
1406 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1407 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1408
1409 NEXT_QUEUE_ENTRY(sc->s1queue.tail, SMALL_SUPPLY_QLEN);
1410 }
1411}
1412
1413/*
1414 * Drain the large buffer supply queue.
1415 */
1416static void
1417fatm_intr_drain_large_buffers(struct fatm_softc *sc)
1418{
1419 struct supqueue *q;
1420 int stat;
1421
1422 for (;;) {
1423 q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.tail);
1424
1425 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1426 stat = H_GETSTAT(q->q.statp);
1427
1428 if ((stat & FATM_STAT_COMPLETE) == 0)
1429 break;
1430 if (stat & FATM_STAT_ERROR)
 1431 log(LOG_ERR, "%s: status %x\n", __func__, stat);
1432
1433 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1434 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1435
1436 NEXT_QUEUE_ENTRY(sc->l1queue.tail, LARGE_SUPPLY_QLEN);
1437 }
1438}
1439
1440/*
1441 * Check the receive queue. Send any received PDU up the protocol stack
 1442 * (except when there was an error or the VCI appears to be closed; in
 1443 * that case the PDU is discarded).
1444 */
1445static void
1446fatm_intr_drain_rx(struct fatm_softc *sc)
1447{
1448 struct rxqueue *q;
531
532 sc->open_vccs = 0;
533}
534
535/*
536 * Load the firmware into the board and save the entry point.
537 */
538static uint32_t
539firmware_load(struct fatm_softc *sc)
540{
541 struct firmware *fw = (struct firmware *)firmware;
542
543 DBG(sc, INIT, ("loading - entry=%x", fw->entry));
544 bus_space_write_region_4(sc->memt, sc->memh, fw->offset, firmware,
545 sizeof(firmware) / sizeof(firmware[0]));
546 BARRIER_RW(sc);
547
548 return (fw->entry);
549}
550
551/*
552 * Read a character from the virtual UART. The availability of a character
553 * is signaled by a non-null value of the 32 bit register. The eating of
554 * the character by us is signalled to the card by setting that register
555 * to zero.
556 */
557static int
558rx_getc(struct fatm_softc *sc)
559{
560 int w = 50;
561 int c;
562
563 while (w--) {
564 c = READ4(sc, FATMO_UART_TO_HOST);
565 BARRIER_RW(sc);
566 if (c != 0) {
567 WRITE4(sc, FATMO_UART_TO_HOST, 0);
568 DBGC(sc, UART, ("%c", c & 0xff));
569 return (c & 0xff);
570 }
571 DELAY(1000);
572 }
573 return (-1);
574}
575
576/*
577 * Eat up characters from the board and stuff them in the bit-bucket.
578 */
579static void
580rx_flush(struct fatm_softc *sc)
581{
582 int w = 10000;
583
584 while (w-- && rx_getc(sc) >= 0)
585 ;
586}
587
588/*
589 * Write a character to the card. The UART is available if the register
590 * is zero.
591 */
592static int
593tx_putc(struct fatm_softc *sc, u_char c)
594{
595 int w = 10;
596 int c1;
597
598 while (w--) {
599 c1 = READ4(sc, FATMO_UART_TO_960);
600 BARRIER_RW(sc);
601 if (c1 == 0) {
602 WRITE4(sc, FATMO_UART_TO_960, c | CHAR_AVAIL);
603 DBGC(sc, UART, ("%c", c & 0xff));
604 return (0);
605 }
606 DELAY(1000);
607 }
608 return (-1);
609}
610
611/*
612 * Start the firmware. This is doing by issuing a 'go' command with
613 * the hex entry address of the firmware. Then we wait for the self-test to
614 * succeed.
615 */
616static int
617fatm_start_firmware(struct fatm_softc *sc, uint32_t start)
618{
619 static char hex[] = "0123456789abcdef";
620 u_int w, val;
621
622 DBG(sc, INIT, ("starting"));
623 rx_flush(sc);
624 tx_putc(sc, '\r');
625 DELAY(1000);
626
627 rx_flush(sc);
628
629 tx_putc(sc, 'g');
630 (void)rx_getc(sc);
631 tx_putc(sc, 'o');
632 (void)rx_getc(sc);
633 tx_putc(sc, ' ');
634 (void)rx_getc(sc);
635
636 tx_putc(sc, hex[(start >> 12) & 0xf]);
637 (void)rx_getc(sc);
638 tx_putc(sc, hex[(start >> 8) & 0xf]);
639 (void)rx_getc(sc);
640 tx_putc(sc, hex[(start >> 4) & 0xf]);
641 (void)rx_getc(sc);
642 tx_putc(sc, hex[(start >> 0) & 0xf]);
643 (void)rx_getc(sc);
644
645 tx_putc(sc, '\r');
646 rx_flush(sc);
647
648 for (w = 100; w; w--) {
649 BARRIER_R(sc);
650 val = READ4(sc, FATMO_BOOT_STATUS);
651 switch (val) {
652 case CP_RUNNING:
653 return (0);
654 case SELF_TEST_FAIL:
655 return (EIO);
656 }
657 DELAY(1000);
658 }
659 return (EIO);
660}
661
662/*
663 * Initialize one card and host queue.
664 */
665static void
666init_card_queue(struct fatm_softc *sc, struct fqueue *queue, int qlen,
667 size_t qel_size, size_t desc_size, cardoff_t off,
668 u_char **statpp, uint32_t *cardstat, u_char *descp, uint32_t carddesc)
669{
670 struct fqelem *el = queue->chunk;
671
672 while (qlen--) {
673 el->card = off;
674 off += 8; /* size of card entry */
675
676 el->statp = (uint32_t *)(*statpp);
677 (*statpp) += sizeof(uint32_t);
678 H_SETSTAT(el->statp, FATM_STAT_FREE);
679 H_SYNCSTAT_PREWRITE(sc, el->statp);
680
681 WRITE4(sc, el->card + FATMOS_STATP, (*cardstat));
682 (*cardstat) += sizeof(uint32_t);
683
684 el->ioblk = descp;
685 descp += desc_size;
686 el->card_ioblk = carddesc;
687 carddesc += desc_size;
688
689 el = (struct fqelem *)((u_char *)el + qel_size);
690 }
691 queue->tail = queue->head = 0;
692}
693
694/*
695 * Issue the initialize operation to the card, wait for completion and
696 * initialize the on-board and host queue structures with offsets and
697 * addresses.
698 */
699static int
700fatm_init_cmd(struct fatm_softc *sc)
701{
702 int w, c;
703 u_char *statp;
704 uint32_t card_stat;
705 u_int cnt;
706 struct fqelem *el;
707 cardoff_t off;
708
709 DBG(sc, INIT, ("command"));
710 WRITE4(sc, FATMO_ISTAT, 0);
711 WRITE4(sc, FATMO_IMASK, 1);
712 WRITE4(sc, FATMO_HLOGGER, 0);
713
714 WRITE4(sc, FATMO_INIT + FATMOI_RECEIVE_TRESHOLD, 0);
715 WRITE4(sc, FATMO_INIT + FATMOI_NUM_CONNECT, FORE_MAX_VCC);
716 WRITE4(sc, FATMO_INIT + FATMOI_CQUEUE_LEN, FATM_CMD_QLEN);
717 WRITE4(sc, FATMO_INIT + FATMOI_TQUEUE_LEN, FATM_TX_QLEN);
718 WRITE4(sc, FATMO_INIT + FATMOI_RQUEUE_LEN, FATM_RX_QLEN);
719 WRITE4(sc, FATMO_INIT + FATMOI_RPD_EXTENSION, RPD_EXTENSIONS);
720 WRITE4(sc, FATMO_INIT + FATMOI_TPD_EXTENSION, TPD_EXTENSIONS);
721
722 /*
723 * initialize buffer descriptors
724 */
725 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_QUEUE_LENGTH,
726 SMALL_SUPPLY_QLEN);
727 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_BUFFER_SIZE,
728 SMALL_BUFFER_LEN);
729 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_POOL_SIZE,
730 SMALL_POOL_SIZE);
731 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_SUPPLY_BLKSIZE,
732 SMALL_SUPPLY_BLKSIZE);
733
734 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_QUEUE_LENGTH,
735 LARGE_SUPPLY_QLEN);
736 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_BUFFER_SIZE,
737 LARGE_BUFFER_LEN);
738 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_POOL_SIZE,
739 LARGE_POOL_SIZE);
740 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_SUPPLY_BLKSIZE,
741 LARGE_SUPPLY_BLKSIZE);
742
743 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_QUEUE_LENGTH, 0);
744 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_BUFFER_SIZE, 0);
745 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_POOL_SIZE, 0);
746 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_SUPPLY_BLKSIZE, 0);
747
748 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_QUEUE_LENGTH, 0);
749 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_BUFFER_SIZE, 0);
750 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_POOL_SIZE, 0);
751 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_SUPPLY_BLKSIZE, 0);
752
753 /*
754 * Start the command
755 */
756 BARRIER_W(sc);
757 WRITE4(sc, FATMO_INIT + FATMOI_STATUS, FATM_STAT_PENDING);
758 BARRIER_W(sc);
759 WRITE4(sc, FATMO_INIT + FATMOI_OP, FATM_OP_INITIALIZE);
760 BARRIER_W(sc);
761
762 /*
763 * Busy wait for completion
764 */
765 w = 100;
766 while (w--) {
767 c = READ4(sc, FATMO_INIT + FATMOI_STATUS);
768 BARRIER_R(sc);
769 if (c & FATM_STAT_COMPLETE)
770 break;
771 DELAY(1000);
772 }
773
774 if (!(c & FATM_STAT_COMPLETE) || (c & FATM_STAT_ERROR)) /* timeout or error */
775 return (EIO);
776
777 /*
778 * Initialize the queues
779 */
780 statp = sc->stat_mem.mem;
781 card_stat = sc->stat_mem.paddr;
782
783 /*
784 * Command queue. This is special in that it's on the card.
785 */
786 el = sc->cmdqueue.chunk;
787 off = READ4(sc, FATMO_COMMAND_QUEUE);
788 DBG(sc, INIT, ("cmd queue=%x", off));
789 for (cnt = 0; cnt < FATM_CMD_QLEN; cnt++) {
790 el = &((struct cmdqueue *)sc->cmdqueue.chunk + cnt)->q;
791
792 el->card = off;
793 off += 32; /* size of card structure */
794
795 el->statp = (uint32_t *)statp;
796 statp += sizeof(uint32_t);
797 H_SETSTAT(el->statp, FATM_STAT_FREE);
798 H_SYNCSTAT_PREWRITE(sc, el->statp);
799
800 WRITE4(sc, el->card + FATMOC_STATP, card_stat);
801 card_stat += sizeof(uint32_t);
802 }
803 sc->cmdqueue.tail = sc->cmdqueue.head = 0;
804
805 /*
806 * Now the other queues. These are in host memory.
807 */
808 init_card_queue(sc, &sc->txqueue, FATM_TX_QLEN,
809 sizeof(struct txqueue), TPD_SIZE,
810 READ4(sc, FATMO_TRANSMIT_QUEUE),
811 &statp, &card_stat, sc->txq_mem.mem, sc->txq_mem.paddr);
812
813 init_card_queue(sc, &sc->rxqueue, FATM_RX_QLEN,
814 sizeof(struct rxqueue), RPD_SIZE,
815 READ4(sc, FATMO_RECEIVE_QUEUE),
816 &statp, &card_stat, sc->rxq_mem.mem, sc->rxq_mem.paddr);
817
818 init_card_queue(sc, &sc->s1queue, SMALL_SUPPLY_QLEN,
819 sizeof(struct supqueue), BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE),
820 READ4(sc, FATMO_SMALL_B1_QUEUE),
821 &statp, &card_stat, sc->s1q_mem.mem, sc->s1q_mem.paddr);
822
823 init_card_queue(sc, &sc->l1queue, LARGE_SUPPLY_QLEN,
824 sizeof(struct supqueue), BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE),
825 READ4(sc, FATMO_LARGE_B1_QUEUE),
826 &statp, &card_stat, sc->l1q_mem.mem, sc->l1q_mem.paddr);
827
828 sc->txcnt = 0;
829
830 return (0);
831}
832
833/*
834 * Read PROM. Called only from attach code. Here we spin because the interrupt
835 * handler is not yet set up.
836 */
837static int
838fatm_getprom(struct fatm_softc *sc)
839{
840 int i;
841 struct prom *prom;
842 struct cmdqueue *q;
843
844 DBG(sc, INIT, ("reading prom"));
845 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
846 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
847
848 q->error = 0;
849 q->cb = NULL;
850 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
851 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
852
853 bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
854 BUS_DMASYNC_PREREAD);
855
856 WRITE4(sc, q->q.card + FATMOC_GPROM_BUF, sc->prom_mem.paddr);
857 BARRIER_W(sc);
858 WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_GET_PROM_DATA);
859 BARRIER_W(sc);
860
861 for (i = 0; i < 1000; i++) {
862 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
863 if (H_GETSTAT(q->q.statp) &
864 (FATM_STAT_COMPLETE | FATM_STAT_ERROR))
865 break;
866 DELAY(1000);
867 }
868 if (i == 1000) {
869 if_printf(&sc->ifatm.ifnet, "getprom timeout\n");
870 return (EIO);
871 }
872 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
873 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
874 if_printf(&sc->ifatm.ifnet, "getprom error\n");
875 return (EIO);
876 }
877 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
878 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
879 NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
880
881 bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
882 BUS_DMASYNC_POSTREAD);
883
884
885#ifdef notdef
886 {
887 u_int i;
888
889 printf("PROM: ");
890 u_char *ptr = (u_char *)sc->prom_mem.mem;
891 for (i = 0; i < sizeof(struct prom); i++)
892 printf("%02x ", *ptr++);
893 printf("\n");
894 }
895#endif
896
897 prom = (struct prom *)sc->prom_mem.mem;
898
899 bcopy(prom->mac + 2, sc->ifatm.mib.esi, 6);
900 sc->ifatm.mib.serial = le32toh(prom->serial);
901 sc->ifatm.mib.hw_version = le32toh(prom->version);
902 sc->ifatm.mib.sw_version = READ4(sc, FATMO_FIRMWARE_RELEASE);
903
904 if_printf(&sc->ifatm.ifnet, "ESI=%02x:%02x:%02x:%02x:%02x:%02x "
905 "serial=%u hw=0x%x sw=0x%x\n", sc->ifatm.mib.esi[0],
906 sc->ifatm.mib.esi[1], sc->ifatm.mib.esi[2], sc->ifatm.mib.esi[3],
907 sc->ifatm.mib.esi[4], sc->ifatm.mib.esi[5], sc->ifatm.mib.serial,
908 sc->ifatm.mib.hw_version, sc->ifatm.mib.sw_version);
909
910 return (0);
911}
912
913/*
914 * This is the callback function for bus_dmamap_load. We assume that we
915 * have a 32-bit bus and so always have exactly one segment.
916 */
917static void
918dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
919{
920 bus_addr_t *ptr = (bus_addr_t *)arg;
921
922 if (error != 0) {
923 printf("%s: error=%d\n", __func__, error);
924 return;
925 }
926 KASSERT(nsegs == 1, ("too many DMA segments"));
927 KASSERT(segs[0].ds_addr <= 0xffffffff, ("DMA address too large %lx",
928 (u_long)segs[0].ds_addr));
929
930 *ptr = segs[0].ds_addr;
931}
932
933/*
934 * Allocate a chunk of DMA-able memory and map it.
935 */
936static int
937alloc_dma_memory(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
938{
939 int error;
940
941 mem->mem = NULL;
942
943 if (bus_dma_tag_create(sc->parent_dmat, mem->align, 0,
944 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
945 NULL, NULL, mem->size, 1, BUS_SPACE_MAXSIZE_32BIT,
946 BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
947 if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA tag\n",
948 nm);
949 return (ENOMEM);
950 }
951
952 error = bus_dmamem_alloc(mem->dmat, &mem->mem, 0, &mem->map);
953 if (error) {
954 if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA memory: "
955 "%d\n", nm, error);
956 bus_dma_tag_destroy(mem->dmat);
957 mem->mem = NULL;
958 return (error);
959 }
960
961 error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
962 dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
963 if (error) {
964 if_printf(&sc->ifatm.ifnet, "could not load %s DMA memory: "
965 "%d\n", nm, error);
966 bus_dmamem_free(mem->dmat, mem->mem, mem->map);
967 bus_dma_tag_destroy(mem->dmat);
968 mem->mem = NULL;
969 return (error);
970 }
971
972 DBG(sc, DMA, ("DMA %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
973 (u_long)mem->paddr, mem->size, mem->align));
974
975 return (0);
976}
977
978#ifdef TEST_DMA_SYNC
979static int
980alloc_dma_memoryX(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
981{
982 int error;
983
984 mem->mem = NULL;
985
986 if (bus_dma_tag_create(NULL, mem->align, 0,
987 BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR,
988 NULL, NULL, mem->size, 1, mem->size,
989 BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
990 if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA tag\n",
991 nm);
992 return (ENOMEM);
993 }
994
995 mem->mem = contigmalloc(mem->size, M_DEVBUF, M_WAITOK,
996 BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR_32BIT, mem->align, 0);
997
998 error = bus_dmamap_create(mem->dmat, 0, &mem->map);
999 if (error) {
1000 if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA map: "
1001 "%d\n", nm, error);
1002 contigfree(mem->mem, mem->size, M_DEVBUF);
1003 bus_dma_tag_destroy(mem->dmat);
1004 mem->mem = NULL;
1005 return (error);
1006 }
1007
1008 error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
1009 dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
1010 if (error) {
1011 if_printf(&sc->ifatm.ifnet, "could not load %s DMA memory: "
1012 "%d\n", nm, error);
1013 bus_dmamap_destroy(mem->dmat, mem->map);
1014 contigfree(mem->mem, mem->size, M_DEVBUF);
1015 bus_dma_tag_destroy(mem->dmat);
1016 mem->mem = NULL;
1017 return (error);
1018 }
1019
1020 DBG(sc, DMA, ("DMAX %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
1021 (u_long)mem->paddr, mem->size, mem->align));
1022
1023 printf("DMAX: %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
1024 (u_long)mem->paddr, mem->size, mem->align);
1025
1026 return (0);
1027}
1028#endif /* TEST_DMA_SYNC */
1029
1030/*
1031 * Destroy all resources of a DMA-able memory chunk.
1032 */
1033static void
1034destroy_dma_memory(struct fatm_mem *mem)
1035{
1036 if (mem->mem != NULL) {
1037 bus_dmamap_unload(mem->dmat, mem->map);
1038 bus_dmamem_free(mem->dmat, mem->mem, mem->map);
1039 bus_dma_tag_destroy(mem->dmat);
1040 mem->mem = NULL;
1041 }
1042}
1043#ifdef TEST_DMA_SYNC
1044static void
1045destroy_dma_memoryX(struct fatm_mem *mem)
1046{
1047 if (mem->mem != NULL) {
1048 bus_dmamap_unload(mem->dmat, mem->map);
1049 bus_dmamap_destroy(mem->dmat, mem->map);
1050 contigfree(mem->mem, mem->size, M_DEVBUF);
1051 bus_dma_tag_destroy(mem->dmat);
1052 mem->mem = NULL;
1053 }
1054}
1055#endif /* TEST_DMA_SYNC */
1056
1057/*
1058 * Try to supply buffers to the card if there are free entries in the queues
1059 */
1060static void
1061fatm_supply_small_buffers(struct fatm_softc *sc)
1062{
1063 int nblocks, nbufs;
1064 struct supqueue *q;
1065 struct rbd *bd;
1066 int i, j, error, cnt;
1067 struct mbuf *m;
1068 struct rbuf *rb;
1069 bus_addr_t phys;
1070
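/*
 * Aim for four buffers per open VCC, but at least 32 and at most the
 * pool size; supply whatever is missing in full blocks.
 */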
1071 nbufs = max(4 * sc->open_vccs, 32);
1072 nbufs = min(nbufs, SMALL_POOL_SIZE);
1073 nbufs -= sc->small_cnt;
1074
1075 nblocks = (nbufs + SMALL_SUPPLY_BLKSIZE - 1) / SMALL_SUPPLY_BLKSIZE;
1076 for (cnt = 0; cnt < nblocks; cnt++) {
1077 q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.head);
1078
1079 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1080 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
1081 break;
1082
1083 bd = (struct rbd *)q->q.ioblk;
1084
1085 for (i = 0; i < SMALL_SUPPLY_BLKSIZE; i++) {
1086 if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
1087 if_printf(&sc->ifatm.ifnet, "out of rbufs\n");
1088 break;
1089 }
1090 MGETHDR(m, M_DONTWAIT, MT_DATA);
1091 if (m == NULL) {
1092 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1093 break;
1094 }
1095 MH_ALIGN(m, SMALL_BUFFER_LEN);
1096 error = bus_dmamap_load(sc->rbuf_tag, rb->map,
1097 m->m_data, SMALL_BUFFER_LEN, dmaload_helper,
1098 &phys, BUS_DMA_NOWAIT);
1099 if (error) {
1100 if_printf(&sc->ifatm.ifnet,
1101 "dmamap_load mbuf failed %d", error);
1102 m_freem(m);
1103 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1104 break;
1105 }
1106 bus_dmamap_sync(sc->rbuf_tag, rb->map,
1107 BUS_DMASYNC_PREREAD);
1108
1109 LIST_REMOVE(rb, link);
1110 LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);
1111
1112 rb->m = m;
1113 bd[i].handle = rb - sc->rbufs;
1114 H_SETDESC(bd[i].buffer, phys);
1115 }
1116
1117 if (i < SMALL_SUPPLY_BLKSIZE) {
1118 for (j = 0; j < i; j++) {
1119 rb = sc->rbufs + bd[j].handle;
1120 bus_dmamap_unload(sc->rbuf_tag, rb->map);
1121 m_free(rb->m);
1122 rb->m = NULL;
1123
1124 LIST_REMOVE(rb, link);
1125 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1126 }
1127 break;
1128 }
1129 H_SYNCQ_PREWRITE(&sc->s1q_mem, bd,
1130 sizeof(struct rbd) * SMALL_SUPPLY_BLKSIZE);
1131
1132 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1133 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1134
1135 WRITE4(sc, q->q.card, q->q.card_ioblk);
1136 BARRIER_W(sc);
1137
1138 sc->small_cnt += SMALL_SUPPLY_BLKSIZE;
1139
1140 NEXT_QUEUE_ENTRY(sc->s1queue.head, SMALL_SUPPLY_QLEN);
1141 }
1142}
1143
1144/*
1145 * Try to supply buffers to the card if there are free entries in the queues.
1146 * We assume that all buffers are within the address space accessible by the
1147 * card (32-bit), so we don't need bounce buffers.
1148 */
1149static void
1150fatm_supply_large_buffers(struct fatm_softc *sc)
1151{
1152 int nbufs, nblocks, cnt;
1153 struct supqueue *q;
1154 struct rbd *bd;
1155 int i, j, error;
1156 struct mbuf *m;
1157 struct rbuf *rb;
1158 bus_addr_t phys;
1159
1160 nbufs = max(4 * sc->open_vccs, 32);
1161 nbufs = min(nbufs, LARGE_POOL_SIZE);
1162 nbufs -= sc->large_cnt;
1163
1164 nblocks = (nbufs + LARGE_SUPPLY_BLKSIZE - 1) / LARGE_SUPPLY_BLKSIZE;
1165
1166 for (cnt = 0; cnt < nblocks; cnt++) {
1167 q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.head);
1168
1169 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1170 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
1171 break;
1172
1173 bd = (struct rbd *)q->q.ioblk;
1174
1175 for (i = 0; i < LARGE_SUPPLY_BLKSIZE; i++) {
1176 if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
1177 if_printf(&sc->ifatm.ifnet, "out of rbufs\n");
1178 break;
1179 }
1180 if ((m = m_getcl(M_DONTWAIT, MT_DATA,
1181 M_PKTHDR)) == NULL) {
1182 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1183 break;
1184 }
1185 /* No MEXT_ALIGN */
1186 m->m_data += MCLBYTES - LARGE_BUFFER_LEN;
1187 error = bus_dmamap_load(sc->rbuf_tag, rb->map,
1188 m->m_data, LARGE_BUFFER_LEN, dmaload_helper,
1189 &phys, BUS_DMA_NOWAIT);
1190 if (error) {
1191 if_printf(&sc->ifatm.ifnet,
1192 "dmamap_load mbuf failed %d", error);
1193 m_freem(m);
1194 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1195 break;
1196 }
1197
1198 bus_dmamap_sync(sc->rbuf_tag, rb->map,
1199 BUS_DMASYNC_PREREAD);
1200
1201 LIST_REMOVE(rb, link);
1202 LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);
1203
1204 rb->m = m;
1205 bd[i].handle = rb - sc->rbufs;
1206 H_SETDESC(bd[i].buffer, phys);
1207 }
1208
1209 if (i < LARGE_SUPPLY_BLKSIZE) {
1210 for (j = 0; j < i; j++) {
1211 rb = sc->rbufs + bd[j].handle;
1212 bus_dmamap_unload(sc->rbuf_tag, rb->map);
1213 m_free(rb->m);
1214 rb->m = NULL;
1215
1216 LIST_REMOVE(rb, link);
1217 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1218 }
1219 break;
1220 }
1221 H_SYNCQ_PREWRITE(&sc->l1q_mem, bd,
1222 sizeof(struct rbd) * LARGE_SUPPLY_BLKSIZE);
1223
1224 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1225 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1226 WRITE4(sc, q->q.card, q->q.card_ioblk);
1227 BARRIER_W(sc);
1228
1229 sc->large_cnt += LARGE_SUPPLY_BLKSIZE;
1230
1231 NEXT_QUEUE_ENTRY(sc->l1queue.head, LARGE_SUPPLY_QLEN);
1232 }
1233}
1234
1235
1236/*
1237 * Actually start the card. The lock must be held here.
1238 * Reset, load the firmware, start it, initialize the queues, read the PROM
1239 * and supply receive buffers to the card.
1240 */
1241static void
1242fatm_init_locked(struct fatm_softc *sc)
1243{
1244 struct rxqueue *q;
1245 int i, c;
1246 uint32_t start;
1247
1248 DBG(sc, INIT, ("initialize"));
1249 if (sc->ifatm.ifnet.if_flags & IFF_RUNNING)
1250 fatm_stop(sc);
1251
1252 /*
1253 * Hard reset the board
1254 */
1255 if (fatm_reset(sc))
1256 return;
1257
1258 start = firmware_load(sc);
1259 if (fatm_start_firmware(sc, start) || fatm_init_cmd(sc) ||
1260 fatm_getprom(sc)) {
1261 fatm_reset(sc);
1262 return;
1263 }
1264
1265 /*
1266 * Handle media
1267 */
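/*
 * The PCR below is the cell rate of the physical layer; e.g. 353207
 * cells/sec is the 149.76 Mbit/s SONET payload rate divided by the
 * 424 bits of a cell.
 */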
1268 c = READ4(sc, FATMO_MEDIA_TYPE);
1269 switch (c) {
1270
1271 case FORE_MT_TAXI_100:
1272 sc->ifatm.mib.media = IFM_ATM_TAXI_100;
1273 sc->ifatm.mib.pcr = 227273;
1274 break;
1275
1276 case FORE_MT_TAXI_140:
1277 sc->ifatm.mib.media = IFM_ATM_TAXI_140;
1278 sc->ifatm.mib.pcr = 318181;
1279 break;
1280
1281 case FORE_MT_UTP_SONET:
1282 sc->ifatm.mib.media = IFM_ATM_UTP_155;
1283 sc->ifatm.mib.pcr = 353207;
1284 break;
1285
1286 case FORE_MT_MM_OC3_ST:
1287 case FORE_MT_MM_OC3_SC:
1288 sc->ifatm.mib.media = IFM_ATM_MM_155;
1289 sc->ifatm.mib.pcr = 353207;
1290 break;
1291
1292 case FORE_MT_SM_OC3_ST:
1293 case FORE_MT_SM_OC3_SC:
1294 sc->ifatm.mib.media = IFM_ATM_SM_155;
1295 sc->ifatm.mib.pcr = 353207;
1296 break;
1297
1298 default:
1299 log(LOG_ERR, "fatm: unknown media type %d\n", c);
1300 sc->ifatm.mib.media = IFM_ATM_UNKNOWN;
1301 sc->ifatm.mib.pcr = 353207;
1302 break;
1303 }
1304 sc->ifatm.ifnet.if_baudrate = 53 * 8 * sc->ifatm.mib.pcr;
1305 utopia_init_media(&sc->utopia);
1306
1307 /*
1308 * Initialize the RBDs
1309 */
1310 for (i = 0; i < FATM_RX_QLEN; i++) {
1311 q = GET_QUEUE(sc->rxqueue, struct rxqueue, i);
1312 WRITE4(sc, q->q.card + 0, q->q.card_ioblk);
1313 }
1314 BARRIER_W(sc);
1315
1316 /*
1317 * Supply buffers to the card
1318 */
1319 fatm_supply_small_buffers(sc);
1320 fatm_supply_large_buffers(sc);
1321
1322 /*
1323 * Now set the flags to indicate that we are ready
1324 */
1325 sc->ifatm.ifnet.if_flags |= IFF_RUNNING;
1326
1327 /*
1328 * Start the watchdog timer
1329 */
1330 sc->ifatm.ifnet.if_timer = 5;
1331
1332 /* start SUNI */
1333 utopia_start(&sc->utopia);
1334
1335 ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm,
1336 sc->utopia.carrier == UTP_CARR_OK);
1337
1338 DBG(sc, INIT, ("done"));
1339}
1340
1341/*
1342 * This is exported as the initialization function.
1343 */
1344static void
1345fatm_init(void *p)
1346{
1347 struct fatm_softc *sc = p;
1348
1349 FATM_LOCK(sc);
1350 fatm_init_locked(sc);
1351 FATM_UNLOCK(sc);
1352}
1353
1354/************************************************************/
1355/*
1356 * The INTERRUPT handling
1357 */
1358/*
1359 * Check the command queue. If a command was completed, call the completion
1360 * function for that command.
1361 */
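/*
 * The queues are rings: commands are issued at 'head' and reaped at
 * 'tail', so draining advances 'tail' until it meets an entry that
 * has not completed yet.
 */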
1362static void
1363fatm_intr_drain_cmd(struct fatm_softc *sc)
1364{
1365 struct cmdqueue *q;
1366 int stat;
1367
1368 /*
1369 * Drain command queue
1370 */
1371 for (;;) {
1372 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.tail);
1373
1374 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1375 stat = H_GETSTAT(q->q.statp);
1376
1377 if (stat != FATM_STAT_COMPLETE &&
1378 stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
1379 stat != FATM_STAT_ERROR)
1380 break;
1381
1382 (*q->cb)(sc, q);
1383
1384 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1385 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1386
1387 NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
1388 }
1389}
1390
1391/*
1392 * Drain the small buffer supply queue.
1393 */
1394static void
1395fatm_intr_drain_small_buffers(struct fatm_softc *sc)
1396{
1397 struct supqueue *q;
1398 int stat;
1399
1400 for (;;) {
1401 q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.tail);
1402
1403 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1404 stat = H_GETSTAT(q->q.statp);
1405
1406 if ((stat & FATM_STAT_COMPLETE) == 0)
1407 break;
1408 if (stat & FATM_STAT_ERROR)
1409 log(LOG_ERR, "%s: status %x\n", __func__, stat);
1410
1411 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1412 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1413
1414 NEXT_QUEUE_ENTRY(sc->s1queue.tail, SMALL_SUPPLY_QLEN);
1415 }
1416}
1417
1418/*
1419 * Drain the large buffer supply queue.
1420 */
1421static void
1422fatm_intr_drain_large_buffers(struct fatm_softc *sc)
1423{
1424 struct supqueue *q;
1425 int stat;
1426
1427 for (;;) {
1428 q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.tail);
1429
1430 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1431 stat = H_GETSTAT(q->q.statp);
1432
1433 if ((stat & FATM_STAT_COMPLETE) == 0)
1434 break;
1435 if (stat & FATM_STAT_ERROR)
1436 log(LOG_ERR, "%s status %x\n", __func__, stat);
1437
1438 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1439 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1440
1441 NEXT_QUEUE_ENTRY(sc->l1queue.tail, LARGE_SUPPLY_QLEN);
1442 }
1443}
1444
1445/*
1446 * Check the receive queue. Send any received PDU up the protocol stack
1447 * (except when there was an error or the VCI appears to be closed; in
1448 * that case the PDU is discarded).
1449 */
1450static void
1451fatm_intr_drain_rx(struct fatm_softc *sc)
1452{
1453 struct rxqueue *q;
1454 int stat, mlen;
1455 u_int i;
1456 uint32_t h;
1457 struct mbuf *last, *m0;
1458 struct rpd *rpd;
1459 struct rbuf *rb;
1460 u_int vci, vpi, pt;
1461 struct atm_pseudohdr aph;
1462 struct ifnet *ifp;
1463 struct card_vcc *vc;
1464
1465 for (;;) {
1466 q = GET_QUEUE(sc->rxqueue, struct rxqueue, sc->rxqueue.tail);
1467
1468 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1469 stat = H_GETSTAT(q->q.statp);
1470
1471 if ((stat & FATM_STAT_COMPLETE) == 0)
1472 break;
1473
1474 rpd = (struct rpd *)q->q.ioblk;
1475 H_SYNCQ_POSTREAD(&sc->rxq_mem, rpd, RPD_SIZE);
1476
1477 rpd->nseg = le32toh(rpd->nseg);
1478 mlen = 0;
1479 m0 = last = 0;
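/*
 * Chain the segments into one mbuf chain and return each rbuf
 * to the free list; 'handle' is an index into sc->rbufs.
 */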
1480 for (i = 0; i < rpd->nseg; i++) {
1481 rb = sc->rbufs + rpd->segment[i].handle;
1482 if (m0 == NULL) {
1483 m0 = last = rb->m;
1484 } else {
1485 last->m_next = rb->m;
1486 last = rb->m;
1487 }
1488 last->m_next = NULL;
1489 if (last->m_flags & M_EXT)
1490 sc->large_cnt--;
1491 else
1492 sc->small_cnt--;
1493 bus_dmamap_sync(sc->rbuf_tag, rb->map,
1494 BUS_DMASYNC_POSTREAD);
1495 bus_dmamap_unload(sc->rbuf_tag, rb->map);
1496 rb->m = NULL;
1497
1498 LIST_REMOVE(rb, link);
1499 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1500
1501 last->m_len = le32toh(rpd->segment[i].length);
1502 mlen += last->m_len;
1503 }
1504
1505 m0->m_pkthdr.len = mlen;
1506 m0->m_pkthdr.rcvif = &sc->ifatm.ifnet;
1507
1508 h = le32toh(rpd->atm_header);
1509 vpi = (h >> 20) & 0xff;
1510 vci = (h >> 4 ) & 0xffff;
1511 pt = (h >> 1 ) & 0x7;
1512
1513 /*
1514 * Locate the VCC this packet belongs to
1515 */
1516 if (!VC_OK(sc, vpi, vci))
1517 vc = NULL;
1518 else if ((vc = sc->vccs[vci]) == NULL ||
1519 !(sc->vccs[vci]->vflags & FATM_VCC_OPEN)) {
1520 sc->istats.rx_closed++;
1521 vc = NULL;
1522 }
1523
1524 DBG(sc, RCV, ("RCV: vc=%u.%u pt=%u mlen=%d %s", vpi, vci,
1525 pt, mlen, vc == NULL ? "dropped" : ""));
1526
1527 if (vc == NULL) {
1528 m_freem(m0);
1529 } else {
1530 ATM_PH_FLAGS(&aph) = vc->param.flags;
1531 ATM_PH_VPI(&aph) = vpi;
1532 ATM_PH_SETVCI(&aph, vci);
1533
1534 ifp = &sc->ifatm.ifnet;
1535 ifp->if_ipackets++;
1536
1537 vc->ipackets++;
1538 vc->ibytes += m0->m_pkthdr.len;
1539
1540 atm_input(ifp, &aph, m0, vc->rxhand);
1541 }
1542
1543 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1544 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1545
1546 WRITE4(sc, q->q.card, q->q.card_ioblk);
1547 BARRIER_W(sc);
1548
1549 NEXT_QUEUE_ENTRY(sc->rxqueue.tail, FATM_RX_QLEN);
1550 }
1551}
1552
1553/*
1554 * Check the transmit queue. Free the mbuf chains that we were transmitting.
1555 */
1556static void
1557fatm_intr_drain_tx(struct fatm_softc *sc)
1558{
1559 struct txqueue *q;
1560 int stat;
1561
1562 /*
1563 * Drain tx queue
1564 */
1565 for (;;) {
1566 q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.tail);
1567
1568 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1569 stat = H_GETSTAT(q->q.statp);
1570
1571 if (stat != FATM_STAT_COMPLETE &&
1572 stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
1573 stat != FATM_STAT_ERROR)
1574 break;
1575
1576 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1577 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1578
1579 bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_POSTWRITE);
1580 bus_dmamap_unload(sc->tx_tag, q->map);
1581
1582 m_freem(q->m);
1583 q->m = NULL;
1584 sc->txcnt--;
1585
1586 NEXT_QUEUE_ENTRY(sc->txqueue.tail, FATM_TX_QLEN);
1587 }
1588}
1589
1590/*
1591 * Interrupt handler
1592 */
1593static void
1594fatm_intr(void *p)
1595{
1596 struct fatm_softc *sc = (struct fatm_softc *)p;
1597
1598 FATM_LOCK(sc);
1599 if (!READ4(sc, FATMO_PSR)) {
1600 FATM_UNLOCK(sc);
1601 return;
1602 }
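/* The PSR was non-zero, so this interrupt is ours; acknowledge it. */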
1603 WRITE4(sc, FATMO_HCR, FATM_HCR_CLRIRQ);
1604
1605 if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) {
1606 FATM_UNLOCK(sc);
1607 return;
1608 }
1609 fatm_intr_drain_cmd(sc);
1610 fatm_intr_drain_rx(sc);
1611 fatm_intr_drain_tx(sc);
1612 fatm_intr_drain_small_buffers(sc);
1613 fatm_intr_drain_large_buffers(sc);
1614 fatm_supply_small_buffers(sc);
1615 fatm_supply_large_buffers(sc);
1616
1617 FATM_UNLOCK(sc);
1618
1619 if (sc->retry_tx && _IF_QLEN(&sc->ifatm.ifnet.if_snd))
1620 (*sc->ifatm.ifnet.if_start)(&sc->ifatm.ifnet);
1621}
1622
1623/*
1624 * Get device statistics. This must be called with the softc locked.
1625 * We use a preallocated buffer, so we need to protect this buffer.
1626 * We do this by using a condition variable and a flag. If the flag is set
1627 * the buffer is in use by one thread (one thread is executing a GETSTAT
1628 * card command). In this case all other threads that are trying to get
1629 * statistics block on that condition variable. When the thread finishes
1630 * using the buffer it resets the flag and signals the condition variable. This
1631 * will wake up the next thread that is waiting for the buffer. If the interface
1632 * is stopped the stopping function will broadcast the cv. All threads will
1633 * find that the interface has been stopped and return.
1634 *
1635 * Acquiring the buffer is done by the fatm_getstat() function. The freeing
1636 * must be done by the caller when he has finished using the buffer.
1637 */
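/*
 * A caller is therefore expected to follow roughly this pattern
 * (a sketch only; the actual ioctl path may differ):
 *
 *	FATM_LOCK(sc);
 *	error = fatm_getstat(sc);
 *	if (error == 0)
 *		... copy the statistics out of sc->sadi_mem.mem ...
 *	sc->flags &= ~FATM_STAT_INUSE;
 *	cv_signal(&sc->cv_stat);
 *	FATM_UNLOCK(sc);
 */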
1638static void
1639fatm_getstat_complete(struct fatm_softc *sc, struct cmdqueue *q)
1640{
1641
1642 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1643 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
1644 sc->istats.get_stat_errors++;
1645 q->error = EIO;
1646 }
1647 wakeup(&sc->sadi_mem);
1648}
1649static int
1650fatm_getstat(struct fatm_softc *sc)
1651{
1652 int error;
1653 struct cmdqueue *q;
1654
1655 /*
1656 * Wait until either the interface is stopped or we can get the
1657 * statistics buffer
1658 */
1659 for (;;) {
1660 if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
1661 return (EIO);
1662 if (!(sc->flags & FATM_STAT_INUSE))
1663 break;
1664 cv_wait(&sc->cv_stat, &sc->mtx);
1665 }
1666 sc->flags |= FATM_STAT_INUSE;
1667
1668 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
1669
1670 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1671 if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
1672 sc->istats.cmd_queue_full++;
1673 return (EIO);
1674 }
1675 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
1676
1677 q->error = 0;
1678 q->cb = fatm_getstat_complete;
1679 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1680 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1681
1682 bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
1683 BUS_DMASYNC_PREREAD);
1684
1685 WRITE4(sc, q->q.card + FATMOC_GSTAT_BUF,
1686 sc->sadi_mem.paddr);
1687 BARRIER_W(sc);
1688 WRITE4(sc, q->q.card + FATMOC_OP,
1689 FATM_OP_REQUEST_STATS | FATM_OP_INTERRUPT_SEL);
1690 BARRIER_W(sc);
1691
1692 /*
1693 * Wait for the command to complete
1694 */
1695 error = msleep(&sc->sadi_mem, &sc->mtx, PZERO | PCATCH,
1696 "fatm_stat", hz);
1697
1698 switch (error) {
1699
1700 case EWOULDBLOCK:
1701 error = EIO;
1702 break;
1703
1704 case ERESTART:
1705 error = EINTR;
1706 break;
1707
1708 case 0:
1709 bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
1710 BUS_DMASYNC_POSTREAD);
1711 error = q->error;
1712 break;
1713 }
1714
1715 /*
1716 * Swap statistics
1717 */
1718 if (q->error == 0) {
1719 u_int i;
1720 uint32_t *p = (uint32_t *)sc->sadi_mem.mem;
1721
1722 for (i = 0; i < sizeof(struct fatm_stats) / sizeof(uint32_t);
1723 i++, p++)
1724 *p = be32toh(*p);
1725 }
1726
1727 return (error);
1728}
1729
1730/*
1731 * Create a copy of a single mbuf. It can have either internal or
1732 * external data, and it may have a packet header. External data is really
1733 * copied, so the new buffer is writeable.
1734 */
1735static struct mbuf *
1736copy_mbuf(struct mbuf *m)
1737{
1738 struct mbuf *new;
1739
1740 MGET(new, M_DONTWAIT, MT_DATA);
1741 if (new == NULL)
1742 return (NULL);
1743
1744 if (m->m_flags & M_PKTHDR) {
1745 M_MOVE_PKTHDR(new, m);
1746 if (m->m_len > MHLEN) {
1747 MCLGET(new, M_TRYWAIT);
1748 if ((new->m_flags & M_EXT) == 0) { /* cluster allocation failed */
1749 m_free(new);
1750 return (NULL);
1751 }
1752 }
1753 } else {
1754 if (m->m_len > MLEN) {
1755 MCLGET(new, M_TRYWAIT);
1756 if ((new->m_flags & M_EXT) == 0) { /* cluster allocation failed */
1757 m_free(new);
1758 return (NULL);
1759 }
1760 }
1761 }
1762
1763 bcopy(m->m_data, new->m_data, m->m_len);
1764 new->m_len = m->m_len;
1765 new->m_flags &= ~M_RDONLY;
1766
1767 return (new);
1768}
1769
1770/*
1771 * All segments must have a four byte aligned buffer address and a four
1772 * byte aligned length. Step through an mbuf chain and check these conditions.
1773 * If the buffer address is not aligned and this is a normal mbuf, move
1774 * the data down. Else make a copy of the mbuf with aligned data.
1775 * If the buffer length is not aligned, steal data from the next mbuf.
1776 * We don't need to check whether this has more than one external reference,
1777 * because stealing data doesn't change the external cluster.
1778 * If the last mbuf is not aligned, fill with zeroes.
1779 *
1780 * Return the packet length (we should really have this in the packet header),
1781 * but be careful not to count the zero fill at the end.
1782 *
1783 * If fixing fails free the chain and zero the pointer.
1784 *
1785 * We assume that aligning the virtual address also aligns the mapped bus
1786 * address.
1787 */
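/*
 * Example: a 5-byte mbuf followed by a 7-byte mbuf becomes 8 + 4
 * bytes; three bytes are stolen forward into the first mbuf and the
 * then misaligned data of the second mbuf is moved down on the next
 * iteration. The returned length is still 12.
 */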
1788static u_int
1789fatm_fix_chain(struct fatm_softc *sc, struct mbuf **mp)
1790{
1791 struct mbuf *m = *mp, *prev = NULL, *next, *new;
1792 u_int mlen = 0, fill = 0;
1793 int first, off;
1794 u_char *d, *cp;
1795
1796 do {
1797 next = m->m_next;
1798
1799 if ((uintptr_t)mtod(m, void *) % 4 != 0 ||
1800 (m->m_len % 4 != 0 && next)) {
1801 /*
1802 * Needs fixing
1803 */
1804 first = (m == *mp);
1805
1806 d = mtod(m, u_char *);
1807 if ((off = (uintptr_t)(void *)d % 4) != 0) {
1808 if (!(m->m_flags & M_EXT) || !MEXT_IS_REF(m)) {
1809 sc->istats.fix_addr_copy++;
1810 bcopy(d, d - off, m->m_len);
1811 m->m_data = (caddr_t)(d - off);
1812 } else {
1813 if ((new = copy_mbuf(m)) == NULL) {
1814 sc->istats.fix_addr_noext++;
1815 goto fail;
1816 }
1817 sc->istats.fix_addr_ext++;
1818 if (prev)
1819 prev->m_next = new;
1820 new->m_next = next;
1821 m_free(m);
1822 m = new;
1823 }
1824 }
1825
1826 if ((off = m->m_len % 4) != 0) {
1827 if ((m->m_flags & M_EXT) && MEXT_IS_REF(m)) {
1828 if ((new = copy_mbuf(m)) == NULL) {
1829 sc->istats.fix_len_noext++;
1830 goto fail;
1831 }
1832 sc->istats.fix_len_copy++;
1833 if (prev)
1834 prev->m_next = new;
1835 new->m_next = next;
1836 m_free(m);
1837 m = new;
1838 } else
1839 sc->istats.fix_len++;
1840 d = mtod(m, u_char *) + m->m_len;
1841 off = 4 - off;
1842 while (off) {
1843 if (next == NULL) {
1844 *d++ = 0;
1845 fill++;
1846 } else if (next->m_len == 0) {
1847 sc->istats.fix_empty++;
1848 next = m_free(next);
1849 continue;
1850 } else {
1851 cp = mtod(next, u_char *);
1852 *d++ = *cp++;
1853 next->m_len--;
1854 next->m_data = (caddr_t)cp;
1855 }
1856 off--;
1857 m->m_len++;
1858 }
1859 }
1860
1861 if (first)
1862 *mp = m;
1863 }
1864
1865 mlen += m->m_len;
1866 prev = m;
1867 } while ((m = next) != NULL);
1868
1869 return (mlen - fill);
1870
1871 fail:
1872 m_freem(*mp);
1873 *mp = NULL;
1874 return (0);
1875}
1876
1877/*
1878 * The helper function is used to load the computed physical addresses
1879 * into the transmit descriptor.
1880 */
1881static void
1882fatm_tpd_load(void *varg, bus_dma_segment_t *segs, int nsegs,
1883 bus_size_t mapsize, int error)
1884{
1885 struct tpd *tpd = varg;
1886
1887 if (error)
1888 return;
1889
1890 KASSERT(nsegs <= TPD_EXTENSIONS + TXD_FIXED, ("too many segments"));
1891
1892 tpd->spec = 0;
1893 while (nsegs--) {
1894 H_SETDESC(tpd->segment[tpd->spec].buffer, segs->ds_addr);
1895 H_SETDESC(tpd->segment[tpd->spec].length, segs->ds_len);
1896 tpd->spec++;
1897 segs++;
1898 }
1899}
1900
1901/*
1902 * Start output.
1903 *
1904 * Note that we update the internal statistics without holding the lock here.
1905 */
1906 static int
1907fatm_tx(struct fatm_softc *sc, struct mbuf *m, struct card_vcc *vc, u_int mlen)
1899{
1900 struct txqueue *q;
1901 u_int nblks;
1902 int error, aal, nsegs;
1903 struct tpd *tpd;
1904
1905 /*
1906 * Get a queue element.
1907 * If there isn't one - try to drain the transmit queue
1908 * We used to sleep here if that doesn't help, but we
1909 * should not sleep here, because we are called with locks.
1910 */
1911 q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.head);
1912
1913 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1914 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
1915 fatm_intr_drain_tx(sc);
1916 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1917 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
1918 if (sc->retry_tx) {
1919 sc->istats.tx_retry++;
1920 IF_PREPEND(&sc->ifatm.ifnet.if_snd, m);
1921 return (1);
1922 }
1923 sc->istats.tx_queue_full++;
1924 m_freem(m);
1925 return (0);
1926 }
1927 sc->istats.tx_queue_almost_full++;
1928 }
1929
1930 tpd = q->q.ioblk;
1931
1932 m->m_data += sizeof(struct atm_pseudohdr);
1933 m->m_len -= sizeof(struct atm_pseudohdr);
1934
1935 /* map the mbuf */
1936 error = bus_dmamap_load_mbuf(sc->tx_tag, q->map, m,
1937 fatm_tpd_load, tpd, BUS_DMA_NOWAIT);
1938 if(error) {
1939 sc->ifatm.ifnet.if_oerrors++;
1940 if_printf(&sc->ifatm.ifnet, "mbuf loaded error=%d\n", error);
1941 m_freem(m);
1942 return (0);
1943 }
1944 nsegs = tpd->spec;
1945
1946 bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_PREWRITE);
1947
1948 /*
1949 * OK. Now go and do it.
1950 */
1908{
1909 struct txqueue *q;
1910 u_int nblks;
1911 int error, aal, nsegs;
1912 struct tpd *tpd;
1913
1914 /*
1915 * Get a queue element.
1916 * If there isn't one, try to drain the transmit queue.
1917 * We used to sleep here if that didn't help, but we
1918 * must not sleep here, because we are called with locks held.
1919 */
1920 q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.head);
1921
1922 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1923 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
1924 fatm_intr_drain_tx(sc);
1925 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1926 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
1927 if (sc->retry_tx) {
1928 sc->istats.tx_retry++;
1929 IF_PREPEND(&sc->ifatm.ifnet.if_snd, m);
1930 return (1);
1931 }
1932 sc->istats.tx_queue_full++;
1933 m_freem(m);
1934 return (0);
1935 }
1936 sc->istats.tx_queue_almost_full++;
1937 }
1938
1939 tpd = q->q.ioblk;
1940
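/*
 * Strip the ATM pseudo header; fatm_start() read it but left it
 * in the mbuf.
 */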
1941 m->m_data += sizeof(struct atm_pseudohdr);
1942 m->m_len -= sizeof(struct atm_pseudohdr);
1943
1944 /* map the mbuf */
1945 error = bus_dmamap_load_mbuf(sc->tx_tag, q->map, m,
1946 fatm_tpd_load, tpd, BUS_DMA_NOWAIT);
1947 if (error) {
1948 sc->ifatm.ifnet.if_oerrors++;
1949 if_printf(&sc->ifatm.ifnet, "mbuf loaded error=%d\n", error);
1950 m_freem(m);
1951 return (0);
1952 }
1953 nsegs = tpd->spec;
1954
1955 bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_PREWRITE);
1956
1957 /*
1958 * OK. Now go and do it.
1959 */
1960 aal = (vc->param.aal == ATMIO_AAL_5) ? 5 : 0;
1961
1962 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1963 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1964 q->m = m;
1965
1966 /*
1967 * If the transmit queue is almost full, schedule a
1968 * transmit interrupt so that transmit descriptors can
1969 * be recycled.
1970 */
1971 H_SETDESC(tpd->spec, TDX_MKSPEC((sc->txcnt >=
1972 (4 * FATM_TX_QLEN) / 5), aal, nsegs, mlen));
1973 H_SETDESC(tpd->atm_header, TDX_MKHDR(vc->param.vpi,
1974 vc->param.vci, 0, 0));
1975
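/*
 * UBR traffic goes out on stream 0. For CBR pick the last rate table
 * entry whose cell rate still reaches the requested PCR (the table is
 * assumed to be sorted by decreasing cell rate).
 */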
1976 if (vc->param.traffic == ATMIO_TRAFFIC_UBR)
1977 H_SETDESC(tpd->stream, 0);
1978 else {
1979 u_int i;
1980
1981 for (i = 0; i < RATE_TABLE_SIZE; i++)
1982 if (rate_table[i].cell_rate < vc->param.tparam.pcr)
1983 break;
1984 if (i > 0)
1985 i--;
1986 H_SETDESC(tpd->stream, rate_table[i].ratio);
1987 }
1988 H_SYNCQ_PREWRITE(&sc->txq_mem, tpd, TPD_SIZE);
1989
1990 nblks = TDX_SEGS2BLKS(nsegs);
1991
1992 DBG(sc, XMIT, ("XMIT: mlen=%d spec=0x%x nsegs=%d blocks=%d",
1993 mlen, le32toh(tpd->spec), nsegs, nblks));
1994
1995 WRITE4(sc, q->q.card + 0, q->q.card_ioblk | nblks);
1996 BARRIER_W(sc);
1997
1998 sc->txcnt++;
1999 sc->ifatm.ifnet.if_opackets++;
2000 vc->obytes += m->m_pkthdr.len;
2001 vc->opackets++;
2002
2003 NEXT_QUEUE_ENTRY(sc->txqueue.head, FATM_TX_QLEN);
2004
2005 return (0);
2006}
2007
2008static void
2009fatm_start(struct ifnet *ifp)
2010{
2011 struct atm_pseudohdr aph;
2012 struct fatm_softc *sc;
2013 struct mbuf *m;
2014 u_int mlen, vpi, vci;
2015 struct card_vcc *vc;
2016
2017 sc = (struct fatm_softc *)ifp->if_softc;
2018
2019 while (1) {
2020 IF_DEQUEUE(&ifp->if_snd, m);
2021 if (m == NULL)
2022 break;
2023
2024 /*
2025 * Loop through the mbuf chain and compute the total length
2026 * of the packet. Check that all data pointers are
2027 * 4 byte aligned. If they are not, call fatm_fix_chain() to
2028 * fix that problem. This comes more or less from the
2029 * en driver.
2030 */
2031 mlen = fatm_fix_chain(sc, &m);
2032 if (m == NULL)
2033 continue;
2034
2035 if (m->m_len < sizeof(struct atm_pseudohdr) &&
2036 (m = m_pullup(m, sizeof(struct atm_pseudohdr))) == NULL)
2037 continue;
2038
2039 aph = *mtod(m, struct atm_pseudohdr *);
2040 mlen -= sizeof(struct atm_pseudohdr);
2041
2042 if (mlen == 0) {
2043 m_freem(m);
2044 continue;
2045 }
2046 if (mlen > FATM_MAXPDU) {
2047 sc->istats.tx_pdu2big++;
2048 m_freem(m);
2049 continue;
2050 }
2051
2052 vci = ATM_PH_VCI(&aph);
2053 vpi = ATM_PH_VPI(&aph);
2054
2055 /*
2056 * From here on we need the softc locked
2057 */
2058 FATM_LOCK(sc);
2059 if (!(ifp->if_flags & IFF_RUNNING)) {
2060 FATM_UNLOCK(sc);
2061 m_freem(m);
2062 break;
2063 }
2064 if (!VC_OK(sc, vpi, vci) || (vc = sc->vccs[vci]) == NULL ||
2065 !(vc->vflags & FATM_VCC_OPEN)) {
2066 FATM_UNLOCK(sc);
2067 m_freem(m);
2068 continue;
2069 }
2070 if (fatm_tx(sc, m, vc, mlen)) {
2071 FATM_UNLOCK(sc);
2072 break;
2073 }
2074 FATM_UNLOCK(sc);
2075 }
2076}
2077
2078/*
2079 * VCC management
2080 *
2081 * This may seem complicated. The reason is that we need an
2082 * asynchronous open/close for the NATM VCCs because our ioctl handler
2083 * is called with the radix node head of the routing table locked. Therefore
2084 * we cannot sleep there and wait for the open/close to succeed. For this
2085 * reason we just initiate the operation from the ioctl.
2086 */
2087
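/*
 * The resulting flow, as a sketch: the ioctl issues the command via
 * fatm_start_vcc(); a synchronous opener sleeps on the queue entry and
 * is woken by fatm_cmd_complete(), while an asynchronous (NATM) open
 * is finished later by fatm_open_complete() from the interrupt path.
 */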
2088/*
2089 * Command the card to open/close a VC.
2090 * Return the queue entry for waiting if we are successful.
2091 */
2092static struct cmdqueue *
2093fatm_start_vcc(struct fatm_softc *sc, u_int vpi, u_int vci, uint32_t cmd,
2094 u_int mtu, void (*func)(struct fatm_softc *, struct cmdqueue *))
2095{
2096 struct cmdqueue *q;
2097
2098 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
2099
2100 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2101 if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
2102 sc->istats.cmd_queue_full++;
2103 return (NULL);
2104 }
2105 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
2106
2107 q->error = 0;
2108 q->cb = func;
2109 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
2110 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
2111
2112 WRITE4(sc, q->q.card + FATMOC_ACTIN_VPVC, MKVPVC(vpi, vci));
2113 BARRIER_W(sc);
2114 WRITE4(sc, q->q.card + FATMOC_ACTIN_MTU, mtu);
2115 BARRIER_W(sc);
2116 WRITE4(sc, q->q.card + FATMOC_OP, cmd);
2117 BARRIER_W(sc);
2118
2119 return (q);
2120}
2121
2122/*
2123 * The VC has been opened/closed and somebody has been waiting for this.
2124 * Wake him up.
2125 */
2126static void
2127fatm_cmd_complete(struct fatm_softc *sc, struct cmdqueue *q)
2162{
2128{
2129
2130 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2131 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2132 sc->istats.get_stat_errors++;
2133 q->error = EIO;
2134 }
2135 wakeup(q);
2136}

/*
 * Open complete
 */
static void
fatm_open_finish(struct fatm_softc *sc, struct card_vcc *vc)
{
    vc->vflags &= ~FATM_VCC_TRY_OPEN;
    vc->vflags |= FATM_VCC_OPEN;

    /* inform management if this is not an NG
     * VCC or it's an NG PVC. */
    if (!(vc->param.flags & ATMIO_FLAG_NG) ||
        (vc->param.flags & ATMIO_FLAG_PVC))
        ATMEV_SEND_VCC_CHANGED(&sc->ifatm, 0, vc->param.vci, 1);
}

/*
 * The VC that we have tried to open asynchronously has been opened.
 */
static void
fatm_open_complete(struct fatm_softc *sc, struct cmdqueue *q)
{
    u_int vci;
    struct card_vcc *vc;

    vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
    vc = sc->vccs[vci];
    H_SYNCSTAT_POSTREAD(sc, q->q.statp);
    if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
        sc->istats.get_stat_errors++;
        sc->vccs[vci] = NULL;
        uma_zfree(sc->vcc_zone, vc);
        if_printf(&sc->ifatm.ifnet, "opening VCI %u failed\n", vci);
        return;
    }
    fatm_open_finish(sc, vc);
}

/*
 * Wait on the queue entry until the VCC is opened/closed.
 */
static int
fatm_waitvcc(struct fatm_softc *sc, struct cmdqueue *q)
{
    int error;

    /*
     * Wait for the command to complete
     */
    error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_vci", hz);

    if (error != 0)
        return (error);
    return (q->error);
}

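/*
 * Note on the open/close paths below: with wait set the caller sleeps
 * in fatm_waitvcc() and fatm_cmd_complete() wakes it up; with wait
 * clear (the NATM pseudo-ioctl path) fatm_open_complete() or
 * fatm_close_complete() finishes the operation from the interrupt
 * handler instead.
 */
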
/*
 * Open a VCC. If wait is zero this just initiates the operation and
 * the completion callback finishes it; otherwise wait for the result.
 */
static int
fatm_open_vcc(struct fatm_softc *sc, struct atmio_openvcc *op, int wait)
{
    uint32_t cmd;
    int error;
    struct cmdqueue *q;
    struct card_vcc *vc;

    /*
     * Check parameters
     */
    if ((op->param.flags & ATMIO_FLAG_NOTX) &&
        (op->param.flags & ATMIO_FLAG_NORX))
        return (EINVAL);

    if (!VC_OK(sc, op->param.vpi, op->param.vci))
        return (EINVAL);
    if (op->param.aal != ATMIO_AAL_0 && op->param.aal != ATMIO_AAL_5)
        return (EINVAL);

    vc = uma_zalloc(sc->vcc_zone, M_NOWAIT | M_ZERO);
    if (vc == NULL)
        return (ENOMEM);

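    /*
     * The card_vcc has been allocated up front; on any failure below it
     * is released at the 'done' label (on success vc is set to NULL
     * first so that it is not freed).
     */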
    error = 0;

    FATM_LOCK(sc);
    if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) {
        error = EIO;
        goto done;
    }
    if (sc->vccs[op->param.vci] != NULL) {
        error = EBUSY;
        goto done;
    }
    vc->param = op->param;
    vc->rxhand = op->rxhand;

    switch (op->param.traffic) {

      case ATMIO_TRAFFIC_UBR:
        break;

      case ATMIO_TRAFFIC_CBR:
        if (op->param.tparam.pcr == 0 ||
            op->param.tparam.pcr > sc->ifatm.mib.pcr) {
            error = EINVAL;
            goto done;
        }
        break;

      default:
        error = EINVAL;
        goto done;
    }
    vc->ibytes = vc->obytes = 0;
    vc->ipackets = vc->opackets = 0;

    /* Command and buffer strategy */
    cmd = FATM_OP_ACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL | (0 << 16);
    if (op->param.aal == ATMIO_AAL_0)
        cmd |= (0 << 8);
    else
        cmd |= (5 << 8);

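    /*
     * The AAL (0 or 5) is encoded in bits 8-15 of the opcode word and
     * the buffer strategy in bits 16-23 (always 0 here).
     */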
    q = fatm_start_vcc(sc, op->param.vpi, op->param.vci, cmd, 1,
        wait ? fatm_cmd_complete : fatm_open_complete);
    if (q == NULL) {
        error = EIO;
        goto done;
    }

    vc->vflags = FATM_VCC_TRY_OPEN;
    sc->vccs[op->param.vci] = vc;
    sc->open_vccs++;

    if (wait) {
        error = fatm_waitvcc(sc, q);
        if (error != 0) {
            sc->vccs[op->param.vci] = NULL;
            sc->open_vccs--;
            goto done;
        }
        fatm_open_finish(sc, vc);
    }

    /* don't free below */
    vc = NULL;

  done:
    FATM_UNLOCK(sc);
    if (vc != NULL)
        uma_zfree(sc->vcc_zone, vc);
    return (error);
}

/*
 * Finish close
 */
static void
fatm_close_finish(struct fatm_softc *sc, struct card_vcc *vc)
{
    /* inform management if this is not an NG
     * VCC or it's an NG PVC. */
    if (!(vc->param.flags & ATMIO_FLAG_NG) ||
        (vc->param.flags & ATMIO_FLAG_PVC))
        ATMEV_SEND_VCC_CHANGED(&sc->ifatm, 0, vc->param.vci, 0);

    sc->vccs[vc->param.vci] = NULL;
    sc->open_vccs--;

    uma_zfree(sc->vcc_zone, vc);
}

/*
 * The VC has been closed.
 */
static void
fatm_close_complete(struct fatm_softc *sc, struct cmdqueue *q)
{
    u_int vci;
    struct card_vcc *vc;

    vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
    vc = sc->vccs[vci];
    H_SYNCSTAT_POSTREAD(sc, q->q.statp);
    if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
        sc->istats.get_stat_errors++;
        /* keep the VCC in that state */
        if_printf(&sc->ifatm.ifnet, "closing VCI %u failed\n", vci);
        return;
    }

    fatm_close_finish(sc, vc);
}

/*
 * Close a VCC. If wait is zero this just initiates the close and the
 * completion callback finishes it; otherwise wait for the result.
 */
static int
fatm_close_vcc(struct fatm_softc *sc, struct atmio_closevcc *cl, int wait)
{
    int error;
    struct cmdqueue *q;
    struct card_vcc *vc;

    if (!VC_OK(sc, cl->vpi, cl->vci))
        return (EINVAL);

    error = 0;

    FATM_LOCK(sc);
    if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) {
        error = EIO;
        goto done;
    }
    vc = sc->vccs[cl->vci];
    if (vc == NULL || !(vc->vflags & (FATM_VCC_OPEN | FATM_VCC_TRY_OPEN))) {
        error = ENOENT;
        goto done;
    }

    q = fatm_start_vcc(sc, cl->vpi, cl->vci,
        FATM_OP_DEACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL, 1,
        wait ? fatm_cmd_complete : fatm_close_complete);
    if (q == NULL) {
        error = EIO;
        goto done;
    }

    vc->vflags &= ~(FATM_VCC_OPEN | FATM_VCC_TRY_OPEN);
    vc->vflags |= FATM_VCC_TRY_CLOSE;

    if (wait) {
        error = fatm_waitvcc(sc, q);
        if (error != 0)
            goto done;

        fatm_close_finish(sc, vc);
    }

  done:
    FATM_UNLOCK(sc);
    return (error);
}

/*
 * IOCTL handler
 */
static int
fatm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t arg)
{
    int error;
    struct fatm_softc *sc = ifp->if_softc;
    struct ifaddr *ifa = (struct ifaddr *)arg;
    struct ifreq *ifr = (struct ifreq *)arg;
    struct atmio_closevcc *cl = (struct atmio_closevcc *)arg;
    struct atmio_openvcc *op = (struct atmio_openvcc *)arg;
    struct atm_pseudoioctl *pa = (struct atm_pseudoioctl *)arg;
    struct atmio_vcctable *vtab;
    struct atmio_openvcc ena;
    struct atmio_closevcc dis;
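    /*
     * arg is cast to every possible request type up front; which of
     * these casts is actually valid depends on cmd below.
     */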

    error = 0;
    switch (cmd) {

      case SIOCATMENA:          /* internal NATM use */
        bzero(&ena, sizeof(ena));
        ena.param.flags = ATM_PH_FLAGS(&pa->aph) &
            (ATM_PH_AAL5 | ATM_PH_LLCSNAP);
        ena.param.vpi = ATM_PH_VPI(&pa->aph);
        ena.param.vci = ATM_PH_VCI(&pa->aph);
        ena.param.aal = (ATM_PH_FLAGS(&pa->aph) & ATM_PH_AAL5) ?
            ATMIO_AAL_5 : ATMIO_AAL_0;
        ena.param.traffic = ATMIO_TRAFFIC_UBR;
        ena.rxhand = pa->rxhand;
        error = fatm_open_vcc(sc, &ena, 0);
        break;

      case SIOCATMDIS:          /* internal NATM use */
        bzero(&dis, sizeof(dis));
        dis.vpi = ATM_PH_VPI(&pa->aph);
        dis.vci = ATM_PH_VCI(&pa->aph);
        error = fatm_close_vcc(sc, &dis, 0);
        break;

      case SIOCATMOPENVCC:
        error = fatm_open_vcc(sc, op, 1);
        break;

      case SIOCATMCLOSEVCC:
        error = fatm_close_vcc(sc, cl, 1);
        break;

      case SIOCSIFADDR:
        FATM_LOCK(sc);
        ifp->if_flags |= IFF_UP;
        if (!(ifp->if_flags & IFF_RUNNING))
            fatm_init_locked(sc);
        switch (ifa->ifa_addr->sa_family) {
#ifdef INET
          case AF_INET:
          case AF_INET6:
            ifa->ifa_rtrequest = atm_rtrequest;
            break;
#endif
          default:
            break;
        }
        FATM_UNLOCK(sc);
        break;

      case SIOCSIFFLAGS:
        FATM_LOCK(sc);
        if (ifp->if_flags & IFF_UP) {
            if (!(ifp->if_flags & IFF_RUNNING)) {
                fatm_init_locked(sc);
            }
        } else {
            if (ifp->if_flags & IFF_RUNNING) {
                fatm_stop(sc);
            }
        }
        FATM_UNLOCK(sc);
        break;

      case SIOCGIFMEDIA:
      case SIOCSIFMEDIA:
        if (ifp->if_flags & IFF_RUNNING)
            error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
        else
            error = EINVAL;
        break;

      case SIOCATMGVCCS:
        /* return vcc table */
        vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
            FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 1);
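        /*
         * The final argument of atm_getvccs() selects whether the
         * result table allocation may sleep: the ioctl path here may
         * (1), while the netgraph path below may not (0) and therefore
         * checks for a NULL result.
         */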
        error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
            vtab->count * sizeof(vtab->vccs[0]));
        free(vtab, M_DEVBUF);
        break;

      case SIOCATMGETVCCS:      /* internal netgraph use */
        vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
            FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 0);
        if (vtab == NULL) {
            error = ENOMEM;
            break;
        }
        *(void **)arg = vtab;
        break;

      default:
        DBG(sc, IOCTL, ("+++ cmd=%08lx arg=%p", cmd, arg));
        error = EINVAL;
        break;
    }

    return (error);
}

/*
 * Detach from the interface and free all resources allocated during
 * initialisation and later.
 */
static int
fatm_detach(device_t dev)
{
    u_int i;
    struct rbuf *rb;
    struct fatm_softc *sc;
    struct txqueue *tx;

    sc = (struct fatm_softc *)device_get_softc(dev);

    if (device_is_alive(dev)) {
        FATM_LOCK(sc);
        fatm_stop(sc);
        utopia_detach(&sc->utopia);
        FATM_UNLOCK(sc);
        atm_ifdetach(&sc->ifatm.ifnet);     /* XXX race */
    }

    if (sc->ih != NULL)
        bus_teardown_intr(dev, sc->irqres, sc->ih);

    while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
        if_printf(&sc->ifatm.ifnet, "rbuf %p still in use!\n", rb);
        bus_dmamap_unload(sc->rbuf_tag, rb->map);
        m_freem(rb->m);
        LIST_REMOVE(rb, link);
        LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
    }

    if (sc->txqueue.chunk != NULL) {
        for (i = 0; i < FATM_TX_QLEN; i++) {
            tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
            bus_dmamap_destroy(sc->tx_tag, tx->map);
        }
    }

    while ((rb = LIST_FIRST(&sc->rbuf_free)) != NULL) {
        bus_dmamap_destroy(sc->rbuf_tag, rb->map);
        LIST_REMOVE(rb, link);
    }

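    /*
     * Everything below may legitimately still be NULL when attach
     * failed half-way, so each resource is checked before it is
     * released.
     */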
    if (sc->rbufs != NULL)
        free(sc->rbufs, M_DEVBUF);
    if (sc->vccs != NULL)
        free(sc->vccs, M_DEVBUF);
    if (sc->vcc_zone != NULL)
        uma_zdestroy(sc->vcc_zone);

    if (sc->l1queue.chunk != NULL)
        free(sc->l1queue.chunk, M_DEVBUF);
    if (sc->s1queue.chunk != NULL)
        free(sc->s1queue.chunk, M_DEVBUF);
    if (sc->rxqueue.chunk != NULL)
        free(sc->rxqueue.chunk, M_DEVBUF);
    if (sc->txqueue.chunk != NULL)
        free(sc->txqueue.chunk, M_DEVBUF);
    if (sc->cmdqueue.chunk != NULL)
        free(sc->cmdqueue.chunk, M_DEVBUF);

    destroy_dma_memory(&sc->reg_mem);
    destroy_dma_memory(&sc->sadi_mem);
    destroy_dma_memory(&sc->prom_mem);
#ifdef TEST_DMA_SYNC
    destroy_dma_memoryX(&sc->s1q_mem);
    destroy_dma_memoryX(&sc->l1q_mem);
    destroy_dma_memoryX(&sc->rxq_mem);
    destroy_dma_memoryX(&sc->txq_mem);
    destroy_dma_memoryX(&sc->stat_mem);
#endif

    if (sc->tx_tag != NULL)
        if (bus_dma_tag_destroy(sc->tx_tag))
            printf("tx DMA tag busy!\n");

    if (sc->rbuf_tag != NULL)
        if (bus_dma_tag_destroy(sc->rbuf_tag))
            printf("rbuf DMA tag busy!\n");

    if (sc->parent_dmat != NULL)
        if (bus_dma_tag_destroy(sc->parent_dmat))
            printf("parent DMA tag busy!\n");

    if (sc->irqres != NULL)
        bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irqres);

    if (sc->memres != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            sc->memid, sc->memres);

    (void)sysctl_ctx_free(&sc->sysctl_ctx);

    cv_destroy(&sc->cv_stat);
    cv_destroy(&sc->cv_regs);

    mtx_destroy(&sc->mtx);

    return (0);
}

/*
 * Sysctl handler
 */
static int
fatm_sysctl_istats(SYSCTL_HANDLER_ARGS)
{
    struct fatm_softc *sc = arg1;
    u_long *ret;
    int error;

    ret = malloc(sizeof(sc->istats), M_TEMP, M_WAITOK);

    FATM_LOCK(sc);
    bcopy(&sc->istats, ret, sizeof(sc->istats));
    FATM_UNLOCK(sc);

    error = SYSCTL_OUT(req, ret, sizeof(sc->istats));
    free(ret, M_TEMP);

    return (error);
}

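/*
 * Both sysctl handlers snapshot the statistics into a temporary buffer
 * while holding the driver lock and call SYSCTL_OUT() only after
 * dropping it, since SYSCTL_OUT() may fault on the user buffer and
 * sleep.
 */
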
/*
 * Sysctl handler for card statistics.
 * This is disabled because it destroys the PHY statistics.
 */
static int
fatm_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
    struct fatm_softc *sc = arg1;
    int error;
    const struct fatm_stats *s;
    u_long *ret;
    u_int i;

    ret = malloc(sizeof(u_long) * FATM_NSTATS, M_TEMP, M_WAITOK);

    FATM_LOCK(sc);

    if ((error = fatm_getstat(sc)) == 0) {
        s = sc->sadi_mem.mem;
        i = 0;
        ret[i++] = s->phy_4b5b.crc_header_errors;
        ret[i++] = s->phy_4b5b.framing_errors;
        ret[i++] = s->phy_oc3.section_bip8_errors;
        ret[i++] = s->phy_oc3.path_bip8_errors;
        ret[i++] = s->phy_oc3.line_bip24_errors;
        ret[i++] = s->phy_oc3.line_febe_errors;
        ret[i++] = s->phy_oc3.path_febe_errors;
        ret[i++] = s->phy_oc3.corr_hcs_errors;
        ret[i++] = s->phy_oc3.ucorr_hcs_errors;
        ret[i++] = s->atm.cells_transmitted;
        ret[i++] = s->atm.cells_received;
        ret[i++] = s->atm.vpi_bad_range;
        ret[i++] = s->atm.vpi_no_conn;
        ret[i++] = s->atm.vci_bad_range;
        ret[i++] = s->atm.vci_no_conn;
        ret[i++] = s->aal0.cells_transmitted;
        ret[i++] = s->aal0.cells_received;
        ret[i++] = s->aal0.cells_dropped;
        ret[i++] = s->aal4.cells_transmitted;
        ret[i++] = s->aal4.cells_received;
        ret[i++] = s->aal4.cells_crc_errors;
        ret[i++] = s->aal4.cels_protocol_errors;
        ret[i++] = s->aal4.cells_dropped;
        ret[i++] = s->aal4.cspdus_transmitted;
        ret[i++] = s->aal4.cspdus_received;
        ret[i++] = s->aal4.cspdus_protocol_errors;
        ret[i++] = s->aal4.cspdus_dropped;
        ret[i++] = s->aal5.cells_transmitted;
        ret[i++] = s->aal5.cells_received;
        ret[i++] = s->aal5.congestion_experienced;
        ret[i++] = s->aal5.cells_dropped;
        ret[i++] = s->aal5.cspdus_transmitted;
        ret[i++] = s->aal5.cspdus_received;
        ret[i++] = s->aal5.cspdus_crc_errors;
        ret[i++] = s->aal5.cspdus_protocol_errors;
        ret[i++] = s->aal5.cspdus_dropped;
        ret[i++] = s->aux.small_b1_failed;
        ret[i++] = s->aux.large_b1_failed;
        ret[i++] = s->aux.small_b2_failed;
        ret[i++] = s->aux.large_b2_failed;
        ret[i++] = s->aux.rpd_alloc_failed;
        ret[i++] = s->aux.receive_carrier;
    }
    /* declare the buffer free */
    sc->flags &= ~FATM_STAT_INUSE;
    cv_signal(&sc->cv_stat);

    FATM_UNLOCK(sc);

    if (error == 0)
        error = SYSCTL_OUT(req, ret, sizeof(u_long) * FATM_NSTATS);
    free(ret, M_TEMP);

    return (error);
}
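
/*
 * Note: fatm_getstat() marks the statistics DMA buffer busy with
 * FATM_STAT_INUSE; the handler above clears the flag and signals
 * cv_stat in both the success and the error case so that a concurrent
 * caller can proceed.
 */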

#define MAXDMASEGS 32   /* maximum number of receive descriptors */

/*
 * Attach to the device.
 *
 * We assume that there is a global lock (Giant in this case) that protects
 * multiple threads from entering this function. This makes sense, doesn't it?
 */
static int
fatm_attach(device_t dev)
{
    struct ifnet *ifp;
    struct fatm_softc *sc;
    int unit;
    uint16_t cfg;
    int error = 0;
    struct rbuf *rb;
    u_int i;
    struct txqueue *tx;

    sc = device_get_softc(dev);
    unit = device_get_unit(dev);

    sc->ifatm.mib.device = ATM_DEVICE_PCA200E;
    sc->ifatm.mib.serial = 0;
    sc->ifatm.mib.hw_version = 0;
    sc->ifatm.mib.sw_version = 0;
    sc->ifatm.mib.vpi_bits = 0;
    sc->ifatm.mib.vci_bits = FORE_VCIBITS;
    sc->ifatm.mib.max_vpcs = 0;
    sc->ifatm.mib.max_vccs = FORE_MAX_VCC;
    sc->ifatm.mib.media = IFM_ATM_UNKNOWN;
    sc->ifatm.phy = &sc->utopia;

    LIST_INIT(&sc->rbuf_free);
    LIST_INIT(&sc->rbuf_used);

    /*
     * Initialize mutex and condition variables.
     */
    mtx_init(&sc->mtx, device_get_nameunit(dev),
        MTX_NETWORK_LOCK, MTX_DEF);

    cv_init(&sc->cv_stat, "fatm_stat");
    cv_init(&sc->cv_regs, "fatm_regs");

    sysctl_ctx_init(&sc->sysctl_ctx);

    /*
     * Make the sysctl tree
     */
    if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
        SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
        device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
        goto fail;

    if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
        OID_AUTO, "istats", CTLFLAG_RD, sc, 0, fatm_sysctl_istats,
        "LU", "internal statistics") == NULL)
        goto fail;

    if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
        OID_AUTO, "stats", CTLFLAG_RD, sc, 0, fatm_sysctl_stats,
        "LU", "card statistics") == NULL)
        goto fail;

    if (SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
        OID_AUTO, "retry_tx", CTLFLAG_RW, &sc->retry_tx, 0,
        "retry flag") == NULL)
        goto fail;

#ifdef FATM_DEBUG
    if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
        OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "debug flags")
        == NULL)
        goto fail;
    sc->debug = FATM_DEBUG;
#endif

    /*
     * Network subsystem stuff
     */
    ifp = &sc->ifatm.ifnet;
    ifp->if_softc = sc;
    ifp->if_unit = unit;
    ifp->if_name = "fatm";
    ifp->if_flags = IFF_SIMPLEX;
    ifp->if_ioctl = fatm_ioctl;
    ifp->if_start = fatm_start;
    ifp->if_watchdog = fatm_watchdog;
    ifp->if_init = fatm_init;
    ifp->if_linkmib = &sc->ifatm.mib;
    ifp->if_linkmiblen = sizeof(sc->ifatm.mib);

    /*
     * Enable memory mapping and bus mastering
     */
    cfg = pci_read_config(dev, PCIR_COMMAND, 2);
    cfg |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
    pci_write_config(dev, PCIR_COMMAND, cfg, 2);

    /*
     * Map memory
     */
    cfg = pci_read_config(dev, PCIR_COMMAND, 2);
    if (!(cfg & PCIM_CMD_MEMEN)) {
        if_printf(ifp, "failed to enable memory mapping\n");
        error = ENXIO;
        goto fail;
    }
    sc->memid = 0x10;
    sc->memres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->memid,
        0, ~0, 1, RF_ACTIVE);
    if (sc->memres == NULL) {
        if_printf(ifp, "could not map memory\n");
        error = ENXIO;
        goto fail;
    }
    sc->memh = rman_get_bushandle(sc->memres);
    sc->memt = rman_get_bustag(sc->memres);

    /*
     * Convert endianness of slave access
     */
    cfg = pci_read_config(dev, FATM_PCIR_MCTL, 1);
    cfg |= FATM_PCIM_SWAB;
    pci_write_config(dev, FATM_PCIR_MCTL, cfg, 1);
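
    /*
     * With FATM_PCIM_SWAB set the card swaps bytes on slave accesses;
     * judging by the existing comment and the bit name, this spares the
     * host from byte-swapping every word of the shared memory itself.
     */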

    /*
     * Allocate interrupt (activate at the end)
     */
    sc->irqid = 0;
    sc->irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irqid,
        0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
    if (sc->irqres == NULL) {
        if_printf(ifp, "could not allocate irq\n");
        error = ENXIO;
        goto fail;
    }

    /*
     * Allocate the parent DMA tag. This is used simply to hold overall
     * restrictions for the controller (and PCI bus) and is never used
     * to do anything.
     */
    if (bus_dma_tag_create(NULL, 1, 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
        NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, MAXDMASEGS,
        BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
        &sc->parent_dmat)) {
        if_printf(ifp, "could not allocate parent DMA tag\n");
        error = ENOMEM;
        goto fail;
    }

    /*
     * Allocate the receive buffer DMA tag. This tag must map a maximum of
     * an mbuf cluster.
     */
    if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
        NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
        NULL, NULL, &sc->rbuf_tag)) {
        if_printf(ifp, "could not allocate rbuf DMA tag\n");
        error = ENOMEM;
        goto fail;
    }

    /*
     * Allocate the transmission DMA tag. Must add 1, because
     * the rounded-up PDU will be 65536 bytes long.
     */
    if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
        NULL, NULL,
        FATM_MAXPDU + 1, TPD_EXTENSIONS + TXD_FIXED, MCLBYTES, 0,
        NULL, NULL, &sc->tx_tag)) {
        if_printf(ifp, "could not allocate tx DMA tag\n");
        error = ENOMEM;
        goto fail;
    }

    /*
     * Allocate DMAable memory.
     */
    sc->stat_mem.size = sizeof(uint32_t) * (FATM_CMD_QLEN + FATM_TX_QLEN
        + FATM_RX_QLEN + SMALL_SUPPLY_QLEN + LARGE_SUPPLY_QLEN);
    sc->stat_mem.align = 4;

    sc->txq_mem.size = FATM_TX_QLEN * TPD_SIZE;
    sc->txq_mem.align = 32;

    sc->rxq_mem.size = FATM_RX_QLEN * RPD_SIZE;
    sc->rxq_mem.align = 32;

    sc->s1q_mem.size = SMALL_SUPPLY_QLEN *
        BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE);
    sc->s1q_mem.align = 32;

    sc->l1q_mem.size = LARGE_SUPPLY_QLEN *
        BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE);
    sc->l1q_mem.align = 32;

#ifdef TEST_DMA_SYNC
    if ((error = alloc_dma_memoryX(sc, "STATUS", &sc->stat_mem)) != 0 ||
        (error = alloc_dma_memoryX(sc, "TXQ", &sc->txq_mem)) != 0 ||
        (error = alloc_dma_memoryX(sc, "RXQ", &sc->rxq_mem)) != 0 ||
        (error = alloc_dma_memoryX(sc, "S1Q", &sc->s1q_mem)) != 0 ||
        (error = alloc_dma_memoryX(sc, "L1Q", &sc->l1q_mem)) != 0)
        goto fail;
#else
    if ((error = alloc_dma_memory(sc, "STATUS", &sc->stat_mem)) != 0 ||
        (error = alloc_dma_memory(sc, "TXQ", &sc->txq_mem)) != 0 ||
        (error = alloc_dma_memory(sc, "RXQ", &sc->rxq_mem)) != 0 ||
        (error = alloc_dma_memory(sc, "S1Q", &sc->s1q_mem)) != 0 ||
        (error = alloc_dma_memory(sc, "L1Q", &sc->l1q_mem)) != 0)
        goto fail;
#endif

    sc->prom_mem.size = sizeof(struct prom);
    sc->prom_mem.align = 32;
    if ((error = alloc_dma_memory(sc, "PROM", &sc->prom_mem)) != 0)
        goto fail;

    sc->sadi_mem.size = sizeof(struct fatm_stats);
    sc->sadi_mem.align = 32;
    if ((error = alloc_dma_memory(sc, "STATISTICS", &sc->sadi_mem)) != 0)
        goto fail;

    sc->reg_mem.size = sizeof(uint32_t) * FATM_NREGS;
    sc->reg_mem.align = 32;
    if ((error = alloc_dma_memory(sc, "REGISTERS", &sc->reg_mem)) != 0)
        goto fail;

    /*
     * Allocate queues
     */
    sc->cmdqueue.chunk = malloc(FATM_CMD_QLEN * sizeof(struct cmdqueue),
        M_DEVBUF, M_ZERO | M_WAITOK);
    sc->txqueue.chunk = malloc(FATM_TX_QLEN * sizeof(struct txqueue),
        M_DEVBUF, M_ZERO | M_WAITOK);
    sc->rxqueue.chunk = malloc(FATM_RX_QLEN * sizeof(struct rxqueue),
        M_DEVBUF, M_ZERO | M_WAITOK);
    sc->s1queue.chunk = malloc(SMALL_SUPPLY_QLEN * sizeof(struct supqueue),
        M_DEVBUF, M_ZERO | M_WAITOK);
    sc->l1queue.chunk = malloc(LARGE_SUPPLY_QLEN * sizeof(struct supqueue),
        M_DEVBUF, M_ZERO | M_WAITOK);

    sc->vccs = malloc((FORE_MAX_VCC + 1) * sizeof(sc->vccs[0]),
        M_DEVBUF, M_ZERO | M_WAITOK);
    sc->vcc_zone = uma_zcreate("FATM vccs", sizeof(struct card_vcc),
        NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
    if (sc->vcc_zone == NULL) {
        error = ENOMEM;
        goto fail;
    }
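
    /*
     * sc->vccs is a table of pointers indexed by VCI; the card_vcc
     * structures themselves come from the UMA zone above, so each one
     * can be freed individually once its VCC has been closed.
     */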

    /*
     * Allocate memory for the receive buffer headers. The total number
     * of headers should probably also include the maximum number of
     * buffers on the receive queue.
     */
    sc->rbuf_total = SMALL_POOL_SIZE + LARGE_POOL_SIZE;
    sc->rbufs = malloc(sc->rbuf_total * sizeof(struct rbuf),
        M_DEVBUF, M_ZERO | M_WAITOK);

    /*
     * Put all rbuf headers on the free list and create DMA maps.
     */
    for (rb = sc->rbufs, i = 0; i < sc->rbuf_total; i++, rb++) {
        if ((error = bus_dmamap_create(sc->rbuf_tag, 0, &rb->map))) {
            if_printf(&sc->ifatm.ifnet, "creating rx map: %d\n",
                error);
            goto fail;
        }
        LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
    }

    /*
     * Create DMA maps for transmission. In case of an error, free the
     * allocated DMA maps, because on some architectures maps are NULL
     * and we cannot distinguish between a failure and a NULL map in
     * the detach routine.
     */
    for (i = 0; i < FATM_TX_QLEN; i++) {
        tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
        if ((error = bus_dmamap_create(sc->tx_tag, 0, &tx->map))) {
            if_printf(&sc->ifatm.ifnet, "creating tx map: %d\n",
                error);
            while (i > 0) {
                tx = GET_QUEUE(sc->txqueue, struct txqueue,
                    i - 1);
                bus_dmamap_destroy(sc->tx_tag, tx->map);
                i--;
            }
            goto fail;
        }
    }

    utopia_attach(&sc->utopia, &sc->ifatm, &sc->media, &sc->mtx,
        &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
        &fatm_utopia_methods);
    sc->utopia.flags |= UTP_FL_NORESET | UTP_FL_POLL_CARRIER;

    /*
     * Attach the interface
     */
    atm_ifattach(ifp);
    ifp->if_snd.ifq_maxlen = 512;

    error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET,
        fatm_intr, sc, &sc->ih);
    if (error) {
        if_printf(ifp, "couldn't setup irq\n");
        goto fail;
    }
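
    /*
     * Note: the success path falls through into 'fail' as well;
     * fatm_detach() is only invoked when error is non-zero.
     */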

  fail:
    if (error)
        fatm_detach(dev);

    return (error);
}

#if defined(FATM_DEBUG) && 0
static void
dump_s1_queue(struct fatm_softc *sc)
{
    int i;
    struct supqueue *q;

    for (i = 0; i < SMALL_SUPPLY_QLEN; i++) {
        q = GET_QUEUE(sc->s1queue, struct supqueue, i);
        printf("%2d: card=%x(%x,%x) stat=%x\n", i,
            q->q.card,
            READ4(sc, q->q.card),
            READ4(sc, q->q.card + 4),
            *q->q.statp);
    }
}
#endif

/*
 * Driver infrastructure.
 */
static device_method_t fatm_methods[] = {
    DEVMETHOD(device_probe,     fatm_probe),
    DEVMETHOD(device_attach,    fatm_attach),
    DEVMETHOD(device_detach,    fatm_detach),
    { 0, 0 }
};
static driver_t fatm_driver = {
    "fatm",
    fatm_methods,
    sizeof(struct fatm_softc),
};

DRIVER_MODULE(fatm, pci, fatm_driver, fatm_devclass, 0, 0);