/*-
 * Copyright (c) 2001-2003
 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Author: Hartmut Brandt <harti@freebsd.org>
 *
 * Fore PCA200E driver for NATM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/fatm/if_fatm.c 271849 2014-09-19 03:51:26Z glebius $");

#include "opt_inet.h"
#include "opt_natm.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <sys/condvar.h>
#include <vm/uma.h>

#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_atm.h>
#include <net/route.h>
#ifdef ENABLE_BPF
#include <net/bpf.h>
#endif
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_atm.h>
#endif

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/utopia/utopia.h>

#include <dev/fatm/if_fatmreg.h>
#include <dev/fatm/if_fatmvar.h>

#include <dev/fatm/firmware.h>

devclass_t fatm_devclass;

static const struct {
	uint16_t	vid;
	uint16_t	did;
	const char	*name;
} fatm_devs[] = {
	{ 0x1127, 0x300,
	  "FORE PCA200E" },
	{ 0, 0, NULL }
};

static const struct rate {
	uint32_t	ratio;
	uint32_t	cell_rate;
} rate_table[] = {
#include <dev/fatm/if_fatm_rate.h>
};
#define	RATE_TABLE_SIZE	(sizeof(rate_table) / sizeof(rate_table[0]))

SYSCTL_DECL(_hw_atm);

MODULE_DEPEND(fatm, utopia, 1, 1, 1);

static int fatm_utopia_readregs(struct ifatm *, u_int, uint8_t *, u_int *);
static int fatm_utopia_writereg(struct ifatm *, u_int, u_int, u_int);

static const struct utopia_methods fatm_utopia_methods = {
	fatm_utopia_readregs,
	fatm_utopia_writereg
};

#define	VC_OK(SC, VPI, VCI)						\
	(((VPI) & ~((1 << IFP2IFATM((SC)->ifp)->mib.vpi_bits) - 1)) == 0 && \
	 (VCI) != 0 && ((VCI) & ~((1 << IFP2IFATM((SC)->ifp)->mib.vci_bits) - 1)) == 0)
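/*
 * Illustrative example (not from the original source): with
 * mib.vpi_bits == 0 and mib.vci_bits == 12 the macro accepts only
 * VPI 0 and VCIs 1..4095; VCI 0 is always rejected.
 */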

static int fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc);

/*
 * Probing is easy: step through the list of known vendor and device
 * ids and compare. If one is found - it's ours.
 */
static int
fatm_probe(device_t dev)
{
	int i;

	for (i = 0; fatm_devs[i].name; i++)
		if (pci_get_vendor(dev) == fatm_devs[i].vid &&
		    pci_get_device(dev) == fatm_devs[i].did) {
			device_set_desc(dev, fatm_devs[i].name);
			return (BUS_PROBE_DEFAULT);
		}
	return (ENXIO);
}

/*
 * Function called at completion of a SUNI writeregs/readregs command.
 * This is called from the interrupt handler while holding the softc lock.
 * We use the queue entry as the rendezvous point.
 */
static void
fatm_utopia_writeregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
{

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.suni_reg_errors++;
		q->error = EIO;
	}
	wakeup(q);
}

/*
 * Write a SUNI register. The bits that are 1 in mask are written from val
 * into register reg. We wait for the command to complete by sleeping on
 * the register memory.
 *
 * We assume that we already hold the softc mutex.
 */
static int
fatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
{
	int error;
	struct cmdqueue *q;
	struct fatm_softc *sc;

	sc = ifatm->ifp->if_softc;
	FATM_CHECKLOCK(sc);
	if (!(ifatm->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return (EIO);

	/* get queue element and fill it */
	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
		sc->istats.cmd_queue_full++;
		return (EIO);
	}
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = fatm_utopia_writeregs_complete;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, 0);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP,
	    FATM_MAKE_SETOC3(reg, val, mask) | FATM_OP_INTERRUPT_SEL);
	BARRIER_W(sc);

	/*
	 * Wait for the command to complete
	 */
	error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_setreg", hz);

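	/*
	 * Map the msleep() result: a one-second timeout means the card
	 * never answered, a caught signal aborts the wait, and on success
	 * the completion callback has filled in q->error.
	 */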
	switch (error) {

	case EWOULDBLOCK:
		error = EIO;
		break;

	case ERESTART:
		error = EINTR;
		break;

	case 0:
		error = q->error;
		break;
	}

	return (error);
}

/*
 * Function called at completion of a SUNI readregs command.
 * This is called from the interrupt handler while holding the softc lock.
 * We use reg_mem as the rendezvous point.
 */
static void
fatm_utopia_readregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
{

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.suni_reg_errors++;
		q->error = EIO;
	}
	wakeup(&sc->reg_mem);
}

/*
 * Read SUNI registers
 *
 * We use a preallocated buffer to read the registers. Therefore we need
 * to protect against multiple threads trying to read registers. We do
 * this with a condition variable and a flag. We wait for the command to
 * complete by sleeping on the register memory.
 *
 * We assume that we already hold the softc mutex.
 */
static int
fatm_utopia_readregs_internal(struct fatm_softc *sc)
{
	int error, i;
	uint32_t *ptr;
	struct cmdqueue *q;

	/* get the buffer */
	for (;;) {
		if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
			return (EIO);
		if (!(sc->flags & FATM_REGS_INUSE))
			break;
		cv_wait(&sc->cv_regs, &sc->mtx);
	}
	sc->flags |= FATM_REGS_INUSE;

	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
		sc->istats.cmd_queue_full++;
		/* declare buffer to be free again, or later readers block forever */
		sc->flags &= ~FATM_REGS_INUSE;
		cv_signal(&sc->cv_regs);
		return (EIO);
	}
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = fatm_utopia_readregs_complete;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map, BUS_DMASYNC_PREREAD);

	WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, sc->reg_mem.paddr);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP,
	    FATM_OP_OC3_GET_REG | FATM_OP_INTERRUPT_SEL);
	BARRIER_W(sc);

	/*
	 * Wait for the command to complete
	 */
	error = msleep(&sc->reg_mem, &sc->mtx, PZERO | PCATCH,
	    "fatm_getreg", hz);

	switch (error) {

	case EWOULDBLOCK:
		error = EIO;
		break;

	case ERESTART:
		error = EINTR;
		break;

	case 0:
		bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map,
		    BUS_DMASYNC_POSTREAD);
		error = q->error;
		break;
	}

	if (error != 0) {
		/* declare buffer to be free */
		sc->flags &= ~FATM_REGS_INUSE;
		cv_signal(&sc->cv_regs);
		return (error);
	}

	/* registers arrive 32-bit little-endian; only the low byte is significant */
	ptr = (uint32_t *)sc->reg_mem.mem;
	for (i = 0; i < FATM_NREGS; i++)
		ptr[i] = le32toh(ptr[i]) & 0xff;

	return (0);
}

/*
 * Read SUNI registers for the SUNI module.
 *
 * We assume that we already hold the mutex.
 */
static int
fatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *valp, u_int *np)
{
	int err;
	int i;
	struct fatm_softc *sc;

	if (reg >= FATM_NREGS)
		return (EINVAL);
	if (reg + *np > FATM_NREGS)
		*np = FATM_NREGS - reg;
	sc = ifatm->ifp->if_softc;
	FATM_CHECKLOCK(sc);

	err = fatm_utopia_readregs_internal(sc);
	if (err != 0)
		return (err);

	for (i = 0; i < *np; i++)
		valp[i] = ((uint32_t *)sc->reg_mem.mem)[reg + i];

	/* declare buffer to be free */
	sc->flags &= ~FATM_REGS_INUSE;
	cv_signal(&sc->cv_regs);

	return (0);
}

/*
 * Check whether the heart is beating. We remember the last heart beat and
 * compare it to the current one. If it appears stuck for 10 consecutive
 * checks, we have a problem.
 *
 * Assume we hold the lock.
 */
static void
fatm_check_heartbeat(struct fatm_softc *sc)
{
	uint32_t h;

	FATM_CHECKLOCK(sc);

	h = READ4(sc, FATMO_HEARTBEAT);
	DBG(sc, BEAT, ("heartbeat %08x", h));

	if (sc->stop_cnt == 10)
		return;

	if (h == sc->heartbeat) {
		if (++sc->stop_cnt == 10) {
			log(LOG_ERR, "i960 stopped???\n");
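			/*
			 * Mask further card interrupts; HIMR is (to our
			 * understanding) the host interrupt mask register,
			 * so a wedged i960 cannot flood the host.
			 */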
			WRITE4(sc, FATMO_HIMR, 1);
		}
		return;
	}

	sc->stop_cnt = 0;
	sc->heartbeat = h;
}

/*
 * Ensure that the heart is still beating.
 */
static void
fatm_watchdog(void *arg)
{
	struct fatm_softc *sc;

	sc = arg;
	FATM_CHECKLOCK(sc);
	fatm_check_heartbeat(sc);
	callout_reset(&sc->watchdog_timer, hz * 5, fatm_watchdog, sc);
}

/*
 * Hard reset the i960 on the board. This is done by initializing registers,
 * clearing interrupts and waiting for the selftest to finish. Not sure
 * whether all these barriers are actually needed.
 *
 * Assumes that we hold the lock.
 */
static int
fatm_reset(struct fatm_softc *sc)
{
	int w;
	uint32_t val;

	FATM_CHECKLOCK(sc);

	WRITE4(sc, FATMO_APP_BASE, FATMO_COMMON_ORIGIN);
	BARRIER_W(sc);

	WRITE4(sc, FATMO_UART_TO_960, XMIT_READY);
	BARRIER_W(sc);

	WRITE4(sc, FATMO_UART_TO_HOST, XMIT_READY);
	BARRIER_W(sc);

	WRITE4(sc, FATMO_BOOT_STATUS, COLD_START);
	BARRIER_W(sc);

	WRITE1(sc, FATMO_HCR, FATM_HCR_RESET);
	BARRIER_W(sc);

	DELAY(1000);

	WRITE1(sc, FATMO_HCR, 0);
	BARRIER_RW(sc);

	DELAY(1000);

	for (w = 100; w; w--) {
		BARRIER_R(sc);
		val = READ4(sc, FATMO_BOOT_STATUS);
		switch (val) {
		case SELF_TEST_OK:
			return (0);
		case SELF_TEST_FAIL:
			return (EIO);
		}
		DELAY(1000);
	}
	return (EIO);
}

/*
 * Stop the card. Must be called WITH the lock held.
 * Reset, free transmit and receive buffers. Wake up everybody who may sleep.
 */
static void
fatm_stop(struct fatm_softc *sc)
{
	int i;
	struct cmdqueue *q;
	struct rbuf *rb;
	struct txqueue *tx;
	uint32_t stat;

	FATM_CHECKLOCK(sc);

	/* Stop the board */
	utopia_stop(&sc->utopia);
	(void)fatm_reset(sc);

	/* stop watchdog */
	callout_stop(&sc->watchdog_timer);

	if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
		ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
		    sc->utopia.carrier == UTP_CARR_OK);

		/*
		 * Collect transmit mbufs, partial receive mbufs and
		 * supplied mbufs
		 */
		for (i = 0; i < FATM_TX_QLEN; i++) {
			tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
			if (tx->m) {
				bus_dmamap_unload(sc->tx_tag, tx->map);
				m_freem(tx->m);
				tx->m = NULL;
			}
		}

		/* Collect supplied mbufs */
		while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
			LIST_REMOVE(rb, link);
			bus_dmamap_unload(sc->rbuf_tag, rb->map);
			m_free(rb->m);
			rb->m = NULL;
			LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
		}

		/* Unwait any waiters */
		wakeup(&sc->sadi_mem);

		/* wakeup all threads waiting for STAT or REG buffers */
		cv_broadcast(&sc->cv_stat);
		cv_broadcast(&sc->cv_regs);

		sc->flags &= ~(FATM_STAT_INUSE | FATM_REGS_INUSE);

		/* wakeup all threads waiting on commands */
		for (i = 0; i < FATM_CMD_QLEN; i++) {
			q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, i);

			H_SYNCSTAT_POSTREAD(sc, q->q.statp);
			if ((stat = H_GETSTAT(q->q.statp)) != FATM_STAT_FREE) {
				H_SETSTAT(q->q.statp, stat | FATM_STAT_ERROR);
				H_SYNCSTAT_PREWRITE(sc, q->q.statp);
				wakeup(q);
			}
		}
		utopia_reset_media(&sc->utopia);
	}
	sc->small_cnt = sc->large_cnt = 0;

	/* Reset vcc info */
	if (sc->vccs != NULL) {
		sc->open_vccs = 0;
		for (i = 0; i < FORE_MAX_VCC + 1; i++) {
			if (sc->vccs[i] != NULL) {
				if ((sc->vccs[i]->vflags & (FATM_VCC_OPEN |
				    FATM_VCC_TRY_OPEN)) == 0) {
					uma_zfree(sc->vcc_zone, sc->vccs[i]);
					sc->vccs[i] = NULL;
				} else {
					sc->vccs[i]->vflags = 0;
					sc->open_vccs++;
				}
			}
		}
	}
}

/*
 * Load the firmware into the board and save the entry point.
 */
static uint32_t
firmware_load(struct fatm_softc *sc)
{
	struct firmware *fw = (struct firmware *)firmware;

	DBG(sc, INIT, ("loading - entry=%x", fw->entry));
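	/* Copy the whole image, header included, to its load offset on the card. */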
	bus_space_write_region_4(sc->memt, sc->memh, fw->offset, firmware,
	    sizeof(firmware) / sizeof(firmware[0]));
	BARRIER_RW(sc);

	return (fw->entry);
}

/*
 * Read a character from the virtual UART. The availability of a character
 * is signaled by a non-zero value of the 32 bit register. We acknowledge
 * consumption of the character to the card by setting that register
 * to zero.
 */
static int
rx_getc(struct fatm_softc *sc)
{
	int w = 50;
	int c;

	while (w--) {
		c = READ4(sc, FATMO_UART_TO_HOST);
		BARRIER_RW(sc);
		if (c != 0) {
			WRITE4(sc, FATMO_UART_TO_HOST, 0);
			DBGC(sc, UART, ("%c", c & 0xff));
			return (c & 0xff);
		}
		DELAY(1000);
	}
	return (-1);
}

/*
 * Eat up characters from the board and stuff them in the bit-bucket.
 */
static void
rx_flush(struct fatm_softc *sc)
{
	int w = 10000;

	while (w-- && rx_getc(sc) >= 0)
		;
}

/*
 * Write a character to the card. The UART is available if the register
 * is zero.
 */
static int
tx_putc(struct fatm_softc *sc, u_char c)
{
	int w = 10;
	int c1;

	while (w--) {
		c1 = READ4(sc, FATMO_UART_TO_960);
		BARRIER_RW(sc);
		if (c1 == 0) {
			WRITE4(sc, FATMO_UART_TO_960, c | CHAR_AVAIL);
			DBGC(sc, UART, ("%c", c & 0xff));
			return (0);
		}
		DELAY(1000);
	}
	return (-1);
}

/*
 * Start the firmware. This is done by issuing a 'go' command with
 * the hex entry address of the firmware. Then we wait for the self-test to
 * succeed.
 */
static int
fatm_start_firmware(struct fatm_softc *sc, uint32_t start)
{
	static char hex[] = "0123456789abcdef";
	u_int w, val;

	DBG(sc, INIT, ("starting"));
	rx_flush(sc);
	tx_putc(sc, '\r');
	DELAY(1000);

	rx_flush(sc);

	tx_putc(sc, 'g');
	(void)rx_getc(sc);
	tx_putc(sc, 'o');
	(void)rx_getc(sc);
	tx_putc(sc, ' ');
	(void)rx_getc(sc);

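	/* Send the entry address as four hex digits, i.e. its low 16 bits. */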
	tx_putc(sc, hex[(start >> 12) & 0xf]);
	(void)rx_getc(sc);
	tx_putc(sc, hex[(start >> 8) & 0xf]);
	(void)rx_getc(sc);
	tx_putc(sc, hex[(start >> 4) & 0xf]);
	(void)rx_getc(sc);
	tx_putc(sc, hex[(start >> 0) & 0xf]);
	(void)rx_getc(sc);

	tx_putc(sc, '\r');
	rx_flush(sc);

	for (w = 100; w; w--) {
		BARRIER_R(sc);
		val = READ4(sc, FATMO_BOOT_STATUS);
		switch (val) {
		case CP_RUNNING:
			return (0);
		case SELF_TEST_FAIL:
			return (EIO);
		}
		DELAY(1000);
	}
	return (EIO);
}

/*
 * Initialize one card and host queue.
 */
static void
init_card_queue(struct fatm_softc *sc, struct fqueue *queue, int qlen,
    size_t qel_size, size_t desc_size, cardoff_t off,
    u_char **statpp, uint32_t *cardstat, u_char *descp, uint32_t carddesc)
{
	struct fqelem *el = queue->chunk;

	while (qlen--) {
		el->card = off;
		off += 8;		/* size of card entry */

		el->statp = (uint32_t *)(*statpp);
		(*statpp) += sizeof(uint32_t);
		H_SETSTAT(el->statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, el->statp);

		WRITE4(sc, el->card + FATMOS_STATP, (*cardstat));
		(*cardstat) += sizeof(uint32_t);

		el->ioblk = descp;
		descp += desc_size;
		el->card_ioblk = carddesc;
		carddesc += desc_size;

		el = (struct fqelem *)((u_char *)el + qel_size);
	}
	queue->tail = queue->head = 0;
}

/*
 * Issue the initialize operation to the card, wait for completion and
 * initialize the on-board and host queue structures with offsets and
 * addresses.
 */
static int
fatm_init_cmd(struct fatm_softc *sc)
{
	int w, c;
	u_char *statp;
	uint32_t card_stat;
	u_int cnt;
	struct fqelem *el;
	cardoff_t off;

	DBG(sc, INIT, ("command"));
	WRITE4(sc, FATMO_ISTAT, 0);
	WRITE4(sc, FATMO_IMASK, 1);
	WRITE4(sc, FATMO_HLOGGER, 0);

	WRITE4(sc, FATMO_INIT + FATMOI_RECEIVE_TRESHOLD, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_NUM_CONNECT, FORE_MAX_VCC);
	WRITE4(sc, FATMO_INIT + FATMOI_CQUEUE_LEN, FATM_CMD_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_TQUEUE_LEN, FATM_TX_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_RQUEUE_LEN, FATM_RX_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_RPD_EXTENSION, RPD_EXTENSIONS);
	WRITE4(sc, FATMO_INIT + FATMOI_TPD_EXTENSION, TPD_EXTENSIONS);

	/*
	 * initialize buffer descriptors
	 */
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_QUEUE_LENGTH,
	    SMALL_SUPPLY_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_BUFFER_SIZE,
	    SMALL_BUFFER_LEN);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_POOL_SIZE,
	    SMALL_POOL_SIZE);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_SUPPLY_BLKSIZE,
	    SMALL_SUPPLY_BLKSIZE);

	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_QUEUE_LENGTH,
	    LARGE_SUPPLY_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_BUFFER_SIZE,
	    LARGE_BUFFER_LEN);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_POOL_SIZE,
	    LARGE_POOL_SIZE);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_SUPPLY_BLKSIZE,
	    LARGE_SUPPLY_BLKSIZE);

	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_QUEUE_LENGTH, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_BUFFER_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_POOL_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_SUPPLY_BLKSIZE, 0);

	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_QUEUE_LENGTH, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_BUFFER_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_POOL_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_SUPPLY_BLKSIZE, 0);

	/*
	 * Start the command
	 */
	BARRIER_W(sc);
	WRITE4(sc, FATMO_INIT + FATMOI_STATUS, FATM_STAT_PENDING);
	BARRIER_W(sc);
	WRITE4(sc, FATMO_INIT + FATMOI_OP, FATM_OP_INITIALIZE);
	BARRIER_W(sc);

	/*
	 * Busy wait for completion
	 */
	w = 100;
	while (w--) {
		c = READ4(sc, FATMO_INIT + FATMOI_STATUS);
		BARRIER_R(sc);
		if (c & FATM_STAT_COMPLETE)
			break;
		DELAY(1000);
	}

	/* treat a timeout (no completion within 100ms) like an error */
	if ((c & FATM_STAT_COMPLETE) == 0 || (c & FATM_STAT_ERROR))
		return (EIO);

	/*
	 * Initialize the queues
	 */
	statp = sc->stat_mem.mem;
	card_stat = sc->stat_mem.paddr;

	/*
	 * Command queue. This is special in that it's on the card.
	 */
	el = sc->cmdqueue.chunk;
	off = READ4(sc, FATMO_COMMAND_QUEUE);
	DBG(sc, INIT, ("cmd queue=%x", off));
	for (cnt = 0; cnt < FATM_CMD_QLEN; cnt++) {
		el = &((struct cmdqueue *)sc->cmdqueue.chunk + cnt)->q;

		el->card = off;
		off += 32;		/* size of card structure */

		el->statp = (uint32_t *)statp;
		statp += sizeof(uint32_t);
		H_SETSTAT(el->statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, el->statp);

		WRITE4(sc, el->card + FATMOC_STATP, card_stat);
		card_stat += sizeof(uint32_t);
	}
	sc->cmdqueue.tail = sc->cmdqueue.head = 0;

	/*
	 * Now the other queues. These are in memory.
	 */
	init_card_queue(sc, &sc->txqueue, FATM_TX_QLEN,
	    sizeof(struct txqueue), TPD_SIZE,
	    READ4(sc, FATMO_TRANSMIT_QUEUE),
	    &statp, &card_stat, sc->txq_mem.mem, sc->txq_mem.paddr);

	init_card_queue(sc, &sc->rxqueue, FATM_RX_QLEN,
	    sizeof(struct rxqueue), RPD_SIZE,
	    READ4(sc, FATMO_RECEIVE_QUEUE),
	    &statp, &card_stat, sc->rxq_mem.mem, sc->rxq_mem.paddr);

	init_card_queue(sc, &sc->s1queue, SMALL_SUPPLY_QLEN,
	    sizeof(struct supqueue), BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE),
	    READ4(sc, FATMO_SMALL_B1_QUEUE),
	    &statp, &card_stat, sc->s1q_mem.mem, sc->s1q_mem.paddr);

	init_card_queue(sc, &sc->l1queue, LARGE_SUPPLY_QLEN,
	    sizeof(struct supqueue), BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE),
	    READ4(sc, FATMO_LARGE_B1_QUEUE),
	    &statp, &card_stat, sc->l1q_mem.mem, sc->l1q_mem.paddr);

	sc->txcnt = 0;

	return (0);
}

/*
 * Read PROM. Called only from attach code. Here we spin because the interrupt
 * handler is not yet set up.
 */
static int
fatm_getprom(struct fatm_softc *sc)
{
	int i;
	struct prom *prom;
	struct cmdqueue *q;

	DBG(sc, INIT, ("reading prom"));
	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = NULL;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
	    BUS_DMASYNC_PREREAD);

	WRITE4(sc, q->q.card + FATMOC_GPROM_BUF, sc->prom_mem.paddr);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_GET_PROM_DATA);
	BARRIER_W(sc);

	for (i = 0; i < 1000; i++) {
		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) &
		    (FATM_STAT_COMPLETE | FATM_STAT_ERROR))
			break;
		DELAY(1000);
	}
	if (i == 1000) {
		if_printf(sc->ifp, "getprom timeout\n");
		return (EIO);
	}
	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		if_printf(sc->ifp, "getprom error\n");
		return (EIO);
	}
	H_SETSTAT(q->q.statp, FATM_STAT_FREE);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
	NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);

	bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
	    BUS_DMASYNC_POSTREAD);

#ifdef notdef
	{
		u_int i;
		u_char *ptr = (u_char *)sc->prom_mem.mem;

		printf("PROM: ");
		for (i = 0; i < sizeof(struct prom); i++)
			printf("%02x ", *ptr++);
		printf("\n");
	}
#endif

	prom = (struct prom *)sc->prom_mem.mem;

	bcopy(prom->mac + 2, IFP2IFATM(sc->ifp)->mib.esi, 6);
	IFP2IFATM(sc->ifp)->mib.serial = le32toh(prom->serial);
	IFP2IFATM(sc->ifp)->mib.hw_version = le32toh(prom->version);
	IFP2IFATM(sc->ifp)->mib.sw_version = READ4(sc, FATMO_FIRMWARE_RELEASE);

	if_printf(sc->ifp, "ESI=%02x:%02x:%02x:%02x:%02x:%02x "
	    "serial=%u hw=0x%x sw=0x%x\n", IFP2IFATM(sc->ifp)->mib.esi[0],
	    IFP2IFATM(sc->ifp)->mib.esi[1], IFP2IFATM(sc->ifp)->mib.esi[2],
	    IFP2IFATM(sc->ifp)->mib.esi[3], IFP2IFATM(sc->ifp)->mib.esi[4],
	    IFP2IFATM(sc->ifp)->mib.esi[5], IFP2IFATM(sc->ifp)->mib.serial,
	    IFP2IFATM(sc->ifp)->mib.hw_version, IFP2IFATM(sc->ifp)->mib.sw_version);

	return (0);
}

/*
 * This is the callback function for bus_dmamap_load. We assume that we
 * have a 32-bit bus and so always have exactly one segment.
 */
static void
dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *ptr = (bus_addr_t *)arg;

	if (error != 0) {
		printf("%s: error=%d\n", __func__, error);
		return;
	}
	KASSERT(nsegs == 1, ("too many DMA segments"));
	KASSERT(segs[0].ds_addr <= 0xffffffff, ("DMA address too large %lx",
	    (u_long)segs[0].ds_addr));

	*ptr = segs[0].ds_addr;
}

/*
 * Allocate a chunk of DMA-able memory and map it.
 */
static int
alloc_dma_memory(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
{
	int error;

	mem->mem = NULL;

	if (bus_dma_tag_create(sc->parent_dmat, mem->align, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, mem->size, 1, BUS_SPACE_MAXSIZE_32BIT,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
		if_printf(sc->ifp, "could not allocate %s DMA tag\n", nm);
		return (ENOMEM);
	}

	error = bus_dmamem_alloc(mem->dmat, &mem->mem, 0, &mem->map);
	if (error) {
		if_printf(sc->ifp, "could not allocate %s DMA memory: %d\n",
		    nm, error);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
	if (error) {
		if_printf(sc->ifp, "could not load %s DMA memory: %d\n",
		    nm, error);
		bus_dmamem_free(mem->dmat, mem->mem, mem->map);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	DBG(sc, DMA, ("DMA %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
	    (u_long)mem->paddr, mem->size, mem->align));

	return (0);
}
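/*
 * Usage sketch (illustrative only; the sizes and the "STAT" name are
 * made up): the caller fills in size and align before the call, e.g.
 *
 *	sc->stat_mem.size = 4 * FATM_CMD_QLEN;
 *	sc->stat_mem.align = 4;
 *	if (alloc_dma_memory(sc, "STAT", &sc->stat_mem) != 0)
 *		return (ENOMEM);
 */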

#ifdef TEST_DMA_SYNC
static int
alloc_dma_memoryX(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
{
	int error;

	mem->mem = NULL;

	if (bus_dma_tag_create(NULL, mem->align, 0,
	    BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, mem->size, 1, mem->size,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
		if_printf(sc->ifp, "could not allocate %s DMA tag\n", nm);
		return (ENOMEM);
	}

	mem->mem = contigmalloc(mem->size, M_DEVBUF, M_WAITOK,
	    BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR_32BIT, mem->align, 0);

	error = bus_dmamap_create(mem->dmat, 0, &mem->map);
	if (error) {
		if_printf(sc->ifp, "could not allocate %s DMA map: %d\n",
		    nm, error);
		contigfree(mem->mem, mem->size, M_DEVBUF);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
	if (error) {
		if_printf(sc->ifp, "could not load %s DMA memory: %d\n",
		    nm, error);
		bus_dmamap_destroy(mem->dmat, mem->map);
		contigfree(mem->mem, mem->size, M_DEVBUF);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	DBG(sc, DMA, ("DMAX %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
	    (u_long)mem->paddr, mem->size, mem->align));

	printf("DMAX: %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
	    (u_long)mem->paddr, mem->size, mem->align);

	return (0);
}
#endif /* TEST_DMA_SYNC */

/*
 * Destroy all resources of a DMA-able memory chunk
 */
static void
destroy_dma_memory(struct fatm_mem *mem)
{
	if (mem->mem != NULL) {
		bus_dmamap_unload(mem->dmat, mem->map);
		bus_dmamem_free(mem->dmat, mem->mem, mem->map);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
	}
}
#ifdef TEST_DMA_SYNC
static void
destroy_dma_memoryX(struct fatm_mem *mem)
{
	if (mem->mem != NULL) {
		bus_dmamap_unload(mem->dmat, mem->map);
		bus_dmamap_destroy(mem->dmat, mem->map);
		contigfree(mem->mem, mem->size, M_DEVBUF);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
	}
}
#endif /* TEST_DMA_SYNC */

/*
 * Try to supply buffers to the card if there are free entries in the queues
 */
static void
fatm_supply_small_buffers(struct fatm_softc *sc)
{
	int nblocks, nbufs;
	struct supqueue *q;
	struct rbd *bd;
	int i, j, error, cnt;
	struct mbuf *m;
	struct rbuf *rb;
	bus_addr_t phys;

	nbufs = max(4 * sc->open_vccs, 32);
	nbufs = min(nbufs, SMALL_POOL_SIZE);
	nbufs -= sc->small_cnt;

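	/* Round up: buffers are handed to the card in whole supply blocks. */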
	nblocks = (nbufs + SMALL_SUPPLY_BLKSIZE - 1) / SMALL_SUPPLY_BLKSIZE;
	for (cnt = 0; cnt < nblocks; cnt++) {
		q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.head);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
			break;

		bd = (struct rbd *)q->q.ioblk;

		for (i = 0; i < SMALL_SUPPLY_BLKSIZE; i++) {
			if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
				if_printf(sc->ifp, "out of rbufs\n");
				break;
			}
			MGETHDR(m, M_NOWAIT, MT_DATA);
			if (m == NULL) {
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
				break;
			}
			MH_ALIGN(m, SMALL_BUFFER_LEN);
			error = bus_dmamap_load(sc->rbuf_tag, rb->map,
			    m->m_data, SMALL_BUFFER_LEN, dmaload_helper,
			    &phys, BUS_DMA_NOWAIT);
			if (error) {
				if_printf(sc->ifp,
				    "dmamap_load mbuf failed %d\n", error);
				m_freem(m);
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
				break;
			}
			bus_dmamap_sync(sc->rbuf_tag, rb->map,
			    BUS_DMASYNC_PREREAD);

			LIST_REMOVE(rb, link);
			LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);

			rb->m = m;
			bd[i].handle = rb - sc->rbufs;
			H_SETDESC(bd[i].buffer, phys);
		}

		if (i < SMALL_SUPPLY_BLKSIZE) {
			for (j = 0; j < i; j++) {
				rb = sc->rbufs + bd[j].handle;
				bus_dmamap_unload(sc->rbuf_tag, rb->map);
				m_free(rb->m);
				rb->m = NULL;

				LIST_REMOVE(rb, link);
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
			}
			break;
		}
		H_SYNCQ_PREWRITE(&sc->s1q_mem, bd,
		    sizeof(struct rbd) * SMALL_SUPPLY_BLKSIZE);

		H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		WRITE4(sc, q->q.card, q->q.card_ioblk);
		BARRIER_W(sc);

		sc->small_cnt += SMALL_SUPPLY_BLKSIZE;

		NEXT_QUEUE_ENTRY(sc->s1queue.head, SMALL_SUPPLY_QLEN);
	}
}

/*
 * Try to supply buffers to the card if there are free entries in the queues.
 * We assume that all buffers are within the address space accessible by the
 * card (32-bit), so we don't need bounce buffers.
 */
static void
fatm_supply_large_buffers(struct fatm_softc *sc)
{
	int nbufs, nblocks, cnt;
	struct supqueue *q;
	struct rbd *bd;
	int i, j, error;
	struct mbuf *m;
	struct rbuf *rb;
	bus_addr_t phys;

	nbufs = max(4 * sc->open_vccs, 32);
	nbufs = min(nbufs, LARGE_POOL_SIZE);
	nbufs -= sc->large_cnt;

	nblocks = (nbufs + LARGE_SUPPLY_BLKSIZE - 1) / LARGE_SUPPLY_BLKSIZE;

	for (cnt = 0; cnt < nblocks; cnt++) {
		q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.head);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
			break;

		bd = (struct rbd *)q->q.ioblk;

		for (i = 0; i < LARGE_SUPPLY_BLKSIZE; i++) {
			if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
				if_printf(sc->ifp, "out of rbufs\n");
				break;
			}
			if ((m = m_getcl(M_NOWAIT, MT_DATA,
			    M_PKTHDR)) == NULL) {
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
				break;
			}
			/* no MEXT_ALIGN for clusters; align the data by hand */
			m->m_data += MCLBYTES - LARGE_BUFFER_LEN;
			error = bus_dmamap_load(sc->rbuf_tag, rb->map,
			    m->m_data, LARGE_BUFFER_LEN, dmaload_helper,
			    &phys, BUS_DMA_NOWAIT);
			if (error) {
				if_printf(sc->ifp,
				    "dmamap_load mbuf failed %d\n", error);
				m_freem(m);
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
				break;
			}

			bus_dmamap_sync(sc->rbuf_tag, rb->map,
			    BUS_DMASYNC_PREREAD);

			LIST_REMOVE(rb, link);
			LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);

			rb->m = m;
			bd[i].handle = rb - sc->rbufs;
			H_SETDESC(bd[i].buffer, phys);
		}

		if (i < LARGE_SUPPLY_BLKSIZE) {
			for (j = 0; j < i; j++) {
				rb = sc->rbufs + bd[j].handle;
				bus_dmamap_unload(sc->rbuf_tag, rb->map);
				m_free(rb->m);
				rb->m = NULL;

				LIST_REMOVE(rb, link);
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
			}
			break;
		}
		H_SYNCQ_PREWRITE(&sc->l1q_mem, bd,
		    sizeof(struct rbd) * LARGE_SUPPLY_BLKSIZE);

		H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
		WRITE4(sc, q->q.card, q->q.card_ioblk);
		BARRIER_W(sc);

		sc->large_cnt += LARGE_SUPPLY_BLKSIZE;

		NEXT_QUEUE_ENTRY(sc->l1queue.head, LARGE_SUPPLY_QLEN);
	}
}

/*
 * Actually start the card. The lock must be held here.
 * Reset, load the firmware, start it, initialize the queues, read the PROM
 * and supply receive buffers to the card.
 */
static void
fatm_init_locked(struct fatm_softc *sc)
{
	struct rxqueue *q;
	int i, c, error;
	uint32_t start;

	DBG(sc, INIT, ("initialize"));
	if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING)
		fatm_stop(sc);

	/*
	 * Hard reset the board
	 */
	if (fatm_reset(sc))
		return;

	start = firmware_load(sc);
	if (fatm_start_firmware(sc, start) || fatm_init_cmd(sc) ||
	    fatm_getprom(sc)) {
		fatm_reset(sc);
		return;
	}

	/*
	 * Handle media
	 */
	c = READ4(sc, FATMO_MEDIA_TYPE);
	switch (c) {

	case FORE_MT_TAXI_100:
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_TAXI_100;
		IFP2IFATM(sc->ifp)->mib.pcr = 227273;
		break;

	case FORE_MT_TAXI_140:
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_TAXI_140;
		IFP2IFATM(sc->ifp)->mib.pcr = 318181;
		break;

	case FORE_MT_UTP_SONET:
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155;
		IFP2IFATM(sc->ifp)->mib.pcr = 353207;
		break;

	case FORE_MT_MM_OC3_ST:
	case FORE_MT_MM_OC3_SC:
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155;
		IFP2IFATM(sc->ifp)->mib.pcr = 353207;
		break;

	case FORE_MT_SM_OC3_ST:
	case FORE_MT_SM_OC3_SC:
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_155;
		IFP2IFATM(sc->ifp)->mib.pcr = 353207;
		break;

	default:
		log(LOG_ERR, "fatm: unknown media type %d\n", c);
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN;
		IFP2IFATM(sc->ifp)->mib.pcr = 353207;
		break;
	}
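	/* line rate in bit/s: 53 bytes per cell * 8 bits * peak cell rate */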
	sc->ifp->if_baudrate = 53 * 8 * IFP2IFATM(sc->ifp)->mib.pcr;
	utopia_init_media(&sc->utopia);

	/*
	 * Initialize the RBDs
	 */
	for (i = 0; i < FATM_RX_QLEN; i++) {
		q = GET_QUEUE(sc->rxqueue, struct rxqueue, i);
		WRITE4(sc, q->q.card + 0, q->q.card_ioblk);
	}
	BARRIER_W(sc);

	/*
	 * Supply buffers to the card
	 */
	fatm_supply_small_buffers(sc);
	fatm_supply_large_buffers(sc);

	/*
	 * Now set the flag that shows we are ready
	 */
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;

	/*
	 * Start the watchdog timer
	 */
	callout_reset(&sc->watchdog_timer, hz * 5, fatm_watchdog, sc);

	/* start SUNI */
	utopia_start(&sc->utopia);

	ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
	    sc->utopia.carrier == UTP_CARR_OK);

	/* start all channels */
	for (i = 0; i < FORE_MAX_VCC + 1; i++)
		if (sc->vccs[i] != NULL) {
			sc->vccs[i]->vflags |= FATM_VCC_REOPEN;
			error = fatm_load_vc(sc, sc->vccs[i]);
			if (error != 0) {
				if_printf(sc->ifp, "reopening %u "
				    "failed: %d\n", i, error);
				sc->vccs[i]->vflags &= ~FATM_VCC_REOPEN;
			}
		}

	DBG(sc, INIT, ("done"));
}

/*
 * This is the exported initialization function.
 */
static void
fatm_init(void *p)
{
	struct fatm_softc *sc = p;

	FATM_LOCK(sc);
	fatm_init_locked(sc);
	FATM_UNLOCK(sc);
}

/************************************************************/
/*
 * The INTERRUPT handling
 */
/*
 * Check the command queue. If a command was completed, call the completion
 * function for that command.
 */
static void
fatm_intr_drain_cmd(struct fatm_softc *sc)
{
	struct cmdqueue *q;
	int stat;

	/*
	 * Drain command queue
	 */
	for (;;) {
		q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if (stat != FATM_STAT_COMPLETE &&
		    stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
		    stat != FATM_STAT_ERROR)
			break;

		(*q->cb)(sc, q);

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
	}
}

/*
 * Drain the small buffer supply queue.
 */
static void
fatm_intr_drain_small_buffers(struct fatm_softc *sc)
{
	struct supqueue *q;
	int stat;

	for (;;) {
		q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if ((stat & FATM_STAT_COMPLETE) == 0)
			break;
		if (stat & FATM_STAT_ERROR)
			log(LOG_ERR, "%s: status %x\n", __func__, stat);

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		NEXT_QUEUE_ENTRY(sc->s1queue.tail, SMALL_SUPPLY_QLEN);
	}
}

/*
 * Drain the large buffer supply queue.
 */
static void
fatm_intr_drain_large_buffers(struct fatm_softc *sc)
{
	struct supqueue *q;
	int stat;

	for (;;) {
		q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if ((stat & FATM_STAT_COMPLETE) == 0)
			break;
		if (stat & FATM_STAT_ERROR)
			log(LOG_ERR, "%s: status %x\n", __func__, stat);

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		NEXT_QUEUE_ENTRY(sc->l1queue.tail, LARGE_SUPPLY_QLEN);
	}
}

/*
 * Check the receive queue. Send any received PDU up the protocol stack,
 * except when there was an error or the VCI appears to be closed; in that
 * case discard the PDU.
 */
static void
fatm_intr_drain_rx(struct fatm_softc *sc)
{
	struct rxqueue *q;
	int stat, mlen;
	u_int i;
	uint32_t h;
	struct mbuf *last, *m0;
	struct rpd *rpd;
	struct rbuf *rb;
	u_int vci, vpi, pt;
	struct atm_pseudohdr aph;
	struct ifnet *ifp;
	struct card_vcc *vc;

	for (;;) {
		q = GET_QUEUE(sc->rxqueue, struct rxqueue, sc->rxqueue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if ((stat & FATM_STAT_COMPLETE) == 0)
			break;

		rpd = (struct rpd *)q->q.ioblk;
		H_SYNCQ_POSTREAD(&sc->rxq_mem, rpd, RPD_SIZE);

		rpd->nseg = le32toh(rpd->nseg);
		mlen = 0;
		m0 = last = NULL;
		for (i = 0; i < rpd->nseg; i++) {
			rb = sc->rbufs + rpd->segment[i].handle;
			if (m0 == NULL) {
				m0 = last = rb->m;
			} else {
				last->m_next = rb->m;
				last = rb->m;
			}
			last->m_next = NULL;
			if (last->m_flags & M_EXT)
				sc->large_cnt--;
			else
				sc->small_cnt--;
			bus_dmamap_sync(sc->rbuf_tag, rb->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->rbuf_tag, rb->map);
			rb->m = NULL;

			LIST_REMOVE(rb, link);
			LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);

			last->m_len = le32toh(rpd->segment[i].length);
			mlen += last->m_len;
		}

		m0->m_pkthdr.len = mlen;
		m0->m_pkthdr.rcvif = sc->ifp;

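		/* Decompose the UNI cell header into VPI, VCI and payload type. */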
		h = le32toh(rpd->atm_header);
		vpi = (h >> 20) & 0xff;
		vci = (h >> 4) & 0xffff;
		pt  = (h >> 1) & 0x7;

		/*
		 * Locate the VCC this packet belongs to
		 */
		if (!VC_OK(sc, vpi, vci))
			vc = NULL;
		else if ((vc = sc->vccs[vci]) == NULL ||
		    !(vc->vflags & FATM_VCC_OPEN)) {
			sc->istats.rx_closed++;
			vc = NULL;
		}

		DBG(sc, RCV, ("RCV: vc=%u.%u pt=%u mlen=%d %s", vpi, vci,
		    pt, mlen, vc == NULL ? "dropped" : ""));

		if (vc == NULL) {
			m_freem(m0);
		} else {
#ifdef ENABLE_BPF
			if (!(vc->param.flags & ATMIO_FLAG_NG) &&
			    vc->param.aal == ATMIO_AAL_5 &&
			    (vc->param.flags & ATM_PH_LLCSNAP))
				BPF_MTAP(sc->ifp, m0);
#endif

			ATM_PH_FLAGS(&aph) = vc->param.flags;
			ATM_PH_VPI(&aph) = vpi;
			ATM_PH_SETVCI(&aph, vci);

			ifp = sc->ifp;
34
35#include "opt_inet.h"
36#include "opt_natm.h"
37
38#include <sys/types.h>
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/malloc.h>
42#include <sys/kernel.h>
43#include <sys/bus.h>
44#include <sys/errno.h>
45#include <sys/conf.h>
46#include <sys/module.h>
47#include <sys/queue.h>
48#include <sys/syslog.h>
49#include <sys/endian.h>
50#include <sys/sysctl.h>
51#include <sys/condvar.h>
52#include <vm/uma.h>
53
54#include <sys/sockio.h>
55#include <sys/mbuf.h>
56#include <sys/socket.h>
57
58#include <net/if.h>
59#include <net/if_var.h>
60#include <net/if_media.h>
61#include <net/if_types.h>
62#include <net/if_atm.h>
63#include <net/route.h>
64#ifdef ENABLE_BPF
65#include <net/bpf.h>
66#endif
67#ifdef INET
68#include <netinet/in.h>
69#include <netinet/if_atm.h>
70#endif
71
72#include <machine/bus.h>
73#include <machine/resource.h>
74#include <sys/bus.h>
75#include <sys/rman.h>
76#include <dev/pci/pcireg.h>
77#include <dev/pci/pcivar.h>
78
79#include <dev/utopia/utopia.h>
80
81#include <dev/fatm/if_fatmreg.h>
82#include <dev/fatm/if_fatmvar.h>
83
84#include <dev/fatm/firmware.h>
85
86devclass_t fatm_devclass;
87
88static const struct {
89 uint16_t vid;
90 uint16_t did;
91 const char *name;
92} fatm_devs[] = {
93 { 0x1127, 0x300,
94 "FORE PCA200E" },
95 { 0, 0, NULL }
96};
97
98static const struct rate {
99 uint32_t ratio;
100 uint32_t cell_rate;
101} rate_table[] = {
102#include <dev/fatm/if_fatm_rate.h>
103};
104#define RATE_TABLE_SIZE (sizeof(rate_table) / sizeof(rate_table[0]))
105
106SYSCTL_DECL(_hw_atm);
107
108MODULE_DEPEND(fatm, utopia, 1, 1, 1);
109
110static int fatm_utopia_readregs(struct ifatm *, u_int, uint8_t *, u_int *);
111static int fatm_utopia_writereg(struct ifatm *, u_int, u_int, u_int);
112
113static const struct utopia_methods fatm_utopia_methods = {
114 fatm_utopia_readregs,
115 fatm_utopia_writereg
116};
117
118#define VC_OK(SC, VPI, VCI) \
119 (((VPI) & ~((1 << IFP2IFATM((SC)->ifp)->mib.vpi_bits) - 1)) == 0 && \
120 (VCI) != 0 && ((VCI) & ~((1 << IFP2IFATM((SC)->ifp)->mib.vci_bits) - 1)) == 0)
121
122static int fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc);
123
124/*
125 * Probing is easy: step trough the list of known vendor and device
126 * ids and compare. If one is found - it's our.
127 */
128static int
129fatm_probe(device_t dev)
130{
131 int i;
132
133 for (i = 0; fatm_devs[i].name; i++)
134 if (pci_get_vendor(dev) == fatm_devs[i].vid &&
135 pci_get_device(dev) == fatm_devs[i].did) {
136 device_set_desc(dev, fatm_devs[i].name);
137 return (BUS_PROBE_DEFAULT);
138 }
139 return (ENXIO);
140}
141
142/*
143 * Function called at completion of a SUNI writeregs/readregs command.
144 * This is called from the interrupt handler while holding the softc lock.
145 * We use the queue entry as the randevouze point.
146 */
147static void
148fatm_utopia_writeregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
149{
150
151 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
152 if(H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
153 sc->istats.suni_reg_errors++;
154 q->error = EIO;
155 }
156 wakeup(q);
157}
158
159/*
160 * Write a SUNI register. The bits that are 1 in mask are written from val
161 * into register reg. We wait for the command to complete by sleeping on
162 * the register memory.
163 *
164 * We assume, that we already hold the softc mutex.
165 */
166static int
167fatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
168{
169 int error;
170 struct cmdqueue *q;
171 struct fatm_softc *sc;
172
173 sc = ifatm->ifp->if_softc;
174 FATM_CHECKLOCK(sc);
175 if (!(ifatm->ifp->if_drv_flags & IFF_DRV_RUNNING))
176 return (EIO);
177
178 /* get queue element and fill it */
179 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
180
181 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
182 if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
183 sc->istats.cmd_queue_full++;
184 return (EIO);
185 }
186 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
187
188 q->error = 0;
189 q->cb = fatm_utopia_writeregs_complete;
190 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
191 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
192
193 WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, 0);
194 BARRIER_W(sc);
195 WRITE4(sc, q->q.card + FATMOC_OP,
196 FATM_MAKE_SETOC3(reg, val, mask) | FATM_OP_INTERRUPT_SEL);
197 BARRIER_W(sc);
198
199 /*
200 * Wait for the command to complete
201 */
202 error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_setreg", hz);
203
204 switch(error) {
205
206 case EWOULDBLOCK:
207 error = EIO;
208 break;
209
210 case ERESTART:
211 error = EINTR;
212 break;
213
214 case 0:
215 error = q->error;
216 break;
217 }
218
219 return (error);
220}
221
222/*
223 * Function called at completion of a SUNI readregs command.
224 * This is called from the interrupt handler while holding the softc lock.
225 * We use reg_mem as the randevouze point.
226 */
227static void
228fatm_utopia_readregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
229{
230
231 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
232 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
233 sc->istats.suni_reg_errors++;
234 q->error = EIO;
235 }
236 wakeup(&sc->reg_mem);
237}
238
239/*
240 * Read SUNI registers
241 *
242 * We use a preallocated buffer to read the registers. Therefor we need
243 * to protect against multiple threads trying to read registers. We do this
244 * with a condition variable and a flag. We wait for the command to complete by sleeping on
245 * the register memory.
246 *
247 * We assume, that we already hold the softc mutex.
248 */
249static int
250fatm_utopia_readregs_internal(struct fatm_softc *sc)
251{
252 int error, i;
253 uint32_t *ptr;
254 struct cmdqueue *q;
255
256 /* get the buffer */
257 for (;;) {
258 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
259 return (EIO);
260 if (!(sc->flags & FATM_REGS_INUSE))
261 break;
262 cv_wait(&sc->cv_regs, &sc->mtx);
263 }
264 sc->flags |= FATM_REGS_INUSE;
265
266 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
267
268 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
269 if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
270 sc->istats.cmd_queue_full++;
271 return (EIO);
272 }
273 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
274
275 q->error = 0;
276 q->cb = fatm_utopia_readregs_complete;
277 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
278 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
279
280 bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map, BUS_DMASYNC_PREREAD);
281
282 WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, sc->reg_mem.paddr);
283 BARRIER_W(sc);
284 WRITE4(sc, q->q.card + FATMOC_OP,
285 FATM_OP_OC3_GET_REG | FATM_OP_INTERRUPT_SEL);
286 BARRIER_W(sc);
287
288 /*
289 * Wait for the command to complete
290 */
291 error = msleep(&sc->reg_mem, &sc->mtx, PZERO | PCATCH,
292 "fatm_getreg", hz);
293
294 switch(error) {
295
296 case EWOULDBLOCK:
297 error = EIO;
298 break;
299
300 case ERESTART:
301 error = EINTR;
302 break;
303
304 case 0:
305 bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map,
306 BUS_DMASYNC_POSTREAD);
307 error = q->error;
308 break;
309 }
310
311 if (error != 0) {
312 /* declare buffer to be free */
313 sc->flags &= ~FATM_REGS_INUSE;
314 cv_signal(&sc->cv_regs);
315 return (error);
316 }
317
318 /* swap if needed */
319 ptr = (uint32_t *)sc->reg_mem.mem;
320 for (i = 0; i < FATM_NREGS; i++)
321 ptr[i] = le32toh(ptr[i]) & 0xff;
322
323 return (0);
324}
325
326/*
327 * Read SUNI registers for the SUNI module.
328 *
329 * We assume, that we already hold the mutex.
330 */
331static int
332fatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *valp, u_int *np)
333{
334 int err;
335 int i;
336 struct fatm_softc *sc;
337
338 if (reg >= FATM_NREGS)
339 return (EINVAL);
340 if (reg + *np > FATM_NREGS)
341 *np = FATM_NREGS - reg;
342 sc = ifatm->ifp->if_softc;
343 FATM_CHECKLOCK(sc);
344
345 err = fatm_utopia_readregs_internal(sc);
346 if (err != 0)
347 return (err);
348
349 for (i = 0; i < *np; i++)
350 valp[i] = ((uint32_t *)sc->reg_mem.mem)[reg + i];
351
352 /* declare buffer to be free */
353 sc->flags &= ~FATM_REGS_INUSE;
354 cv_signal(&sc->cv_regs);
355
356 return (0);
357}
358
359/*
360 * Check whether the hard is beating. We remember the last heart beat and
361 * compare it to the current one. If it appears stuck for 10 times, we have
362 * a problem.
363 *
364 * Assume we hold the lock.
365 */
366static void
367fatm_check_heartbeat(struct fatm_softc *sc)
368{
369 uint32_t h;
370
371 FATM_CHECKLOCK(sc);
372
373 h = READ4(sc, FATMO_HEARTBEAT);
374 DBG(sc, BEAT, ("heartbeat %08x", h));
375
376 if (sc->stop_cnt == 10)
377 return;
378
379 if (h == sc->heartbeat) {
380 if (++sc->stop_cnt == 10) {
381 log(LOG_ERR, "i960 stopped???\n");
382 WRITE4(sc, FATMO_HIMR, 1);
383 }
384 return;
385 }
386
387 sc->stop_cnt = 0;
388 sc->heartbeat = h;
389}
390
391/*
392 * Ensure that the heart is still beating.
393 */
394static void
395fatm_watchdog(void *arg)
396{
397 struct fatm_softc *sc;
398
399 sc = arg;
400 FATM_CHECKLOCK(sc);
401 fatm_check_heartbeat(sc);
402 callout_reset(&sc->watchdog_timer, hz * 5, fatm_watchdog, sc);
403}
404
405/*
406 * Hard reset the i960 on the board. This is done by initializing registers,
407 * clearing interrupts and waiting for the selftest to finish. Not sure,
408 * whether all these barriers are actually needed.
409 *
410 * Assumes that we hold the lock.
411 */
412static int
413fatm_reset(struct fatm_softc *sc)
414{
415 int w;
416 uint32_t val;
417
418 FATM_CHECKLOCK(sc);
419
420 WRITE4(sc, FATMO_APP_BASE, FATMO_COMMON_ORIGIN);
421 BARRIER_W(sc);
422
423 WRITE4(sc, FATMO_UART_TO_960, XMIT_READY);
424 BARRIER_W(sc);
425
426 WRITE4(sc, FATMO_UART_TO_HOST, XMIT_READY);
427 BARRIER_W(sc);
428
429 WRITE4(sc, FATMO_BOOT_STATUS, COLD_START);
430 BARRIER_W(sc);
431
432 WRITE1(sc, FATMO_HCR, FATM_HCR_RESET);
433 BARRIER_W(sc);
434
435 DELAY(1000);
436
437 WRITE1(sc, FATMO_HCR, 0);
438 BARRIER_RW(sc);
439
440 DELAY(1000);
441
442 for (w = 100; w; w--) {
443 BARRIER_R(sc);
444 val = READ4(sc, FATMO_BOOT_STATUS);
445 switch (val) {
446 case SELF_TEST_OK:
447 return (0);
448 case SELF_TEST_FAIL:
449 return (EIO);
450 }
451 DELAY(1000);
452 }
453 return (EIO);
454}
455
456/*
457 * Stop the card. Must be called WITH the lock held
458 * Reset, free transmit and receive buffers. Wakeup everybody who may sleep.
459 */
460static void
461fatm_stop(struct fatm_softc *sc)
462{
463 int i;
464 struct cmdqueue *q;
465 struct rbuf *rb;
466 struct txqueue *tx;
467 uint32_t stat;
468
469 FATM_CHECKLOCK(sc);
470
471 /* Stop the board */
472 utopia_stop(&sc->utopia);
473 (void)fatm_reset(sc);
474
475 /* stop watchdog */
476 callout_stop(&sc->watchdog_timer);
477
478 if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) {
479 sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
480 ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
481 sc->utopia.carrier == UTP_CARR_OK);
482
483 /*
484 * Collect transmit mbufs, partial receive mbufs and
485 * supplied mbufs
486 */
487 for (i = 0; i < FATM_TX_QLEN; i++) {
488 tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
489 if (tx->m) {
490 bus_dmamap_unload(sc->tx_tag, tx->map);
491 m_freem(tx->m);
492 tx->m = NULL;
493 }
494 }
495
496 /* Collect supplied mbufs */
497 while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
498 LIST_REMOVE(rb, link);
499 bus_dmamap_unload(sc->rbuf_tag, rb->map);
500 m_free(rb->m);
501 rb->m = NULL;
502 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
503 }
504
505 /* Unwait any waiters */
506 wakeup(&sc->sadi_mem);
507
508 /* wakeup all threads waiting for STAT or REG buffers */
509 cv_broadcast(&sc->cv_stat);
510 cv_broadcast(&sc->cv_regs);
511
512 sc->flags &= ~(FATM_STAT_INUSE | FATM_REGS_INUSE);
513
514 /* wakeup all threads waiting on commands */
515 for (i = 0; i < FATM_CMD_QLEN; i++) {
516 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, i);
517
518 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
519 if ((stat = H_GETSTAT(q->q.statp)) != FATM_STAT_FREE) {
520 H_SETSTAT(q->q.statp, stat | FATM_STAT_ERROR);
521 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
522 wakeup(q);
523 }
524 }
525 utopia_reset_media(&sc->utopia);
526 }
527 sc->small_cnt = sc->large_cnt = 0;
528
529 /* Reset vcc info */
530 if (sc->vccs != NULL) {
531 sc->open_vccs = 0;
532 for (i = 0; i < FORE_MAX_VCC + 1; i++) {
533 if (sc->vccs[i] != NULL) {
534 if ((sc->vccs[i]->vflags & (FATM_VCC_OPEN |
535 FATM_VCC_TRY_OPEN)) == 0) {
536 uma_zfree(sc->vcc_zone, sc->vccs[i]);
537 sc->vccs[i] = NULL;
538 } else {
539 sc->vccs[i]->vflags = 0;
540 sc->open_vccs++;
541 }
542 }
543 }
544 }
545
546}
547
548/*
549 * Load the firmware into the board and save the entry point.
550 */
551static uint32_t
552firmware_load(struct fatm_softc *sc)
553{
554 struct firmware *fw = (struct firmware *)firmware;
555
556 DBG(sc, INIT, ("loading - entry=%x", fw->entry));
557 bus_space_write_region_4(sc->memt, sc->memh, fw->offset, firmware,
558 sizeof(firmware) / sizeof(firmware[0]));
559 BARRIER_RW(sc);
560
561 return (fw->entry);
562}
563
564/*
565 * Read a character from the virtual UART. The availability of a character
566 * is signaled by a non-null value of the 32 bit register. The eating of
567 * the character by us is signalled to the card by setting that register
568 * to zero.
569 */
570static int
571rx_getc(struct fatm_softc *sc)
572{
573 int w = 50;
574 int c;
575
576 while (w--) {
577 c = READ4(sc, FATMO_UART_TO_HOST);
578 BARRIER_RW(sc);
579 if (c != 0) {
580 WRITE4(sc, FATMO_UART_TO_HOST, 0);
581 DBGC(sc, UART, ("%c", c & 0xff));
582 return (c & 0xff);
583 }
584 DELAY(1000);
585 }
586 return (-1);
587}
588
589/*
590 * Eat up characters from the board and stuff them in the bit-bucket.
591 */
592static void
593rx_flush(struct fatm_softc *sc)
594{
595 int w = 10000;
596
597 while (w-- && rx_getc(sc) >= 0)
598 ;
599}
600
601/*
602 * Write a character to the card. The UART is available if the register
603 * is zero.
604 */
605static int
606tx_putc(struct fatm_softc *sc, u_char c)
607{
608 int w = 10;
609 int c1;
610
611 while (w--) {
612 c1 = READ4(sc, FATMO_UART_TO_960);
613 BARRIER_RW(sc);
614 if (c1 == 0) {
615 WRITE4(sc, FATMO_UART_TO_960, c | CHAR_AVAIL);
616 DBGC(sc, UART, ("%c", c & 0xff));
617 return (0);
618 }
619 DELAY(1000);
620 }
621 return (-1);
622}
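
/*
 * Illustrative only -- a hypothetical helper (not part of this driver,
 * hence guarded out): write a NUL-terminated string through the virtual
 * UART with tx_putc() and eat the echo after each character, the same
 * way fatm_start_firmware() below does it character by character.
 */
#ifdef FATM_UART_EXAMPLE
static int
tx_puts(struct fatm_softc *sc, const char *s)
{

	while (*s != '\0') {
		if (tx_putc(sc, *s++) == -1)
			return (-1);
		(void)rx_getc(sc);
	}
	return (0);
}
#endif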
623
624/*
625 * Start the firmware. This is done by typing a 'go' command with the
626 * hex entry address of the firmware (e.g. "go 4000" for entry 0x4000)
627 * into the virtual UART. Then we wait for the self-test to succeed.
628 */
629static int
630fatm_start_firmware(struct fatm_softc *sc, uint32_t start)
631{
632 static char hex[] = "0123456789abcdef";
633 u_int w, val;
634
635 DBG(sc, INIT, ("starting"));
636 rx_flush(sc);
637 tx_putc(sc, '\r');
638 DELAY(1000);
639
640 rx_flush(sc);
641
642 tx_putc(sc, 'g');
643 (void)rx_getc(sc);
644 tx_putc(sc, 'o');
645 (void)rx_getc(sc);
646 tx_putc(sc, ' ');
647 (void)rx_getc(sc);
648
649 tx_putc(sc, hex[(start >> 12) & 0xf]);
650 (void)rx_getc(sc);
651 tx_putc(sc, hex[(start >> 8) & 0xf]);
652 (void)rx_getc(sc);
653 tx_putc(sc, hex[(start >> 4) & 0xf]);
654 (void)rx_getc(sc);
655 tx_putc(sc, hex[(start >> 0) & 0xf]);
656 (void)rx_getc(sc);
657
658 tx_putc(sc, '\r');
659 rx_flush(sc);
660
661 for (w = 100; w; w--) {
662 BARRIER_R(sc);
663 val = READ4(sc, FATMO_BOOT_STATUS);
664 switch (val) {
665 case CP_RUNNING:
666 return (0);
667 case SELF_TEST_FAIL:
668 return (EIO);
669 }
670 DELAY(1000);
671 }
672 return (EIO);
673}
674
675/*
676 * Initialize one card and host queue.
677 */
678static void
679init_card_queue(struct fatm_softc *sc, struct fqueue *queue, int qlen,
680 size_t qel_size, size_t desc_size, cardoff_t off,
681 u_char **statpp, uint32_t *cardstat, u_char *descp, uint32_t carddesc)
682{
683 struct fqelem *el = queue->chunk;
684
685 while (qlen--) {
686 el->card = off;
687 off += 8; /* size of card entry */
688
689 el->statp = (uint32_t *)(*statpp);
690 (*statpp) += sizeof(uint32_t);
691 H_SETSTAT(el->statp, FATM_STAT_FREE);
692 H_SYNCSTAT_PREWRITE(sc, el->statp);
693
694 WRITE4(sc, el->card + FATMOS_STATP, (*cardstat));
695 (*cardstat) += sizeof(uint32_t);
696
697 el->ioblk = descp;
698 descp += desc_size;
699 el->card_ioblk = carddesc;
700 carddesc += desc_size;
701
702 el = (struct fqelem *)((u_char *)el + qel_size);
703 }
704 queue->tail = queue->head = 0;
705}
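
/*
 * To summarize the loop above: for every queue slot the host-side fqelem
 * and the card's queue entry end up referring to the same two objects --
 * the status word (host: el->statp, card: the bus address written at
 * FATMOS_STATP) and the descriptor block (host: el->ioblk, card:
 * el->card_ioblk).
 */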
706
707/*
708 * Issue the initialize operation to the card, wait for completion and
709 * initialize the on-board and host queue structures with offsets and
710 * addresses.
711 */
712static int
713fatm_init_cmd(struct fatm_softc *sc)
714{
715 int w, c;
716 u_char *statp;
717 uint32_t card_stat;
718 u_int cnt;
719 struct fqelem *el;
720 cardoff_t off;
721
722 DBG(sc, INIT, ("command"));
723 WRITE4(sc, FATMO_ISTAT, 0);
724 WRITE4(sc, FATMO_IMASK, 1);
725 WRITE4(sc, FATMO_HLOGGER, 0);
726
727 WRITE4(sc, FATMO_INIT + FATMOI_RECEIVE_TRESHOLD, 0);
728 WRITE4(sc, FATMO_INIT + FATMOI_NUM_CONNECT, FORE_MAX_VCC);
729 WRITE4(sc, FATMO_INIT + FATMOI_CQUEUE_LEN, FATM_CMD_QLEN);
730 WRITE4(sc, FATMO_INIT + FATMOI_TQUEUE_LEN, FATM_TX_QLEN);
731 WRITE4(sc, FATMO_INIT + FATMOI_RQUEUE_LEN, FATM_RX_QLEN);
732 WRITE4(sc, FATMO_INIT + FATMOI_RPD_EXTENSION, RPD_EXTENSIONS);
733 WRITE4(sc, FATMO_INIT + FATMOI_TPD_EXTENSION, TPD_EXTENSIONS);
734
735 /*
736 * initialize buffer descriptors
737 */
738 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_QUEUE_LENGTH,
739 SMALL_SUPPLY_QLEN);
740 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_BUFFER_SIZE,
741 SMALL_BUFFER_LEN);
742 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_POOL_SIZE,
743 SMALL_POOL_SIZE);
744 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_SUPPLY_BLKSIZE,
745 SMALL_SUPPLY_BLKSIZE);
746
747 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_QUEUE_LENGTH,
748 LARGE_SUPPLY_QLEN);
749 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_BUFFER_SIZE,
750 LARGE_BUFFER_LEN);
751 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_POOL_SIZE,
752 LARGE_POOL_SIZE);
753 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_SUPPLY_BLKSIZE,
754 LARGE_SUPPLY_BLKSIZE);
755
756 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_QUEUE_LENGTH, 0);
757 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_BUFFER_SIZE, 0);
758 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_POOL_SIZE, 0);
759 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_SUPPLY_BLKSIZE, 0);
760
761 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_QUEUE_LENGTH, 0);
762 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_BUFFER_SIZE, 0);
763 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_POOL_SIZE, 0);
764 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_SUPPLY_BLKSIZE, 0);
765
766 /*
767 * Start the command
768 */
769 BARRIER_W(sc);
770 WRITE4(sc, FATMO_INIT + FATMOI_STATUS, FATM_STAT_PENDING);
771 BARRIER_W(sc);
772 WRITE4(sc, FATMO_INIT + FATMOI_OP, FATM_OP_INITIALIZE);
773 BARRIER_W(sc);
774
775 /*
776 * Busy wait for completion
777 */
778 w = 100;
779 while (w--) {
780 c = READ4(sc, FATMO_INIT + FATMOI_STATUS);
781 BARRIER_R(sc);
782 if (c & FATM_STAT_COMPLETE)
783 break;
784 DELAY(1000);
785 }
786
787 if (c & FATM_STAT_ERROR)
788 return (EIO);
789
790 /*
791 * Initialize the queues
792 */
793 statp = sc->stat_mem.mem;
794 card_stat = sc->stat_mem.paddr;
795
796 /*
797 * Command queue. This is special in that it's on the card.
798 */
799 el = sc->cmdqueue.chunk;
800 off = READ4(sc, FATMO_COMMAND_QUEUE);
801 DBG(sc, INIT, ("cmd queue=%x", off));
802 for (cnt = 0; cnt < FATM_CMD_QLEN; cnt++) {
803 el = &((struct cmdqueue *)sc->cmdqueue.chunk + cnt)->q;
804
805 el->card = off;
806 off += 32; /* size of card structure */
807
808 el->statp = (uint32_t *)statp;
809 statp += sizeof(uint32_t);
810 H_SETSTAT(el->statp, FATM_STAT_FREE);
811 H_SYNCSTAT_PREWRITE(sc, el->statp);
812
813 WRITE4(sc, el->card + FATMOC_STATP, card_stat);
814 card_stat += sizeof(uint32_t);
815 }
816 sc->cmdqueue.tail = sc->cmdqueue.head = 0;
817
818 /*
819 * Now the other queues. These are in memory
820 */
821 init_card_queue(sc, &sc->txqueue, FATM_TX_QLEN,
822 sizeof(struct txqueue), TPD_SIZE,
823 READ4(sc, FATMO_TRANSMIT_QUEUE),
824 &statp, &card_stat, sc->txq_mem.mem, sc->txq_mem.paddr);
825
826 init_card_queue(sc, &sc->rxqueue, FATM_RX_QLEN,
827 sizeof(struct rxqueue), RPD_SIZE,
828 READ4(sc, FATMO_RECEIVE_QUEUE),
829 &statp, &card_stat, sc->rxq_mem.mem, sc->rxq_mem.paddr);
830
831 init_card_queue(sc, &sc->s1queue, SMALL_SUPPLY_QLEN,
832 sizeof(struct supqueue), BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE),
833 READ4(sc, FATMO_SMALL_B1_QUEUE),
834 &statp, &card_stat, sc->s1q_mem.mem, sc->s1q_mem.paddr);
835
836 init_card_queue(sc, &sc->l1queue, LARGE_SUPPLY_QLEN,
837 sizeof(struct supqueue), BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE),
838 READ4(sc, FATMO_LARGE_B1_QUEUE),
839 &statp, &card_stat, sc->l1q_mem.mem, sc->l1q_mem.paddr);
840
841 sc->txcnt = 0;
842
843 return (0);
844}
845
846/*
847 * Read PROM. Called only from attach code. Here we spin because the interrupt
848 * handler is not yet set up.
849 */
850static int
851fatm_getprom(struct fatm_softc *sc)
852{
853 int i;
854 struct prom *prom;
855 struct cmdqueue *q;
856
857 DBG(sc, INIT, ("reading prom"));
858 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
859 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
860
861 q->error = 0;
862 q->cb = NULL;
863 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
864 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
865
866 bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
867 BUS_DMASYNC_PREREAD);
868
869 WRITE4(sc, q->q.card + FATMOC_GPROM_BUF, sc->prom_mem.paddr);
870 BARRIER_W(sc);
871 WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_GET_PROM_DATA);
872 BARRIER_W(sc);
873
874 for (i = 0; i < 1000; i++) {
875 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
876 if (H_GETSTAT(q->q.statp) &
877 (FATM_STAT_COMPLETE | FATM_STAT_ERROR))
878 break;
879 DELAY(1000);
880 }
881 if (i == 1000) {
882 if_printf(sc->ifp, "getprom timeout\n");
883 return (EIO);
884 }
885 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
886 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
887 if_printf(sc->ifp, "getprom error\n");
888 return (EIO);
889 }
890 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
891 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
892 NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
893
894 bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
895 BUS_DMASYNC_POSTREAD);
896
897
898#ifdef notdef
899 {
900 u_int i;
901
902	u_char *ptr = (u_char *)sc->prom_mem.mem;
903	printf("PROM: ");
904 for (i = 0; i < sizeof(struct prom); i++)
905 printf("%02x ", *ptr++);
906 printf("\n");
907 }
908#endif
909
910 prom = (struct prom *)sc->prom_mem.mem;
911
912 bcopy(prom->mac + 2, IFP2IFATM(sc->ifp)->mib.esi, 6);
913 IFP2IFATM(sc->ifp)->mib.serial = le32toh(prom->serial);
914 IFP2IFATM(sc->ifp)->mib.hw_version = le32toh(prom->version);
915 IFP2IFATM(sc->ifp)->mib.sw_version = READ4(sc, FATMO_FIRMWARE_RELEASE);
916
917 if_printf(sc->ifp, "ESI=%02x:%02x:%02x:%02x:%02x:%02x "
918 "serial=%u hw=0x%x sw=0x%x\n", IFP2IFATM(sc->ifp)->mib.esi[0],
919 IFP2IFATM(sc->ifp)->mib.esi[1], IFP2IFATM(sc->ifp)->mib.esi[2], IFP2IFATM(sc->ifp)->mib.esi[3],
920 IFP2IFATM(sc->ifp)->mib.esi[4], IFP2IFATM(sc->ifp)->mib.esi[5], IFP2IFATM(sc->ifp)->mib.serial,
921 IFP2IFATM(sc->ifp)->mib.hw_version, IFP2IFATM(sc->ifp)->mib.sw_version);
922
923 return (0);
924}
925
926/*
927 * This is the callback function for bus_dmamap_load. We assume that we
928 * have a 32-bit bus and therefore always get exactly one segment.
929 */
930static void
931dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
932{
933 bus_addr_t *ptr = (bus_addr_t *)arg;
934
935 if (error != 0) {
936 printf("%s: error=%d\n", __func__, error);
937 return;
938 }
939 KASSERT(nsegs == 1, ("too many DMA segments"));
940 KASSERT(segs[0].ds_addr <= 0xffffffff, ("DMA address too large %lx",
941 (u_long)segs[0].ds_addr));
942
943 *ptr = segs[0].ds_addr;
944}
945
946/*
947 * Allocate a chunk of DMA-able memory and map it.
948 */
949static int
950alloc_dma_memory(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
951{
952 int error;
953
954 mem->mem = NULL;
955
956 if (bus_dma_tag_create(sc->parent_dmat, mem->align, 0,
957 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
958 NULL, NULL, mem->size, 1, BUS_SPACE_MAXSIZE_32BIT,
959 BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
960 if_printf(sc->ifp, "could not allocate %s DMA tag\n",
961 nm);
962 return (ENOMEM);
963 }
964
965 error = bus_dmamem_alloc(mem->dmat, &mem->mem, 0, &mem->map);
966 if (error) {
967 if_printf(sc->ifp, "could not allocate %s DMA memory: "
968 "%d\n", nm, error);
969 bus_dma_tag_destroy(mem->dmat);
970 mem->mem = NULL;
971 return (error);
972 }
973
974 error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
975 dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
976 if (error) {
977 if_printf(sc->ifp, "could not load %s DMA memory: "
978 "%d\n", nm, error);
979 bus_dmamem_free(mem->dmat, mem->mem, mem->map);
980 bus_dma_tag_destroy(mem->dmat);
981 mem->mem = NULL;
982 return (error);
983 }
984
985 DBG(sc, DMA, ("DMA %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
986 (u_long)mem->paddr, mem->size, mem->align));
987
988 return (0);
989}
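
/*
 * Hypothetical usage sketch (guarded out): the caller is expected to fill
 * in mem->size and mem->align -- the only fields alloc_dma_memory() reads
 * on entry -- before the call; the values below are made up.
 */
#ifdef FATM_DMA_EXAMPLE
static int
example_alloc(struct fatm_softc *sc, struct fatm_mem *mem)
{

	mem->size = 4096;	/* hypothetical chunk size */
	mem->align = 4;		/* hypothetical alignment */
	return (alloc_dma_memory(sc, "example", mem));
}
#endif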
990
991#ifdef TEST_DMA_SYNC
992static int
993alloc_dma_memoryX(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
994{
995 int error;
996
997 mem->mem = NULL;
998
999 if (bus_dma_tag_create(NULL, mem->align, 0,
1000 BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR,
1001 NULL, NULL, mem->size, 1, mem->size,
1002 BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
1003 if_printf(sc->ifp, "could not allocate %s DMA tag\n",
1004 nm);
1005 return (ENOMEM);
1006 }
1007
1008 mem->mem = contigmalloc(mem->size, M_DEVBUF, M_WAITOK,
1009 BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR_32BIT, mem->align, 0);
1010
1011 error = bus_dmamap_create(mem->dmat, 0, &mem->map);
1012 if (error) {
1013 if_printf(sc->ifp, "could not allocate %s DMA map: "
1014 "%d\n", nm, error);
1015 contigfree(mem->mem, mem->size, M_DEVBUF);
1016 bus_dma_tag_destroy(mem->dmat);
1017 mem->mem = NULL;
1018 return (error);
1019 }
1020
1021 error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
1022 dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
1023 if (error) {
1024 if_printf(sc->ifp, "could not load %s DMA memory: "
1025 "%d\n", nm, error);
1026 bus_dmamap_destroy(mem->dmat, mem->map);
1027 contigfree(mem->mem, mem->size, M_DEVBUF);
1028 bus_dma_tag_destroy(mem->dmat);
1029 mem->mem = NULL;
1030 return (error);
1031 }
1032
1033 DBG(sc, DMA, ("DMAX %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
1034 (u_long)mem->paddr, mem->size, mem->align));
1035
1036 printf("DMAX: %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
1037 (u_long)mem->paddr, mem->size, mem->align);
1038
1039 return (0);
1040}
1041#endif /* TEST_DMA_SYNC */
1042
1043/*
1044 * Destroy all resources of a DMA-able memory chunk.
1045 */
1046static void
1047destroy_dma_memory(struct fatm_mem *mem)
1048{
1049 if (mem->mem != NULL) {
1050 bus_dmamap_unload(mem->dmat, mem->map);
1051 bus_dmamem_free(mem->dmat, mem->mem, mem->map);
1052 bus_dma_tag_destroy(mem->dmat);
1053 mem->mem = NULL;
1054 }
1055}
1056#ifdef TEST_DMA_SYNC
1057static void
1058destroy_dma_memoryX(struct fatm_mem *mem)
1059{
1060 if (mem->mem != NULL) {
1061 bus_dmamap_unload(mem->dmat, mem->map);
1062 bus_dmamap_destroy(mem->dmat, mem->map);
1063 contigfree(mem->mem, mem->size, M_DEVBUF);
1064 bus_dma_tag_destroy(mem->dmat);
1065 mem->mem = NULL;
1066 }
1067}
1068#endif /* TEST_DMA_SYNC */
1069
1070/*
1071 * Try to supply small buffers to the card if there are free entries in the queues.
1072 */
1073static void
1074fatm_supply_small_buffers(struct fatm_softc *sc)
1075{
1076 int nblocks, nbufs;
1077 struct supqueue *q;
1078 struct rbd *bd;
1079 int i, j, error, cnt;
1080 struct mbuf *m;
1081 struct rbuf *rb;
1082 bus_addr_t phys;
1083
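	/*
	 * Target fill level: four buffers per open VCC but at least 32,
	 * capped at the pool size; what is missing relative to the current
	 * count is supplied in whole blocks of SMALL_SUPPLY_BLKSIZE buffers.
	 */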
1084 nbufs = max(4 * sc->open_vccs, 32);
1085 nbufs = min(nbufs, SMALL_POOL_SIZE);
1086 nbufs -= sc->small_cnt;
1087
1088 nblocks = (nbufs + SMALL_SUPPLY_BLKSIZE - 1) / SMALL_SUPPLY_BLKSIZE;
1089 for (cnt = 0; cnt < nblocks; cnt++) {
1090 q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.head);
1091
1092 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1093 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
1094 break;
1095
1096 bd = (struct rbd *)q->q.ioblk;
1097
1098 for (i = 0; i < SMALL_SUPPLY_BLKSIZE; i++) {
1099 if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
1100 if_printf(sc->ifp, "out of rbufs\n");
1101 break;
1102 }
1103 MGETHDR(m, M_NOWAIT, MT_DATA);
1104 if (m == NULL) {
1105 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1106 break;
1107 }
1108 MH_ALIGN(m, SMALL_BUFFER_LEN);
1109 error = bus_dmamap_load(sc->rbuf_tag, rb->map,
1110 m->m_data, SMALL_BUFFER_LEN, dmaload_helper,
1111 &phys, BUS_DMA_NOWAIT);
1112 if (error) {
1113 if_printf(sc->ifp,
1114 "dmamap_load mbuf failed %d", error);
1115 m_freem(m);
1116 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1117 break;
1118 }
1119 bus_dmamap_sync(sc->rbuf_tag, rb->map,
1120 BUS_DMASYNC_PREREAD);
1121
1122 LIST_REMOVE(rb, link);
1123 LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);
1124
1125 rb->m = m;
1126 bd[i].handle = rb - sc->rbufs;
1127 H_SETDESC(bd[i].buffer, phys);
1128 }
1129
1130 if (i < SMALL_SUPPLY_BLKSIZE) {
1131 for (j = 0; j < i; j++) {
1132 rb = sc->rbufs + bd[j].handle;
1133 bus_dmamap_unload(sc->rbuf_tag, rb->map);
1134 m_free(rb->m);
1135 rb->m = NULL;
1136
1137 LIST_REMOVE(rb, link);
1138 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1139 }
1140 break;
1141 }
1142 H_SYNCQ_PREWRITE(&sc->s1q_mem, bd,
1143 sizeof(struct rbd) * SMALL_SUPPLY_BLKSIZE);
1144
1145 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1146 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1147
1148 WRITE4(sc, q->q.card, q->q.card_ioblk);
1149 BARRIER_W(sc);
1150
1151 sc->small_cnt += SMALL_SUPPLY_BLKSIZE;
1152
1153 NEXT_QUEUE_ENTRY(sc->s1queue.head, SMALL_SUPPLY_QLEN);
1154 }
1155}
1156
1157/*
1158 * Try to supply large buffers to the card if there are free entries in the
1159 * queues. We assume that all buffers are within the address space accessible
1160 * by the card (32-bit), so we don't need bounce buffers.
1161 */
1162static void
1163fatm_supply_large_buffers(struct fatm_softc *sc)
1164{
1165 int nbufs, nblocks, cnt;
1166 struct supqueue *q;
1167 struct rbd *bd;
1168 int i, j, error;
1169 struct mbuf *m;
1170 struct rbuf *rb;
1171 bus_addr_t phys;
1172
1173 nbufs = max(4 * sc->open_vccs, 32);
1174 nbufs = min(nbufs, LARGE_POOL_SIZE);
1175 nbufs -= sc->large_cnt;
1176
1177 nblocks = (nbufs + LARGE_SUPPLY_BLKSIZE - 1) / LARGE_SUPPLY_BLKSIZE;
1178
1179 for (cnt = 0; cnt < nblocks; cnt++) {
1180 q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.head);
1181
1182 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1183 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
1184 break;
1185
1186 bd = (struct rbd *)q->q.ioblk;
1187
1188 for (i = 0; i < LARGE_SUPPLY_BLKSIZE; i++) {
1189 if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
1190 if_printf(sc->ifp, "out of rbufs\n");
1191 break;
1192 }
1193 if ((m = m_getcl(M_NOWAIT, MT_DATA,
1194 M_PKTHDR)) == NULL) {
1195 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1196 break;
1197 }
1198 /* No MEXT_ALIGN */
1199 m->m_data += MCLBYTES - LARGE_BUFFER_LEN;
1200 error = bus_dmamap_load(sc->rbuf_tag, rb->map,
1201 m->m_data, LARGE_BUFFER_LEN, dmaload_helper,
1202 &phys, BUS_DMA_NOWAIT);
1203 if (error) {
1204 if_printf(sc->ifp,
1205 "dmamap_load mbuf failed %d", error);
1206 m_freem(m);
1207 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1208 break;
1209 }
1210
1211 bus_dmamap_sync(sc->rbuf_tag, rb->map,
1212 BUS_DMASYNC_PREREAD);
1213
1214 LIST_REMOVE(rb, link);
1215 LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);
1216
1217 rb->m = m;
1218 bd[i].handle = rb - sc->rbufs;
1219 H_SETDESC(bd[i].buffer, phys);
1220 }
1221
1222 if (i < LARGE_SUPPLY_BLKSIZE) {
1223 for (j = 0; j < i; j++) {
1224 rb = sc->rbufs + bd[j].handle;
1225 bus_dmamap_unload(sc->rbuf_tag, rb->map);
1226 m_free(rb->m);
1227 rb->m = NULL;
1228
1229 LIST_REMOVE(rb, link);
1230 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1231 }
1232 break;
1233 }
1234 H_SYNCQ_PREWRITE(&sc->l1q_mem, bd,
1235 sizeof(struct rbd) * LARGE_SUPPLY_BLKSIZE);
1236
1237 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1238 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1239 WRITE4(sc, q->q.card, q->q.card_ioblk);
1240 BARRIER_W(sc);
1241
1242 sc->large_cnt += LARGE_SUPPLY_BLKSIZE;
1243
1244 NEXT_QUEUE_ENTRY(sc->l1queue.head, LARGE_SUPPLY_QLEN);
1245 }
1246}
1247
1248
1249/*
1250 * Actually start the card. The lock must be held here.
1251 * Reset the card, load the firmware, start it, initialize the queues, read
1252 * the PROM and supply receive buffers to the card.
1253 */
1254static void
1255fatm_init_locked(struct fatm_softc *sc)
1256{
1257 struct rxqueue *q;
1258 int i, c, error;
1259 uint32_t start;
1260
1261 DBG(sc, INIT, ("initialize"));
1262 if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING)
1263 fatm_stop(sc);
1264
1265 /*
1266 * Hard reset the board
1267 */
1268 if (fatm_reset(sc))
1269 return;
1270
1271 start = firmware_load(sc);
1272 if (fatm_start_firmware(sc, start) || fatm_init_cmd(sc) ||
1273 fatm_getprom(sc)) {
1274 fatm_reset(sc);
1275 return;
1276 }
1277
1278 /*
1279 * Handle media
1280 */
1281 c = READ4(sc, FATMO_MEDIA_TYPE);
1282 switch (c) {
1283
1284 case FORE_MT_TAXI_100:
1285 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_TAXI_100;
1286 IFP2IFATM(sc->ifp)->mib.pcr = 227273;
1287 break;
1288
1289 case FORE_MT_TAXI_140:
1290 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_TAXI_140;
1291 IFP2IFATM(sc->ifp)->mib.pcr = 318181;
1292 break;
1293
1294 case FORE_MT_UTP_SONET:
1295 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155;
1296 IFP2IFATM(sc->ifp)->mib.pcr = 353207;
1297 break;
1298
1299 case FORE_MT_MM_OC3_ST:
1300 case FORE_MT_MM_OC3_SC:
1301 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155;
1302 IFP2IFATM(sc->ifp)->mib.pcr = 353207;
1303 break;
1304
1305 case FORE_MT_SM_OC3_ST:
1306 case FORE_MT_SM_OC3_SC:
1307 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_155;
1308 IFP2IFATM(sc->ifp)->mib.pcr = 353207;
1309 break;
1310
1311 default:
1312 log(LOG_ERR, "fatm: unknown media type %d\n", c);
1313 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN;
1314 IFP2IFATM(sc->ifp)->mib.pcr = 353207;
1315 break;
1316 }
1317 sc->ifp->if_baudrate = 53 * 8 * IFP2IFATM(sc->ifp)->mib.pcr;
1318 utopia_init_media(&sc->utopia);
1319
1320 /*
1321 * Initialize the RBDs
1322 */
1323 for (i = 0; i < FATM_RX_QLEN; i++) {
1324 q = GET_QUEUE(sc->rxqueue, struct rxqueue, i);
1325 WRITE4(sc, q->q.card + 0, q->q.card_ioblk);
1326 }
1327 BARRIER_W(sc);
1328
1329 /*
1330 * Supply buffers to the card
1331 */
1332 fatm_supply_small_buffers(sc);
1333 fatm_supply_large_buffers(sc);
1334
1335 /*
1336 * Now set the flags to indicate that we are ready
1337 */
1338 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
1339
1340 /*
1341 * Start the watchdog timer
1342 */
1343 callout_reset(&sc->watchdog_timer, hz * 5, fatm_watchdog, sc);
1344
1345 /* start SUNI */
1346 utopia_start(&sc->utopia);
1347
1348 ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
1349 sc->utopia.carrier == UTP_CARR_OK);
1350
1351 /* start all channels */
1352 for (i = 0; i < FORE_MAX_VCC + 1; i++)
1353 if (sc->vccs[i] != NULL) {
1354 sc->vccs[i]->vflags |= FATM_VCC_REOPEN;
1355 error = fatm_load_vc(sc, sc->vccs[i]);
1356 if (error != 0) {
1357 if_printf(sc->ifp, "reopening %u "
1358 "failed: %d\n", i, error);
1359 sc->vccs[i]->vflags &= ~FATM_VCC_REOPEN;
1360 }
1361 }
1362
1363 DBG(sc, INIT, ("done"));
1364}
1365
1366/*
1367 * This is exported as the initialization function.
1368 */
1369static void
1370fatm_init(void *p)
1371{
1372 struct fatm_softc *sc = p;
1373
1374 FATM_LOCK(sc);
1375 fatm_init_locked(sc);
1376 FATM_UNLOCK(sc);
1377}
1378
1379/************************************************************/
1380/*
1381 * The INTERRUPT handling
1382 */
1383/*
1384 * Check the command queue. If a command was completed, call the completion
1385 * function for that command.
1386 */
1387static void
1388fatm_intr_drain_cmd(struct fatm_softc *sc)
1389{
1390 struct cmdqueue *q;
1391 int stat;
1392
1393 /*
1394 * Drain command queue
1395 */
1396 for (;;) {
1397 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.tail);
1398
1399 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1400 stat = H_GETSTAT(q->q.statp);
1401
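		/*
		 * Process this entry only if the card has marked it
		 * completed (with or without the error bit); the first
		 * entry that is still pending or free ends the drain.
		 */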
1402 if (stat != FATM_STAT_COMPLETE &&
1403 stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
1404 stat != FATM_STAT_ERROR)
1405 break;
1406
1407 (*q->cb)(sc, q);
1408
1409 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1410 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1411
1412 NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
1413 }
1414}
1415
1416/*
1417 * Drain the small buffer supply queue.
1418 */
1419static void
1420fatm_intr_drain_small_buffers(struct fatm_softc *sc)
1421{
1422 struct supqueue *q;
1423 int stat;
1424
1425 for (;;) {
1426 q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.tail);
1427
1428 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1429 stat = H_GETSTAT(q->q.statp);
1430
1431 if ((stat & FATM_STAT_COMPLETE) == 0)
1432 break;
1433 if (stat & FATM_STAT_ERROR)
1434 log(LOG_ERR, "%s: status %x\n", __func__, stat);
1435
1436 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1437 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1438
1439 NEXT_QUEUE_ENTRY(sc->s1queue.tail, SMALL_SUPPLY_QLEN);
1440 }
1441}
1442
1443/*
1444 * Drain the large buffer supply queue.
1445 */
1446static void
1447fatm_intr_drain_large_buffers(struct fatm_softc *sc)
1448{
1449 struct supqueue *q;
1450 int stat;
1451
1452 for (;;) {
1453 q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.tail);
1454
1455 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1456 stat = H_GETSTAT(q->q.statp);
1457
1458 if ((stat & FATM_STAT_COMPLETE) == 0)
1459 break;
1460 if (stat & FATM_STAT_ERROR)
1461 log(LOG_ERR, "%s status %x\n", __func__, stat);
1462
1463 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1464 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1465
1466 NEXT_QUEUE_ENTRY(sc->l1queue.tail, LARGE_SUPPLY_QLEN);
1467 }
1468}
1469
1470/*
1471 * Check the receive queue. Send any received PDU up the protocol stack
1472 * (except when there was an error or the VCI appears to be closed; in
1473 * that case the PDU is discarded).
1474 */
1475static void
1476fatm_intr_drain_rx(struct fatm_softc *sc)
1477{
1478 struct rxqueue *q;
1479 int stat, mlen;
1480 u_int i;
1481 uint32_t h;
1482 struct mbuf *last, *m0;
1483 struct rpd *rpd;
1484 struct rbuf *rb;
1485 u_int vci, vpi, pt;
1486 struct atm_pseudohdr aph;
1487 struct ifnet *ifp;
1488 struct card_vcc *vc;
1489
1490 for (;;) {
1491 q = GET_QUEUE(sc->rxqueue, struct rxqueue, sc->rxqueue.tail);
1492
1493 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1494 stat = H_GETSTAT(q->q.statp);
1495
1496 if ((stat & FATM_STAT_COMPLETE) == 0)
1497 break;
1498
1499 rpd = (struct rpd *)q->q.ioblk;
1500 H_SYNCQ_POSTREAD(&sc->rxq_mem, rpd, RPD_SIZE);
1501
1502 rpd->nseg = le32toh(rpd->nseg);
1503 mlen = 0;
1504		m0 = last = NULL;
1505 for (i = 0; i < rpd->nseg; i++) {
1506 rb = sc->rbufs + rpd->segment[i].handle;
1507 if (m0 == NULL) {
1508 m0 = last = rb->m;
1509 } else {
1510 last->m_next = rb->m;
1511 last = rb->m;
1512 }
1513 last->m_next = NULL;
1514 if (last->m_flags & M_EXT)
1515 sc->large_cnt--;
1516 else
1517 sc->small_cnt--;
1518 bus_dmamap_sync(sc->rbuf_tag, rb->map,
1519 BUS_DMASYNC_POSTREAD);
1520 bus_dmamap_unload(sc->rbuf_tag, rb->map);
1521 rb->m = NULL;
1522
1523 LIST_REMOVE(rb, link);
1524 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1525
1526 last->m_len = le32toh(rpd->segment[i].length);
1527 mlen += last->m_len;
1528 }
1529
1530 m0->m_pkthdr.len = mlen;
1531 m0->m_pkthdr.rcvif = sc->ifp;
1532
1533 h = le32toh(rpd->atm_header);
1534 vpi = (h >> 20) & 0xff;
1535 vci = (h >> 4 ) & 0xffff;
1536 pt = (h >> 1 ) & 0x7;
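		/*
		 * The shifts above decode the cell header (GFC:4 VPI:8
		 * VCI:16 PT:3 CLP:1, from the most significant bit); e.g.
		 * h = 0x00100410 yields vpi = 1, vci = 65, pt = 0.
		 */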
1537
1538 /*
1539 * Locate the VCC this packet belongs to
1540 */
1541 if (!VC_OK(sc, vpi, vci))
1542 vc = NULL;
1543 else if ((vc = sc->vccs[vci]) == NULL ||
1544 !(sc->vccs[vci]->vflags & FATM_VCC_OPEN)) {
1545 sc->istats.rx_closed++;
1546 vc = NULL;
1547 }
1548
1549 DBG(sc, RCV, ("RCV: vc=%u.%u pt=%u mlen=%d %s", vpi, vci,
1550 pt, mlen, vc == NULL ? "dropped" : ""));
1551
1552 if (vc == NULL) {
1553 m_freem(m0);
1554 } else {
1555#ifdef ENABLE_BPF
1556 if (!(vc->param.flags & ATMIO_FLAG_NG) &&
1557 vc->param.aal == ATMIO_AAL_5 &&
1558 (vc->param.flags & ATM_PH_LLCSNAP))
1559 BPF_MTAP(sc->ifp, m0);
1560#endif
1561
1562 ATM_PH_FLAGS(&aph) = vc->param.flags;
1563 ATM_PH_VPI(&aph) = vpi;
1564 ATM_PH_SETVCI(&aph, vci);
1565
1566 ifp = sc->ifp;
1567 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1568
1569 vc->ipackets++;
1570 vc->ibytes += m0->m_pkthdr.len;
1571
1572 atm_input(ifp, &aph, m0, vc->rxhand);
1573 }
1574
1575 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1576 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1577
1578 WRITE4(sc, q->q.card, q->q.card_ioblk);
1579 BARRIER_W(sc);
1580
1581 NEXT_QUEUE_ENTRY(sc->rxqueue.tail, FATM_RX_QLEN);
1582 }
1583}
1584
1585/*
1586 * Check the transmit queue. Free the mbuf chains that we were transmitting.
1587 */
1588static void
1589fatm_intr_drain_tx(struct fatm_softc *sc)
1590{
1591 struct txqueue *q;
1592 int stat;
1593
1594 /*
1595 * Drain tx queue
1596 */
1597 for (;;) {
1598 q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.tail);
1599
1600 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1601 stat = H_GETSTAT(q->q.statp);
1602
1603 if (stat != FATM_STAT_COMPLETE &&
1604 stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
1605 stat != FATM_STAT_ERROR)
1606 break;
1607
1608 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1609 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1610
1611 bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_POSTWRITE);
1612 bus_dmamap_unload(sc->tx_tag, q->map);
1613
1614 m_freem(q->m);
1615 q->m = NULL;
1616 sc->txcnt--;
1617
1618 NEXT_QUEUE_ENTRY(sc->txqueue.tail, FATM_TX_QLEN);
1619 }
1620}
1621
1622/*
1623 * Interrupt handler
1624 */
1625static void
1626fatm_intr(void *p)
1627{
1628 struct fatm_softc *sc = (struct fatm_softc *)p;
1629
1630 FATM_LOCK(sc);
1631 if (!READ4(sc, FATMO_PSR)) {
1632 FATM_UNLOCK(sc);
1633 return;
1634 }
1635 WRITE4(sc, FATMO_HCR, FATM_HCR_CLRIRQ);
1636
1637 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1638 FATM_UNLOCK(sc);
1639 return;
1640 }
1641 fatm_intr_drain_cmd(sc);
1642 fatm_intr_drain_rx(sc);
1643 fatm_intr_drain_tx(sc);
1644 fatm_intr_drain_small_buffers(sc);
1645 fatm_intr_drain_large_buffers(sc);
1646 fatm_supply_small_buffers(sc);
1647 fatm_supply_large_buffers(sc);
1648
1649 FATM_UNLOCK(sc);
1650
1651 if (sc->retry_tx && _IF_QLEN(&sc->ifp->if_snd))
1652 (*sc->ifp->if_start)(sc->ifp);
1653}
1654
1655/*
1656 * Get device statistics. This must be called with the softc locked.
1657 * We use a preallocated buffer, so we need to protect this buffer.
1658 * We do this by using a condition variable and a flag. If the flag is set
1659 * the buffer is in use by one thread (one thread is executing a GETSTAT
1660 * card command). In this case all other threads that are trying to get
1661 * statistics block on that condition variable. When the thread finishes
1662 * using the buffer it resets the flag and signals the condition variable. This
1663 * will wake up the next thread that is waiting for the buffer. If the interface
1664 * is stopped the stopping function will broadcast the cv. All threads will
1665 * find that the interface has been stopped and return.
1666 *
1667 * Acquiring the buffer is done by the fatm_getstat() function. The freeing
1668 * must be done by the caller when it has finished using the buffer.
1669 */
1670static void
1671fatm_getstat_complete(struct fatm_softc *sc, struct cmdqueue *q)
1672{
1673
1674 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1675 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
1676 sc->istats.get_stat_errors++;
1677 q->error = EIO;
1678 }
1679 wakeup(&sc->sadi_mem);
1680}
1681static int
1682fatm_getstat(struct fatm_softc *sc)
1683{
1684 int error;
1685 struct cmdqueue *q;
1686
1687 /*
1688 * Wait until either the interface is stopped or we can get the
1689 * statistics buffer
1690 */
1691 for (;;) {
1692 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
1693 return (EIO);
1694 if (!(sc->flags & FATM_STAT_INUSE))
1695 break;
1696 cv_wait(&sc->cv_stat, &sc->mtx);
1697 }
1698 sc->flags |= FATM_STAT_INUSE;
1699
1700 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
1701
1702 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1703 if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
1704 sc->istats.cmd_queue_full++;
1705 return (EIO);
1706 }
1707 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
1708
1709 q->error = 0;
1710 q->cb = fatm_getstat_complete;
1711 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1712 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1713
1714 bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
1715 BUS_DMASYNC_PREREAD);
1716
1717 WRITE4(sc, q->q.card + FATMOC_GSTAT_BUF,
1718 sc->sadi_mem.paddr);
1719 BARRIER_W(sc);
1720 WRITE4(sc, q->q.card + FATMOC_OP,
1721 FATM_OP_REQUEST_STATS | FATM_OP_INTERRUPT_SEL);
1722 BARRIER_W(sc);
1723
1724 /*
1725 * Wait for the command to complete
1726 */
1727 error = msleep(&sc->sadi_mem, &sc->mtx, PZERO | PCATCH,
1728 "fatm_stat", hz);
1729
1730 switch (error) {
1731
1732 case EWOULDBLOCK:
1733 error = EIO;
1734 break;
1735
1736 case ERESTART:
1737 error = EINTR;
1738 break;
1739
1740 case 0:
1741 bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
1742 BUS_DMASYNC_POSTREAD);
1743 error = q->error;
1744 break;
1745 }
1746
1747 /*
1748 * Swap statistics
1749 */
1750 if (q->error == 0) {
1751 u_int i;
1752 uint32_t *p = (uint32_t *)sc->sadi_mem.mem;
1753
1754 for (i = 0; i < sizeof(struct fatm_stats) / sizeof(uint32_t);
1755 i++, p++)
1756 *p = be32toh(*p);
1757 }
1758
1759 return (error);
1760}
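
/*
 * Hypothetical caller sketch (guarded out) for the buffer protocol
 * described above fatm_getstat_complete(): the caller must hold the
 * softc lock across the call and must release the buffer itself by
 * clearing FATM_STAT_INUSE and signalling the condition variable.
 */
#ifdef FATM_GETSTAT_EXAMPLE
static int
example_getstat(struct fatm_softc *sc)
{
	int error;

	FATM_LOCK(sc);
	error = fatm_getstat(sc);
	/* ... consume sc->sadi_mem.mem here, still under the lock ... */
	sc->flags &= ~FATM_STAT_INUSE;
	cv_signal(&sc->cv_stat);
	FATM_UNLOCK(sc);
	return (error);
}
#endif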
1761
1762/*
1763 * Create a copy of a single mbuf. It can have either internal or
1764 * external data, and it may have a packet header. External data is really
1765 * copied, so the new buffer is writable.
1766 */
1767static struct mbuf *
1768copy_mbuf(struct mbuf *m)
1769{
1770 struct mbuf *new;
1771
1772 MGET(new, M_NOWAIT, MT_DATA);
1773 if (new == NULL)
1774 return (NULL);
1775
1776 if (m->m_flags & M_PKTHDR) {
1777 M_MOVE_PKTHDR(new, m);
1778 if (m->m_len > MHLEN)
1779 MCLGET(new, M_WAITOK);
1780 } else {
1781 if (m->m_len > MLEN)
1782 MCLGET(new, M_WAITOK);
1783 }
1784
1785 bcopy(m->m_data, new->m_data, m->m_len);
1786 new->m_len = m->m_len;
1787 new->m_flags &= ~M_RDONLY;
1788
1789 return (new);
1790}
1791
1792/*
1793 * All segments must have a four byte aligned buffer address and a four
1794 * byte aligned length. Step through an mbuf chain and check these conditions.
1795 * If the buffer address is not aligned and this is a normal mbuf, move
1796 * the data down. Else make a copy of the mbuf with aligned data.
1797 * If the buffer length is not aligned, steal data from the next mbuf.
1798 * We don't need to check whether this has more than one external reference,
1799 * because stealing data doesn't change the external cluster.
1800 * If the last mbuf is not aligned, fill with zeroes.
1801 *
1802 * Return packet length (well we should have this in the packet header),
1803 * but be careful not to count the zero fill at the end.
1804 *
1805 * If fixing fails free the chain and zero the pointer.
1806 *
1807 * We assume that aligning the virtual address also aligns the mapped bus
1808 * address.
1809 */
1810static u_int
1811fatm_fix_chain(struct fatm_softc *sc, struct mbuf **mp)
1812{
1813 struct mbuf *m = *mp, *prev = NULL, *next, *new;
1814 u_int mlen = 0, fill = 0;
1815 int first, off;
1816 u_char *d, *cp;
1817
1818 do {
1819 next = m->m_next;
1820
1821 if ((uintptr_t)mtod(m, void *) % 4 != 0 ||
1822 (m->m_len % 4 != 0 && next)) {
1823 /*
1824 * Needs fixing
1825 */
1826 first = (m == *mp);
1827
1828 d = mtod(m, u_char *);
1829 if ((off = (uintptr_t)(void *)d % 4) != 0) {
1830 if (M_WRITABLE(m)) {
1831 sc->istats.fix_addr_copy++;
1832 bcopy(d, d - off, m->m_len);
1833 m->m_data = (caddr_t)(d - off);
1834 } else {
1835 if ((new = copy_mbuf(m)) == NULL) {
1836 sc->istats.fix_addr_noext++;
1837 goto fail;
1838 }
1839 sc->istats.fix_addr_ext++;
1840 if (prev)
1841 prev->m_next = new;
1842 new->m_next = next;
1843 m_free(m);
1844 m = new;
1845 }
1846 }
1847
1848 if ((off = m->m_len % 4) != 0) {
1849 if (!M_WRITABLE(m)) {
1850 if ((new = copy_mbuf(m)) == NULL) {
1851 sc->istats.fix_len_noext++;
1852 goto fail;
1853 }
1854 sc->istats.fix_len_copy++;
1855 if (prev)
1856 prev->m_next = new;
1857 new->m_next = next;
1858 m_free(m);
1859 m = new;
1860 } else
1861 sc->istats.fix_len++;
1862 d = mtod(m, u_char *) + m->m_len;
1863 off = 4 - off;
1864 while (off) {
1865 if (next == NULL) {
1866 *d++ = 0;
1867 fill++;
1868 } else if (next->m_len == 0) {
1869 sc->istats.fix_empty++;
1870 next = m_free(next);
1871 continue;
1872 } else {
1873 cp = mtod(next, u_char *);
1874 *d++ = *cp++;
1875 next->m_len--;
1876 next->m_data = (caddr_t)cp;
1877 }
1878 off--;
1879 m->m_len++;
1880 }
1881 }
1882
1883 if (first)
1884 *mp = m;
1885 }
1886
1887 mlen += m->m_len;
1888 prev = m;
1889 } while ((m = next) != NULL);
1890
1891 return (mlen - fill);
1892
1893 fail:
1894 m_freem(*mp);
1895 *mp = NULL;
1896 return (0);
1897}
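
/*
 * Worked example for the length fix above: take a writable chain of two
 * mbufs with aligned data and m_len 5 and 7.  Since 5 % 4 == 1, three
 * bytes are stolen from the second mbuf (the lengths become 8 and 4);
 * the second mbuf's data pointer, now three bytes into its buffer, is
 * misaligned and therefore copied down again on the next iteration.
 * The function returns 12.  Any zero padding appended at the very end
 * of a chain is excluded from the returned length.
 */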
1898
1899/*
1900 * The helper function is used to load the computed physical addresses
1901 * into the transmit descriptor.
1902 */
1903static void
1904fatm_tpd_load(void *varg, bus_dma_segment_t *segs, int nsegs,
1905 bus_size_t mapsize, int error)
1906{
1907 struct tpd *tpd = varg;
1908
1909 if (error)
1910 return;
1911
1912 KASSERT(nsegs <= TPD_EXTENSIONS + TXD_FIXED, ("too many segments"));
1913
1914 tpd->spec = 0;
1915 while (nsegs--) {
1916 H_SETDESC(tpd->segment[tpd->spec].buffer, segs->ds_addr);
1917 H_SETDESC(tpd->segment[tpd->spec].length, segs->ds_len);
1918 tpd->spec++;
1919 segs++;
1920 }
1921}
1922
1923/*
1924 * Start output.
1925 *
1926 * Note that we update the internal statistics without the lock here.
1927 */
1928static int
1929fatm_tx(struct fatm_softc *sc, struct mbuf *m, struct card_vcc *vc, u_int mlen)
1930{
1931 struct txqueue *q;
1932 u_int nblks;
1933 int error, aal, nsegs;
1934 struct tpd *tpd;
1935
1936 /*
1937 * Get a queue element.
1938 * If there isn't one - try to drain the transmit queue
1939 * We used to sleep here if that doesn't help, but we
1940 * should not sleep here, because we are called with locks.
1941 */
1942 q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.head);
1943
1944 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1945 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
1946 fatm_intr_drain_tx(sc);
1947 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1948 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
1949 if (sc->retry_tx) {
1950 sc->istats.tx_retry++;
1951 IF_PREPEND(&sc->ifp->if_snd, m);
1952 return (1);
1953 }
1954 sc->istats.tx_queue_full++;
1955 m_freem(m);
1956 return (0);
1957 }
1958 sc->istats.tx_queue_almost_full++;
1959 }
1960
1961 tpd = q->q.ioblk;
1962
1963 m->m_data += sizeof(struct atm_pseudohdr);
1964 m->m_len -= sizeof(struct atm_pseudohdr);
1965
1966#ifdef ENABLE_BPF
1967 if (!(vc->param.flags & ATMIO_FLAG_NG) &&
1968 vc->param.aal == ATMIO_AAL_5 &&
1969 (vc->param.flags & ATM_PH_LLCSNAP))
1970 BPF_MTAP(sc->ifp, m);
1971#endif
1972
1973 /* map the mbuf */
1974 error = bus_dmamap_load_mbuf(sc->tx_tag, q->map, m,
1975 fatm_tpd_load, tpd, BUS_DMA_NOWAIT);
1976	if (error) {
1977 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
1978 if_printf(sc->ifp, "mbuf loaded error=%d\n", error);
1979 m_freem(m);
1980 return (0);
1981 }
1982 nsegs = tpd->spec;
1983
1984 bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_PREWRITE);
1985
1986 /*
1987 * OK. Now go and do it.
1988 */
1989 aal = (vc->param.aal == ATMIO_AAL_5) ? 5 : 0;
1990
1991 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1992 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1993 q->m = m;
1994
1995 /*
1996 * If the transmit queue is almost full, schedule a
1997 * transmit interrupt so that transmit descriptors can
1998 * be recycled.
1999 */
2000 H_SETDESC(tpd->spec, TDX_MKSPEC((sc->txcnt >=
2001 (4 * FATM_TX_QLEN) / 5), aal, nsegs, mlen));
2002 H_SETDESC(tpd->atm_header, TDX_MKHDR(vc->param.vpi,
2003 vc->param.vci, 0, 0));
2004
2005 if (vc->param.traffic == ATMIO_TRAFFIC_UBR)
2006 H_SETDESC(tpd->stream, 0);
2007 else {
2008 u_int i;
2009
2010 for (i = 0; i < RATE_TABLE_SIZE; i++)
2011 if (rate_table[i].cell_rate < vc->param.tparam.pcr)
2012 break;
2013 if (i > 0)
2014 i--;
2015 H_SETDESC(tpd->stream, rate_table[i].ratio);
2016 }
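	/*
	 * The scan above appears to assume that rate_table[] is sorted by
	 * decreasing cell_rate: it stops at the first entry below the
	 * requested PCR and then steps back to the slowest entry that
	 * still satisfies it.
	 */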
2017 H_SYNCQ_PREWRITE(&sc->txq_mem, tpd, TPD_SIZE);
2018
2019 nblks = TDX_SEGS2BLKS(nsegs);
2020
2021 DBG(sc, XMIT, ("XMIT: mlen=%d spec=0x%x nsegs=%d blocks=%d",
2022 mlen, le32toh(tpd->spec), nsegs, nblks));
2023
2024 WRITE4(sc, q->q.card + 0, q->q.card_ioblk | nblks);
2025 BARRIER_W(sc);
2026
2027 sc->txcnt++;
2028 if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
2029 vc->obytes += m->m_pkthdr.len;
2030 vc->opackets++;
2031
2032 NEXT_QUEUE_ENTRY(sc->txqueue.head, FATM_TX_QLEN);
2033
2034 return (0);
2035}
2036
2037static void
2038fatm_start(struct ifnet *ifp)
2039{
2040 struct atm_pseudohdr aph;
2041 struct fatm_softc *sc;
2042 struct mbuf *m;
2043 u_int mlen, vpi, vci;
2044 struct card_vcc *vc;
2045
2046 sc = ifp->if_softc;
2047
2048 while (1) {
2049 IF_DEQUEUE(&ifp->if_snd, m);
2050 if (m == NULL)
2051 break;
2052
2053 /*
2054 * Loop through the mbuf chain and compute the total length
2055 * of the packet. Check that all data pointer are
2056 * 4 byte aligned. If they are not, call fatm_mfix to
2057 * fix that problem. This comes more or less from the
2058 * en driver.
2059 */
2060 mlen = fatm_fix_chain(sc, &m);
2061 if (m == NULL)
2062 continue;
2063
2064 if (m->m_len < sizeof(struct atm_pseudohdr) &&
2065 (m = m_pullup(m, sizeof(struct atm_pseudohdr))) == NULL)
2066 continue;
2067
2068 aph = *mtod(m, struct atm_pseudohdr *);
2069 mlen -= sizeof(struct atm_pseudohdr);
2070
2071 if (mlen == 0) {
2072 m_freem(m);
2073 continue;
2074 }
2075 if (mlen > FATM_MAXPDU) {
2076 sc->istats.tx_pdu2big++;
2077 m_freem(m);
2078 continue;
2079 }
2080
2081 vci = ATM_PH_VCI(&aph);
2082 vpi = ATM_PH_VPI(&aph);
2083
2084 /*
2085 * From here on we need the softc
2086 */
2087 FATM_LOCK(sc);
2088 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2089 FATM_UNLOCK(sc);
2090 m_freem(m);
2091 break;
2092 }
2093 if (!VC_OK(sc, vpi, vci) || (vc = sc->vccs[vci]) == NULL ||
2094 !(vc->vflags & FATM_VCC_OPEN)) {
2095 FATM_UNLOCK(sc);
2096 m_freem(m);
2097 continue;
2098 }
2099 if (fatm_tx(sc, m, vc, mlen)) {
2100 FATM_UNLOCK(sc);
2101 break;
2102 }
2103 FATM_UNLOCK(sc);
2104 }
2105}
2106
2107/*
2108 * VCC management
2109 *
2110 * This may seem complicated. The reason for this is that we need an
2111 * asynchronous open/close for the NATM VCCs because our ioctl handler
2112 * is called with the radix node head of the routing table locked. Therefore
2113 * we cannot sleep there and wait for the open/close to succeed. For this
2114 * reason we just initiate the operation from the ioctl.
2115 */
2116
2117/*
2118 * Command the card to open/close a VC.
2119 * Return the queue entry for waiting if we are successful.
2120 */
2121static struct cmdqueue *
2122fatm_start_vcc(struct fatm_softc *sc, u_int vpi, u_int vci, uint32_t cmd,
2123 u_int mtu, void (*func)(struct fatm_softc *, struct cmdqueue *))
2124{
2125 struct cmdqueue *q;
2126
2127 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
2128
2129 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2130 if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
2131 sc->istats.cmd_queue_full++;
2132 return (NULL);
2133 }
2134 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
2135
2136 q->error = 0;
2137 q->cb = func;
2138 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
2139 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
2140
2141 WRITE4(sc, q->q.card + FATMOC_ACTIN_VPVC, MKVPVC(vpi, vci));
2142 BARRIER_W(sc);
2143 WRITE4(sc, q->q.card + FATMOC_ACTIN_MTU, mtu);
2144 BARRIER_W(sc);
2145 WRITE4(sc, q->q.card + FATMOC_OP, cmd);
2146 BARRIER_W(sc);
2147
2148 return (q);
2149}
2150
2151/*
2152 * The VC has been opened/closed and somebody has been waiting for this.
2153 * Wake him up.
2154 */
2155static void
2156fatm_cmd_complete(struct fatm_softc *sc, struct cmdqueue *q)
2157{
2158
2159 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2160 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2161 sc->istats.get_stat_errors++;
2162 q->error = EIO;
2163 }
2164 wakeup(q);
2165}
2166
2167/*
2168 * Open complete
2169 */
2170static void
2171fatm_open_finish(struct fatm_softc *sc, struct card_vcc *vc)
2172{
2173 vc->vflags &= ~FATM_VCC_TRY_OPEN;
2174 vc->vflags |= FATM_VCC_OPEN;
2175
2176 if (vc->vflags & FATM_VCC_REOPEN) {
2177 vc->vflags &= ~FATM_VCC_REOPEN;
2178 return;
2179 }
2180
2181 /* inform management if this is not an NG
2182 * VCC or it's an NG PVC. */
2183 if (!(vc->param.flags & ATMIO_FLAG_NG) ||
2184 (vc->param.flags & ATMIO_FLAG_PVC))
2185 ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), 0, vc->param.vci, 1);
2186}
2187
2188/*
2189 * The VC that we have tried to open asynchronously has been opened.
2190 */
2191static void
2192fatm_open_complete(struct fatm_softc *sc, struct cmdqueue *q)
2193{
2194 u_int vci;
2195 struct card_vcc *vc;
2196
2197 vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
2198 vc = sc->vccs[vci];
2199 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2200 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2201 sc->istats.get_stat_errors++;
2202 sc->vccs[vci] = NULL;
2203 uma_zfree(sc->vcc_zone, vc);
2204 if_printf(sc->ifp, "opening VCI %u failed\n", vci);
2205 return;
2206 }
2207 fatm_open_finish(sc, vc);
2208}
2209
2210/*
2211 * Wait on the queue entry until the VCC is opened/closed.
2212 */
2213static int
2214fatm_waitvcc(struct fatm_softc *sc, struct cmdqueue *q)
2215{
2216 int error;
2217
2218 /*
2219 * Wait for the command to complete
2220 */
2221 error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_vci", hz);
2222
2223 if (error != 0)
2224 return (error);
2225 return (q->error);
2226}
2227
2228/*
2229 * Start to open a VCC. This just initiates the operation.
2230 */
2231static int
2232fatm_open_vcc(struct fatm_softc *sc, struct atmio_openvcc *op)
2233{
2234 int error;
2235 struct card_vcc *vc;
2236
2237 /*
2238 * Check parameters
2239 */
2240 if ((op->param.flags & ATMIO_FLAG_NOTX) &&
2241 (op->param.flags & ATMIO_FLAG_NORX))
2242 return (EINVAL);
2243
2244 if (!VC_OK(sc, op->param.vpi, op->param.vci))
2245 return (EINVAL);
2246 if (op->param.aal != ATMIO_AAL_0 && op->param.aal != ATMIO_AAL_5)
2247 return (EINVAL);
2248
2249 vc = uma_zalloc(sc->vcc_zone, M_NOWAIT | M_ZERO);
2250 if (vc == NULL)
2251 return (ENOMEM);
2252
2253 error = 0;
2254
2255 FATM_LOCK(sc);
2256 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2257 error = EIO;
2258 goto done;
2259 }
2260 if (sc->vccs[op->param.vci] != NULL) {
2261 error = EBUSY;
2262 goto done;
2263 }
2264 vc->param = op->param;
2265 vc->rxhand = op->rxhand;
2266
2267 switch (op->param.traffic) {
2268
2269 case ATMIO_TRAFFIC_UBR:
2270 break;
2271
2272 case ATMIO_TRAFFIC_CBR:
2273 if (op->param.tparam.pcr == 0 ||
2274 op->param.tparam.pcr > IFP2IFATM(sc->ifp)->mib.pcr) {
2275 error = EINVAL;
2276 goto done;
2277 }
2278 break;
2279
2280 default:
2281 error = EINVAL;
2282 goto done;
2283 }
2284 vc->ibytes = vc->obytes = 0;
2285 vc->ipackets = vc->opackets = 0;
2286
2287 vc->vflags = FATM_VCC_TRY_OPEN;
2288 sc->vccs[op->param.vci] = vc;
2289 sc->open_vccs++;
2290
2291 error = fatm_load_vc(sc, vc);
2292 if (error != 0) {
2293 sc->vccs[op->param.vci] = NULL;
2294 sc->open_vccs--;
2295 goto done;
2296 }
2297
2298 /* don't free below */
2299 vc = NULL;
2300
2301 done:
2302 FATM_UNLOCK(sc);
2303 if (vc != NULL)
2304 uma_zfree(sc->vcc_zone, vc);
2305 return (error);
2306}
2307
2308/*
2309 * Try to initialize the given VC
2310 */
2311static int
2312fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc)
2313{
2314 uint32_t cmd;
2315 struct cmdqueue *q;
2316 int error;
2317
2318 /* Command and buffer strategy */
2319 cmd = FATM_OP_ACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL | (0 << 16);
2320 if (vc->param.aal == ATMIO_AAL_0)
2321 cmd |= (0 << 8);
2322 else
2323 cmd |= (5 << 8);
2324
2325 q = fatm_start_vcc(sc, vc->param.vpi, vc->param.vci, cmd, 1,
2326 (vc->param.flags & ATMIO_FLAG_ASYNC) ?
2327 fatm_open_complete : fatm_cmd_complete);
2328 if (q == NULL)
2329 return (EIO);
2330
2331 if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) {
2332 error = fatm_waitvcc(sc, q);
2333 if (error != 0)
2334 return (error);
2335 fatm_open_finish(sc, vc);
2336 }
2337 return (0);
2338}
2339
2340/*
2341 * Finish close
2342 */
2343static void
2344fatm_close_finish(struct fatm_softc *sc, struct card_vcc *vc)
2345{
2346 /* inform management of this is not an NG
2347 * VCC or it's an NG PVC. */
2348 if (!(vc->param.flags & ATMIO_FLAG_NG) ||
2349 (vc->param.flags & ATMIO_FLAG_PVC))
2350 ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), 0, vc->param.vci, 0);
2351
2352 sc->vccs[vc->param.vci] = NULL;
2353 sc->open_vccs--;
2354
2355 uma_zfree(sc->vcc_zone, vc);
2356}
2357
2358/*
2359 * The VC has been closed.
2360 */
2361static void
2362fatm_close_complete(struct fatm_softc *sc, struct cmdqueue *q)
2363{
2364 u_int vci;
2365 struct card_vcc *vc;
2366
2367 vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
2368 vc = sc->vccs[vci];
2369 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2370 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2371 sc->istats.get_stat_errors++;
2372 /* keep the VCC in that state */
2373 if_printf(sc->ifp, "closing VCI %u failed\n", vci);
2374 return;
2375 }
2376
2377 fatm_close_finish(sc, vc);
2378}
2379
2380/*
2381 * Initiate closing a VCC
2382 */
2383static int
2384fatm_close_vcc(struct fatm_softc *sc, struct atmio_closevcc *cl)
2385{
2386 int error;
2387 struct cmdqueue *q;
2388 struct card_vcc *vc;
2389
2390 if (!VC_OK(sc, cl->vpi, cl->vci))
2391 return (EINVAL);
2392
2393 error = 0;
2394
2395 FATM_LOCK(sc);
2396 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2397 error = EIO;
2398 goto done;
2399 }
2400 vc = sc->vccs[cl->vci];
2401 if (vc == NULL || !(vc->vflags & (FATM_VCC_OPEN | FATM_VCC_TRY_OPEN))) {
2402 error = ENOENT;
2403 goto done;
2404 }
2405
2406 q = fatm_start_vcc(sc, cl->vpi, cl->vci,
2407 FATM_OP_DEACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL, 1,
2408 (vc->param.flags & ATMIO_FLAG_ASYNC) ?
2409 fatm_close_complete : fatm_cmd_complete);
2410 if (q == NULL) {
2411 error = EIO;
2412 goto done;
2413 }
2414
2415 vc->vflags &= ~(FATM_VCC_OPEN | FATM_VCC_TRY_OPEN);
2416 vc->vflags |= FATM_VCC_TRY_CLOSE;
2417
2418 if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) {
2419 error = fatm_waitvcc(sc, q);
2420 if (error != 0)
2421 goto done;
2422
2423 fatm_close_finish(sc, vc);
2424 }
2425
2426 done:
2427 FATM_UNLOCK(sc);
2428 return (error);
2429}
2430
2431/*
2432 * IOCTL handler
2433 */
2434static int
2435fatm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t arg)
2436{
2437 int error;
2438 struct fatm_softc *sc = ifp->if_softc;
2439 struct ifaddr *ifa = (struct ifaddr *)arg;
2440 struct ifreq *ifr = (struct ifreq *)arg;
2441 struct atmio_closevcc *cl = (struct atmio_closevcc *)arg;
2442 struct atmio_openvcc *op = (struct atmio_openvcc *)arg;
2443 struct atmio_vcctable *vtab;
2444
2445 error = 0;
2446 switch (cmd) {
2447
2448 case SIOCATMOPENVCC: /* kernel internal use */
2449 error = fatm_open_vcc(sc, op);
2450 break;
2451
2452 case SIOCATMCLOSEVCC: /* kernel internal use */
2453 error = fatm_close_vcc(sc, cl);
2454 break;
2455
2456 case SIOCSIFADDR:
2457 FATM_LOCK(sc);
2458 ifp->if_flags |= IFF_UP;
2459 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2460 fatm_init_locked(sc);
2461 switch (ifa->ifa_addr->sa_family) {
2462#ifdef INET
2463 case AF_INET:
2464 case AF_INET6:
2465 ifa->ifa_rtrequest = atm_rtrequest;
2466 break;
2467#endif
2468 default:
2469 break;
2470 }
2471 FATM_UNLOCK(sc);
2472 break;
2473
2474 case SIOCSIFFLAGS:
2475 FATM_LOCK(sc);
2476 if (ifp->if_flags & IFF_UP) {
2477 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2478 fatm_init_locked(sc);
2479 }
2480 } else {
2481 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2482 fatm_stop(sc);
2483 }
2484 }
2485 FATM_UNLOCK(sc);
2486 break;
2487
2488 case SIOCGIFMEDIA:
2489 case SIOCSIFMEDIA:
2490 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2491 error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
2492 else
2493 error = EINVAL;
2494 break;
2495
2496 case SIOCATMGVCCS:
2497 /* return vcc table */
2498 vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
2499 FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 1);
2500 error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
2501 vtab->count * sizeof(vtab->vccs[0]));
2502 free(vtab, M_DEVBUF);
2503 break;
2504
2505 case SIOCATMGETVCCS: /* internal netgraph use */
2506 vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
2507 FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 0);
2508 if (vtab == NULL) {
2509 error = ENOMEM;
2510 break;
2511 }
2512 *(void **)arg = vtab;
2513 break;
2514
2515 default:
2516 DBG(sc, IOCTL, ("+++ cmd=%08lx arg=%p", cmd, arg));
2517 error = EINVAL;
2518 break;
2519 }
2520
2521 return (error);
2522}
2523
2524/*
2525 * Detach from the interface and free all resources allocated during
2526 * initialisation and later.
2527 */
2528static int
2529fatm_detach(device_t dev)
2530{
2531 u_int i;
2532 struct rbuf *rb;
2533 struct fatm_softc *sc;
2534 struct txqueue *tx;
2535
2536 sc = device_get_softc(dev);
2537
2538 if (device_is_alive(dev)) {
2539 FATM_LOCK(sc);
2540 fatm_stop(sc);
2541 utopia_detach(&sc->utopia);
2542 FATM_UNLOCK(sc);
2543 atm_ifdetach(sc->ifp); /* XXX race */
2544 }
2545 callout_drain(&sc->watchdog_timer);
2546
2547 if (sc->ih != NULL)
2548 bus_teardown_intr(dev, sc->irqres, sc->ih);
2549
2550 while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
2551 if_printf(sc->ifp, "rbuf %p still in use!\n", rb);
2552 bus_dmamap_unload(sc->rbuf_tag, rb->map);
2553 m_freem(rb->m);
2554 LIST_REMOVE(rb, link);
2555 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
2556 }
2557
2558 if (sc->txqueue.chunk != NULL) {
2559 for (i = 0; i < FATM_TX_QLEN; i++) {
2560 tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
2561 bus_dmamap_destroy(sc->tx_tag, tx->map);
2562 }
2563 }
2564
2565 while ((rb = LIST_FIRST(&sc->rbuf_free)) != NULL) {
2566 bus_dmamap_destroy(sc->rbuf_tag, rb->map);
2567 LIST_REMOVE(rb, link);
2568 }
2569
2570 if (sc->rbufs != NULL)
2571 free(sc->rbufs, M_DEVBUF);
2572 if (sc->vccs != NULL) {
2573 for (i = 0; i < FORE_MAX_VCC + 1; i++)
2574 if (sc->vccs[i] != NULL) {
2575 uma_zfree(sc->vcc_zone, sc->vccs[i]);
2576 sc->vccs[i] = NULL;
2577 }
2578 free(sc->vccs, M_DEVBUF);
2579 }
2580 if (sc->vcc_zone != NULL)
2581 uma_zdestroy(sc->vcc_zone);
2582
2583 if (sc->l1queue.chunk != NULL)
2584 free(sc->l1queue.chunk, M_DEVBUF);
2585 if (sc->s1queue.chunk != NULL)
2586 free(sc->s1queue.chunk, M_DEVBUF);
2587 if (sc->rxqueue.chunk != NULL)
2588 free(sc->rxqueue.chunk, M_DEVBUF);
2589 if (sc->txqueue.chunk != NULL)
2590 free(sc->txqueue.chunk, M_DEVBUF);
2591 if (sc->cmdqueue.chunk != NULL)
2592 free(sc->cmdqueue.chunk, M_DEVBUF);
2593
2594 destroy_dma_memory(&sc->reg_mem);
2595 destroy_dma_memory(&sc->sadi_mem);
2596 destroy_dma_memory(&sc->prom_mem);
2597#ifdef TEST_DMA_SYNC
2598 destroy_dma_memoryX(&sc->s1q_mem);
2599 destroy_dma_memoryX(&sc->l1q_mem);
2600 destroy_dma_memoryX(&sc->rxq_mem);
2601 destroy_dma_memoryX(&sc->txq_mem);
2602 destroy_dma_memoryX(&sc->stat_mem);
2603#endif
2604
2605 if (sc->tx_tag != NULL)
2606 if (bus_dma_tag_destroy(sc->tx_tag))
2607 printf("tx DMA tag busy!\n");
2608
2609 if (sc->rbuf_tag != NULL)
2610 if (bus_dma_tag_destroy(sc->rbuf_tag))
2611 printf("rbuf DMA tag busy!\n");
2612
2613 if (sc->parent_dmat != NULL)
2614 if (bus_dma_tag_destroy(sc->parent_dmat))
2615 printf("parent DMA tag busy!\n");
2616
2617 if (sc->irqres != NULL)
2618 bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irqres);
2619
2620 if (sc->memres != NULL)
2621 bus_release_resource(dev, SYS_RES_MEMORY,
2622 sc->memid, sc->memres);
2623
2624 (void)sysctl_ctx_free(&sc->sysctl_ctx);
2625
2626 cv_destroy(&sc->cv_stat);
2627 cv_destroy(&sc->cv_regs);
2628
2629 mtx_destroy(&sc->mtx);
2630
2631 if_free(sc->ifp);
2632
2633 return (0);
2634}
2635
2636/*
2637 * Sysctl handler
2638 */
2639static int
2640fatm_sysctl_istats(SYSCTL_HANDLER_ARGS)
2641{
2642 struct fatm_softc *sc = arg1;
2643 u_long *ret;
2644 int error;
2645
2646 ret = malloc(sizeof(sc->istats), M_TEMP, M_WAITOK);
2647
2648 FATM_LOCK(sc);
2649 bcopy(&sc->istats, ret, sizeof(sc->istats));
2650 FATM_UNLOCK(sc);
2651
2652 error = SYSCTL_OUT(req, ret, sizeof(sc->istats));
2653 free(ret, M_TEMP);
2654
2655 return (error);
2656}
2657
2658/*
2659 * Sysctl handler for card statistics
2660 * This is disable because it destroys the PHY statistics.
2661 */
2662static int
2663fatm_sysctl_stats(SYSCTL_HANDLER_ARGS)
2664{
2665 struct fatm_softc *sc = arg1;
2666 int error;
2667 const struct fatm_stats *s;
2668 u_long *ret;
2669 u_int i;
2670
2671 ret = malloc(sizeof(u_long) * FATM_NSTATS, M_TEMP, M_WAITOK);
2672
2673 FATM_LOCK(sc);
2674
2675 if ((error = fatm_getstat(sc)) == 0) {
2676 s = sc->sadi_mem.mem;
2677 i = 0;
2678 ret[i++] = s->phy_4b5b.crc_header_errors;
2679 ret[i++] = s->phy_4b5b.framing_errors;
2680 ret[i++] = s->phy_oc3.section_bip8_errors;
2681 ret[i++] = s->phy_oc3.path_bip8_errors;
2682 ret[i++] = s->phy_oc3.line_bip24_errors;
2683 ret[i++] = s->phy_oc3.line_febe_errors;
2684 ret[i++] = s->phy_oc3.path_febe_errors;
2685 ret[i++] = s->phy_oc3.corr_hcs_errors;
2686 ret[i++] = s->phy_oc3.ucorr_hcs_errors;
2687 ret[i++] = s->atm.cells_transmitted;
2688 ret[i++] = s->atm.cells_received;
2689 ret[i++] = s->atm.vpi_bad_range;
2690 ret[i++] = s->atm.vpi_no_conn;
2691 ret[i++] = s->atm.vci_bad_range;
2692 ret[i++] = s->atm.vci_no_conn;
2693 ret[i++] = s->aal0.cells_transmitted;
2694 ret[i++] = s->aal0.cells_received;
2695 ret[i++] = s->aal0.cells_dropped;
2696 ret[i++] = s->aal4.cells_transmitted;
2697 ret[i++] = s->aal4.cells_received;
2698 ret[i++] = s->aal4.cells_crc_errors;
2699 ret[i++] = s->aal4.cels_protocol_errors;
2700 ret[i++] = s->aal4.cells_dropped;
2701 ret[i++] = s->aal4.cspdus_transmitted;
2702 ret[i++] = s->aal4.cspdus_received;
2703 ret[i++] = s->aal4.cspdus_protocol_errors;
2704 ret[i++] = s->aal4.cspdus_dropped;
2705 ret[i++] = s->aal5.cells_transmitted;
2706 ret[i++] = s->aal5.cells_received;
2707 ret[i++] = s->aal5.congestion_experienced;
2708 ret[i++] = s->aal5.cells_dropped;
2709 ret[i++] = s->aal5.cspdus_transmitted;
2710 ret[i++] = s->aal5.cspdus_received;
2711 ret[i++] = s->aal5.cspdus_crc_errors;
2712 ret[i++] = s->aal5.cspdus_protocol_errors;
2713 ret[i++] = s->aal5.cspdus_dropped;
2714 ret[i++] = s->aux.small_b1_failed;
2715 ret[i++] = s->aux.large_b1_failed;
2716 ret[i++] = s->aux.small_b2_failed;
2717 ret[i++] = s->aux.large_b2_failed;
2718 ret[i++] = s->aux.rpd_alloc_failed;
2719 ret[i++] = s->aux.receive_carrier;
2720 }
2721 /* declare the buffer free */
2722 sc->flags &= ~FATM_STAT_INUSE;
2723 cv_signal(&sc->cv_stat);
2724
2725 FATM_UNLOCK(sc);
2726
2727 if (error == 0)
2728 error = SYSCTL_OUT(req, ret, sizeof(u_long) * FATM_NSTATS);
2729 free(ret, M_TEMP);
2730
2731 return (error);
2732}
2733
2734#define MAXDMASEGS 32 /* maximum number of receive descriptors */
2735
2736/*
2737 * Attach to the device.
2738 *
2739 * We assume, that there is a global lock (Giant in this case) that protects
2740 * multiple threads from entering this function. This makes sense, doesn't it?
2741 */
2742static int
2743fatm_attach(device_t dev)
2744{
2745 struct ifnet *ifp;
2746 struct fatm_softc *sc;
2747 int unit;
2748 uint16_t cfg;
2749 int error = 0;
2750 struct rbuf *rb;
2751 u_int i;
2752 struct txqueue *tx;
2753
2754 sc = device_get_softc(dev);
2755 unit = device_get_unit(dev);
2756
2757 ifp = sc->ifp = if_alloc(IFT_ATM);
2758 if (ifp == NULL) {
2759 error = ENOSPC;
2760 goto fail;
2761 }
2762
2763 IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_PCA200E;
2764 IFP2IFATM(sc->ifp)->mib.serial = 0;
2765 IFP2IFATM(sc->ifp)->mib.hw_version = 0;
2766 IFP2IFATM(sc->ifp)->mib.sw_version = 0;
2767 IFP2IFATM(sc->ifp)->mib.vpi_bits = 0;
2768 IFP2IFATM(sc->ifp)->mib.vci_bits = FORE_VCIBITS;
2769 IFP2IFATM(sc->ifp)->mib.max_vpcs = 0;
2770 IFP2IFATM(sc->ifp)->mib.max_vccs = FORE_MAX_VCC;
2771 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN;
2772 IFP2IFATM(sc->ifp)->phy = &sc->utopia;
2773
2774 LIST_INIT(&sc->rbuf_free);
2775 LIST_INIT(&sc->rbuf_used);
2776
2777 /*
2778 * Initialize mutex and condition variables.
2779 */
2780 mtx_init(&sc->mtx, device_get_nameunit(dev),
2781 MTX_NETWORK_LOCK, MTX_DEF);
2782
2783 cv_init(&sc->cv_stat, "fatm_stat");
2784 cv_init(&sc->cv_regs, "fatm_regs");
2785
2786 sysctl_ctx_init(&sc->sysctl_ctx);
2787 callout_init_mtx(&sc->watchdog_timer, &sc->mtx, 0);
2788
2789 /*
2790 * Make the sysctl tree
2791 */
2792 if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
2793 SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
2794 device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
2795 goto fail;
2796
2797 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2798 OID_AUTO, "istats", CTLTYPE_ULONG | CTLFLAG_RD, sc, 0,
2799 fatm_sysctl_istats, "LU", "internal statistics") == NULL)
2800 goto fail;
2801
2802 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2803 OID_AUTO, "stats", CTLTYPE_ULONG | CTLFLAG_RD, sc, 0,
2804 fatm_sysctl_stats, "LU", "card statistics") == NULL)
2805 goto fail;
2806
2807 if (SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2808 OID_AUTO, "retry_tx", CTLFLAG_RW, &sc->retry_tx, 0,
2809 "retry flag") == NULL)
2810 goto fail;
2811
2812#ifdef FATM_DEBUG
2813 if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2814 OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "debug flags")
2815 == NULL)
2816 goto fail;
2817 sc->debug = FATM_DEBUG;
2818#endif
2819
2820 /*
2821 * Network subsystem stuff
2822 */
2823 ifp->if_softc = sc;
2824 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2825 ifp->if_flags = IFF_SIMPLEX;
2826 ifp->if_ioctl = fatm_ioctl;
2827 ifp->if_start = fatm_start;
2828 ifp->if_init = fatm_init;
2829 ifp->if_linkmib = &IFP2IFATM(sc->ifp)->mib;
2830 ifp->if_linkmiblen = sizeof(IFP2IFATM(sc->ifp)->mib);
2831
2832 /*
2833 * Enable busmaster
2834 */
2835 pci_enable_busmaster(dev);
2836
2837 /*
2838 * Map memory
2839 */
2840 sc->memid = 0x10;
2841 sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->memid,
2842 RF_ACTIVE);
2843 if (sc->memres == NULL) {
2844 if_printf(ifp, "could not map memory\n");
2845 error = ENXIO;
2846 goto fail;
2847 }
2848 sc->memh = rman_get_bushandle(sc->memres);
2849 sc->memt = rman_get_bustag(sc->memres);
2850
2851 /*
2852 * Convert endianess of slave access
2853 */
2854 cfg = pci_read_config(dev, FATM_PCIR_MCTL, 1);
2855 cfg |= FATM_PCIM_SWAB;
2856 pci_write_config(dev, FATM_PCIR_MCTL, cfg, 1);
2857
2858 /*
2859 * Allocate interrupt (activate at the end)
2860 */
2861 sc->irqid = 0;
2862 sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
2863 RF_SHAREABLE | RF_ACTIVE);
2864 if (sc->irqres == NULL) {
2865 if_printf(ifp, "could not allocate irq\n");
2866 error = ENXIO;
2867 goto fail;
2868 }
2869
2870 /*
2871 * Allocate the parent DMA tag. This is used simply to hold overall
2872 * restrictions for the controller (and PCI bus) and is never used
2873 * to do anything.
2874 */
2875 if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
2876 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2877 NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, MAXDMASEGS,
2878 BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
2879 &sc->parent_dmat)) {
2880 if_printf(ifp, "could not allocate parent DMA tag\n");
2881 error = ENOMEM;
2882 goto fail;
2883 }
2884
2885 /*
2886 * Allocate the receive buffer DMA tag. This tag must map a maximum of
2887 * a mbuf cluster.
2888 */
2889 if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
2890 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2891 NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
2892 NULL, NULL, &sc->rbuf_tag)) {
2893 if_printf(ifp, "could not allocate rbuf DMA tag\n");
2894 error = ENOMEM;
2895 goto fail;
2896 }
2897
2898 /*
2899 * Allocate the transmission DMA tag. Must add 1, because
2900 * rounded up PDU will be 65536 bytes long.
2901 */
2902 if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
2903 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2904 NULL, NULL,
2905 FATM_MAXPDU + 1, TPD_EXTENSIONS + TXD_FIXED, MCLBYTES, 0,
2906 NULL, NULL, &sc->tx_tag)) {
2907 if_printf(ifp, "could not allocate tx DMA tag\n");
2908 error = ENOMEM;
2909 goto fail;
2910 }
2911
2912 /*
2913 * Allocate DMAable memory.
2914 */
2915 sc->stat_mem.size = sizeof(uint32_t) * (FATM_CMD_QLEN + FATM_TX_QLEN
2916 + FATM_RX_QLEN + SMALL_SUPPLY_QLEN + LARGE_SUPPLY_QLEN);
2917 sc->stat_mem.align = 4;
2918
2919 sc->txq_mem.size = FATM_TX_QLEN * TPD_SIZE;
2920 sc->txq_mem.align = 32;
2921
2922 sc->rxq_mem.size = FATM_RX_QLEN * RPD_SIZE;
2923 sc->rxq_mem.align = 32;
2924
2925 sc->s1q_mem.size = SMALL_SUPPLY_QLEN *
2926 BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE);
2927 sc->s1q_mem.align = 32;
2928
2929 sc->l1q_mem.size = LARGE_SUPPLY_QLEN *
2930 BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE);
2931 sc->l1q_mem.align = 32;
2932
2933#ifdef TEST_DMA_SYNC
2934 if ((error = alloc_dma_memoryX(sc, "STATUS", &sc->stat_mem)) != 0 ||
2935 (error = alloc_dma_memoryX(sc, "TXQ", &sc->txq_mem)) != 0 ||
2936 (error = alloc_dma_memoryX(sc, "RXQ", &sc->rxq_mem)) != 0 ||
2937 (error = alloc_dma_memoryX(sc, "S1Q", &sc->s1q_mem)) != 0 ||
2938 (error = alloc_dma_memoryX(sc, "L1Q", &sc->l1q_mem)) != 0)
2939 goto fail;
2940#else
2941 if ((error = alloc_dma_memory(sc, "STATUS", &sc->stat_mem)) != 0 ||
2942 (error = alloc_dma_memory(sc, "TXQ", &sc->txq_mem)) != 0 ||
2943 (error = alloc_dma_memory(sc, "RXQ", &sc->rxq_mem)) != 0 ||
2944 (error = alloc_dma_memory(sc, "S1Q", &sc->s1q_mem)) != 0 ||
2945 (error = alloc_dma_memory(sc, "L1Q", &sc->l1q_mem)) != 0)
2946 goto fail;
2947#endif
2948
2949 sc->prom_mem.size = sizeof(struct prom);
2950 sc->prom_mem.align = 32;
2951 if ((error = alloc_dma_memory(sc, "PROM", &sc->prom_mem)) != 0)
2952 goto fail;
2953
2954 sc->sadi_mem.size = sizeof(struct fatm_stats);
2955 sc->sadi_mem.align = 32;
2956 if ((error = alloc_dma_memory(sc, "STATISTICS", &sc->sadi_mem)) != 0)
2957 goto fail;
2958
2959 sc->reg_mem.size = sizeof(uint32_t) * FATM_NREGS;
2960 sc->reg_mem.align = 32;
2961 if ((error = alloc_dma_memory(sc, "REGISTERS", &sc->reg_mem)) != 0)
2962 goto fail;
2963
2964 /*
2965 * Allocate queues
2966 */
2967 sc->cmdqueue.chunk = malloc(FATM_CMD_QLEN * sizeof(struct cmdqueue),
2968 M_DEVBUF, M_ZERO | M_WAITOK);
2969 sc->txqueue.chunk = malloc(FATM_TX_QLEN * sizeof(struct txqueue),
2970 M_DEVBUF, M_ZERO | M_WAITOK);
2971 sc->rxqueue.chunk = malloc(FATM_RX_QLEN * sizeof(struct rxqueue),
2972 M_DEVBUF, M_ZERO | M_WAITOK);
2973 sc->s1queue.chunk = malloc(SMALL_SUPPLY_QLEN * sizeof(struct supqueue),
2974 M_DEVBUF, M_ZERO | M_WAITOK);
2975 sc->l1queue.chunk = malloc(LARGE_SUPPLY_QLEN * sizeof(struct supqueue),
2976 M_DEVBUF, M_ZERO | M_WAITOK);
2977
2978 sc->vccs = malloc((FORE_MAX_VCC + 1) * sizeof(sc->vccs[0]),
2979 M_DEVBUF, M_ZERO | M_WAITOK);
2980 sc->vcc_zone = uma_zcreate("FATM vccs", sizeof(struct card_vcc),
2981 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
2982 if (sc->vcc_zone == NULL) {
2983 error = ENOMEM;
2984 goto fail;
2985 }
2986
2987 /*
2988 * Allocate memory for the receive buffer headers. The total number
2989 * of headers should probably also include the maximum number of
2990 * buffers on the receive queue.
2991 */
2992 sc->rbuf_total = SMALL_POOL_SIZE + LARGE_POOL_SIZE;
2993 sc->rbufs = malloc(sc->rbuf_total * sizeof(struct rbuf),
2994 M_DEVBUF, M_ZERO | M_WAITOK);
2995
2996 /*
2997 * Put all rbuf headers on the free list and create DMA maps.
2998 */
2999 for (rb = sc->rbufs, i = 0; i < sc->rbuf_total; i++, rb++) {
3000 if ((error = bus_dmamap_create(sc->rbuf_tag, 0, &rb->map))) {
3001 if_printf(sc->ifp, "creating rx map: %d\n",
3002 error);
3003 goto fail;
3004 }
3005 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
3006 }
3007
3008 /*
3009 * Create dma maps for transmission. In case of an error, free the
3010 * allocated DMA maps, because on some architectures maps are NULL
3011 * and we cannot distinguish between a failure and a NULL map in
3012 * the detach routine.
3013 */
3014 for (i = 0; i < FATM_TX_QLEN; i++) {
3015 tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
3016 if ((error = bus_dmamap_create(sc->tx_tag, 0, &tx->map))) {
3017 if_printf(sc->ifp, "creating tx map: %d\n",
3018 error);
3019 while (i > 0) {
3020 tx = GET_QUEUE(sc->txqueue, struct txqueue,
3021 i - 1);
3022 bus_dmamap_destroy(sc->tx_tag, tx->map);
3023 i--;
3024 }
3025 goto fail;
3026 }
3027 }
3028
3029 utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->mtx,
3030 &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
3031 &fatm_utopia_methods);
3032 sc->utopia.flags |= UTP_FL_NORESET | UTP_FL_POLL_CARRIER;
3033
3034 /*
3035 * Attach the interface
3036 */
3037 atm_ifattach(ifp);
3038 ifp->if_snd.ifq_maxlen = 512;
3039
3040#ifdef ENABLE_BPF
3041 bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
3042#endif
3043
3044 error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET | INTR_MPSAFE,
3045 NULL, fatm_intr, sc, &sc->ih);
3046 if (error) {
3047 if_printf(ifp, "couldn't setup irq\n");
3048 goto fail;
3049 }
3050
3051 fail:
3052 if (error)
3053 fatm_detach(dev);
3054
3055 return (error);
3056}
3057
3058#if defined(FATM_DEBUG) && 0
3059static void
3060dump_s1_queue(struct fatm_softc *sc)
3061{
3062 int i;
3063 struct supqueue *q;
3064
3065 for(i = 0; i < SMALL_SUPPLY_QLEN; i++) {
3066 q = GET_QUEUE(sc->s1queue, struct supqueue, i);
3067 printf("%2d: card=%x(%x,%x) stat=%x\n", i,
3068 q->q.card,
3069 READ4(sc, q->q.card),
3070 READ4(sc, q->q.card + 4),
3071 *q->q.statp);
3072 }
3073}
3074#endif
3075
3076/*
3077 * Driver infrastructure.
3078 */
3079static device_method_t fatm_methods[] = {
3080 DEVMETHOD(device_probe, fatm_probe),
3081 DEVMETHOD(device_attach, fatm_attach),
3082 DEVMETHOD(device_detach, fatm_detach),
3083 { 0, 0 }
3084};
3085static driver_t fatm_driver = {
3086 "fatm",
3087 fatm_methods,
3088 sizeof(struct fatm_softc),
3089};
3090
3091DRIVER_MODULE(fatm, pci, fatm_driver, fatm_devclass, 0, 0);
2029 vc->obytes += m->m_pkthdr.len;
2030 vc->opackets++;
2031
2032 NEXT_QUEUE_ENTRY(sc->txqueue.head, FATM_TX_QLEN);
2033
2034 return (0);
2035}
2036
2037static void
2038fatm_start(struct ifnet *ifp)
2039{
2040 struct atm_pseudohdr aph;
2041 struct fatm_softc *sc;
2042 struct mbuf *m;
2043 u_int mlen, vpi, vci;
2044 struct card_vcc *vc;
2045
2046 sc = ifp->if_softc;
2047
2048 while (1) {
2049 IF_DEQUEUE(&ifp->if_snd, m);
2050 if (m == NULL)
2051 break;
2052
2053 /*
2054 * Loop through the mbuf chain and compute the total length
2055	 * of the packet. Check that all data pointers are
2056	 * 4-byte aligned. If they are not, call fatm_mfix to
2057 * fix that problem. This comes more or less from the
2058 * en driver.
2059 */
2060 mlen = fatm_fix_chain(sc, &m);
2061 if (m == NULL)
2062 continue;
2063
2064 if (m->m_len < sizeof(struct atm_pseudohdr) &&
2065 (m = m_pullup(m, sizeof(struct atm_pseudohdr))) == NULL)
2066 continue;
2067
2068 aph = *mtod(m, struct atm_pseudohdr *);
2069 mlen -= sizeof(struct atm_pseudohdr);
2070
2071 if (mlen == 0) {
2072 m_freem(m);
2073 continue;
2074 }
2075 if (mlen > FATM_MAXPDU) {
2076 sc->istats.tx_pdu2big++;
2077 m_freem(m);
2078 continue;
2079 }
2080
2081 vci = ATM_PH_VCI(&aph);
2082 vpi = ATM_PH_VPI(&aph);
2083
2084 /*
2085 * From here on we need the softc
2086 */
2087 FATM_LOCK(sc);
2088 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2089 FATM_UNLOCK(sc);
2090 m_freem(m);
2091 break;
2092 }
2093 if (!VC_OK(sc, vpi, vci) || (vc = sc->vccs[vci]) == NULL ||
2094 !(vc->vflags & FATM_VCC_OPEN)) {
2095 FATM_UNLOCK(sc);
2096 m_freem(m);
2097 continue;
2098 }
2099 if (fatm_tx(sc, m, vc, mlen)) {
2100 FATM_UNLOCK(sc);
2101 break;
2102 }
2103 FATM_UNLOCK(sc);
2104 }
2105}
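/*
 * A minimal sketch of what an upper layer prepends before queueing a
 * packet on this interface (the VPI/VCI values are hypothetical; the
 * macros are those from <net/if_atm.h>):
 */
#if 0
	struct atm_pseudohdr aph;

	ATM_PH_FLAGS(&aph) = ATM_PH_AAL5;	/* hypothetical: an AAL5 PDU */
	ATM_PH_VPI(&aph) = 0;			/* hypothetical VPI */
	ATM_PH_SETVCI(&aph, 100);		/* hypothetical VCI */
	M_PREPEND(m, sizeof(aph), M_NOWAIT);
	if (m != NULL)
		*mtod(m, struct atm_pseudohdr *) = aph;
#endif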
2106
2107/*
2108 * VCC management
2109 *
2110 * This may seem complicated. The reason is that we need an
2111 * asynchronous open/close for the NATM VCCs because our ioctl handler
2112 * is called with the radix node head of the routing table locked. Therefore
2113 * we cannot sleep there and wait for the open/close to succeed. For this
2114 * reason we just initiate the operation from the ioctl.
2115 */
2116
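/*
 * The resulting control flow is roughly:
 *
 *  synchronous (ATMIO_FLAG_ASYNC clear):
 *    ioctl -> fatm_open_vcc -> fatm_load_vc -> fatm_start_vcc
 *      -> fatm_waitvcc (msleep) ... intr -> fatm_cmd_complete -> wakeup
 *      -> fatm_open_finish
 *
 *  asynchronous (ATMIO_FLAG_ASYNC set):
 *    ioctl -> fatm_open_vcc -> fatm_load_vc -> fatm_start_vcc
 *      ... intr -> fatm_open_complete -> fatm_open_finish
 *
 * Closing mirrors this with fatm_close_complete/fatm_close_finish.
 */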
2117/*
2118 * Command the card to open/close a VC.
2119 * Return the queue entry for waiting if we are successful.
2120 */
2121static struct cmdqueue *
2122fatm_start_vcc(struct fatm_softc *sc, u_int vpi, u_int vci, uint32_t cmd,
2123 u_int mtu, void (*func)(struct fatm_softc *, struct cmdqueue *))
2124{
2125 struct cmdqueue *q;
2126
2127 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
2128
2129 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2130 if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
2131 sc->istats.cmd_queue_full++;
2132 return (NULL);
2133 }
2134 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
2135
2136 q->error = 0;
2137 q->cb = func;
2138 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
2139 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
2140
2141 WRITE4(sc, q->q.card + FATMOC_ACTIN_VPVC, MKVPVC(vpi, vci));
2142 BARRIER_W(sc);
2143 WRITE4(sc, q->q.card + FATMOC_ACTIN_MTU, mtu);
2144 BARRIER_W(sc);
2145 WRITE4(sc, q->q.card + FATMOC_OP, cmd);
2146 BARRIER_W(sc);
2147
2148 return (q);
2149}
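/*
 * Note on the handshake above: the host-memory status word is checked for
 * FATM_STAT_FREE before the slot is reused and is set to FATM_STAT_PENDING;
 * the VPVC and MTU operands are then written before the operation code,
 * with write barriers in between, since the write to FATMOC_OP is what
 * appears to make the card start processing the command.
 */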
2150
2151/*
2152 * The VC has been opened/closed and somebody has been waiting for this.
2153 * Wake him up.
2154 */
2155static void
2156fatm_cmd_complete(struct fatm_softc *sc, struct cmdqueue *q)
2157{
2158
2159 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2160 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2161 sc->istats.get_stat_errors++;
2162 q->error = EIO;
2163 }
2164 wakeup(q);
2165}
2166
2167/*
2168 * Open complete
2169 */
2170static void
2171fatm_open_finish(struct fatm_softc *sc, struct card_vcc *vc)
2172{
2173 vc->vflags &= ~FATM_VCC_TRY_OPEN;
2174 vc->vflags |= FATM_VCC_OPEN;
2175
2176 if (vc->vflags & FATM_VCC_REOPEN) {
2177 vc->vflags &= ~FATM_VCC_REOPEN;
2178 return;
2179 }
2180
2181	/* inform management if this is not an NG
2182	 * VCC or if it is an NG PVC. */
2183 if (!(vc->param.flags & ATMIO_FLAG_NG) ||
2184 (vc->param.flags & ATMIO_FLAG_PVC))
2185 ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), 0, vc->param.vci, 1);
2186}
2187
2188/*
2189 * The VC that we have tried to open asynchronously has been opened.
2190 */
2191static void
2192fatm_open_complete(struct fatm_softc *sc, struct cmdqueue *q)
2193{
2194 u_int vci;
2195 struct card_vcc *vc;
2196
2197 vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
2198 vc = sc->vccs[vci];
2199 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2200 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2201 sc->istats.get_stat_errors++;
2202 sc->vccs[vci] = NULL;
2203 uma_zfree(sc->vcc_zone, vc);
2204 if_printf(sc->ifp, "opening VCI %u failed\n", vci);
2205 return;
2206 }
2207 fatm_open_finish(sc, vc);
2208}
2209
2210/*
2211 * Wait on the queue entry until the VCC is opened/closed.
2212 */
2213static int
2214fatm_waitvcc(struct fatm_softc *sc, struct cmdqueue *q)
2215{
2216 int error;
2217
2218 /*
2219 * Wait for the command to complete
2220 */
2221 error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_vci", hz);
2222
2223 if (error != 0)
2224 return (error);
2225 return (q->error);
2226}
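/*
 * Note: msleep() above is given a timeout of hz ticks (one second) and
 * PCATCH, so besides 0 it may return EWOULDBLOCK on timeout or
 * EINTR/ERESTART on a signal; q->error, filled in by the completion
 * callback, is only meaningful when msleep() itself returned 0.
 */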
2227
2228/*
2229 * Start to open a VCC. This just initiates the operation.
2230 */
2231static int
2232fatm_open_vcc(struct fatm_softc *sc, struct atmio_openvcc *op)
2233{
2234 int error;
2235 struct card_vcc *vc;
2236
2237 /*
2238 * Check parameters
2239 */
2240 if ((op->param.flags & ATMIO_FLAG_NOTX) &&
2241 (op->param.flags & ATMIO_FLAG_NORX))
2242 return (EINVAL);
2243
2244 if (!VC_OK(sc, op->param.vpi, op->param.vci))
2245 return (EINVAL);
2246 if (op->param.aal != ATMIO_AAL_0 && op->param.aal != ATMIO_AAL_5)
2247 return (EINVAL);
2248
2249 vc = uma_zalloc(sc->vcc_zone, M_NOWAIT | M_ZERO);
2250 if (vc == NULL)
2251 return (ENOMEM);
2252
2253 error = 0;
2254
2255 FATM_LOCK(sc);
2256 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2257 error = EIO;
2258 goto done;
2259 }
2260 if (sc->vccs[op->param.vci] != NULL) {
2261 error = EBUSY;
2262 goto done;
2263 }
2264 vc->param = op->param;
2265 vc->rxhand = op->rxhand;
2266
2267 switch (op->param.traffic) {
2268
2269 case ATMIO_TRAFFIC_UBR:
2270 break;
2271
2272 case ATMIO_TRAFFIC_CBR:
2273 if (op->param.tparam.pcr == 0 ||
2274 op->param.tparam.pcr > IFP2IFATM(sc->ifp)->mib.pcr) {
2275 error = EINVAL;
2276 goto done;
2277 }
2278 break;
2279
2280 default:
2281 error = EINVAL;
2282 goto done;
2283 }
2284 vc->ibytes = vc->obytes = 0;
2285 vc->ipackets = vc->opackets = 0;
2286
2287 vc->vflags = FATM_VCC_TRY_OPEN;
2288 sc->vccs[op->param.vci] = vc;
2289 sc->open_vccs++;
2290
2291 error = fatm_load_vc(sc, vc);
2292 if (error != 0) {
2293 sc->vccs[op->param.vci] = NULL;
2294 sc->open_vccs--;
2295 goto done;
2296 }
2297
2298 /* don't free below */
2299 vc = NULL;
2300
2301 done:
2302 FATM_UNLOCK(sc);
2303 if (vc != NULL)
2304 uma_zfree(sc->vcc_zone, vc);
2305 return (error);
2306}
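/*
 * A minimal sketch of how a kernel caller could fill in the request
 * checked above (field names as used by this function; the rxhand
 * cookie and the VCI/PCR values are hypothetical):
 */
#if 0
	struct atmio_openvcc op;

	bzero(&op, sizeof(op));
	op.rxhand = my_rxhand;			/* hypothetical receive cookie */
	op.param.vpi = 0;
	op.param.vci = 100;			/* hypothetical VCI */
	op.param.aal = ATMIO_AAL_5;
	op.param.traffic = ATMIO_TRAFFIC_CBR;
	op.param.tparam.pcr = 1000;		/* must not exceed the card's PCR */
	op.param.flags = ATMIO_FLAG_PVC;	/* optionally | ATMIO_FLAG_ASYNC */
	error = fatm_open_vcc(sc, &op);
#endif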
2307
2308/*
2309 * Try to initialize the given VC
2310 */
2311static int
2312fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc)
2313{
2314 uint32_t cmd;
2315 struct cmdqueue *q;
2316 int error;
2317
2318 /* Command and buffer strategy */
2319 cmd = FATM_OP_ACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL | (0 << 16);
2320 if (vc->param.aal == ATMIO_AAL_0)
2321 cmd |= (0 << 8);
2322 else
2323 cmd |= (5 << 8);
2324
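	/*
	 * As encoded above, the AAL number (0 or 5) apparently occupies
	 * bits 8-15 of the command word, while bits 16 and up select the
	 * buffer strategy (0 here).
	 */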
2325 q = fatm_start_vcc(sc, vc->param.vpi, vc->param.vci, cmd, 1,
2326 (vc->param.flags & ATMIO_FLAG_ASYNC) ?
2327 fatm_open_complete : fatm_cmd_complete);
2328 if (q == NULL)
2329 return (EIO);
2330
2331 if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) {
2332 error = fatm_waitvcc(sc, q);
2333 if (error != 0)
2334 return (error);
2335 fatm_open_finish(sc, vc);
2336 }
2337 return (0);
2338}
2339
2340/*
2341 * Finish close
2342 */
2343static void
2344fatm_close_finish(struct fatm_softc *sc, struct card_vcc *vc)
2345{
2346	/* inform management if this is not an NG
2347	 * VCC or if it is an NG PVC. */
2348 if (!(vc->param.flags & ATMIO_FLAG_NG) ||
2349 (vc->param.flags & ATMIO_FLAG_PVC))
2350 ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), 0, vc->param.vci, 0);
2351
2352 sc->vccs[vc->param.vci] = NULL;
2353 sc->open_vccs--;
2354
2355 uma_zfree(sc->vcc_zone, vc);
2356}
2357
2358/*
2359 * The VC has been closed.
2360 */
2361static void
2362fatm_close_complete(struct fatm_softc *sc, struct cmdqueue *q)
2363{
2364 u_int vci;
2365 struct card_vcc *vc;
2366
2367 vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
2368 vc = sc->vccs[vci];
2369 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2370 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2371 sc->istats.get_stat_errors++;
2372 /* keep the VCC in that state */
2373 if_printf(sc->ifp, "closing VCI %u failed\n", vci);
2374 return;
2375 }
2376
2377 fatm_close_finish(sc, vc);
2378}
2379
2380/*
2381 * Initiate closing a VCC
2382 */
2383static int
2384fatm_close_vcc(struct fatm_softc *sc, struct atmio_closevcc *cl)
2385{
2386 int error;
2387 struct cmdqueue *q;
2388 struct card_vcc *vc;
2389
2390 if (!VC_OK(sc, cl->vpi, cl->vci))
2391 return (EINVAL);
2392
2393 error = 0;
2394
2395 FATM_LOCK(sc);
2396 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2397 error = EIO;
2398 goto done;
2399 }
2400 vc = sc->vccs[cl->vci];
2401 if (vc == NULL || !(vc->vflags & (FATM_VCC_OPEN | FATM_VCC_TRY_OPEN))) {
2402 error = ENOENT;
2403 goto done;
2404 }
2405
2406 q = fatm_start_vcc(sc, cl->vpi, cl->vci,
2407 FATM_OP_DEACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL, 1,
2408 (vc->param.flags & ATMIO_FLAG_ASYNC) ?
2409 fatm_close_complete : fatm_cmd_complete);
2410 if (q == NULL) {
2411 error = EIO;
2412 goto done;
2413 }
2414
2415 vc->vflags &= ~(FATM_VCC_OPEN | FATM_VCC_TRY_OPEN);
2416 vc->vflags |= FATM_VCC_TRY_CLOSE;
2417
2418 if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) {
2419 error = fatm_waitvcc(sc, q);
2420 if (error != 0)
2421 goto done;
2422
2423 fatm_close_finish(sc, vc);
2424 }
2425
2426 done:
2427 FATM_UNLOCK(sc);
2428 return (error);
2429}
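/*
 * Closing is symmetric; a kernel caller passes the same (hypothetical)
 * VPI/VCI pair it opened:
 */
#if 0
	struct atmio_closevcc cl;

	cl.vpi = 0;
	cl.vci = 100;
	error = fatm_close_vcc(sc, &cl);
#endif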
2430
2431/*
2432 * IOCTL handler
2433 */
2434static int
2435fatm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t arg)
2436{
2437 int error;
2438 struct fatm_softc *sc = ifp->if_softc;
2439 struct ifaddr *ifa = (struct ifaddr *)arg;
2440 struct ifreq *ifr = (struct ifreq *)arg;
2441 struct atmio_closevcc *cl = (struct atmio_closevcc *)arg;
2442 struct atmio_openvcc *op = (struct atmio_openvcc *)arg;
2443 struct atmio_vcctable *vtab;
2444
2445 error = 0;
2446 switch (cmd) {
2447
2448 case SIOCATMOPENVCC: /* kernel internal use */
2449 error = fatm_open_vcc(sc, op);
2450 break;
2451
2452 case SIOCATMCLOSEVCC: /* kernel internal use */
2453 error = fatm_close_vcc(sc, cl);
2454 break;
2455
2456 case SIOCSIFADDR:
2457 FATM_LOCK(sc);
2458 ifp->if_flags |= IFF_UP;
2459 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2460 fatm_init_locked(sc);
2461 switch (ifa->ifa_addr->sa_family) {
2462#ifdef INET
2463 case AF_INET:
2464 case AF_INET6:
2465 ifa->ifa_rtrequest = atm_rtrequest;
2466 break;
2467#endif
2468 default:
2469 break;
2470 }
2471 FATM_UNLOCK(sc);
2472 break;
2473
2474 case SIOCSIFFLAGS:
2475 FATM_LOCK(sc);
2476 if (ifp->if_flags & IFF_UP) {
2477 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2478 fatm_init_locked(sc);
2479 }
2480 } else {
2481 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2482 fatm_stop(sc);
2483 }
2484 }
2485 FATM_UNLOCK(sc);
2486 break;
2487
2488 case SIOCGIFMEDIA:
2489 case SIOCSIFMEDIA:
2490 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2491 error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
2492 else
2493 error = EINVAL;
2494 break;
2495
2496 case SIOCATMGVCCS:
2497 /* return vcc table */
2498 vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
2499 FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 1);
2500 error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
2501 vtab->count * sizeof(vtab->vccs[0]));
2502 free(vtab, M_DEVBUF);
2503 break;
2504
2505 case SIOCATMGETVCCS: /* internal netgraph use */
2506 vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
2507 FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 0);
2508 if (vtab == NULL) {
2509 error = ENOMEM;
2510 break;
2511 }
2512 *(void **)arg = vtab;
2513 break;
2514
2515 default:
2516 DBG(sc, IOCTL, ("+++ cmd=%08lx arg=%p", cmd, arg));
2517 error = EINVAL;
2518 break;
2519 }
2520
2521 return (error);
2522}
2523
2524/*
2525 * Detach from the interface and free all resources allocated during
2526 * initialisation and later.
2527 */
2528static int
2529fatm_detach(device_t dev)
2530{
2531 u_int i;
2532 struct rbuf *rb;
2533 struct fatm_softc *sc;
2534 struct txqueue *tx;
2535
2536 sc = device_get_softc(dev);
2537
2538 if (device_is_alive(dev)) {
2539 FATM_LOCK(sc);
2540 fatm_stop(sc);
2541 utopia_detach(&sc->utopia);
2542 FATM_UNLOCK(sc);
2543 atm_ifdetach(sc->ifp); /* XXX race */
2544 }
2545 callout_drain(&sc->watchdog_timer);
2546
2547 if (sc->ih != NULL)
2548 bus_teardown_intr(dev, sc->irqres, sc->ih);
2549
2550 while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
2551 if_printf(sc->ifp, "rbuf %p still in use!\n", rb);
2552 bus_dmamap_unload(sc->rbuf_tag, rb->map);
2553 m_freem(rb->m);
2554 LIST_REMOVE(rb, link);
2555 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
2556 }
2557
2558 if (sc->txqueue.chunk != NULL) {
2559 for (i = 0; i < FATM_TX_QLEN; i++) {
2560 tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
2561 bus_dmamap_destroy(sc->tx_tag, tx->map);
2562 }
2563 }
2564
2565 while ((rb = LIST_FIRST(&sc->rbuf_free)) != NULL) {
2566 bus_dmamap_destroy(sc->rbuf_tag, rb->map);
2567 LIST_REMOVE(rb, link);
2568 }
2569
2570 if (sc->rbufs != NULL)
2571 free(sc->rbufs, M_DEVBUF);
2572 if (sc->vccs != NULL) {
2573 for (i = 0; i < FORE_MAX_VCC + 1; i++)
2574 if (sc->vccs[i] != NULL) {
2575 uma_zfree(sc->vcc_zone, sc->vccs[i]);
2576 sc->vccs[i] = NULL;
2577 }
2578 free(sc->vccs, M_DEVBUF);
2579 }
2580 if (sc->vcc_zone != NULL)
2581 uma_zdestroy(sc->vcc_zone);
2582
2583 if (sc->l1queue.chunk != NULL)
2584 free(sc->l1queue.chunk, M_DEVBUF);
2585 if (sc->s1queue.chunk != NULL)
2586 free(sc->s1queue.chunk, M_DEVBUF);
2587 if (sc->rxqueue.chunk != NULL)
2588 free(sc->rxqueue.chunk, M_DEVBUF);
2589 if (sc->txqueue.chunk != NULL)
2590 free(sc->txqueue.chunk, M_DEVBUF);
2591 if (sc->cmdqueue.chunk != NULL)
2592 free(sc->cmdqueue.chunk, M_DEVBUF);
2593
2594 destroy_dma_memory(&sc->reg_mem);
2595 destroy_dma_memory(&sc->sadi_mem);
2596 destroy_dma_memory(&sc->prom_mem);
2597#ifdef TEST_DMA_SYNC
2598 destroy_dma_memoryX(&sc->s1q_mem);
2599 destroy_dma_memoryX(&sc->l1q_mem);
2600 destroy_dma_memoryX(&sc->rxq_mem);
2601 destroy_dma_memoryX(&sc->txq_mem);
2602 destroy_dma_memoryX(&sc->stat_mem);
2603#endif
2604
2605 if (sc->tx_tag != NULL)
2606 if (bus_dma_tag_destroy(sc->tx_tag))
2607 printf("tx DMA tag busy!\n");
2608
2609 if (sc->rbuf_tag != NULL)
2610 if (bus_dma_tag_destroy(sc->rbuf_tag))
2611 printf("rbuf DMA tag busy!\n");
2612
2613 if (sc->parent_dmat != NULL)
2614 if (bus_dma_tag_destroy(sc->parent_dmat))
2615 printf("parent DMA tag busy!\n");
2616
2617 if (sc->irqres != NULL)
2618 bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irqres);
2619
2620 if (sc->memres != NULL)
2621 bus_release_resource(dev, SYS_RES_MEMORY,
2622 sc->memid, sc->memres);
2623
2624 (void)sysctl_ctx_free(&sc->sysctl_ctx);
2625
2626 cv_destroy(&sc->cv_stat);
2627 cv_destroy(&sc->cv_regs);
2628
2629 mtx_destroy(&sc->mtx);
2630
2631 if_free(sc->ifp);
2632
2633 return (0);
2634}
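/*
 * Note that every release above is guarded by a NULL (or list-empty)
 * check, which is what allows fatm_attach() to unwind a partially
 * completed attach simply by calling this function from its fail: label.
 */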
2635
2636/*
2637 * Sysctl handler
2638 */
2639static int
2640fatm_sysctl_istats(SYSCTL_HANDLER_ARGS)
2641{
2642 struct fatm_softc *sc = arg1;
2643 u_long *ret;
2644 int error;
2645
2646 ret = malloc(sizeof(sc->istats), M_TEMP, M_WAITOK);
2647
2648 FATM_LOCK(sc);
2649 bcopy(&sc->istats, ret, sizeof(sc->istats));
2650 FATM_UNLOCK(sc);
2651
2652 error = SYSCTL_OUT(req, ret, sizeof(sc->istats));
2653 free(ret, M_TEMP);
2654
2655 return (error);
2656}
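/*
 * The handler above exports the counters as a flat array of u_long under
 * hw.atm.<nameunit>.istats. A small userland sketch for dumping it,
 * assuming the first card is named "fatm0":
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	u_long *v;
	size_t len;
	u_int i;

	/* first call obtains the size, second the data */
	if (sysctlbyname("hw.atm.fatm0.istats", NULL, &len, NULL, 0) == -1)
		err(1, "sysctlbyname(size)");
	if ((v = malloc(len)) == NULL)
		err(1, "malloc");
	if (sysctlbyname("hw.atm.fatm0.istats", v, &len, NULL, 0) == -1)
		err(1, "sysctlbyname(read)");
	for (i = 0; i < len / sizeof(u_long); i++)
		printf("%u: %lu\n", i, v[i]);
	free(v);
	return (0);
}
#endif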
2657
2658/*
2659 * Sysctl handler for card statistics
2660 * This is disabled because it destroys the PHY statistics.
2661 */
2662static int
2663fatm_sysctl_stats(SYSCTL_HANDLER_ARGS)
2664{
2665 struct fatm_softc *sc = arg1;
2666 int error;
2667 const struct fatm_stats *s;
2668 u_long *ret;
2669 u_int i;
2670
2671 ret = malloc(sizeof(u_long) * FATM_NSTATS, M_TEMP, M_WAITOK);
2672
2673 FATM_LOCK(sc);
2674
2675 if ((error = fatm_getstat(sc)) == 0) {
2676 s = sc->sadi_mem.mem;
2677 i = 0;
2678 ret[i++] = s->phy_4b5b.crc_header_errors;
2679 ret[i++] = s->phy_4b5b.framing_errors;
2680 ret[i++] = s->phy_oc3.section_bip8_errors;
2681 ret[i++] = s->phy_oc3.path_bip8_errors;
2682 ret[i++] = s->phy_oc3.line_bip24_errors;
2683 ret[i++] = s->phy_oc3.line_febe_errors;
2684 ret[i++] = s->phy_oc3.path_febe_errors;
2685 ret[i++] = s->phy_oc3.corr_hcs_errors;
2686 ret[i++] = s->phy_oc3.ucorr_hcs_errors;
2687 ret[i++] = s->atm.cells_transmitted;
2688 ret[i++] = s->atm.cells_received;
2689 ret[i++] = s->atm.vpi_bad_range;
2690 ret[i++] = s->atm.vpi_no_conn;
2691 ret[i++] = s->atm.vci_bad_range;
2692 ret[i++] = s->atm.vci_no_conn;
2693 ret[i++] = s->aal0.cells_transmitted;
2694 ret[i++] = s->aal0.cells_received;
2695 ret[i++] = s->aal0.cells_dropped;
2696 ret[i++] = s->aal4.cells_transmitted;
2697 ret[i++] = s->aal4.cells_received;
2698 ret[i++] = s->aal4.cells_crc_errors;
2699 ret[i++] = s->aal4.cels_protocol_errors;
2700 ret[i++] = s->aal4.cells_dropped;
2701 ret[i++] = s->aal4.cspdus_transmitted;
2702 ret[i++] = s->aal4.cspdus_received;
2703 ret[i++] = s->aal4.cspdus_protocol_errors;
2704 ret[i++] = s->aal4.cspdus_dropped;
2705 ret[i++] = s->aal5.cells_transmitted;
2706 ret[i++] = s->aal5.cells_received;
2707 ret[i++] = s->aal5.congestion_experienced;
2708 ret[i++] = s->aal5.cells_dropped;
2709 ret[i++] = s->aal5.cspdus_transmitted;
2710 ret[i++] = s->aal5.cspdus_received;
2711 ret[i++] = s->aal5.cspdus_crc_errors;
2712 ret[i++] = s->aal5.cspdus_protocol_errors;
2713 ret[i++] = s->aal5.cspdus_dropped;
2714 ret[i++] = s->aux.small_b1_failed;
2715 ret[i++] = s->aux.large_b1_failed;
2716 ret[i++] = s->aux.small_b2_failed;
2717 ret[i++] = s->aux.large_b2_failed;
2718 ret[i++] = s->aux.rpd_alloc_failed;
2719 ret[i++] = s->aux.receive_carrier;
2720 }
2721 /* declare the buffer free */
2722 sc->flags &= ~FATM_STAT_INUSE;
2723 cv_signal(&sc->cv_stat);
2724
2725 FATM_UNLOCK(sc);
2726
2727 if (error == 0)
2728 error = SYSCTL_OUT(req, ret, sizeof(u_long) * FATM_NSTATS);
2729 free(ret, M_TEMP);
2730
2731 return (error);
2732}
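/*
 * The order of the ret[i++] assignments above defines the layout of the
 * exported array, and FATM_NSTATS must match their count exactly: a
 * smaller value would overflow the malloc'ed buffer, a larger one would
 * export trailing garbage.
 */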
2733
2734#define MAXDMASEGS 32 /* maximum number of receive descriptors */
2735
2736/*
2737 * Attach to the device.
2738 *
2739 * We assume that there is a global lock (Giant in this case) that protects
2740 * multiple threads from entering this function. This makes sense, doesn't it?
2741 */
2742static int
2743fatm_attach(device_t dev)
2744{
2745 struct ifnet *ifp;
2746 struct fatm_softc *sc;
2747 int unit;
2748 uint16_t cfg;
2749 int error = 0;
2750 struct rbuf *rb;
2751 u_int i;
2752 struct txqueue *tx;
2753
2754 sc = device_get_softc(dev);
2755 unit = device_get_unit(dev);
2756
2757 ifp = sc->ifp = if_alloc(IFT_ATM);
2758 if (ifp == NULL) {
2759 error = ENOSPC;
2760 goto fail;
2761 }
2762
2763 IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_PCA200E;
2764 IFP2IFATM(sc->ifp)->mib.serial = 0;
2765 IFP2IFATM(sc->ifp)->mib.hw_version = 0;
2766 IFP2IFATM(sc->ifp)->mib.sw_version = 0;
2767 IFP2IFATM(sc->ifp)->mib.vpi_bits = 0;
2768 IFP2IFATM(sc->ifp)->mib.vci_bits = FORE_VCIBITS;
2769 IFP2IFATM(sc->ifp)->mib.max_vpcs = 0;
2770 IFP2IFATM(sc->ifp)->mib.max_vccs = FORE_MAX_VCC;
2771 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN;
2772 IFP2IFATM(sc->ifp)->phy = &sc->utopia;
2773
2774 LIST_INIT(&sc->rbuf_free);
2775 LIST_INIT(&sc->rbuf_used);
2776
2777 /*
2778 * Initialize mutex and condition variables.
2779 */
2780 mtx_init(&sc->mtx, device_get_nameunit(dev),
2781 MTX_NETWORK_LOCK, MTX_DEF);
2782
2783 cv_init(&sc->cv_stat, "fatm_stat");
2784 cv_init(&sc->cv_regs, "fatm_regs");
2785
2786 sysctl_ctx_init(&sc->sysctl_ctx);
2787 callout_init_mtx(&sc->watchdog_timer, &sc->mtx, 0);
2788
2789 /*
2790 * Make the sysctl tree
2791 */
2792 if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
2793 SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
2794 device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
2795 goto fail;
2796
2797 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2798 OID_AUTO, "istats", CTLTYPE_ULONG | CTLFLAG_RD, sc, 0,
2799 fatm_sysctl_istats, "LU", "internal statistics") == NULL)
2800 goto fail;
2801
2802 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2803 OID_AUTO, "stats", CTLTYPE_ULONG | CTLFLAG_RD, sc, 0,
2804 fatm_sysctl_stats, "LU", "card statistics") == NULL)
2805 goto fail;
2806
2807 if (SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2808 OID_AUTO, "retry_tx", CTLFLAG_RW, &sc->retry_tx, 0,
2809 "retry flag") == NULL)
2810 goto fail;
2811
2812#ifdef FATM_DEBUG
2813 if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2814 OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "debug flags")
2815 == NULL)
2816 goto fail;
2817 sc->debug = FATM_DEBUG;
2818#endif
2819
2820 /*
2821 * Network subsystem stuff
2822 */
2823 ifp->if_softc = sc;
2824 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2825 ifp->if_flags = IFF_SIMPLEX;
2826 ifp->if_ioctl = fatm_ioctl;
2827 ifp->if_start = fatm_start;
2828 ifp->if_init = fatm_init;
2829 ifp->if_linkmib = &IFP2IFATM(sc->ifp)->mib;
2830 ifp->if_linkmiblen = sizeof(IFP2IFATM(sc->ifp)->mib);
2831
2832 /*
2833 * Enable busmaster
2834 */
2835 pci_enable_busmaster(dev);
2836
2837 /*
2838 * Map memory
2839 */
2840 sc->memid = 0x10;
2841 sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->memid,
2842 RF_ACTIVE);
2843 if (sc->memres == NULL) {
2844 if_printf(ifp, "could not map memory\n");
2845 error = ENXIO;
2846 goto fail;
2847 }
2848 sc->memh = rman_get_bushandle(sc->memres);
2849 sc->memt = rman_get_bustag(sc->memres);
2850
2851 /*
2852 * Convert endianness of slave access
2853 */
2854 cfg = pci_read_config(dev, FATM_PCIR_MCTL, 1);
2855 cfg |= FATM_PCIM_SWAB;
2856 pci_write_config(dev, FATM_PCIR_MCTL, cfg, 1);
2857
2858 /*
2859 * Allocate interrupt (activate at the end)
2860 */
2861 sc->irqid = 0;
2862 sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
2863 RF_SHAREABLE | RF_ACTIVE);
2864 if (sc->irqres == NULL) {
2865 if_printf(ifp, "could not allocate irq\n");
2866 error = ENXIO;
2867 goto fail;
2868 }
2869
2870 /*
2871 * Allocate the parent DMA tag. This is used simply to hold overall
2872 * restrictions for the controller (and PCI bus) and is never used
2873 * to do anything.
2874 */
2875 if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
2876 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2877 NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, MAXDMASEGS,
2878 BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
2879 &sc->parent_dmat)) {
2880 if_printf(ifp, "could not allocate parent DMA tag\n");
2881 error = ENOMEM;
2882 goto fail;
2883 }
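	/*
	 * For reference, the bus_dma_tag_create() arguments above are, in
	 * order: parent tag, alignment (1), boundary (0), lowaddr/highaddr
	 * (32-bit space), filter/filterarg (unused), maxsize, nsegments
	 * (MAXDMASEGS), maxsegsz, flags, lockfunc/lockfuncarg (unused),
	 * and the output tag.
	 */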
2884
2885 /*
2886 * Allocate the receive buffer DMA tag. This tag must map at most
2887 * an mbuf cluster.
2888 */
2889 if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
2890 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2891 NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
2892 NULL, NULL, &sc->rbuf_tag)) {
2893 if_printf(ifp, "could not allocate rbuf DMA tag\n");
2894 error = ENOMEM;
2895 goto fail;
2896 }
2897
2898 /*
2899 * Allocate the transmission DMA tag. Must add 1 because the
2900 * rounded-up PDU will be 65536 bytes long.
2901 */
2902 if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
2903 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2904 NULL, NULL,
2905 FATM_MAXPDU + 1, TPD_EXTENSIONS + TXD_FIXED, MCLBYTES, 0,
2906 NULL, NULL, &sc->tx_tag)) {
2907 if_printf(ifp, "could not allocate tx DMA tag\n");
2908 error = ENOMEM;
2909 goto fail;
2910 }
2911
2912 /*
2913 * Allocate DMAable memory.
2914 */
2915 sc->stat_mem.size = sizeof(uint32_t) * (FATM_CMD_QLEN + FATM_TX_QLEN
2916 + FATM_RX_QLEN + SMALL_SUPPLY_QLEN + LARGE_SUPPLY_QLEN);
2917 sc->stat_mem.align = 4;
2918
2919 sc->txq_mem.size = FATM_TX_QLEN * TPD_SIZE;
2920 sc->txq_mem.align = 32;
2921
2922 sc->rxq_mem.size = FATM_RX_QLEN * RPD_SIZE;
2923 sc->rxq_mem.align = 32;
2924
2925 sc->s1q_mem.size = SMALL_SUPPLY_QLEN *
2926 BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE);
2927 sc->s1q_mem.align = 32;
2928
2929 sc->l1q_mem.size = LARGE_SUPPLY_QLEN *
2930 BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE);
2931 sc->l1q_mem.align = 32;
2932
2933#ifdef TEST_DMA_SYNC
2934 if ((error = alloc_dma_memoryX(sc, "STATUS", &sc->stat_mem)) != 0 ||
2935 (error = alloc_dma_memoryX(sc, "TXQ", &sc->txq_mem)) != 0 ||
2936 (error = alloc_dma_memoryX(sc, "RXQ", &sc->rxq_mem)) != 0 ||
2937 (error = alloc_dma_memoryX(sc, "S1Q", &sc->s1q_mem)) != 0 ||
2938 (error = alloc_dma_memoryX(sc, "L1Q", &sc->l1q_mem)) != 0)
2939 goto fail;
2940#else
2941 if ((error = alloc_dma_memory(sc, "STATUS", &sc->stat_mem)) != 0 ||
2942 (error = alloc_dma_memory(sc, "TXQ", &sc->txq_mem)) != 0 ||
2943 (error = alloc_dma_memory(sc, "RXQ", &sc->rxq_mem)) != 0 ||
2944 (error = alloc_dma_memory(sc, "S1Q", &sc->s1q_mem)) != 0 ||
2945 (error = alloc_dma_memory(sc, "L1Q", &sc->l1q_mem)) != 0)
2946 goto fail;
2947#endif
2948
2949 sc->prom_mem.size = sizeof(struct prom);
2950 sc->prom_mem.align = 32;
2951 if ((error = alloc_dma_memory(sc, "PROM", &sc->prom_mem)) != 0)
2952 goto fail;
2953
2954 sc->sadi_mem.size = sizeof(struct fatm_stats);
2955 sc->sadi_mem.align = 32;
2956 if ((error = alloc_dma_memory(sc, "STATISTICS", &sc->sadi_mem)) != 0)
2957 goto fail;
2958
2959 sc->reg_mem.size = sizeof(uint32_t) * FATM_NREGS;
2960 sc->reg_mem.align = 32;
2961 if ((error = alloc_dma_memory(sc, "REGISTERS", &sc->reg_mem)) != 0)
2962 goto fail;
2963
2964 /*
2965 * Allocate queues
2966 */
2967 sc->cmdqueue.chunk = malloc(FATM_CMD_QLEN * sizeof(struct cmdqueue),
2968 M_DEVBUF, M_ZERO | M_WAITOK);
2969 sc->txqueue.chunk = malloc(FATM_TX_QLEN * sizeof(struct txqueue),
2970 M_DEVBUF, M_ZERO | M_WAITOK);
2971 sc->rxqueue.chunk = malloc(FATM_RX_QLEN * sizeof(struct rxqueue),
2972 M_DEVBUF, M_ZERO | M_WAITOK);
2973 sc->s1queue.chunk = malloc(SMALL_SUPPLY_QLEN * sizeof(struct supqueue),
2974 M_DEVBUF, M_ZERO | M_WAITOK);
2975 sc->l1queue.chunk = malloc(LARGE_SUPPLY_QLEN * sizeof(struct supqueue),
2976 M_DEVBUF, M_ZERO | M_WAITOK);
2977
2978 sc->vccs = malloc((FORE_MAX_VCC + 1) * sizeof(sc->vccs[0]),
2979 M_DEVBUF, M_ZERO | M_WAITOK);
2980 sc->vcc_zone = uma_zcreate("FATM vccs", sizeof(struct card_vcc),
2981 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
2982 if (sc->vcc_zone == NULL) {
2983 error = ENOMEM;
2984 goto fail;
2985 }
2986
2987 /*
2988 * Allocate memory for the receive buffer headers. The total number
2989 * of headers should probably also include the maximum number of
2990 * buffers on the receive queue.
2991 */
2992 sc->rbuf_total = SMALL_POOL_SIZE + LARGE_POOL_SIZE;
2993 sc->rbufs = malloc(sc->rbuf_total * sizeof(struct rbuf),
2994 M_DEVBUF, M_ZERO | M_WAITOK);
2995
2996 /*
2997 * Put all rbuf headers on the free list and create DMA maps.
2998 */
2999 for (rb = sc->rbufs, i = 0; i < sc->rbuf_total; i++, rb++) {
3000 if ((error = bus_dmamap_create(sc->rbuf_tag, 0, &rb->map))) {
3001 if_printf(sc->ifp, "creating rx map: %d\n",
3002 error);
3003 goto fail;
3004 }
3005 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
3006 }
3007
3008 /*
3009 * Create DMA maps for transmission. In case of an error, free the
3010 * allocated DMA maps, because on some architectures maps are NULL
3011 * and we cannot distinguish between a failure and a NULL map in
3012 * the detach routine.
3013 */
3014 for (i = 0; i < FATM_TX_QLEN; i++) {
3015 tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
3016 if ((error = bus_dmamap_create(sc->tx_tag, 0, &tx->map))) {
3017 if_printf(sc->ifp, "creating tx map: %d\n",
3018 error);
3019 while (i > 0) {
3020 tx = GET_QUEUE(sc->txqueue, struct txqueue,
3021 i - 1);
3022 bus_dmamap_destroy(sc->tx_tag, tx->map);
3023 i--;
3024 }
3025 goto fail;
3026 }
3027 }
3028
3029 utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->mtx,
3030 &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
3031 &fatm_utopia_methods);
3032 sc->utopia.flags |= UTP_FL_NORESET | UTP_FL_POLL_CARRIER;
3033
3034 /*
3035 * Attach the interface
3036 */
3037 atm_ifattach(ifp);
3038 ifp->if_snd.ifq_maxlen = 512;
3039
3040#ifdef ENABLE_BPF
3041 bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
3042#endif
3043
3044 error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET | INTR_MPSAFE,
3045 NULL, fatm_intr, sc, &sc->ih);
3046 if (error) {
3047 if_printf(ifp, "couldn't setup irq\n");
3048 goto fail;
3049 }
3050
3051 fail:
3052 if (error)
3053 fatm_detach(dev);
3054
3055 return (error);
3056}
3057
3058#if defined(FATM_DEBUG) && 0
3059static void
3060dump_s1_queue(struct fatm_softc *sc)
3061{
3062 int i;
3063 struct supqueue *q;
3064
3065 for(i = 0; i < SMALL_SUPPLY_QLEN; i++) {
3066 q = GET_QUEUE(sc->s1queue, struct supqueue, i);
3067 printf("%2d: card=%x(%x,%x) stat=%x\n", i,
3068 q->q.card,
3069 READ4(sc, q->q.card),
3070 READ4(sc, q->q.card + 4),
3071 *q->q.statp);
3072 }
3073}
3074#endif
3075
3076/*
3077 * Driver infrastructure.
3078 */
3079static device_method_t fatm_methods[] = {
3080 DEVMETHOD(device_probe, fatm_probe),
3081 DEVMETHOD(device_attach, fatm_attach),
3082 DEVMETHOD(device_detach, fatm_detach),
3083 { 0, 0 }
3084};
3085static driver_t fatm_driver = {
3086 "fatm",
3087 fatm_methods,
3088 sizeof(struct fatm_softc),
3089};
3090
3091DRIVER_MODULE(fatm, pci, fatm_driver, fatm_devclass, 0, 0);