Deleted Added
sdiff udiff text old ( 183504 ) new ( 199559 )
full compact
1/*-
2 * Copyright (c) 2001-2003
3 * Fraunhofer Institute for Open Communication Systems (FhG Fokus).
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * Author: Hartmut Brandt <harti@freebsd.org>
28 *
29 * Fore PCA200E driver for NATM
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: head/sys/dev/fatm/if_fatm.c 199559 2009-11-19 22:06:40Z jhb $");
34
35#include "opt_inet.h"
36#include "opt_natm.h"
37
38#include <sys/types.h>
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/malloc.h>
42#include <sys/kernel.h>
43#include <sys/bus.h>
44#include <sys/errno.h>
45#include <sys/conf.h>
46#include <sys/module.h>
47#include <sys/queue.h>
48#include <sys/syslog.h>
49#include <sys/endian.h>
50#include <sys/sysctl.h>
51#include <sys/condvar.h>
52#include <vm/uma.h>
53
54#include <sys/sockio.h>
55#include <sys/mbuf.h>
56#include <sys/socket.h>
57
58#include <net/if.h>
59#include <net/if_media.h>
60#include <net/if_types.h>
61#include <net/if_atm.h>
62#include <net/route.h>
63#ifdef ENABLE_BPF
64#include <net/bpf.h>
65#endif
66#ifdef INET
67#include <netinet/in.h>
68#include <netinet/if_atm.h>
69#endif
70
71#include <machine/bus.h>
72#include <machine/resource.h>
73#include <sys/bus.h>
74#include <sys/rman.h>
75#include <dev/pci/pcireg.h>
76#include <dev/pci/pcivar.h>
77
78#include <dev/utopia/utopia.h>
79
80#include <dev/fatm/if_fatmreg.h>
81#include <dev/fatm/if_fatmvar.h>
82
83#include <dev/fatm/firmware.h>
84
85devclass_t fatm_devclass;
86
87static const struct {
88 uint16_t vid;
89 uint16_t did;
90 const char *name;
91} fatm_devs[] = {
92 { 0x1127, 0x300,
93 "FORE PCA200E" },
94 { 0, 0, NULL }
95};
96
97static const struct rate {
98 uint32_t ratio;
99 uint32_t cell_rate;
100} rate_table[] = {
101#include <dev/fatm/if_fatm_rate.h>
102};
103#define RATE_TABLE_SIZE (sizeof(rate_table) / sizeof(rate_table[0]))
104
105SYSCTL_DECL(_hw_atm);
106
107MODULE_DEPEND(fatm, utopia, 1, 1, 1);
108
109static int fatm_utopia_readregs(struct ifatm *, u_int, uint8_t *, u_int *);
110static int fatm_utopia_writereg(struct ifatm *, u_int, u_int, u_int);
111
112static const struct utopia_methods fatm_utopia_methods = {
113 fatm_utopia_readregs,
114 fatm_utopia_writereg
115};
116
117#define VC_OK(SC, VPI, VCI) \
118 (((VPI) & ~((1 << IFP2IFATM((SC)->ifp)->mib.vpi_bits) - 1)) == 0 && \
119 (VCI) != 0 && ((VCI) & ~((1 << IFP2IFATM((SC)->ifp)->mib.vci_bits) - 1)) == 0)
120
121static int fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc);
122
123/*
124 * Probing is easy: step trough the list of known vendor and device
125 * ids and compare. If one is found - it's our.
126 */
127static int
128fatm_probe(device_t dev)
129{
130 int i;
131
132 for (i = 0; fatm_devs[i].name; i++)
133 if (pci_get_vendor(dev) == fatm_devs[i].vid &&
134 pci_get_device(dev) == fatm_devs[i].did) {
135 device_set_desc(dev, fatm_devs[i].name);
136 return (BUS_PROBE_DEFAULT);
137 }
138 return (ENXIO);
139}
140
141/*
142 * Function called at completion of a SUNI writeregs/readregs command.
143 * This is called from the interrupt handler while holding the softc lock.
144 * We use the queue entry as the randevouze point.
145 */
146static void
147fatm_utopia_writeregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
148{
149
150 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
151 if(H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
152 sc->istats.suni_reg_errors++;
153 q->error = EIO;
154 }
155 wakeup(q);
156}
157
158/*
159 * Write a SUNI register. The bits that are 1 in mask are written from val
160 * into register reg. We wait for the command to complete by sleeping on
161 * the register memory.
162 *
163 * We assume, that we already hold the softc mutex.
164 */
165static int
166fatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
167{
168 int error;
169 struct cmdqueue *q;
170 struct fatm_softc *sc;
171
172 sc = ifatm->ifp->if_softc;
173 FATM_CHECKLOCK(sc);
174 if (!(ifatm->ifp->if_drv_flags & IFF_DRV_RUNNING))
175 return (EIO);
176
177 /* get queue element and fill it */
178 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
179
180 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
181 if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
182 sc->istats.cmd_queue_full++;
183 return (EIO);
184 }
185 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
186
187 q->error = 0;
188 q->cb = fatm_utopia_writeregs_complete;
189 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
190 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
191
192 WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, 0);
193 BARRIER_W(sc);
194 WRITE4(sc, q->q.card + FATMOC_OP,
195 FATM_MAKE_SETOC3(reg, val, mask) | FATM_OP_INTERRUPT_SEL);
196 BARRIER_W(sc);
197
198 /*
199 * Wait for the command to complete
200 */
201 error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_setreg", hz);
202
203 switch(error) {
204
205 case EWOULDBLOCK:
206 error = EIO;
207 break;
208
209 case ERESTART:
210 error = EINTR;
211 break;
212
213 case 0:
214 error = q->error;
215 break;
216 }
217
218 return (error);
219}
220
221/*
222 * Function called at completion of a SUNI readregs command.
223 * This is called from the interrupt handler while holding the softc lock.
224 * We use reg_mem as the randevouze point.
225 */
226static void
227fatm_utopia_readregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
228{
229
230 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
231 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
232 sc->istats.suni_reg_errors++;
233 q->error = EIO;
234 }
235 wakeup(&sc->reg_mem);
236}
237
238/*
239 * Read SUNI registers
240 *
241 * We use a preallocated buffer to read the registers. Therefor we need
242 * to protect against multiple threads trying to read registers. We do this
243 * with a condition variable and a flag. We wait for the command to complete by sleeping on
244 * the register memory.
245 *
246 * We assume, that we already hold the softc mutex.
247 */
248static int
249fatm_utopia_readregs_internal(struct fatm_softc *sc)
250{
251 int error, i;
252 uint32_t *ptr;
253 struct cmdqueue *q;
254
255 /* get the buffer */
256 for (;;) {
257 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
258 return (EIO);
259 if (!(sc->flags & FATM_REGS_INUSE))
260 break;
261 cv_wait(&sc->cv_regs, &sc->mtx);
262 }
263 sc->flags |= FATM_REGS_INUSE;
264
265 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
266
267 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
268 if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
269 sc->istats.cmd_queue_full++;
270 return (EIO);
271 }
272 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
273
274 q->error = 0;
275 q->cb = fatm_utopia_readregs_complete;
276 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
277 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
278
279 bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map, BUS_DMASYNC_PREREAD);
280
281 WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, sc->reg_mem.paddr);
282 BARRIER_W(sc);
283 WRITE4(sc, q->q.card + FATMOC_OP,
284 FATM_OP_OC3_GET_REG | FATM_OP_INTERRUPT_SEL);
285 BARRIER_W(sc);
286
287 /*
288 * Wait for the command to complete
289 */
290 error = msleep(&sc->reg_mem, &sc->mtx, PZERO | PCATCH,
291 "fatm_getreg", hz);
292
293 switch(error) {
294
295 case EWOULDBLOCK:
296 error = EIO;
297 break;
298
299 case ERESTART:
300 error = EINTR;
301 break;
302
303 case 0:
304 bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map,
305 BUS_DMASYNC_POSTREAD);
306 error = q->error;
307 break;
308 }
309
310 if (error != 0) {
311 /* declare buffer to be free */
312 sc->flags &= ~FATM_REGS_INUSE;
313 cv_signal(&sc->cv_regs);
314 return (error);
315 }
316
317 /* swap if needed */
318 ptr = (uint32_t *)sc->reg_mem.mem;
319 for (i = 0; i < FATM_NREGS; i++)
320 ptr[i] = le32toh(ptr[i]) & 0xff;
321
322 return (0);
323}
324
325/*
326 * Read SUNI registers for the SUNI module.
327 *
328 * We assume, that we already hold the mutex.
329 */
330static int
331fatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *valp, u_int *np)
332{
333 int err;
334 int i;
335 struct fatm_softc *sc;
336
337 if (reg >= FATM_NREGS)
338 return (EINVAL);
339 if (reg + *np > FATM_NREGS)
340 *np = FATM_NREGS - reg;
341 sc = ifatm->ifp->if_softc;
342 FATM_CHECKLOCK(sc);
343
344 err = fatm_utopia_readregs_internal(sc);
345 if (err != 0)
346 return (err);
347
348 for (i = 0; i < *np; i++)
349 valp[i] = ((uint32_t *)sc->reg_mem.mem)[reg + i];
350
351 /* declare buffer to be free */
352 sc->flags &= ~FATM_REGS_INUSE;
353 cv_signal(&sc->cv_regs);
354
355 return (0);
356}
357
358/*
359 * Check whether the hard is beating. We remember the last heart beat and
360 * compare it to the current one. If it appears stuck for 10 times, we have
361 * a problem.
362 *
363 * Assume we hold the lock.
364 */
365static void
366fatm_check_heartbeat(struct fatm_softc *sc)
367{
368 uint32_t h;
369
370 FATM_CHECKLOCK(sc);
371
372 h = READ4(sc, FATMO_HEARTBEAT);
373 DBG(sc, BEAT, ("heartbeat %08x", h));
374
375 if (sc->stop_cnt == 10)
376 return;
377
378 if (h == sc->heartbeat) {
379 if (++sc->stop_cnt == 10) {
380 log(LOG_ERR, "i960 stopped???\n");
381 WRITE4(sc, FATMO_HIMR, 1);
382 }
383 return;
384 }
385
386 sc->stop_cnt = 0;
387 sc->heartbeat = h;
388}
389
390/*
391 * Ensure that the heart is still beating.
392 */
393static void
394fatm_watchdog(void *arg)
395{
396 struct fatm_softc *sc;
397
398 sc = arg;
399 FATM_CHECKLOCK(sc);
400 fatm_check_heartbeat(sc);
401 callout_reset(&sc->watchdog_timer, hz * 5, fatm_watchdog, sc);
402}
403
404/*
405 * Hard reset the i960 on the board. This is done by initializing registers,
406 * clearing interrupts and waiting for the selftest to finish. Not sure,
407 * whether all these barriers are actually needed.
408 *
409 * Assumes that we hold the lock.
410 */
411static int
412fatm_reset(struct fatm_softc *sc)
413{
414 int w;
415 uint32_t val;
416
417 FATM_CHECKLOCK(sc);
418
419 WRITE4(sc, FATMO_APP_BASE, FATMO_COMMON_ORIGIN);
420 BARRIER_W(sc);
421
422 WRITE4(sc, FATMO_UART_TO_960, XMIT_READY);
423 BARRIER_W(sc);
424
425 WRITE4(sc, FATMO_UART_TO_HOST, XMIT_READY);
426 BARRIER_W(sc);
427
428 WRITE4(sc, FATMO_BOOT_STATUS, COLD_START);
429 BARRIER_W(sc);
430
431 WRITE1(sc, FATMO_HCR, FATM_HCR_RESET);
432 BARRIER_W(sc);
433
434 DELAY(1000);
435
436 WRITE1(sc, FATMO_HCR, 0);
437 BARRIER_RW(sc);
438
439 DELAY(1000);
440
441 for (w = 100; w; w--) {
442 BARRIER_R(sc);
443 val = READ4(sc, FATMO_BOOT_STATUS);
444 switch (val) {
445 case SELF_TEST_OK:
446 return (0);
447 case SELF_TEST_FAIL:
448 return (EIO);
449 }
450 DELAY(1000);
451 }
452 return (EIO);
453}
454
455/*
456 * Stop the card. Must be called WITH the lock held
457 * Reset, free transmit and receive buffers. Wakeup everybody who may sleep.
458 */
459static void
460fatm_stop(struct fatm_softc *sc)
461{
462 int i;
463 struct cmdqueue *q;
464 struct rbuf *rb;
465 struct txqueue *tx;
466 uint32_t stat;
467
468 FATM_CHECKLOCK(sc);
469
470 /* Stop the board */
471 utopia_stop(&sc->utopia);
472 (void)fatm_reset(sc);
473
474 /* stop watchdog */
475 callout_stop(&sc->watchdog_timer);
476
477 if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) {
478 sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
479 ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
480 sc->utopia.carrier == UTP_CARR_OK);
481
482 /*
483 * Collect transmit mbufs, partial receive mbufs and
484 * supplied mbufs
485 */
486 for (i = 0; i < FATM_TX_QLEN; i++) {
487 tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
488 if (tx->m) {
489 bus_dmamap_unload(sc->tx_tag, tx->map);
490 m_freem(tx->m);
491 tx->m = NULL;
492 }
493 }
494
495 /* Collect supplied mbufs */
496 while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
497 LIST_REMOVE(rb, link);
498 bus_dmamap_unload(sc->rbuf_tag, rb->map);
499 m_free(rb->m);
500 rb->m = NULL;
501 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
502 }
503
504 /* Unwait any waiters */
505 wakeup(&sc->sadi_mem);
506
507 /* wakeup all threads waiting for STAT or REG buffers */
508 cv_broadcast(&sc->cv_stat);
509 cv_broadcast(&sc->cv_regs);
510
511 sc->flags &= ~(FATM_STAT_INUSE | FATM_REGS_INUSE);
512
513 /* wakeup all threads waiting on commands */
514 for (i = 0; i < FATM_CMD_QLEN; i++) {
515 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, i);
516
517 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
518 if ((stat = H_GETSTAT(q->q.statp)) != FATM_STAT_FREE) {
519 H_SETSTAT(q->q.statp, stat | FATM_STAT_ERROR);
520 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
521 wakeup(q);
522 }
523 }
524 utopia_reset_media(&sc->utopia);
525 }
526 sc->small_cnt = sc->large_cnt = 0;
527
528 /* Reset vcc info */
529 if (sc->vccs != NULL) {
530 sc->open_vccs = 0;
531 for (i = 0; i < FORE_MAX_VCC + 1; i++) {
532 if (sc->vccs[i] != NULL) {
533 if ((sc->vccs[i]->vflags & (FATM_VCC_OPEN |
534 FATM_VCC_TRY_OPEN)) == 0) {
535 uma_zfree(sc->vcc_zone, sc->vccs[i]);
536 sc->vccs[i] = NULL;
537 } else {
538 sc->vccs[i]->vflags = 0;
539 sc->open_vccs++;
540 }
541 }
542 }
543 }
544
545}
546
547/*
548 * Load the firmware into the board and save the entry point.
549 */
550static uint32_t
551firmware_load(struct fatm_softc *sc)
552{
553 struct firmware *fw = (struct firmware *)firmware;
554
555 DBG(sc, INIT, ("loading - entry=%x", fw->entry));
556 bus_space_write_region_4(sc->memt, sc->memh, fw->offset, firmware,
557 sizeof(firmware) / sizeof(firmware[0]));
558 BARRIER_RW(sc);
559
560 return (fw->entry);
561}
562
563/*
564 * Read a character from the virtual UART. The availability of a character
565 * is signaled by a non-null value of the 32 bit register. The eating of
566 * the character by us is signalled to the card by setting that register
567 * to zero.
568 */
569static int
570rx_getc(struct fatm_softc *sc)
571{
572 int w = 50;
573 int c;
574
575 while (w--) {
576 c = READ4(sc, FATMO_UART_TO_HOST);
577 BARRIER_RW(sc);
578 if (c != 0) {
579 WRITE4(sc, FATMO_UART_TO_HOST, 0);
580 DBGC(sc, UART, ("%c", c & 0xff));
581 return (c & 0xff);
582 }
583 DELAY(1000);
584 }
585 return (-1);
586}
587
588/*
589 * Eat up characters from the board and stuff them in the bit-bucket.
590 */
591static void
592rx_flush(struct fatm_softc *sc)
593{
594 int w = 10000;
595
596 while (w-- && rx_getc(sc) >= 0)
597 ;
598}
599
600/*
601 * Write a character to the card. The UART is available if the register
602 * is zero.
603 */
604static int
605tx_putc(struct fatm_softc *sc, u_char c)
606{
607 int w = 10;
608 int c1;
609
610 while (w--) {
611 c1 = READ4(sc, FATMO_UART_TO_960);
612 BARRIER_RW(sc);
613 if (c1 == 0) {
614 WRITE4(sc, FATMO_UART_TO_960, c | CHAR_AVAIL);
615 DBGC(sc, UART, ("%c", c & 0xff));
616 return (0);
617 }
618 DELAY(1000);
619 }
620 return (-1);
621}
622
623/*
624 * Start the firmware. This is doing by issuing a 'go' command with
625 * the hex entry address of the firmware. Then we wait for the self-test to
626 * succeed.
627 */
628static int
629fatm_start_firmware(struct fatm_softc *sc, uint32_t start)
630{
631 static char hex[] = "0123456789abcdef";
632 u_int w, val;
633
634 DBG(sc, INIT, ("starting"));
635 rx_flush(sc);
636 tx_putc(sc, '\r');
637 DELAY(1000);
638
639 rx_flush(sc);
640
641 tx_putc(sc, 'g');
642 (void)rx_getc(sc);
643 tx_putc(sc, 'o');
644 (void)rx_getc(sc);
645 tx_putc(sc, ' ');
646 (void)rx_getc(sc);
647
648 tx_putc(sc, hex[(start >> 12) & 0xf]);
649 (void)rx_getc(sc);
650 tx_putc(sc, hex[(start >> 8) & 0xf]);
651 (void)rx_getc(sc);
652 tx_putc(sc, hex[(start >> 4) & 0xf]);
653 (void)rx_getc(sc);
654 tx_putc(sc, hex[(start >> 0) & 0xf]);
655 (void)rx_getc(sc);
656
657 tx_putc(sc, '\r');
658 rx_flush(sc);
659
660 for (w = 100; w; w--) {
661 BARRIER_R(sc);
662 val = READ4(sc, FATMO_BOOT_STATUS);
663 switch (val) {
664 case CP_RUNNING:
665 return (0);
666 case SELF_TEST_FAIL:
667 return (EIO);
668 }
669 DELAY(1000);
670 }
671 return (EIO);
672}
673
674/*
675 * Initialize one card and host queue.
676 */
677static void
678init_card_queue(struct fatm_softc *sc, struct fqueue *queue, int qlen,
679 size_t qel_size, size_t desc_size, cardoff_t off,
680 u_char **statpp, uint32_t *cardstat, u_char *descp, uint32_t carddesc)
681{
682 struct fqelem *el = queue->chunk;
683
684 while (qlen--) {
685 el->card = off;
686 off += 8; /* size of card entry */
687
688 el->statp = (uint32_t *)(*statpp);
689 (*statpp) += sizeof(uint32_t);
690 H_SETSTAT(el->statp, FATM_STAT_FREE);
691 H_SYNCSTAT_PREWRITE(sc, el->statp);
692
693 WRITE4(sc, el->card + FATMOS_STATP, (*cardstat));
694 (*cardstat) += sizeof(uint32_t);
695
696 el->ioblk = descp;
697 descp += desc_size;
698 el->card_ioblk = carddesc;
699 carddesc += desc_size;
700
701 el = (struct fqelem *)((u_char *)el + qel_size);
702 }
703 queue->tail = queue->head = 0;
704}
705
706/*
707 * Issue the initialize operation to the card, wait for completion and
708 * initialize the on-board and host queue structures with offsets and
709 * addresses.
710 */
711static int
712fatm_init_cmd(struct fatm_softc *sc)
713{
714 int w, c;
715 u_char *statp;
716 uint32_t card_stat;
717 u_int cnt;
718 struct fqelem *el;
719 cardoff_t off;
720
721 DBG(sc, INIT, ("command"));
722 WRITE4(sc, FATMO_ISTAT, 0);
723 WRITE4(sc, FATMO_IMASK, 1);
724 WRITE4(sc, FATMO_HLOGGER, 0);
725
726 WRITE4(sc, FATMO_INIT + FATMOI_RECEIVE_TRESHOLD, 0);
727 WRITE4(sc, FATMO_INIT + FATMOI_NUM_CONNECT, FORE_MAX_VCC);
728 WRITE4(sc, FATMO_INIT + FATMOI_CQUEUE_LEN, FATM_CMD_QLEN);
729 WRITE4(sc, FATMO_INIT + FATMOI_TQUEUE_LEN, FATM_TX_QLEN);
730 WRITE4(sc, FATMO_INIT + FATMOI_RQUEUE_LEN, FATM_RX_QLEN);
731 WRITE4(sc, FATMO_INIT + FATMOI_RPD_EXTENSION, RPD_EXTENSIONS);
732 WRITE4(sc, FATMO_INIT + FATMOI_TPD_EXTENSION, TPD_EXTENSIONS);
733
734 /*
735 * initialize buffer descriptors
736 */
737 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_QUEUE_LENGTH,
738 SMALL_SUPPLY_QLEN);
739 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_BUFFER_SIZE,
740 SMALL_BUFFER_LEN);
741 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_POOL_SIZE,
742 SMALL_POOL_SIZE);
743 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_SUPPLY_BLKSIZE,
744 SMALL_SUPPLY_BLKSIZE);
745
746 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_QUEUE_LENGTH,
747 LARGE_SUPPLY_QLEN);
748 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_BUFFER_SIZE,
749 LARGE_BUFFER_LEN);
750 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_POOL_SIZE,
751 LARGE_POOL_SIZE);
752 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_SUPPLY_BLKSIZE,
753 LARGE_SUPPLY_BLKSIZE);
754
755 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_QUEUE_LENGTH, 0);
756 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_BUFFER_SIZE, 0);
757 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_POOL_SIZE, 0);
758 WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_SUPPLY_BLKSIZE, 0);
759
760 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_QUEUE_LENGTH, 0);
761 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_BUFFER_SIZE, 0);
762 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_POOL_SIZE, 0);
763 WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_SUPPLY_BLKSIZE, 0);
764
765 /*
766 * Start the command
767 */
768 BARRIER_W(sc);
769 WRITE4(sc, FATMO_INIT + FATMOI_STATUS, FATM_STAT_PENDING);
770 BARRIER_W(sc);
771 WRITE4(sc, FATMO_INIT + FATMOI_OP, FATM_OP_INITIALIZE);
772 BARRIER_W(sc);
773
774 /*
775 * Busy wait for completion
776 */
777 w = 100;
778 while (w--) {
779 c = READ4(sc, FATMO_INIT + FATMOI_STATUS);
780 BARRIER_R(sc);
781 if (c & FATM_STAT_COMPLETE)
782 break;
783 DELAY(1000);
784 }
785
786 if (c & FATM_STAT_ERROR)
787 return (EIO);
788
789 /*
790 * Initialize the queues
791 */
792 statp = sc->stat_mem.mem;
793 card_stat = sc->stat_mem.paddr;
794
795 /*
796 * Command queue. This is special in that it's on the card.
797 */
798 el = sc->cmdqueue.chunk;
799 off = READ4(sc, FATMO_COMMAND_QUEUE);
800 DBG(sc, INIT, ("cmd queue=%x", off));
801 for (cnt = 0; cnt < FATM_CMD_QLEN; cnt++) {
802 el = &((struct cmdqueue *)sc->cmdqueue.chunk + cnt)->q;
803
804 el->card = off;
805 off += 32; /* size of card structure */
806
807 el->statp = (uint32_t *)statp;
808 statp += sizeof(uint32_t);
809 H_SETSTAT(el->statp, FATM_STAT_FREE);
810 H_SYNCSTAT_PREWRITE(sc, el->statp);
811
812 WRITE4(sc, el->card + FATMOC_STATP, card_stat);
813 card_stat += sizeof(uint32_t);
814 }
815 sc->cmdqueue.tail = sc->cmdqueue.head = 0;
816
817 /*
818 * Now the other queues. These are in memory
819 */
820 init_card_queue(sc, &sc->txqueue, FATM_TX_QLEN,
821 sizeof(struct txqueue), TPD_SIZE,
822 READ4(sc, FATMO_TRANSMIT_QUEUE),
823 &statp, &card_stat, sc->txq_mem.mem, sc->txq_mem.paddr);
824
825 init_card_queue(sc, &sc->rxqueue, FATM_RX_QLEN,
826 sizeof(struct rxqueue), RPD_SIZE,
827 READ4(sc, FATMO_RECEIVE_QUEUE),
828 &statp, &card_stat, sc->rxq_mem.mem, sc->rxq_mem.paddr);
829
830 init_card_queue(sc, &sc->s1queue, SMALL_SUPPLY_QLEN,
831 sizeof(struct supqueue), BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE),
832 READ4(sc, FATMO_SMALL_B1_QUEUE),
833 &statp, &card_stat, sc->s1q_mem.mem, sc->s1q_mem.paddr);
834
835 init_card_queue(sc, &sc->l1queue, LARGE_SUPPLY_QLEN,
836 sizeof(struct supqueue), BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE),
837 READ4(sc, FATMO_LARGE_B1_QUEUE),
838 &statp, &card_stat, sc->l1q_mem.mem, sc->l1q_mem.paddr);
839
840 sc->txcnt = 0;
841
842 return (0);
843}
844
845/*
846 * Read PROM. Called only from attach code. Here we spin because the interrupt
847 * handler is not yet set up.
848 */
849static int
850fatm_getprom(struct fatm_softc *sc)
851{
852 int i;
853 struct prom *prom;
854 struct cmdqueue *q;
855
856 DBG(sc, INIT, ("reading prom"));
857 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
858 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
859
860 q->error = 0;
861 q->cb = NULL;;
862 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
863 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
864
865 bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
866 BUS_DMASYNC_PREREAD);
867
868 WRITE4(sc, q->q.card + FATMOC_GPROM_BUF, sc->prom_mem.paddr);
869 BARRIER_W(sc);
870 WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_GET_PROM_DATA);
871 BARRIER_W(sc);
872
873 for (i = 0; i < 1000; i++) {
874 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
875 if (H_GETSTAT(q->q.statp) &
876 (FATM_STAT_COMPLETE | FATM_STAT_ERROR))
877 break;
878 DELAY(1000);
879 }
880 if (i == 1000) {
881 if_printf(sc->ifp, "getprom timeout\n");
882 return (EIO);
883 }
884 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
885 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
886 if_printf(sc->ifp, "getprom error\n");
887 return (EIO);
888 }
889 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
890 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
891 NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
892
893 bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
894 BUS_DMASYNC_POSTREAD);
895
896
897#ifdef notdef
898 {
899 u_int i;
900
901 printf("PROM: ");
902 u_char *ptr = (u_char *)sc->prom_mem.mem;
903 for (i = 0; i < sizeof(struct prom); i++)
904 printf("%02x ", *ptr++);
905 printf("\n");
906 }
907#endif
908
909 prom = (struct prom *)sc->prom_mem.mem;
910
911 bcopy(prom->mac + 2, IFP2IFATM(sc->ifp)->mib.esi, 6);
912 IFP2IFATM(sc->ifp)->mib.serial = le32toh(prom->serial);
913 IFP2IFATM(sc->ifp)->mib.hw_version = le32toh(prom->version);
914 IFP2IFATM(sc->ifp)->mib.sw_version = READ4(sc, FATMO_FIRMWARE_RELEASE);
915
916 if_printf(sc->ifp, "ESI=%02x:%02x:%02x:%02x:%02x:%02x "
917 "serial=%u hw=0x%x sw=0x%x\n", IFP2IFATM(sc->ifp)->mib.esi[0],
918 IFP2IFATM(sc->ifp)->mib.esi[1], IFP2IFATM(sc->ifp)->mib.esi[2], IFP2IFATM(sc->ifp)->mib.esi[3],
919 IFP2IFATM(sc->ifp)->mib.esi[4], IFP2IFATM(sc->ifp)->mib.esi[5], IFP2IFATM(sc->ifp)->mib.serial,
920 IFP2IFATM(sc->ifp)->mib.hw_version, IFP2IFATM(sc->ifp)->mib.sw_version);
921
922 return (0);
923}
924
925/*
926 * This is the callback function for bus_dmamap_load. We assume, that we
927 * have a 32-bit bus and so have always one segment.
928 */
929static void
930dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
931{
932 bus_addr_t *ptr = (bus_addr_t *)arg;
933
934 if (error != 0) {
935 printf("%s: error=%d\n", __func__, error);
936 return;
937 }
938 KASSERT(nsegs == 1, ("too many DMA segments"));
939 KASSERT(segs[0].ds_addr <= 0xffffffff, ("DMA address too large %lx",
940 (u_long)segs[0].ds_addr));
941
942 *ptr = segs[0].ds_addr;
943}
944
945/*
946 * Allocate a chunk of DMA-able memory and map it.
947 */
948static int
949alloc_dma_memory(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
950{
951 int error;
952
953 mem->mem = NULL;
954
955 if (bus_dma_tag_create(sc->parent_dmat, mem->align, 0,
956 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
957 NULL, NULL, mem->size, 1, BUS_SPACE_MAXSIZE_32BIT,
958 BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
959 if_printf(sc->ifp, "could not allocate %s DMA tag\n",
960 nm);
961 return (ENOMEM);
962 }
963
964 error = bus_dmamem_alloc(mem->dmat, &mem->mem, 0, &mem->map);
965 if (error) {
966 if_printf(sc->ifp, "could not allocate %s DMA memory: "
967 "%d\n", nm, error);
968 bus_dma_tag_destroy(mem->dmat);
969 mem->mem = NULL;
970 return (error);
971 }
972
973 error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
974 dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
975 if (error) {
976 if_printf(sc->ifp, "could not load %s DMA memory: "
977 "%d\n", nm, error);
978 bus_dmamem_free(mem->dmat, mem->mem, mem->map);
979 bus_dma_tag_destroy(mem->dmat);
980 mem->mem = NULL;
981 return (error);
982 }
983
984 DBG(sc, DMA, ("DMA %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
985 (u_long)mem->paddr, mem->size, mem->align));
986
987 return (0);
988}
989
990#ifdef TEST_DMA_SYNC
991static int
992alloc_dma_memoryX(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
993{
994 int error;
995
996 mem->mem = NULL;
997
998 if (bus_dma_tag_create(NULL, mem->align, 0,
999 BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR,
1000 NULL, NULL, mem->size, 1, mem->size,
1001 BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
1002 if_printf(sc->ifp, "could not allocate %s DMA tag\n",
1003 nm);
1004 return (ENOMEM);
1005 }
1006
1007 mem->mem = contigmalloc(mem->size, M_DEVBUF, M_WAITOK,
1008 BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR_32BIT, mem->align, 0);
1009
1010 error = bus_dmamap_create(mem->dmat, 0, &mem->map);
1011 if (error) {
1012 if_printf(sc->ifp, "could not allocate %s DMA map: "
1013 "%d\n", nm, error);
1014 contigfree(mem->mem, mem->size, M_DEVBUF);
1015 bus_dma_tag_destroy(mem->dmat);
1016 mem->mem = NULL;
1017 return (error);
1018 }
1019
1020 error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
1021 dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
1022 if (error) {
1023 if_printf(sc->ifp, "could not load %s DMA memory: "
1024 "%d\n", nm, error);
1025 bus_dmamap_destroy(mem->dmat, mem->map);
1026 contigfree(mem->mem, mem->size, M_DEVBUF);
1027 bus_dma_tag_destroy(mem->dmat);
1028 mem->mem = NULL;
1029 return (error);
1030 }
1031
1032 DBG(sc, DMA, ("DMAX %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
1033 (u_long)mem->paddr, mem->size, mem->align));
1034
1035 printf("DMAX: %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
1036 (u_long)mem->paddr, mem->size, mem->align);
1037
1038 return (0);
1039}
1040#endif /* TEST_DMA_SYNC */
1041
1042/*
1043 * Destroy all resources of an dma-able memory chunk
1044 */
1045static void
1046destroy_dma_memory(struct fatm_mem *mem)
1047{
1048 if (mem->mem != NULL) {
1049 bus_dmamap_unload(mem->dmat, mem->map);
1050 bus_dmamem_free(mem->dmat, mem->mem, mem->map);
1051 bus_dma_tag_destroy(mem->dmat);
1052 mem->mem = NULL;
1053 }
1054}
1055#ifdef TEST_DMA_SYNC
1056static void
1057destroy_dma_memoryX(struct fatm_mem *mem)
1058{
1059 if (mem->mem != NULL) {
1060 bus_dmamap_unload(mem->dmat, mem->map);
1061 bus_dmamap_destroy(mem->dmat, mem->map);
1062 contigfree(mem->mem, mem->size, M_DEVBUF);
1063 bus_dma_tag_destroy(mem->dmat);
1064 mem->mem = NULL;
1065 }
1066}
1067#endif /* TEST_DMA_SYNC */
1068
1069/*
1070 * Try to supply buffers to the card if there are free entries in the queues
1071 */
1072static void
1073fatm_supply_small_buffers(struct fatm_softc *sc)
1074{
1075 int nblocks, nbufs;
1076 struct supqueue *q;
1077 struct rbd *bd;
1078 int i, j, error, cnt;
1079 struct mbuf *m;
1080 struct rbuf *rb;
1081 bus_addr_t phys;
1082
1083 nbufs = max(4 * sc->open_vccs, 32);
1084 nbufs = min(nbufs, SMALL_POOL_SIZE);
1085 nbufs -= sc->small_cnt;
1086
1087 nblocks = (nbufs + SMALL_SUPPLY_BLKSIZE - 1) / SMALL_SUPPLY_BLKSIZE;
1088 for (cnt = 0; cnt < nblocks; cnt++) {
1089 q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.head);
1090
1091 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1092 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
1093 break;
1094
1095 bd = (struct rbd *)q->q.ioblk;
1096
1097 for (i = 0; i < SMALL_SUPPLY_BLKSIZE; i++) {
1098 if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
1099 if_printf(sc->ifp, "out of rbufs\n");
1100 break;
1101 }
1102 MGETHDR(m, M_DONTWAIT, MT_DATA);
1103 if (m == NULL) {
1104 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1105 break;
1106 }
1107 MH_ALIGN(m, SMALL_BUFFER_LEN);
1108 error = bus_dmamap_load(sc->rbuf_tag, rb->map,
1109 m->m_data, SMALL_BUFFER_LEN, dmaload_helper,
1110 &phys, BUS_DMA_NOWAIT);
1111 if (error) {
1112 if_printf(sc->ifp,
1113 "dmamap_load mbuf failed %d", error);
1114 m_freem(m);
1115 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1116 break;
1117 }
1118 bus_dmamap_sync(sc->rbuf_tag, rb->map,
1119 BUS_DMASYNC_PREREAD);
1120
1121 LIST_REMOVE(rb, link);
1122 LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);
1123
1124 rb->m = m;
1125 bd[i].handle = rb - sc->rbufs;
1126 H_SETDESC(bd[i].buffer, phys);
1127 }
1128
1129 if (i < SMALL_SUPPLY_BLKSIZE) {
1130 for (j = 0; j < i; j++) {
1131 rb = sc->rbufs + bd[j].handle;
1132 bus_dmamap_unload(sc->rbuf_tag, rb->map);
1133 m_free(rb->m);
1134 rb->m = NULL;
1135
1136 LIST_REMOVE(rb, link);
1137 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1138 }
1139 break;
1140 }
1141 H_SYNCQ_PREWRITE(&sc->s1q_mem, bd,
1142 sizeof(struct rbd) * SMALL_SUPPLY_BLKSIZE);
1143
1144 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1145 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1146
1147 WRITE4(sc, q->q.card, q->q.card_ioblk);
1148 BARRIER_W(sc);
1149
1150 sc->small_cnt += SMALL_SUPPLY_BLKSIZE;
1151
1152 NEXT_QUEUE_ENTRY(sc->s1queue.head, SMALL_SUPPLY_QLEN);
1153 }
1154}
1155
1156/*
1157 * Try to supply buffers to the card if there are free entries in the queues
1158 * We assume that all buffers are within the address space accessible by the
1159 * card (32-bit), so we don't need bounce buffers.
1160 */
1161static void
1162fatm_supply_large_buffers(struct fatm_softc *sc)
1163{
1164 int nbufs, nblocks, cnt;
1165 struct supqueue *q;
1166 struct rbd *bd;
1167 int i, j, error;
1168 struct mbuf *m;
1169 struct rbuf *rb;
1170 bus_addr_t phys;
1171
1172 nbufs = max(4 * sc->open_vccs, 32);
1173 nbufs = min(nbufs, LARGE_POOL_SIZE);
1174 nbufs -= sc->large_cnt;
1175
1176 nblocks = (nbufs + LARGE_SUPPLY_BLKSIZE - 1) / LARGE_SUPPLY_BLKSIZE;
1177
1178 for (cnt = 0; cnt < nblocks; cnt++) {
1179 q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.head);
1180
1181 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1182 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
1183 break;
1184
1185 bd = (struct rbd *)q->q.ioblk;
1186
1187 for (i = 0; i < LARGE_SUPPLY_BLKSIZE; i++) {
1188 if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
1189 if_printf(sc->ifp, "out of rbufs\n");
1190 break;
1191 }
1192 if ((m = m_getcl(M_DONTWAIT, MT_DATA,
1193 M_PKTHDR)) == NULL) {
1194 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1195 break;
1196 }
1197 /* No MEXT_ALIGN */
1198 m->m_data += MCLBYTES - LARGE_BUFFER_LEN;
1199 error = bus_dmamap_load(sc->rbuf_tag, rb->map,
1200 m->m_data, LARGE_BUFFER_LEN, dmaload_helper,
1201 &phys, BUS_DMA_NOWAIT);
1202 if (error) {
1203 if_printf(sc->ifp,
1204 "dmamap_load mbuf failed %d", error);
1205 m_freem(m);
1206 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1207 break;
1208 }
1209
1210 bus_dmamap_sync(sc->rbuf_tag, rb->map,
1211 BUS_DMASYNC_PREREAD);
1212
1213 LIST_REMOVE(rb, link);
1214 LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);
1215
1216 rb->m = m;
1217 bd[i].handle = rb - sc->rbufs;
1218 H_SETDESC(bd[i].buffer, phys);
1219 }
1220
1221 if (i < LARGE_SUPPLY_BLKSIZE) {
1222 for (j = 0; j < i; j++) {
1223 rb = sc->rbufs + bd[j].handle;
1224 bus_dmamap_unload(sc->rbuf_tag, rb->map);
1225 m_free(rb->m);
1226 rb->m = NULL;
1227
1228 LIST_REMOVE(rb, link);
1229 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1230 }
1231 break;
1232 }
1233 H_SYNCQ_PREWRITE(&sc->l1q_mem, bd,
1234 sizeof(struct rbd) * LARGE_SUPPLY_BLKSIZE);
1235
1236 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1237 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1238 WRITE4(sc, q->q.card, q->q.card_ioblk);
1239 BARRIER_W(sc);
1240
1241 sc->large_cnt += LARGE_SUPPLY_BLKSIZE;
1242
1243 NEXT_QUEUE_ENTRY(sc->l1queue.head, LARGE_SUPPLY_QLEN);
1244 }
1245}
1246
1247
1248/*
1249 * Actually start the card. The lock must be held here.
1250 * Reset, load the firmware, start it, initializes queues, read the PROM
1251 * and supply receive buffers to the card.
1252 */
1253static void
1254fatm_init_locked(struct fatm_softc *sc)
1255{
1256 struct rxqueue *q;
1257 int i, c, error;
1258 uint32_t start;
1259
1260 DBG(sc, INIT, ("initialize"));
1261 if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING)
1262 fatm_stop(sc);
1263
1264 /*
1265 * Hard reset the board
1266 */
1267 if (fatm_reset(sc))
1268 return;
1269
1270 start = firmware_load(sc);
1271 if (fatm_start_firmware(sc, start) || fatm_init_cmd(sc) ||
1272 fatm_getprom(sc)) {
1273 fatm_reset(sc);
1274 return;
1275 }
1276
1277 /*
1278 * Handle media
1279 */
1280 c = READ4(sc, FATMO_MEDIA_TYPE);
1281 switch (c) {
1282
1283 case FORE_MT_TAXI_100:
1284 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_TAXI_100;
1285 IFP2IFATM(sc->ifp)->mib.pcr = 227273;
1286 break;
1287
1288 case FORE_MT_TAXI_140:
1289 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_TAXI_140;
1290 IFP2IFATM(sc->ifp)->mib.pcr = 318181;
1291 break;
1292
1293 case FORE_MT_UTP_SONET:
1294 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155;
1295 IFP2IFATM(sc->ifp)->mib.pcr = 353207;
1296 break;
1297
1298 case FORE_MT_MM_OC3_ST:
1299 case FORE_MT_MM_OC3_SC:
1300 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155;
1301 IFP2IFATM(sc->ifp)->mib.pcr = 353207;
1302 break;
1303
1304 case FORE_MT_SM_OC3_ST:
1305 case FORE_MT_SM_OC3_SC:
1306 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_155;
1307 IFP2IFATM(sc->ifp)->mib.pcr = 353207;
1308 break;
1309
1310 default:
1311 log(LOG_ERR, "fatm: unknown media type %d\n", c);
1312 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN;
1313 IFP2IFATM(sc->ifp)->mib.pcr = 353207;
1314 break;
1315 }
1316 sc->ifp->if_baudrate = 53 * 8 * IFP2IFATM(sc->ifp)->mib.pcr;
1317 utopia_init_media(&sc->utopia);
1318
1319 /*
1320 * Initialize the RBDs
1321 */
1322 for (i = 0; i < FATM_RX_QLEN; i++) {
1323 q = GET_QUEUE(sc->rxqueue, struct rxqueue, i);
1324 WRITE4(sc, q->q.card + 0, q->q.card_ioblk);
1325 }
1326 BARRIER_W(sc);
1327
1328 /*
1329 * Supply buffers to the card
1330 */
1331 fatm_supply_small_buffers(sc);
1332 fatm_supply_large_buffers(sc);
1333
1334 /*
1335 * Now set flags, that we are ready
1336 */
1337 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
1338
1339 /*
1340 * Start the watchdog timer
1341 */
1342 callout_reset(&sc->watchdog_timer, hz * 5, fatm_watchdog, sc);
1343
1344 /* start SUNI */
1345 utopia_start(&sc->utopia);
1346
1347 ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
1348 sc->utopia.carrier == UTP_CARR_OK);
1349
1350 /* start all channels */
1351 for (i = 0; i < FORE_MAX_VCC + 1; i++)
1352 if (sc->vccs[i] != NULL) {
1353 sc->vccs[i]->vflags |= FATM_VCC_REOPEN;
1354 error = fatm_load_vc(sc, sc->vccs[i]);
1355 if (error != 0) {
1356 if_printf(sc->ifp, "reopening %u "
1357 "failed: %d\n", i, error);
1358 sc->vccs[i]->vflags &= ~FATM_VCC_REOPEN;
1359 }
1360 }
1361
1362 DBG(sc, INIT, ("done"));
1363}
1364
1365/*
1366 * This is the exported as initialisation function.
1367 */
1368static void
1369fatm_init(void *p)
1370{
1371 struct fatm_softc *sc = p;
1372
1373 FATM_LOCK(sc);
1374 fatm_init_locked(sc);
1375 FATM_UNLOCK(sc);
1376}
1377
1378/************************************************************/
1379/*
1380 * The INTERRUPT handling
1381 */
1382/*
1383 * Check the command queue. If a command was completed, call the completion
1384 * function for that command.
1385 */
1386static void
1387fatm_intr_drain_cmd(struct fatm_softc *sc)
1388{
1389 struct cmdqueue *q;
1390 int stat;
1391
1392 /*
1393 * Drain command queue
1394 */
1395 for (;;) {
1396 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.tail);
1397
1398 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1399 stat = H_GETSTAT(q->q.statp);
1400
1401 if (stat != FATM_STAT_COMPLETE &&
1402 stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
1403 stat != FATM_STAT_ERROR)
1404 break;
1405
1406 (*q->cb)(sc, q);
1407
1408 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1409 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1410
1411 NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
1412 }
1413}
1414
1415/*
1416 * Drain the small buffer supply queue.
1417 */
1418static void
1419fatm_intr_drain_small_buffers(struct fatm_softc *sc)
1420{
1421 struct supqueue *q;
1422 int stat;
1423
1424 for (;;) {
1425 q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.tail);
1426
1427 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1428 stat = H_GETSTAT(q->q.statp);
1429
1430 if ((stat & FATM_STAT_COMPLETE) == 0)
1431 break;
1432 if (stat & FATM_STAT_ERROR)
1433 log(LOG_ERR, "%s: status %x\n", __func__, stat);
1434
1435 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1436 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1437
1438 NEXT_QUEUE_ENTRY(sc->s1queue.tail, SMALL_SUPPLY_QLEN);
1439 }
1440}
1441
1442/*
1443 * Drain the large buffer supply queue.
1444 */
1445static void
1446fatm_intr_drain_large_buffers(struct fatm_softc *sc)
1447{
1448 struct supqueue *q;
1449 int stat;
1450
1451 for (;;) {
1452 q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.tail);
1453
1454 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1455 stat = H_GETSTAT(q->q.statp);
1456
1457 if ((stat & FATM_STAT_COMPLETE) == 0)
1458 break;
1459 if (stat & FATM_STAT_ERROR)
1460 log(LOG_ERR, "%s status %x\n", __func__, stat);
1461
1462 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1463 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1464
1465 NEXT_QUEUE_ENTRY(sc->l1queue.tail, LARGE_SUPPLY_QLEN);
1466 }
1467}
1468
1469/*
1470 * Check the receive queue. Send any received PDU up the protocol stack
1471 * (except when there was an error or the VCI appears to be closed. In this
1472 * case discard the PDU).
1473 */
1474static void
1475fatm_intr_drain_rx(struct fatm_softc *sc)
1476{
1477 struct rxqueue *q;
1478 int stat, mlen;
1479 u_int i;
1480 uint32_t h;
1481 struct mbuf *last, *m0;
1482 struct rpd *rpd;
1483 struct rbuf *rb;
1484 u_int vci, vpi, pt;
1485 struct atm_pseudohdr aph;
1486 struct ifnet *ifp;
1487 struct card_vcc *vc;
1488
1489 for (;;) {
1490 q = GET_QUEUE(sc->rxqueue, struct rxqueue, sc->rxqueue.tail);
1491
1492 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1493 stat = H_GETSTAT(q->q.statp);
1494
1495 if ((stat & FATM_STAT_COMPLETE) == 0)
1496 break;
1497
1498 rpd = (struct rpd *)q->q.ioblk;
1499 H_SYNCQ_POSTREAD(&sc->rxq_mem, rpd, RPD_SIZE);
1500
1501 rpd->nseg = le32toh(rpd->nseg);
1502 mlen = 0;
1503 m0 = last = 0;
1504 for (i = 0; i < rpd->nseg; i++) {
1505 rb = sc->rbufs + rpd->segment[i].handle;
1506 if (m0 == NULL) {
1507 m0 = last = rb->m;
1508 } else {
1509 last->m_next = rb->m;
1510 last = rb->m;
1511 }
1512 last->m_next = NULL;
1513 if (last->m_flags & M_EXT)
1514 sc->large_cnt--;
1515 else
1516 sc->small_cnt--;
1517 bus_dmamap_sync(sc->rbuf_tag, rb->map,
1518 BUS_DMASYNC_POSTREAD);
1519 bus_dmamap_unload(sc->rbuf_tag, rb->map);
1520 rb->m = NULL;
1521
1522 LIST_REMOVE(rb, link);
1523 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1524
1525 last->m_len = le32toh(rpd->segment[i].length);
1526 mlen += last->m_len;
1527 }
1528
1529 m0->m_pkthdr.len = mlen;
1530 m0->m_pkthdr.rcvif = sc->ifp;
1531
1532 h = le32toh(rpd->atm_header);
1533 vpi = (h >> 20) & 0xff;
1534 vci = (h >> 4 ) & 0xffff;
1535 pt = (h >> 1 ) & 0x7;
1536
1537 /*
1538 * Locate the VCC this packet belongs to
1539 */
1540 if (!VC_OK(sc, vpi, vci))
1541 vc = NULL;
1542 else if ((vc = sc->vccs[vci]) == NULL ||
1543 !(sc->vccs[vci]->vflags & FATM_VCC_OPEN)) {
1544 sc->istats.rx_closed++;
1545 vc = NULL;
1546 }
1547
1548 DBG(sc, RCV, ("RCV: vc=%u.%u pt=%u mlen=%d %s", vpi, vci,
1549 pt, mlen, vc == NULL ? "dropped" : ""));
1550
1551 if (vc == NULL) {
1552 m_freem(m0);
1553 } else {
1554#ifdef ENABLE_BPF
1555 if (!(vc->param.flags & ATMIO_FLAG_NG) &&
1556 vc->param.aal == ATMIO_AAL_5 &&
1557 (vc->param.flags & ATM_PH_LLCSNAP))
1558 BPF_MTAP(sc->ifp, m0);
1559#endif
1560
1561 ATM_PH_FLAGS(&aph) = vc->param.flags;
1562 ATM_PH_VPI(&aph) = vpi;
1563 ATM_PH_SETVCI(&aph, vci);
1564
1565 ifp = sc->ifp;
1566 ifp->if_ipackets++;
1567
1568 vc->ipackets++;
1569 vc->ibytes += m0->m_pkthdr.len;
1570
1571 atm_input(ifp, &aph, m0, vc->rxhand);
1572 }
1573
1574 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1575 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1576
1577 WRITE4(sc, q->q.card, q->q.card_ioblk);
1578 BARRIER_W(sc);
1579
1580 NEXT_QUEUE_ENTRY(sc->rxqueue.tail, FATM_RX_QLEN);
1581 }
1582}
1583
1584/*
1585 * Check the transmit queue. Free the mbuf chains that we were transmitting.
1586 */
1587static void
1588fatm_intr_drain_tx(struct fatm_softc *sc)
1589{
1590 struct txqueue *q;
1591 int stat;
1592
1593 /*
1594 * Drain tx queue
1595 */
1596 for (;;) {
1597 q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.tail);
1598
1599 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1600 stat = H_GETSTAT(q->q.statp);
1601
1602 if (stat != FATM_STAT_COMPLETE &&
1603 stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
1604 stat != FATM_STAT_ERROR)
1605 break;
1606
1607 H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1608 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1609
1610 bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_POSTWRITE);
1611 bus_dmamap_unload(sc->tx_tag, q->map);
1612
1613 m_freem(q->m);
1614 q->m = NULL;
1615 sc->txcnt--;
1616
1617 NEXT_QUEUE_ENTRY(sc->txqueue.tail, FATM_TX_QLEN);
1618 }
1619}
1620
1621/*
1622 * Interrupt handler
1623 */
1624static void
1625fatm_intr(void *p)
1626{
1627 struct fatm_softc *sc = (struct fatm_softc *)p;
1628
1629 FATM_LOCK(sc);
1630 if (!READ4(sc, FATMO_PSR)) {
1631 FATM_UNLOCK(sc);
1632 return;
1633 }
1634 WRITE4(sc, FATMO_HCR, FATM_HCR_CLRIRQ);
1635
1636 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1637 FATM_UNLOCK(sc);
1638 return;
1639 }
1640 fatm_intr_drain_cmd(sc);
1641 fatm_intr_drain_rx(sc);
1642 fatm_intr_drain_tx(sc);
1643 fatm_intr_drain_small_buffers(sc);
1644 fatm_intr_drain_large_buffers(sc);
1645 fatm_supply_small_buffers(sc);
1646 fatm_supply_large_buffers(sc);
1647
1648 FATM_UNLOCK(sc);
1649
1650 if (sc->retry_tx && _IF_QLEN(&sc->ifp->if_snd))
1651 (*sc->ifp->if_start)(sc->ifp);
1652}
1653
1654/*
1655 * Get device statistics. This must be called with the softc locked.
1656 * We use a preallocated buffer, so we need to protect this buffer.
1657 * We do this by using a condition variable and a flag. If the flag is set
1658 * the buffer is in use by one thread (one thread is executing a GETSTAT
1659 * card command). In this case all other threads that are trying to get
1660 * statistics block on that condition variable. When the thread finishes
1661 * using the buffer it resets the flag and signals the condition variable. This
1662 * will wakeup the next thread that is waiting for the buffer. If the interface
1663 * is stopped the stopping function will broadcast the cv. All threads will
1664 * find that the interface has been stopped and return.
1665 *
1666 * Aquiring of the buffer is done by the fatm_getstat() function. The freeing
1667 * must be done by the caller when he has finished using the buffer.
1668 */
1669static void
1670fatm_getstat_complete(struct fatm_softc *sc, struct cmdqueue *q)
1671{
1672
1673 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1674 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
1675 sc->istats.get_stat_errors++;
1676 q->error = EIO;
1677 }
1678 wakeup(&sc->sadi_mem);
1679}
1680static int
1681fatm_getstat(struct fatm_softc *sc)
1682{
1683 int error;
1684 struct cmdqueue *q;
1685
1686 /*
1687 * Wait until either the interface is stopped or we can get the
1688 * statistics buffer
1689 */
1690 for (;;) {
1691 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
1692 return (EIO);
1693 if (!(sc->flags & FATM_STAT_INUSE))
1694 break;
1695 cv_wait(&sc->cv_stat, &sc->mtx);
1696 }
1697 sc->flags |= FATM_STAT_INUSE;
1698
1699 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
1700
1701 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1702 if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
1703 sc->istats.cmd_queue_full++;
1704 return (EIO);
1705 }
1706 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
1707
1708 q->error = 0;
1709 q->cb = fatm_getstat_complete;
1710 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1711 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1712
1713 bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
1714 BUS_DMASYNC_PREREAD);
1715
1716 WRITE4(sc, q->q.card + FATMOC_GSTAT_BUF,
1717 sc->sadi_mem.paddr);
1718 BARRIER_W(sc);
1719 WRITE4(sc, q->q.card + FATMOC_OP,
1720 FATM_OP_REQUEST_STATS | FATM_OP_INTERRUPT_SEL);
1721 BARRIER_W(sc);
1722
1723 /*
1724 * Wait for the command to complete
1725 */
1726 error = msleep(&sc->sadi_mem, &sc->mtx, PZERO | PCATCH,
1727 "fatm_stat", hz);
1728
1729 switch (error) {
1730
1731 case EWOULDBLOCK:
1732 error = EIO;
1733 break;
1734
1735 case ERESTART:
1736 error = EINTR;
1737 break;
1738
1739 case 0:
1740 bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
1741 BUS_DMASYNC_POSTREAD);
1742 error = q->error;
1743 break;
1744 }
1745
1746 /*
1747 * Swap statistics
1748 */
1749 if (q->error == 0) {
1750 u_int i;
1751 uint32_t *p = (uint32_t *)sc->sadi_mem.mem;
1752
1753 for (i = 0; i < sizeof(struct fatm_stats) / sizeof(uint32_t);
1754 i++, p++)
1755 *p = be32toh(*p);
1756 }
1757
1758 return (error);
1759}
1760
1761/*
1762 * Create a copy of a single mbuf. It can have either internal or
1763 * external data, it may have a packet header. External data is really
1764 * copied, so the new buffer is writeable.
1765 */
1766static struct mbuf *
1767copy_mbuf(struct mbuf *m)
1768{
1769 struct mbuf *new;
1770
1771 MGET(new, M_DONTWAIT, MT_DATA);
1772 if (new == NULL)
1773 return (NULL);
1774
1775 if (m->m_flags & M_PKTHDR) {
1776 M_MOVE_PKTHDR(new, m);
1777 if (m->m_len > MHLEN)
1778 MCLGET(new, M_WAIT);
1779 } else {
1780 if (m->m_len > MLEN)
1781 MCLGET(new, M_WAIT);
1782 }
1783
1784 bcopy(m->m_data, new->m_data, m->m_len);
1785 new->m_len = m->m_len;
1786 new->m_flags &= ~M_RDONLY;
1787
1788 return (new);
1789}
1790
1791/*
1792 * All segments must have a four byte aligned buffer address and a four
1793 * byte aligned length. Step through an mbuf chain and check these conditions.
1794 * If the buffer address is not aligned and this is a normal mbuf, move
1795 * the data down. Else make a copy of the mbuf with aligned data.
1796 * If the buffer length is not aligned steel data from the next mbuf.
1797 * We don't need to check whether this has more than one external reference,
1798 * because steeling data doesn't change the external cluster.
1799 * If the last mbuf is not aligned, fill with zeroes.
1800 *
1801 * Return packet length (well we should have this in the packet header),
1802 * but be careful not to count the zero fill at the end.
1803 *
1804 * If fixing fails free the chain and zero the pointer.
1805 *
1806 * We assume, that aligning the virtual address also aligns the mapped bus
1807 * address.
1808 */
1809static u_int
1810fatm_fix_chain(struct fatm_softc *sc, struct mbuf **mp)
1811{
1812 struct mbuf *m = *mp, *prev = NULL, *next, *new;
1813 u_int mlen = 0, fill = 0;
1814 int first, off;
1815 u_char *d, *cp;
1816
1817 do {
1818 next = m->m_next;
1819
1820 if ((uintptr_t)mtod(m, void *) % 4 != 0 ||
1821 (m->m_len % 4 != 0 && next)) {
1822 /*
1823 * Needs fixing
1824 */
1825 first = (m == *mp);
1826
1827 d = mtod(m, u_char *);
1828 if ((off = (uintptr_t)(void *)d % 4) != 0) {
1829 if (M_WRITABLE(m)) {
1830 sc->istats.fix_addr_copy++;
1831 bcopy(d, d - off, m->m_len);
1832 m->m_data = (caddr_t)(d - off);
1833 } else {
1834 if ((new = copy_mbuf(m)) == NULL) {
1835 sc->istats.fix_addr_noext++;
1836 goto fail;
1837 }
1838 sc->istats.fix_addr_ext++;
1839 if (prev)
1840 prev->m_next = new;
1841 new->m_next = next;
1842 m_free(m);
1843 m = new;
1844 }
1845 }
1846
1847 if ((off = m->m_len % 4) != 0) {
1848 if (!M_WRITABLE(m)) {
1849 if ((new = copy_mbuf(m)) == NULL) {
1850 sc->istats.fix_len_noext++;
1851 goto fail;
1852 }
1853 sc->istats.fix_len_copy++;
1854 if (prev)
1855 prev->m_next = new;
1856 new->m_next = next;
1857 m_free(m);
1858 m = new;
1859 } else
1860 sc->istats.fix_len++;
1861 d = mtod(m, u_char *) + m->m_len;
1862 off = 4 - off;
1863 while (off) {
1864 if (next == NULL) {
1865 *d++ = 0;
1866 fill++;
1867 } else if (next->m_len == 0) {
1868 sc->istats.fix_empty++;
1869 next = m_free(next);
1870 continue;
1871 } else {
1872 cp = mtod(next, u_char *);
1873 *d++ = *cp++;
1874 next->m_len--;
1875 next->m_data = (caddr_t)cp;
1876 }
1877 off--;
1878 m->m_len++;
1879 }
1880 }
1881
1882 if (first)
1883 *mp = m;
1884 }
1885
1886 mlen += m->m_len;
1887 prev = m;
1888 } while ((m = next) != NULL);
1889
1890 return (mlen - fill);
1891
1892 fail:
1893 m_freem(*mp);
1894 *mp = NULL;
1895 return (0);
1896}
1897
1898/*
1899 * The helper function is used to load the computed physical addresses
1900 * into the transmit descriptor.
1901 */
1902static void
1903fatm_tpd_load(void *varg, bus_dma_segment_t *segs, int nsegs,
1904 bus_size_t mapsize, int error)
1905{
1906 struct tpd *tpd = varg;
1907
1908 if (error)
1909 return;
1910
1911 KASSERT(nsegs <= TPD_EXTENSIONS + TXD_FIXED, ("too many segments"));
1912
1913 tpd->spec = 0;
1914 while (nsegs--) {
1915 H_SETDESC(tpd->segment[tpd->spec].buffer, segs->ds_addr);
1916 H_SETDESC(tpd->segment[tpd->spec].length, segs->ds_len);
1917 tpd->spec++;
1918 segs++;
1919 }
1920}
1921
1922/*
1923 * Start output.
1924 *
1925 * Note, that we update the internal statistics without the lock here.
1926 */
1927static int
1928fatm_tx(struct fatm_softc *sc, struct mbuf *m, struct card_vcc *vc, u_int mlen)
1929{
1930 struct txqueue *q;
1931 u_int nblks;
1932 int error, aal, nsegs;
1933 struct tpd *tpd;
1934
1935 /*
1936 * Get a queue element.
1937 * If there isn't one - try to drain the transmit queue
1938 * We used to sleep here if that doesn't help, but we
1939 * should not sleep here, because we are called with locks.
1940 */
1941 q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.head);
1942
1943 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1944 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
1945 fatm_intr_drain_tx(sc);
1946 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1947 if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
1948 if (sc->retry_tx) {
1949 sc->istats.tx_retry++;
1950 IF_PREPEND(&sc->ifp->if_snd, m);
1951 return (1);
1952 }
1953 sc->istats.tx_queue_full++;
1954 m_freem(m);
1955 return (0);
1956 }
1957 sc->istats.tx_queue_almost_full++;
1958 }
1959
1960 tpd = q->q.ioblk;
1961
1962 m->m_data += sizeof(struct atm_pseudohdr);
1963 m->m_len -= sizeof(struct atm_pseudohdr);
1964
1965#ifdef ENABLE_BPF
1966 if (!(vc->param.flags & ATMIO_FLAG_NG) &&
1967 vc->param.aal == ATMIO_AAL_5 &&
1968 (vc->param.flags & ATM_PH_LLCSNAP))
1969 BPF_MTAP(sc->ifp, m);
1970#endif
1971
1972 /* map the mbuf */
1973 error = bus_dmamap_load_mbuf(sc->tx_tag, q->map, m,
1974 fatm_tpd_load, tpd, BUS_DMA_NOWAIT);
1975 if(error) {
1976 sc->ifp->if_oerrors++;
1977 if_printf(sc->ifp, "mbuf loaded error=%d\n", error);
1978 m_freem(m);
1979 return (0);
1980 }
1981 nsegs = tpd->spec;
1982
1983 bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_PREWRITE);
1984
1985 /*
1986 * OK. Now go and do it.
1987 */
1988 aal = (vc->param.aal == ATMIO_AAL_5) ? 5 : 0;
1989
1990 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1991 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1992 q->m = m;
1993
1994 /*
1995 * If the transmit queue is almost full, schedule a
1996 * transmit interrupt so that transmit descriptors can
1997 * be recycled.
1998 */
1999 H_SETDESC(tpd->spec, TDX_MKSPEC((sc->txcnt >=
2000 (4 * FATM_TX_QLEN) / 5), aal, nsegs, mlen));
2001 H_SETDESC(tpd->atm_header, TDX_MKHDR(vc->param.vpi,
2002 vc->param.vci, 0, 0));
2003
2004 if (vc->param.traffic == ATMIO_TRAFFIC_UBR)
2005 H_SETDESC(tpd->stream, 0);
2006 else {
2007 u_int i;
2008
2009 for (i = 0; i < RATE_TABLE_SIZE; i++)
2010 if (rate_table[i].cell_rate < vc->param.tparam.pcr)
2011 break;
2012 if (i > 0)
2013 i--;
2014 H_SETDESC(tpd->stream, rate_table[i].ratio);
2015 }
2016 H_SYNCQ_PREWRITE(&sc->txq_mem, tpd, TPD_SIZE);
2017
2018 nblks = TDX_SEGS2BLKS(nsegs);
2019
2020 DBG(sc, XMIT, ("XMIT: mlen=%d spec=0x%x nsegs=%d blocks=%d",
2021 mlen, le32toh(tpd->spec), nsegs, nblks));
2022
2023 WRITE4(sc, q->q.card + 0, q->q.card_ioblk | nblks);
2024 BARRIER_W(sc);
2025
2026 sc->txcnt++;
2027 sc->ifp->if_opackets++;
2028 vc->obytes += m->m_pkthdr.len;
2029 vc->opackets++;
2030
2031 NEXT_QUEUE_ENTRY(sc->txqueue.head, FATM_TX_QLEN);
2032
2033 return (0);
2034}
2035
2036static void
2037fatm_start(struct ifnet *ifp)
2038{
2039 struct atm_pseudohdr aph;
2040 struct fatm_softc *sc;
2041 struct mbuf *m;
2042 u_int mlen, vpi, vci;
2043 struct card_vcc *vc;
2044
2045 sc = ifp->if_softc;
2046
2047 while (1) {
2048 IF_DEQUEUE(&ifp->if_snd, m);
2049 if (m == NULL)
2050 break;
2051
2052 /*
2053 * Loop through the mbuf chain and compute the total length
2054 * of the packet. Check that all data pointer are
2055 * 4 byte aligned. If they are not, call fatm_mfix to
2056 * fix that problem. This comes more or less from the
2057 * en driver.
2058 */
2059 mlen = fatm_fix_chain(sc, &m);
2060 if (m == NULL)
2061 continue;
2062
2063 if (m->m_len < sizeof(struct atm_pseudohdr) &&
2064 (m = m_pullup(m, sizeof(struct atm_pseudohdr))) == NULL)
2065 continue;
2066
2067 aph = *mtod(m, struct atm_pseudohdr *);
2068 mlen -= sizeof(struct atm_pseudohdr);
2069
2070 if (mlen == 0) {
2071 m_freem(m);
2072 continue;
2073 }
2074 if (mlen > FATM_MAXPDU) {
2075 sc->istats.tx_pdu2big++;
2076 m_freem(m);
2077 continue;
2078 }
2079
2080 vci = ATM_PH_VCI(&aph);
2081 vpi = ATM_PH_VPI(&aph);
2082
2083 /*
2084 * From here on we need the softc
2085 */
2086 FATM_LOCK(sc);
2087 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2088 FATM_UNLOCK(sc);
2089 m_freem(m);
2090 break;
2091 }
2092 if (!VC_OK(sc, vpi, vci) || (vc = sc->vccs[vci]) == NULL ||
2093 !(vc->vflags & FATM_VCC_OPEN)) {
2094 FATM_UNLOCK(sc);
2095 m_freem(m);
2096 continue;
2097 }
2098 if (fatm_tx(sc, m, vc, mlen)) {
2099 FATM_UNLOCK(sc);
2100 break;
2101 }
2102 FATM_UNLOCK(sc);
2103 }
2104}
2105
2106/*
2107 * VCC managment
2108 *
2109 * This may seem complicated. The reason for this is, that we need an
2110 * asynchronuous open/close for the NATM VCCs because our ioctl handler
2111 * is called with the radix node head of the routing table locked. Therefor
2112 * we cannot sleep there and wait for the open/close to succeed. For this
2113 * reason we just initiate the operation from the ioctl.
2114 */
2115
2116/*
2117 * Command the card to open/close a VC.
2118 * Return the queue entry for waiting if we are succesful.
2119 */
2120static struct cmdqueue *
2121fatm_start_vcc(struct fatm_softc *sc, u_int vpi, u_int vci, uint32_t cmd,
2122 u_int mtu, void (*func)(struct fatm_softc *, struct cmdqueue *))
2123{
2124 struct cmdqueue *q;
2125
2126 q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
2127
2128 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2129 if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
2130 sc->istats.cmd_queue_full++;
2131 return (NULL);
2132 }
2133 NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
2134
2135 q->error = 0;
2136 q->cb = func;
2137 H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
2138 H_SYNCSTAT_PREWRITE(sc, q->q.statp);
2139
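	/*
	 * Write the operands before the opcode; the barriers make sure
	 * the card cannot see the opcode before its arguments are stable.
	 */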
2140 WRITE4(sc, q->q.card + FATMOC_ACTIN_VPVC, MKVPVC(vpi, vci));
2141 BARRIER_W(sc);
2142 WRITE4(sc, q->q.card + FATMOC_ACTIN_MTU, mtu);
2143 BARRIER_W(sc);
2144 WRITE4(sc, q->q.card + FATMOC_OP, cmd);
2145 BARRIER_W(sc);
2146
2147 return (q);
2148}
2149
2150/*
2151 * The VC has been opened/closed and somebody has been waiting for this.
2152 * Wake the waiter up.
2153 */
2154static void
2155fatm_cmd_complete(struct fatm_softc *sc, struct cmdqueue *q)
2156{
2157
2158 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2159 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2160 sc->istats.get_stat_errors++;
2161 q->error = EIO;
2162 }
2163 wakeup(q);
2164}
2165
2166/*
2167 * Open complete
2168 */
2169static void
2170fatm_open_finish(struct fatm_softc *sc, struct card_vcc *vc)
2171{
2172 vc->vflags &= ~FATM_VCC_TRY_OPEN;
2173 vc->vflags |= FATM_VCC_OPEN;
2174
2175 if (vc->vflags & FATM_VCC_REOPEN) {
2176 vc->vflags &= ~FATM_VCC_REOPEN;
2177 return;
2178 }
2179
2180 /* Inform management if this is not an NG
2181 * VCC or if it is an NG PVC. */
2182 if (!(vc->param.flags & ATMIO_FLAG_NG) ||
2183 (vc->param.flags & ATMIO_FLAG_PVC))
2184 ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), 0, vc->param.vci, 1);
2185}
2186
2187/*
2188 * The VC that we have tried to open asynchronously has been opened.
2189 */
2190static void
2191fatm_open_complete(struct fatm_softc *sc, struct cmdqueue *q)
2192{
2193 u_int vci;
2194 struct card_vcc *vc;
2195
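	/*
	 * The queue entry's ACTIN register still holds the VPVC operand
	 * of the command; read it back to find the affected VC.
	 */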
2196 vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
2197 vc = sc->vccs[vci];
2198 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2199 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2200 sc->istats.get_stat_errors++;
2201 sc->vccs[vci] = NULL;
2202 uma_zfree(sc->vcc_zone, vc);
2203 if_printf(sc->ifp, "opening VCI %u failed\n", vci);
2204 return;
2205 }
2206 fatm_open_finish(sc, vc);
2207}
2208
2209/*
2210 * Wait on the queue entry until the VCC is opened/closed.
2211 */
2212static int
2213fatm_waitvcc(struct fatm_softc *sc, struct cmdqueue *q)
2214{
2215 int error;
2216
2217 /*
2218 * Wait for the command to complete; time out after one second
2219 */
2220 error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_vci", hz);
2221
2222 if (error != 0)
2223 return (error);
2224 return (q->error);
2225}
2226
2227/*
2228 * Start to open a VCC. This just initiates the operation.
2229 */
2230static int
2231fatm_open_vcc(struct fatm_softc *sc, struct atmio_openvcc *op)
2232{
2233 int error;
2234 struct card_vcc *vc;
2235
2236 /*
2237 * Check parameters
2238 */
2239 if ((op->param.flags & ATMIO_FLAG_NOTX) &&
2240 (op->param.flags & ATMIO_FLAG_NORX))
2241 return (EINVAL);
2242
2243 if (!VC_OK(sc, op->param.vpi, op->param.vci))
2244 return (EINVAL);
2245 if (op->param.aal != ATMIO_AAL_0 && op->param.aal != ATMIO_AAL_5)
2246 return (EINVAL);
2247
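	/*
	 * Allocate the per-VC structure before taking the lock; it is
	 * freed again below if the open does not go through.
	 */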
2248 vc = uma_zalloc(sc->vcc_zone, M_NOWAIT | M_ZERO);
2249 if (vc == NULL)
2250 return (ENOMEM);
2251
2252 error = 0;
2253
2254 FATM_LOCK(sc);
2255 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2256 error = EIO;
2257 goto done;
2258 }
2259 if (sc->vccs[op->param.vci] != NULL) {
2260 error = EBUSY;
2261 goto done;
2262 }
2263 vc->param = op->param;
2264 vc->rxhand = op->rxhand;
2265
2266 switch (op->param.traffic) {
2267
2268 case ATMIO_TRAFFIC_UBR:
2269 break;
2270
2271 case ATMIO_TRAFFIC_CBR:
2272 if (op->param.tparam.pcr == 0 ||
2273 op->param.tparam.pcr > IFP2IFATM(sc->ifp)->mib.pcr) {
2274 error = EINVAL;
2275 goto done;
2276 }
2277 break;
2278
2279 default:
2280 error = EINVAL;
2281 goto done;
2282 }
2283 vc->ibytes = vc->obytes = 0;
2284 vc->ipackets = vc->opackets = 0;
2285
2286 vc->vflags = FATM_VCC_TRY_OPEN;
2287 sc->vccs[op->param.vci] = vc;
2288 sc->open_vccs++;
2289
2290 error = fatm_load_vc(sc, vc);
2291 if (error != 0) {
2292 sc->vccs[op->param.vci] = NULL;
2293 sc->open_vccs--;
2294 goto done;
2295 }
2296
2297 /* don't free below */
2298 vc = NULL;
2299
2300 done:
2301 FATM_UNLOCK(sc);
2302 if (vc != NULL)
2303 uma_zfree(sc->vcc_zone, vc);
2304 return (error);
2305}
2306
2307/*
2308 * Try to initialize the given VC
2309 */
2310static int
2311fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc)
2312{
2313 uint32_t cmd;
2314 struct cmdqueue *q;
2315 int error;
2316
2317 /* Build the command word: buffer strategy 0, AAL type (0 or 5) in bits 8-15 */
2318 cmd = FATM_OP_ACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL | (0 << 16);
2319 if (vc->param.aal == ATMIO_AAL_0)
2320 cmd |= (0 << 8);
2321 else
2322 cmd |= (5 << 8);
2323
2324 q = fatm_start_vcc(sc, vc->param.vpi, vc->param.vci, cmd, 1,
2325 (vc->param.flags & ATMIO_FLAG_ASYNC) ?
2326 fatm_open_complete : fatm_cmd_complete);
2327 if (q == NULL)
2328 return (EIO);
2329
2330 if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) {
2331 error = fatm_waitvcc(sc, q);
2332 if (error != 0)
2333 return (error);
2334 fatm_open_finish(sc, vc);
2335 }
2336 return (0);
2337}
2338
2339/*
2340 * Finish close
2341 */
2342static void
2343fatm_close_finish(struct fatm_softc *sc, struct card_vcc *vc)
2344{
2345 /* Inform management if this is not an NG
2346 * VCC or if it is an NG PVC. */
2347 if (!(vc->param.flags & ATMIO_FLAG_NG) ||
2348 (vc->param.flags & ATMIO_FLAG_PVC))
2349 ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), 0, vc->param.vci, 0);
2350
2351 sc->vccs[vc->param.vci] = NULL;
2352 sc->open_vccs--;
2353
2354 uma_zfree(sc->vcc_zone, vc);
2355}
2356
2357/*
2358 * The VC has been closed.
2359 */
2360static void
2361fatm_close_complete(struct fatm_softc *sc, struct cmdqueue *q)
2362{
2363 u_int vci;
2364 struct card_vcc *vc;
2365
2366 vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
2367 vc = sc->vccs[vci];
2368 H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2369 if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2370 sc->istats.get_stat_errors++;
2371 /* keep the VCC in that state */
2372 if_printf(sc->ifp, "closing VCI %u failed\n", vci);
2373 return;
2374 }
2375
2376 fatm_close_finish(sc, vc);
2377}
2378
2379/*
2380 * Initiate closing a VCC
2381 */
2382static int
2383fatm_close_vcc(struct fatm_softc *sc, struct atmio_closevcc *cl)
2384{
2385 int error;
2386 struct cmdqueue *q;
2387 struct card_vcc *vc;
2388
2389 if (!VC_OK(sc, cl->vpi, cl->vci))
2390 return (EINVAL);
2391
2392 error = 0;
2393
2394 FATM_LOCK(sc);
2395 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2396 error = EIO;
2397 goto done;
2398 }
2399 vc = sc->vccs[cl->vci];
2400 if (vc == NULL || !(vc->vflags & (FATM_VCC_OPEN | FATM_VCC_TRY_OPEN))) {
2401 error = ENOENT;
2402 goto done;
2403 }
2404
2405 q = fatm_start_vcc(sc, cl->vpi, cl->vci,
2406 FATM_OP_DEACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL, 1,
2407 (vc->param.flags & ATMIO_FLAG_ASYNC) ?
2408 fatm_close_complete : fatm_cmd_complete);
2409 if (q == NULL) {
2410 error = EIO;
2411 goto done;
2412 }
2413
2414 vc->vflags &= ~(FATM_VCC_OPEN | FATM_VCC_TRY_OPEN);
2415 vc->vflags |= FATM_VCC_TRY_CLOSE;
2416
2417 if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) {
2418 error = fatm_waitvcc(sc, q);
2419 if (error != 0)
2420 goto done;
2421
2422 fatm_close_finish(sc, vc);
2423 }
2424
2425 done:
2426 FATM_UNLOCK(sc);
2427 return (error);
2428}
2429
2430/*
2431 * IOCTL handler
2432 */
2433static int
2434fatm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t arg)
2435{
2436 int error;
2437 struct fatm_softc *sc = ifp->if_softc;
2438 struct ifaddr *ifa = (struct ifaddr *)arg;
2439 struct ifreq *ifr = (struct ifreq *)arg;
2440 struct atmio_closevcc *cl = (struct atmio_closevcc *)arg;
2441 struct atmio_openvcc *op = (struct atmio_openvcc *)arg;
2442 struct atmio_vcctable *vtab;
2443
2444 error = 0;
2445 switch (cmd) {
2446
2447 case SIOCATMOPENVCC: /* kernel internal use */
2448 error = fatm_open_vcc(sc, op);
2449 break;
2450
2451 case SIOCATMCLOSEVCC: /* kernel internal use */
2452 error = fatm_close_vcc(sc, cl);
2453 break;
2454
2455 case SIOCSIFADDR:
2456 FATM_LOCK(sc);
2457 ifp->if_flags |= IFF_UP;
2458 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2459 fatm_init_locked(sc);
2460 switch (ifa->ifa_addr->sa_family) {
2461#ifdef INET
2462 case AF_INET:
2463 case AF_INET6:
2464 ifa->ifa_rtrequest = atm_rtrequest;
2465 break;
2466#endif
2467 default:
2468 break;
2469 }
2470 FATM_UNLOCK(sc);
2471 break;
2472
2473 case SIOCSIFFLAGS:
2474 FATM_LOCK(sc);
2475 if (ifp->if_flags & IFF_UP) {
2476 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2477 fatm_init_locked(sc);
2478 }
2479 } else {
2480 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2481 fatm_stop(sc);
2482 }
2483 }
2484 FATM_UNLOCK(sc);
2485 break;
2486
2487 case SIOCGIFMEDIA:
2488 case SIOCSIFMEDIA:
2489 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2490 error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
2491 else
2492 error = EINVAL;
2493 break;
2494
2495 case SIOCATMGVCCS:
2496 /* return a snapshot of the VCC table to user space */
2497 vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
2498 FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 1);
2499 error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
2500 vtab->count * sizeof(vtab->vccs[0]));
2501 free(vtab, M_DEVBUF);
2502 break;
2503
2504 case SIOCATMGETVCCS: /* internal netgraph use */
2505 vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
2506 FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 0);
2507 if (vtab == NULL) {
2508 error = ENOMEM;
2509 break;
2510 }
2511 *(void **)arg = vtab;
2512 break;
2513
2514 default:
2515 DBG(sc, IOCTL, ("+++ cmd=%08lx arg=%p", cmd, arg));
2516 error = EINVAL;
2517 break;
2518 }
2519
2520 return (error);
2521}
2522
2523/*
2524 * Detach from the interface and free all resources allocated during
2525 * initialization and later.
2526 */
2527static int
2528fatm_detach(device_t dev)
2529{
2530 u_int i;
2531 struct rbuf *rb;
2532 struct fatm_softc *sc;
2533 struct txqueue *tx;
2534
2535 sc = device_get_softc(dev);
2536
2537 if (device_is_alive(dev)) {
2538 FATM_LOCK(sc);
2539 fatm_stop(sc);
2540 utopia_detach(&sc->utopia);
2541 FATM_UNLOCK(sc);
2542 atm_ifdetach(sc->ifp); /* XXX race */
2543 }
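	/*
	 * Drain the watchdog callout without holding the lock so that a
	 * callout that is already running can finish.
	 */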
2544 callout_drain(&sc->watchdog_timer);
2545
2546 if (sc->ih != NULL)
2547 bus_teardown_intr(dev, sc->irqres, sc->ih);
2548
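	/*
	 * Receive buffers still on the used list were never handed back
	 * by the card; unload and free them so their maps can be
	 * destroyed below.
	 */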
2549 while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
2550 if_printf(sc->ifp, "rbuf %p still in use!\n", rb);
2551 bus_dmamap_unload(sc->rbuf_tag, rb->map);
2552 m_freem(rb->m);
2553 LIST_REMOVE(rb, link);
2554 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
2555 }
2556
2557 if (sc->txqueue.chunk != NULL) {
2558 for (i = 0; i < FATM_TX_QLEN; i++) {
2559 tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
2560 bus_dmamap_destroy(sc->tx_tag, tx->map);
2561 }
2562 }
2563
2564 while ((rb = LIST_FIRST(&sc->rbuf_free)) != NULL) {
2565 bus_dmamap_destroy(sc->rbuf_tag, rb->map);
2566 LIST_REMOVE(rb, link);
2567 }
2568
2569 if (sc->rbufs != NULL)
2570 free(sc->rbufs, M_DEVBUF);
2571 if (sc->vccs != NULL) {
2572 for (i = 0; i < FORE_MAX_VCC + 1; i++)
2573 if (sc->vccs[i] != NULL) {
2574 uma_zfree(sc->vcc_zone, sc->vccs[i]);
2575 sc->vccs[i] = NULL;
2576 }
2577 free(sc->vccs, M_DEVBUF);
2578 }
2579 if (sc->vcc_zone != NULL)
2580 uma_zdestroy(sc->vcc_zone);
2581
2582 if (sc->l1queue.chunk != NULL)
2583 free(sc->l1queue.chunk, M_DEVBUF);
2584 if (sc->s1queue.chunk != NULL)
2585 free(sc->s1queue.chunk, M_DEVBUF);
2586 if (sc->rxqueue.chunk != NULL)
2587 free(sc->rxqueue.chunk, M_DEVBUF);
2588 if (sc->txqueue.chunk != NULL)
2589 free(sc->txqueue.chunk, M_DEVBUF);
2590 if (sc->cmdqueue.chunk != NULL)
2591 free(sc->cmdqueue.chunk, M_DEVBUF);
2592
2593 destroy_dma_memory(&sc->reg_mem);
2594 destroy_dma_memory(&sc->sadi_mem);
2595 destroy_dma_memory(&sc->prom_mem);
2596#ifdef TEST_DMA_SYNC
2597 destroy_dma_memoryX(&sc->s1q_mem);
2598 destroy_dma_memoryX(&sc->l1q_mem);
2599 destroy_dma_memoryX(&sc->rxq_mem);
2600 destroy_dma_memoryX(&sc->txq_mem);
2601 destroy_dma_memoryX(&sc->stat_mem);
2602#endif
2603
2604 if (sc->tx_tag != NULL)
2605 if (bus_dma_tag_destroy(sc->tx_tag))
2606 printf("tx DMA tag busy!\n");
2607
2608 if (sc->rbuf_tag != NULL)
2609 if (bus_dma_tag_destroy(sc->rbuf_tag))
2610 printf("rbuf DMA tag busy!\n");
2611
2612 if (sc->parent_dmat != NULL)
2613 if (bus_dma_tag_destroy(sc->parent_dmat))
2614 printf("parent DMA tag busy!\n");
2615
2616 if (sc->irqres != NULL)
2617 bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irqres);
2618
2619 if (sc->memres != NULL)
2620 bus_release_resource(dev, SYS_RES_MEMORY,
2621 sc->memid, sc->memres);
2622
2623 (void)sysctl_ctx_free(&sc->sysctl_ctx);
2624
2625 cv_destroy(&sc->cv_stat);
2626 cv_destroy(&sc->cv_regs);
2627
2628 mtx_destroy(&sc->mtx);
2629
2630 if_free(sc->ifp);
2631
2632 return (0);
2633}
2634
2635/*
2636 * Sysctl handler for the internal statistics
2637 */
2638static int
2639fatm_sysctl_istats(SYSCTL_HANDLER_ARGS)
2640{
2641 struct fatm_softc *sc = arg1;
2642 u_long *ret;
2643 int error;
2644
2645 ret = malloc(sizeof(sc->istats), M_TEMP, M_WAITOK);
2646
2647 FATM_LOCK(sc);
2648 bcopy(&sc->istats, ret, sizeof(sc->istats));
2649 FATM_UNLOCK(sc);
2650
2651 error = SYSCTL_OUT(req, ret, sizeof(sc->istats));
2652 free(ret, M_TEMP);
2653
2654 return (error);
2655}
2656
2657/*
2658 * Sysctl handler for card statistics.
2659 * Note: reading these statistics destroys the PHY statistics.
2660 */
2661static int
2662fatm_sysctl_stats(SYSCTL_HANDLER_ARGS)
2663{
2664 struct fatm_softc *sc = arg1;
2665 int error;
2666 const struct fatm_stats *s;
2667 u_long *ret;
2668 u_int i;
2669
2670 ret = malloc(sizeof(u_long) * FATM_NSTATS, M_TEMP, M_WAITOK);
2671
2672 FATM_LOCK(sc);
2673
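	/*
	 * fatm_getstat() marks the sadi_mem buffer in use and lets the
	 * card DMA its statistics into it; flatten the counters into a
	 * u_long array for SYSCTL_OUT.
	 */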
2674 if ((error = fatm_getstat(sc)) == 0) {
2675 s = sc->sadi_mem.mem;
2676 i = 0;
2677 ret[i++] = s->phy_4b5b.crc_header_errors;
2678 ret[i++] = s->phy_4b5b.framing_errors;
2679 ret[i++] = s->phy_oc3.section_bip8_errors;
2680 ret[i++] = s->phy_oc3.path_bip8_errors;
2681 ret[i++] = s->phy_oc3.line_bip24_errors;
2682 ret[i++] = s->phy_oc3.line_febe_errors;
2683 ret[i++] = s->phy_oc3.path_febe_errors;
2684 ret[i++] = s->phy_oc3.corr_hcs_errors;
2685 ret[i++] = s->phy_oc3.ucorr_hcs_errors;
2686 ret[i++] = s->atm.cells_transmitted;
2687 ret[i++] = s->atm.cells_received;
2688 ret[i++] = s->atm.vpi_bad_range;
2689 ret[i++] = s->atm.vpi_no_conn;
2690 ret[i++] = s->atm.vci_bad_range;
2691 ret[i++] = s->atm.vci_no_conn;
2692 ret[i++] = s->aal0.cells_transmitted;
2693 ret[i++] = s->aal0.cells_received;
2694 ret[i++] = s->aal0.cells_dropped;
2695 ret[i++] = s->aal4.cells_transmitted;
2696 ret[i++] = s->aal4.cells_received;
2697 ret[i++] = s->aal4.cells_crc_errors;
2698 ret[i++] = s->aal4.cels_protocol_errors;
2699 ret[i++] = s->aal4.cells_dropped;
2700 ret[i++] = s->aal4.cspdus_transmitted;
2701 ret[i++] = s->aal4.cspdus_received;
2702 ret[i++] = s->aal4.cspdus_protocol_errors;
2703 ret[i++] = s->aal4.cspdus_dropped;
2704 ret[i++] = s->aal5.cells_transmitted;
2705 ret[i++] = s->aal5.cells_received;
2706 ret[i++] = s->aal5.congestion_experienced;
2707 ret[i++] = s->aal5.cells_dropped;
2708 ret[i++] = s->aal5.cspdus_transmitted;
2709 ret[i++] = s->aal5.cspdus_received;
2710 ret[i++] = s->aal5.cspdus_crc_errors;
2711 ret[i++] = s->aal5.cspdus_protocol_errors;
2712 ret[i++] = s->aal5.cspdus_dropped;
2713 ret[i++] = s->aux.small_b1_failed;
2714 ret[i++] = s->aux.large_b1_failed;
2715 ret[i++] = s->aux.small_b2_failed;
2716 ret[i++] = s->aux.large_b2_failed;
2717 ret[i++] = s->aux.rpd_alloc_failed;
2718 ret[i++] = s->aux.receive_carrier;
2719 }
2720 /* mark the statistics buffer free and wake any waiter */
2721 sc->flags &= ~FATM_STAT_INUSE;
2722 cv_signal(&sc->cv_stat);
2723
2724 FATM_UNLOCK(sc);
2725
2726 if (error == 0)
2727 error = SYSCTL_OUT(req, ret, sizeof(u_long) * FATM_NSTATS);
2728 free(ret, M_TEMP);
2729
2730 return (error);
2731}
2732
2733#define MAXDMASEGS 32		/* maximum number of DMA segments */
2734
2735/*
2736 * Attach to the device.
2737 *
2738 * We assume that there is a global lock (Giant in this case) that protects
2739 * multiple threads from entering this function at the same time.
2740 */
2741static int
2742fatm_attach(device_t dev)
2743{
2744 struct ifnet *ifp;
2745 struct fatm_softc *sc;
2746 int unit;
2747 uint16_t cfg;
2748 int error = 0;
2749 struct rbuf *rb;
2750 u_int i;
2751 struct txqueue *tx;
2752
2753 sc = device_get_softc(dev);
2754 unit = device_get_unit(dev);
2755
2756 ifp = sc->ifp = if_alloc(IFT_ATM);
2757 if (ifp == NULL) {
2758 error = ENOSPC;
2759 goto fail;
2760 }
2761
2762 IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_PCA200E;
2763 IFP2IFATM(sc->ifp)->mib.serial = 0;
2764 IFP2IFATM(sc->ifp)->mib.hw_version = 0;
2765 IFP2IFATM(sc->ifp)->mib.sw_version = 0;
2766 IFP2IFATM(sc->ifp)->mib.vpi_bits = 0;
2767 IFP2IFATM(sc->ifp)->mib.vci_bits = FORE_VCIBITS;
2768 IFP2IFATM(sc->ifp)->mib.max_vpcs = 0;
2769 IFP2IFATM(sc->ifp)->mib.max_vccs = FORE_MAX_VCC;
2770 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN;
2771 IFP2IFATM(sc->ifp)->phy = &sc->utopia;
2772
2773 LIST_INIT(&sc->rbuf_free);
2774 LIST_INIT(&sc->rbuf_used);
2775
2776 /*
2777 * Initialize mutex and condition variables.
2778 */
2779 mtx_init(&sc->mtx, device_get_nameunit(dev),
2780 MTX_NETWORK_LOCK, MTX_DEF);
2781
2782 cv_init(&sc->cv_stat, "fatm_stat");
2783 cv_init(&sc->cv_regs, "fatm_regs");
2784
2785 sysctl_ctx_init(&sc->sysctl_ctx);
2786 callout_init_mtx(&sc->watchdog_timer, &sc->mtx, 0);
2787
2788 /*
2789 * Make the sysctl tree
2790 */
2791 if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
2792 SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
2793 device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
2794 goto fail;
2795
2796 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2797 OID_AUTO, "istats", CTLFLAG_RD, sc, 0, fatm_sysctl_istats,
2798 "LU", "internal statistics") == NULL)
2799 goto fail;
2800
2801 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2802 OID_AUTO, "stats", CTLFLAG_RD, sc, 0, fatm_sysctl_stats,
2803 "LU", "card statistics") == NULL)
2804 goto fail;
2805
2806 if (SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2807 OID_AUTO, "retry_tx", CTLFLAG_RW, &sc->retry_tx, 0,
2808 "retry flag") == NULL)
2809 goto fail;
2810
2811#ifdef FATM_DEBUG
2812 if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2813 OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "debug flags")
2814 == NULL)
2815 goto fail;
2816 sc->debug = FATM_DEBUG;
2817#endif
2818
2819 /*
2820 * Network subsystem stuff
2821 */
2822 ifp->if_softc = sc;
2823 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2824 ifp->if_flags = IFF_SIMPLEX;
2825 ifp->if_ioctl = fatm_ioctl;
2826 ifp->if_start = fatm_start;
2827 ifp->if_init = fatm_init;
2828 ifp->if_linkmib = &IFP2IFATM(sc->ifp)->mib;
2829 ifp->if_linkmiblen = sizeof(IFP2IFATM(sc->ifp)->mib);
2830
2831 /*
2832 * Enable memory access and bus mastering
2833 */
2834 cfg = pci_read_config(dev, PCIR_COMMAND, 2);
2835 cfg |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
2836 pci_write_config(dev, PCIR_COMMAND, cfg, 2);
2837
2838 /*
2839 * Map memory
2840 */
2841 cfg = pci_read_config(dev, PCIR_COMMAND, 2);
2842 if (!(cfg & PCIM_CMD_MEMEN)) {
2843 if_printf(ifp, "failed to enable memory mapping\n");
2844 error = ENXIO;
2845 goto fail;
2846 }
2847 sc->memid = 0x10;
2848 sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->memid,
2849 RF_ACTIVE);
2850 if (sc->memres == NULL) {
2851 if_printf(ifp, "could not map memory\n");
2852 error = ENXIO;
2853 goto fail;
2854 }
2855 sc->memh = rman_get_bushandle(sc->memres);
2856 sc->memt = rman_get_bustag(sc->memres);
2857
2858 /*
2859 * Convert the endianness of slave accesses
2860 */
2861 cfg = pci_read_config(dev, FATM_PCIR_MCTL, 1);
2862 cfg |= FATM_PCIM_SWAB;
2863 pci_write_config(dev, FATM_PCIR_MCTL, cfg, 1);
2864
2865 /*
2866 * Allocate interrupt (activate at the end)
2867 */
2868 sc->irqid = 0;
2869 sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
2870 RF_SHAREABLE | RF_ACTIVE);
2871 if (sc->irqres == NULL) {
2872 if_printf(ifp, "could not allocate irq\n");
2873 error = ENXIO;
2874 goto fail;
2875 }
2876
2877 /*
2878 * Allocate the parent DMA tag. It simply holds the overall
2879 * restrictions for the controller (and the PCI bus) and is never
2880 * used for actual mappings.
2881 */
2882 if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
2883 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2884 NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, MAXDMASEGS,
2885 BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
2886 &sc->parent_dmat)) {
2887 if_printf(ifp, "could not allocate parent DMA tag\n");
2888 error = ENOMEM;
2889 goto fail;
2890 }
2891
2892 /*
2893 * Allocate the receive buffer DMA tag. This tag must map at most
2894 * an mbuf cluster.
2895 */
2896 if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
2897 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2898 NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
2899 NULL, NULL, &sc->rbuf_tag)) {
2900 if_printf(ifp, "could not allocate rbuf DMA tag\n");
2901 error = ENOMEM;
2902 goto fail;
2903 }
2904
2905 /*
2906 * Allocate the transmit DMA tag. We must add 1 because the
2907 * rounded-up PDU can be 65536 bytes long.
2908 */
2909 if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
2910 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2911 NULL, NULL,
2912 FATM_MAXPDU + 1, TPD_EXTENSIONS + TXD_FIXED, MCLBYTES, 0,
2913 NULL, NULL, &sc->tx_tag)) {
2914 if_printf(ifp, "could not allocate tx DMA tag\n");
2915 error = ENOMEM;
2916 goto fail;
2917 }
2918
2919 /*
2920 * Allocate DMAable memory.
2921 */
2922 sc->stat_mem.size = sizeof(uint32_t) * (FATM_CMD_QLEN + FATM_TX_QLEN
2923 + FATM_RX_QLEN + SMALL_SUPPLY_QLEN + LARGE_SUPPLY_QLEN);
2924 sc->stat_mem.align = 4;
2925
2926 sc->txq_mem.size = FATM_TX_QLEN * TPD_SIZE;
2927 sc->txq_mem.align = 32;
2928
2929 sc->rxq_mem.size = FATM_RX_QLEN * RPD_SIZE;
2930 sc->rxq_mem.align = 32;
2931
2932 sc->s1q_mem.size = SMALL_SUPPLY_QLEN *
2933 BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE);
2934 sc->s1q_mem.align = 32;
2935
2936 sc->l1q_mem.size = LARGE_SUPPLY_QLEN *
2937 BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE);
2938 sc->l1q_mem.align = 32;
2939
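	/*
	 * The 32-byte alignment of the queue areas presumably matches the
	 * card's descriptor alignment requirements; the status words only
	 * need their natural 4-byte alignment.
	 */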
2940#ifdef TEST_DMA_SYNC
2941 if ((error = alloc_dma_memoryX(sc, "STATUS", &sc->stat_mem)) != 0 ||
2942 (error = alloc_dma_memoryX(sc, "TXQ", &sc->txq_mem)) != 0 ||
2943 (error = alloc_dma_memoryX(sc, "RXQ", &sc->rxq_mem)) != 0 ||
2944 (error = alloc_dma_memoryX(sc, "S1Q", &sc->s1q_mem)) != 0 ||
2945 (error = alloc_dma_memoryX(sc, "L1Q", &sc->l1q_mem)) != 0)
2946 goto fail;
2947#else
2948 if ((error = alloc_dma_memory(sc, "STATUS", &sc->stat_mem)) != 0 ||
2949 (error = alloc_dma_memory(sc, "TXQ", &sc->txq_mem)) != 0 ||
2950 (error = alloc_dma_memory(sc, "RXQ", &sc->rxq_mem)) != 0 ||
2951 (error = alloc_dma_memory(sc, "S1Q", &sc->s1q_mem)) != 0 ||
2952 (error = alloc_dma_memory(sc, "L1Q", &sc->l1q_mem)) != 0)
2953 goto fail;
2954#endif
2955
2956 sc->prom_mem.size = sizeof(struct prom);
2957 sc->prom_mem.align = 32;
2958 if ((error = alloc_dma_memory(sc, "PROM", &sc->prom_mem)) != 0)
2959 goto fail;
2960
2961 sc->sadi_mem.size = sizeof(struct fatm_stats);
2962 sc->sadi_mem.align = 32;
2963 if ((error = alloc_dma_memory(sc, "STATISTICS", &sc->sadi_mem)) != 0)
2964 goto fail;
2965
2966 sc->reg_mem.size = sizeof(uint32_t) * FATM_NREGS;
2967 sc->reg_mem.align = 32;
2968 if ((error = alloc_dma_memory(sc, "REGISTERS", &sc->reg_mem)) != 0)
2969 goto fail;
2970
2971 /*
2972 * Allocate queues
2973 */
2974 sc->cmdqueue.chunk = malloc(FATM_CMD_QLEN * sizeof(struct cmdqueue),
2975 M_DEVBUF, M_ZERO | M_WAITOK);
2976 sc->txqueue.chunk = malloc(FATM_TX_QLEN * sizeof(struct txqueue),
2977 M_DEVBUF, M_ZERO | M_WAITOK);
2978 sc->rxqueue.chunk = malloc(FATM_RX_QLEN * sizeof(struct rxqueue),
2979 M_DEVBUF, M_ZERO | M_WAITOK);
2980 sc->s1queue.chunk = malloc(SMALL_SUPPLY_QLEN * sizeof(struct supqueue),
2981 M_DEVBUF, M_ZERO | M_WAITOK);
2982 sc->l1queue.chunk = malloc(LARGE_SUPPLY_QLEN * sizeof(struct supqueue),
2983 M_DEVBUF, M_ZERO | M_WAITOK);
2984
2985 sc->vccs = malloc((FORE_MAX_VCC + 1) * sizeof(sc->vccs[0]),
2986 M_DEVBUF, M_ZERO | M_WAITOK);
2987 sc->vcc_zone = uma_zcreate("FATM vccs", sizeof(struct card_vcc),
2988 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
2989 if (sc->vcc_zone == NULL) {
2990 error = ENOMEM;
2991 goto fail;
2992 }
2993
2994 /*
2995 * Allocate memory for the receive buffer headers. The total number
2996 * of headers should probably also include the maximum number of
2997 * buffers on the receive queue.
2998 */
2999 sc->rbuf_total = SMALL_POOL_SIZE + LARGE_POOL_SIZE;
3000 sc->rbufs = malloc(sc->rbuf_total * sizeof(struct rbuf),
3001 M_DEVBUF, M_ZERO | M_WAITOK);
3002
3003 /*
3004 * Put all rbuf headers on the free list and create DMA maps.
3005 */
3006 for (rb = sc->rbufs, i = 0; i < sc->rbuf_total; i++, rb++) {
3007 if ((error = bus_dmamap_create(sc->rbuf_tag, 0, &rb->map))) {
3008 if_printf(sc->ifp, "creating rx map: %d\n",
3009 error);
3010 goto fail;
3011 }
3012 LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
3013 }
3014
3015 /*
3016 * Create DMA maps for transmission. In case of an error, free the
3017 * DMA maps allocated so far, because on some architectures maps are
3018 * NULL and we could not distinguish between a failure and a NULL
3019 * map in the detach routine.
3020 */
3021 for (i = 0; i < FATM_TX_QLEN; i++) {
3022 tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
3023 if ((error = bus_dmamap_create(sc->tx_tag, 0, &tx->map))) {
3024 if_printf(sc->ifp, "creating tx map: %d\n",
3025 error);
3026 while (i > 0) {
3027 tx = GET_QUEUE(sc->txqueue, struct txqueue,
3028 i - 1);
3029 bus_dmamap_destroy(sc->tx_tag, tx->map);
3030 i--;
3031 }
3032 goto fail;
3033 }
3034 }
3035
3036 utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->mtx,
3037 &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
3038 &fatm_utopia_methods);
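	/*
	 * Do not reset the PHY (the firmware presumably has set it up
	 * already) and poll for carrier instead of relying on interrupts.
	 */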
3039 sc->utopia.flags |= UTP_FL_NORESET | UTP_FL_POLL_CARRIER;
3040
3041 /*
3042 * Attach the interface
3043 */
3044 atm_ifattach(ifp);
3045 ifp->if_snd.ifq_maxlen = 512;
3046
3047#ifdef ENABLE_BPF
3048 bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
3049#endif
3050
3051 error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET | INTR_MPSAFE,
3052 NULL, fatm_intr, sc, &sc->ih);
3053 if (error) {
3054 if_printf(ifp, "couldn't setup irq\n");
3055 goto fail;
3056 }
3057
3058 fail:
3059 if (error)
3060 fatm_detach(dev);
3061
3062 return (error);
3063}
3064
3065#if defined(FATM_DEBUG) && 0
3066static void
3067dump_s1_queue(struct fatm_softc *sc)
3068{
3069 int i;
3070 struct supqueue *q;
3071
3072 for(i = 0; i < SMALL_SUPPLY_QLEN; i++) {
3073 q = GET_QUEUE(sc->s1queue, struct supqueue, i);
3074 printf("%2d: card=%x(%x,%x) stat=%x\n", i,
3075 q->q.card,
3076 READ4(sc, q->q.card),
3077 READ4(sc, q->q.card + 4),
3078 *q->q.statp);
3079 }
3080}
3081#endif
3082
3083/*
3084 * Driver infrastructure.
3085 */
3086static device_method_t fatm_methods[] = {
3087 DEVMETHOD(device_probe, fatm_probe),
3088 DEVMETHOD(device_attach, fatm_attach),
3089 DEVMETHOD(device_detach, fatm_detach),
3090 { 0, 0 }
3091};
3092static driver_t fatm_driver = {
3093 "fatm",
3094 fatm_methods,
3095 sizeof(struct fatm_softc),
3096};
3097
3098DRIVER_MODULE(fatm, pci, fatm_driver, fatm_devclass, 0, 0);