/*
 * Copyright (c) 2001-2003
 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
 * 	All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Author: Hartmut Brandt <harti@freebsd.org>
 *
 * Fore PCA200E driver for NATM
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/fatm/if_fatm.c 118538 2003-08-06 12:37:50Z harti $");

#include "opt_inet.h"
#include "opt_natm.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <sys/condvar.h>
#include <vm/uma.h>

#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_atm.h>
#include <net/route.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_atm.h>
#endif

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <pci/pcireg.h>
#include <pci/pcivar.h>

#include <dev/utopia/utopia.h>

#include <dev/fatm/if_fatmreg.h>
#include <dev/fatm/if_fatmvar.h>

#include <dev/fatm/firmware.h>

devclass_t fatm_devclass;

static const struct {
	uint16_t	vid;
	uint16_t	did;
	const char	*name;
} fatm_devs[] = {
	{ 0x1127, 0x300,
	  "FORE PCA200E" },
	{ 0, 0, NULL }
};

static const struct rate {
	uint32_t	ratio;
	uint32_t	cell_rate;
} rate_table[] = {
#include <dev/fatm/if_fatm_rate.h>
};
#define RATE_TABLE_SIZE (sizeof(rate_table) / sizeof(rate_table[0]))

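/*
 * Illustrative sketch only (not used by the driver): the table maps cell
 * rates to rate-control ratios. fatm_tx() below scans it linearly for the
 * first entry whose cell rate is below the requested PCR and then backs up
 * one entry; a hypothetical helper doing the same lookup could look like
 * this.
 */
#ifdef notdef
static uint32_t
rate_to_ratio(uint32_t pcr)
{
	u_int i;

	/* find the first entry whose cell rate is below the PCR */
	for (i = 0; i < RATE_TABLE_SIZE; i++)
		if (rate_table[i].cell_rate < pcr)
			break;
	if (i > 0)
		i--;		/* back up to the last rate >= PCR */
	return (rate_table[i].ratio);
}
#endif
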
SYSCTL_DECL(_hw_atm);

MODULE_DEPEND(fatm, utopia, 1, 1, 1);

static int	fatm_utopia_readregs(struct ifatm *, u_int, uint8_t *, u_int *);
static int	fatm_utopia_writereg(struct ifatm *, u_int, u_int, u_int);

static const struct utopia_methods fatm_utopia_methods = {
	fatm_utopia_readregs,
	fatm_utopia_writereg
};

#define VC_OK(SC, VPI, VCI)						\
	(((VPI) & ~((1 << (SC)->ifatm.mib.vpi_bits) - 1)) == 0 &&	\
	 (VCI) != 0 && ((VCI) & ~((1 << (SC)->ifatm.mib.vci_bits) - 1)) == 0)

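/*
 * Worked example (the bit widths are illustrative, not taken from a real
 * card): with vpi_bits = 0 and vci_bits = 12, VC_OK() accepts only VPI 0
 * and VCIs 1...4095; VCI 0 is always rejected.
 */
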
/*
 * Probing is easy: step through the list of known vendor and device
 * ids and compare. If one is found, it's ours.
 */
static int
fatm_probe(device_t dev)
{
	int i;

	for (i = 0; fatm_devs[i].name; i++)
		if (pci_get_vendor(dev) == fatm_devs[i].vid &&
		    pci_get_device(dev) == fatm_devs[i].did) {
			device_set_desc(dev, fatm_devs[i].name);
			return (0);
		}
	return (ENXIO);
}

/*
 * Function called at completion of a SUNI writeregs/readregs command.
 * This is called from the interrupt handler while holding the softc lock.
 * We use the queue entry as the rendezvous point.
 */
static void
fatm_utopia_writeregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
{

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.suni_reg_errors++;
		q->error = EIO;
	}
	wakeup(q);
}

/*
 * Write a SUNI register. The bits that are 1 in mask are written from val
 * into register reg. We wait for the command to complete by sleeping on
 * the queue entry.
 *
 * We assume that we already hold the softc mutex.
 */
static int
fatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
{
	int error;
	struct cmdqueue *q;
	struct fatm_softc *sc;

	sc = ifatm->ifnet.if_softc;
	FATM_CHECKLOCK(sc);
	if (!(ifatm->ifnet.if_flags & IFF_RUNNING))
		return (EIO);

	/* get queue element and fill it */
	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
		sc->istats.cmd_queue_full++;
		return (EIO);
	}
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = fatm_utopia_writeregs_complete;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, 0);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP,
	    FATM_MAKE_SETOC3(reg, val, mask) | FATM_OP_INTERRUPT_SEL);
	BARRIER_W(sc);

	/*
	 * Wait for the command to complete
	 */
	error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_setreg", hz);

	switch (error) {

	  case EWOULDBLOCK:
		error = EIO;
		break;

	  case ERESTART:
		error = EINTR;
		break;

	  case 0:
		error = q->error;
		break;
	}

	return (error);
}

/*
 * Function called at completion of a SUNI readregs command.
 * This is called from the interrupt handler while holding the softc lock.
 * We use reg_mem as the rendezvous point.
 */
static void
fatm_utopia_readregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
{

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.suni_reg_errors++;
		q->error = EIO;
	}
	wakeup(&sc->reg_mem);
}

/*
 * Read SUNI registers
 *
 * We use a preallocated buffer to read the registers. Therefore we need
 * to protect against multiple threads trying to read registers. We do
 * this with a condition variable and a flag. We wait for the command to
 * complete by sleeping on the register memory.
 *
 * We assume that we already hold the softc mutex.
 */
static int
fatm_utopia_readregs_internal(struct fatm_softc *sc)
{
	int error, i;
	uint32_t *ptr;
	struct cmdqueue *q;

	/* get the buffer */
	for (;;) {
		if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
			return (EIO);
		if (!(sc->flags & FATM_REGS_INUSE))
			break;
		cv_wait(&sc->cv_regs, &sc->mtx);
	}
	sc->flags |= FATM_REGS_INUSE;

	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
		sc->istats.cmd_queue_full++;
		/* declare buffer to be free again */
		sc->flags &= ~FATM_REGS_INUSE;
		cv_signal(&sc->cv_regs);
		return (EIO);
	}
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = fatm_utopia_readregs_complete;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map, BUS_DMASYNC_PREREAD);

	WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, sc->reg_mem.paddr);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP,
	    FATM_OP_OC3_GET_REG | FATM_OP_INTERRUPT_SEL);
	BARRIER_W(sc);

	/*
	 * Wait for the command to complete
	 */
	error = msleep(&sc->reg_mem, &sc->mtx, PZERO | PCATCH,
	    "fatm_getreg", hz);

	switch (error) {

	  case EWOULDBLOCK:
		error = EIO;
		break;

	  case ERESTART:
		error = EINTR;
		break;

	  case 0:
		bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map,
		    BUS_DMASYNC_POSTREAD);
		error = q->error;
		break;
	}

	if (error != 0) {
		/* declare buffer to be free */
		sc->flags &= ~FATM_REGS_INUSE;
		cv_signal(&sc->cv_regs);
		return (error);
	}

	/* swap if needed */
	ptr = (uint32_t *)sc->reg_mem.mem;
	for (i = 0; i < FATM_NREGS; i++)
		ptr[i] = le32toh(ptr[i]) & 0xff;

	return (0);
}

/*
 * Read SUNI registers for the SUNI module.
 *
 * We assume that we already hold the mutex.
 */
static int
fatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *valp, u_int *np)
{
	int err;
	int i;
	struct fatm_softc *sc;

	if (reg >= FATM_NREGS)
		return (EINVAL);
	if (reg + *np > FATM_NREGS)
		*np = FATM_NREGS - reg;
	sc = ifatm->ifnet.if_softc;
	FATM_CHECKLOCK(sc);

	err = fatm_utopia_readregs_internal(sc);
	if (err != 0)
		return (err);

	for (i = 0; i < *np; i++)
		valp[i] = ((uint32_t *)sc->reg_mem.mem)[reg + i];

	/* declare buffer to be free */
	sc->flags &= ~FATM_REGS_INUSE;
	cv_signal(&sc->cv_regs);

	return (0);
}

/*
 * Check whether the heart is beating. We remember the last heart beat and
 * compare it to the current one. If it appears stuck 10 times in a row,
 * we have a problem.
 *
 * Assume we hold the lock.
 */
static void
fatm_check_heartbeat(struct fatm_softc *sc)
{
	uint32_t h;

	FATM_CHECKLOCK(sc);

	h = READ4(sc, FATMO_HEARTBEAT);
	DBG(sc, BEAT, ("heartbeat %08x", h));

	if (sc->stop_cnt == 10)
		return;

	if (h == sc->heartbeat) {
		if (++sc->stop_cnt == 10) {
			log(LOG_ERR, "i960 stopped???\n");
			WRITE4(sc, FATMO_HIMR, 1);
		}
		return;
	}

	sc->stop_cnt = 0;
	sc->heartbeat = h;
}

/*
 * Ensure that the heart is still beating.
 */
static void
fatm_watchdog(struct ifnet *ifp)
{
	struct fatm_softc *sc = ifp->if_softc;

	FATM_LOCK(sc);
	if (ifp->if_flags & IFF_RUNNING) {
		fatm_check_heartbeat(sc);
		ifp->if_timer = 5;
	}
	FATM_UNLOCK(sc);
}

/*
 * Hard reset the i960 on the board. This is done by initializing registers,
 * clearing interrupts and waiting for the selftest to finish. It is not
 * clear whether all these barriers are actually needed.
 *
 * Assumes that we hold the lock.
 */
static int
fatm_reset(struct fatm_softc *sc)
{
	int w;
	uint32_t val;

	FATM_CHECKLOCK(sc);

	WRITE4(sc, FATMO_APP_BASE, FATMO_COMMON_ORIGIN);
	BARRIER_W(sc);

	WRITE4(sc, FATMO_UART_TO_960, XMIT_READY);
	BARRIER_W(sc);

	WRITE4(sc, FATMO_UART_TO_HOST, XMIT_READY);
	BARRIER_W(sc);

	WRITE4(sc, FATMO_BOOT_STATUS, COLD_START);
	BARRIER_W(sc);

	WRITE1(sc, FATMO_HCR, FATM_HCR_RESET);
	BARRIER_W(sc);

	DELAY(1000);

	WRITE1(sc, FATMO_HCR, 0);
	BARRIER_RW(sc);

	DELAY(1000);

	for (w = 100; w; w--) {
		BARRIER_R(sc);
		val = READ4(sc, FATMO_BOOT_STATUS);
		switch (val) {
		  case SELF_TEST_OK:
			return (0);
		  case SELF_TEST_FAIL:
			return (EIO);
		}
		DELAY(1000);
	}
	return (EIO);
}

/*
 * Stop the card. Must be called WITH the lock held.
 * Reset, free transmit and receive buffers. Wake up everybody who may sleep.
 */
static void
fatm_stop(struct fatm_softc *sc)
{
	int i;
	struct cmdqueue *q;
	struct rbuf *rb;
	struct txqueue *tx;
	uint32_t stat;

	FATM_CHECKLOCK(sc);

	/* Stop the board */
	utopia_stop(&sc->utopia);
	(void)fatm_reset(sc);

	/* stop watchdog */
	sc->ifatm.ifnet.if_timer = 0;

	if (sc->ifatm.ifnet.if_flags & IFF_RUNNING) {
		sc->ifatm.ifnet.if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm,
		    sc->utopia.carrier == UTP_CARR_OK);

		/*
		 * Collect transmit mbufs, partial receive mbufs and
		 * supplied mbufs
		 */
		for (i = 0; i < FATM_TX_QLEN; i++) {
			tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
			if (tx->m) {
				bus_dmamap_unload(sc->tx_tag, tx->map);
				m_freem(tx->m);
				tx->m = NULL;
			}
		}

		/* Collect supplied mbufs */
		while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
			LIST_REMOVE(rb, link);
			bus_dmamap_unload(sc->rbuf_tag, rb->map);
			m_free(rb->m);
			rb->m = NULL;
			LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
		}

		/* Wake up any waiters */
		wakeup(&sc->sadi_mem);

		/* wakeup all threads waiting for STAT or REG buffers */
		cv_broadcast(&sc->cv_stat);
		cv_broadcast(&sc->cv_regs);

		sc->flags &= ~(FATM_STAT_INUSE | FATM_REGS_INUSE);

		/* wakeup all threads waiting on commands */
		for (i = 0; i < FATM_CMD_QLEN; i++) {
			q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, i);

			H_SYNCSTAT_POSTREAD(sc, q->q.statp);
			if ((stat = H_GETSTAT(q->q.statp)) != FATM_STAT_FREE) {
				H_SETSTAT(q->q.statp, stat | FATM_STAT_ERROR);
				H_SYNCSTAT_PREWRITE(sc, q->q.statp);
				wakeup(q);
			}
		}
		utopia_reset_media(&sc->utopia);
	}
	sc->small_cnt = sc->large_cnt = 0;

	/* Reset vcc info */
	if (sc->vccs != NULL) {
		for (i = 0; i < FORE_MAX_VCC + 1; i++)
			if (sc->vccs[i] != NULL) {
				uma_zfree(sc->vcc_zone, sc->vccs[i]);
				sc->vccs[i] = NULL;
			}
	}

	sc->open_vccs = 0;
}

/*
 * Load the firmware into the board and save the entry point.
 */
static uint32_t
firmware_load(struct fatm_softc *sc)
{
	struct firmware *fw = (struct firmware *)firmware;

	DBG(sc, INIT, ("loading - entry=%x", fw->entry));
	bus_space_write_region_4(sc->memt, sc->memh, fw->offset, firmware,
	    sizeof(firmware) / sizeof(firmware[0]));
	BARRIER_RW(sc);

	return (fw->entry);
}

/*
 * Read a character from the virtual UART. The availability of a character
 * is signaled by a non-zero value of the 32 bit register. We signal that
 * we have consumed the character by setting that register back to zero.
 */
static int
rx_getc(struct fatm_softc *sc)
{
	int w = 50;
	int c;

	while (w--) {
		c = READ4(sc, FATMO_UART_TO_HOST);
		BARRIER_RW(sc);
		if (c != 0) {
			WRITE4(sc, FATMO_UART_TO_HOST, 0);
			DBGC(sc, UART, ("%c", c & 0xff));
			return (c & 0xff);
		}
		DELAY(1000);
	}
	return (-1);
}

/*
 * Eat up characters from the board and stuff them in the bit-bucket.
 */
static void
rx_flush(struct fatm_softc *sc)
{
	int w = 10000;

	while (w-- && rx_getc(sc) >= 0)
		;
}

/*
 * Write a character to the card. The UART is available if the register
 * is zero.
 */
static int
tx_putc(struct fatm_softc *sc, u_char c)
{
	int w = 10;
	int c1;

	while (w--) {
		c1 = READ4(sc, FATMO_UART_TO_960);
		BARRIER_RW(sc);
		if (c1 == 0) {
			WRITE4(sc, FATMO_UART_TO_960, c | CHAR_AVAIL);
			DBGC(sc, UART, ("%c", c & 0xff));
			return (0);
		}
		DELAY(1000);
	}
	return (-1);
}

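/*
 * Illustrative sketch only (not part of the driver): sending a string over
 * the virtual UART pairs each tx_putc() with an rx_getc() to consume the
 * firmware's echo, exactly as fatm_start_firmware() does below. The helper
 * name is hypothetical.
 */
#ifdef notdef
static void
tx_puts(struct fatm_softc *sc, const char *s)
{

	while (*s != '\0') {
		tx_putc(sc, *s++);
		(void)rx_getc(sc);	/* eat the echoed character */
	}
}
#endif
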
/*
 * Start the firmware. This is done by issuing a 'go' command with
 * the hex entry address of the firmware. Then we wait for the self-test to
 * succeed.
 */
static int
fatm_start_firmware(struct fatm_softc *sc, uint32_t start)
{
	static char hex[] = "0123456789abcdef";
	u_int w, val;

	DBG(sc, INIT, ("starting"));
	rx_flush(sc);
	tx_putc(sc, '\r');
	DELAY(1000);

	rx_flush(sc);

	tx_putc(sc, 'g');
	(void)rx_getc(sc);
	tx_putc(sc, 'o');
	(void)rx_getc(sc);
	tx_putc(sc, ' ');
	(void)rx_getc(sc);

	tx_putc(sc, hex[(start >> 12) & 0xf]);
	(void)rx_getc(sc);
	tx_putc(sc, hex[(start >>  8) & 0xf]);
	(void)rx_getc(sc);
	tx_putc(sc, hex[(start >>  4) & 0xf]);
	(void)rx_getc(sc);
	tx_putc(sc, hex[(start >>  0) & 0xf]);
	(void)rx_getc(sc);

	tx_putc(sc, '\r');
	rx_flush(sc);

	for (w = 100; w; w--) {
		BARRIER_R(sc);
		val = READ4(sc, FATMO_BOOT_STATUS);
		switch (val) {
		  case CP_RUNNING:
			return (0);
		  case SELF_TEST_FAIL:
			return (EIO);
		}
		DELAY(1000);
	}
	return (EIO);
}

/*
 * Initialize one card and host queue.
 */
static void
init_card_queue(struct fatm_softc *sc, struct fqueue *queue, int qlen,
    size_t qel_size, size_t desc_size, cardoff_t off,
    u_char **statpp, uint32_t *cardstat, u_char *descp, uint32_t carddesc)
{
	struct fqelem *el = queue->chunk;

	while (qlen--) {
		el->card = off;
		off += 8;	/* size of card entry */

		el->statp = (uint32_t *)(*statpp);
		(*statpp) += sizeof(uint32_t);
		H_SETSTAT(el->statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, el->statp);

		WRITE4(sc, el->card + FATMOS_STATP, (*cardstat));
		(*cardstat) += sizeof(uint32_t);

		el->ioblk = descp;
		descp += desc_size;
		el->card_ioblk = carddesc;
		carddesc += desc_size;

		el = (struct fqelem *)((u_char *)el + qel_size);
	}
	queue->tail = queue->head = 0;
}

/*
 * Issue the initialize operation to the card, wait for completion and
 * initialize the on-board and host queue structures with offsets and
 * addresses.
 */
static int
fatm_init_cmd(struct fatm_softc *sc)
{
	int w, c;
	u_char *statp;
	uint32_t card_stat;
	u_int cnt;
	struct fqelem *el;
	cardoff_t off;

	DBG(sc, INIT, ("command"));
	WRITE4(sc, FATMO_ISTAT, 0);
	WRITE4(sc, FATMO_IMASK, 1);
	WRITE4(sc, FATMO_HLOGGER, 0);

	WRITE4(sc, FATMO_INIT + FATMOI_RECEIVE_TRESHOLD, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_NUM_CONNECT, FORE_MAX_VCC);
	WRITE4(sc, FATMO_INIT + FATMOI_CQUEUE_LEN, FATM_CMD_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_TQUEUE_LEN, FATM_TX_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_RQUEUE_LEN, FATM_RX_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_RPD_EXTENSION, RPD_EXTENSIONS);
	WRITE4(sc, FATMO_INIT + FATMOI_TPD_EXTENSION, TPD_EXTENSIONS);

	/*
	 * initialize buffer descriptors
	 */
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_QUEUE_LENGTH,
	    SMALL_SUPPLY_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_BUFFER_SIZE,
	    SMALL_BUFFER_LEN);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_POOL_SIZE,
	    SMALL_POOL_SIZE);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_SUPPLY_BLKSIZE,
	    SMALL_SUPPLY_BLKSIZE);

	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_QUEUE_LENGTH,
	    LARGE_SUPPLY_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_BUFFER_SIZE,
	    LARGE_BUFFER_LEN);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_POOL_SIZE,
	    LARGE_POOL_SIZE);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_SUPPLY_BLKSIZE,
	    LARGE_SUPPLY_BLKSIZE);

	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_QUEUE_LENGTH, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_BUFFER_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_POOL_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_SUPPLY_BLKSIZE, 0);

	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_QUEUE_LENGTH, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_BUFFER_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_POOL_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_SUPPLY_BLKSIZE, 0);

	/*
	 * Start the command
	 */
	BARRIER_W(sc);
	WRITE4(sc, FATMO_INIT + FATMOI_STATUS, FATM_STAT_PENDING);
	BARRIER_W(sc);
	WRITE4(sc, FATMO_INIT + FATMOI_OP, FATM_OP_INITIALIZE);
	BARRIER_W(sc);

	/*
	 * Busy wait for completion
	 */
	w = 100;
	while (w--) {
		c = READ4(sc, FATMO_INIT + FATMOI_STATUS);
		BARRIER_R(sc);
		if (c & FATM_STAT_COMPLETE)
			break;
		DELAY(1000);
	}

	if (!(c & FATM_STAT_COMPLETE) || (c & FATM_STAT_ERROR))
		return (EIO);

	/*
	 * Initialize the queues
	 */
	statp = sc->stat_mem.mem;
	card_stat = sc->stat_mem.paddr;

	/*
	 * Command queue. This is special in that it's on the card.
	 */
	el = sc->cmdqueue.chunk;
	off = READ4(sc, FATMO_COMMAND_QUEUE);
	DBG(sc, INIT, ("cmd queue=%x", off));
	for (cnt = 0; cnt < FATM_CMD_QLEN; cnt++) {
		el = &((struct cmdqueue *)sc->cmdqueue.chunk + cnt)->q;

		el->card = off;
		off += 32;		/* size of card structure */

		el->statp = (uint32_t *)statp;
		statp += sizeof(uint32_t);
		H_SETSTAT(el->statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, el->statp);

		WRITE4(sc, el->card + FATMOC_STATP, card_stat);
		card_stat += sizeof(uint32_t);
	}
	sc->cmdqueue.tail = sc->cmdqueue.head = 0;

	/*
	 * Now the other queues. These are in host memory.
	 */
	init_card_queue(sc, &sc->txqueue, FATM_TX_QLEN,
	    sizeof(struct txqueue), TPD_SIZE,
	    READ4(sc, FATMO_TRANSMIT_QUEUE),
	    &statp, &card_stat, sc->txq_mem.mem, sc->txq_mem.paddr);

	init_card_queue(sc, &sc->rxqueue, FATM_RX_QLEN,
	    sizeof(struct rxqueue), RPD_SIZE,
	    READ4(sc, FATMO_RECEIVE_QUEUE),
	    &statp, &card_stat, sc->rxq_mem.mem, sc->rxq_mem.paddr);

	init_card_queue(sc, &sc->s1queue, SMALL_SUPPLY_QLEN,
	    sizeof(struct supqueue), BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE),
	    READ4(sc, FATMO_SMALL_B1_QUEUE),
	    &statp, &card_stat, sc->s1q_mem.mem, sc->s1q_mem.paddr);

	init_card_queue(sc, &sc->l1queue, LARGE_SUPPLY_QLEN,
	    sizeof(struct supqueue), BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE),
	    READ4(sc, FATMO_LARGE_B1_QUEUE),
	    &statp, &card_stat, sc->l1q_mem.mem, sc->l1q_mem.paddr);

	sc->txcnt = 0;

	return (0);
}

/*
 * Read PROM. Called only from attach code. Here we spin because the interrupt
 * handler is not yet set up.
 */
static int
fatm_getprom(struct fatm_softc *sc)
{
	int i;
	struct prom *prom;
	struct cmdqueue *q;

	DBG(sc, INIT, ("reading prom"));
	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = NULL;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
	    BUS_DMASYNC_PREREAD);

	WRITE4(sc, q->q.card + FATMOC_GPROM_BUF, sc->prom_mem.paddr);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_GET_PROM_DATA);
	BARRIER_W(sc);

	for (i = 0; i < 1000; i++) {
		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) &
		    (FATM_STAT_COMPLETE | FATM_STAT_ERROR))
			break;
		DELAY(1000);
	}
	if (i == 1000) {
		if_printf(&sc->ifatm.ifnet, "getprom timeout\n");
		return (EIO);
	}
	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		if_printf(&sc->ifatm.ifnet, "getprom error\n");
		return (EIO);
	}
	H_SETSTAT(q->q.statp, FATM_STAT_FREE);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
	NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);

	bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
	    BUS_DMASYNC_POSTREAD);

#ifdef notdef
	{
		u_int i;
		u_char *ptr = (u_char *)sc->prom_mem.mem;

		printf("PROM: ");
		for (i = 0; i < sizeof(struct prom); i++)
			printf("%02x ", *ptr++);
		printf("\n");
	}
#endif

	prom = (struct prom *)sc->prom_mem.mem;

	bcopy(prom->mac + 2, sc->ifatm.mib.esi, 6);
	sc->ifatm.mib.serial = le32toh(prom->serial);
	sc->ifatm.mib.hw_version = le32toh(prom->version);
	sc->ifatm.mib.sw_version = READ4(sc, FATMO_FIRMWARE_RELEASE);

	if_printf(&sc->ifatm.ifnet, "ESI=%02x:%02x:%02x:%02x:%02x:%02x "
	    "serial=%u hw=0x%x sw=0x%x\n", sc->ifatm.mib.esi[0],
	    sc->ifatm.mib.esi[1], sc->ifatm.mib.esi[2], sc->ifatm.mib.esi[3],
	    sc->ifatm.mib.esi[4], sc->ifatm.mib.esi[5], sc->ifatm.mib.serial,
	    sc->ifatm.mib.hw_version, sc->ifatm.mib.sw_version);

	return (0);
}

/*
 * This is the callback function for bus_dmamap_load. We assume that we
 * have a 32-bit bus and so always have exactly one segment.
 */
static void
dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *ptr = (bus_addr_t *)arg;

	if (error != 0) {
		printf("%s: error=%d\n", __func__, error);
		return;
	}
	KASSERT(nsegs == 1, ("too many DMA segments"));
	KASSERT(segs[0].ds_addr <= 0xffffffff, ("DMA address too large %lx",
	    (u_long)segs[0].ds_addr));

	*ptr = segs[0].ds_addr;
}

/*
 * Allocate a chunk of DMA-able memory and map it.
 */
static int
alloc_dma_memory(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
{
	int error;

	mem->mem = NULL;

	if (bus_dma_tag_create(sc->parent_dmat, mem->align, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, mem->size, 1, BUS_SPACE_MAXSIZE_32BIT,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
		if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA tag\n",
		    nm);
		return (ENOMEM);
	}

	error = bus_dmamem_alloc(mem->dmat, &mem->mem, 0, &mem->map);
	if (error) {
		if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA memory: "
		    "%d\n", nm, error);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
	if (error) {
		if_printf(&sc->ifatm.ifnet, "could not load %s DMA memory: "
		    "%d\n", nm, error);
		bus_dmamem_free(mem->dmat, mem->mem, mem->map);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	DBG(sc, DMA, ("DMA %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
	    (u_long)mem->paddr, mem->size, mem->align));

	return (0);
}

#ifdef TEST_DMA_SYNC
static int
alloc_dma_memoryX(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
{
	int error;

	mem->mem = NULL;

	if (bus_dma_tag_create(NULL, mem->align, 0,
	    BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, mem->size, 1, mem->size,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
		if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA tag\n",
		    nm);
		return (ENOMEM);
	}

	mem->mem = contigmalloc(mem->size, M_DEVBUF, M_WAITOK,
	    BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR_32BIT, mem->align, 0);

	error = bus_dmamap_create(mem->dmat, 0, &mem->map);
	if (error) {
		if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA map: "
		    "%d\n", nm, error);
		contigfree(mem->mem, mem->size, M_DEVBUF);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
	if (error) {
		if_printf(&sc->ifatm.ifnet, "could not load %s DMA memory: "
		    "%d\n", nm, error);
		bus_dmamap_destroy(mem->dmat, mem->map);
		contigfree(mem->mem, mem->size, M_DEVBUF);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	DBG(sc, DMA, ("DMAX %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
	    (u_long)mem->paddr, mem->size, mem->align));

	printf("DMAX: %s V/P/S/Z %p/%lx/%x/%x\n", nm, mem->mem,
	    (u_long)mem->paddr, mem->size, mem->align);

	return (0);
}
#endif /* TEST_DMA_SYNC */

/*
 * Destroy all resources of a DMA-able memory chunk
 */
static void
destroy_dma_memory(struct fatm_mem *mem)
{
	if (mem->mem != NULL) {
		bus_dmamap_unload(mem->dmat, mem->map);
		bus_dmamem_free(mem->dmat, mem->mem, mem->map);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
	}
}

#ifdef TEST_DMA_SYNC
static void
destroy_dma_memoryX(struct fatm_mem *mem)
{
	if (mem->mem != NULL) {
		bus_dmamap_unload(mem->dmat, mem->map);
		bus_dmamap_destroy(mem->dmat, mem->map);
		contigfree(mem->mem, mem->size, M_DEVBUF);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
	}
}
#endif /* TEST_DMA_SYNC */

/*
 * Try to supply buffers to the card if there are free entries in the queues.
 */
static void
fatm_supply_small_buffers(struct fatm_softc *sc)
{
	int nblocks, nbufs;
	struct supqueue *q;
	struct rbd *bd;
	int i, j, error, cnt;
	struct mbuf *m;
	struct rbuf *rb;
	bus_addr_t phys;

	nbufs = max(4 * sc->open_vccs, 32);
	nbufs = min(nbufs, SMALL_POOL_SIZE);
	nbufs -= sc->small_cnt;

	nblocks = (nbufs + SMALL_SUPPLY_BLKSIZE - 1) / SMALL_SUPPLY_BLKSIZE;
	for (cnt = 0; cnt < nblocks; cnt++) {
		q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.head);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
			break;

		bd = (struct rbd *)q->q.ioblk;

		for (i = 0; i < SMALL_SUPPLY_BLKSIZE; i++) {
			if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
				if_printf(&sc->ifatm.ifnet, "out of rbufs\n");
				break;
			}
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				/* rb is still on the free list */
				break;
			}
			MH_ALIGN(m, SMALL_BUFFER_LEN);
			error = bus_dmamap_load(sc->rbuf_tag, rb->map,
			    m->m_data, SMALL_BUFFER_LEN, dmaload_helper,
			    &phys, BUS_DMA_NOWAIT);
			if (error) {
				if_printf(&sc->ifatm.ifnet,
				    "dmamap_load mbuf failed %d\n", error);
				m_freem(m);
				/* rb is still on the free list */
				break;
			}
			bus_dmamap_sync(sc->rbuf_tag, rb->map,
			    BUS_DMASYNC_PREREAD);

			LIST_REMOVE(rb, link);
			LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);

			rb->m = m;
			bd[i].handle = rb - sc->rbufs;
			H_SETDESC(bd[i].buffer, phys);
		}

		if (i < SMALL_SUPPLY_BLKSIZE) {
			for (j = 0; j < i; j++) {
				rb = sc->rbufs + bd[j].handle;
				bus_dmamap_unload(sc->rbuf_tag, rb->map);
				m_free(rb->m);
				rb->m = NULL;

				LIST_REMOVE(rb, link);
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
			}
			break;
		}
		H_SYNCQ_PREWRITE(&sc->s1q_mem, bd,
		    sizeof(struct rbd) * SMALL_SUPPLY_BLKSIZE);

		H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		WRITE4(sc, q->q.card, q->q.card_ioblk);
		BARRIER_W(sc);

		sc->small_cnt += SMALL_SUPPLY_BLKSIZE;

		NEXT_QUEUE_ENTRY(sc->s1queue.head, SMALL_SUPPLY_QLEN);
	}
}

/*
 * Try to supply buffers to the card if there are free entries in the queues.
 * We assume that all buffers are within the address space accessible by the
 * card (32-bit), so we don't need bounce buffers.
 */
static void
fatm_supply_large_buffers(struct fatm_softc *sc)
{
	int nbufs, nblocks, cnt;
	struct supqueue *q;
	struct rbd *bd;
	int i, j, error;
	struct mbuf *m;
	struct rbuf *rb;
	bus_addr_t phys;

	nbufs = max(4 * sc->open_vccs, 32);
	nbufs = min(nbufs, LARGE_POOL_SIZE);
	nbufs -= sc->large_cnt;

	nblocks = (nbufs + LARGE_SUPPLY_BLKSIZE - 1) / LARGE_SUPPLY_BLKSIZE;

	for (cnt = 0; cnt < nblocks; cnt++) {
		q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.head);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
			break;

		bd = (struct rbd *)q->q.ioblk;

		for (i = 0; i < LARGE_SUPPLY_BLKSIZE; i++) {
			if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
				if_printf(&sc->ifatm.ifnet, "out of rbufs\n");
				break;
			}
			if ((m = m_getcl(M_DONTWAIT, MT_DATA,
			    M_PKTHDR)) == NULL) {
				/* rb is still on the free list */
				break;
			}
			/* No MEXT_ALIGN */
			m->m_data += MCLBYTES - LARGE_BUFFER_LEN;
			error = bus_dmamap_load(sc->rbuf_tag, rb->map,
			    m->m_data, LARGE_BUFFER_LEN, dmaload_helper,
			    &phys, BUS_DMA_NOWAIT);
			if (error) {
				if_printf(&sc->ifatm.ifnet,
				    "dmamap_load mbuf failed %d\n", error);
				m_freem(m);
				/* rb is still on the free list */
				break;
			}

			bus_dmamap_sync(sc->rbuf_tag, rb->map,
			    BUS_DMASYNC_PREREAD);

			LIST_REMOVE(rb, link);
			LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);

			rb->m = m;
			bd[i].handle = rb - sc->rbufs;
			H_SETDESC(bd[i].buffer, phys);
		}

		if (i < LARGE_SUPPLY_BLKSIZE) {
			for (j = 0; j < i; j++) {
				rb = sc->rbufs + bd[j].handle;
				bus_dmamap_unload(sc->rbuf_tag, rb->map);
				m_free(rb->m);
				rb->m = NULL;

				LIST_REMOVE(rb, link);
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
			}
			break;
		}
		H_SYNCQ_PREWRITE(&sc->l1q_mem, bd,
		    sizeof(struct rbd) * LARGE_SUPPLY_BLKSIZE);

		H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
		WRITE4(sc, q->q.card, q->q.card_ioblk);
		BARRIER_W(sc);

		sc->large_cnt += LARGE_SUPPLY_BLKSIZE;

		NEXT_QUEUE_ENTRY(sc->l1queue.head, LARGE_SUPPLY_QLEN);
	}
}

/*
 * Actually start the card. The lock must be held here.
 * Reset the card, load and start the firmware, initialize the queues, read
 * the PROM and supply receive buffers to the card.
 */
static void
fatm_init_locked(struct fatm_softc *sc)
{
	struct rxqueue *q;
	int i, c;
	uint32_t start;

	DBG(sc, INIT, ("initialize"));
	if (sc->ifatm.ifnet.if_flags & IFF_RUNNING)
		fatm_stop(sc);

	/*
	 * Hard reset the board
	 */
	if (fatm_reset(sc))
		return;

	start = firmware_load(sc);
	if (fatm_start_firmware(sc, start) || fatm_init_cmd(sc) ||
	    fatm_getprom(sc)) {
		fatm_reset(sc);
		return;
	}

	/*
	 * Handle media
	 */
	c = READ4(sc, FATMO_MEDIA_TYPE);
	switch (c) {

	  case FORE_MT_TAXI_100:
		sc->ifatm.mib.media = IFM_ATM_TAXI_100;
		sc->ifatm.mib.pcr = 227273;
		break;

	  case FORE_MT_TAXI_140:
		sc->ifatm.mib.media = IFM_ATM_TAXI_140;
		sc->ifatm.mib.pcr = 318181;
		break;

	  case FORE_MT_UTP_SONET:
		sc->ifatm.mib.media = IFM_ATM_UTP_155;
		sc->ifatm.mib.pcr = 353207;
		break;

	  case FORE_MT_MM_OC3_ST:
	  case FORE_MT_MM_OC3_SC:
		sc->ifatm.mib.media = IFM_ATM_MM_155;
		sc->ifatm.mib.pcr = 353207;
		break;

	  case FORE_MT_SM_OC3_ST:
	  case FORE_MT_SM_OC3_SC:
		sc->ifatm.mib.media = IFM_ATM_SM_155;
		sc->ifatm.mib.pcr = 353207;
		break;

	  default:
		log(LOG_ERR, "fatm: unknown media type %d\n", c);
		sc->ifatm.mib.media = IFM_ATM_UNKNOWN;
		sc->ifatm.mib.pcr = 353207;
		break;
	}
	sc->ifatm.ifnet.if_baudrate = 53 * 8 * sc->ifatm.mib.pcr;
	utopia_init_media(&sc->utopia);

	/*
	 * Initialize the RBDs
	 */
	for (i = 0; i < FATM_RX_QLEN; i++) {
		q = GET_QUEUE(sc->rxqueue, struct rxqueue, i);
		WRITE4(sc, q->q.card + 0, q->q.card_ioblk);
	}
	BARRIER_W(sc);

	/*
	 * Supply buffers to the card
	 */
	fatm_supply_small_buffers(sc);
	fatm_supply_large_buffers(sc);

	/*
	 * Now set the flag that we are running
	 */
	sc->ifatm.ifnet.if_flags |= IFF_RUNNING;

	/*
	 * Start the watchdog timer
	 */
	sc->ifatm.ifnet.if_timer = 5;

	/* start SUNI */
	utopia_start(&sc->utopia);

	ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm,
	    sc->utopia.carrier == UTP_CARR_OK);

	DBG(sc, INIT, ("done"));
}

/*
 * This is the exported initialisation function.
 */
static void
fatm_init(void *p)
{
	struct fatm_softc *sc = p;

	FATM_LOCK(sc);
	fatm_init_locked(sc);
	FATM_UNLOCK(sc);
}

/************************************************************/
/*
 * The INTERRUPT handling
 */
/*
 * Check the command queue. If a command was completed, call the completion
 * function for that command.
 */
static void
fatm_intr_drain_cmd(struct fatm_softc *sc)
{
	struct cmdqueue *q;
	int stat;

	/*
	 * Drain command queue
	 */
	for (;;) {
		q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if (stat != FATM_STAT_COMPLETE &&
		    stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
		    stat != FATM_STAT_ERROR)
			break;

		(*q->cb)(sc, q);

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
	}
}

/*
 * Drain the small buffer supply queue.
 */
static void
fatm_intr_drain_small_buffers(struct fatm_softc *sc)
{
	struct supqueue *q;
	int stat;

	for (;;) {
		q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if ((stat & FATM_STAT_COMPLETE) == 0)
			break;
		if (stat & FATM_STAT_ERROR)
			log(LOG_ERR, "%s: status %x\n", __func__, stat);

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		NEXT_QUEUE_ENTRY(sc->s1queue.tail, SMALL_SUPPLY_QLEN);
	}
}

/*
 * Drain the large buffer supply queue.
 */
static void
fatm_intr_drain_large_buffers(struct fatm_softc *sc)
{
	struct supqueue *q;
	int stat;

	for (;;) {
		q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if ((stat & FATM_STAT_COMPLETE) == 0)
			break;
		if (stat & FATM_STAT_ERROR)
			log(LOG_ERR, "%s: status %x\n", __func__, stat);

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		NEXT_QUEUE_ENTRY(sc->l1queue.tail, LARGE_SUPPLY_QLEN);
	}
}

/*
 * Check the receive queue. Send any received PDU up the protocol stack
 * (except when there was an error or the VCI appears to be closed, in which
 * case the PDU is discarded).
 */
static void
fatm_intr_drain_rx(struct fatm_softc *sc)
{
	struct rxqueue *q;
	int stat, mlen;
	u_int i;
	uint32_t h;
	struct mbuf *last, *m0;
	struct rpd *rpd;
	struct rbuf *rb;
	u_int vci, vpi, pt;
	struct atm_pseudohdr aph;
	struct ifnet *ifp;
	struct card_vcc *vc;

	for (;;) {
		q = GET_QUEUE(sc->rxqueue, struct rxqueue, sc->rxqueue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if ((stat & FATM_STAT_COMPLETE) == 0)
			break;

		rpd = (struct rpd *)q->q.ioblk;
		H_SYNCQ_POSTREAD(&sc->rxq_mem, rpd, RPD_SIZE);

		rpd->nseg = le32toh(rpd->nseg);
		mlen = 0;
		m0 = last = NULL;
		for (i = 0; i < rpd->nseg; i++) {
			rb = sc->rbufs + rpd->segment[i].handle;
			if (m0 == NULL) {
				m0 = last = rb->m;
			} else {
				last->m_next = rb->m;
				last = rb->m;
			}
			last->m_next = NULL;
			if (last->m_flags & M_EXT)
				sc->large_cnt--;
			else
				sc->small_cnt--;
			bus_dmamap_sync(sc->rbuf_tag, rb->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->rbuf_tag, rb->map);
			rb->m = NULL;

			LIST_REMOVE(rb, link);
			LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);

			last->m_len = le32toh(rpd->segment[i].length);
			mlen += last->m_len;
		}

		m0->m_pkthdr.len = mlen;
		m0->m_pkthdr.rcvif = &sc->ifatm.ifnet;

		h = le32toh(rpd->atm_header);
		vpi = (h >> 20) & 0xff;
		vci = (h >> 4 ) & 0xffff;
		pt  = (h >> 1 ) & 0x7;

		/*
		 * Locate the VCC this packet belongs to
		 */
		if (!VC_OK(sc, vpi, vci))
			vc = NULL;
		else if ((vc = sc->vccs[vci]) == NULL ||
		    !(vc->vflags & FATM_VCC_OPEN)) {
			sc->istats.rx_closed++;
			vc = NULL;
		}

		DBG(sc, RCV, ("RCV: vc=%u.%u pt=%u mlen=%d %s", vpi, vci,
		    pt, mlen, vc == NULL ? "dropped" : ""));

		if (vc == NULL) {
			m_freem(m0);
		} else {
			ATM_PH_FLAGS(&aph) = vc->param.flags;
			ATM_PH_VPI(&aph) = vpi;
			ATM_PH_SETVCI(&aph, vci);

			ifp = &sc->ifatm.ifnet;
			ifp->if_ipackets++;

			vc->ipackets++;
			vc->ibytes += m0->m_pkthdr.len;

			atm_input(ifp, &aph, m0, vc->rxhand);
		}

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		WRITE4(sc, q->q.card, q->q.card_ioblk);
		BARRIER_W(sc);

		NEXT_QUEUE_ENTRY(sc->rxqueue.tail, FATM_RX_QLEN);
	}
}

/*
 * Check the transmit queue. Free the mbuf chains that we were transmitting.
 */
static void
fatm_intr_drain_tx(struct fatm_softc *sc)
{
	struct txqueue *q;
	int stat;

	/*
	 * Drain tx queue
	 */
	for (;;) {
		q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if (stat != FATM_STAT_COMPLETE &&
		    stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
		    stat != FATM_STAT_ERROR)
			break;

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tx_tag, q->map);

		m_freem(q->m);
		q->m = NULL;
		sc->txcnt--;

		NEXT_QUEUE_ENTRY(sc->txqueue.tail, FATM_TX_QLEN);
	}
}

/*
 * Interrupt handler
 */
static void
fatm_intr(void *p)
{
	struct fatm_softc *sc = (struct fatm_softc *)p;

	FATM_LOCK(sc);
	if (!READ4(sc, FATMO_PSR)) {
		FATM_UNLOCK(sc);
		return;
	}
	WRITE4(sc, FATMO_HCR, FATM_HCR_CLRIRQ);

	if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) {
		FATM_UNLOCK(sc);
		return;
	}
	fatm_intr_drain_cmd(sc);
	fatm_intr_drain_rx(sc);
	fatm_intr_drain_tx(sc);
	fatm_intr_drain_small_buffers(sc);
	fatm_intr_drain_large_buffers(sc);
	fatm_supply_small_buffers(sc);
	fatm_supply_large_buffers(sc);

	FATM_UNLOCK(sc);

	if (sc->retry_tx && _IF_QLEN(&sc->ifatm.ifnet.if_snd))
		(*sc->ifatm.ifnet.if_start)(&sc->ifatm.ifnet);
}

/*
 * Get device statistics. This must be called with the softc locked.
 * We use a preallocated buffer, so we need to protect this buffer.
 * We do this by using a condition variable and a flag. If the flag is set
 * the buffer is in use by one thread (one thread is executing a GETSTAT
 * card command). In this case all other threads that are trying to get
 * statistics block on that condition variable. When the thread finishes
 * using the buffer it resets the flag and signals the condition variable.
 * This will wake up the next thread that is waiting for the buffer. If the
 * interface is stopped the stopping function will broadcast the cv. All
 * threads will find that the interface has been stopped and return.
 *
 * Acquiring the buffer is done by the fatm_getstat() function. The freeing
 * must be done by the caller when it has finished using the buffer.
 */
static void
fatm_getstat_complete(struct fatm_softc *sc, struct cmdqueue *q)
{

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.get_stat_errors++;
		q->error = EIO;
	}
	wakeup(&sc->sadi_mem);
}

static int
fatm_getstat(struct fatm_softc *sc)
{
	int error;
	struct cmdqueue *q;

	/*
	 * Wait until either the interface is stopped or we can get the
	 * statistics buffer
	 */
	for (;;) {
		if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
			return (EIO);
		if (!(sc->flags & FATM_STAT_INUSE))
			break;
		cv_wait(&sc->cv_stat, &sc->mtx);
	}
	sc->flags |= FATM_STAT_INUSE;

	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
		sc->istats.cmd_queue_full++;
		return (EIO);
	}
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = fatm_getstat_complete;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
	    BUS_DMASYNC_PREREAD);

	WRITE4(sc, q->q.card + FATMOC_GSTAT_BUF,
	    sc->sadi_mem.paddr);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP,
	    FATM_OP_REQUEST_STATS | FATM_OP_INTERRUPT_SEL);
	BARRIER_W(sc);

	/*
	 * Wait for the command to complete
	 */
	error = msleep(&sc->sadi_mem, &sc->mtx, PZERO | PCATCH,
	    "fatm_stat", hz);

	switch (error) {

	  case EWOULDBLOCK:
		error = EIO;
		break;

	  case ERESTART:
		error = EINTR;
		break;

	  case 0:
		bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
		    BUS_DMASYNC_POSTREAD);
		error = q->error;
		break;
	}

	/*
	 * Swap statistics
	 */
	if (q->error == 0) {
		u_int i;
		uint32_t *p = (uint32_t *)sc->sadi_mem.mem;

		for (i = 0; i < sizeof(struct fatm_stats) / sizeof(uint32_t);
		    i++, p++)
			*p = be32toh(*p);
	}

	return (error);
}

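/*
 * Illustrative usage sketch (the surrounding caller is assumed, not taken
 * from this file): on return from fatm_getstat() the calling thread owns
 * the statistics buffer and must release it as described above.
 */
#ifdef notdef
	FATM_LOCK(sc);
	error = fatm_getstat(sc);
	if (error == 0) {
		/* ... read out sc->sadi_mem.mem (struct fatm_stats) ... */
	}
	/* declare buffer to be free */
	sc->flags &= ~FATM_STAT_INUSE;
	cv_signal(&sc->cv_stat);
	FATM_UNLOCK(sc);
#endif
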
/*
 * Create a copy of a single mbuf. It can have either internal or
 * external data, it may have a packet header. External data is really
 * copied, so the new buffer is writeable.
 */
static struct mbuf *
copy_mbuf(struct mbuf *m)
{
	struct mbuf *new;

	MGET(new, M_DONTWAIT, MT_DATA);
	if (new == NULL)
		return (NULL);

	if (m->m_flags & M_PKTHDR) {
		M_MOVE_PKTHDR(new, m);
		if (m->m_len > MHLEN) {
			MCLGET(new, M_TRYWAIT);
			if ((new->m_flags & M_EXT) == 0) {
				m_free(new);
				return (NULL);
			}
		}
	} else {
		if (m->m_len > MLEN) {
			MCLGET(new, M_TRYWAIT);
			if ((new->m_flags & M_EXT) == 0) {
				m_free(new);
				return (NULL);
			}
		}
	}

	bcopy(m->m_data, new->m_data, m->m_len);
	new->m_len = m->m_len;
	new->m_flags &= ~M_RDONLY;

	return (new);
}

/*
 * All segments must have a four byte aligned buffer address and a four
 * byte aligned length. Step through an mbuf chain and check these conditions.
 * If the buffer address is not aligned and this is a normal mbuf, move
 * the data down. Else make a copy of the mbuf with aligned data.
 * If the buffer length is not aligned steal data from the next mbuf.
 * We don't need to check whether this has more than one external reference,
 * because stealing data doesn't change the external cluster.
 * If the last mbuf is not aligned, fill with zeroes.
 *
 * Return the packet length (well, we should have this in the packet header),
 * but be careful not to count the zero fill at the end.
 *
 * If fixing fails free the chain and zero the pointer.
 *
 * We assume that aligning the virtual address also aligns the mapped bus
 * address.
 */
static u_int
fatm_fix_chain(struct fatm_softc *sc, struct mbuf **mp)
{
	struct mbuf *m = *mp, *prev = NULL, *next, *new;
	u_int mlen = 0, fill = 0;
	int first, off;
	u_char *d, *cp;

	do {
		next = m->m_next;

		if ((uintptr_t)mtod(m, void *) % 4 != 0 ||
		    (m->m_len % 4 != 0 && next)) {
			/*
			 * Needs fixing
			 */
			first = (m == *mp);

			d = mtod(m, u_char *);
			if ((off = (uintptr_t)(void *)d % 4) != 0) {
				if (!(m->m_flags & M_EXT) || !MEXT_IS_REF(m)) {
					sc->istats.fix_addr_copy++;
					bcopy(d, d - off, m->m_len);
					m->m_data = (caddr_t)(d - off);
				} else {
					if ((new = copy_mbuf(m)) == NULL) {
						sc->istats.fix_addr_noext++;
						goto fail;
					}
					sc->istats.fix_addr_ext++;
					if (prev)
						prev->m_next = new;
					new->m_next = next;
					m_free(m);
					m = new;
				}
			}

			if ((off = m->m_len % 4) != 0) {
				if ((m->m_flags & M_EXT) && MEXT_IS_REF(m)) {
					if ((new = copy_mbuf(m)) == NULL) {
						sc->istats.fix_len_noext++;
						goto fail;
					}
					sc->istats.fix_len_copy++;
					if (prev)
						prev->m_next = new;
					new->m_next = next;
					m_free(m);
					m = new;
				} else
					sc->istats.fix_len++;
				d = mtod(m, u_char *) + m->m_len;
				off = 4 - off;
				while (off) {
					if (next == NULL) {
						*d++ = 0;
						fill++;
					} else if (next->m_len == 0) {
						sc->istats.fix_empty++;
						next = m_free(next);
						continue;
					} else {
						cp = mtod(next, u_char *);
						*d++ = *cp++;
						next->m_len--;
						next->m_data = (caddr_t)cp;
					}
					off--;
					m->m_len++;
				}
			}

			if (first)
				*mp = m;
		}

		mlen += m->m_len;
		prev = m;
	} while ((m = next) != NULL);

	return (mlen - fill);

  fail:
	m_freem(*mp);
	*mp = NULL;
	return (0);
}

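/*
 * Worked example (illustrative): for a chain of two cluster-free mbufs of
 * lengths 10 and 6, two bytes are stolen from the second mbuf (making the
 * lengths 12 and 4) and the second mbuf's now misaligned data is copied
 * down by two bytes; the function returns 16. If instead the chain ends
 * with an odd tail, the last mbuf is padded with zero bytes, which are not
 * counted in the return value.
 */
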
/*
 * The helper function is used to load the computed physical addresses
 * into the transmit descriptor.
 */
static void
fatm_tpd_load(void *varg, bus_dma_segment_t *segs, int nsegs,
    bus_size_t mapsize, int error)
{
	struct tpd *tpd = varg;

	if (error)
		return;

	KASSERT(nsegs <= TPD_EXTENSIONS + TXD_FIXED, ("too many segments"));

	tpd->spec = 0;
	while (nsegs--) {
		H_SETDESC(tpd->segment[tpd->spec].buffer, segs->ds_addr);
		H_SETDESC(tpd->segment[tpd->spec].length, segs->ds_len);
		tpd->spec++;
		segs++;
	}
}

/*
 * Start output.
 *
 * Note that we update the internal statistics without the lock here.
 */
static int
fatm_tx(struct fatm_softc *sc, struct mbuf *m, struct card_vcc *vc, u_int mlen)
{
	struct txqueue *q;
	u_int nblks;
	int error, aal, nsegs;
	struct tpd *tpd;

	/*
	 * Get a queue element.
	 * If there isn't one - try to drain the transmit queue.
	 * We used to sleep here if that doesn't help, but we
	 * must not sleep here, because we are called with locks held.
	 */
	q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
		fatm_intr_drain_tx(sc);
		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
			if (sc->retry_tx) {
				sc->istats.tx_retry++;
				IF_PREPEND(&sc->ifatm.ifnet.if_snd, m);
				return (1);
			}
			sc->istats.tx_queue_full++;
			m_freem(m);
			return (0);
		}
		sc->istats.tx_queue_almost_full++;
	}

	tpd = q->q.ioblk;

	m->m_data += sizeof(struct atm_pseudohdr);
	m->m_len -= sizeof(struct atm_pseudohdr);

	/* map the mbuf */
	error = bus_dmamap_load_mbuf(sc->tx_tag, q->map, m,
	    fatm_tpd_load, tpd, BUS_DMA_NOWAIT);
	if (error) {
		sc->ifatm.ifnet.if_oerrors++;
		if_printf(&sc->ifatm.ifnet, "mbuf load error=%d\n", error);
		m_freem(m);
		return (0);
	}
	nsegs = tpd->spec;

	bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_PREWRITE);

	/*
	 * OK. Now go and do it.
	 */
	aal = (vc->param.aal == ATMIO_AAL_5) ? 5 : 0;

	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
	q->m = m;

	/*
	 * If the transmit queue is almost full, schedule a
	 * transmit interrupt so that transmit descriptors can
	 * be recycled.
	 */
	H_SETDESC(tpd->spec, TDX_MKSPEC((sc->txcnt >=
	    (4 * FATM_TX_QLEN) / 5), aal, nsegs, mlen));
	H_SETDESC(tpd->atm_header, TDX_MKHDR(vc->param.vpi,
	    vc->param.vci, 0, 0));

	if (vc->param.traffic == ATMIO_TRAFFIC_UBR)
		H_SETDESC(tpd->stream, 0);
	else {
		u_int i;

		for (i = 0; i < RATE_TABLE_SIZE; i++)
			if (rate_table[i].cell_rate < vc->param.tparam.pcr)
				break;
		if (i > 0)
			i--;
		H_SETDESC(tpd->stream, rate_table[i].ratio);
	}
	H_SYNCQ_PREWRITE(&sc->txq_mem, tpd, TPD_SIZE);

	nblks = TDX_SEGS2BLKS(nsegs);

	DBG(sc, XMIT, ("XMIT: mlen=%d spec=0x%x nsegs=%d blocks=%d",
	    mlen, le32toh(tpd->spec), nsegs, nblks));

	WRITE4(sc, q->q.card + 0, q->q.card_ioblk | nblks);
	BARRIER_W(sc);

	sc->txcnt++;
	sc->ifatm.ifnet.if_opackets++;
	vc->obytes += m->m_pkthdr.len;
	vc->opackets++;

	NEXT_QUEUE_ENTRY(sc->txqueue.head, FATM_TX_QLEN);

	return (0);
}

static void
fatm_start(struct ifnet *ifp)
{
	struct atm_pseudohdr aph;
	struct fatm_softc *sc;
	struct mbuf *m;
	u_int mlen, vpi, vci;
	struct card_vcc *vc;

	sc = (struct fatm_softc *)ifp->if_softc;

	while (1) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * Loop through the mbuf chain and compute the total length
		 * of the packet. Check that all data pointers are
		 * 4 byte aligned. If they are not, call fatm_fix_chain to
		 * fix that problem. This comes more or less from the
		 * en driver.
		 */
		mlen = fatm_fix_chain(sc, &m);
		if (m == NULL)
			continue;

		if (m->m_len < sizeof(struct atm_pseudohdr) &&
		    (m = m_pullup(m, sizeof(struct atm_pseudohdr))) == NULL)
			continue;

		aph = *mtod(m, struct atm_pseudohdr *);
		mlen -= sizeof(struct atm_pseudohdr);

		if (mlen == 0) {
			m_freem(m);
			continue;
		}
		if (mlen > FATM_MAXPDU) {
			sc->istats.tx_pdu2big++;
			m_freem(m);
			continue;
		}

		vci = ATM_PH_VCI(&aph);
		vpi = ATM_PH_VPI(&aph);

		/*
		 * From here on we need the softc
		 */
		FATM_LOCK(sc);
		if (!(ifp->if_flags & IFF_RUNNING)) {
			FATM_UNLOCK(sc);
			m_freem(m);
			break;
		}
		if (!VC_OK(sc, vpi, vci) || (vc = sc->vccs[vci]) == NULL ||
		    !(vc->vflags & FATM_VCC_OPEN)) {
			FATM_UNLOCK(sc);
			m_freem(m);
			continue;
		}
		if (fatm_tx(sc, m, vc, mlen)) {
			FATM_UNLOCK(sc);
			break;
		}
		FATM_UNLOCK(sc);
	}
}

/*
 * VCC management
 *
 * This may seem complicated. The reason for this is that we need an
 * asynchronous open/close for the NATM VCCs because our ioctl handler
 * is called with the radix node head of the routing table locked. Therefore
 * we cannot sleep there and wait for the open/close to succeed. For this
 * reason we just initiate the operation from the ioctl.
 */

/*
 * Command the card to open/close a VC.
 * Return the queue entry for waiting if we are successful.
 */
static struct cmdqueue *
fatm_start_vcc(struct fatm_softc *sc, u_int vpi, u_int vci, uint32_t cmd,
    u_int mtu, void (*func)(struct fatm_softc *, struct cmdqueue *))
{
	struct cmdqueue *q;

	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
		sc->istats.cmd_queue_full++;
		return (NULL);
	}
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = func;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	WRITE4(sc, q->q.card + FATMOC_ACTIN_VPVC, MKVPVC(vpi, vci));
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_ACTIN_MTU, mtu);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP, cmd);
	BARRIER_W(sc);

	return (q);
}
2121
2122/*
2123 * The VC has been opened/closed and somebody has been waiting for this.
2124 * Wake him up.
2125 */
2126static void
2127fatm_cmd_complete(struct fatm_softc *sc, struct cmdqueue *q)
2128{
2129
2130	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2131	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2132		sc->istats.get_stat_errors++;
2133		q->error = EIO;
2134	}
2135	wakeup(q);
2136}
2137
2138/*
2139 * Open complete
2140 */
2141static void
2142fatm_open_finish(struct fatm_softc *sc, struct card_vcc *vc)
2143{
2144	vc->vflags &= ~FATM_VCC_TRY_OPEN;
2145	vc->vflags |= FATM_VCC_OPEN;
2146
2147	/* inform management if this is not an NG
2148	 * VCC or it's an NG PVC. */
2149	if (!(vc->param.flags & ATMIO_FLAG_NG) ||
2150	    (vc->param.flags & ATMIO_FLAG_PVC))
2151		ATMEV_SEND_VCC_CHANGED(&sc->ifatm, 0, vc->param.vci, 1);
2152}
2153
2154/*
2155 * The VC that we have tried to open asynchronously has been opened.
2156 */
2157static void
2158fatm_open_complete(struct fatm_softc *sc, struct cmdqueue *q)
2159{
2160	u_int vci;
2161	struct card_vcc *vc;
2162
2163	vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
2164	vc = sc->vccs[vci];
2165	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2166	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2167		sc->istats.get_stat_errors++;
2168		sc->vccs[vci] = NULL;
2169		uma_zfree(sc->vcc_zone, vc);
2170		if_printf(&sc->ifatm.ifnet, "opening VCI %u failed\n", vci);
2171		return;
2172	}
2173	fatm_open_finish(sc, vc);
2174}
2175
2176/*
2177 * Wait on the queue entry until the VCC is opened/closed.
2178 */
2179static int
2180fatm_waitvcc(struct fatm_softc *sc, struct cmdqueue *q)
2181{
2182	int error;
2183
2184	/*
2185	 * Wait for the command to complete
2186	 */
2187	error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_vci", hz);
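	/* a timeout (hz ticks) is reported by msleep() as EWOULDBLOCK */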
2188
2189	if (error != 0)
2190		return (error);
2191	return (q->error);
2192}
2193
2194/*
2195 * Start to open a VCC. This just initiates the operation.
2196 */
2197static int
2198fatm_open_vcc(struct fatm_softc *sc, struct atmio_openvcc *op, int wait)
2199{
2200	uint32_t cmd;
2201	int error;
2202	struct cmdqueue *q;
2203	struct card_vcc *vc;
2204
2205	/*
2206	 * Check parameters
2207	 */
2208	if ((op->param.flags & ATMIO_FLAG_NOTX) &&
2209	    (op->param.flags & ATMIO_FLAG_NORX))
2210		return (EINVAL);
2211
2212	if (!VC_OK(sc, op->param.vpi, op->param.vci))
2213		return (EINVAL);
2214	if (op->param.aal != ATMIO_AAL_0 && op->param.aal != ATMIO_AAL_5)
2215		return (EINVAL);
2216
2217	vc = uma_zalloc(sc->vcc_zone, M_NOWAIT | M_ZERO);
2218	if (vc == NULL)
2219		return (ENOMEM);
2220
2221	error = 0;
2222
2223	FATM_LOCK(sc);
2224	if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) {
2225		error = EIO;
2226		goto done;
2227	}
2228	if (sc->vccs[op->param.vci] != NULL) {
2229		error = EBUSY;
2230		goto done;
2231	}
2232	vc->param = op->param;
2233	vc->rxhand = op->rxhand;
2234
2235	switch (op->param.traffic) {
2236
2237	  case ATMIO_TRAFFIC_UBR:
2238		break;
2239
2240	  case ATMIO_TRAFFIC_CBR:
2241		if (op->param.tparam.pcr == 0 ||
2242		    op->param.tparam.pcr > sc->ifatm.mib.pcr) {
2243			error = EINVAL;
2244			goto done;
2245		}
2246		break;
2247
2248	  default:
2249		error = EINVAL;
2250		goto done;
2252	}
2253	vc->ibytes = vc->obytes = 0;
2254	vc->ipackets = vc->opackets = 0;
2255
2256	/* Command and buffer strategy */
2257	cmd = FATM_OP_ACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL | (0 << 16);
2258	if (op->param.aal == ATMIO_AAL_0)
2259		cmd |= (0 << 8);
2260	else
2261		cmd |= (5 << 8);
2262
2263	q = fatm_start_vcc(sc, op->param.vpi, op->param.vci, cmd, 1,
2264	    (wait && !(op->param.flags & ATMIO_FLAG_ASYNC)) ?
2265	    fatm_cmd_complete : fatm_open_complete);
2266	if (q == NULL) {
2267		error = EIO;
2268		goto done;
2269	}
2270
2271	vc->vflags = FATM_VCC_TRY_OPEN;
2272	sc->vccs[op->param.vci] = vc;
2273	sc->open_vccs++;
2274
2275	if (wait && !(op->param.flags & ATMIO_FLAG_ASYNC)) {
2276		error = fatm_waitvcc(sc, q);
2277		if (error != 0) {
2278			sc->vccs[op->param.vci] = NULL;
2279			sc->open_vccs--;
2280			goto done;
2281		}
2282		fatm_open_finish(sc, vc);
2283	}
2284
2285	/* don't free below */
2286	vc = NULL;
2287
2288  done:
2289	FATM_UNLOCK(sc);
2290	if (vc != NULL)
2291		uma_zfree(sc->vcc_zone, vc);
2292	return (error);
2293}
2294
2295/*
2296 * Finish close
2297 */
2298static void
2299fatm_close_finish(struct fatm_softc *sc, struct card_vcc *vc)
2300{
2301	/* inform management if this is not an NG
2302	 * VCC or it's an NG PVC. */
2303	if (!(vc->param.flags & ATMIO_FLAG_NG) ||
2304	    (vc->param.flags & ATMIO_FLAG_PVC))
2305		ATMEV_SEND_VCC_CHANGED(&sc->ifatm, 0, vc->param.vci, 0);
2306
2307	sc->vccs[vc->param.vci] = NULL;
2308	sc->open_vccs--;
2309
2310	uma_zfree(sc->vcc_zone, vc);
2311}
2312
2313/*
2314 * The VC has been closed.
2315 */
2316static void
2317fatm_close_complete(struct fatm_softc *sc, struct cmdqueue *q)
2318{
2319	u_int vci;
2320	struct card_vcc *vc;
2321
2322	vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
2323	vc = sc->vccs[vci];
2324	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2325	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2326		sc->istats.get_stat_errors++;
2327		/* keep the VCC in that state */
2328		if_printf(&sc->ifatm.ifnet, "closing VCI %u failed\n", vci);
2329		return;
2330	}
2331
2332	fatm_close_finish(sc, vc);
2333}
2334
2335/*
2336 * Initiate closing a VCC
2337 */
2338static int
2339fatm_close_vcc(struct fatm_softc *sc, struct atmio_closevcc *cl, int wait)
2340{
2341	int error;
2342	struct cmdqueue *q;
2343	struct card_vcc *vc;
2344
2345	if (!VC_OK(sc, cl->vpi, cl->vci))
2346		return (EINVAL);
2347
2348	error = 0;
2349
2350	FATM_LOCK(sc);
2351	if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) {
2352		error = EIO;
2353		goto done;
2354	}
2355	vc = sc->vccs[cl->vci];
2356	if (vc == NULL || !(vc->vflags & (FATM_VCC_OPEN | FATM_VCC_TRY_OPEN))) {
2357		error = ENOENT;
2358		goto done;
2359	}
2360
2361	q = fatm_start_vcc(sc, cl->vpi, cl->vci,
2362	    FATM_OP_DEACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL, 1,
2363	    (wait && !(vc->param.flags & ATMIO_FLAG_ASYNC)) ?
2364	    fatm_cmd_complete : fatm_close_complete);
2365	if (q == NULL) {
2366		error = EIO;
2367		goto done;
2368	}
2369
2370	vc->vflags &= ~(FATM_VCC_OPEN | FATM_VCC_TRY_OPEN);
2371	vc->vflags |= FATM_VCC_TRY_CLOSE;
2372
2373	if (wait && !(vc->param.flags & ATMIO_FLAG_ASYNC)) {
2374		error = fatm_waitvcc(sc, q);
2375		if (error != 0)
2376			goto done;
2377
2378		fatm_close_finish(sc, vc);
2379	}
2380
2381  done:
2382	FATM_UNLOCK(sc);
2383	return (error);
2384}
2385
2386/*
2387 * IOCTL handler
2388 */
2389static int
2390fatm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t arg)
2391{
2392	int error;
2393	struct fatm_softc *sc = ifp->if_softc;
2394	struct ifaddr *ifa = (struct ifaddr *)arg;
2395	struct ifreq *ifr = (struct ifreq *)arg;
2396	struct atmio_closevcc *cl = (struct atmio_closevcc *)arg;
2397	struct atmio_openvcc *op = (struct atmio_openvcc *)arg;
2398	struct atm_pseudoioctl *pa = (struct atm_pseudoioctl *)arg;
2399	struct atmio_vcctable *vtab;
2400	struct atmio_openvcc ena;
2401	struct atmio_closevcc dis;
2402
2403	error = 0;
2404	switch (cmd) {
2405
2406	  case SIOCATMENA:	/* internal NATM use */
2407		bzero(&ena, sizeof(ena));
2408		ena.param.flags = ATM_PH_FLAGS(&pa->aph) &
2409		    (ATM_PH_AAL5 | ATM_PH_LLCSNAP);
2410		ena.param.vpi = ATM_PH_VPI(&pa->aph);
2411		ena.param.vci = ATM_PH_VCI(&pa->aph);
2412		ena.param.aal = (ATM_PH_FLAGS(&pa->aph) & ATM_PH_AAL5) ?
2413		    ATMIO_AAL_5 : ATMIO_AAL_0;
2414		ena.param.traffic = ATMIO_TRAFFIC_UBR;
2415		ena.rxhand = pa->rxhand;
2416		error = fatm_open_vcc(sc, &ena, 0);
2417		break;
2418
2419	  case SIOCATMDIS:	/* internal NATM use */
2420		bzero(&dis, sizeof(dis));
2421		dis.vpi = ATM_PH_VPI(&pa->aph);
2422		dis.vci = ATM_PH_VCI(&pa->aph);
2423		error = fatm_close_vcc(sc, &dis, 0);
2424		break;
2425
2426	  case SIOCATMOPENVCC:
2427		error = fatm_open_vcc(sc, op, 1);
2428		break;
2429
2430	  case SIOCATMCLOSEVCC:
2431		error = fatm_close_vcc(sc, cl, 1);
2432		break;
2433
2434	  case SIOCSIFADDR:
2435		FATM_LOCK(sc);
2436		ifp->if_flags |= IFF_UP;
2437		if (!(ifp->if_flags & IFF_RUNNING))
2438			fatm_init_locked(sc);
2439		switch (ifa->ifa_addr->sa_family) {
2440#ifdef INET
2441		  case AF_INET:
2442		  case AF_INET6:
2443			ifa->ifa_rtrequest = atm_rtrequest;
2444			break;
2445#endif
2446		  default:
2447			break;
2448		}
2449		FATM_UNLOCK(sc);
2450		break;
2451
2452	  case SIOCSIFFLAGS:
2453		FATM_LOCK(sc);
2454		if (ifp->if_flags & IFF_UP) {
2455			if (!(ifp->if_flags & IFF_RUNNING)) {
2456				fatm_init_locked(sc);
2457			}
2458		} else {
2459			if (ifp->if_flags & IFF_RUNNING) {
2460				fatm_stop(sc);
2461			}
2462		}
2463		FATM_UNLOCK(sc);
2464		break;
2465
2466	  case SIOCGIFMEDIA:
2467	  case SIOCSIFMEDIA:
2468		if (ifp->if_flags & IFF_RUNNING)
2469			error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
2470		else
2471			error = EINVAL;
2472		break;
2473
2474	  case SIOCATMGVCCS:
2475		/* return vcc table */
2476		vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
2477		    FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 1);
2478		error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
2479		    vtab->count * sizeof(vtab->vccs[0]));
2480		free(vtab, M_DEVBUF);
2481		break;
2482
2483	  case SIOCATMGETVCCS:	/* internal netgraph use */
2484		vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
2485		    FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 0);
2486		if (vtab == NULL) {
2487			error = ENOMEM;
2488			break;
2489		}
2490		*(void **)arg = vtab;
2491		break;
2492
2493	  default:
2494		DBG(sc, IOCTL, ("+++ cmd=%08lx arg=%p", cmd, arg));
2495		error = EINVAL;
2496		break;
2497	}
2498
2499	return (error);
2500}
2501
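/*
 * Kernel-side sketch (illustration only) of driving the two VCC ioctls
 * above; the VCI is a made-up example and must pass VC_OK().
 */
#if 0
	struct atmio_openvcc op;
	struct atmio_closevcc cl;
	int error;

	bzero(&op, sizeof(op));
	op.param.vpi = 0;
	op.param.vci = 42;
	op.param.aal = ATMIO_AAL_5;
	op.param.traffic = ATMIO_TRAFFIC_UBR;
	op.rxhand = NULL;		/* protocol receive handle */
	error = fatm_ioctl(ifp, SIOCATMOPENVCC, (caddr_t)&op);

	bzero(&cl, sizeof(cl));
	cl.vpi = 0;
	cl.vci = 42;
	error = fatm_ioctl(ifp, SIOCATMCLOSEVCC, (caddr_t)&cl);
#endif
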
2502/*
2503 * Detach from the interface and free all resources allocated during
2504 * initialisation and later.
2505 */
2506static int
2507fatm_detach(device_t dev)
2508{
2509	u_int i;
2510	struct rbuf *rb;
2511	struct fatm_softc *sc;
2512	struct txqueue *tx;
2513
2514	sc = (struct fatm_softc *)device_get_softc(dev);
2515
2516	if (device_is_alive(dev)) {
2517		FATM_LOCK(sc);
2518		fatm_stop(sc);
2519		utopia_detach(&sc->utopia);
2520		FATM_UNLOCK(sc);
2521		atm_ifdetach(&sc->ifatm.ifnet);		/* XXX race */
2522	}
2523
2524	if (sc->ih != NULL)
2525		bus_teardown_intr(dev, sc->irqres, sc->ih);
2526
2527	while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
2528		if_printf(&sc->ifatm.ifnet, "rbuf %p still in use!\n", rb);
2529		bus_dmamap_unload(sc->rbuf_tag, rb->map);
2530		m_freem(rb->m);
2531		LIST_REMOVE(rb, link);
2532		LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
2533	}
2534
2535	if (sc->txqueue.chunk != NULL) {
2536		for (i = 0; i < FATM_TX_QLEN; i++) {
2537			tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
2538			bus_dmamap_destroy(sc->tx_tag, tx->map);
2539		}
2540	}
2541
2542	while ((rb = LIST_FIRST(&sc->rbuf_free)) != NULL) {
2543		bus_dmamap_destroy(sc->rbuf_tag, rb->map);
2544		LIST_REMOVE(rb, link);
2545	}
2546
2547	if (sc->rbufs != NULL)
2548		free(sc->rbufs, M_DEVBUF);
2549	if (sc->vccs != NULL)
2550		free(sc->vccs, M_DEVBUF);
2551	if (sc->vcc_zone != NULL)
2552		uma_zdestroy(sc->vcc_zone);
2553
2554	if (sc->l1queue.chunk != NULL)
2555		free(sc->l1queue.chunk, M_DEVBUF);
2556	if (sc->s1queue.chunk != NULL)
2557		free(sc->s1queue.chunk, M_DEVBUF);
2558	if (sc->rxqueue.chunk != NULL)
2559		free(sc->rxqueue.chunk, M_DEVBUF);
2560	if (sc->txqueue.chunk != NULL)
2561		free(sc->txqueue.chunk, M_DEVBUF);
2562	if (sc->cmdqueue.chunk != NULL)
2563		free(sc->cmdqueue.chunk, M_DEVBUF);
2564
2565	destroy_dma_memory(&sc->reg_mem);
2566	destroy_dma_memory(&sc->sadi_mem);
2567	destroy_dma_memory(&sc->prom_mem);
2568#ifdef TEST_DMA_SYNC
2569	destroy_dma_memoryX(&sc->s1q_mem);
2570	destroy_dma_memoryX(&sc->l1q_mem);
2571	destroy_dma_memoryX(&sc->rxq_mem);
2572	destroy_dma_memoryX(&sc->txq_mem);
2573	destroy_dma_memoryX(&sc->stat_mem);
2574#endif
2575
2576	if (sc->tx_tag != NULL)
2577		if (bus_dma_tag_destroy(sc->tx_tag))
2578			printf("tx DMA tag busy!\n");
2579
2580	if (sc->rbuf_tag != NULL)
2581		if (bus_dma_tag_destroy(sc->rbuf_tag))
2582			printf("rbuf DMA tag busy!\n");
2583
2584	if (sc->parent_dmat != NULL)
2585		if (bus_dma_tag_destroy(sc->parent_dmat))
2586			printf("parent DMA tag busy!\n");
2587
2588	if (sc->irqres != NULL)
2589		bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irqres);
2590
2591	if (sc->memres != NULL)
2592		bus_release_resource(dev, SYS_RES_MEMORY,
2593		    sc->memid, sc->memres);
2594
2595	(void)sysctl_ctx_free(&sc->sysctl_ctx);
2596
2597	cv_destroy(&sc->cv_stat);
2598	cv_destroy(&sc->cv_regs);
2599
2600	mtx_destroy(&sc->mtx);
2601
2602	return (0);
2603}
2604
2605/*
2606 * Sysctl handler
2607 */
2608static int
2609fatm_sysctl_istats(SYSCTL_HANDLER_ARGS)
2610{
2611	struct fatm_softc *sc = arg1;
2612	u_long *ret;
2613	int error;
2614
2615	ret = malloc(sizeof(sc->istats), M_TEMP, M_WAITOK);
2616
2617	FATM_LOCK(sc);
2618	bcopy(&sc->istats, ret, sizeof(sc->istats));
2619	FATM_UNLOCK(sc);
2620
2621	error = SYSCTL_OUT(req, ret, sizeof(sc->istats));
2622	free(ret, M_TEMP);
2623
2624	return (error);
2625}
2626
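/*
 * Userland sketch (illustration only) of reading the istats node that
 * fatm_attach() creates below (the name assumes unit 0), using the
 * usual size-query-then-read sysctl(3) idiom:
 *
 *	size_t len;
 *	u_long *buf;
 *
 *	if (sysctlbyname("hw.atm.fatm0.istats", NULL, &len, NULL, 0) == -1)
 *		err(1, "sysctl size");
 *	buf = malloc(len);
 *	if (sysctlbyname("hw.atm.fatm0.istats", buf, &len, NULL, 0) == -1)
 *		err(1, "sysctl read");
 */
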
2627/*
2628 * Sysctl handler for card statistics
2629 * This is disabled because it destroys the PHY statistics.
2630 */
2631static int
2632fatm_sysctl_stats(SYSCTL_HANDLER_ARGS)
2633{
2634	struct fatm_softc *sc = arg1;
2635	int error;
2636	const struct fatm_stats *s;
2637	u_long *ret;
2638	u_int i;
2639
2640	ret = malloc(sizeof(u_long) * FATM_NSTATS, M_TEMP, M_WAITOK);
2641
2642	FATM_LOCK(sc);
2643
2644	if ((error = fatm_getstat(sc)) == 0) {
2645		s = sc->sadi_mem.mem;
2646		i = 0;
2647		ret[i++] = s->phy_4b5b.crc_header_errors;
2648		ret[i++] = s->phy_4b5b.framing_errors;
2649		ret[i++] = s->phy_oc3.section_bip8_errors;
2650		ret[i++] = s->phy_oc3.path_bip8_errors;
2651		ret[i++] = s->phy_oc3.line_bip24_errors;
2652		ret[i++] = s->phy_oc3.line_febe_errors;
2653		ret[i++] = s->phy_oc3.path_febe_errors;
2654		ret[i++] = s->phy_oc3.corr_hcs_errors;
2655		ret[i++] = s->phy_oc3.ucorr_hcs_errors;
2656		ret[i++] = s->atm.cells_transmitted;
2657		ret[i++] = s->atm.cells_received;
2658		ret[i++] = s->atm.vpi_bad_range;
2659		ret[i++] = s->atm.vpi_no_conn;
2660		ret[i++] = s->atm.vci_bad_range;
2661		ret[i++] = s->atm.vci_no_conn;
2662		ret[i++] = s->aal0.cells_transmitted;
2663		ret[i++] = s->aal0.cells_received;
2664		ret[i++] = s->aal0.cells_dropped;
2665		ret[i++] = s->aal4.cells_transmitted;
2666		ret[i++] = s->aal4.cells_received;
2667		ret[i++] = s->aal4.cells_crc_errors;
2668		ret[i++] = s->aal4.cels_protocol_errors;
2669		ret[i++] = s->aal4.cells_dropped;
2670		ret[i++] = s->aal4.cspdus_transmitted;
2671		ret[i++] = s->aal4.cspdus_received;
2672		ret[i++] = s->aal4.cspdus_protocol_errors;
2673		ret[i++] = s->aal4.cspdus_dropped;
2674		ret[i++] = s->aal5.cells_transmitted;
2675		ret[i++] = s->aal5.cells_received;
2676		ret[i++] = s->aal5.congestion_experienced;
2677		ret[i++] = s->aal5.cells_dropped;
2678		ret[i++] = s->aal5.cspdus_transmitted;
2679		ret[i++] = s->aal5.cspdus_received;
2680		ret[i++] = s->aal5.cspdus_crc_errors;
2681		ret[i++] = s->aal5.cspdus_protocol_errors;
2682		ret[i++] = s->aal5.cspdus_dropped;
2683		ret[i++] = s->aux.small_b1_failed;
2684		ret[i++] = s->aux.large_b1_failed;
2685		ret[i++] = s->aux.small_b2_failed;
2686		ret[i++] = s->aux.large_b2_failed;
2687		ret[i++] = s->aux.rpd_alloc_failed;
2688		ret[i++] = s->aux.receive_carrier;
2689	}
2690	/* declare the buffer free */
2691	sc->flags &= ~FATM_STAT_INUSE;
2692	cv_signal(&sc->cv_stat);
2693
2694	FATM_UNLOCK(sc);
2695
2696	if (error == 0)
2697		error = SYSCTL_OUT(req, ret, sizeof(u_long) * FATM_NSTATS);
2698	free(ret, M_TEMP);
2699
2700	return (error);
2701}
2702
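/*
 * Minimal sketch (illustration only) of the acquire side of the
 * FATM_STAT_INUSE handshake that is released above; the actual code is
 * assumed to live in fatm_getstat(), which serializes access to the
 * sadi_mem buffer with the cv_stat condition variable.
 */
#if 0
	while (sc->flags & FATM_STAT_INUSE)
		cv_wait(&sc->cv_stat, &sc->mtx);
	sc->flags |= FATM_STAT_INUSE;
#endif
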
2703#define MAXDMASEGS 32		/* maximum number of receive descriptors */
2704
2705/*
2706 * Attach to the device.
2707 *
2708 * We assume that there is a global lock (Giant in this case) that protects
2709 * multiple threads from entering this function. This makes sense, doesn't it?
2710 */
2711static int
2712fatm_attach(device_t dev)
2713{
2714	struct ifnet *ifp;
2715	struct fatm_softc *sc;
2716	int unit;
2717	uint16_t cfg;
2718	int error = 0;
2719	struct rbuf *rb;
2720	u_int i;
2721	struct txqueue *tx;
2722
2723	sc = device_get_softc(dev);
2724	unit = device_get_unit(dev);
2725
2726	sc->ifatm.mib.device = ATM_DEVICE_PCA200E;
2727	sc->ifatm.mib.serial = 0;
2728	sc->ifatm.mib.hw_version = 0;
2729	sc->ifatm.mib.sw_version = 0;
2730	sc->ifatm.mib.vpi_bits = 0;
2731	sc->ifatm.mib.vci_bits = FORE_VCIBITS;
2732	sc->ifatm.mib.max_vpcs = 0;
2733	sc->ifatm.mib.max_vccs = FORE_MAX_VCC;
2734	sc->ifatm.mib.media = IFM_ATM_UNKNOWN;
2735	sc->ifatm.phy = &sc->utopia;
2736
2737	LIST_INIT(&sc->rbuf_free);
2738	LIST_INIT(&sc->rbuf_used);
2739
2740	/*
2741	 * Initialize mutex and condition variables.
2742	 */
2743	mtx_init(&sc->mtx, device_get_nameunit(dev),
2744	    MTX_NETWORK_LOCK, MTX_DEF);
2745
2746	cv_init(&sc->cv_stat, "fatm_stat");
2747	cv_init(&sc->cv_regs, "fatm_regs");
2748
2749	sysctl_ctx_init(&sc->sysctl_ctx);
2750
2751	/*
2752	 * Make the sysctl tree
2753	 */
2754	if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
2755	    SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
2756	    device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
2757		goto fail;
2758
2759	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2760	    OID_AUTO, "istats", CTLFLAG_RD, sc, 0, fatm_sysctl_istats,
2761	    "LU", "internal statistics") == NULL)
2762		goto fail;
2763
2764	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2765	    OID_AUTO, "stats", CTLFLAG_RD, sc, 0, fatm_sysctl_stats,
2766	    "LU", "card statistics") == NULL)
2767		goto fail;
2768
2769	if (SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2770	    OID_AUTO, "retry_tx", CTLFLAG_RW, &sc->retry_tx, 0,
2771	    "retry flag") == NULL)
2772		goto fail;
2773
2774#ifdef FATM_DEBUG
2775	if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2776	    OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "debug flags")
2777	    == NULL)
2778		goto fail;
2779	sc->debug = FATM_DEBUG;
2780#endif
2781
2782	/*
2783	 * Network subsystem stuff
2784	 */
2785	ifp = &sc->ifatm.ifnet;
2786	ifp->if_softc = sc;
2787	ifp->if_unit = unit;
2788	ifp->if_name = "fatm";
2789	ifp->if_flags = IFF_SIMPLEX;
2790	ifp->if_ioctl = fatm_ioctl;
2791	ifp->if_start = fatm_start;
2792	ifp->if_watchdog = fatm_watchdog;
2793	ifp->if_init = fatm_init;
2794	ifp->if_linkmib = &sc->ifatm.mib;
2795	ifp->if_linkmiblen = sizeof(sc->ifatm.mib);
2796
2797	/*
2798	 * Enable memory mapping and bus mastering
2799	 */
2800	cfg = pci_read_config(dev, PCIR_COMMAND, 2);
2801	cfg |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
2802	pci_write_config(dev, PCIR_COMMAND, cfg, 2);
2803
2804	/*
2805	 * Map memory
2806	 */
2807	cfg = pci_read_config(dev, PCIR_COMMAND, 2);
2808	if (!(cfg & PCIM_CMD_MEMEN)) {
2809		if_printf(ifp, "failed to enable memory mapping\n");
2810		error = ENXIO;
2811		goto fail;
2812	}
2813	sc->memid = 0x10;
2814	sc->memres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->memid,
2815	    0, ~0, 1, RF_ACTIVE);
2816	if (sc->memres == NULL) {
2817		if_printf(ifp, "could not map memory\n");
2818		error = ENXIO;
2819		goto fail;
2820	}
2821	sc->memh = rman_get_bushandle(sc->memres);
2822	sc->memt = rman_get_bustag(sc->memres);
2823
2824	/*
2825	 * Convert the endianness of slave accesses
2826	 */
2827	cfg = pci_read_config(dev, FATM_PCIR_MCTL, 1);
2828	cfg |= FATM_PCIM_SWAB;
2829	pci_write_config(dev, FATM_PCIR_MCTL, cfg, 1);
2830
2831	/*
2832	 * Allocate interrupt (activate at the end)
2833	 */
2834	sc->irqid = 0;
2835	sc->irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irqid,
2836	    0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
2837	if (sc->irqres == NULL) {
2838		if_printf(ifp, "could not allocate irq\n");
2839		error = ENXIO;
2840		goto fail;
2841	}
2842
2843	/*
2844	 * Allocate the parent DMA tag. This is used simply to hold overall
2845	 * restrictions for the controller (and PCI bus) and is never used
2846	 * to do anything.
2847	 */
2848	if (bus_dma_tag_create(NULL, 1, 0,
2849	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2850	    NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, MAXDMASEGS,
2851	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
2852	    &sc->parent_dmat)) {
2853		if_printf(ifp, "could not allocate parent DMA tag\n");
2854		error = ENOMEM;
2855		goto fail;
2856	}
2857
2858	/*
2859	 * Allocate the receive buffer DMA tag. This tag must be able to map
2860	 * at most an mbuf cluster.
2861	 */
2862	if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
2863	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2864	    NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
2865	    NULL, NULL, &sc->rbuf_tag)) {
2866		if_printf(ifp, "could not allocate rbuf DMA tag\n");
2867		error = ENOMEM;
2868		goto fail;
2869	}
2870
2871	/*
2872	 * Allocate the transmission DMA tag. Must add 1 to the maximum,
2873	 * because the rounded-up PDU will be 65536 bytes long.
2874	 */
2875	if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
2876	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2877	    NULL, NULL,
2878	    FATM_MAXPDU + 1, TPD_EXTENSIONS + TXD_FIXED, MCLBYTES, 0,
2879	    NULL, NULL, &sc->tx_tag)) {
2880		if_printf(ifp, "could not allocate tx DMA tag\n");
2881		error = ENOMEM;
2882		goto fail;
2883	}
2884
2885	/*
2886	 * Allocate DMAable memory.
2887	 */
2888	sc->stat_mem.size = sizeof(uint32_t) * (FATM_CMD_QLEN + FATM_TX_QLEN
2889	    + FATM_RX_QLEN + SMALL_SUPPLY_QLEN + LARGE_SUPPLY_QLEN);
2890	sc->stat_mem.align = 4;
2891
2892	sc->txq_mem.size = FATM_TX_QLEN * TPD_SIZE;
2893	sc->txq_mem.align = 32;
2894
2895	sc->rxq_mem.size = FATM_RX_QLEN * RPD_SIZE;
2896	sc->rxq_mem.align = 32;
2897
2898	sc->s1q_mem.size = SMALL_SUPPLY_QLEN *
2899	    BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE);
2900	sc->s1q_mem.align = 32;
2901
2902	sc->l1q_mem.size = LARGE_SUPPLY_QLEN *
2903	    BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE);
2904	sc->l1q_mem.align = 32;
2905
2906#ifdef TEST_DMA_SYNC
2907	if ((error = alloc_dma_memoryX(sc, "STATUS", &sc->stat_mem)) != 0 ||
2908	    (error = alloc_dma_memoryX(sc, "TXQ", &sc->txq_mem)) != 0 ||
2909	    (error = alloc_dma_memoryX(sc, "RXQ", &sc->rxq_mem)) != 0 ||
2910	    (error = alloc_dma_memoryX(sc, "S1Q", &sc->s1q_mem)) != 0 ||
2911	    (error = alloc_dma_memoryX(sc, "L1Q", &sc->l1q_mem)) != 0)
2912		goto fail;
2913#else
2914	if ((error = alloc_dma_memory(sc, "STATUS", &sc->stat_mem)) != 0 ||
2915	    (error = alloc_dma_memory(sc, "TXQ", &sc->txq_mem)) != 0 ||
2916	    (error = alloc_dma_memory(sc, "RXQ", &sc->rxq_mem)) != 0 ||
2917	    (error = alloc_dma_memory(sc, "S1Q", &sc->s1q_mem)) != 0 ||
2918	    (error = alloc_dma_memory(sc, "L1Q", &sc->l1q_mem)) != 0)
2919		goto fail;
2920#endif
2921
2922	sc->prom_mem.size = sizeof(struct prom);
2923	sc->prom_mem.align = 32;
2924	if ((error = alloc_dma_memory(sc, "PROM", &sc->prom_mem)) != 0)
2925		goto fail;
2926
2927	sc->sadi_mem.size = sizeof(struct fatm_stats);
2928	sc->sadi_mem.align = 32;
2929	if ((error = alloc_dma_memory(sc, "STATISTICS", &sc->sadi_mem)) != 0)
2930		goto fail;
2931
2932	sc->reg_mem.size = sizeof(uint32_t) * FATM_NREGS;
2933	sc->reg_mem.align = 32;
2934	if ((error = alloc_dma_memory(sc, "REGISTERS", &sc->reg_mem)) != 0)
2935		goto fail;
2936
2937	/*
2938	 * Allocate queues
2939	 */
2940	sc->cmdqueue.chunk = malloc(FATM_CMD_QLEN * sizeof(struct cmdqueue),
2941	    M_DEVBUF, M_ZERO | M_WAITOK);
2942	sc->txqueue.chunk = malloc(FATM_TX_QLEN * sizeof(struct txqueue),
2943	    M_DEVBUF, M_ZERO | M_WAITOK);
2944	sc->rxqueue.chunk = malloc(FATM_RX_QLEN * sizeof(struct rxqueue),
2945	    M_DEVBUF, M_ZERO | M_WAITOK);
2946	sc->s1queue.chunk = malloc(SMALL_SUPPLY_QLEN * sizeof(struct supqueue),
2947	    M_DEVBUF, M_ZERO | M_WAITOK);
2948	sc->l1queue.chunk = malloc(LARGE_SUPPLY_QLEN * sizeof(struct supqueue),
2949	    M_DEVBUF, M_ZERO | M_WAITOK);
2950
2951	sc->vccs = malloc((FORE_MAX_VCC + 1) * sizeof(sc->vccs[0]),
2952	    M_DEVBUF, M_ZERO | M_WAITOK);
2953	sc->vcc_zone = uma_zcreate("FATM vccs", sizeof(struct card_vcc),
2954	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
2955	if (sc->vcc_zone == NULL) {
2956		error = ENOMEM;
2957		goto fail;
2958	}
2959
2960	/*
2961	 * Allocate memory for the receive buffer headers. The total number
2962	 * of headers should probably also include the maximum number of
2963	 * buffers on the receive queue.
2964	 */
2965	sc->rbuf_total = SMALL_POOL_SIZE + LARGE_POOL_SIZE;
2966	sc->rbufs = malloc(sc->rbuf_total * sizeof(struct rbuf),
2967	    M_DEVBUF, M_ZERO | M_WAITOK);
2968
2969	/*
2970	 * Put all rbuf headers on the free list and create DMA maps.
2971	 */
2972	for (rb = sc->rbufs, i = 0; i < sc->rbuf_total; i++, rb++) {
2973		if ((error = bus_dmamap_create(sc->rbuf_tag, 0, &rb->map))) {
2974			if_printf(&sc->ifatm.ifnet, "creating rx map: %d\n",
2975			    error);
2976			goto fail;
2977		}
2978		LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
2979	}
2980
2981	/*
2982	 * Create DMA maps for transmission. In case of an error, destroy
2983	 * the already allocated DMA maps, because on some architectures
2984	 * maps are NULL and we cannot distinguish between a failure and
2985	 * a NULL map in the detach routine.
2986	 */
2987	for (i = 0; i < FATM_TX_QLEN; i++) {
2988		tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
2989		if ((error = bus_dmamap_create(sc->tx_tag, 0, &tx->map))) {
2990			if_printf(&sc->ifatm.ifnet, "creating tx map: %d\n",
2991			    error);
2992			while (i > 0) {
2993				tx = GET_QUEUE(sc->txqueue, struct txqueue,
2994				    i - 1);
2995				bus_dmamap_destroy(sc->tx_tag, tx->map);
2996				i--;
2997			}
2998			goto fail;
2999		}
3000	}
3001
3002	utopia_attach(&sc->utopia, &sc->ifatm, &sc->media, &sc->mtx,
3003	    &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
3004	    &fatm_utopia_methods);
3005	sc->utopia.flags |= UTP_FL_NORESET | UTP_FL_POLL_CARRIER;
3006
3007	/*
3008	 * Attach the interface
3009	 */
3010	atm_ifattach(ifp);
3011	ifp->if_snd.ifq_maxlen = 512;
3012
3013	error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET,
3014	    fatm_intr, sc, &sc->ih);
3015	if (error) {
3016		if_printf(ifp, "couldn't setup irq\n");
3017		goto fail;
3018	}
3019
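	/*
	 * Reached on success (error == 0) as well; fatm_detach() is only
	 * invoked to tear down a partial attach.
	 */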
3020  fail:
3021	if (error)
3022		fatm_detach(dev);
3023
3024	return (error);
3025}
3026
3027#if defined(FATM_DEBUG) && 0
3028static void
3029dump_s1_queue(struct fatm_softc *sc)
3030{
3031	int i;
3032	struct supqueue *q;
3033
3034	for(i = 0; i < SMALL_SUPPLY_QLEN; i++) {
3035		q = GET_QUEUE(sc->s1queue, struct supqueue, i);
3036		printf("%2d: card=%x(%x,%x) stat=%x\n", i,
3037		    q->q.card,
3038		    READ4(sc, q->q.card),
3039		    READ4(sc, q->q.card + 4),
3040		    *q->q.statp);
3041	}
3042}
3043#endif
3044
3045/*
3046 * Driver infrastructure.
3047 */
3048static device_method_t fatm_methods[] = {
3049	DEVMETHOD(device_probe,		fatm_probe),
3050	DEVMETHOD(device_attach,	fatm_attach),
3051	DEVMETHOD(device_detach,	fatm_detach),
3052	{ 0, 0 }
3053};
3054static driver_t fatm_driver = {
3055	"fatm",
3056	fatm_methods,
3057	sizeof(struct fatm_softc),
3058};
3059
3060DRIVER_MODULE(fatm, pci, fatm_driver, fatm_devclass, 0, 0);
3061