if_fatm.c revision 117126
1/*
2 * Copyright (c) 2001-2003
3 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
4 * 	All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * Author: Hartmut Brandt <harti@freebsd.org>
28 *
29 * Fore PCA200E driver for NATM
30 */
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/fatm/if_fatm.c 117126 2003-07-01 15:52:06Z scottl $");
33
34#include "opt_inet.h"
35#include "opt_natm.h"
36
37#include <sys/types.h>
38#include <sys/param.h>
39#include <sys/systm.h>
40#include <sys/malloc.h>
41#include <sys/kernel.h>
42#include <sys/bus.h>
43#include <sys/errno.h>
44#include <sys/conf.h>
45#include <sys/module.h>
46#include <sys/queue.h>
47#include <sys/syslog.h>
48#include <sys/endian.h>
49#include <sys/sysctl.h>
50#include <sys/condvar.h>
51
52#include <sys/sockio.h>
53#include <sys/mbuf.h>
54#include <sys/socket.h>
55
56#include <net/if.h>
57#include <net/if_media.h>
58#include <net/if_atm.h>
59#include <net/route.h>
60#ifdef INET
61#include <netinet/in.h>
62#include <netinet/if_atm.h>
63#endif
64
65#include <machine/bus.h>
66#include <machine/resource.h>
67#include <sys/bus.h>
68#include <sys/rman.h>
69#include <pci/pcireg.h>
70#include <pci/pcivar.h>
71
72#include <dev/utopia/utopia.h>
73
74#include <dev/fatm/if_fatmreg.h>
75#include <dev/fatm/if_fatmvar.h>
76
77#include <dev/fatm/firmware.h>
78
79devclass_t fatm_devclass;
80
81static const struct {
82	uint16_t	vid;
83	uint16_t	did;
84	const char	*name;
85} fatm_devs[] = {
86	{ 0x1127, 0x300,
87	  "FORE PCA200E" },
88	{ 0, 0, NULL }
89};
90
91static const struct rate {
92	uint32_t	ratio;
93	uint32_t	cell_rate;
94} rate_table[] = {
95#include <dev/fatm/if_fatm_rate.h>
96};
97#define RATE_TABLE_SIZE (sizeof(rate_table) / sizeof(rate_table[0]))
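/*
 * The rate table maps cell rates to the "stream" ratio the firmware
 * expects in CBR transmit descriptors. fatm_tx() below scans the table
 * (presumably sorted by decreasing cell rate) for the first entry whose
 * cell_rate is below the channel's PCR and then steps back one entry.
 * A sketch of that lookup, mirroring the code in fatm_tx() (pcr and
 * ratio are illustrative locals):
 *
 *	for (i = 0; i < RATE_TABLE_SIZE; i++)
 *		if (rate_table[i].cell_rate < pcr)
 *			break;
 *	if (i > 0)
 *		i--;
 *	ratio = rate_table[i].ratio;
 */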
98
99SYSCTL_DECL(_hw_atm);
100
101MODULE_DEPEND(fatm, utopia, 1, 1, 1);
102
103static int	fatm_utopia_readregs(struct ifatm *, u_int, uint8_t *, u_int *);
104static int	fatm_utopia_writereg(struct ifatm *, u_int, u_int, u_int);
105
106static const struct utopia_methods fatm_utopia_methods = {
107	fatm_utopia_readregs,
108	fatm_utopia_writereg
109};
110
111#define VC_OK(SC, VPI, VCI)						\
112	(((VPI) & ~((1 << (SC)->ifatm.mib.vpi_bits) - 1)) == 0 &&	\
113	 (VCI) != 0 && ((VCI) & ~((1 << (SC)->ifatm.mib.vci_bits) - 1)) == 0)
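/*
 * Worked example (hypothetical MIB values): with vpi_bits == 0 and
 * vci_bits == 10 the macro accepts only VPI 0 together with a VCI in
 * the range 1-1023:
 *
 *	VC_OK(sc, 0, 32)	-> true
 *	VC_OK(sc, 1, 32)	-> false (VPI out of range)
 *	VC_OK(sc, 0, 0)		-> false (VCI 0 is never valid)
 */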
114
115/*
116 * Probing is easy: step through the list of known vendor and device
117 * ids and compare. If one is found - it's ours.
118 */
119static int
120fatm_probe(device_t dev)
121{
122	int i;
123
124	for (i = 0; fatm_devs[i].name; i++)
125		if (pci_get_vendor(dev) == fatm_devs[i].vid &&
126		    pci_get_device(dev) == fatm_devs[i].did) {
127			device_set_desc(dev, fatm_devs[i].name);
128			return (0);
129		}
130	return (ENXIO);
131}
132
133/*
134 * Function called at completion of a SUNI writeregs/readregs command.
135 * This is called from the interrupt handler while holding the softc lock.
136 * We use the queue entry as the rendezvous point.
137 */
138static void
139fatm_utopia_writeregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
140{
141
142	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
143	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
144		sc->istats.suni_reg_errors++;
145		q->error = EIO;
146	}
147	wakeup(q);
148}
149
150/*
151 * Write a SUNI register. The bits that are 1 in mask are written from val
152 * into register reg. We wait for the command to complete by sleeping on
153 * the queue entry.
154 *
155 * We assume that we already hold the softc mutex.
156 */
157static int
158fatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
159{
160	int error;
161	struct cmdqueue *q;
162	struct fatm_softc *sc;
163
164	sc = ifatm->ifnet.if_softc;
165	FATM_CHECKLOCK(sc);
166	if (!(ifatm->ifnet.if_flags & IFF_RUNNING))
167		return (EIO);
168
169	/* get queue element and fill it */
170	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
171
172	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
173	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
174		sc->istats.cmd_queue_full++;
175		return (EIO);
176	}
177	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
178
179	q->error = 0;
180	q->cb = fatm_utopia_writeregs_complete;
181	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
182	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
183
184	WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, 0);
185	BARRIER_W(sc);
186	WRITE4(sc, q->q.card + FATMOC_OP,
187	    FATM_MAKE_SETOC3(reg, val, mask) | FATM_OP_INTERRUPT_SEL);
188	BARRIER_W(sc);
189
190	/*
191	 * Wait for the command to complete
192	 */
193	error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_setreg", hz);
194
195	switch (error) {
196
197	  case EWOULDBLOCK:
198		error = EIO;
199		break;
200
201	  case ERESTART:
202		error = EINTR;
203		break;
204
205	  case 0:
206		error = q->error;
207		break;
208	}
209
210	return (error);
211}
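/*
 * A minimal usage sketch for the method above (hypothetical register
 * number and values; the utopia module is the real caller): with the
 * softc mutex held, set the low two bits of SUNI register 5 to 01
 * while leaving all other bits untouched:
 *
 *	error = fatm_utopia_writereg(ifatm, 5, 0x03, 0x01);
 */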
212
213/*
214 * Function called at completion of a SUNI readregs command.
215 * This is called from the interrupt handler while holding the softc lock.
216 * We use reg_mem as the rendezvous point.
217 */
218static void
219fatm_utopia_readregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
220{
221
222	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
223	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
224		sc->istats.suni_reg_errors++;
225		q->error = EIO;
226	}
227	wakeup(&sc->reg_mem);
228}
229
230/*
231 * Read SUNI registers
232 *
233 * We use a preallocated buffer to read the registers. Therefore we need
234 * to protect against multiple threads trying to read registers. We do this
235 * with a condition variable and a flag. We wait for the command to
236 * complete by sleeping on the register memory.
237 *
238 * We assume that we already hold the softc mutex.
239 */
240static int
241fatm_utopia_readregs_internal(struct fatm_softc *sc)
242{
243	int error, i;
244	uint32_t *ptr;
245	struct cmdqueue *q;
246
247	/* get the buffer */
248	for (;;) {
249		if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
250			return (EIO);
251		if (!(sc->flags & FATM_REGS_INUSE))
252			break;
253		cv_wait(&sc->cv_regs, &sc->mtx);
254	}
255	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
256
257	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
258	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
259		sc->istats.cmd_queue_full++;
260		return (EIO);
261	}
262	/* take the buffer only once we know the command can be issued */
263	sc->flags |= FATM_REGS_INUSE;
264	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
265
266	q->error = 0;
267	q->cb = fatm_utopia_readregs_complete;
268	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
269	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
270
271	bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map, BUS_DMASYNC_PREREAD);
272
273	WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, sc->reg_mem.paddr);
274	BARRIER_W(sc);
275	WRITE4(sc, q->q.card + FATMOC_OP,
276	    FATM_OP_OC3_GET_REG | FATM_OP_INTERRUPT_SEL);
277	BARRIER_W(sc);
278
279	/*
280	 * Wait for the command to complete
281	 */
282	error = msleep(&sc->reg_mem, &sc->mtx, PZERO | PCATCH,
283	    "fatm_getreg", hz);
284
285	switch (error) {
286
287	  case EWOULDBLOCK:
288		error = EIO;
289		break;
290
291	  case ERESTART:
292		error = EINTR;
293		break;
294
295	  case 0:
296		bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map,
297		    BUS_DMASYNC_POSTREAD);
298		error = q->error;
299		break;
300	}
301
302	if (error != 0) {
303		/* declare buffer to be free */
304		sc->flags &= ~FATM_REGS_INUSE;
305		cv_signal(&sc->cv_regs);
306		return (error);
307	}
308
309	/* swap if needed */
310	ptr = (uint32_t *)sc->reg_mem.mem;
311	for (i = 0; i < FATM_NREGS; i++)
312		ptr[i] = le32toh(ptr[i]) & 0xff;
313
314	return (0);
315}
316
317/*
318 * Read SUNI registers. This is the readregs method for the utopia module.
319 *
320 * We assume that we already hold the mutex.
321 */
322static int
323fatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *valp, u_int *np)
324{
325	int err;
326	int i;
327	struct fatm_softc *sc;
328
329	if (reg >= FATM_NREGS)
330		return (EINVAL);
331	if (reg + *np > FATM_NREGS)
332		*np = FATM_NREGS - reg;
333	sc = ifatm->ifnet.if_softc;
334	FATM_CHECKLOCK(sc);
335
336	err = fatm_utopia_readregs_internal(sc);
337	if (err != 0)
338		return (err);
339
340	for (i = 0; i < *np; i++)
341		valp[i] = ((uint32_t *)sc->reg_mem.mem)[reg + i];
342
343	/* declare buffer to be free */
344	sc->flags &= ~FATM_REGS_INUSE;
345	cv_signal(&sc->cv_regs);
346
347	return (0);
348}
349
350/*
351 * Check whether the heart is beating. We remember the last heartbeat and
352 * compare it to the current one. If it appears stuck 10 times in a row,
353 * we have a problem.
354 *
355 * Assume we hold the lock.
356 */
357static void
358fatm_check_heartbeat(struct fatm_softc *sc)
359{
360	uint32_t h;
361
362	FATM_CHECKLOCK(sc);
363
364	h = READ4(sc, FATMO_HEARTBEAT);
365	DBG(sc, BEAT, ("heartbeat %08x", h));
366
367	if (sc->stop_cnt == 10)
368		return;
369
370	if (h == sc->heartbeat) {
371		if (++sc->stop_cnt == 10) {
372			log(LOG_ERR, "i960 stopped???\n");
373			WRITE4(sc, FATMO_HIMR, 1);
374		}
375		return;
376	}
377
378	sc->stop_cnt = 0;
379	sc->heartbeat = h;
380}
381
382/*
383 * Ensure that the heart is still beating.
384 */
385static void
386fatm_watchdog(struct ifnet *ifp)
387{
388	struct fatm_softc *sc = ifp->if_softc;
389
390	FATM_LOCK(sc);
391	if (ifp->if_flags & IFF_RUNNING) {
392		fatm_check_heartbeat(sc);
393		ifp->if_timer = 5;
394	}
395	FATM_UNLOCK(sc);
396}
397
398/*
399 * Hard reset the i960 on the board. This is done by initializing registers,
400 * clearing interrupts and waiting for the selftest to finish. Not sure
401 * whether all these barriers are actually needed.
402 *
403 * Assumes that we hold the lock.
404 */
405static int
406fatm_reset(struct fatm_softc *sc)
407{
408	int w;
409	uint32_t val;
410
411	FATM_CHECKLOCK(sc);
412
413	WRITE4(sc, FATMO_APP_BASE, FATMO_COMMON_ORIGIN);
414	BARRIER_W(sc);
415
416	WRITE4(sc, FATMO_UART_TO_960, XMIT_READY);
417	BARRIER_W(sc);
418
419	WRITE4(sc, FATMO_UART_TO_HOST, XMIT_READY);
420	BARRIER_W(sc);
421
422	WRITE4(sc, FATMO_BOOT_STATUS, COLD_START);
423	BARRIER_W(sc);
424
425	WRITE1(sc, FATMO_HCR, FATM_HCR_RESET);
426	BARRIER_W(sc);
427
428	DELAY(1000);
429
430	WRITE1(sc, FATMO_HCR, 0);
431	BARRIER_RW(sc);
432
433	DELAY(1000);
434
435	for (w = 100; w; w--) {
436		BARRIER_R(sc);
437		val = READ4(sc, FATMO_BOOT_STATUS);
438		switch (val) {
439		  case SELF_TEST_OK:
440			return (0);
441		  case SELF_TEST_FAIL:
442			return (EIO);
443		}
444		DELAY(1000);
445	}
446	return (EIO);
447}
448
449/*
450 * Stop the card. Must be called WITH the lock held.
451 * Reset, free transmit and receive buffers. Wake up everybody that may be sleeping.
452 */
453static void
454fatm_stop(struct fatm_softc *sc)
455{
456	int i;
457	struct cmdqueue *q;
458	struct rbuf *rb;
459	struct txqueue *tx;
460	uint32_t stat;
461
462	FATM_CHECKLOCK(sc);
463
464	/* Stop the board */
465	utopia_stop(&sc->utopia);
466	(void)fatm_reset(sc);
467
468	/* stop watchdog */
469	sc->ifatm.ifnet.if_timer = 0;
470
471	if (sc->ifatm.ifnet.if_flags & IFF_RUNNING) {
472		sc->ifatm.ifnet.if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
473
474		/*
475		 * Collect transmit mbufs, partial receive mbufs and
476		 * supplied mbufs
477		 */
478		for (i = 0; i < FATM_TX_QLEN; i++) {
479			tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
480			if (tx->m) {
481				bus_dmamap_unload(sc->tx_tag, tx->map);
482				m_freem(tx->m);
483				tx->m = NULL;
484			}
485		}
486
487		/* Collect supplied mbufs */
488		while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
489			LIST_REMOVE(rb, link);
490			bus_dmamap_unload(sc->rbuf_tag, rb->map);
491			m_free(rb->m);
492			rb->m = NULL;
493			LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
494		}
495
496		/* Wake up any waiters */
497		wakeup(&sc->sadi_mem);
498
499		/* wakeup all threads waiting for STAT or REG buffers */
500		cv_broadcast(&sc->cv_stat);
501		cv_broadcast(&sc->cv_regs);
502
503		sc->flags &= ~(FATM_STAT_INUSE | FATM_REGS_INUSE);
504
505		/* wakeup all threads waiting on commands */
506		for (i = 0; i < FATM_CMD_QLEN; i++) {
507			q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, i);
508
509			H_SYNCSTAT_POSTREAD(sc, q->q.statp);
510			if ((stat = H_GETSTAT(q->q.statp)) != FATM_STAT_FREE) {
511				H_SETSTAT(q->q.statp, stat | FATM_STAT_ERROR);
512				H_SYNCSTAT_PREWRITE(sc, q->q.statp);
513				wakeup(q);
514			}
515		}
516		utopia_reset_media(&sc->utopia);
517	}
518	sc->small_cnt = sc->large_cnt = 0;
519
520	/* Reset vcc info */
521	if (sc->vccs != NULL)
522		for (i = 0; i <= FORE_MAX_VCC; i++)
523			sc->vccs[i].flags = 0;
524
525	sc->open_vccs = 0;
526}
527
528/*
529 * Load the firmware into the board and save the entry point.
530 */
531static uint32_t
532firmware_load(struct fatm_softc *sc)
533{
534	struct firmware *fw = (struct firmware *)firmware;
535
536	DBG(sc, INIT, ("loading - entry=%x", fw->entry));
537	bus_space_write_region_4(sc->memt, sc->memh, fw->offset, firmware,
538	    sizeof(firmware) / sizeof(firmware[0]));
539	BARRIER_RW(sc);
540
541	return (fw->entry);
542}
543
544/*
545 * Read a character from the virtual UART. The availability of a character
546 * is signaled by a non-zero value of the 32 bit register. We signal
547 * to the card that we have consumed the character by setting that
548 * register to zero.
549 */
550static int
551rx_getc(struct fatm_softc *sc)
552{
553	int w = 50;
554	int c;
555
556	while (w--) {
557		c = READ4(sc, FATMO_UART_TO_HOST);
558		BARRIER_RW(sc);
559		if (c != 0) {
560			WRITE4(sc, FATMO_UART_TO_HOST, 0);
561			DBGC(sc, UART, ("%c", c & 0xff));
562			return (c & 0xff);
563		}
564		DELAY(1000);
565	}
566	return (-1);
567}
568
569/*
570 * Eat up characters from the board and stuff them in the bit-bucket.
571 */
572static void
573rx_flush(struct fatm_softc *sc)
574{
575	int w = 10000;
576
577	while (w-- && rx_getc(sc) >= 0)
578		;
579}
580
581/*
582 * Write a character to the card. The UART is available if the register
583 * is zero.
584 */
585static int
586tx_putc(struct fatm_softc *sc, u_char c)
587{
588	int w = 10;
589	int c1;
590
591	while (w--) {
592		c1 = READ4(sc, FATMO_UART_TO_960);
593		BARRIER_RW(sc);
594		if (c1 == 0) {
595			WRITE4(sc, FATMO_UART_TO_960, c | CHAR_AVAIL);
596			DBGC(sc, UART, ("%c", c & 0xff));
597			return (0);
598		}
599		DELAY(1000);
600	}
601	return (-1);
602}
603
604/*
605 * Start the firmware. This is done by issuing a 'go' command with
606 * the hex entry address of the firmware. Then we wait for the self-test to
607 * succeed.
608 */
609static int
610fatm_start_firmware(struct fatm_softc *sc, uint32_t start)
611{
612	static char hex[] = "0123456789abcdef";
613	u_int w, val;
614
615	DBG(sc, INIT, ("starting"));
616	rx_flush(sc);
617	tx_putc(sc, '\r');
618	DELAY(1000);
619
620	rx_flush(sc);
621
622	tx_putc(sc, 'g');
623	(void)rx_getc(sc);
624	tx_putc(sc, 'o');
625	(void)rx_getc(sc);
626	tx_putc(sc, ' ');
627	(void)rx_getc(sc);
628
629	tx_putc(sc, hex[(start >> 12) & 0xf]);
630	(void)rx_getc(sc);
631	tx_putc(sc, hex[(start >>  8) & 0xf]);
632	(void)rx_getc(sc);
633	tx_putc(sc, hex[(start >>  4) & 0xf]);
634	(void)rx_getc(sc);
635	tx_putc(sc, hex[(start >>  0) & 0xf]);
636	(void)rx_getc(sc);
637
638	tx_putc(sc, '\r');
639	rx_flush(sc);
640
641	for (w = 100; w; w--) {
642		BARRIER_R(sc);
643		val = READ4(sc, FATMO_BOOT_STATUS);
644		switch (val) {
645		  case CP_RUNNING:
646			return (0);
647		  case SELF_TEST_FAIL:
648			return (EIO);
649		}
650		DELAY(1000);
651	}
652	return (EIO);
653}
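/*
 * For illustration: if the firmware entry point were 0x4000, the
 * exchange above would amount to typing the monitor command
 *
 *	go 4000<CR>
 *
 * one character at a time into the virtual UART, reading back one
 * character after each one sent (rx_getc()). The address here is
 * hypothetical; the real one comes from firmware_load().
 */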
654
655/*
656 * Initialize one card and host queue.
657 */
658static void
659init_card_queue(struct fatm_softc *sc, struct fqueue *queue, int qlen,
660    size_t qel_size, size_t desc_size, cardoff_t off,
661    u_char **statpp, uint32_t *cardstat, u_char *descp, uint32_t carddesc)
662{
663	struct fqelem *el = queue->chunk;
664
665	while (qlen--) {
666		el->card = off;
667		off += 8;	/* size of card entry */
668
669		el->statp = (uint32_t *)(*statpp);
670		(*statpp) += sizeof(uint32_t);
671		H_SETSTAT(el->statp, FATM_STAT_FREE);
672		H_SYNCSTAT_PREWRITE(sc, el->statp);
673
674		WRITE4(sc, el->card + FATMOS_STATP, (*cardstat));
675		(*cardstat) += sizeof(uint32_t);
676
677		el->ioblk = descp;
678		descp += desc_size;
679		el->card_ioblk = carddesc;
680		carddesc += desc_size;
681
682		el = (struct fqelem *)((u_char *)el + qel_size);
683	}
684	queue->tail = queue->head = 0;
685}
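/*
 * After this loop each host queue element refers to three things: its
 * card-side entry (8 bytes apart on the card), a 32-bit status word
 * shared with the card and an I/O block for descriptors. A sketch of
 * element n (base names illustrative):
 *
 *	el->card       = off_base + n * 8
 *	el->statp      = status_base + n	(set to FATM_STAT_FREE)
 *	el->ioblk      = descp_base + n * desc_size
 *	el->card_ioblk = carddesc_base + n * desc_size
 */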
686
687/*
688 * Issue the initialize operation to the card, wait for completion and
689 * initialize the on-board and host queue structures with offsets and
690 * addresses.
691 */
692static int
693fatm_init_cmd(struct fatm_softc *sc)
694{
695	int w, c;
696	u_char *statp;
697	uint32_t card_stat;
698	u_int cnt;
699	struct fqelem *el;
700	cardoff_t off;
701
702	DBG(sc, INIT, ("command"));
703	WRITE4(sc, FATMO_ISTAT, 0);
704	WRITE4(sc, FATMO_IMASK, 1);
705	WRITE4(sc, FATMO_HLOGGER, 0);
706
707	WRITE4(sc, FATMO_INIT + FATMOI_RECEIVE_TRESHOLD, 0);
708	WRITE4(sc, FATMO_INIT + FATMOI_NUM_CONNECT, FORE_MAX_VCC);
709	WRITE4(sc, FATMO_INIT + FATMOI_CQUEUE_LEN, FATM_CMD_QLEN);
710	WRITE4(sc, FATMO_INIT + FATMOI_TQUEUE_LEN, FATM_TX_QLEN);
711	WRITE4(sc, FATMO_INIT + FATMOI_RQUEUE_LEN, FATM_RX_QLEN);
712	WRITE4(sc, FATMO_INIT + FATMOI_RPD_EXTENSION, RPD_EXTENSIONS);
713	WRITE4(sc, FATMO_INIT + FATMOI_TPD_EXTENSION, TPD_EXTENSIONS);
714
715	/*
716	 * initialize buffer descriptors
717	 */
718	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_QUEUE_LENGTH,
719	    SMALL_SUPPLY_QLEN);
720	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_BUFFER_SIZE,
721	    SMALL_BUFFER_LEN);
722	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_POOL_SIZE,
723	    SMALL_POOL_SIZE);
724	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_SUPPLY_BLKSIZE,
725	    SMALL_SUPPLY_BLKSIZE);
726
727	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_QUEUE_LENGTH,
728	    LARGE_SUPPLY_QLEN);
729	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_BUFFER_SIZE,
730	    LARGE_BUFFER_LEN);
731	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_POOL_SIZE,
732	    LARGE_POOL_SIZE);
733	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_SUPPLY_BLKSIZE,
734	    LARGE_SUPPLY_BLKSIZE);
735
736	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_QUEUE_LENGTH, 0);
737	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_BUFFER_SIZE, 0);
738	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_POOL_SIZE, 0);
739	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_SUPPLY_BLKSIZE, 0);
740
741	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_QUEUE_LENGTH, 0);
742	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_BUFFER_SIZE, 0);
743	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_POOL_SIZE, 0);
744	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_SUPPLY_BLKSIZE, 0);
745
746	/*
747	 * Start the command
748	 */
749	BARRIER_W(sc);
750	WRITE4(sc, FATMO_INIT + FATMOI_STATUS, FATM_STAT_PENDING);
751	BARRIER_W(sc);
752	WRITE4(sc, FATMO_INIT + FATMOI_OP, FATM_OP_INITIALIZE);
753	BARRIER_W(sc);
754
755	/*
756	 * Busy wait for completion
757	 */
758	w = 100;
759	while (w--) {
760		c = READ4(sc, FATMO_INIT + FATMOI_STATUS);
761		BARRIER_R(sc);
762		if (c & FATM_STAT_COMPLETE)
763			break;
764		DELAY(1000);
765	}
766
767	if (!(c & FATM_STAT_COMPLETE) || (c & FATM_STAT_ERROR))
768		return (EIO);
769
770	/*
771	 * Initialize the queues
772	 */
773	statp = sc->stat_mem.mem;
774	card_stat = sc->stat_mem.paddr;
775
776	/*
777	 * Command queue. This is special in that it's on the card.
778	 */
779	el = sc->cmdqueue.chunk;
780	off = READ4(sc, FATMO_COMMAND_QUEUE);
781	DBG(sc, INIT, ("cmd queue=%x", off));
782	for (cnt = 0; cnt < FATM_CMD_QLEN; cnt++) {
783		el = &((struct cmdqueue *)sc->cmdqueue.chunk + cnt)->q;
784
785		el->card = off;
786		off += 32;		/* size of card structure */
787
788		el->statp = (uint32_t *)statp;
789		statp += sizeof(uint32_t);
790		H_SETSTAT(el->statp, FATM_STAT_FREE);
791		H_SYNCSTAT_PREWRITE(sc, el->statp);
792
793		WRITE4(sc, el->card + FATMOC_STATP, card_stat);
794		card_stat += sizeof(uint32_t);
795	}
796	sc->cmdqueue.tail = sc->cmdqueue.head = 0;
797
798	/*
799	 * Now the other queues. These are in memory
800	 */
801	init_card_queue(sc, &sc->txqueue, FATM_TX_QLEN,
802	    sizeof(struct txqueue), TPD_SIZE,
803	    READ4(sc, FATMO_TRANSMIT_QUEUE),
804	    &statp, &card_stat, sc->txq_mem.mem, sc->txq_mem.paddr);
805
806	init_card_queue(sc, &sc->rxqueue, FATM_RX_QLEN,
807	    sizeof(struct rxqueue), RPD_SIZE,
808	    READ4(sc, FATMO_RECEIVE_QUEUE),
809	    &statp, &card_stat, sc->rxq_mem.mem, sc->rxq_mem.paddr);
810
811	init_card_queue(sc, &sc->s1queue, SMALL_SUPPLY_QLEN,
812	    sizeof(struct supqueue), BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE),
813	    READ4(sc, FATMO_SMALL_B1_QUEUE),
814	    &statp, &card_stat, sc->s1q_mem.mem, sc->s1q_mem.paddr);
815
816	init_card_queue(sc, &sc->l1queue, LARGE_SUPPLY_QLEN,
817	    sizeof(struct supqueue), BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE),
818	    READ4(sc, FATMO_LARGE_B1_QUEUE),
819	    &statp, &card_stat, sc->l1q_mem.mem, sc->l1q_mem.paddr);
820
821	sc->txcnt = 0;
822
823	return (0);
824}
825
826/*
827 * Read PROM. Called only from attach code. Here we spin because the interrupt
828 * handler is not yet set up.
829 */
830static int
831fatm_getprom(struct fatm_softc *sc)
832{
833	int i;
834	struct prom *prom;
835	struct cmdqueue *q;
836
837	DBG(sc, INIT, ("reading prom"));
838	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
839	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
840
841	q->error = 0;
842	q->cb = NULL;
843	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
844	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
845
846	bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
847	    BUS_DMASYNC_PREREAD);
848
849	WRITE4(sc, q->q.card + FATMOC_GPROM_BUF, sc->prom_mem.paddr);
850	BARRIER_W(sc);
851	WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_GET_PROM_DATA);
852	BARRIER_W(sc);
853
854	for (i = 0; i < 1000; i++) {
855		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
856		if (H_GETSTAT(q->q.statp) &
857		    (FATM_STAT_COMPLETE | FATM_STAT_ERROR))
858			break;
859		DELAY(1000);
860	}
861	if (i == 1000) {
862		if_printf(&sc->ifatm.ifnet, "getprom timeout\n");
863		return (EIO);
864	}
865	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
866	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
867		if_printf(&sc->ifatm.ifnet, "getprom error\n");
868		return (EIO);
869	}
870	H_SETSTAT(q->q.statp, FATM_STAT_FREE);
871	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
872	NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
873
874	bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
875	    BUS_DMASYNC_POSTREAD);
876
878#ifdef notdef
879	{
880		u_int i;
881		u_char *ptr = (u_char *)sc->prom_mem.mem;
882
883		printf("PROM: ");
884		for (i = 0; i < sizeof(struct prom); i++)
885			printf("%02x ", *ptr++);
886		printf("\n");
887	}
888#endif
889
890	prom = (struct prom *)sc->prom_mem.mem;
891
892	bcopy(prom->mac + 2, sc->ifatm.mib.esi, 6);
893	sc->ifatm.mib.serial = le32toh(prom->serial);
894	sc->ifatm.mib.hw_version = le32toh(prom->version);
895	sc->ifatm.mib.sw_version = READ4(sc, FATMO_FIRMWARE_RELEASE);
896
897	if_printf(&sc->ifatm.ifnet, "ESI=%02x:%02x:%02x:%02x:%02x:%02x "
898	    "serial=%u hw=0x%x sw=0x%x\n", sc->ifatm.mib.esi[0],
899	    sc->ifatm.mib.esi[1], sc->ifatm.mib.esi[2], sc->ifatm.mib.esi[3],
900	    sc->ifatm.mib.esi[4], sc->ifatm.mib.esi[5], sc->ifatm.mib.serial,
901	    sc->ifatm.mib.hw_version, sc->ifatm.mib.sw_version);
902
903	return (0);
904}
905
906/*
907 * This is the callback function for bus_dmamap_load. We assume that we
908 * have a 32-bit bus and therefore always exactly one segment.
909 */
910static void
911dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
912{
913	bus_addr_t *ptr = (bus_addr_t *)arg;
914
915	if (error != 0) {
916		printf("%s: error=%d\n", __func__, error);
917		return;
918	}
919	KASSERT(nsegs == 1, ("too many DMA segments"));
920	KASSERT(segs[0].ds_addr <= 0xffffffff, ("DMA address too large %lx",
921	    (u_long)segs[0].ds_addr));
922
923	*ptr = segs[0].ds_addr;
924}
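/*
 * Typical use of the helper (this is how alloc_dma_memory() below and
 * the mbuf loading code call it): pass the address of a bus_addr_t as
 * the callback argument and the helper stores the bus address of the
 * single segment there:
 *
 *	bus_addr_t phys;
 *
 *	error = bus_dmamap_load(tag, map, vaddr, len,
 *	    dmaload_helper, &phys, BUS_DMA_NOWAIT);
 *
 * On success phys holds the bus address of the loaded buffer.
 */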
925
926/*
927 * Allocate a chunk of DMA-able memory and map it.
928 */
929static int
930alloc_dma_memory(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
931{
932	int error;
933
934	mem->mem = NULL;
935
936	if (bus_dma_tag_create(sc->parent_dmat, mem->align, 0,
937	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
938	    NULL, NULL, mem->size, 1, BUS_SPACE_MAXSIZE_32BIT,
939	    BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
940		if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA tag\n",
941		    nm);
942		return (ENOMEM);
943	}
944
945	error = bus_dmamem_alloc(mem->dmat, &mem->mem, 0, &mem->map);
946	if (error) {
947		if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA memory: "
948		    "%d\n", nm, error);
949		bus_dma_tag_destroy(mem->dmat);
950		mem->mem = NULL;
951		return (error);
952	}
953
954	error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
955	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
956	if (error) {
957		if_printf(&sc->ifatm.ifnet, "could not load %s DMA memory: "
958		    "%d\n", nm, error);
959		bus_dmamem_free(mem->dmat, mem->mem, mem->map);
960		bus_dma_tag_destroy(mem->dmat);
961		mem->mem = NULL;
962		return (error);
963	}
964
965	DBG(sc, DMA, ("DMA %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
966	    (u_long)mem->paddr, mem->size, mem->align));
967
968	return (0);
969}
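/*
 * Callers (the attach code, which is not part of this section) are
 * expected to fill in mem->size and mem->align before the call. A
 * minimal sketch with illustrative values and name:
 *
 *	sc->prom_mem.size  = sizeof(struct prom);
 *	sc->prom_mem.align = 4;
 *	if (alloc_dma_memory(sc, "PROM", &sc->prom_mem) != 0)
 *		goto fail;
 *
 * On success mem->mem holds the kernel virtual and mem->paddr the bus
 * address of the chunk; destroy_dma_memory() releases all resources.
 */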
970
971#ifdef TEST_DMA_SYNC
972static int
973alloc_dma_memoryX(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
974{
975	int error;
976
977	mem->mem = NULL;
978
979	if (bus_dma_tag_create(NULL, mem->align, 0,
980	    BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR,
981	    NULL, NULL, mem->size, 1, mem->size,
982	    BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
983		if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA tag\n",
984		    nm);
985		return (ENOMEM);
986	}
987
988	mem->mem = contigmalloc(mem->size, M_DEVBUF, M_WAITOK,
989	    BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR_32BIT, mem->align, 0);
990
991	error = bus_dmamap_create(mem->dmat, 0, &mem->map);
992	if (error) {
993		if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA map: "
994		    "%d\n", nm, error);
995		contigfree(mem->mem, mem->size, M_DEVBUF);
996		bus_dma_tag_destroy(mem->dmat);
997		mem->mem = NULL;
998		return (error);
999	}
1000
1001	error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
1002	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
1003	if (error) {
1004		if_printf(&sc->ifatm.ifnet, "could not load %s DMA memory: "
1005		    "%d\n", nm, error);
1006		bus_dmamap_destroy(mem->dmat, mem->map);
1007		contigfree(mem->mem, mem->size, M_DEVBUF);
1008		bus_dma_tag_destroy(mem->dmat);
1009		mem->mem = NULL;
1010		return (error);
1011	}
1012
1013	DBG(sc, DMA, ("DMAX %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
1014	    (u_long)mem->paddr, mem->size, mem->align));
1015
1016	printf("DMAX: %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
1017	    (u_long)mem->paddr, mem->size, mem->align);
1018
1019	return (0);
1020}
1021#endif /* TEST_DMA_SYNC */
1022
1023/*
1024 * Destroy all resources of a DMA-able memory chunk
1025 */
1026static void
1027destroy_dma_memory(struct fatm_mem *mem)
1028{
1029	if (mem->mem != NULL) {
1030		bus_dmamap_unload(mem->dmat, mem->map);
1031		bus_dmamem_free(mem->dmat, mem->mem, mem->map);
1032		bus_dma_tag_destroy(mem->dmat);
1033		mem->mem = NULL;
1034	}
1035}
1036#ifdef TEST_DMA_SYNC
1037static void
1038destroy_dma_memoryX(struct fatm_mem *mem)
1039{
1040	if (mem->mem != NULL) {
1041		bus_dmamap_unload(mem->dmat, mem->map);
1042		bus_dmamap_destroy(mem->dmat, mem->map);
1043		contigfree(mem->mem, mem->size, M_DEVBUF);
1044		bus_dma_tag_destroy(mem->dmat);
1045		mem->mem = NULL;
1046	}
1047}
1048#endif /* TEST_DMA_SYNC */
1049
1050/*
1051 * Try to supply buffers to the card if there are free entries in the queues
1052 */
1053static void
1054fatm_supply_small_buffers(struct fatm_softc *sc)
1055{
1056	int nblocks, nbufs;
1057	struct supqueue *q;
1058	struct rbd *bd;
1059	int i, j, error, cnt;
1060	struct mbuf *m;
1061	struct rbuf *rb;
1062	bus_addr_t phys;
1063
1064	nbufs = max(4 * sc->open_vccs, 32);
1065	nbufs = min(nbufs, SMALL_POOL_SIZE);
1066	nbufs -= sc->small_cnt;
1067
1068	nblocks = (nbufs + SMALL_SUPPLY_BLKSIZE - 1) / SMALL_SUPPLY_BLKSIZE;
1069	for (cnt = 0; cnt < nblocks; cnt++) {
1070		q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.head);
1071
1072		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1073		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
1074			break;
1075
1076		bd = (struct rbd *)q->q.ioblk;
1077
1078		for (i = 0; i < SMALL_SUPPLY_BLKSIZE; i++) {
1079			if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
1080				if_printf(&sc->ifatm.ifnet, "out of rbufs\n");
1081				break;
1082			}
1083			MGETHDR(m, M_DONTWAIT, MT_DATA);
1084			if (m == NULL) {
1085				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1086				break;
1087			}
1088			MH_ALIGN(m, SMALL_BUFFER_LEN);
1089			error = bus_dmamap_load(sc->rbuf_tag, rb->map,
1090			    m->m_data, SMALL_BUFFER_LEN, dmaload_helper,
1091			    &phys, BUS_DMA_NOWAIT);
1092			if (error) {
1093				if_printf(&sc->ifatm.ifnet,
1094				    "dmamap_load mbuf failed %d\n", error);
1095				m_freem(m);
1096				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1097				break;
1098			}
1099			bus_dmamap_sync(sc->rbuf_tag, rb->map,
1100			    BUS_DMASYNC_PREREAD);
1101
1102			LIST_REMOVE(rb, link);
1103			LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);
1104
1105			rb->m = m;
1106			bd[i].handle = rb - sc->rbufs;
1107			H_SETDESC(bd[i].buffer, phys);
1108		}
1109
1110		if (i < SMALL_SUPPLY_BLKSIZE) {
1111			for (j = 0; j < i; j++) {
1112				rb = sc->rbufs + bd[j].handle;
1113				bus_dmamap_unload(sc->rbuf_tag, rb->map);
1114				m_free(rb->m);
1115				rb->m = NULL;
1116
1117				LIST_REMOVE(rb, link);
1118				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1119			}
1120			break;
1121		}
1122		H_SYNCQ_PREWRITE(&sc->s1q_mem, bd,
1123		    sizeof(struct rbd) * SMALL_SUPPLY_BLKSIZE);
1124
1125		H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1126		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1127
1128		WRITE4(sc, q->q.card, q->q.card_ioblk);
1129		BARRIER_W(sc);
1130
1131		sc->small_cnt += SMALL_SUPPLY_BLKSIZE;
1132
1133		NEXT_QUEUE_ENTRY(sc->s1queue.head, SMALL_SUPPLY_QLEN);
1134	}
1135}
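/*
 * The handle written into each receive buffer descriptor above is
 * simply the index of the rbuf in the sc->rbufs array, so the receive
 * path can map a completed descriptor back to its mbuf without any
 * search (see fatm_intr_drain_rx() below):
 *
 *	bd[i].handle = rb - sc->rbufs;			supply side
 *	rb = sc->rbufs + rpd->segment[i].handle;	completion side
 */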
1136
1137/*
1138 * Try to supply buffers to the card if there are free entries in the queues
1139 * We assume that all buffers are within the address space accessible by the
1140 * card (32-bit), so we don't need bounce buffers.
1141 */
1142static void
1143fatm_supply_large_buffers(struct fatm_softc *sc)
1144{
1145	int nbufs, nblocks, cnt;
1146	struct supqueue *q;
1147	struct rbd *bd;
1148	int i, j, error;
1149	struct mbuf *m;
1150	struct rbuf *rb;
1151	bus_addr_t phys;
1152
1153	nbufs = max(4 * sc->open_vccs, 32);
1154	nbufs = min(nbufs, LARGE_POOL_SIZE);
1155	nbufs -= sc->large_cnt;
1156
1157	nblocks = (nbufs + LARGE_SUPPLY_BLKSIZE - 1) / LARGE_SUPPLY_BLKSIZE;
1158
1159	for (cnt = 0; cnt < nblocks; cnt++) {
1160		q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.head);
1161
1162		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1163		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
1164			break;
1165
1166		bd = (struct rbd *)q->q.ioblk;
1167
1168		for (i = 0; i < LARGE_SUPPLY_BLKSIZE; i++) {
1169			if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
1170				if_printf(&sc->ifatm.ifnet, "out of rbufs\n");
1171				break;
1172			}
1173			if ((m = m_getcl(M_DONTWAIT, MT_DATA,
1174			    M_PKTHDR)) == NULL) {
1175				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1176				break;
1177			}
1178			/* No MEXT_ALIGN */
1179			m->m_data += MCLBYTES - LARGE_BUFFER_LEN;
1180			error = bus_dmamap_load(sc->rbuf_tag, rb->map,
1181			    m->m_data, LARGE_BUFFER_LEN, dmaload_helper,
1182			    &phys, BUS_DMA_NOWAIT);
1183			if (error) {
1184				if_printf(&sc->ifatm.ifnet,
1185				    "dmamap_load mbuf failed %d\n", error);
1186				m_freem(m);
1187				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1188				break;
1189			}
1190
1191			bus_dmamap_sync(sc->rbuf_tag, rb->map,
1192			    BUS_DMASYNC_PREREAD);
1193
1194			LIST_REMOVE(rb, link);
1195			LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);
1196
1197			rb->m = m;
1198			bd[i].handle = rb - sc->rbufs;
1199			H_SETDESC(bd[i].buffer, phys);
1200		}
1201
1202		if (i < LARGE_SUPPLY_BLKSIZE) {
1203			for (j = 0; j < i; j++) {
1204				rb = sc->rbufs + bd[j].handle;
1205				bus_dmamap_unload(sc->rbuf_tag, rb->map);
1206				m_free(rb->m);
1207				rb->m = NULL;
1208
1209				LIST_REMOVE(rb, link);
1210				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1211			}
1212			break;
1213		}
1214		H_SYNCQ_PREWRITE(&sc->l1q_mem, bd,
1215		    sizeof(struct rbd) * LARGE_SUPPLY_BLKSIZE);
1216
1217		H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1218		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1219		WRITE4(sc, q->q.card, q->q.card_ioblk);
1220		BARRIER_W(sc);
1221
1222		sc->large_cnt += LARGE_SUPPLY_BLKSIZE;
1223
1224		NEXT_QUEUE_ENTRY(sc->l1queue.head, LARGE_SUPPLY_QLEN);
1225	}
1226}
1227
1229/*
1230 * Actually start the card. The lock must be held here.
1231 * Reset the card, load the firmware, start it, initialize the queues,
1232 * read the PROM and supply receive buffers to the card.
1233 */
1234static void
1235fatm_init_locked(struct fatm_softc *sc)
1236{
1237	struct rxqueue *q;
1238	int i, c;
1239	uint32_t start;
1240
1241	DBG(sc, INIT, ("initialize"));
1242	if (sc->ifatm.ifnet.if_flags & IFF_RUNNING)
1243		fatm_stop(sc);
1244
1245	/*
1246	 * Hard reset the board
1247	 */
1248	if (fatm_reset(sc))
1249		return;
1250
1251	start = firmware_load(sc);
1252	if (fatm_start_firmware(sc, start) || fatm_init_cmd(sc) ||
1253	    fatm_getprom(sc)) {
1254		fatm_reset(sc);
1255		return;
1256	}
1257
1258	/*
1259	 * Handle media
1260	 */
1261	c = READ4(sc, FATMO_MEDIA_TYPE);
1262	switch (c) {
1263
1264	  case FORE_MT_TAXI_100:
1265		sc->ifatm.mib.media = IFM_ATM_TAXI_100;
1266		sc->ifatm.mib.pcr = 227273;
1267		break;
1268
1269	  case FORE_MT_TAXI_140:
1270		sc->ifatm.mib.media = IFM_ATM_TAXI_140;
1271		sc->ifatm.mib.pcr = 318181;
1272		break;
1273
1274	  case FORE_MT_UTP_SONET:
1275		sc->ifatm.mib.media = IFM_ATM_UTP_155;
1276		sc->ifatm.mib.pcr = 353207;
1277		break;
1278
1279	  case FORE_MT_MM_OC3_ST:
1280	  case FORE_MT_MM_OC3_SC:
1281		sc->ifatm.mib.media = IFM_ATM_MM_155;
1282		sc->ifatm.mib.pcr = 353207;
1283		break;
1284
1285	  case FORE_MT_SM_OC3_ST:
1286	  case FORE_MT_SM_OC3_SC:
1287		sc->ifatm.mib.media = IFM_ATM_SM_155;
1288		sc->ifatm.mib.pcr = 353207;
1289		break;
1290
1291	  default:
1292		log(LOG_ERR, "fatm: unknown media type %d\n", c);
1293		sc->ifatm.mib.media = IFM_ATM_UNKNOWN;
1294		sc->ifatm.mib.pcr = 353207;
1295		break;
1296	}
1297	sc->ifatm.ifnet.if_baudrate = 53 * 8 * sc->ifatm.mib.pcr;
1298	utopia_init_media(&sc->utopia);
1299
1300	/*
1301	 * Initialize the RBDs
1302	 */
1303	for (i = 0; i < FATM_RX_QLEN; i++) {
1304		q = GET_QUEUE(sc->rxqueue, struct rxqueue, i);
1305		WRITE4(sc, q->q.card + 0, q->q.card_ioblk);
1306	}
1307	BARRIER_W(sc);
1308
1309	/*
1310	 * Supply buffers to the card
1311	 */
1312	fatm_supply_small_buffers(sc);
1313	fatm_supply_large_buffers(sc);
1314
1315	/*
1316	 * Now set the flag that we are ready
1317	 */
1318	sc->ifatm.ifnet.if_flags |= IFF_RUNNING;
1319
1320	/*
1321	 * Start the watchdog timer
1322	 */
1323	sc->ifatm.ifnet.if_timer = 5;
1324
1325	/* start SUNI */
1326	utopia_start(&sc->utopia);
1327
1328	DBG(sc, INIT, ("done"));
1329}
1330
1331/*
1332 * This is exported as the initialisation function.
1333 */
1334static void
1335fatm_init(void *p)
1336{
1337	struct fatm_softc *sc = p;
1338
1339	FATM_LOCK(sc);
1340	fatm_init_locked(sc);
1341	FATM_UNLOCK(sc);
1342}
1343
1344/************************************************************/
1345/*
1346 * The INTERRUPT handling
1347 */
1348/*
1349 * Check the command queue. If a command was completed, call the completion
1350 * function for that command.
1351 */
1352static void
1353fatm_intr_drain_cmd(struct fatm_softc *sc)
1354{
1355	struct cmdqueue *q;
1356	int stat;
1357
1358	/*
1359	 * Drain command queue
1360	 */
1361	for (;;) {
1362		q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.tail);
1363
1364		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1365		stat = H_GETSTAT(q->q.statp);
1366
1367		if (stat != FATM_STAT_COMPLETE &&
1368		    stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
1369		    stat != FATM_STAT_ERROR)
1370			break;
1371
1372		(*q->cb)(sc, q);
1373
1374		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1375		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1376
1377		NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
1378	}
1379}
1380
1381/*
1382 * Drain the small buffer supply queue.
1383 */
1384static void
1385fatm_intr_drain_small_buffers(struct fatm_softc *sc)
1386{
1387	struct supqueue *q;
1388	int stat;
1389
1390	for (;;) {
1391		q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.tail);
1392
1393		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1394		stat = H_GETSTAT(q->q.statp);
1395
1396		if ((stat & FATM_STAT_COMPLETE) == 0)
1397			break;
1398		if (stat & FATM_STAT_ERROR)
1399			log(LOG_ERR, "%s: status %x\n", __func__, stat);
1400
1401		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1402		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1403
1404		NEXT_QUEUE_ENTRY(sc->s1queue.tail, SMALL_SUPPLY_QLEN);
1405	}
1406}
1407
1408/*
1409 * Drain the large buffer supply queue.
1410 */
1411static void
1412fatm_intr_drain_large_buffers(struct fatm_softc *sc)
1413{
1414	struct supqueue *q;
1415	int stat;
1416
1417	for (;;) {
1418		q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.tail);
1419
1420		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1421		stat = H_GETSTAT(q->q.statp);
1422
1423		if ((stat & FATM_STAT_COMPLETE) == 0)
1424			break;
1425		if (stat & FATM_STAT_ERROR)
1426			log(LOG_ERR, "%s: status %x\n", __func__, stat);
1427
1428		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1429		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1430
1431		NEXT_QUEUE_ENTRY(sc->l1queue.tail, LARGE_SUPPLY_QLEN);
1432	}
1433}
1434
1435/*
1436 * Check the receive queue. Send any received PDU up the protocol stack
1437 * (except when there was an error or the VCI appears to be closed; in
1438 * that case the PDU is discarded).
1439 */
1440static void
1441fatm_intr_drain_rx(struct fatm_softc *sc)
1442{
1443	struct rxqueue *q;
1444	int stat, mlen, drop;
1445	u_int i;
1446	uint32_t h;
1447	struct mbuf *last, *m0;
1448	struct rpd *rpd;
1449	struct rbuf *rb;
1450	u_int vci, vpi, pt;
1451	struct atm_pseudohdr aph;
1452	struct ifnet *ifp;
1453
1454	for (;;) {
1455		q = GET_QUEUE(sc->rxqueue, struct rxqueue, sc->rxqueue.tail);
1456
1457		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1458		stat = H_GETSTAT(q->q.statp);
1459
1460		if ((stat & FATM_STAT_COMPLETE) == 0)
1461			break;
1462
1463		rpd = (struct rpd *)q->q.ioblk;
1464		H_SYNCQ_POSTREAD(&sc->rxq_mem, rpd, RPD_SIZE);
1465
1466		rpd->nseg = le32toh(rpd->nseg);
1467		drop = 0;
1468		mlen = 0;
1469		m0 = last = NULL;
1470		for (i = 0; i < rpd->nseg; i++) {
1471			rb = sc->rbufs + rpd->segment[i].handle;
1472			if (m0 == NULL) {
1473				m0 = last = rb->m;
1474			} else {
1475				last->m_next = rb->m;
1476				last = rb->m;
1477			}
1478			last->m_next = NULL;
1479			if (last->m_flags & M_EXT)
1480				sc->large_cnt--;
1481			else
1482				sc->small_cnt--;
1483			bus_dmamap_sync(sc->rbuf_tag, rb->map,
1484			    BUS_DMASYNC_POSTREAD);
1485			bus_dmamap_unload(sc->rbuf_tag, rb->map);
1486			rb->m = NULL;
1487
1488			LIST_REMOVE(rb, link);
1489			LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1490
1491			last->m_len = le32toh(rpd->segment[i].length);
1492			mlen += last->m_len;
1493		}
1494
1495		m0->m_pkthdr.len = mlen;
1496		m0->m_pkthdr.rcvif = &sc->ifatm.ifnet;
1497
1498		h = le32toh(rpd->atm_header);
1499		vpi = (h >> 20) & 0xff;
1500		vci = (h >> 4 ) & 0xffff;
1501		pt  = (h >> 1 ) & 0x7;
1502
1503		/*
1504		 * Locate the VCC this packet belongs to
1505		 */
1506		if (!VC_OK(sc, vpi, vci))
1507			drop = 1;
1508		else if ((sc->vccs[vci].flags & FATM_VCC_OPEN) == 0) {
1509			sc->istats.rx_closed++;
1510			drop = 1;
1511		}
1512
1513		DBG(sc, RCV, ("RCV: vc=%u.%u pt=%u mlen=%d %s", vpi, vci,
1514		    pt, mlen, drop ? "dropped" : ""));
1515
1516		if (drop) {
1517			m_freem(m0);
1518		} else {
1519			ATM_PH_FLAGS(&aph) = sc->vccs[vci].flags & 0xff;
1520			ATM_PH_VPI(&aph) = vpi;
1521			ATM_PH_SETVCI(&aph, vci);
1522
1523			ifp = &sc->ifatm.ifnet;
1524			ifp->if_ipackets++;
1525
1526			atm_input(ifp, &aph, m0, sc->vccs[vci].rxhand);
1527		}
1528
1529		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1530		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1531
1532		WRITE4(sc, q->q.card, q->q.card_ioblk);
1533		BARRIER_W(sc);
1534
1535		NEXT_QUEUE_ENTRY(sc->rxqueue.tail, FATM_RX_QLEN);
1536	}
1537}
1538
1539/*
1540 * Check the transmit queue. Free the mbuf chains that we were transmitting.
1541 */
1542static void
1543fatm_intr_drain_tx(struct fatm_softc *sc)
1544{
1545	struct txqueue *q;
1546	int stat;
1547
1548	/*
1549	 * Drain tx queue
1550	 */
1551	for (;;) {
1552		q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.tail);
1553
1554		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1555		stat = H_GETSTAT(q->q.statp);
1556
1557		if (stat != FATM_STAT_COMPLETE &&
1558		    stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
1559		    stat != FATM_STAT_ERROR)
1560			break;
1561
1562		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1563		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1564
1565		bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_POSTWRITE);
1566		bus_dmamap_unload(sc->tx_tag, q->map);
1567
1568		m_freem(q->m);
1569		q->m = NULL;
1570		sc->txcnt--;
1571
1572		NEXT_QUEUE_ENTRY(sc->txqueue.tail, FATM_TX_QLEN);
1573	}
1574}
1575
1576/*
1577 * Interrupt handler
1578 */
1579static void
1580fatm_intr(void *p)
1581{
1582	struct fatm_softc *sc = (struct fatm_softc *)p;
1583
1584	FATM_LOCK(sc);
1585	if (!READ4(sc, FATMO_PSR)) {
1586		FATM_UNLOCK(sc);
1587		return;
1588	}
1589	WRITE4(sc, FATMO_HCR, FATM_HCR_CLRIRQ);
1590
1591	if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) {
1592		FATM_UNLOCK(sc);
1593		return;
1594	}
1595	fatm_intr_drain_cmd(sc);
1596	fatm_intr_drain_rx(sc);
1597	fatm_intr_drain_tx(sc);
1598	fatm_intr_drain_small_buffers(sc);
1599	fatm_intr_drain_large_buffers(sc);
1600	fatm_supply_small_buffers(sc);
1601	fatm_supply_large_buffers(sc);
1602
1603	FATM_UNLOCK(sc);
1604
1605	if (sc->retry_tx && _IF_QLEN(&sc->ifatm.ifnet.if_snd))
1606		(*sc->ifatm.ifnet.if_start)(&sc->ifatm.ifnet);
1607}
1608
1609/*
1610 * Get device statistics. This must be called with the softc locked.
1611 * We use a preallocated buffer, so we need to protect this buffer.
1612 * We do this by using a condition variable and a flag. If the flag is set
1613 * the buffer is in use by one thread (one thread is executing a GETSTAT
1614 * card command). In this case all other threads that are trying to get
1615 * statistics block on that condition variable. When the thread finishes
1616 * using the buffer it resets the flag and signals the condition variable. This
1617 * will wakeup the next thread that is waiting for the buffer. If the interface
1618 * is stopped the stopping function will broadcast the cv. All threads will
1619 * find that the interface has been stopped and return.
1620 *
1621 * Acquiring of the buffer is done by the fatm_getstat() function. The
1622 * freeing must be done by the caller when it has finished using the buffer.
1623 */
1624static void
1625fatm_getstat_complete(struct fatm_softc *sc, struct cmdqueue *q)
1626{
1627
1628	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1629	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
1630		sc->istats.get_stat_errors++;
1631		q->error = EIO;
1632	}
1633	wakeup(&sc->sadi_mem);
1634}

1635static int
1636fatm_getstat(struct fatm_softc *sc)
1637{
1638	int error;
1639	struct cmdqueue *q;
1640
1641	/*
1642	 * Wait until either the interface is stopped or we can get the
1643	 * statistics buffer
1644	 */
1645	for (;;) {
1646		if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
1647			return (EIO);
1648		if (!(sc->flags & FATM_STAT_INUSE))
1649			break;
1650		cv_wait(&sc->cv_stat, &sc->mtx);
1651	}
1652	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
1653
1654	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1655	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
1656		sc->istats.cmd_queue_full++;
1657		return (EIO);
1658	}
1659	/* take the buffer only once we know the command can be issued */
1660	sc->flags |= FATM_STAT_INUSE;
1661	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
1662
1663	q->error = 0;
1664	q->cb = fatm_getstat_complete;
1665	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1666	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1667
1668	bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
1669	    BUS_DMASYNC_PREREAD);
1670
1671	WRITE4(sc, q->q.card + FATMOC_GSTAT_BUF,
1672	    sc->sadi_mem.paddr);
1673	BARRIER_W(sc);
1674	WRITE4(sc, q->q.card + FATMOC_OP,
1675	    FATM_OP_REQUEST_STATS | FATM_OP_INTERRUPT_SEL);
1676	BARRIER_W(sc);
1677
1678	/*
1679	 * Wait for the command to complete
1680	 */
1681	error = msleep(&sc->sadi_mem, &sc->mtx, PZERO | PCATCH,
1682	    "fatm_stat", hz);
1683
1684	switch (error) {
1685
1686	  case EWOULDBLOCK:
1687		error = EIO;
1688		break;
1689
1690	  case ERESTART:
1691		error = EINTR;
1692		break;
1693
1694	  case 0:
1695		bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
1696		    BUS_DMASYNC_POSTREAD);
1697		error = q->error;
1698		break;
1699	}
1700
1701	/*
1702	 * Swap statistics
1703	 */
1704	if (q->error == 0) {
1705		u_int i;
1706		uint32_t *p = (uint32_t *)sc->sadi_mem.mem;
1707
1708		for (i = 0; i < sizeof(struct fatm_stats) / sizeof(uint32_t);
1709		    i++, p++)
1710			*p = be32toh(*p);
1711	}
1712
1713	return (error);
1714}
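/*
 * As described above, fatm_getstat() only acquires the statistics
 * buffer; releasing it is up to the caller. A sketch of the expected
 * calling pattern (the real caller is the ioctl handler, which is not
 * part of this section):
 *
 *	FATM_LOCK(sc);
 *	error = fatm_getstat(sc);
 *	if (error == 0)
 *		... copy the data out of sc->sadi_mem.mem ...
 *	sc->flags &= ~FATM_STAT_INUSE;
 *	cv_signal(&sc->cv_stat);
 *	FATM_UNLOCK(sc);
 */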
1715
1716/*
1717 * Create a copy of a single mbuf. It can have either internal or
1718 * external data, it may have a packet header. External data is really
1719 * copied, so the new buffer is writeable.
1720 */
1721static struct mbuf *
1722copy_mbuf(struct mbuf *m)
1723{
1724	struct mbuf *new;
1725
1726	MGET(new, M_DONTWAIT, MT_DATA);
1727	if (new == NULL)
1728		return (NULL);
1729
1730	if (m->m_flags & M_PKTHDR) {
1731		M_MOVE_PKTHDR(new, m);
1732		if (m->m_len > MHLEN) {
1733			MCLGET(new, M_TRYWAIT);
1734			if ((new->m_flags & M_EXT) == 0) {
1735				m_free(new);
1736				return (NULL);
1737			}
1738		}
1739	} else {
1740		if (m->m_len > MLEN) {
1741			MCLGET(new, M_TRYWAIT);
1742			if ((new->m_flags & M_EXT) == 0) {
1743				m_free(new);
1744				return (NULL);
1745			}
1746		}
1747	}
1748
1749	bcopy(m->m_data, new->m_data, m->m_len);
1750	new->m_len = m->m_len;
1751	new->m_flags &= ~M_RDONLY;
1752
1753	return (new);
1754}
1755
1756/*
1757 * All segments must have a four byte aligned buffer address and a four
1758 * byte aligned length. Step through an mbuf chain and check these conditions.
1759 * If the buffer address is not aligned and this is a normal mbuf, move
1760 * the data down. Else make a copy of the mbuf with aligned data.
1761 * If the buffer length is not aligned, steal data from the next mbuf.
1762 * We don't need to check whether this has more than one external reference,
1763 * because stealing data doesn't change the external cluster.
1764 * If the last mbuf is not aligned, fill with zeroes.
1765 *
1766 * Return packet length (well we should have this in the packet header),
1767 * but be careful not to count the zero fill at the end.
1768 *
1769 * If fixing fails free the chain and zero the pointer.
1770 *
1771 * We assume that aligning the virtual address also aligns the mapped bus
1772 * address.
1773 */
1774static u_int
1775fatm_fix_chain(struct fatm_softc *sc, struct mbuf **mp)
1776{
1777	struct mbuf *m = *mp, *prev = NULL, *next, *new;
1778	u_int mlen = 0, fill = 0;
1779	int first, off;
1780	u_char *d, *cp;
1781
1782	do {
1783		next = m->m_next;
1784
1785		if ((uintptr_t)mtod(m, void *) % 4 != 0 ||
1786		   (m->m_len % 4 != 0 && next)) {
1787			/*
1788			 * Needs fixing
1789			 */
1790			first = (m == *mp);
1791
1792			d = mtod(m, u_char *);
1793			if ((off = (uintptr_t)(void *)d % 4) != 0) {
1794				if (!(m->m_flags & M_EXT) || !MEXT_IS_REF(m)) {
1795					sc->istats.fix_addr_copy++;
1796					bcopy(d, d - off, m->m_len);
1797					m->m_data = (caddr_t)(d - off);
1798				} else {
1799					if ((new = copy_mbuf(m)) == NULL) {
1800						sc->istats.fix_addr_noext++;
1801						goto fail;
1802					}
1803					sc->istats.fix_addr_ext++;
1804					if (prev)
1805						prev->m_next = new;
1806					new->m_next = next;
1807					m_free(m);
1808					m = new;
1809				}
1810			}
1811
1812			if ((off = m->m_len % 4) != 0) {
1813				if ((m->m_flags & M_EXT) && MEXT_IS_REF(m)) {
1814					if ((new = copy_mbuf(m)) == NULL) {
1815						sc->istats.fix_len_noext++;
1816						goto fail;
1817					}
1818					sc->istats.fix_len_copy++;
1819					if (prev)
1820						prev->m_next = new;
1821					new->m_next = next;
1822					m_free(m);
1823					m = new;
1824				} else
1825					sc->istats.fix_len++;
1826				d = mtod(m, u_char *) + m->m_len;
1827				off = 4 - off;
1828				while (off) {
1829					if (next == NULL) {
1830						*d++ = 0;
1831						fill++;
1832					} else if (next->m_len == 0) {
1833						sc->istats.fix_empty++;
1834						next = m_free(next);
1835						continue;
1836					} else {
1837						cp = mtod(next, u_char *);
1838						*d++ = *cp++;
1839						next->m_len--;
1840						next->m_data = (caddr_t)cp;
1841					}
1842					off--;
1843					m->m_len++;
1844				}
1845			}
1846
1847			if (first)
1848				*mp = m;
1849		}
1850
1851		mlen += m->m_len;
1852		prev = m;
1853	} while ((m = next) != NULL);
1854
1855	return (mlen - fill);
1856
1857  fail:
1858	m_freem(*mp);
1859	*mp = NULL;
1860	return (0);
1861}
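/*
 * Worked example (illustrative): take a chain of two mbufs with
 * aligned data pointers and lengths 5 and 7. The first length is not
 * a multiple of four, so three bytes are stolen from the second mbuf,
 * yielding lengths 8 and 4 and a return value of 12. A single 7-byte
 * mbuf, by contrast, gets one zero byte appended (m_len becomes 8),
 * but the function still returns 7, because the zero fill at the end
 * is not counted.
 */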
1862
1863/*
1864 * The helper function is used to load the computed physical addresses
1865 * into the transmit descriptor.
1866 */
1867static void
1868fatm_tpd_load(void *varg, bus_dma_segment_t *segs, int nsegs,
1869    bus_size_t mapsize, int error)
1870{
1871	struct tpd *tpd = varg;
1872
1873	if (error)
1874		return;
1875
1876	KASSERT(nsegs <= TPD_EXTENSIONS + TXD_FIXED, ("too many segments"));
1877
1878	tpd->spec = 0;
1879	while (nsegs--) {
1880		H_SETDESC(tpd->segment[tpd->spec].buffer, segs->ds_addr);
1881		H_SETDESC(tpd->segment[tpd->spec].length, segs->ds_len);
1882		tpd->spec++;
1883		segs++;
1884	}
1885}
1886
1887/*
1888 * Start output.
1889 *
1890 * Note that we update the internal statistics without the lock here.
1891 */
1892static int
1893fatm_tx(struct fatm_softc *sc, struct mbuf *m, u_int vpi, u_int vci, u_int mlen)
1894{
1895	struct txqueue *q;
1896	u_int nblks;
1897	int error, aal, nsegs;
1898	struct tpd *tpd;
1899
1900	/*
1901	 * Get a queue element.
1902	 * If there isn't one - try to drain the transmit queue.
1903	 * We used to sleep here if that didn't help, but we
1904	 * must not sleep here, because we are called with locks held.
1905	 */
1906	q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.head);
1907
1908	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1909	if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
1910		fatm_intr_drain_tx(sc);
1911		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1912		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
1913			if (sc->retry_tx) {
1914				sc->istats.tx_retry++;
1915				IF_PREPEND(&sc->ifatm.ifnet.if_snd, m);
1916				return (1);
1917			}
1918			sc->istats.tx_queue_full++;
1919			m_freem(m);
1920			return (0);
1921		}
1922		sc->istats.tx_queue_almost_full++;
1923	}
1924
1925	tpd = q->q.ioblk;
1926
1927	m->m_data += sizeof(struct atm_pseudohdr);
1928	m->m_len -= sizeof(struct atm_pseudohdr);
1929
1930	/* map the mbuf */
1931	error = bus_dmamap_load_mbuf(sc->tx_tag, q->map, m,
1932	    fatm_tpd_load, tpd, BUS_DMA_NOWAIT);
1933	if (error) {
1934		sc->ifatm.ifnet.if_oerrors++;
1935		if_printf(&sc->ifatm.ifnet, "mbuf load failed, error=%d\n", error);
1936		m_freem(m);
1937		return (0);
1938	}
1939	nsegs = tpd->spec;
1940
1941	bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_PREWRITE);
1942
1943	/*
1944	 * OK. Now go and do it.
1945	 */
1946	aal = (sc->vccs[vci].aal == ATMIO_AAL_5) ? 5 : 0;
1947
1948	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1949	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1950	q->m = m;
1951
1952	/*
1953	 * If the transmit queue is almost full, schedule a
1954	 * transmit interrupt so that transmit descriptors can
1955	 * be recycled.
1956	 */
1957	H_SETDESC(tpd->spec, TDX_MKSPEC((sc->txcnt >=
1958	    (4 * FATM_TX_QLEN) / 5), aal, nsegs, mlen));
1959	H_SETDESC(tpd->atm_header, TDX_MKHDR(vpi, vci, 0, 0));
1960
1961	if (sc->vccs[vci].traffic == ATMIO_TRAFFIC_UBR)
1962		H_SETDESC(tpd->stream, 0);
1963	else {
1964		u_int i;
1965
1966		for (i = 0; i < RATE_TABLE_SIZE; i++)
1967			if (rate_table[i].cell_rate < sc->vccs[vci].pcr)
1968				break;
1969		if (i > 0)
1970			i--;
1971		H_SETDESC(tpd->stream, rate_table[i].ratio);
1972	}
1973	H_SYNCQ_PREWRITE(&sc->txq_mem, tpd, TPD_SIZE);
1974
1975	nblks = TDX_SEGS2BLKS(nsegs);
1976
1977	DBG(sc, XMIT, ("XMIT: mlen=%d spec=0x%x nsegs=%d blocks=%d",
1978	    mlen, le32toh(tpd->spec), nsegs, nblks));
1979
1980	WRITE4(sc, q->q.card + 0, q->q.card_ioblk | nblks);
1981	BARRIER_W(sc);
1982
1983	sc->txcnt++;
1984	sc->ifatm.ifnet.if_opackets++;
1985
1986	NEXT_QUEUE_ENTRY(sc->txqueue.head, FATM_TX_QLEN);
1987
1988	return (0);
1989}
1990
1991static void
1992fatm_start(struct ifnet *ifp)
1993{
1994	struct atm_pseudohdr aph;
1995	struct fatm_softc *sc;
1996	struct mbuf *m;
1997	u_int mlen, vpi, vci;
1998
1999	sc = (struct fatm_softc *)ifp->if_softc;
2000
2001	while (1) {
2002		IF_DEQUEUE(&ifp->if_snd, m);
2003		if (m == NULL)
2004			break;
2005
2006		/*
2007		 * Loop through the mbuf chain and compute the total length
2008		 * of the packet. Check that all data pointers are
2009		 * 4 byte aligned and fix them up if they are not (all
2010		 * done by fatm_fix_chain()). This comes more or less from the
2011		 * en driver.
2012		 */
2013		mlen = fatm_fix_chain(sc, &m);
2014		if (m == NULL)
2015			continue;
2016
2017		if (m->m_len < sizeof(struct atm_pseudohdr) &&
2018		    (m = m_pullup(m, sizeof(struct atm_pseudohdr))) == NULL)
2019			continue;
2020
2021		aph = *mtod(m, struct atm_pseudohdr *);
2022		mlen -= sizeof(struct atm_pseudohdr);
2023
2024		if (mlen == 0) {
2025			m_freem(m);
2026			continue;
2027		}
2028		if (mlen > FATM_MAXPDU) {
2029			sc->istats.tx_pdu2big++;
2030			m_freem(m);
2031			continue;
2032		}
2033
2034		vci = ATM_PH_VCI(&aph);
2035		vpi = ATM_PH_VPI(&aph);
2036
2037		/*
2038		 * From here on we need the softc
2039		 */
2040		FATM_LOCK(sc);
2041		if (!(ifp->if_flags & IFF_RUNNING)) {
2042			FATM_UNLOCK(sc);
2043			m_freem(m);
2044			break;
2045		}
2046		if (!VC_OK(sc, vpi, vci) ||
2047		    !(sc->vccs[vci].flags & FATM_VCC_OPEN)) {
2048			FATM_UNLOCK(sc);
2049			m_freem(m);
2050			continue;
2051		}
2052		if (fatm_tx(sc, m, vpi, vci, mlen)) {
2053			FATM_UNLOCK(sc);
2054			break;
2055		}
2056		FATM_UNLOCK(sc);
2057	}
2058}
2059
2060/*
2061 * Return a table of all currently open VCCs.
2062 */
2063static struct atmio_vcctable *
2064get_vccs(struct fatm_softc *sc, int flags)
2065{
2066	struct atmio_vcctable *vccs;
2067	struct atmio_vcc *v;
2068	u_int i, alloc;
2069
2070	alloc = 10;
2071	vccs = NULL;
2072	for (;;) {
2073		vccs = reallocf(vccs,
2074		    sizeof(*vccs) + alloc * sizeof(vccs->vccs[0]),
2075		    M_DEVBUF, flags);
2076		if (vccs == NULL)
2077			return (NULL);
2078
2079		vccs->count = 0;
2080		FATM_LOCK(sc);
2081		v = vccs->vccs;
2082		for (i = 0; i < (1U << sc->ifatm.mib.vci_bits); i++) {
2083			if (sc->vccs[i].flags & FATM_VCC_OPEN) {
2084				if (vccs->count++ == alloc) {
2085					alloc *= 2;
2086					break;
2087				}
2088				v->vpi = 0;
2089				v->vci = i;
2090				v->flags = sc->vccs[i].flags;
2091				v->aal = sc->vccs[i].aal;
2092				v->traffic = sc->vccs[i].traffic;
2093				bzero(&v->tparam, sizeof(v->tparam));
2094				v->tparam.pcr = sc->vccs[i].pcr;
2095				v++;
2096			}
2097		}
2098		if (i == (1U << sc->ifatm.mib.vci_bits))
2099			break;
2100		FATM_UNLOCK(sc);
2101	}
2102	FATM_UNLOCK(sc);
2103	return (vccs);
2104}
2105
2106/*
2107 * VCC management
2108 *
2109 * This may seem complicated. The reason for this is that we need an
2110 * asynchronous open/close for the NATM VCCs because our ioctl handler
2111 * is called with the radix node head of the routing table locked. Therefore
2112 * we cannot sleep there and wait for the open/close to succeed. For this
2113 * reason we just initiate the operation from the ioctl.
2114 */
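
/*
 * A sketch of the resulting call paths for opening a VCC (the close
 * path is analogous):
 *
 *	synchronous (ioctl):		asynchronous (NATM):
 *	fatm_open_vcc()			fatm_open_vcc_nowait()
 *	  fatm_start_open_vcc()		  fatm_start_open_vcc()
 *	  fatm_waitvcc() - sleeps	<returns immediately>
 *	<command completes>		<command completes>
 *	fatm_cmd_complete() - wakeup	fatm_open_complete()
 */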
2115
/*
 * Command the card to open/close a VC.
 * Return the queue entry to wait on if we are successful.
 */
2120static struct cmdqueue *
2121fatm_start_vcc(struct fatm_softc *sc, u_int vpi, u_int vci, uint32_t cmd,
2122    u_int mtu, void (*func)(struct fatm_softc *, struct cmdqueue *))
2123{
2124	struct cmdqueue *q;
2125
2126	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
2127
2128	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2129	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
2130		sc->istats.cmd_queue_full++;
2131		return (NULL);
2132	}
2133	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
2134
2135	q->error = 0;
2136	q->cb = func;
2137	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
2138	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
2139
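	/*
	 * Write the argument registers first and the opcode register
	 * last - the card is assumed to start processing the command
	 * once the opcode is written, so the write barriers keep the
	 * arguments ordered before it.
	 */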
2140	WRITE4(sc, q->q.card + FATMOC_ACTIN_VPVC, MKVPVC(vpi, vci));
2141	BARRIER_W(sc);
2142	WRITE4(sc, q->q.card + FATMOC_ACTIN_MTU, mtu);
2143	BARRIER_W(sc);
2144	WRITE4(sc, q->q.card + FATMOC_OP, cmd);
2145	BARRIER_W(sc);
2146
2147	return (q);
2148}
2149
2150/*
2151 * Start to open a VCC. This just initiates the operation.
2152 */
2153static int
2154fatm_start_open_vcc(struct fatm_softc *sc, u_int vpi, u_int vci, u_int aal,
2155    u_int traffic, u_int pcr, u_int flags, void *rxhand,
2156    void (*func)(struct fatm_softc *, struct cmdqueue *), struct cmdqueue **qp)
2157{
2158	int error;
2159	uint32_t cmd;
2160	struct cmdqueue *q;
2161
2162	error = 0;
2163
2164	if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
2165		return (EIO);
2166	if (!VC_OK(sc, vpi, vci) ||
2167	    (aal != ATMIO_AAL_0 && aal != ATMIO_AAL_5) ||
2168	    (traffic != ATMIO_TRAFFIC_UBR && traffic != ATMIO_TRAFFIC_CBR))
2169		return (EINVAL);
2170	if (sc->vccs[vci].flags & FATM_VCC_BUSY)
2171		return (EBUSY);
2172
2173	/* Command and buffer strategy */
2174	cmd = FATM_OP_ACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL | (0 << 16);
2175	if (aal == ATMIO_AAL_0)
2176		cmd |= (0 << 8);
2177	else
2178		cmd |= (5 << 8);
2179
2180	if ((q = fatm_start_vcc(sc, vpi, vci, cmd, 1, func)) == NULL)
2181		return (EIO);
2182	if (qp != NULL)
2183		*qp = q;
2184
2185	sc->vccs[vci].aal = aal;
2186	sc->vccs[vci].flags = flags | FATM_VCC_TRY_OPEN;
2187	sc->vccs[vci].rxhand = rxhand;
2188	sc->vccs[vci].pcr = pcr;
2189	sc->vccs[vci].traffic = traffic;
2190
2191	return (0);
2192}
2193
2194/*
2195 * Initiate closing a VCC
2196 */
2197static int
2198fatm_start_close_vcc(struct fatm_softc *sc, u_int vpi, u_int vci,
2199    void (*func)(struct fatm_softc *, struct cmdqueue *), struct cmdqueue **qp)
2200{
2201	int error;
2202	struct cmdqueue *q;
2203
2204	error = 0;
2205
2206	if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
2207		return (EIO);
2208	if (!VC_OK(sc, vpi, vci))
2209		return (EINVAL);
2210	if (!(sc->vccs[vci].flags & (FATM_VCC_OPEN | FATM_VCC_TRY_OPEN)))
2211		return (ENOENT);
2212
2213	if ((q = fatm_start_vcc(sc, vpi, vci,
2214	    FATM_OP_DEACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL, 1, func)) == NULL)
2215		return (EIO);
2216
2217	if (qp != NULL)
2218		*qp = q;
2219
2220	sc->vccs[vci].flags &= ~(FATM_VCC_OPEN | FATM_VCC_TRY_OPEN);
2221	sc->vccs[vci].flags |= FATM_VCC_TRY_CLOSE;
2222
2223	return (0);
2224}
2225
2226/*
2227 * Wait on the queue entry until the VCC is opened/closed.
2228 */
2229static int
2230fatm_waitvcc(struct fatm_softc *sc, struct cmdqueue *q)
2231{
2232	int error;
2233
	/*
	 * Wait for the command to complete. The sleep is bounded by a
	 * one second timeout so that a wedged card cannot block us
	 * forever; msleep returns EWOULDBLOCK in that case.
	 */
2237	error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_vci", hz);
2238
2239	if (error != 0)
2240		return (error);
2241	return (q->error);
2242}
2243
2244/*
2245 * The VC has been opened/closed and somebody has been waiting for this.
2246 * Wake him up.
2247 */
2248static void
2249fatm_cmd_complete(struct fatm_softc *sc, struct cmdqueue *q)
2250{
2251
2252	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2253	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2254		sc->istats.get_stat_errors++;
2255		q->error = EIO;
2256	}
2257	wakeup(q);
2258}
2259
2260/*
 * Open a VCC and wait for completion
2262 */
2263static int
2264fatm_open_vcc(struct fatm_softc *sc, u_int vpi, u_int vci, u_int flags,
2265    u_int aal, u_int traffic, u_int pcr, void *rxhand)
2266{
2267	int error;
2268	struct cmdqueue *q;
2269
2270	error = 0;
2271
2272	FATM_LOCK(sc);
2273	error = fatm_start_open_vcc(sc, vpi, vci, aal, traffic, pcr,
2274	    flags, rxhand, fatm_cmd_complete, &q);
2275	if (error != 0) {
2276		FATM_UNLOCK(sc);
2277		return (error);
2278	}
2279	error = fatm_waitvcc(sc, q);
2280
2281	if (error == 0) {
2282		sc->vccs[vci].flags &= ~FATM_VCC_TRY_OPEN;
2283		sc->vccs[vci].flags |= FATM_VCC_OPEN;
2284		sc->open_vccs++;
2285
2286#ifdef notyet
2287		/* inform management if this is not an NG
2288		 * VCC or it's an NG PVC. */
2289		if (!(sc->vccs[vci].flags & ATMIO_FLAG_NG) ||
2290		    (sc->vccs[vci].flags & ATMIO_FLAG_PVC))
2291			atm_message(&sc->ifatm.ifnet,
2292			    ATM_MSG_VCC_CHANGED,
2293			    (1 << 24) | (0 << 16) | vci);
2294#endif
2295	} else
2296		bzero(&sc->vccs[vci], sizeof(sc->vccs[vci]));
2297
2298	FATM_UNLOCK(sc);
2299	return (error);
2300}
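
/*
 * A minimal usage sketch of the synchronous open path (hypothetical
 * parameter values; this is what SIOCATMOPENVCC below boils down to):
 *
 *	error = fatm_open_vcc(sc, 0, 100, ATMIO_FLAG_PVC,
 *	    ATMIO_AAL_5, ATMIO_TRAFFIC_UBR, 0, rxhand);
 *
 * For CBR traffic ATMIO_TRAFFIC_CBR and a non-zero PCR would be passed
 * instead.
 */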
2301
/*
 * Close a VCC synchronously
 */
2305static int
2306fatm_close_vcc(struct fatm_softc *sc, u_int vpi, u_int vci)
2307{
2308	int error;
2309	struct cmdqueue *q;
2310
2311	error = 0;
2312
2313	FATM_LOCK(sc);
2314	error = fatm_start_close_vcc(sc, vpi, vci, fatm_cmd_complete, &q);
2315	if (error != 0) {
2316		FATM_UNLOCK(sc);
2317		return (error);
2318	}
2319	error = fatm_waitvcc(sc, q);
2320
2321	if (error == 0) {
2322#ifdef notyet
		/* inform management if this is not an NG
		 * VCC or it's an NG PVC. */
2325		if (!(sc->vccs[vci].flags & ATMIO_FLAG_NG) ||
2326		    (sc->vccs[vci].flags & ATMIO_FLAG_PVC))
2327			atm_message(&sc->ifatm.ifnet,
2328			    ATM_MSG_VCC_CHANGED,
2329			    (0 << 24) | (0 << 16) | vci);
2330#endif
2331
2332		bzero(&sc->vccs[vci], sizeof(sc->vccs[vci]));
2333		sc->open_vccs--;
2334	}
2335
2336	FATM_UNLOCK(sc);
2337	return (error);
2338}
2339
2340/*
2341 * The VC has been opened.
2342 */
2343static void
2344fatm_open_complete(struct fatm_softc *sc, struct cmdqueue *q)
2345{
2346	u_int vci;
2347
2348	vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
2349	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2350	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2351		sc->istats.get_stat_errors++;
2352		bzero(&sc->vccs[vci], sizeof(sc->vccs[vci]));
2353		if_printf(&sc->ifatm.ifnet, "opening VCI %u failed\n", vci);
2354		return;
2355	}
2356
2357	sc->vccs[vci].flags &= ~FATM_VCC_TRY_OPEN;
2358	sc->vccs[vci].flags |= FATM_VCC_OPEN;
2359	sc->open_vccs++;
2360
2361#ifdef notyet
2362	/* inform management if this is not an NG
2363	 * VCC or it's an NG PVC. */
2364	if (!(sc->vccs[vci].flags & ATMIO_FLAG_NG) ||
2365	    (sc->vccs[vci].flags & ATMIO_FLAG_PVC))
2366		atm_message(&sc->ifatm.ifnet, ATM_MSG_VCC_CHANGED,
2367		    (1 << 24) | (0 << 16) | vci);
2368#endif
2369}
2370
2371/*
2372 * The VC has been closed.
2373 */
2374static void
2375fatm_close_complete(struct fatm_softc *sc, struct cmdqueue *q)
2376{
2377	u_int vci;
2378
2379	vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
2380	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2381	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2382		sc->istats.get_stat_errors++;
2383		/* keep the VCC in that state */
2384		if_printf(&sc->ifatm.ifnet, "closing VCI %u failed\n", vci);
2385		return;
2386	}
2387
2388#ifdef notyet
	/* inform management if this is not an NG
	 * VCC or it's an NG PVC. */
2391	if (!(sc->vccs[vci].flags & ATMIO_FLAG_NG) ||
2392	    (sc->vccs[vci].flags & ATMIO_FLAG_PVC))
2393		atm_message(&sc->ifatm.ifnet, ATM_MSG_VCC_CHANGED,
2394		    (0 << 24) | (0 << 16) | vci);
2395#endif
2396
2397	bzero(&sc->vccs[vci], sizeof(sc->vccs[vci]));
2398	sc->open_vccs--;
2399}
2400
2401/*
 * Open a VCC but don't wait.
2403 */
2404static int
2405fatm_open_vcc_nowait(struct fatm_softc *sc, u_int vpi, u_int vci, u_int flags,
2406    u_int aal, void *rxhand)
2407{
2408	int error;
2409
2410	FATM_LOCK(sc);
2411	error = fatm_start_open_vcc(sc, vpi, vci, aal, ATMIO_TRAFFIC_UBR, 0,
2412	    flags, rxhand, fatm_open_complete, NULL);
2413	FATM_UNLOCK(sc);
2414	return (error);
2415}
2416
2417/*
2418 * Close a VCC but don't wait
2419 */
2420static int
2421fatm_close_vcc_nowait(struct fatm_softc *sc, u_int vpi, u_int vci)
2422{
2423	int error;
2424
2425	FATM_LOCK(sc);
2426	error = fatm_start_close_vcc(sc, vpi, vci, fatm_close_complete, NULL);
2427	FATM_UNLOCK(sc);
2428	return (error);
2429}
2430
2431/*
2432 * IOCTL handler
2433 */
2434static int
2435fatm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t arg)
2436{
2437	int error;
2438	struct fatm_softc *sc = ifp->if_softc;
2439	struct ifaddr *ifa = (struct ifaddr *)arg;
2440	struct ifreq *ifr = (struct ifreq *)arg;
2441	struct atmio_closevcc *cl = (struct atmio_closevcc *)arg;
2442	struct atmio_openvcc *op = (struct atmio_openvcc *)arg;
2443	struct atm_pseudoioctl *pa = (struct atm_pseudoioctl *)arg;
2444	struct atmio_vcctable *vtab;
2445
2446	error = 0;
2447	switch (cmd) {
2448
2449	  case SIOCATMENA:	/* internal NATM use */
2450		error = fatm_open_vcc_nowait(sc, ATM_PH_VPI(&pa->aph),
2451		    ATM_PH_VCI(&pa->aph), ATM_PH_FLAGS(&pa->aph),
2452		    (ATM_PH_FLAGS(&pa->aph) & ATM_PH_AAL5) ? ATMIO_AAL_5 :
2453		    ATMIO_AAL_0, pa->rxhand);
2454		break;
2455
2456	  case SIOCATMDIS:	/* internal NATM use */
2457		error = fatm_close_vcc_nowait(sc, ATM_PH_VPI(&pa->aph),
2458		    ATM_PH_VCI(&pa->aph));
2459		break;
2460
2461	  case SIOCATMOPENVCC:
2462		error = fatm_open_vcc(sc, op->param.vpi, op->param.vci,
2463		    op->param.flags, op->param.aal, op->param.traffic,
2464		    op->param.tparam.pcr, op->rxhand);
2465		break;
2466
2467	  case SIOCATMCLOSEVCC:
2468		error = fatm_close_vcc(sc, cl->vpi, cl->vci);
2469		break;
2470
2471	  case SIOCSIFADDR:
2472		FATM_LOCK(sc);
2473		ifp->if_flags |= IFF_UP;
2474		if (!(ifp->if_flags & IFF_RUNNING))
2475			fatm_init_locked(sc);
2476		switch (ifa->ifa_addr->sa_family) {
2477#ifdef INET
2478		  case AF_INET:
2479		  case AF_INET6:
2480			ifa->ifa_rtrequest = atm_rtrequest;
2481			break;
2482#endif
2483		  default:
2484			break;
2485		}
2486		FATM_UNLOCK(sc);
2487		break;
2488
2489	  case SIOCSIFFLAGS:
2490		FATM_LOCK(sc);
2491		if (ifp->if_flags & IFF_UP) {
2492			if (!(ifp->if_flags & IFF_RUNNING)) {
2493				fatm_init_locked(sc);
2494			}
2495		} else {
2496			if (ifp->if_flags & IFF_RUNNING) {
2497				fatm_stop(sc);
2498			}
2499		}
2500		FATM_UNLOCK(sc);
2501		break;
2502
2503	  case SIOCGIFMEDIA:
2504	  case SIOCSIFMEDIA:
2505		if (ifp->if_flags & IFF_RUNNING)
2506			error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
2507		else
2508			error = EINVAL;
2509		break;
2510
2511	  case SIOCATMGVCCS:
2512		/* return vcc table */
2513		vtab = get_vccs(sc, M_WAITOK);
2514		if (vtab == NULL) {
2515			error = ENOMEM;
2516			break;
2517		}
2518		error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
2519		    vtab->count * sizeof(vtab->vccs[0]));
2520		free(vtab, M_DEVBUF);
2521		break;
2522
2523	  case SIOCATMGETVCCS:	/* internal netgraph use */
2524		vtab = get_vccs(sc, M_NOWAIT);
2525		if (vtab == NULL) {
2526			error = ENOMEM;
2527			break;
2528		}
2529		*(void **)arg = vtab;
2530		break;
2531
2532	  default:
2533		DBG(sc, IOCTL, ("+++ cmd=%08lx arg=%p", cmd, arg));
2534		error = EINVAL;
2535		break;
2536	}
2537
2538	return (error);
2539}
2540
/*
 * Detach from the interface and free all resources allocated during
 * initialisation and later. This is also the error path of
 * fatm_attach(), so it must handle a partially initialized softc.
 */
2545static int
2546fatm_detach(device_t dev)
2547{
2548	u_int i;
2549	struct rbuf *rb;
2550	struct fatm_softc *sc;
2551	struct txqueue *tx;
2552
2553	sc = (struct fatm_softc *)device_get_softc(dev);
2554
2555	if (device_is_alive(dev)) {
2556		FATM_LOCK(sc);
2557		fatm_stop(sc);
2558		utopia_detach(&sc->utopia);
2559		FATM_UNLOCK(sc);
2560		atm_ifdetach(&sc->ifatm.ifnet);		/* XXX race */
2561	}
2562
2563	if (sc->ih != NULL)
2564		bus_teardown_intr(dev, sc->irqres, sc->ih);
2565
2566	while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
2567		if_printf(&sc->ifatm.ifnet, "rbuf %p still in use!\n", rb);
2568		bus_dmamap_unload(sc->rbuf_tag, rb->map);
2569		m_freem(rb->m);
2570		LIST_REMOVE(rb, link);
2571		LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
2572	}
2573
2574	if (sc->txqueue.chunk != NULL) {
2575		for (i = 0; i < FATM_TX_QLEN; i++) {
2576			tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
2577			bus_dmamap_destroy(sc->tx_tag, tx->map);
2578		}
2579	}
2580
2581	while ((rb = LIST_FIRST(&sc->rbuf_free)) != NULL) {
2582		bus_dmamap_destroy(sc->rbuf_tag, rb->map);
2583		LIST_REMOVE(rb, link);
2584	}
2585
2586	free(sc->rbufs, M_DEVBUF);
2587	free(sc->vccs, M_DEVBUF);
2588
2589	free(sc->l1queue.chunk, M_DEVBUF);
2590	free(sc->s1queue.chunk, M_DEVBUF);
2591	free(sc->rxqueue.chunk, M_DEVBUF);
2592	free(sc->txqueue.chunk, M_DEVBUF);
2593	free(sc->cmdqueue.chunk, M_DEVBUF);
2594
2595	destroy_dma_memory(&sc->reg_mem);
2596	destroy_dma_memory(&sc->sadi_mem);
2597	destroy_dma_memory(&sc->prom_mem);
2598#ifdef TEST_DMA_SYNC
2599	destroy_dma_memoryX(&sc->s1q_mem);
2600	destroy_dma_memoryX(&sc->l1q_mem);
2601	destroy_dma_memoryX(&sc->rxq_mem);
2602	destroy_dma_memoryX(&sc->txq_mem);
2603	destroy_dma_memoryX(&sc->stat_mem);
2604#endif
2605
2606	if (sc->tx_tag != NULL)
2607		if (bus_dma_tag_destroy(sc->tx_tag))
2608			printf("tx DMA tag busy!\n");
2609
2610	if (sc->rbuf_tag != NULL)
2611		if (bus_dma_tag_destroy(sc->rbuf_tag))
2612			printf("rbuf DMA tag busy!\n");
2613
2614	if (sc->parent_dmat != NULL)
2615		if (bus_dma_tag_destroy(sc->parent_dmat))
2616			printf("parent DMA tag busy!\n");
2617
2618	if (sc->irqres != NULL)
2619		bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irqres);
2620
2621	if (sc->memres != NULL)
2622		bus_release_resource(dev, SYS_RES_MEMORY,
2623		    sc->memid, sc->memres);
2624
2625	(void)sysctl_ctx_free(&sc->sysctl_ctx);
2626
2627	cv_destroy(&sc->cv_stat);
2628	cv_destroy(&sc->cv_regs);
2629
2630	mtx_destroy(&sc->mtx);
2631
2632	return (0);
2633}
2634
2635/*
 * Sysctl handler for internal driver statistics
2637 */
2638static int
2639fatm_sysctl_istats(SYSCTL_HANDLER_ARGS)
2640{
2641	struct fatm_softc *sc = arg1;
2642	u_long *ret;
2643	int error;
2644
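	/*
	 * Allocate the copy buffer before taking the lock: the M_WAITOK
	 * allocation may sleep and we must not sleep while holding the
	 * driver mutex.
	 */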
2645	ret = malloc(sizeof(sc->istats), M_TEMP, M_WAITOK);
2646
2647	FATM_LOCK(sc);
2648	bcopy(&sc->istats, ret, sizeof(sc->istats));
2649	FATM_UNLOCK(sc);
2650
2651	error = SYSCTL_OUT(req, ret, sizeof(sc->istats));
2652	free(ret, M_TEMP);
2653
2654	return (error);
2655}
2656
2657/*
2658 * Sysctl handler for card statistics
2659 */
2660static int
2661fatm_sysctl_stats(SYSCTL_HANDLER_ARGS)
2662{
2663	struct fatm_softc *sc = arg1;
2664	int error;
2665	const struct fatm_stats *s;
2666	u_long *ret;
2667	u_int i;
2668
2669	ret = malloc(sizeof(u_long) * FATM_NSTATS, M_TEMP, M_WAITOK);
2670
2671	FATM_LOCK(sc);
2672
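	/*
	 * fatm_getstat() presumably marks the shared statistics buffer
	 * busy (FATM_STAT_INUSE); in any case the flag is cleared again
	 * below and a possible waiter on cv_stat is woken up.
	 */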
2673	if ((error = fatm_getstat(sc)) == 0) {
2674		s = sc->sadi_mem.mem;
2675		i = 0;
2676		ret[i++] = s->phy_4b5b.crc_header_errors;
2677		ret[i++] = s->phy_4b5b.framing_errors;
2678		ret[i++] = s->phy_oc3.section_bip8_errors;
2679		ret[i++] = s->phy_oc3.path_bip8_errors;
2680		ret[i++] = s->phy_oc3.line_bip24_errors;
2681		ret[i++] = s->phy_oc3.line_febe_errors;
2682		ret[i++] = s->phy_oc3.path_febe_errors;
2683		ret[i++] = s->phy_oc3.corr_hcs_errors;
2684		ret[i++] = s->phy_oc3.ucorr_hcs_errors;
2685		ret[i++] = s->atm.cells_transmitted;
2686		ret[i++] = s->atm.cells_received;
2687		ret[i++] = s->atm.vpi_bad_range;
2688		ret[i++] = s->atm.vpi_no_conn;
2689		ret[i++] = s->atm.vci_bad_range;
2690		ret[i++] = s->atm.vci_no_conn;
2691		ret[i++] = s->aal0.cells_transmitted;
2692		ret[i++] = s->aal0.cells_received;
2693		ret[i++] = s->aal0.cells_dropped;
2694		ret[i++] = s->aal4.cells_transmitted;
2695		ret[i++] = s->aal4.cells_received;
2696		ret[i++] = s->aal4.cells_crc_errors;
2697		ret[i++] = s->aal4.cels_protocol_errors;
2698		ret[i++] = s->aal4.cells_dropped;
2699		ret[i++] = s->aal4.cspdus_transmitted;
2700		ret[i++] = s->aal4.cspdus_received;
2701		ret[i++] = s->aal4.cspdus_protocol_errors;
2702		ret[i++] = s->aal4.cspdus_dropped;
2703		ret[i++] = s->aal5.cells_transmitted;
2704		ret[i++] = s->aal5.cells_received;
2705		ret[i++] = s->aal5.congestion_experienced;
2706		ret[i++] = s->aal5.cells_dropped;
2707		ret[i++] = s->aal5.cspdus_transmitted;
2708		ret[i++] = s->aal5.cspdus_received;
2709		ret[i++] = s->aal5.cspdus_crc_errors;
2710		ret[i++] = s->aal5.cspdus_protocol_errors;
2711		ret[i++] = s->aal5.cspdus_dropped;
2712		ret[i++] = s->aux.small_b1_failed;
2713		ret[i++] = s->aux.large_b1_failed;
2714		ret[i++] = s->aux.small_b2_failed;
2715		ret[i++] = s->aux.large_b2_failed;
2716		ret[i++] = s->aux.rpd_alloc_failed;
2717		ret[i++] = s->aux.receive_carrier;
2718	}
2719	/* declare the buffer free */
2720	sc->flags &= ~FATM_STAT_INUSE;
2721	cv_signal(&sc->cv_stat);
2722
2723	FATM_UNLOCK(sc);
2724
2725	if (error == 0)
2726		error = SYSCTL_OUT(req, ret, sizeof(u_long) * FATM_NSTATS);
2727	free(ret, M_TEMP);
2728
2729	return (error);
2730}
2731
#define MAXDMASEGS 32		/* maximum number of DMA segments */
2733
2734/*
2735 * Attach to the device.
2736 *
 * We assume that there is a global lock (Giant in this case) that protects
 * multiple threads from entering this function. This makes sense, doesn't it?
2739 */
2740static int
2741fatm_attach(device_t dev)
2742{
2743	struct ifnet *ifp;
2744	struct fatm_softc *sc;
2745	int unit;
2746	uint16_t cfg;
2747	int error = 0;
2748	struct rbuf *rb;
2749	u_int i;
2750	struct txqueue *tx;
2751
2752	sc = device_get_softc(dev);
2753	unit = device_get_unit(dev);
2754
2755	sc->ifatm.mib.device = ATM_DEVICE_PCA200E;
2756	sc->ifatm.mib.serial = 0;
2757	sc->ifatm.mib.hw_version = 0;
2758	sc->ifatm.mib.sw_version = 0;
2759	sc->ifatm.mib.vpi_bits = 0;
2760	sc->ifatm.mib.vci_bits = FORE_VCIBITS;
2761	sc->ifatm.mib.max_vpcs = 0;
2762	sc->ifatm.mib.max_vccs = FORE_MAX_VCC;
2763	sc->ifatm.mib.media = IFM_ATM_UNKNOWN;
2764	sc->ifatm.phy = &sc->utopia;
2765
2766	LIST_INIT(&sc->rbuf_free);
2767	LIST_INIT(&sc->rbuf_used);
2768
2769	/*
2770	 * Initialize mutex and condition variables.
2771	 */
2772	mtx_init(&sc->mtx, device_get_nameunit(dev),
2773	    MTX_NETWORK_LOCK, MTX_DEF);
2774
2775	cv_init(&sc->cv_stat, "fatm_stat");
2776	cv_init(&sc->cv_regs, "fatm_regs");
2777
2778	sysctl_ctx_init(&sc->sysctl_ctx);
2779
2780	/*
2781	 * Make the sysctl tree
2782	 */
2783	if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
2784	    SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
2785	    device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
2786		goto fail;
2787
2788	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2789	    OID_AUTO, "istats", CTLFLAG_RD, sc, 0, fatm_sysctl_istats,
2790	    "LU", "internal statistics") == NULL)
2791		goto fail;
2792
2793	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2794	    OID_AUTO, "stats", CTLFLAG_RD, sc, 0, fatm_sysctl_stats,
2795	    "LU", "card statistics") == NULL)
2796		goto fail;
2797
2798	if (SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2799	    OID_AUTO, "retry_tx", CTLFLAG_RW, &sc->retry_tx, 0,
2800	    "retry flag") == NULL)
2801		goto fail;
2802
2803#ifdef FATM_DEBUG
2804	if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2805	    OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "debug flags")
2806	    == NULL)
2807		goto fail;
2808	sc->debug = FATM_DEBUG;
2809#endif
2810
2811	/*
2812	 * Network subsystem stuff
2813	 */
2814	ifp = &sc->ifatm.ifnet;
2815	ifp->if_softc = sc;
2816	ifp->if_unit = unit;
2817	ifp->if_name = "fatm";
2818	ifp->if_flags = IFF_SIMPLEX;
2819	ifp->if_ioctl = fatm_ioctl;
2820	ifp->if_start = fatm_start;
2821	ifp->if_watchdog = fatm_watchdog;
2822	ifp->if_init = fatm_init;
2823	ifp->if_linkmib = &sc->ifatm.mib;
2824	ifp->if_linkmiblen = sizeof(sc->ifatm.mib);
2825
2826	/*
	 * Enable memory mapping and bus mastering
2828	 */
2829	cfg = pci_read_config(dev, PCIR_COMMAND, 2);
2830	cfg |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
2831	pci_write_config(dev, PCIR_COMMAND, cfg, 2);
2832
2833	/*
2834	 * Map memory
2835	 */
2836	cfg = pci_read_config(dev, PCIR_COMMAND, 2);
2837	if (!(cfg & PCIM_CMD_MEMEN)) {
2838		if_printf(ifp, "failed to enable memory mapping\n");
2839		error = ENXIO;
2840		goto fail;
2841	}
2842	sc->memid = 0x10;
2843	sc->memres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->memid,
2844	    0, ~0, 1, RF_ACTIVE);
2845	if (sc->memres == NULL) {
2846		if_printf(ifp, "could not map memory\n");
2847		error = ENXIO;
2848		goto fail;
2849	}
2850	sc->memh = rman_get_bushandle(sc->memres);
2851	sc->memt = rman_get_bustag(sc->memres);
2852
2853	/*
	 * Convert endianness of slave accesses
2855	 */
2856	cfg = pci_read_config(dev, FATM_PCIR_MCTL, 1);
2857	cfg |= FATM_PCIM_SWAB;
2858	pci_write_config(dev, FATM_PCIR_MCTL, cfg, 1);
2859
2860	/*
2861	 * Allocate interrupt (activate at the end)
2862	 */
2863	sc->irqid = 0;
2864	sc->irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irqid,
2865	    0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
2866	if (sc->irqres == NULL) {
2867		if_printf(ifp, "could not allocate irq\n");
2868		error = ENXIO;
2869		goto fail;
2870	}
2871
	/*
	 * Allocate the parent DMA tag. It simply holds the overall
	 * restrictions for the controller (and the PCI bus) and is never
	 * used for actual mappings.
	 */
2877	if (bus_dma_tag_create(NULL, 1, 0,
2878	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2879	    NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, MAXDMASEGS,
2880	    BUS_SPACE_MAXSIZE_32BIT, 0, busdma_lock_mutex, &Giant,
2881	    &sc->parent_dmat)) {
2882		if_printf(ifp, "could not allocate parent DMA tag\n");
2883		error = ENOMEM;
2884		goto fail;
2885	}
2886
	/*
	 * Allocate the receive buffer DMA tag. This tag must be able to
	 * map at most an mbuf cluster.
	 */
2891	if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
2892	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2893	    NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
2894	    busdma_lock_mutex, &Giant, &sc->rbuf_tag)) {
2895		if_printf(ifp, "could not allocate rbuf DMA tag\n");
2896		error = ENOMEM;
2897		goto fail;
2898	}
2899
	/*
	 * Allocate the transmission DMA tag: a PDU is at most FATM_MAXPDU
	 * bytes long and may be loaded in at most TPD_EXTENSIONS +
	 * TXD_FIXED segments of at most MCLBYTES each.
	 */
2903	if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
2904	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2905	    NULL, NULL,
2906	    FATM_MAXPDU, TPD_EXTENSIONS + TXD_FIXED, MCLBYTES, 0,
2907	    busdma_lock_mutex, &Giant, &sc->tx_tag)) {
2908		if_printf(ifp, "could not allocate tx DMA tag\n");
2909		error = ENOMEM;
2910		goto fail;
2911	}
2912
2913	/*
2914	 * Allocate DMAable memory.
2915	 */
2916	sc->stat_mem.size = sizeof(uint32_t) * (FATM_CMD_QLEN + FATM_TX_QLEN
2917	    + FATM_RX_QLEN + SMALL_SUPPLY_QLEN + LARGE_SUPPLY_QLEN);
2918	sc->stat_mem.align = 4;
2919
2920	sc->txq_mem.size = FATM_TX_QLEN * TPD_SIZE;
2921	sc->txq_mem.align = 32;
2922
2923	sc->rxq_mem.size = FATM_RX_QLEN * RPD_SIZE;
2924	sc->rxq_mem.align = 32;
2925
2926	sc->s1q_mem.size = SMALL_SUPPLY_QLEN *
2927	    BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE);
2928	sc->s1q_mem.align = 32;
2929
2930	sc->l1q_mem.size = LARGE_SUPPLY_QLEN *
2931	    BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE);
2932	sc->l1q_mem.align = 32;
2933
2934#ifdef TEST_DMA_SYNC
2935	if ((error = alloc_dma_memoryX(sc, "STATUS", &sc->stat_mem)) != 0 ||
2936	    (error = alloc_dma_memoryX(sc, "TXQ", &sc->txq_mem)) != 0 ||
2937	    (error = alloc_dma_memoryX(sc, "RXQ", &sc->rxq_mem)) != 0 ||
2938	    (error = alloc_dma_memoryX(sc, "S1Q", &sc->s1q_mem)) != 0 ||
2939	    (error = alloc_dma_memoryX(sc, "L1Q", &sc->l1q_mem)) != 0)
2940		goto fail;
2941#else
2942	if ((error = alloc_dma_memory(sc, "STATUS", &sc->stat_mem)) != 0 ||
2943	    (error = alloc_dma_memory(sc, "TXQ", &sc->txq_mem)) != 0 ||
2944	    (error = alloc_dma_memory(sc, "RXQ", &sc->rxq_mem)) != 0 ||
2945	    (error = alloc_dma_memory(sc, "S1Q", &sc->s1q_mem)) != 0 ||
2946	    (error = alloc_dma_memory(sc, "L1Q", &sc->l1q_mem)) != 0)
2947		goto fail;
2948#endif
2949
2950	sc->prom_mem.size = sizeof(struct prom);
2951	sc->prom_mem.align = 32;
2952	if ((error = alloc_dma_memory(sc, "PROM", &sc->prom_mem)) != 0)
2953		goto fail;
2954
2955	sc->sadi_mem.size = sizeof(struct fatm_stats);
2956	sc->sadi_mem.align = 32;
2957	if ((error = alloc_dma_memory(sc, "STATISTICS", &sc->sadi_mem)) != 0)
2958		goto fail;
2959
2960	sc->reg_mem.size = sizeof(uint32_t) * FATM_NREGS;
2961	sc->reg_mem.align = 32;
2962	if ((error = alloc_dma_memory(sc, "REGISTERS", &sc->reg_mem)) != 0)
2963		goto fail;
2964
2965	/*
2966	 * Allocate queues
2967	 */
2968	sc->cmdqueue.chunk = malloc(FATM_CMD_QLEN * sizeof(struct cmdqueue),
2969	    M_DEVBUF, M_ZERO | M_WAITOK);
2970	sc->txqueue.chunk = malloc(FATM_TX_QLEN * sizeof(struct txqueue),
2971	    M_DEVBUF, M_ZERO | M_WAITOK);
2972	sc->rxqueue.chunk = malloc(FATM_RX_QLEN * sizeof(struct rxqueue),
2973	    M_DEVBUF, M_ZERO | M_WAITOK);
2974	sc->s1queue.chunk = malloc(SMALL_SUPPLY_QLEN * sizeof(struct supqueue),
2975	    M_DEVBUF, M_ZERO | M_WAITOK);
2976	sc->l1queue.chunk = malloc(LARGE_SUPPLY_QLEN * sizeof(struct supqueue),
2977	    M_DEVBUF, M_ZERO | M_WAITOK);
2978
2979	sc->vccs = malloc((FORE_MAX_VCC + 1) * sizeof(struct card_vcc),
2980	    M_DEVBUF, M_ZERO | M_WAITOK);
2981
2982	/*
2983	 * Allocate memory for the receive buffer headers. The total number
2984	 * of headers should probably also include the maximum number of
2985	 * buffers on the receive queue.
2986	 */
2987	sc->rbuf_total = SMALL_POOL_SIZE + LARGE_POOL_SIZE;
2988	sc->rbufs = malloc(sc->rbuf_total * sizeof(struct rbuf),
2989	    M_DEVBUF, M_ZERO | M_WAITOK);
2990
2991	/*
2992	 * Put all rbuf headers on the free list and create DMA maps.
2993	 */
2994	for (rb = sc->rbufs, i = 0; i < sc->rbuf_total; i++, rb++) {
2995		if ((error = bus_dmamap_create(sc->rbuf_tag, 0, &rb->map))) {
2996			if_printf(&sc->ifatm.ifnet, "creating rx map: %d\n",
2997			    error);
2998			goto fail;
2999		}
3000		LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
3001	}
3002
	/*
	 * Create DMA maps for transmission. In case of an error, free the
	 * already allocated maps, because on some architectures maps are
	 * NULL and we cannot distinguish between a failure and a NULL map
	 * in the detach routine.
	 */
3009	for (i = 0; i < FATM_TX_QLEN; i++) {
3010		tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
3011		if ((error = bus_dmamap_create(sc->tx_tag, 0, &tx->map))) {
3012			if_printf(&sc->ifatm.ifnet, "creating tx map: %d\n",
3013			    error);
3014			while (i > 0) {
3015				tx = GET_QUEUE(sc->txqueue, struct txqueue,
3016				    i - 1);
3017				bus_dmamap_destroy(sc->tx_tag, tx->map);
3018				i--;
3019			}
3020			goto fail;
3021		}
3022	}
3023
3024	utopia_attach(&sc->utopia, &sc->ifatm, &sc->media, &sc->mtx,
3025	    &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
3026	    &fatm_utopia_methods);
3027	sc->utopia.flags |= UTP_FL_NORESET | UTP_FL_POLL_CARRIER;
3028
3029	/*
3030	 * Attach the interface
3031	 */
3032	atm_ifattach(ifp);
3033	ifp->if_snd.ifq_maxlen = 512;
3034
3035	error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET,
3036	    fatm_intr, sc, &sc->ih);
3037	if (error) {
3038		if_printf(ifp, "couldn't setup irq\n");
3039		goto fail;
3040	}
3041
3042  fail:
3043	if (error)
3044		fatm_detach(dev);
3045
3046	return (error);
3047}
3048
3049#if defined(FATM_DEBUG) && 0
3050static void
3051dump_s1_queue(struct fatm_softc *sc)
3052{
3053	int i;
3054	struct supqueue *q;
3055
3056	for(i = 0; i < SMALL_SUPPLY_QLEN; i++) {
3057		q = GET_QUEUE(sc->s1queue, struct supqueue, i);
3058		printf("%2d: card=%x(%x,%x) stat=%x\n", i,
3059		    q->q.card,
3060		    READ4(sc, q->q.card),
3061		    READ4(sc, q->q.card + 4),
3062		    *q->q.statp);
3063	}
3064}
3065#endif
3066
3067/*
3068 * Driver infrastructure.
3069 */
3070static device_method_t fatm_methods[] = {
3071	DEVMETHOD(device_probe,		fatm_probe),
3072	DEVMETHOD(device_attach,	fatm_attach),
3073	DEVMETHOD(device_detach,	fatm_detach),
3074	{ 0, 0 }
3075};
3076static driver_t fatm_driver = {
3077	"fatm",
3078	fatm_methods,
3079	sizeof(struct fatm_softc),
3080};
3081
3082DRIVER_MODULE(fatm, pci, fatm_driver, fatm_devclass, 0, 0);
3083