if_fatm.c revision 119418
/*
 * Copyright (c) 2001-2003
 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
 * 	All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Author: Hartmut Brandt <harti@freebsd.org>
 *
 * Fore PCA200E driver for NATM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/fatm/if_fatm.c 119418 2003-08-24 17:55:58Z obrien $");

#include "opt_inet.h"
#include "opt_natm.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <sys/condvar.h>
#include <vm/uma.h>

#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_atm.h>
#include <net/route.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_atm.h>
#endif

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/utopia/utopia.h>

#include <dev/fatm/if_fatmreg.h>
#include <dev/fatm/if_fatmvar.h>

#include <dev/fatm/firmware.h>

devclass_t fatm_devclass;

static const struct {
	uint16_t	vid;
	uint16_t	did;
	const char	*name;
} fatm_devs[] = {
	{ 0x1127, 0x300,
	  "FORE PCA200E" },
	{ 0, 0, NULL }
};

static const struct rate {
	uint32_t	ratio;
	uint32_t	cell_rate;
} rate_table[] = {
#include <dev/fatm/if_fatm_rate.h>
};
#define RATE_TABLE_SIZE (sizeof(rate_table) / sizeof(rate_table[0]))

SYSCTL_DECL(_hw_atm);

MODULE_DEPEND(fatm, utopia, 1, 1, 1);

static int	fatm_utopia_readregs(struct ifatm *, u_int, uint8_t *, u_int *);
static int	fatm_utopia_writereg(struct ifatm *, u_int, u_int, u_int);

static const struct utopia_methods fatm_utopia_methods = {
	fatm_utopia_readregs,
	fatm_utopia_writereg
};

#define VC_OK(SC, VPI, VCI)						\
	(((VPI) & ~((1 << (SC)->ifatm.mib.vpi_bits) - 1)) == 0 &&	\
	 (VCI) != 0 && ((VCI) & ~((1 << (SC)->ifatm.mib.vci_bits) - 1)) == 0)
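
/*
 * Example (illustrative values): with mib.vpi_bits == 0 and
 * mib.vci_bits == 12, VC_OK() accepts only VPI 0 and the VCIs 1...4095;
 * VCI 0 is always rejected.
 */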

static int fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc);

/*
 * Probing is easy: step through the list of known vendor and device
 * ids and compare. If one is found - it's ours.
 */
static int
fatm_probe(device_t dev)
{
	int i;

	for (i = 0; fatm_devs[i].name; i++)
		if (pci_get_vendor(dev) == fatm_devs[i].vid &&
		    pci_get_device(dev) == fatm_devs[i].did) {
			device_set_desc(dev, fatm_devs[i].name);
			return (0);
		}
	return (ENXIO);
}

/*
 * Function called at completion of a SUNI writeregs/readregs command.
 * This is called from the interrupt handler while holding the softc lock.
 * We use the queue entry as the rendezvous point.
 */
static void
fatm_utopia_writeregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
{

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.suni_reg_errors++;
		q->error = EIO;
	}
	wakeup(q);
}

/*
 * Write a SUNI register. The bits that are 1 in mask are written from val
 * into register reg. We wait for the command to complete by sleeping on
 * the register memory.
 *
 * We assume that we already hold the softc mutex.
 */
static int
fatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
{
	int error;
	struct cmdqueue *q;
	struct fatm_softc *sc;

	sc = ifatm->ifnet.if_softc;
	FATM_CHECKLOCK(sc);
	if (!(ifatm->ifnet.if_flags & IFF_RUNNING))
		return (EIO);

	/* get queue element and fill it */
	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
		sc->istats.cmd_queue_full++;
		return (EIO);
	}
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = fatm_utopia_writeregs_complete;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, 0);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP,
	    FATM_MAKE_SETOC3(reg, val, mask) | FATM_OP_INTERRUPT_SEL);
	BARRIER_W(sc);

	/*
	 * Wait for the command to complete
	 */
	error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_setreg", hz);

	switch (error) {

	  case EWOULDBLOCK:
		error = EIO;
		break;

	  case ERESTART:
		error = EINTR;
		break;

	  case 0:
		error = q->error;
		break;
	}

	return (error);
}

/*
 * Function called at completion of a SUNI readregs command.
 * This is called from the interrupt handler while holding the softc lock.
 * We use reg_mem as the rendezvous point.
 */
static void
fatm_utopia_readregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
{

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.suni_reg_errors++;
		q->error = EIO;
	}
	wakeup(&sc->reg_mem);
}

/*
 * Read SUNI registers
 *
 * We use a preallocated buffer to read the registers. Therefore we need
 * to protect against multiple threads trying to read registers. We do this
 * with a condition variable and a flag. We wait for the command to complete
 * by sleeping on the register memory.
 *
 * We assume that we already hold the softc mutex.
 */
static int
fatm_utopia_readregs_internal(struct fatm_softc *sc)
{
	int error, i;
	uint32_t *ptr;
	struct cmdqueue *q;

	/* get the buffer */
	for (;;) {
		if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
			return (EIO);
		if (!(sc->flags & FATM_REGS_INUSE))
			break;
		cv_wait(&sc->cv_regs, &sc->mtx);
	}
	sc->flags |= FATM_REGS_INUSE;

	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
		sc->istats.cmd_queue_full++;
		/* release the register buffer before bailing out */
		sc->flags &= ~FATM_REGS_INUSE;
		cv_signal(&sc->cv_regs);
		return (EIO);
	}
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = fatm_utopia_readregs_complete;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map, BUS_DMASYNC_PREREAD);

	WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, sc->reg_mem.paddr);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP,
	    FATM_OP_OC3_GET_REG | FATM_OP_INTERRUPT_SEL);
	BARRIER_W(sc);

	/*
	 * Wait for the command to complete
	 */
	error = msleep(&sc->reg_mem, &sc->mtx, PZERO | PCATCH,
	    "fatm_getreg", hz);

	switch (error) {

	  case EWOULDBLOCK:
		error = EIO;
		break;

	  case ERESTART:
		error = EINTR;
		break;

	  case 0:
		bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map,
		    BUS_DMASYNC_POSTREAD);
		error = q->error;
		break;
	}

	if (error != 0) {
		/* declare buffer to be free */
		sc->flags &= ~FATM_REGS_INUSE;
		cv_signal(&sc->cv_regs);
		return (error);
	}

	/* swap if needed */
	ptr = (uint32_t *)sc->reg_mem.mem;
	for (i = 0; i < FATM_NREGS; i++)
		ptr[i] = le32toh(ptr[i]) & 0xff;

	return (0);
}

/*
 * Read SUNI registers for the SUNI module.
 *
 * We assume that we already hold the mutex.
 */
static int
fatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *valp, u_int *np)
{
	int err;
	int i;
	struct fatm_softc *sc;

	if (reg >= FATM_NREGS)
		return (EINVAL);
	if (reg + *np > FATM_NREGS)
		*np = FATM_NREGS - reg;
	sc = ifatm->ifnet.if_softc;
	FATM_CHECKLOCK(sc);

	err = fatm_utopia_readregs_internal(sc);
	if (err != 0)
		return (err);

	for (i = 0; i < *np; i++)
		valp[i] = ((uint32_t *)sc->reg_mem.mem)[reg + i];

	/* declare buffer to be free */
	sc->flags &= ~FATM_REGS_INUSE;
	cv_signal(&sc->cv_regs);

	return (0);
}

/*
 * Check whether the heart is beating. We remember the last heart beat and
 * compare it to the current one. If it appears stuck for 10 consecutive
 * checks, we have a problem.
 *
 * Assume we hold the lock.
 */
static void
fatm_check_heartbeat(struct fatm_softc *sc)
{
	uint32_t h;

	FATM_CHECKLOCK(sc);

	h = READ4(sc, FATMO_HEARTBEAT);
	DBG(sc, BEAT, ("heartbeat %08x", h));

	if (sc->stop_cnt == 10)
		return;

	if (h == sc->heartbeat) {
		if (++sc->stop_cnt == 10) {
			log(LOG_ERR, "i960 stopped???\n");
			WRITE4(sc, FATMO_HIMR, 1);
		}
		return;
	}

	sc->stop_cnt = 0;
	sc->heartbeat = h;
}
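
/*
 * Note: fatm_watchdog() below runs this check every 5 seconds, so a
 * stuck i960 is reported after roughly 50 seconds.
 */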

/*
 * Ensure that the heart is still beating.
 */
static void
fatm_watchdog(struct ifnet *ifp)
{
	struct fatm_softc *sc = ifp->if_softc;

	FATM_LOCK(sc);
	if (ifp->if_flags & IFF_RUNNING) {
		fatm_check_heartbeat(sc);
		ifp->if_timer = 5;
	}
	FATM_UNLOCK(sc);
}

/*
 * Hard reset the i960 on the board. This is done by initializing registers,
 * clearing interrupts and waiting for the selftest to finish. Not sure
 * whether all these barriers are actually needed.
 *
 * Assumes that we hold the lock.
 */
static int
fatm_reset(struct fatm_softc *sc)
{
	int w;
	uint32_t val;

	FATM_CHECKLOCK(sc);

	WRITE4(sc, FATMO_APP_BASE, FATMO_COMMON_ORIGIN);
	BARRIER_W(sc);

	WRITE4(sc, FATMO_UART_TO_960, XMIT_READY);
	BARRIER_W(sc);

	WRITE4(sc, FATMO_UART_TO_HOST, XMIT_READY);
	BARRIER_W(sc);

	WRITE4(sc, FATMO_BOOT_STATUS, COLD_START);
	BARRIER_W(sc);

	WRITE1(sc, FATMO_HCR, FATM_HCR_RESET);
	BARRIER_W(sc);

	DELAY(1000);

	WRITE1(sc, FATMO_HCR, 0);
	BARRIER_RW(sc);

	DELAY(1000);

	for (w = 100; w; w--) {
		BARRIER_R(sc);
		val = READ4(sc, FATMO_BOOT_STATUS);
		switch (val) {
		  case SELF_TEST_OK:
			return (0);
		  case SELF_TEST_FAIL:
			return (EIO);
		}
		DELAY(1000);
	}
	return (EIO);
}
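
/*
 * The self-test loop above polls FATMO_BOOT_STATUS up to 100 times with
 * a 1 ms delay between reads, so the reset gives up after about 100 ms.
 */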

/*
 * Stop the card. Must be called WITH the lock held.
 * Reset, free transmit and receive buffers. Wakeup everybody who may sleep.
 */
static void
fatm_stop(struct fatm_softc *sc)
{
	int i;
	struct cmdqueue *q;
	struct rbuf *rb;
	struct txqueue *tx;
	uint32_t stat;

	FATM_CHECKLOCK(sc);

	/* Stop the board */
	utopia_stop(&sc->utopia);
	(void)fatm_reset(sc);

	/* stop watchdog */
	sc->ifatm.ifnet.if_timer = 0;

	if (sc->ifatm.ifnet.if_flags & IFF_RUNNING) {
		sc->ifatm.ifnet.if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm,
		    sc->utopia.carrier == UTP_CARR_OK);

		/*
		 * Collect transmit mbufs, partial receive mbufs and
		 * supplied mbufs
		 */
		for (i = 0; i < FATM_TX_QLEN; i++) {
			tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
			if (tx->m) {
				bus_dmamap_unload(sc->tx_tag, tx->map);
				m_freem(tx->m);
				tx->m = NULL;
			}
		}

		/* Collect supplied mbufs */
		while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
			LIST_REMOVE(rb, link);
			bus_dmamap_unload(sc->rbuf_tag, rb->map);
			m_free(rb->m);
			rb->m = NULL;
			LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
		}

		/* Unwait any waiters */
		wakeup(&sc->sadi_mem);

		/* wakeup all threads waiting for STAT or REG buffers */
		cv_broadcast(&sc->cv_stat);
		cv_broadcast(&sc->cv_regs);

		sc->flags &= ~(FATM_STAT_INUSE | FATM_REGS_INUSE);

		/* wakeup all threads waiting on commands */
		for (i = 0; i < FATM_CMD_QLEN; i++) {
			q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, i);

			H_SYNCSTAT_POSTREAD(sc, q->q.statp);
			if ((stat = H_GETSTAT(q->q.statp)) != FATM_STAT_FREE) {
				H_SETSTAT(q->q.statp, stat | FATM_STAT_ERROR);
				H_SYNCSTAT_PREWRITE(sc, q->q.statp);
				wakeup(q);
			}
		}
		utopia_reset_media(&sc->utopia);
	}
	sc->small_cnt = sc->large_cnt = 0;

	/* Reset vcc info */
	if (sc->vccs != NULL) {
		sc->open_vccs = 0;
		for (i = 0; i < FORE_MAX_VCC + 1; i++) {
			if (sc->vccs[i] != NULL) {
				if ((sc->vccs[i]->vflags & (FATM_VCC_OPEN |
				    FATM_VCC_TRY_OPEN)) == 0) {
					uma_zfree(sc->vcc_zone, sc->vccs[i]);
					sc->vccs[i] = NULL;
				} else {
					sc->vccs[i]->vflags = 0;
					sc->open_vccs++;
				}
			}
		}
	}
}

/*
 * Load the firmware into the board and save the entry point.
 */
static uint32_t
firmware_load(struct fatm_softc *sc)
{
	struct firmware *fw = (struct firmware *)firmware;

	DBG(sc, INIT, ("loading - entry=%x", fw->entry));
	bus_space_write_region_4(sc->memt, sc->memh, fw->offset, firmware,
	    sizeof(firmware) / sizeof(firmware[0]));
	BARRIER_RW(sc);

	return (fw->entry);
}
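
/*
 * Layout assumption (from dev/fatm/firmware.h): the firmware image is an
 * array of 32-bit words that begins with a header containing the entry
 * point and the load offset; the whole array, header included, is copied
 * to the card with 4-byte bus-space writes.
 */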

/*
 * Read a character from the virtual UART. The availability of a character
 * is signalled by a non-zero value of the 32 bit register. The eating of
 * the character by us is signalled to the card by setting that register
 * to zero.
 */
static int
rx_getc(struct fatm_softc *sc)
{
	int w = 50;
	int c;

	while (w--) {
		c = READ4(sc, FATMO_UART_TO_HOST);
		BARRIER_RW(sc);
		if (c != 0) {
			WRITE4(sc, FATMO_UART_TO_HOST, 0);
			DBGC(sc, UART, ("%c", c & 0xff));
			return (c & 0xff);
		}
		DELAY(1000);
	}
	return (-1);
}

/*
 * Eat up characters from the board and stuff them in the bit-bucket.
 */
static void
rx_flush(struct fatm_softc *sc)
{
	int w = 10000;

	while (w-- && rx_getc(sc) >= 0)
		;
}

/*
 * Write a character to the card. The UART is available if the register
 * is zero.
 */
static int
tx_putc(struct fatm_softc *sc, u_char c)
{
	int w = 10;
	int c1;

	while (w--) {
		c1 = READ4(sc, FATMO_UART_TO_960);
		BARRIER_RW(sc);
		if (c1 == 0) {
			WRITE4(sc, FATMO_UART_TO_960, c | CHAR_AVAIL);
			DBGC(sc, UART, ("%c", c & 0xff));
			return (0);
		}
		DELAY(1000);
	}
	return (-1);
}

/*
 * Start the firmware. This is done by issuing a 'go' command with
 * the hex entry address of the firmware. Then we wait for the self-test to
 * succeed.
 */
static int
fatm_start_firmware(struct fatm_softc *sc, uint32_t start)
{
	static char hex[] = "0123456789abcdef";
	u_int w, val;

	DBG(sc, INIT, ("starting"));
	rx_flush(sc);
	tx_putc(sc, '\r');
	DELAY(1000);

	rx_flush(sc);

	tx_putc(sc, 'g');
	(void)rx_getc(sc);
	tx_putc(sc, 'o');
	(void)rx_getc(sc);
	tx_putc(sc, ' ');
	(void)rx_getc(sc);

	tx_putc(sc, hex[(start >> 12) & 0xf]);
	(void)rx_getc(sc);
	tx_putc(sc, hex[(start >>  8) & 0xf]);
	(void)rx_getc(sc);
	tx_putc(sc, hex[(start >>  4) & 0xf]);
	(void)rx_getc(sc);
	tx_putc(sc, hex[(start >>  0) & 0xf]);
	(void)rx_getc(sc);
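
	/*
	 * Note that only the low 16 bits of the entry address are sent;
	 * for start = 0x4000 the card sees the command "go 4000" followed
	 * by a carriage return.
	 */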

	tx_putc(sc, '\r');
	rx_flush(sc);

	for (w = 100; w; w--) {
		BARRIER_R(sc);
		val = READ4(sc, FATMO_BOOT_STATUS);
		switch (val) {
		  case CP_RUNNING:
			return (0);
		  case SELF_TEST_FAIL:
			return (EIO);
		}
		DELAY(1000);
	}
	return (EIO);
}

/*
 * Initialize one card and host queue.
 */
static void
init_card_queue(struct fatm_softc *sc, struct fqueue *queue, int qlen,
    size_t qel_size, size_t desc_size, cardoff_t off,
    u_char **statpp, uint32_t *cardstat, u_char *descp, uint32_t carddesc)
{
	struct fqelem *el = queue->chunk;

	while (qlen--) {
		el->card = off;
		off += 8;	/* size of card entry */

		el->statp = (uint32_t *)(*statpp);
		(*statpp) += sizeof(uint32_t);
		H_SETSTAT(el->statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, el->statp);

		WRITE4(sc, el->card + FATMOS_STATP, (*cardstat));
		(*cardstat) += sizeof(uint32_t);

		el->ioblk = descp;
		descp += desc_size;
		el->card_ioblk = carddesc;
		carddesc += desc_size;

		el = (struct fqelem *)((u_char *)el + qel_size);
	}
	queue->tail = queue->head = 0;
}
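
/*
 * Illustrative layout: for a queue of length 3 starting at card offset
 * off, the card entries sit at off, off + 8 and off + 16, their status
 * words are three consecutive uint32_t's in host status memory, and the
 * I/O blocks are spaced desc_size bytes apart in host queue memory.
 */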

/*
 * Issue the initialize operation to the card, wait for completion and
 * initialize the on-board and host queue structures with offsets and
 * addresses.
 */
static int
fatm_init_cmd(struct fatm_softc *sc)
{
	int w, c;
	u_char *statp;
	uint32_t card_stat;
	u_int cnt;
	struct fqelem *el;
	cardoff_t off;

	DBG(sc, INIT, ("command"));
	WRITE4(sc, FATMO_ISTAT, 0);
	WRITE4(sc, FATMO_IMASK, 1);
	WRITE4(sc, FATMO_HLOGGER, 0);

	WRITE4(sc, FATMO_INIT + FATMOI_RECEIVE_TRESHOLD, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_NUM_CONNECT, FORE_MAX_VCC);
	WRITE4(sc, FATMO_INIT + FATMOI_CQUEUE_LEN, FATM_CMD_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_TQUEUE_LEN, FATM_TX_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_RQUEUE_LEN, FATM_RX_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_RPD_EXTENSION, RPD_EXTENSIONS);
	WRITE4(sc, FATMO_INIT + FATMOI_TPD_EXTENSION, TPD_EXTENSIONS);

	/*
	 * initialize buffer descriptors
	 */
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_QUEUE_LENGTH,
	    SMALL_SUPPLY_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_BUFFER_SIZE,
	    SMALL_BUFFER_LEN);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_POOL_SIZE,
	    SMALL_POOL_SIZE);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_SUPPLY_BLKSIZE,
	    SMALL_SUPPLY_BLKSIZE);

	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_QUEUE_LENGTH,
	    LARGE_SUPPLY_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_BUFFER_SIZE,
	    LARGE_BUFFER_LEN);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_POOL_SIZE,
	    LARGE_POOL_SIZE);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_SUPPLY_BLKSIZE,
	    LARGE_SUPPLY_BLKSIZE);

	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_QUEUE_LENGTH, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_BUFFER_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_POOL_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_SUPPLY_BLKSIZE, 0);

	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_QUEUE_LENGTH, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_BUFFER_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_POOL_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_SUPPLY_BLKSIZE, 0);

	/*
	 * Start the command
	 */
	BARRIER_W(sc);
	WRITE4(sc, FATMO_INIT + FATMOI_STATUS, FATM_STAT_PENDING);
	BARRIER_W(sc);
	WRITE4(sc, FATMO_INIT + FATMOI_OP, FATM_OP_INITIALIZE);
	BARRIER_W(sc);

	/*
	 * Busy wait for completion
	 */
	w = 100;
	while (w--) {
		c = READ4(sc, FATMO_INIT + FATMOI_STATUS);
		BARRIER_R(sc);
		if (c & FATM_STAT_COMPLETE)
			break;
		DELAY(1000);
	}

	if (c & FATM_STAT_ERROR)
		return (EIO);

	/*
	 * Initialize the queues
	 */
	statp = sc->stat_mem.mem;
	card_stat = sc->stat_mem.paddr;

	/*
	 * Command queue. This is special in that it's on the card.
	 */
	el = sc->cmdqueue.chunk;
	off = READ4(sc, FATMO_COMMAND_QUEUE);
	DBG(sc, INIT, ("cmd queue=%x", off));
	for (cnt = 0; cnt < FATM_CMD_QLEN; cnt++) {
		el = &((struct cmdqueue *)sc->cmdqueue.chunk + cnt)->q;

		el->card = off;
		off += 32;		/* size of card structure */

		el->statp = (uint32_t *)statp;
		statp += sizeof(uint32_t);
		H_SETSTAT(el->statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, el->statp);

		WRITE4(sc, el->card + FATMOC_STATP, card_stat);
		card_stat += sizeof(uint32_t);
	}
	sc->cmdqueue.tail = sc->cmdqueue.head = 0;

	/*
	 * Now the other queues. These are in memory
	 */
	init_card_queue(sc, &sc->txqueue, FATM_TX_QLEN,
	    sizeof(struct txqueue), TPD_SIZE,
	    READ4(sc, FATMO_TRANSMIT_QUEUE),
	    &statp, &card_stat, sc->txq_mem.mem, sc->txq_mem.paddr);

	init_card_queue(sc, &sc->rxqueue, FATM_RX_QLEN,
	    sizeof(struct rxqueue), RPD_SIZE,
	    READ4(sc, FATMO_RECEIVE_QUEUE),
	    &statp, &card_stat, sc->rxq_mem.mem, sc->rxq_mem.paddr);

	init_card_queue(sc, &sc->s1queue, SMALL_SUPPLY_QLEN,
	    sizeof(struct supqueue), BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE),
	    READ4(sc, FATMO_SMALL_B1_QUEUE),
	    &statp, &card_stat, sc->s1q_mem.mem, sc->s1q_mem.paddr);

	init_card_queue(sc, &sc->l1queue, LARGE_SUPPLY_QLEN,
	    sizeof(struct supqueue), BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE),
	    READ4(sc, FATMO_LARGE_B1_QUEUE),
	    &statp, &card_stat, sc->l1q_mem.mem, sc->l1q_mem.paddr);

	sc->txcnt = 0;

	return (0);
}

/*
 * Read PROM. Called only from attach code. Here we spin because the interrupt
 * handler is not yet set up.
 */
static int
fatm_getprom(struct fatm_softc *sc)
{
	int i;
	struct prom *prom;
	struct cmdqueue *q;

	DBG(sc, INIT, ("reading prom"));
	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = NULL;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
	    BUS_DMASYNC_PREREAD);

	WRITE4(sc, q->q.card + FATMOC_GPROM_BUF, sc->prom_mem.paddr);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_GET_PROM_DATA);
	BARRIER_W(sc);

	for (i = 0; i < 1000; i++) {
		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) &
		    (FATM_STAT_COMPLETE | FATM_STAT_ERROR))
			break;
		DELAY(1000);
	}
	if (i == 1000) {
		if_printf(&sc->ifatm.ifnet, "getprom timeout\n");
		return (EIO);
	}
	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		if_printf(&sc->ifatm.ifnet, "getprom error\n");
		return (EIO);
	}
	H_SETSTAT(q->q.statp, FATM_STAT_FREE);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
	NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);

	bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
	    BUS_DMASYNC_POSTREAD);

#ifdef notdef
	{
		u_int i;
		u_char *ptr;

		printf("PROM: ");
		ptr = (u_char *)sc->prom_mem.mem;
		for (i = 0; i < sizeof(struct prom); i++)
			printf("%02x ", *ptr++);
		printf("\n");
	}
#endif

	prom = (struct prom *)sc->prom_mem.mem;

	bcopy(prom->mac + 2, sc->ifatm.mib.esi, 6);
	sc->ifatm.mib.serial = le32toh(prom->serial);
	sc->ifatm.mib.hw_version = le32toh(prom->version);
	sc->ifatm.mib.sw_version = READ4(sc, FATMO_FIRMWARE_RELEASE);

	if_printf(&sc->ifatm.ifnet, "ESI=%02x:%02x:%02x:%02x:%02x:%02x "
	    "serial=%u hw=0x%x sw=0x%x\n", sc->ifatm.mib.esi[0],
	    sc->ifatm.mib.esi[1], sc->ifatm.mib.esi[2], sc->ifatm.mib.esi[3],
	    sc->ifatm.mib.esi[4], sc->ifatm.mib.esi[5], sc->ifatm.mib.serial,
	    sc->ifatm.mib.hw_version, sc->ifatm.mib.sw_version);

	return (0);
}

/*
 * This is the callback function for bus_dmamap_load. We assume that we
 * have a 32-bit bus and so always have one segment.
 */
static void
dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *ptr = (bus_addr_t *)arg;

	if (error != 0) {
		printf("%s: error=%d\n", __func__, error);
		return;
	}
	KASSERT(nsegs == 1, ("too many DMA segments"));
	KASSERT(segs[0].ds_addr <= 0xffffffff, ("DMA address too large %lx",
	    (u_long)segs[0].ds_addr));

	*ptr = segs[0].ds_addr;
}
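
/*
 * Usage sketch (illustrative, tag/map/buf/len are placeholders): because
 * the loads in this driver pass BUS_DMA_NOWAIT, bus_dmamap_load() invokes
 * the callback synchronously and the physical address is available right
 * after the call returns:
 *
 *	bus_addr_t phys;
 *
 *	error = bus_dmamap_load(tag, map, buf, len, dmaload_helper,
 *	    &phys, BUS_DMA_NOWAIT);
 */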

/*
 * Allocate a chunk of DMA-able memory and map it.
 */
static int
alloc_dma_memory(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
{
	int error;

	mem->mem = NULL;

	if (bus_dma_tag_create(sc->parent_dmat, mem->align, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, mem->size, 1, BUS_SPACE_MAXSIZE_32BIT,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
		if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA tag\n",
		    nm);
		return (ENOMEM);
	}

	error = bus_dmamem_alloc(mem->dmat, &mem->mem, 0, &mem->map);
	if (error) {
		if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA memory: "
		    "%d\n", nm, error);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
	if (error) {
		if_printf(&sc->ifatm.ifnet, "could not load %s DMA memory: "
		    "%d\n", nm, error);
		bus_dmamem_free(mem->dmat, mem->mem, mem->map);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	DBG(sc, DMA, ("DMA %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
	    (u_long)mem->paddr, mem->size, mem->align));

	return (0);
}

#ifdef TEST_DMA_SYNC
static int
alloc_dma_memoryX(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
{
	int error;

	mem->mem = NULL;

	if (bus_dma_tag_create(NULL, mem->align, 0,
	    BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, mem->size, 1, mem->size,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
		if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA tag\n",
		    nm);
		return (ENOMEM);
	}

	mem->mem = contigmalloc(mem->size, M_DEVBUF, M_WAITOK,
	    BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR_32BIT, mem->align, 0);

	error = bus_dmamap_create(mem->dmat, 0, &mem->map);
	if (error) {
		if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA map: "
		    "%d\n", nm, error);
		contigfree(mem->mem, mem->size, M_DEVBUF);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
	if (error) {
		if_printf(&sc->ifatm.ifnet, "could not load %s DMA memory: "
		    "%d\n", nm, error);
		bus_dmamap_destroy(mem->dmat, mem->map);
		contigfree(mem->mem, mem->size, M_DEVBUF);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	DBG(sc, DMA, ("DMAX %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
	    (u_long)mem->paddr, mem->size, mem->align));

	printf("DMAX: %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
	    (u_long)mem->paddr, mem->size, mem->align);

	return (0);
}
#endif /* TEST_DMA_SYNC */

/*
 * Destroy all resources of a DMA-able memory chunk
 */
static void
destroy_dma_memory(struct fatm_mem *mem)
{
	if (mem->mem != NULL) {
		bus_dmamap_unload(mem->dmat, mem->map);
		bus_dmamem_free(mem->dmat, mem->mem, mem->map);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
	}
}

#ifdef TEST_DMA_SYNC
static void
destroy_dma_memoryX(struct fatm_mem *mem)
{
	if (mem->mem != NULL) {
		bus_dmamap_unload(mem->dmat, mem->map);
		bus_dmamap_destroy(mem->dmat, mem->map);
		contigfree(mem->mem, mem->size, M_DEVBUF);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
	}
}
#endif /* TEST_DMA_SYNC */

/*
 * Try to supply buffers to the card if there are free entries in the queues
 */
static void
fatm_supply_small_buffers(struct fatm_softc *sc)
{
	int nblocks, nbufs;
	struct supqueue *q;
	struct rbd *bd;
	int i, j, error, cnt;
	struct mbuf *m;
	struct rbuf *rb;
	bus_addr_t phys;

	nbufs = max(4 * sc->open_vccs, 32);
	nbufs = min(nbufs, SMALL_POOL_SIZE);
	nbufs -= sc->small_cnt;

	nblocks = (nbufs + SMALL_SUPPLY_BLKSIZE - 1) / SMALL_SUPPLY_BLKSIZE;
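	/*
	 * Worked example (illustrative): with 3 open VCCs nbufs starts as
	 * max(12, 32) = 32; if 8 small buffers are already outstanding and
	 * SMALL_SUPPLY_BLKSIZE is 8, this supplies (24 + 7) / 8 = 3 blocks.
	 */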
	for (cnt = 0; cnt < nblocks; cnt++) {
		q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.head);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
			break;

		bd = (struct rbd *)q->q.ioblk;

		for (i = 0; i < SMALL_SUPPLY_BLKSIZE; i++) {
			if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
				if_printf(&sc->ifatm.ifnet, "out of rbufs\n");
				break;
			}
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
				break;
			}
			MH_ALIGN(m, SMALL_BUFFER_LEN);
			error = bus_dmamap_load(sc->rbuf_tag, rb->map,
			    m->m_data, SMALL_BUFFER_LEN, dmaload_helper,
			    &phys, BUS_DMA_NOWAIT);
			if (error) {
				if_printf(&sc->ifatm.ifnet,
				    "dmamap_load mbuf failed %d", error);
				m_freem(m);
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
				break;
			}
			bus_dmamap_sync(sc->rbuf_tag, rb->map,
			    BUS_DMASYNC_PREREAD);

			LIST_REMOVE(rb, link);
			LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);

			rb->m = m;
			bd[i].handle = rb - sc->rbufs;
			H_SETDESC(bd[i].buffer, phys);
		}

		if (i < SMALL_SUPPLY_BLKSIZE) {
			for (j = 0; j < i; j++) {
				rb = sc->rbufs + bd[j].handle;
				bus_dmamap_unload(sc->rbuf_tag, rb->map);
				m_free(rb->m);
				rb->m = NULL;

				LIST_REMOVE(rb, link);
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
			}
			break;
		}
		H_SYNCQ_PREWRITE(&sc->s1q_mem, bd,
		    sizeof(struct rbd) * SMALL_SUPPLY_BLKSIZE);

		H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		WRITE4(sc, q->q.card, q->q.card_ioblk);
		BARRIER_W(sc);

		sc->small_cnt += SMALL_SUPPLY_BLKSIZE;

		NEXT_QUEUE_ENTRY(sc->s1queue.head, SMALL_SUPPLY_QLEN);
	}
}

/*
 * Try to supply buffers to the card if there are free entries in the queues.
 * We assume that all buffers are within the address space accessible by the
 * card (32-bit), so we don't need bounce buffers.
 */
static void
fatm_supply_large_buffers(struct fatm_softc *sc)
{
	int nbufs, nblocks, cnt;
	struct supqueue *q;
	struct rbd *bd;
	int i, j, error;
	struct mbuf *m;
	struct rbuf *rb;
	bus_addr_t phys;

	nbufs = max(4 * sc->open_vccs, 32);
	nbufs = min(nbufs, LARGE_POOL_SIZE);
	nbufs -= sc->large_cnt;

	nblocks = (nbufs + LARGE_SUPPLY_BLKSIZE - 1) / LARGE_SUPPLY_BLKSIZE;

	for (cnt = 0; cnt < nblocks; cnt++) {
		q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.head);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
			break;

		bd = (struct rbd *)q->q.ioblk;

		for (i = 0; i < LARGE_SUPPLY_BLKSIZE; i++) {
			if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
				if_printf(&sc->ifatm.ifnet, "out of rbufs\n");
				break;
			}
			if ((m = m_getcl(M_DONTWAIT, MT_DATA,
			    M_PKTHDR)) == NULL) {
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
				break;
			}
			/* No MEXT_ALIGN */
			m->m_data += MCLBYTES - LARGE_BUFFER_LEN;
			error = bus_dmamap_load(sc->rbuf_tag, rb->map,
			    m->m_data, LARGE_BUFFER_LEN, dmaload_helper,
			    &phys, BUS_DMA_NOWAIT);
			if (error) {
				if_printf(&sc->ifatm.ifnet,
				    "dmamap_load mbuf failed %d", error);
				m_freem(m);
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
				break;
			}

			bus_dmamap_sync(sc->rbuf_tag, rb->map,
			    BUS_DMASYNC_PREREAD);

			LIST_REMOVE(rb, link);
			LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);

			rb->m = m;
			bd[i].handle = rb - sc->rbufs;
			H_SETDESC(bd[i].buffer, phys);
		}

		if (i < LARGE_SUPPLY_BLKSIZE) {
			for (j = 0; j < i; j++) {
				rb = sc->rbufs + bd[j].handle;
				bus_dmamap_unload(sc->rbuf_tag, rb->map);
				m_free(rb->m);
				rb->m = NULL;

				LIST_REMOVE(rb, link);
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
			}
			break;
		}
		H_SYNCQ_PREWRITE(&sc->l1q_mem, bd,
		    sizeof(struct rbd) * LARGE_SUPPLY_BLKSIZE);

		H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
		WRITE4(sc, q->q.card, q->q.card_ioblk);
		BARRIER_W(sc);

		sc->large_cnt += LARGE_SUPPLY_BLKSIZE;

		NEXT_QUEUE_ENTRY(sc->l1queue.head, LARGE_SUPPLY_QLEN);
	}
}

/*
 * Actually start the card. The lock must be held here.
 * Reset, load the firmware, start it, initialize the queues, read the PROM
 * and supply receive buffers to the card.
 */
static void
fatm_init_locked(struct fatm_softc *sc)
{
	struct rxqueue *q;
	int i, c, error;
	uint32_t start;

	DBG(sc, INIT, ("initialize"));
	if (sc->ifatm.ifnet.if_flags & IFF_RUNNING)
		fatm_stop(sc);

	/*
	 * Hard reset the board
	 */
	if (fatm_reset(sc))
		return;

	start = firmware_load(sc);
	if (fatm_start_firmware(sc, start) || fatm_init_cmd(sc) ||
	    fatm_getprom(sc)) {
		fatm_reset(sc);
		return;
	}

	/*
	 * Handle media
	 */
	c = READ4(sc, FATMO_MEDIA_TYPE);
	switch (c) {

	  case FORE_MT_TAXI_100:
		sc->ifatm.mib.media = IFM_ATM_TAXI_100;
		sc->ifatm.mib.pcr = 227273;
		break;

	  case FORE_MT_TAXI_140:
		sc->ifatm.mib.media = IFM_ATM_TAXI_140;
		sc->ifatm.mib.pcr = 318181;
		break;

	  case FORE_MT_UTP_SONET:
		sc->ifatm.mib.media = IFM_ATM_UTP_155;
		sc->ifatm.mib.pcr = 353207;
		break;

	  case FORE_MT_MM_OC3_ST:
	  case FORE_MT_MM_OC3_SC:
		sc->ifatm.mib.media = IFM_ATM_MM_155;
		sc->ifatm.mib.pcr = 353207;
		break;

	  case FORE_MT_SM_OC3_ST:
	  case FORE_MT_SM_OC3_SC:
		sc->ifatm.mib.media = IFM_ATM_SM_155;
		sc->ifatm.mib.pcr = 353207;
		break;

	  default:
		log(LOG_ERR, "fatm: unknown media type %d\n", c);
		sc->ifatm.mib.media = IFM_ATM_UNKNOWN;
		sc->ifatm.mib.pcr = 353207;
		break;
	}
	sc->ifatm.ifnet.if_baudrate = 53 * 8 * sc->ifatm.mib.pcr;
	utopia_init_media(&sc->utopia);

	/*
	 * Initialize the RBDs
	 */
	for (i = 0; i < FATM_RX_QLEN; i++) {
		q = GET_QUEUE(sc->rxqueue, struct rxqueue, i);
		WRITE4(sc, q->q.card + 0, q->q.card_ioblk);
	}
	BARRIER_W(sc);

	/*
	 * Supply buffers to the card
	 */
	fatm_supply_small_buffers(sc);
	fatm_supply_large_buffers(sc);

	/*
	 * Now set the flags that indicate we are ready
	 */
	sc->ifatm.ifnet.if_flags |= IFF_RUNNING;

	/*
	 * Start the watchdog timer
	 */
	sc->ifatm.ifnet.if_timer = 5;

	/* start SUNI */
	utopia_start(&sc->utopia);

	ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm,
	    sc->utopia.carrier == UTP_CARR_OK);

	/* start all channels */
	for (i = 0; i < FORE_MAX_VCC + 1; i++)
		if (sc->vccs[i] != NULL) {
			sc->vccs[i]->vflags |= FATM_VCC_REOPEN;
			error = fatm_load_vc(sc, sc->vccs[i]);
			if (error != 0) {
				if_printf(&sc->ifatm.ifnet, "reopening %u "
				    "failed: %d\n", i, error);
				sc->vccs[i]->vflags &= ~FATM_VCC_REOPEN;
			}
		}

	DBG(sc, INIT, ("done"));
}

/*
 * This is exported as the initialisation function.
 */
static void
fatm_init(void *p)
{
	struct fatm_softc *sc = p;

	FATM_LOCK(sc);
	fatm_init_locked(sc);
	FATM_UNLOCK(sc);
}

/************************************************************/
/*
 * The INTERRUPT handling
 */
/*
 * Check the command queue. If a command was completed, call the completion
 * function for that command.
 */
static void
fatm_intr_drain_cmd(struct fatm_softc *sc)
{
	struct cmdqueue *q;
	int stat;

	/*
	 * Drain command queue
	 */
	for (;;) {
		q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if (stat != FATM_STAT_COMPLETE &&
		    stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
		    stat != FATM_STAT_ERROR)
			break;

		(*q->cb)(sc, q);

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
	}
}

/*
 * Drain the small buffer supply queue.
 */
static void
fatm_intr_drain_small_buffers(struct fatm_softc *sc)
{
	struct supqueue *q;
	int stat;

	for (;;) {
		q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if ((stat & FATM_STAT_COMPLETE) == 0)
			break;
		if (stat & FATM_STAT_ERROR)
			log(LOG_ERR, "%s: status %x\n", __func__, stat);

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		NEXT_QUEUE_ENTRY(sc->s1queue.tail, SMALL_SUPPLY_QLEN);
	}
}

/*
 * Drain the large buffer supply queue.
 */
static void
fatm_intr_drain_large_buffers(struct fatm_softc *sc)
{
	struct supqueue *q;
	int stat;

	for (;;) {
		q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if ((stat & FATM_STAT_COMPLETE) == 0)
			break;
		if (stat & FATM_STAT_ERROR)
			log(LOG_ERR, "%s: status %x\n", __func__, stat);

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		NEXT_QUEUE_ENTRY(sc->l1queue.tail, LARGE_SUPPLY_QLEN);
	}
}

/*
 * Check the receive queue. Send any received PDU up the protocol stack,
 * except when there was an error or the VCI appears to be closed; in that
 * case discard the PDU.
 */
static void
fatm_intr_drain_rx(struct fatm_softc *sc)
{
	struct rxqueue *q;
	int stat, mlen;
	u_int i;
	uint32_t h;
	struct mbuf *last, *m0;
	struct rpd *rpd;
	struct rbuf *rb;
	u_int vci, vpi, pt;
	struct atm_pseudohdr aph;
	struct ifnet *ifp;
	struct card_vcc *vc;

	for (;;) {
		q = GET_QUEUE(sc->rxqueue, struct rxqueue, sc->rxqueue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if ((stat & FATM_STAT_COMPLETE) == 0)
			break;

		rpd = (struct rpd *)q->q.ioblk;
		H_SYNCQ_POSTREAD(&sc->rxq_mem, rpd, RPD_SIZE);

		rpd->nseg = le32toh(rpd->nseg);
		mlen = 0;
		m0 = last = 0;
		for (i = 0; i < rpd->nseg; i++) {
			rb = sc->rbufs + rpd->segment[i].handle;
			if (m0 == NULL) {
				m0 = last = rb->m;
			} else {
				last->m_next = rb->m;
				last = rb->m;
			}
			last->m_next = NULL;
			if (last->m_flags & M_EXT)
				sc->large_cnt--;
			else
				sc->small_cnt--;
			bus_dmamap_sync(sc->rbuf_tag, rb->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->rbuf_tag, rb->map);
			rb->m = NULL;

			LIST_REMOVE(rb, link);
			LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);

			last->m_len = le32toh(rpd->segment[i].length);
			mlen += last->m_len;
		}

		m0->m_pkthdr.len = mlen;
		m0->m_pkthdr.rcvif = &sc->ifatm.ifnet;

		h = le32toh(rpd->atm_header);
		vpi = (h >> 20) & 0xff;
		vci = (h >> 4 ) & 0xffff;
		pt  = (h >> 1 ) & 0x7;
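
		/*
		 * The shifts above follow the UNI cell header layout
		 * GFC(4)/VPI(8)/VCI(16)/PT(3)/CLP(1): VPI comes from bits
		 * 27..20, VCI from bits 19..4 and the payload type from
		 * bits 3..1.
		 */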

		/*
		 * Locate the VCC this packet belongs to
		 */
		if (!VC_OK(sc, vpi, vci))
			vc = NULL;
		else if ((vc = sc->vccs[vci]) == NULL ||
		    !(sc->vccs[vci]->vflags & FATM_VCC_OPEN)) {
			sc->istats.rx_closed++;
			vc = NULL;
		}

		DBG(sc, RCV, ("RCV: vc=%u.%u pt=%u mlen=%d %s", vpi, vci,
		    pt, mlen, vc == NULL ? "dropped" : ""));

		if (vc == NULL) {
			m_freem(m0);
		} else {
			ATM_PH_FLAGS(&aph) = vc->param.flags;
			ATM_PH_VPI(&aph) = vpi;
			ATM_PH_SETVCI(&aph, vci);

			ifp = &sc->ifatm.ifnet;
			ifp->if_ipackets++;

			vc->ipackets++;
			vc->ibytes += m0->m_pkthdr.len;

			atm_input(ifp, &aph, m0, vc->rxhand);
		}

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		WRITE4(sc, q->q.card, q->q.card_ioblk);
		BARRIER_W(sc);

		NEXT_QUEUE_ENTRY(sc->rxqueue.tail, FATM_RX_QLEN);
	}
}

/*
 * Check the transmit queue. Free the mbuf chains that we were transmitting.
 */
static void
fatm_intr_drain_tx(struct fatm_softc *sc)
{
	struct txqueue *q;
	int stat;

	/*
	 * Drain tx queue
	 */
	for (;;) {
		q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if (stat != FATM_STAT_COMPLETE &&
		    stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
		    stat != FATM_STAT_ERROR)
			break;

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tx_tag, q->map);

		m_freem(q->m);
		q->m = NULL;
		sc->txcnt--;

		NEXT_QUEUE_ENTRY(sc->txqueue.tail, FATM_TX_QLEN);
	}
}

/*
 * Interrupt handler
 */
static void
fatm_intr(void *p)
{
	struct fatm_softc *sc = (struct fatm_softc *)p;

	FATM_LOCK(sc);
	if (!READ4(sc, FATMO_PSR)) {
		FATM_UNLOCK(sc);
		return;
	}
	WRITE4(sc, FATMO_HCR, FATM_HCR_CLRIRQ);

	if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) {
		FATM_UNLOCK(sc);
		return;
	}
	fatm_intr_drain_cmd(sc);
	fatm_intr_drain_rx(sc);
	fatm_intr_drain_tx(sc);
	fatm_intr_drain_small_buffers(sc);
	fatm_intr_drain_large_buffers(sc);
	fatm_supply_small_buffers(sc);
	fatm_supply_large_buffers(sc);

	FATM_UNLOCK(sc);

	if (sc->retry_tx && _IF_QLEN(&sc->ifatm.ifnet.if_snd))
		(*sc->ifatm.ifnet.if_start)(&sc->ifatm.ifnet);
}

/*
 * Get device statistics. This must be called with the softc locked.
 * We use a preallocated buffer, so we need to protect this buffer.
 * We do this by using a condition variable and a flag. If the flag is set
 * the buffer is in use by one thread (one thread is executing a GETSTAT
 * card command). In this case all other threads that are trying to get
 * statistics block on that condition variable. When the thread finishes
 * using the buffer it resets the flag and signals the condition variable.
 * This will wakeup the next thread that is waiting for the buffer. If the
 * interface is stopped the stopping function will broadcast the cv. All
 * threads will find that the interface has been stopped and return.
 *
 * Acquiring the buffer is done by the fatm_getstat() function. The freeing
 * must be done by the caller when it has finished using the buffer.
 */
static void
fatm_getstat_complete(struct fatm_softc *sc, struct cmdqueue *q)
{

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.get_stat_errors++;
		q->error = EIO;
	}
	wakeup(&sc->sadi_mem);
}

static int
fatm_getstat(struct fatm_softc *sc)
{
	int error;
	struct cmdqueue *q;

	/*
	 * Wait until either the interface is stopped or we can get the
	 * statistics buffer
	 */
	for (;;) {
		if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
			return (EIO);
		if (!(sc->flags & FATM_STAT_INUSE))
			break;
		cv_wait(&sc->cv_stat, &sc->mtx);
	}
	sc->flags |= FATM_STAT_INUSE;

	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
		sc->istats.cmd_queue_full++;
		/* release the statistics buffer before bailing out */
		sc->flags &= ~FATM_STAT_INUSE;
		cv_signal(&sc->cv_stat);
		return (EIO);
	}
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = fatm_getstat_complete;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
	    BUS_DMASYNC_PREREAD);

	WRITE4(sc, q->q.card + FATMOC_GSTAT_BUF,
	    sc->sadi_mem.paddr);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP,
	    FATM_OP_REQUEST_STATS | FATM_OP_INTERRUPT_SEL);
	BARRIER_W(sc);

	/*
	 * Wait for the command to complete
	 */
	error = msleep(&sc->sadi_mem, &sc->mtx, PZERO | PCATCH,
	    "fatm_stat", hz);

	switch (error) {

	  case EWOULDBLOCK:
		error = EIO;
		break;

	  case ERESTART:
		error = EINTR;
		break;

	  case 0:
		bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
		    BUS_DMASYNC_POSTREAD);
		error = q->error;
		break;
	}

	/*
	 * Swap statistics
	 */
	if (q->error == 0) {
		u_int i;
		uint32_t *p = (uint32_t *)sc->sadi_mem.mem;

		for (i = 0; i < sizeof(struct fatm_stats) / sizeof(uint32_t);
		    i++, p++)
			*p = be32toh(*p);
	}

	return (error);
}

/*
 * Create a copy of a single mbuf. It can have either internal or
 * external data, it may have a packet header. External data is really
 * copied, so the new buffer is writeable.
 */
static struct mbuf *
copy_mbuf(struct mbuf *m)
{
	struct mbuf *new;

	MGET(new, M_DONTWAIT, MT_DATA);
	if (new == NULL)
		return (NULL);

	if (m->m_flags & M_PKTHDR) {
		M_MOVE_PKTHDR(new, m);
		if (m->m_len > MHLEN) {
			MCLGET(new, M_TRYWAIT);
			/* check the new mbuf, not the source mbuf */
			if ((new->m_flags & M_EXT) == 0) {
				m_free(new);
				return (NULL);
			}
		}
	} else {
		if (m->m_len > MLEN) {
			MCLGET(new, M_TRYWAIT);
			/* check the new mbuf, not the source mbuf */
			if ((new->m_flags & M_EXT) == 0) {
				m_free(new);
				return (NULL);
			}
		}
	}

	bcopy(m->m_data, new->m_data, m->m_len);
	new->m_len = m->m_len;
	new->m_flags &= ~M_RDONLY;

	return (new);
}

/*
 * All segments must have a four byte aligned buffer address and a four
 * byte aligned length. Step through an mbuf chain and check these conditions.
 * If the buffer address is not aligned and this is a normal mbuf, move
 * the data down. Else make a copy of the mbuf with aligned data.
 * If the buffer length is not aligned steal data from the next mbuf.
 * We don't need to check whether this has more than one external reference,
 * because stealing data doesn't change the external cluster.
 * If the last mbuf is not aligned, fill with zeroes.
 *
 * Return packet length (well we should have this in the packet header),
 * but be careful not to count the zero fill at the end.
 *
 * If fixing fails free the chain and zero the pointer.
 *
 * We assume that aligning the virtual address also aligns the mapped bus
 * address.
 */
static u_int
fatm_fix_chain(struct fatm_softc *sc, struct mbuf **mp)
{
	struct mbuf *m = *mp, *prev = NULL, *next, *new;
	u_int mlen = 0, fill = 0;
	int first, off;
	u_char *d, *cp;

	do {
		next = m->m_next;

		if ((uintptr_t)mtod(m, void *) % 4 != 0 ||
		    (m->m_len % 4 != 0 && next)) {
			/*
			 * Needs fixing
			 */
			first = (m == *mp);

			d = mtod(m, u_char *);
			if ((off = (uintptr_t)(void *)d % 4) != 0) {
				if (!(m->m_flags & M_EXT) || !MEXT_IS_REF(m)) {
					sc->istats.fix_addr_copy++;
					bcopy(d, d - off, m->m_len);
					m->m_data = (caddr_t)(d - off);
				} else {
					if ((new = copy_mbuf(m)) == NULL) {
						sc->istats.fix_addr_noext++;
						goto fail;
					}
					sc->istats.fix_addr_ext++;
					if (prev)
						prev->m_next = new;
					new->m_next = next;
					m_free(m);
					m = new;
				}
			}

			if ((off = m->m_len % 4) != 0) {
				if ((m->m_flags & M_EXT) && MEXT_IS_REF(m)) {
					if ((new = copy_mbuf(m)) == NULL) {
						sc->istats.fix_len_noext++;
						goto fail;
					}
					sc->istats.fix_len_copy++;
					if (prev)
						prev->m_next = new;
					new->m_next = next;
					m_free(m);
					m = new;
				} else
					sc->istats.fix_len++;
				d = mtod(m, u_char *) + m->m_len;
				off = 4 - off;
				while (off) {
					if (next == NULL) {
						*d++ = 0;
						fill++;
					} else if (next->m_len == 0) {
						sc->istats.fix_empty++;
						next = m_free(next);
						continue;
					} else {
						cp = mtod(next, u_char *);
						*d++ = *cp++;
						next->m_len--;
						next->m_data = (caddr_t)cp;
					}
					off--;
					m->m_len++;
				}
			}

			if (first)
				*mp = m;
		}

		mlen += m->m_len;
		prev = m;
	} while ((m = next) != NULL);

	return (mlen - fill);

  fail:
	m_freem(*mp);
	*mp = NULL;
	return (0);
}
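
/*
 * Example: an mbuf whose data starts at an address with residue 2 and
 * that holds 5 bytes is fixed by sliding the data down 2 bytes (plain
 * mbuf) or copying it (shared cluster), and then by pulling 3 bytes from
 * the next mbuf (or zero-filling if it is the last one) so that both the
 * address and the length become multiples of 4.
 */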

/*
 * This helper function is used to load the computed physical addresses
 * into the transmit descriptor.
 */
static void
fatm_tpd_load(void *varg, bus_dma_segment_t *segs, int nsegs,
    bus_size_t mapsize, int error)
{
	struct tpd *tpd = varg;

	if (error)
		return;

	KASSERT(nsegs <= TPD_EXTENSIONS + TXD_FIXED, ("too many segments"));

	tpd->spec = 0;
	while (nsegs--) {
		H_SETDESC(tpd->segment[tpd->spec].buffer, segs->ds_addr);
		H_SETDESC(tpd->segment[tpd->spec].length, segs->ds_len);
		tpd->spec++;
		segs++;
	}
}

/*
 * Start output.
 *
 * Note that we update the internal statistics without the lock here.
 */
static int
fatm_tx(struct fatm_softc *sc, struct mbuf *m, struct card_vcc *vc, u_int mlen)
{
	struct txqueue *q;
	u_int nblks;
	int error, aal, nsegs;
	struct tpd *tpd;

	/*
	 * Get a queue element.
	 * If there isn't one - try to drain the transmit queue.
	 * We used to sleep here if that doesn't help, but we
	 * should not sleep here, because we are called with locks.
	 */
	q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
		fatm_intr_drain_tx(sc);
		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
			if (sc->retry_tx) {
				sc->istats.tx_retry++;
				IF_PREPEND(&sc->ifatm.ifnet.if_snd, m);
				return (1);
			}
			sc->istats.tx_queue_full++;
			m_freem(m);
			return (0);
		}
		sc->istats.tx_queue_almost_full++;
	}

	tpd = q->q.ioblk;

	m->m_data += sizeof(struct atm_pseudohdr);
	m->m_len -= sizeof(struct atm_pseudohdr);

	/* map the mbuf */
	error = bus_dmamap_load_mbuf(sc->tx_tag, q->map, m,
	    fatm_tpd_load, tpd, BUS_DMA_NOWAIT);
	if (error) {
		sc->ifatm.ifnet.if_oerrors++;
		if_printf(&sc->ifatm.ifnet, "mbuf load error=%d\n", error);
		m_freem(m);
		return (0);
	}
	nsegs = tpd->spec;

	bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_PREWRITE);

	/*
	 * OK. Now go and do it.
	 */
	aal = (vc->param.aal == ATMIO_AAL_5) ? 5 : 0;

	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
	q->m = m;

	/*
	 * If the transmit queue is almost full, schedule a
	 * transmit interrupt so that transmit descriptors can
	 * be recycled.
	 */
	H_SETDESC(tpd->spec, TDX_MKSPEC((sc->txcnt >=
	    (4 * FATM_TX_QLEN) / 5), aal, nsegs, mlen));
	H_SETDESC(tpd->atm_header, TDX_MKHDR(vc->param.vpi,
	    vc->param.vci, 0, 0));

	if (vc->param.traffic == ATMIO_TRAFFIC_UBR)
		H_SETDESC(tpd->stream, 0);
	else {
		u_int i;

		for (i = 0; i < RATE_TABLE_SIZE; i++)
			if (rate_table[i].cell_rate < vc->param.tparam.pcr)
				break;
		if (i > 0)
			i--;
		H_SETDESC(tpd->stream, rate_table[i].ratio);
	}
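	/*
	 * The rate table is assumed to be sorted by decreasing cell_rate,
	 * so the loop above picks the last entry whose cell rate is still
	 * >= the requested PCR (e.g. for PCR 100000 and rates 353207,
	 * 150000, 80000 it selects the 150000 entry).
	 */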
	H_SYNCQ_PREWRITE(&sc->txq_mem, tpd, TPD_SIZE);

	nblks = TDX_SEGS2BLKS(nsegs);

	DBG(sc, XMIT, ("XMIT: mlen=%d spec=0x%x nsegs=%d blocks=%d",
	    mlen, le32toh(tpd->spec), nsegs, nblks));

	WRITE4(sc, q->q.card + 0, q->q.card_ioblk | nblks);
	BARRIER_W(sc);

	sc->txcnt++;
	sc->ifatm.ifnet.if_opackets++;
	vc->obytes += m->m_pkthdr.len;
	vc->opackets++;

	NEXT_QUEUE_ENTRY(sc->txqueue.head, FATM_TX_QLEN);

	return (0);
}

static void
fatm_start(struct ifnet *ifp)
{
	struct atm_pseudohdr aph;
	struct fatm_softc *sc;
	struct mbuf *m;
	u_int mlen, vpi, vci;
	struct card_vcc *vc;

	sc = (struct fatm_softc *)ifp->if_softc;

	while (1) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * Loop through the mbuf chain and compute the total length
		 * of the packet. Check that all data pointers are
		 * 4 byte aligned. If they are not, call fatm_fix_chain to
		 * fix that problem. This comes more or less from the
		 * en driver.
		 */
		mlen = fatm_fix_chain(sc, &m);
		if (m == NULL)
			continue;

		if (m->m_len < sizeof(struct atm_pseudohdr) &&
		    (m = m_pullup(m, sizeof(struct atm_pseudohdr))) == NULL)
			continue;

		aph = *mtod(m, struct atm_pseudohdr *);
		mlen -= sizeof(struct atm_pseudohdr);

		if (mlen == 0) {
			m_freem(m);
			continue;
		}
		if (mlen > FATM_MAXPDU) {
			sc->istats.tx_pdu2big++;
			m_freem(m);
			continue;
		}

		vci = ATM_PH_VCI(&aph);
		vpi = ATM_PH_VPI(&aph);

		/*
		 * From here on we need the softc
		 */
		FATM_LOCK(sc);
		if (!(ifp->if_flags & IFF_RUNNING)) {
			FATM_UNLOCK(sc);
			m_freem(m);
			break;
		}
		if (!VC_OK(sc, vpi, vci) || (vc = sc->vccs[vci]) == NULL ||
		    !(vc->vflags & FATM_VCC_OPEN)) {
			FATM_UNLOCK(sc);
			m_freem(m);
			continue;
		}
		if (fatm_tx(sc, m, vc, mlen)) {
			FATM_UNLOCK(sc);
			break;
		}
		FATM_UNLOCK(sc);
	}
}
2101
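#if 0
/*
 * Usage sketch (not compiled in, hypothetical caller): every mbuf handed
 * to fatm_start() must begin with a struct atm_pseudohdr naming the
 * VPI/VCI, as accessed via ATM_PH_VPI()/ATM_PH_VCI() above. Assuming the
 * ATM_PH_* accessors from <net/if_atm.h>, a caller would prepend it
 * roughly like this:
 */
static void
example_enqueue(struct ifnet *ifp, struct mbuf *m, u_int vpi, u_int vci)
{
	struct atm_pseudohdr *aph;

	/* prepend the pseudo header in front of the payload */
	M_PREPEND(m, sizeof(*aph), M_DONTWAIT);
	if (m == NULL)
		return;
	aph = mtod(m, struct atm_pseudohdr *);
	ATM_PH_FLAGS(aph) = 0;
	ATM_PH_VPI(aph) = vpi;
	ATM_PH_SETVCI(aph, vci);

	/* queue it and poke the start routine */
	IF_ENQUEUE(&ifp->if_snd, m);
	(*ifp->if_start)(ifp);
}
#endif
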
2102/*
2103 * VCC management
2104 *
2105 * This may seem complicated. The reason is that we need an
2106 * asynchronous open/close for the NATM VCCs because our ioctl handler
2107 * is called with the radix node head of the routing table locked. Therefore
2108 * we cannot sleep there and wait for the open/close to succeed. For this
2109 * reason we just initiate the operation from the ioctl.
2110 */
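
#if 0
/*
 * Usage sketch (not compiled in, values illustrative): the open path
 * below supports both modes. Without ATMIO_FLAG_ASYNC fatm_open_vcc()
 * sleeps until the card has answered; with the flag set the call returns
 * at once and fatm_open_complete() finishes the job.
 */
static int
example_open(struct fatm_softc *sc)
{
	struct atmio_openvcc op;

	bzero(&op, sizeof(op));
	op.param.vpi = 0;
	op.param.vci = 100;
	op.param.aal = ATMIO_AAL_5;
	op.param.traffic = ATMIO_TRAFFIC_UBR;
	op.param.flags |= ATMIO_FLAG_ASYNC;	/* don't sleep for completion */
	return (fatm_open_vcc(sc, &op));
}
#endif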
2111
2112/*
2113 * Command the card to open/close a VC.
2114 * Return the queue entry to wait on if we are successful.
2115 */
2116static struct cmdqueue *
2117fatm_start_vcc(struct fatm_softc *sc, u_int vpi, u_int vci, uint32_t cmd,
2118    u_int mtu, void (*func)(struct fatm_softc *, struct cmdqueue *))
2119{
2120	struct cmdqueue *q;
2121
2122	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
2123
2124	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2125	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
2126		sc->istats.cmd_queue_full++;
2127		return (NULL);
2128	}
2129	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
2130
2131	q->error = 0;
2132	q->cb = func;
2133	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
2134	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
2135
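	/*
	 * Hand the command to the card: the VPI/VCI pair first, then
	 * the MTU and finally the opcode. The write barriers keep the
	 * stores ordered as seen by the card.
	 */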
2136	WRITE4(sc, q->q.card + FATMOC_ACTIN_VPVC, MKVPVC(vpi, vci));
2137	BARRIER_W(sc);
2138	WRITE4(sc, q->q.card + FATMOC_ACTIN_MTU, mtu);
2139	BARRIER_W(sc);
2140	WRITE4(sc, q->q.card + FATMOC_OP, cmd);
2141	BARRIER_W(sc);
2142
2143	return (q);
2144}
2145
2146/*
2147 * The VC has been opened/closed and somebody has been waiting for this.
2148 * Wake him up.
2149 */
2150static void
2151fatm_cmd_complete(struct fatm_softc *sc, struct cmdqueue *q)
2152{
2153
2154	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2155	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2156		sc->istats.get_stat_errors++;
2157		q->error = EIO;
2158	}
2159	wakeup(q);
2160}
2161
2162/*
2163 * Open complete
2164 */
2165static void
2166fatm_open_finish(struct fatm_softc *sc, struct card_vcc *vc)
2167{
2168	vc->vflags &= ~FATM_VCC_TRY_OPEN;
2169	vc->vflags |= FATM_VCC_OPEN;
2170
2171	if (vc->vflags & FATM_VCC_REOPEN) {
2172		vc->vflags &= ~FATM_VCC_REOPEN;
2173		return;
2174	}
2175
2176	/* inform management if this is not an NG
2177	 * VCC or it's an NG PVC. */
2178	if (!(vc->param.flags & ATMIO_FLAG_NG) ||
2179	    (vc->param.flags & ATMIO_FLAG_PVC))
2180		ATMEV_SEND_VCC_CHANGED(&sc->ifatm, 0, vc->param.vci, 1);
2181}
2182
2183/*
2184 * The VC that we have tried to open asynchronously has been opened.
2185 */
2186static void
2187fatm_open_complete(struct fatm_softc *sc, struct cmdqueue *q)
2188{
2189	u_int vci;
2190	struct card_vcc *vc;
2191
2192	vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
2193	vc = sc->vccs[vci];
2194	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2195	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2196		sc->istats.get_stat_errors++;
2197		sc->vccs[vci] = NULL;
2198		uma_zfree(sc->vcc_zone, vc);
2199		if_printf(&sc->ifatm.ifnet, "opening VCI %u failed\n", vci);
2200		return;
2201	}
2202	fatm_open_finish(sc, vc);
2203}
2204
2205/*
2206 * Wait on the queue entry until the VCC is opened/closed.
2207 */
2208static int
2209fatm_waitvcc(struct fatm_softc *sc, struct cmdqueue *q)
2210{
2211	int error;
2212
2213	/*
2214	 * Wait for the command to complete (at most one second)
2215	 */
2216	error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_vci", hz);
2217
2218	if (error != 0)
2219		return (error);
2220	return (q->error);
2221}
2222
2223/*
2224 * Start to open a VCC. This just initiates the operation.
2225 */
2226static int
2227fatm_open_vcc(struct fatm_softc *sc, struct atmio_openvcc *op)
2228{
2229	int error;
2230	struct card_vcc *vc;
2231
2232	/*
2233	 * Check parameters
2234	 */
2235	if ((op->param.flags & ATMIO_FLAG_NOTX) &&
2236	    (op->param.flags & ATMIO_FLAG_NORX))
2237		return (EINVAL);
2238
2239	if (!VC_OK(sc, op->param.vpi, op->param.vci))
2240		return (EINVAL);
2241	if (op->param.aal != ATMIO_AAL_0 && op->param.aal != ATMIO_AAL_5)
2242		return (EINVAL);
2243
2244	vc = uma_zalloc(sc->vcc_zone, M_NOWAIT | M_ZERO);
2245	if (vc == NULL)
2246		return (ENOMEM);
2247
2248	error = 0;
2249
2250	FATM_LOCK(sc);
2251	if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) {
2252		error = EIO;
2253		goto done;
2254	}
2255	if (sc->vccs[op->param.vci] != NULL) {
2256		error = EBUSY;
2257		goto done;
2258	}
2259	vc->param = op->param;
2260	vc->rxhand = op->rxhand;
2261
2262	switch (op->param.traffic) {
2263
2264	  case ATMIO_TRAFFIC_UBR:
2265		break;
2266
2267	  case ATMIO_TRAFFIC_CBR:
2268		if (op->param.tparam.pcr == 0 ||
2269		    op->param.tparam.pcr > sc->ifatm.mib.pcr) {
2270			error = EINVAL;
2271			goto done;
2272		}
2273		break;
2274
2275	  default:
2276		error = EINVAL;
2277		goto done;
2278	}
2279	vc->ibytes = vc->obytes = 0;
2280	vc->ipackets = vc->opackets = 0;
2281
2282	vc->vflags = FATM_VCC_TRY_OPEN;
2283	sc->vccs[op->param.vci] = vc;
2284	sc->open_vccs++;
2285
2286	error = fatm_load_vc(sc, vc);
2287	if (error != 0) {
2288		sc->vccs[op->param.vci] = NULL;
2289		sc->open_vccs--;
2290		goto done;
2291	}
2292
2293	/* don't free below */
2294	vc = NULL;
2295
2296  done:
2297	FATM_UNLOCK(sc);
2298	if (vc != NULL)
2299		uma_zfree(sc->vcc_zone, vc);
2300	return (error);
2301}
2302
2303/*
2304 * Try to initialize the given VC
2305 */
2306static int
2307fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc)
2308{
2309	uint32_t cmd;
2310	struct cmdqueue *q;
2311	int error;
2312
2313	/* build the command: buffer strategy 0 (bits 16+) and AAL type (bits 8-15) */
2314	cmd = FATM_OP_ACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL | (0 << 16);
2315	if (vc->param.aal == ATMIO_AAL_0)
2316		cmd |= (0 << 8);
2317	else
2318		cmd |= (5 << 8);
2319
2320	q = fatm_start_vcc(sc, vc->param.vpi, vc->param.vci, cmd, 1,
2321	    (vc->param.flags & ATMIO_FLAG_ASYNC) ?
2322	    fatm_open_complete : fatm_cmd_complete);
2323	if (q == NULL)
2324		return (EIO);
2325
2326	if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) {
2327		error = fatm_waitvcc(sc, q);
2328		if (error != 0)
2329			return (error);
2330		fatm_open_finish(sc, vc);
2331	}
2332	return (0);
2333}
2334
2335/*
2336 * Finish close
2337 */
2338static void
2339fatm_close_finish(struct fatm_softc *sc, struct card_vcc *vc)
2340{
2341	/* inform management if this is not an NG
2342	 * VCC or it's an NG PVC. */
2343	if (!(vc->param.flags & ATMIO_FLAG_NG) ||
2344	    (vc->param.flags & ATMIO_FLAG_PVC))
2345		ATMEV_SEND_VCC_CHANGED(&sc->ifatm, 0, vc->param.vci, 0);
2346
2347	sc->vccs[vc->param.vci] = NULL;
2348	sc->open_vccs--;
2349
2350	uma_zfree(sc->vcc_zone, vc);
2351}
2352
2353/*
2354 * The VC has been closed.
2355 */
2356static void
2357fatm_close_complete(struct fatm_softc *sc, struct cmdqueue *q)
2358{
2359	u_int vci;
2360	struct card_vcc *vc;
2361
2362	vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
2363	vc = sc->vccs[vci];
2364	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2365	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2366		sc->istats.get_stat_errors++;
2367		/* keep the VCC in that state */
2368		if_printf(&sc->ifatm.ifnet, "closing VCI %u failed\n", vci);
2369		return;
2370	}
2371
2372	fatm_close_finish(sc, vc);
2373}
2374
2375/*
2376 * Initiate closing a VCC
2377 */
2378static int
2379fatm_close_vcc(struct fatm_softc *sc, struct atmio_closevcc *cl)
2380{
2381	int error;
2382	struct cmdqueue *q;
2383	struct card_vcc *vc;
2384
2385	if (!VC_OK(sc, cl->vpi, cl->vci))
2386		return (EINVAL);
2387
2388	error = 0;
2389
2390	FATM_LOCK(sc);
2391	if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) {
2392		error = EIO;
2393		goto done;
2394	}
2395	vc = sc->vccs[cl->vci];
2396	if (vc == NULL || !(vc->vflags & (FATM_VCC_OPEN | FATM_VCC_TRY_OPEN))) {
2397		error = ENOENT;
2398		goto done;
2399	}
2400
2401	q = fatm_start_vcc(sc, cl->vpi, cl->vci,
2402	    FATM_OP_DEACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL, 1,
2403	    (vc->param.flags & ATMIO_FLAG_ASYNC) ?
2404	    fatm_close_complete : fatm_cmd_complete);
2405	if (q == NULL) {
2406		error = EIO;
2407		goto done;
2408	}
2409
2410	vc->vflags &= ~(FATM_VCC_OPEN | FATM_VCC_TRY_OPEN);
2411	vc->vflags |= FATM_VCC_TRY_CLOSE;
2412
2413	if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) {
2414		error = fatm_waitvcc(sc, q);
2415		if (error != 0)
2416			goto done;
2417
2418		fatm_close_finish(sc, vc);
2419	}
2420
2421  done:
2422	FATM_UNLOCK(sc);
2423	return (error);
2424}
2425
2426/*
2427 * IOCTL handler
2428 */
2429static int
2430fatm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t arg)
2431{
2432	int error;
2433	struct fatm_softc *sc = ifp->if_softc;
2434	struct ifaddr *ifa = (struct ifaddr *)arg;
2435	struct ifreq *ifr = (struct ifreq *)arg;
2436	struct atmio_closevcc *cl = (struct atmio_closevcc *)arg;
2437	struct atmio_openvcc *op = (struct atmio_openvcc *)arg;
2438	struct atmio_vcctable *vtab;
2439
2440	error = 0;
2441	switch (cmd) {
2442
2443	  case SIOCATMOPENVCC:		/* kernel internal use */
2444		error = fatm_open_vcc(sc, op);
2445		break;
2446
2447	  case SIOCATMCLOSEVCC:		/* kernel internal use */
2448		error = fatm_close_vcc(sc, cl);
2449		break;
2450
2451	  case SIOCSIFADDR:
2452		FATM_LOCK(sc);
2453		ifp->if_flags |= IFF_UP;
2454		if (!(ifp->if_flags & IFF_RUNNING))
2455			fatm_init_locked(sc);
2456		switch (ifa->ifa_addr->sa_family) {
2457#ifdef INET
2458		  case AF_INET:
2459		  case AF_INET6:
2460			ifa->ifa_rtrequest = atm_rtrequest;
2461			break;
2462#endif
2463		  default:
2464			break;
2465		}
2466		FATM_UNLOCK(sc);
2467		break;
2468
2469	  case SIOCSIFFLAGS:
2470		FATM_LOCK(sc);
2471		if (ifp->if_flags & IFF_UP) {
2472			if (!(ifp->if_flags & IFF_RUNNING)) {
2473				fatm_init_locked(sc);
2474			}
2475		} else {
2476			if (ifp->if_flags & IFF_RUNNING) {
2477				fatm_stop(sc);
2478			}
2479		}
2480		FATM_UNLOCK(sc);
2481		break;
2482
2483	  case SIOCGIFMEDIA:
2484	  case SIOCSIFMEDIA:
2485		if (ifp->if_flags & IFF_RUNNING)
2486			error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
2487		else
2488			error = EINVAL;
2489		break;
2490
2491	  case SIOCATMGVCCS:
2492		/* return vcc table */
2493		vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
2494		    FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 1);
2495		error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
2496		    vtab->count * sizeof(vtab->vccs[0]));
2497		free(vtab, M_DEVBUF);
2498		break;
2499
2500	  case SIOCATMGETVCCS:	/* internal netgraph use */
2501		vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
2502		    FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 0);
2503		if (vtab == NULL) {
2504			error = ENOMEM;
2505			break;
2506		}
2507		*(void **)arg = vtab;
2508		break;
2509
2510	  default:
2511		DBG(sc, IOCTL, ("+++ cmd=%08lx arg=%p", cmd, arg));
2512		error = EINVAL;
2513		break;
2514	}
2515
2516	return (error);
2517}
2518
2519/*
2520 * Detach from the interface and free all resources allocated during
2521 * initialisation and later.
2522 */
2523static int
2524fatm_detach(device_t dev)
2525{
2526	u_int i;
2527	struct rbuf *rb;
2528	struct fatm_softc *sc;
2529	struct txqueue *tx;
2530
2531	sc = (struct fatm_softc *)device_get_softc(dev);
2532
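	/*
	 * If the device is still alive, stop the card and detach from
	 * the network stack before tearing down the resources.
	 */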
2533	if (device_is_alive(dev)) {
2534		FATM_LOCK(sc);
2535		fatm_stop(sc);
2536		utopia_detach(&sc->utopia);
2537		FATM_UNLOCK(sc);
2538		atm_ifdetach(&sc->ifatm.ifnet);		/* XXX race */
2539	}
2540
2541	if (sc->ih != NULL)
2542		bus_teardown_intr(dev, sc->irqres, sc->ih);
2543
2544	while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
2545		if_printf(&sc->ifatm.ifnet, "rbuf %p still in use!\n", rb);
2546		bus_dmamap_unload(sc->rbuf_tag, rb->map);
2547		m_freem(rb->m);
2548		LIST_REMOVE(rb, link);
2549		LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
2550	}
2551
2552	if (sc->txqueue.chunk != NULL) {
2553		for (i = 0; i < FATM_TX_QLEN; i++) {
2554			tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
2555			bus_dmamap_destroy(sc->tx_tag, tx->map);
2556		}
2557	}
2558
2559	while ((rb = LIST_FIRST(&sc->rbuf_free)) != NULL) {
2560		bus_dmamap_destroy(sc->rbuf_tag, rb->map);
2561		LIST_REMOVE(rb, link);
2562	}
2563
2564	if (sc->rbufs != NULL)
2565		free(sc->rbufs, M_DEVBUF);
2566	if (sc->vccs != NULL) {
2567		for (i = 0; i < FORE_MAX_VCC + 1; i++)
2568			if (sc->vccs[i] != NULL) {
2569				uma_zfree(sc->vcc_zone, sc->vccs[i]);
2570				sc->vccs[i] = NULL;
2571			}
2572		free(sc->vccs, M_DEVBUF);
2573	}
2574	if (sc->vcc_zone != NULL)
2575		uma_zdestroy(sc->vcc_zone);
2576
2577	if (sc->l1queue.chunk != NULL)
2578		free(sc->l1queue.chunk, M_DEVBUF);
2579	if (sc->s1queue.chunk != NULL)
2580		free(sc->s1queue.chunk, M_DEVBUF);
2581	if (sc->rxqueue.chunk != NULL)
2582		free(sc->rxqueue.chunk, M_DEVBUF);
2583	if (sc->txqueue.chunk != NULL)
2584		free(sc->txqueue.chunk, M_DEVBUF);
2585	if (sc->cmdqueue.chunk != NULL)
2586		free(sc->cmdqueue.chunk, M_DEVBUF);
2587
2588	destroy_dma_memory(&sc->reg_mem);
2589	destroy_dma_memory(&sc->sadi_mem);
2590	destroy_dma_memory(&sc->prom_mem);
2591#ifdef TEST_DMA_SYNC
2592	destroy_dma_memoryX(&sc->s1q_mem);
2593	destroy_dma_memoryX(&sc->l1q_mem);
2594	destroy_dma_memoryX(&sc->rxq_mem);
2595	destroy_dma_memoryX(&sc->txq_mem);
2596	destroy_dma_memoryX(&sc->stat_mem);
2597#endif
2598
2599	if (sc->tx_tag != NULL)
2600		if (bus_dma_tag_destroy(sc->tx_tag))
2601			printf("tx DMA tag busy!\n");
2602
2603	if (sc->rbuf_tag != NULL)
2604		if (bus_dma_tag_destroy(sc->rbuf_tag))
2605			printf("rbuf DMA tag busy!\n");
2606
2607	if (sc->parent_dmat != NULL)
2608		if (bus_dma_tag_destroy(sc->parent_dmat))
2609			printf("parent DMA tag busy!\n");
2610
2611	if (sc->irqres != NULL)
2612		bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irqres);
2613
2614	if (sc->memres != NULL)
2615		bus_release_resource(dev, SYS_RES_MEMORY,
2616		    sc->memid, sc->memres);
2617
2618	(void)sysctl_ctx_free(&sc->sysctl_ctx);
2619
2620	cv_destroy(&sc->cv_stat);
2621	cv_destroy(&sc->cv_regs);
2622
2623	mtx_destroy(&sc->mtx);
2624
2625	return (0);
2626}
2627
2628/*
2629 * Sysctl handler for the internal driver statistics (hw.atm.<device>.istats)
2630 */
2631static int
2632fatm_sysctl_istats(SYSCTL_HANDLER_ARGS)
2633{
2634	struct fatm_softc *sc = arg1;
2635	u_long *ret;
2636	int error;
2637
2638	ret = malloc(sizeof(sc->istats), M_TEMP, M_WAITOK);
2639
2640	FATM_LOCK(sc);
2641	bcopy(&sc->istats, ret, sizeof(sc->istats));
2642	FATM_UNLOCK(sc);
2643
2644	error = SYSCTL_OUT(req, ret, sizeof(sc->istats));
2645	free(ret, M_TEMP);
2646
2647	return (error);
2648}
2649
2650/*
2651 * Sysctl handler for card statistics.
2652 * This is disabled because it destroys the PHY statistics.
2653 */
2654static int
2655fatm_sysctl_stats(SYSCTL_HANDLER_ARGS)
2656{
2657	struct fatm_softc *sc = arg1;
2658	int error;
2659	const struct fatm_stats *s;
2660	u_long *ret;
2661	u_int i;
2662
2663	ret = malloc(sizeof(u_long) * FATM_NSTATS, M_TEMP, M_WAITOK);
2664
2665	FATM_LOCK(sc);
2666
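	/*
	 * fatm_getstat() fetches the current card statistics into
	 * sadi_mem; on success flatten them into an array of u_longs
	 * in a fixed, well-known order.
	 */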
2667	if ((error = fatm_getstat(sc)) == 0) {
2668		s = sc->sadi_mem.mem;
2669		i = 0;
2670		ret[i++] = s->phy_4b5b.crc_header_errors;
2671		ret[i++] = s->phy_4b5b.framing_errors;
2672		ret[i++] = s->phy_oc3.section_bip8_errors;
2673		ret[i++] = s->phy_oc3.path_bip8_errors;
2674		ret[i++] = s->phy_oc3.line_bip24_errors;
2675		ret[i++] = s->phy_oc3.line_febe_errors;
2676		ret[i++] = s->phy_oc3.path_febe_errors;
2677		ret[i++] = s->phy_oc3.corr_hcs_errors;
2678		ret[i++] = s->phy_oc3.ucorr_hcs_errors;
2679		ret[i++] = s->atm.cells_transmitted;
2680		ret[i++] = s->atm.cells_received;
2681		ret[i++] = s->atm.vpi_bad_range;
2682		ret[i++] = s->atm.vpi_no_conn;
2683		ret[i++] = s->atm.vci_bad_range;
2684		ret[i++] = s->atm.vci_no_conn;
2685		ret[i++] = s->aal0.cells_transmitted;
2686		ret[i++] = s->aal0.cells_received;
2687		ret[i++] = s->aal0.cells_dropped;
2688		ret[i++] = s->aal4.cells_transmitted;
2689		ret[i++] = s->aal4.cells_received;
2690		ret[i++] = s->aal4.cells_crc_errors;
2691		ret[i++] = s->aal4.cels_protocol_errors;
2692		ret[i++] = s->aal4.cells_dropped;
2693		ret[i++] = s->aal4.cspdus_transmitted;
2694		ret[i++] = s->aal4.cspdus_received;
2695		ret[i++] = s->aal4.cspdus_protocol_errors;
2696		ret[i++] = s->aal4.cspdus_dropped;
2697		ret[i++] = s->aal5.cells_transmitted;
2698		ret[i++] = s->aal5.cells_received;
2699		ret[i++] = s->aal5.congestion_experienced;
2700		ret[i++] = s->aal5.cells_dropped;
2701		ret[i++] = s->aal5.cspdus_transmitted;
2702		ret[i++] = s->aal5.cspdus_received;
2703		ret[i++] = s->aal5.cspdus_crc_errors;
2704		ret[i++] = s->aal5.cspdus_protocol_errors;
2705		ret[i++] = s->aal5.cspdus_dropped;
2706		ret[i++] = s->aux.small_b1_failed;
2707		ret[i++] = s->aux.large_b1_failed;
2708		ret[i++] = s->aux.small_b2_failed;
2709		ret[i++] = s->aux.large_b2_failed;
2710		ret[i++] = s->aux.rpd_alloc_failed;
2711		ret[i++] = s->aux.receive_carrier;
2712	}
2713	/* declare the buffer free */
2714	sc->flags &= ~FATM_STAT_INUSE;
2715	cv_signal(&sc->cv_stat);
2716
2717	FATM_UNLOCK(sc);
2718
2719	if (error == 0)
2720		error = SYSCTL_OUT(req, ret, sizeof(u_long) * FATM_NSTATS);
2721	free(ret, M_TEMP);
2722
2723	return (error);
2724}
2725
2726#define MAXDMASEGS 32		/* maximum number of DMA segments (parent tag) */
2727
2728/*
2729 * Attach to the device.
2730 *
2731 * We assume that there is a global lock (Giant in this case) that protects
2732 * multiple threads from entering this function. This makes sense, doesn't it?
2733 */
2734static int
2735fatm_attach(device_t dev)
2736{
2737	struct ifnet *ifp;
2738	struct fatm_softc *sc;
2739	int unit;
2740	uint16_t cfg;
2741	int error = 0;
2742	struct rbuf *rb;
2743	u_int i;
2744	struct txqueue *tx;
2745
2746	sc = device_get_softc(dev);
2747	unit = device_get_unit(dev);
2748
2749	sc->ifatm.mib.device = ATM_DEVICE_PCA200E;
2750	sc->ifatm.mib.serial = 0;
2751	sc->ifatm.mib.hw_version = 0;
2752	sc->ifatm.mib.sw_version = 0;
2753	sc->ifatm.mib.vpi_bits = 0;
2754	sc->ifatm.mib.vci_bits = FORE_VCIBITS;
2755	sc->ifatm.mib.max_vpcs = 0;
2756	sc->ifatm.mib.max_vccs = FORE_MAX_VCC;
2757	sc->ifatm.mib.media = IFM_ATM_UNKNOWN;
2758	sc->ifatm.phy = &sc->utopia;
2759
2760	LIST_INIT(&sc->rbuf_free);
2761	LIST_INIT(&sc->rbuf_used);
2762
2763	/*
2764	 * Initialize mutex and condition variables.
2765	 */
2766	mtx_init(&sc->mtx, device_get_nameunit(dev),
2767	    MTX_NETWORK_LOCK, MTX_DEF);
2768
2769	cv_init(&sc->cv_stat, "fatm_stat");
2770	cv_init(&sc->cv_regs, "fatm_regs");
2771
2772	sysctl_ctx_init(&sc->sysctl_ctx);
2773
2774	/*
2775	 * Make the sysctl tree
2776	 */
2777	if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
2778	    SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
2779	    device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
2780		goto fail;
2781
2782	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2783	    OID_AUTO, "istats", CTLFLAG_RD, sc, 0, fatm_sysctl_istats,
2784	    "LU", "internal statistics") == NULL)
2785		goto fail;
2786
2787	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2788	    OID_AUTO, "stats", CTLFLAG_RD, sc, 0, fatm_sysctl_stats,
2789	    "LU", "card statistics") == NULL)
2790		goto fail;
2791
2792	if (SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2793	    OID_AUTO, "retry_tx", CTLFLAG_RW, &sc->retry_tx, 0,
2794	    "retry flag") == NULL)
2795		goto fail;
2796
2797#ifdef FATM_DEBUG
2798	if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2799	    OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "debug flags")
2800	    == NULL)
2801		goto fail;
2802	sc->debug = FATM_DEBUG;
2803#endif
2804
2805	/*
2806	 * Network subsystem stuff
2807	 */
2808	ifp = &sc->ifatm.ifnet;
2809	ifp->if_softc = sc;
2810	ifp->if_unit = unit;
2811	ifp->if_name = "fatm";
2812	ifp->if_flags = IFF_SIMPLEX;
2813	ifp->if_ioctl = fatm_ioctl;
2814	ifp->if_start = fatm_start;
2815	ifp->if_watchdog = fatm_watchdog;
2816	ifp->if_init = fatm_init;
2817	ifp->if_linkmib = &sc->ifatm.mib;
2818	ifp->if_linkmiblen = sizeof(sc->ifatm.mib);
2819
2820	/*
2821	 * Enable memory mapping and bus mastering
2822	 */
2823	cfg = pci_read_config(dev, PCIR_COMMAND, 2);
2824	cfg |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
2825	pci_write_config(dev, PCIR_COMMAND, cfg, 2);
2826
2827	/*
2828	 * Map memory
2829	 */
2830	cfg = pci_read_config(dev, PCIR_COMMAND, 2);
2831	if (!(cfg & PCIM_CMD_MEMEN)) {
2832		if_printf(ifp, "failed to enable memory mapping\n");
2833		error = ENXIO;
2834		goto fail;
2835	}
2836	sc->memid = 0x10;
2837	sc->memres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->memid,
2838	    0, ~0, 1, RF_ACTIVE);
2839	if (sc->memres == NULL) {
2840		if_printf(ifp, "could not map memory\n");
2841		error = ENXIO;
2842		goto fail;
2843	}
2844	sc->memh = rman_get_bushandle(sc->memres);
2845	sc->memt = rman_get_bustag(sc->memres);
2846
2847	/*
2848	 * Convert endianness of slave accesses
2849	 */
2850	cfg = pci_read_config(dev, FATM_PCIR_MCTL, 1);
2851	cfg |= FATM_PCIM_SWAB;
2852	pci_write_config(dev, FATM_PCIR_MCTL, cfg, 1);
2853
2854	/*
2855	 * Allocate interrupt (activate at the end)
2856	 */
2857	sc->irqid = 0;
2858	sc->irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irqid,
2859	    0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
2860	if (sc->irqres == NULL) {
2861		if_printf(ifp, "could not allocate irq\n");
2862		error = ENXIO;
2863		goto fail;
2864	}
2865
2866	/*
2867	 * Allocate the parent DMA tag. This is used simply to hold overall
2868	 * restrictions for the controller (and PCI bus) and is never used
2869	 * to do anything.
2870	 */
2871	if (bus_dma_tag_create(NULL, 1, 0,
2872	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2873	    NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, MAXDMASEGS,
2874	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
2875	    &sc->parent_dmat)) {
2876		if_printf(ifp, "could not allocate parent DMA tag\n");
2877		error = ENOMEM;
2878		goto fail;
2879	}
2880
2881	/*
2882	 * Allocate the receive buffer DMA tag. This tag must be able to map
2883	 * at most an mbuf cluster.
2884	 */
2885	if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
2886	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2887	    NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
2888	    NULL, NULL, &sc->rbuf_tag)) {
2889		if_printf(ifp, "could not allocate rbuf DMA tag\n");
2890		error = ENOMEM;
2891		goto fail;
2892	}
2893
2894	/*
2895	 * Allocate the transmission DMA tag. Must add 1 because a
2896	 * rounded-up PDU will be 65536 bytes long.
2897	 */
2898	if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
2899	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2900	    NULL, NULL,
2901	    FATM_MAXPDU + 1, TPD_EXTENSIONS + TXD_FIXED, MCLBYTES, 0,
2902	    NULL, NULL, &sc->tx_tag)) {
2903		if_printf(ifp, "could not allocate tx DMA tag\n");
2904		error = ENOMEM;
2905		goto fail;
2906	}
2907
2908	/*
2909	 * Allocate DMAable memory.
2910	 */
2911	sc->stat_mem.size = sizeof(uint32_t) * (FATM_CMD_QLEN + FATM_TX_QLEN
2912	    + FATM_RX_QLEN + SMALL_SUPPLY_QLEN + LARGE_SUPPLY_QLEN);
2913	sc->stat_mem.align = 4;
2914
2915	sc->txq_mem.size = FATM_TX_QLEN * TPD_SIZE;
2916	sc->txq_mem.align = 32;
2917
2918	sc->rxq_mem.size = FATM_RX_QLEN * RPD_SIZE;
2919	sc->rxq_mem.align = 32;
2920
2921	sc->s1q_mem.size = SMALL_SUPPLY_QLEN *
2922	    BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE);
2923	sc->s1q_mem.align = 32;
2924
2925	sc->l1q_mem.size = LARGE_SUPPLY_QLEN *
2926	    BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE);
2927	sc->l1q_mem.align = 32;
2928
2929#ifdef TEST_DMA_SYNC
2930	if ((error = alloc_dma_memoryX(sc, "STATUS", &sc->stat_mem)) != 0 ||
2931	    (error = alloc_dma_memoryX(sc, "TXQ", &sc->txq_mem)) != 0 ||
2932	    (error = alloc_dma_memoryX(sc, "RXQ", &sc->rxq_mem)) != 0 ||
2933	    (error = alloc_dma_memoryX(sc, "S1Q", &sc->s1q_mem)) != 0 ||
2934	    (error = alloc_dma_memoryX(sc, "L1Q", &sc->l1q_mem)) != 0)
2935		goto fail;
2936#else
2937	if ((error = alloc_dma_memory(sc, "STATUS", &sc->stat_mem)) != 0 ||
2938	    (error = alloc_dma_memory(sc, "TXQ", &sc->txq_mem)) != 0 ||
2939	    (error = alloc_dma_memory(sc, "RXQ", &sc->rxq_mem)) != 0 ||
2940	    (error = alloc_dma_memory(sc, "S1Q", &sc->s1q_mem)) != 0 ||
2941	    (error = alloc_dma_memory(sc, "L1Q", &sc->l1q_mem)) != 0)
2942		goto fail;
2943#endif
2944
2945	sc->prom_mem.size = sizeof(struct prom);
2946	sc->prom_mem.align = 32;
2947	if ((error = alloc_dma_memory(sc, "PROM", &sc->prom_mem)) != 0)
2948		goto fail;
2949
2950	sc->sadi_mem.size = sizeof(struct fatm_stats);
2951	sc->sadi_mem.align = 32;
2952	if ((error = alloc_dma_memory(sc, "STATISTICS", &sc->sadi_mem)) != 0)
2953		goto fail;
2954
2955	sc->reg_mem.size = sizeof(uint32_t) * FATM_NREGS;
2956	sc->reg_mem.align = 32;
2957	if ((error = alloc_dma_memory(sc, "REGISTERS", &sc->reg_mem)) != 0)
2958		goto fail;
2959
2960	/*
2961	 * Allocate queues
2962	 */
2963	sc->cmdqueue.chunk = malloc(FATM_CMD_QLEN * sizeof(struct cmdqueue),
2964	    M_DEVBUF, M_ZERO | M_WAITOK);
2965	sc->txqueue.chunk = malloc(FATM_TX_QLEN * sizeof(struct txqueue),
2966	    M_DEVBUF, M_ZERO | M_WAITOK);
2967	sc->rxqueue.chunk = malloc(FATM_RX_QLEN * sizeof(struct rxqueue),
2968	    M_DEVBUF, M_ZERO | M_WAITOK);
2969	sc->s1queue.chunk = malloc(SMALL_SUPPLY_QLEN * sizeof(struct supqueue),
2970	    M_DEVBUF, M_ZERO | M_WAITOK);
2971	sc->l1queue.chunk = malloc(LARGE_SUPPLY_QLEN * sizeof(struct supqueue),
2972	    M_DEVBUF, M_ZERO | M_WAITOK);
2973
2974	sc->vccs = malloc((FORE_MAX_VCC + 1) * sizeof(sc->vccs[0]),
2975	    M_DEVBUF, M_ZERO | M_WAITOK);
2976	sc->vcc_zone = uma_zcreate("FATM vccs", sizeof(struct card_vcc),
2977	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
2978	if (sc->vcc_zone == NULL) {
2979		error = ENOMEM;
2980		goto fail;
2981	}
2982
2983	/*
2984	 * Allocate memory for the receive buffer headers. The total number
2985	 * of headers should probably also include the maximum number of
2986	 * buffers on the receive queue.
2987	 */
2988	sc->rbuf_total = SMALL_POOL_SIZE + LARGE_POOL_SIZE;
2989	sc->rbufs = malloc(sc->rbuf_total * sizeof(struct rbuf),
2990	    M_DEVBUF, M_ZERO | M_WAITOK);
2991
2992	/*
2993	 * Put all rbuf headers on the free list and create DMA maps.
2994	 */
2995	for (rb = sc->rbufs, i = 0; i < sc->rbuf_total; i++, rb++) {
2996		if ((error = bus_dmamap_create(sc->rbuf_tag, 0, &rb->map))) {
2997			if_printf(&sc->ifatm.ifnet, "creating rx map: %d\n",
2998			    error);
2999			goto fail;
3000		}
3001		LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
3002	}
3003
3004	/*
3005	 * Create DMA maps for transmission. In case of an error, free the
3006	 * allocated DMA maps, because on some architectures maps are NULL
3007	 * and we cannot distinguish between a failure and a NULL map in
3008	 * the detach routine.
3009	 */
3010	for (i = 0; i < FATM_TX_QLEN; i++) {
3011		tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
3012		if ((error = bus_dmamap_create(sc->tx_tag, 0, &tx->map))) {
3013			if_printf(&sc->ifatm.ifnet, "creating tx map: %d\n",
3014			    error);
3015			while (i > 0) {
3016				tx = GET_QUEUE(sc->txqueue, struct txqueue,
3017				    i - 1);
3018				bus_dmamap_destroy(sc->tx_tag, tx->map);
3019				i--;
3020			}
3021			goto fail;
3022		}
3023	}
3024
3025	utopia_attach(&sc->utopia, &sc->ifatm, &sc->media, &sc->mtx,
3026	    &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
3027	    &fatm_utopia_methods);
3028	sc->utopia.flags |= UTP_FL_NORESET | UTP_FL_POLL_CARRIER;
3029
3030	/*
3031	 * Attach the interface
3032	 */
3033	atm_ifattach(ifp);
3034	ifp->if_snd.ifq_maxlen = 512;
3035
3036	error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET,
3037	    fatm_intr, sc, &sc->ih);
3038	if (error) {
3039		if_printf(ifp, "couldn't setup irq\n");
3040		goto fail;
3041	}
3042
3043  fail:
3044	if (error)
3045		fatm_detach(dev);
3046
3047	return (error);
3048}
3049
3050#if defined(FATM_DEBUG) && 0
3051static void
3052dump_s1_queue(struct fatm_softc *sc)
3053{
3054	int i;
3055	struct supqueue *q;
3056
3057	for (i = 0; i < SMALL_SUPPLY_QLEN; i++) {
3058		q = GET_QUEUE(sc->s1queue, struct supqueue, i);
3059		printf("%2d: card=%x(%x,%x) stat=%x\n", i,
3060		    q->q.card,
3061		    READ4(sc, q->q.card),
3062		    READ4(sc, q->q.card + 4),
3063		    *q->q.statp);
3064	}
3065}
3066#endif
3067
3068/*
3069 * Driver infrastructure.
3070 */
3071static device_method_t fatm_methods[] = {
3072	DEVMETHOD(device_probe,		fatm_probe),
3073	DEVMETHOD(device_attach,	fatm_attach),
3074	DEVMETHOD(device_detach,	fatm_detach),
3075	{ 0, 0 }
3076};
3077static driver_t fatm_driver = {
3078	"fatm",
3079	fatm_methods,
3080	sizeof(struct fatm_softc),
3081};
3082
3083DRIVER_MODULE(fatm, pci, fatm_driver, fatm_devclass, 0, 0);
3084