/*-
 * Copyright (c) 2001-2003
 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
 *	All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Author: Hartmut Brandt <harti@freebsd.org>
 *
 * Fore PCA200E driver for NATM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/fatm/if_fatm.c 183504 2008-09-30 18:52:43Z marius $");

#include "opt_inet.h"
#include "opt_natm.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <sys/condvar.h>
#include <vm/uma.h>

#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_atm.h>
#include <net/route.h>
#ifdef ENABLE_BPF
#include <net/bpf.h>
#endif
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_atm.h>
#endif

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/utopia/utopia.h>

#include <dev/fatm/if_fatmreg.h>
#include <dev/fatm/if_fatmvar.h>

#include <dev/fatm/firmware.h>

devclass_t fatm_devclass;

static const struct {
	uint16_t	vid;
	uint16_t	did;
	const char	*name;
} fatm_devs[] = {
	{ 0x1127, 0x300,
	  "FORE PCA200E" },
	{ 0, 0, NULL }
};

static const struct rate {
	uint32_t	ratio;
	uint32_t	cell_rate;
} rate_table[] = {
#include <dev/fatm/if_fatm_rate.h>
};
#define RATE_TABLE_SIZE (sizeof(rate_table) / sizeof(rate_table[0]))
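
/*
 * The entries included from if_fatm_rate.h are assumed to be sorted by
 * decreasing cell_rate; fatm_tx() below relies on that ordering when it
 * maps a requested PCR to a rate multiplier (see the note there).
 */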

SYSCTL_DECL(_hw_atm);

MODULE_DEPEND(fatm, utopia, 1, 1, 1);

static int	fatm_utopia_readregs(struct ifatm *, u_int, uint8_t *, u_int *);
static int	fatm_utopia_writereg(struct ifatm *, u_int, u_int, u_int);

static const struct utopia_methods fatm_utopia_methods = {
	fatm_utopia_readregs,
	fatm_utopia_writereg
};

#define VC_OK(SC, VPI, VCI)						\
	(((VPI) & ~((1 << IFP2IFATM((SC)->ifp)->mib.vpi_bits) - 1)) == 0 &&	\
	 (VCI) != 0 && ((VCI) & ~((1 << IFP2IFATM((SC)->ifp)->mib.vci_bits) - 1)) == 0)
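
/*
 * Worked example with hypothetical MIB values: for vpi_bits == 0 and
 * vci_bits == 10 the masks above become ~0x0 and ~0x3ff, so VC_OK accepts
 * only vpi == 0 and 1 <= vci <= 1023; vci == 0 is always rejected.
 */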

static int fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc);

/*
 * Probing is easy: step through the list of known vendor and device
 * ids and compare. If one is found - it's ours.
 */
static int
fatm_probe(device_t dev)
{
	int i;

	for (i = 0; fatm_devs[i].name; i++)
		if (pci_get_vendor(dev) == fatm_devs[i].vid &&
		    pci_get_device(dev) == fatm_devs[i].did) {
			device_set_desc(dev, fatm_devs[i].name);
			return (BUS_PROBE_DEFAULT);
		}
	return (ENXIO);
}

/*
 * Function called at completion of a SUNI writeregs/readregs command.
 * This is called from the interrupt handler while holding the softc lock.
 * We use the queue entry as the rendezvous point.
 */
static void
fatm_utopia_writeregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
{

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.suni_reg_errors++;
		q->error = EIO;
	}
	wakeup(q);
}

/*
 * Write a SUNI register. The bits that are 1 in mask are written from val
 * into register reg. We wait for the command to complete by sleeping on
 * the register memory.
 *
 * We assume that we already hold the softc mutex.
 */
static int
fatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
{
	int error;
	struct cmdqueue *q;
	struct fatm_softc *sc;

	sc = ifatm->ifp->if_softc;
	FATM_CHECKLOCK(sc);
	if (!(ifatm->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return (EIO);

	/* get queue element and fill it */
	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
		sc->istats.cmd_queue_full++;
		return (EIO);
	}
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = fatm_utopia_writeregs_complete;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, 0);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP,
	    FATM_MAKE_SETOC3(reg, val, mask) | FATM_OP_INTERRUPT_SEL);
	BARRIER_W(sc);

	/*
	 * Wait for the command to complete
	 */
	error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_setreg", hz);

	switch (error) {

	  case EWOULDBLOCK:	/* timeout - the card did not answer */
		error = EIO;
		break;

	  case ERESTART:	/* interrupted by a signal */
		error = EINTR;
		break;

	  case 0:		/* completed - take status from the callback */
		error = q->error;
		break;
	}

	return (error);
}

/*
 * Function called at completion of a SUNI readregs command.
 * This is called from the interrupt handler while holding the softc lock.
 * We use reg_mem as the rendezvous point.
 */
static void
fatm_utopia_readregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
{

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.suni_reg_errors++;
		q->error = EIO;
	}
	wakeup(&sc->reg_mem);
}

/*
 * Read SUNI registers
 *
 * We use a preallocated buffer to read the registers. Therefore we need
 * to protect against multiple threads trying to read registers. We do this
 * with a condition variable and a flag. We wait for the command to complete
 * by sleeping on the register memory.
 *
 * We assume that we already hold the softc mutex.
 */
static int
fatm_utopia_readregs_internal(struct fatm_softc *sc)
{
	int error, i;
	uint32_t *ptr;
	struct cmdqueue *q;

	/* get the buffer */
	for (;;) {
		if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
			return (EIO);
		if (!(sc->flags & FATM_REGS_INUSE))
			break;
		cv_wait(&sc->cv_regs, &sc->mtx);
	}
	sc->flags |= FATM_REGS_INUSE;

	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
		sc->istats.cmd_queue_full++;
		/* release the buffer again - no command was issued */
		sc->flags &= ~FATM_REGS_INUSE;
		cv_signal(&sc->cv_regs);
		return (EIO);
	}
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = fatm_utopia_readregs_complete;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map, BUS_DMASYNC_PREREAD);

	WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, sc->reg_mem.paddr);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP,
	    FATM_OP_OC3_GET_REG | FATM_OP_INTERRUPT_SEL);
	BARRIER_W(sc);

	/*
	 * Wait for the command to complete
	 */
	error = msleep(&sc->reg_mem, &sc->mtx, PZERO | PCATCH,
	    "fatm_getreg", hz);

	switch (error) {

	  case EWOULDBLOCK:
		error = EIO;
		break;

	  case ERESTART:
		error = EINTR;
		break;

	  case 0:
		bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map,
		    BUS_DMASYNC_POSTREAD);
		error = q->error;
		break;
	}

	if (error != 0) {
		/* declare buffer to be free */
		sc->flags &= ~FATM_REGS_INUSE;
		cv_signal(&sc->cv_regs);
		return (error);
	}

	/* swap if needed */
	ptr = (uint32_t *)sc->reg_mem.mem;
	for (i = 0; i < FATM_NREGS; i++)
		ptr[i] = le32toh(ptr[i]) & 0xff;

	return (0);
}

/*
 * Read SUNI registers for the SUNI module.
 *
 * We assume that we already hold the mutex.
 */
static int
fatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *valp, u_int *np)
{
	int err;
	int i;
	struct fatm_softc *sc;

	if (reg >= FATM_NREGS)
		return (EINVAL);
	if (reg + *np > FATM_NREGS)
		*np = FATM_NREGS - reg;
	sc = ifatm->ifp->if_softc;
	FATM_CHECKLOCK(sc);

	err = fatm_utopia_readregs_internal(sc);
	if (err != 0)
		return (err);

	for (i = 0; i < *np; i++)
		valp[i] = ((uint32_t *)sc->reg_mem.mem)[reg + i];

	/* declare buffer to be free */
	sc->flags &= ~FATM_REGS_INUSE;
	cv_signal(&sc->cv_regs);

	return (0);
}

/*
 * Check whether the heart is beating. We remember the last heart beat and
 * compare it to the current one. If it appears stuck for 10 consecutive
 * checks, we have a problem.
 *
 * Assume we hold the lock.
 */
static void
fatm_check_heartbeat(struct fatm_softc *sc)
{
	uint32_t h;

	FATM_CHECKLOCK(sc);

	h = READ4(sc, FATMO_HEARTBEAT);
	DBG(sc, BEAT, ("heartbeat %08x", h));

	if (sc->stop_cnt == 10)
		return;

	if (h == sc->heartbeat) {
		if (++sc->stop_cnt == 10) {
			log(LOG_ERR, "i960 stopped???\n");
			WRITE4(sc, FATMO_HIMR, 1);
		}
		return;
	}

	sc->stop_cnt = 0;
	sc->heartbeat = h;
}
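
/*
 * The watchdog below re-arms itself every 5 seconds (if_timer = 5), so
 * with the stop_cnt limit of 10 an unchanged heartbeat is tolerated for
 * roughly 50 seconds before the i960 is declared dead and a 1 is written
 * to FATMO_HIMR (presumably the host interrupt mask).
 */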

/*
 * Ensure that the heart is still beating.
 */
static void
fatm_watchdog(struct ifnet *ifp)
{
	struct fatm_softc *sc = ifp->if_softc;

	FATM_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		fatm_check_heartbeat(sc);
		ifp->if_timer = 5;
	}
	FATM_UNLOCK(sc);
}

/*
 * Hard reset the i960 on the board. This is done by initializing registers,
 * clearing interrupts and waiting for the selftest to finish. Not sure
 * whether all these barriers are actually needed.
 *
 * Assumes that we hold the lock.
 */
static int
fatm_reset(struct fatm_softc *sc)
{
	int w;
	uint32_t val;

	FATM_CHECKLOCK(sc);

	WRITE4(sc, FATMO_APP_BASE, FATMO_COMMON_ORIGIN);
	BARRIER_W(sc);

	WRITE4(sc, FATMO_UART_TO_960, XMIT_READY);
	BARRIER_W(sc);

	WRITE4(sc, FATMO_UART_TO_HOST, XMIT_READY);
	BARRIER_W(sc);

	WRITE4(sc, FATMO_BOOT_STATUS, COLD_START);
	BARRIER_W(sc);

	WRITE1(sc, FATMO_HCR, FATM_HCR_RESET);
	BARRIER_W(sc);

	DELAY(1000);

	WRITE1(sc, FATMO_HCR, 0);
	BARRIER_RW(sc);

	DELAY(1000);

	for (w = 100; w; w--) {
		BARRIER_R(sc);
		val = READ4(sc, FATMO_BOOT_STATUS);
		switch (val) {
		  case SELF_TEST_OK:
			return (0);
		  case SELF_TEST_FAIL:
			return (EIO);
		}
		DELAY(1000);
	}
	return (EIO);
}
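
/*
 * Timing note: the loop above polls at most 100 times with 1ms delays, so
 * together with the two delays around the HCR toggle a reset is bounded by
 * roughly 100ms of busy waiting.
 */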

/*
 * Stop the card. Must be called WITH the lock held.
 * Reset the card, free transmit and receive buffers. Wake up everybody
 * who may sleep.
 */
static void
fatm_stop(struct fatm_softc *sc)
{
	int i;
	struct cmdqueue *q;
	struct rbuf *rb;
	struct txqueue *tx;
	uint32_t stat;

	FATM_CHECKLOCK(sc);

	/* Stop the board */
	utopia_stop(&sc->utopia);
	(void)fatm_reset(sc);

	/* stop watchdog */
	sc->ifp->if_timer = 0;

	if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
		ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
		    sc->utopia.carrier == UTP_CARR_OK);

		/*
		 * Collect transmit mbufs, partial receive mbufs and
		 * supplied mbufs
		 */
		for (i = 0; i < FATM_TX_QLEN; i++) {
			tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
			if (tx->m) {
				bus_dmamap_unload(sc->tx_tag, tx->map);
				m_freem(tx->m);
				tx->m = NULL;
			}
		}

		/* Collect supplied mbufs */
		while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
			LIST_REMOVE(rb, link);
			bus_dmamap_unload(sc->rbuf_tag, rb->map);
			m_free(rb->m);
			rb->m = NULL;
			LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
		}

		/* Wake up any waiters */
		wakeup(&sc->sadi_mem);

		/* wakeup all threads waiting for STAT or REG buffers */
		cv_broadcast(&sc->cv_stat);
		cv_broadcast(&sc->cv_regs);

		sc->flags &= ~(FATM_STAT_INUSE | FATM_REGS_INUSE);

		/* wakeup all threads waiting on commands */
		for (i = 0; i < FATM_CMD_QLEN; i++) {
			q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, i);

			H_SYNCSTAT_POSTREAD(sc, q->q.statp);
			if ((stat = H_GETSTAT(q->q.statp)) != FATM_STAT_FREE) {
				H_SETSTAT(q->q.statp, stat | FATM_STAT_ERROR);
				H_SYNCSTAT_PREWRITE(sc, q->q.statp);
				wakeup(q);
			}
		}
		utopia_reset_media(&sc->utopia);
	}
	sc->small_cnt = sc->large_cnt = 0;

	/* Reset vcc info */
	if (sc->vccs != NULL) {
		sc->open_vccs = 0;
		for (i = 0; i < FORE_MAX_VCC + 1; i++) {
			if (sc->vccs[i] != NULL) {
				if ((sc->vccs[i]->vflags & (FATM_VCC_OPEN |
				    FATM_VCC_TRY_OPEN)) == 0) {
					uma_zfree(sc->vcc_zone, sc->vccs[i]);
					sc->vccs[i] = NULL;
				} else {
					sc->vccs[i]->vflags = 0;
					sc->open_vccs++;
				}
			}
		}
	}
}

/*
 * Load the firmware into the board and save the entry point.
 */
static uint32_t
firmware_load(struct fatm_softc *sc)
{
	struct firmware *fw = (struct firmware *)firmware;

	DBG(sc, INIT, ("loading - entry=%x", fw->entry));
	bus_space_write_region_4(sc->memt, sc->memh, fw->offset, firmware,
	    sizeof(firmware) / sizeof(firmware[0]));
	BARRIER_RW(sc);

	return (fw->entry);
}

/*
 * Read a character from the virtual UART. The availability of a character
 * is signaled by a non-zero value of the 32 bit register. We signal to the
 * card that we have consumed the character by setting that register back
 * to zero.
 */
static int
rx_getc(struct fatm_softc *sc)
{
	int w = 50;
	int c;

	while (w--) {
		c = READ4(sc, FATMO_UART_TO_HOST);
		BARRIER_RW(sc);
		if (c != 0) {
			WRITE4(sc, FATMO_UART_TO_HOST, 0);
			DBGC(sc, UART, ("%c", c & 0xff));
			return (c & 0xff);
		}
		DELAY(1000);
	}
	return (-1);
}

/*
 * Eat up characters from the board and stuff them in the bit-bucket.
 */
static void
rx_flush(struct fatm_softc *sc)
{
	int w = 10000;

	while (w-- && rx_getc(sc) >= 0)
		;
}

/*
 * Write a character to the card. The UART is available if the register
 * is zero.
 */
static int
tx_putc(struct fatm_softc *sc, u_char c)
{
	int w = 10;
	int c1;

	while (w--) {
		c1 = READ4(sc, FATMO_UART_TO_960);
		BARRIER_RW(sc);
		if (c1 == 0) {
			WRITE4(sc, FATMO_UART_TO_960, c | CHAR_AVAIL);
			DBGC(sc, UART, ("%c", c & 0xff));
			return (0);
		}
		DELAY(1000);
	}
	return (-1);
}
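
/*
 * fatm_start_firmware() below drives this simple UART protocol. For a
 * (hypothetical) entry point of 0x4000 the exchange amounts to sending
 * "go 4000\r" one character at a time, consuming the card's echo via
 * rx_getc() after each character.
 */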

/*
 * Start the firmware. This is done by issuing a 'go' command with
 * the hex entry address of the firmware. Then we wait for the firmware
 * to come up (or for the self-test to fail).
 */
static int
fatm_start_firmware(struct fatm_softc *sc, uint32_t start)
{
	static char hex[] = "0123456789abcdef";
	u_int w, val;

	DBG(sc, INIT, ("starting"));
	rx_flush(sc);
	tx_putc(sc, '\r');
	DELAY(1000);

	rx_flush(sc);

	tx_putc(sc, 'g');
	(void)rx_getc(sc);
	tx_putc(sc, 'o');
	(void)rx_getc(sc);
	tx_putc(sc, ' ');
	(void)rx_getc(sc);

	tx_putc(sc, hex[(start >> 12) & 0xf]);
	(void)rx_getc(sc);
	tx_putc(sc, hex[(start >>  8) & 0xf]);
	(void)rx_getc(sc);
	tx_putc(sc, hex[(start >>  4) & 0xf]);
	(void)rx_getc(sc);
	tx_putc(sc, hex[(start >>  0) & 0xf]);
	(void)rx_getc(sc);

	tx_putc(sc, '\r');
	rx_flush(sc);

	for (w = 100; w; w--) {
		BARRIER_R(sc);
		val = READ4(sc, FATMO_BOOT_STATUS);
		switch (val) {
		  case CP_RUNNING:
			return (0);
		  case SELF_TEST_FAIL:
			return (EIO);
		}
		DELAY(1000);
	}
	return (EIO);
}

/*
 * Initialize one card and host queue.
 */
static void
init_card_queue(struct fatm_softc *sc, struct fqueue *queue, int qlen,
    size_t qel_size, size_t desc_size, cardoff_t off,
    u_char **statpp, uint32_t *cardstat, u_char *descp, uint32_t carddesc)
{
	struct fqelem *el = queue->chunk;

	while (qlen--) {
		el->card = off;
		off += 8;	/* size of card entry */

		el->statp = (uint32_t *)(*statpp);
		(*statpp) += sizeof(uint32_t);
		H_SETSTAT(el->statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, el->statp);

		WRITE4(sc, el->card + FATMOS_STATP, (*cardstat));
		(*cardstat) += sizeof(uint32_t);

		el->ioblk = descp;
		descp += desc_size;
		el->card_ioblk = carddesc;
		carddesc += desc_size;

		el = (struct fqelem *)((u_char *)el + qel_size);
	}
	queue->tail = queue->head = 0;
}
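
/*
 * Example: for the transmit queue this is called with qlen = FATM_TX_QLEN,
 * qel_size = sizeof(struct txqueue) and desc_size = TPD_SIZE. Each
 * iteration then claims one 8-byte entry on the card, one 32-bit status
 * word in host memory (whose bus address is mirrored to the card via
 * FATMOS_STATP) and one TPD_SIZE descriptor in the queue's DMA memory.
 */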

/*
 * Issue the initialize operation to the card, wait for completion and
 * initialize the on-board and host queue structures with offsets and
 * addresses.
 */
static int
fatm_init_cmd(struct fatm_softc *sc)
{
	int w, c;
	u_char *statp;
	uint32_t card_stat;
	u_int cnt;
	struct fqelem *el;
	cardoff_t off;

	DBG(sc, INIT, ("command"));
	WRITE4(sc, FATMO_ISTAT, 0);
	WRITE4(sc, FATMO_IMASK, 1);
	WRITE4(sc, FATMO_HLOGGER, 0);

	WRITE4(sc, FATMO_INIT + FATMOI_RECEIVE_TRESHOLD, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_NUM_CONNECT, FORE_MAX_VCC);
	WRITE4(sc, FATMO_INIT + FATMOI_CQUEUE_LEN, FATM_CMD_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_TQUEUE_LEN, FATM_TX_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_RQUEUE_LEN, FATM_RX_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_RPD_EXTENSION, RPD_EXTENSIONS);
	WRITE4(sc, FATMO_INIT + FATMOI_TPD_EXTENSION, TPD_EXTENSIONS);

	/*
	 * initialize buffer descriptors
	 */
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_QUEUE_LENGTH,
	    SMALL_SUPPLY_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_BUFFER_SIZE,
	    SMALL_BUFFER_LEN);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_POOL_SIZE,
	    SMALL_POOL_SIZE);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_SUPPLY_BLKSIZE,
	    SMALL_SUPPLY_BLKSIZE);

	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_QUEUE_LENGTH,
	    LARGE_SUPPLY_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_BUFFER_SIZE,
	    LARGE_BUFFER_LEN);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_POOL_SIZE,
	    LARGE_POOL_SIZE);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_SUPPLY_BLKSIZE,
	    LARGE_SUPPLY_BLKSIZE);

	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_QUEUE_LENGTH, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_BUFFER_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_POOL_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_SUPPLY_BLKSIZE, 0);

	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_QUEUE_LENGTH, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_BUFFER_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_POOL_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_SUPPLY_BLKSIZE, 0);

	/*
	 * Start the command
	 */
	BARRIER_W(sc);
	WRITE4(sc, FATMO_INIT + FATMOI_STATUS, FATM_STAT_PENDING);
	BARRIER_W(sc);
	WRITE4(sc, FATMO_INIT + FATMOI_OP, FATM_OP_INITIALIZE);
	BARRIER_W(sc);

	/*
	 * Busy wait for completion
	 */
	w = 100;
	while (w--) {
		c = READ4(sc, FATMO_INIT + FATMOI_STATUS);
		BARRIER_R(sc);
		if (c & FATM_STAT_COMPLETE)
			break;
		DELAY(1000);
	}

	/* bail out if the command timed out or failed */
	if ((c & FATM_STAT_COMPLETE) == 0 || (c & FATM_STAT_ERROR))
		return (EIO);

	/*
	 * Initialize the queues
	 */
	statp = sc->stat_mem.mem;
	card_stat = sc->stat_mem.paddr;

	/*
	 * Command queue. This is special in that it's on the card.
	 */
	el = sc->cmdqueue.chunk;
	off = READ4(sc, FATMO_COMMAND_QUEUE);
	DBG(sc, INIT, ("cmd queue=%x", off));
	for (cnt = 0; cnt < FATM_CMD_QLEN; cnt++) {
		el = &((struct cmdqueue *)sc->cmdqueue.chunk + cnt)->q;

		el->card = off;
		off += 32;		/* size of card structure */

		el->statp = (uint32_t *)statp;
		statp += sizeof(uint32_t);
		H_SETSTAT(el->statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, el->statp);

		WRITE4(sc, el->card + FATMOC_STATP, card_stat);
		card_stat += sizeof(uint32_t);
	}
	sc->cmdqueue.tail = sc->cmdqueue.head = 0;

	/*
	 * Now the other queues. These are in memory
	 */
	init_card_queue(sc, &sc->txqueue, FATM_TX_QLEN,
	    sizeof(struct txqueue), TPD_SIZE,
	    READ4(sc, FATMO_TRANSMIT_QUEUE),
	    &statp, &card_stat, sc->txq_mem.mem, sc->txq_mem.paddr);

	init_card_queue(sc, &sc->rxqueue, FATM_RX_QLEN,
	    sizeof(struct rxqueue), RPD_SIZE,
	    READ4(sc, FATMO_RECEIVE_QUEUE),
	    &statp, &card_stat, sc->rxq_mem.mem, sc->rxq_mem.paddr);

	init_card_queue(sc, &sc->s1queue, SMALL_SUPPLY_QLEN,
	    sizeof(struct supqueue), BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE),
	    READ4(sc, FATMO_SMALL_B1_QUEUE),
	    &statp, &card_stat, sc->s1q_mem.mem, sc->s1q_mem.paddr);

	init_card_queue(sc, &sc->l1queue, LARGE_SUPPLY_QLEN,
	    sizeof(struct supqueue), BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE),
	    READ4(sc, FATMO_LARGE_B1_QUEUE),
	    &statp, &card_stat, sc->l1q_mem.mem, sc->l1q_mem.paddr);

	sc->txcnt = 0;

	return (0);
}

/*
 * Read PROM. Called only from attach code. Here we spin because the interrupt
 * handler is not yet set up.
 */
static int
fatm_getprom(struct fatm_softc *sc)
{
	int i;
	struct prom *prom;
	struct cmdqueue *q;

	DBG(sc, INIT, ("reading prom"));
	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = NULL;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
	    BUS_DMASYNC_PREREAD);

	WRITE4(sc, q->q.card + FATMOC_GPROM_BUF, sc->prom_mem.paddr);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_GET_PROM_DATA);
	BARRIER_W(sc);

	for (i = 0; i < 1000; i++) {
		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) &
		    (FATM_STAT_COMPLETE | FATM_STAT_ERROR))
			break;
		DELAY(1000);
	}
	if (i == 1000) {
		if_printf(sc->ifp, "getprom timeout\n");
		return (EIO);
	}
	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		if_printf(sc->ifp, "getprom error\n");
		return (EIO);
	}
	H_SETSTAT(q->q.statp, FATM_STAT_FREE);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
	NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);

	bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
	    BUS_DMASYNC_POSTREAD);

#ifdef notdef
	{
		u_int i;

		printf("PROM: ");
		u_char *ptr = (u_char *)sc->prom_mem.mem;
		for (i = 0; i < sizeof(struct prom); i++)
			printf("%02x ", *ptr++);
		printf("\n");
	}
#endif

	prom = (struct prom *)sc->prom_mem.mem;

	bcopy(prom->mac + 2, IFP2IFATM(sc->ifp)->mib.esi, 6);
	IFP2IFATM(sc->ifp)->mib.serial = le32toh(prom->serial);
	IFP2IFATM(sc->ifp)->mib.hw_version = le32toh(prom->version);
	IFP2IFATM(sc->ifp)->mib.sw_version = READ4(sc, FATMO_FIRMWARE_RELEASE);

	if_printf(sc->ifp, "ESI=%02x:%02x:%02x:%02x:%02x:%02x "
	    "serial=%u hw=0x%x sw=0x%x\n", IFP2IFATM(sc->ifp)->mib.esi[0],
	    IFP2IFATM(sc->ifp)->mib.esi[1], IFP2IFATM(sc->ifp)->mib.esi[2], IFP2IFATM(sc->ifp)->mib.esi[3],
	    IFP2IFATM(sc->ifp)->mib.esi[4], IFP2IFATM(sc->ifp)->mib.esi[5], IFP2IFATM(sc->ifp)->mib.serial,
	    IFP2IFATM(sc->ifp)->mib.hw_version, IFP2IFATM(sc->ifp)->mib.sw_version);

	return (0);
}

/*
 * This is the callback function for bus_dmamap_load. We assume that we
 * have a 32-bit bus and so always have exactly one segment.
 */
static void
dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *ptr = (bus_addr_t *)arg;

	if (error != 0) {
		printf("%s: error=%d\n", __func__, error);
		return;
	}
	KASSERT(nsegs == 1, ("too many DMA segments"));
	KASSERT(segs[0].ds_addr <= 0xffffffff, ("DMA address too large %lx",
	    (u_long)segs[0].ds_addr));

	*ptr = segs[0].ds_addr;
}
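
/*
 * Typical use, as in alloc_dma_memory() below - the helper stores the
 * single segment's bus address into the bus_addr_t passed as its argument:
 *
 *	bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
 *	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
 */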

/*
 * Allocate a chunk of DMA-able memory and map it.
 */
static int
alloc_dma_memory(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
{
	int error;

	mem->mem = NULL;

	if (bus_dma_tag_create(sc->parent_dmat, mem->align, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, mem->size, 1, BUS_SPACE_MAXSIZE_32BIT,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
		if_printf(sc->ifp, "could not allocate %s DMA tag\n",
		    nm);
		return (ENOMEM);
	}

	error = bus_dmamem_alloc(mem->dmat, &mem->mem, 0, &mem->map);
	if (error) {
		if_printf(sc->ifp, "could not allocate %s DMA memory: "
		    "%d\n", nm, error);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
	if (error) {
		if_printf(sc->ifp, "could not load %s DMA memory: "
		    "%d\n", nm, error);
		bus_dmamem_free(mem->dmat, mem->mem, mem->map);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	DBG(sc, DMA, ("DMA %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
	    (u_long)mem->paddr, mem->size, mem->align));

	return (0);
}

#ifdef TEST_DMA_SYNC
static int
alloc_dma_memoryX(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
{
	int error;

	mem->mem = NULL;

	if (bus_dma_tag_create(NULL, mem->align, 0,
	    BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, mem->size, 1, mem->size,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
		if_printf(sc->ifp, "could not allocate %s DMA tag\n",
		    nm);
		return (ENOMEM);
	}

	mem->mem = contigmalloc(mem->size, M_DEVBUF, M_WAITOK,
	    BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR_32BIT, mem->align, 0);

	error = bus_dmamap_create(mem->dmat, 0, &mem->map);
	if (error) {
		if_printf(sc->ifp, "could not allocate %s DMA map: "
		    "%d\n", nm, error);
		contigfree(mem->mem, mem->size, M_DEVBUF);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
	if (error) {
		if_printf(sc->ifp, "could not load %s DMA memory: "
		    "%d\n", nm, error);
		bus_dmamap_destroy(mem->dmat, mem->map);
		contigfree(mem->mem, mem->size, M_DEVBUF);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	DBG(sc, DMA, ("DMAX %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
	    (u_long)mem->paddr, mem->size, mem->align));

	printf("DMAX: %s V/P/S/Z %p/%lx/%x/%x\n", nm, mem->mem,
	    (u_long)mem->paddr, mem->size, mem->align);

	return (0);
}
#endif /* TEST_DMA_SYNC */

/*
 * Destroy all resources of a DMA-able memory chunk
 */
static void
destroy_dma_memory(struct fatm_mem *mem)
{
	if (mem->mem != NULL) {
		bus_dmamap_unload(mem->dmat, mem->map);
		bus_dmamem_free(mem->dmat, mem->mem, mem->map);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
	}
}
#ifdef TEST_DMA_SYNC
static void
destroy_dma_memoryX(struct fatm_mem *mem)
{
	if (mem->mem != NULL) {
		bus_dmamap_unload(mem->dmat, mem->map);
		bus_dmamap_destroy(mem->dmat, mem->map);
		contigfree(mem->mem, mem->size, M_DEVBUF);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
	}
}
#endif /* TEST_DMA_SYNC */

/*
 * Try to supply buffers to the card if there are free entries in the queues.
 */
static void
fatm_supply_small_buffers(struct fatm_softc *sc)
{
	int nblocks, nbufs;
	struct supqueue *q;
	struct rbd *bd;
	int i, j, error, cnt;
	struct mbuf *m;
	struct rbuf *rb;
	bus_addr_t phys;

	nbufs = max(4 * sc->open_vccs, 32);
	nbufs = min(nbufs, SMALL_POOL_SIZE);
	nbufs -= sc->small_cnt;

	nblocks = (nbufs + SMALL_SUPPLY_BLKSIZE - 1) / SMALL_SUPPLY_BLKSIZE;
	for (cnt = 0; cnt < nblocks; cnt++) {
		q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.head);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
			break;

		bd = (struct rbd *)q->q.ioblk;

		for (i = 0; i < SMALL_SUPPLY_BLKSIZE; i++) {
			if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
				if_printf(sc->ifp, "out of rbufs\n");
				break;
			}
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
				break;
			}
			MH_ALIGN(m, SMALL_BUFFER_LEN);
			error = bus_dmamap_load(sc->rbuf_tag, rb->map,
			    m->m_data, SMALL_BUFFER_LEN, dmaload_helper,
			    &phys, BUS_DMA_NOWAIT);
			if (error) {
				if_printf(sc->ifp,
				    "dmamap_load mbuf failed %d\n", error);
				m_freem(m);
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
				break;
			}
			bus_dmamap_sync(sc->rbuf_tag, rb->map,
			    BUS_DMASYNC_PREREAD);

			LIST_REMOVE(rb, link);
			LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);

			rb->m = m;
			bd[i].handle = rb - sc->rbufs;
			H_SETDESC(bd[i].buffer, phys);
		}

		if (i < SMALL_SUPPLY_BLKSIZE) {
			for (j = 0; j < i; j++) {
				rb = sc->rbufs + bd[j].handle;
				bus_dmamap_unload(sc->rbuf_tag, rb->map);
				m_free(rb->m);
				rb->m = NULL;

				LIST_REMOVE(rb, link);
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
			}
			break;
		}
		H_SYNCQ_PREWRITE(&sc->s1q_mem, bd,
		    sizeof(struct rbd) * SMALL_SUPPLY_BLKSIZE);

		H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		WRITE4(sc, q->q.card, q->q.card_ioblk);
		BARRIER_W(sc);

		sc->small_cnt += SMALL_SUPPLY_BLKSIZE;

		NEXT_QUEUE_ENTRY(sc->s1queue.head, SMALL_SUPPLY_QLEN);
	}
}
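
/*
 * Supply arithmetic, for illustration: with one open VCC the target above
 * is max(4 * 1, 32) = 32 small buffers; what is already supplied
 * (sc->small_cnt) is subtracted and the remainder is rounded up to whole
 * blocks of SMALL_SUPPLY_BLKSIZE buffers, because the card takes supplies
 * only in blocks.
 */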

/*
 * Try to supply buffers to the card if there are free entries in the queues.
 * We assume that all buffers are within the address space accessible by the
 * card (32-bit), so we don't need bounce buffers.
 */
static void
fatm_supply_large_buffers(struct fatm_softc *sc)
{
	int nbufs, nblocks, cnt;
	struct supqueue *q;
	struct rbd *bd;
	int i, j, error;
	struct mbuf *m;
	struct rbuf *rb;
	bus_addr_t phys;

	nbufs = max(4 * sc->open_vccs, 32);
	nbufs = min(nbufs, LARGE_POOL_SIZE);
	nbufs -= sc->large_cnt;

	nblocks = (nbufs + LARGE_SUPPLY_BLKSIZE - 1) / LARGE_SUPPLY_BLKSIZE;

	for (cnt = 0; cnt < nblocks; cnt++) {
		q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.head);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
			break;

		bd = (struct rbd *)q->q.ioblk;

		for (i = 0; i < LARGE_SUPPLY_BLKSIZE; i++) {
			if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
				if_printf(sc->ifp, "out of rbufs\n");
				break;
			}
			if ((m = m_getcl(M_DONTWAIT, MT_DATA,
			    M_PKTHDR)) == NULL) {
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
				break;
			}
			/* No MEXT_ALIGN */
			m->m_data += MCLBYTES - LARGE_BUFFER_LEN;
			error = bus_dmamap_load(sc->rbuf_tag, rb->map,
			    m->m_data, LARGE_BUFFER_LEN, dmaload_helper,
			    &phys, BUS_DMA_NOWAIT);
			if (error) {
				if_printf(sc->ifp,
				    "dmamap_load mbuf failed %d\n", error);
				m_freem(m);
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
				break;
			}

			bus_dmamap_sync(sc->rbuf_tag, rb->map,
			    BUS_DMASYNC_PREREAD);

			LIST_REMOVE(rb, link);
			LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);

			rb->m = m;
			bd[i].handle = rb - sc->rbufs;
			H_SETDESC(bd[i].buffer, phys);
		}

		if (i < LARGE_SUPPLY_BLKSIZE) {
			for (j = 0; j < i; j++) {
				rb = sc->rbufs + bd[j].handle;
				bus_dmamap_unload(sc->rbuf_tag, rb->map);
				m_free(rb->m);
				rb->m = NULL;

				LIST_REMOVE(rb, link);
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
			}
			break;
		}
		H_SYNCQ_PREWRITE(&sc->l1q_mem, bd,
		    sizeof(struct rbd) * LARGE_SUPPLY_BLKSIZE);

		H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
		WRITE4(sc, q->q.card, q->q.card_ioblk);
		BARRIER_W(sc);

		sc->large_cnt += LARGE_SUPPLY_BLKSIZE;

		NEXT_QUEUE_ENTRY(sc->l1queue.head, LARGE_SUPPLY_QLEN);
	}
}

/*
 * Actually start the card. The lock must be held here.
 * Reset the card, load and start the firmware, initialize the queues,
 * read the PROM and supply receive buffers to the card.
 */
static void
fatm_init_locked(struct fatm_softc *sc)
{
	struct rxqueue *q;
	int i, c, error;
	uint32_t start;

	DBG(sc, INIT, ("initialize"));
	if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING)
		fatm_stop(sc);

	/*
	 * Hard reset the board
	 */
	if (fatm_reset(sc))
		return;

	start = firmware_load(sc);
	if (fatm_start_firmware(sc, start) || fatm_init_cmd(sc) ||
	    fatm_getprom(sc)) {
		fatm_reset(sc);
		return;
	}

	/*
	 * Handle media
	 */
	c = READ4(sc, FATMO_MEDIA_TYPE);
	switch (c) {

	  case FORE_MT_TAXI_100:
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_TAXI_100;
		IFP2IFATM(sc->ifp)->mib.pcr = 227273;
		break;

	  case FORE_MT_TAXI_140:
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_TAXI_140;
		IFP2IFATM(sc->ifp)->mib.pcr = 318181;
		break;

	  case FORE_MT_UTP_SONET:
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155;
		IFP2IFATM(sc->ifp)->mib.pcr = 353207;
		break;

	  case FORE_MT_MM_OC3_ST:
	  case FORE_MT_MM_OC3_SC:
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155;
		IFP2IFATM(sc->ifp)->mib.pcr = 353207;
		break;

	  case FORE_MT_SM_OC3_ST:
	  case FORE_MT_SM_OC3_SC:
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_155;
		IFP2IFATM(sc->ifp)->mib.pcr = 353207;
		break;

	  default:
		log(LOG_ERR, "fatm: unknown media type %d\n", c);
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN;
		IFP2IFATM(sc->ifp)->mib.pcr = 353207;
		break;
	}
	sc->ifp->if_baudrate = 53 * 8 * IFP2IFATM(sc->ifp)->mib.pcr;
	utopia_init_media(&sc->utopia);

	/*
	 * Initialize the RBDs
	 */
	for (i = 0; i < FATM_RX_QLEN; i++) {
		q = GET_QUEUE(sc->rxqueue, struct rxqueue, i);
		WRITE4(sc, q->q.card + 0, q->q.card_ioblk);
	}
	BARRIER_W(sc);

	/*
	 * Supply buffers to the card
	 */
	fatm_supply_small_buffers(sc);
	fatm_supply_large_buffers(sc);

	/*
	 * Now set the flag to show that we are ready
	 */
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;

	/*
	 * Start the watchdog timer
	 */
	sc->ifp->if_timer = 5;

	/* start SUNI */
	utopia_start(&sc->utopia);

	ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
	    sc->utopia.carrier == UTP_CARR_OK);

	/* start all channels */
	for (i = 0; i < FORE_MAX_VCC + 1; i++)
		if (sc->vccs[i] != NULL) {
			sc->vccs[i]->vflags |= FATM_VCC_REOPEN;
			error = fatm_load_vc(sc, sc->vccs[i]);
			if (error != 0) {
				if_printf(sc->ifp, "reopening %u "
				    "failed: %d\n", i, error);
				sc->vccs[i]->vflags &= ~FATM_VCC_REOPEN;
			}
		}

	DBG(sc, INIT, ("done"));
}

/*
 * This is the exported initialization function.
 */
static void
fatm_init(void *p)
{
	struct fatm_softc *sc = p;

	FATM_LOCK(sc);
	fatm_init_locked(sc);
	FATM_UNLOCK(sc);
}

/************************************************************/
/*
 * The INTERRUPT handling
 */
/*
 * Check the command queue. If a command was completed, call the completion
 * function for that command.
 */
static void
fatm_intr_drain_cmd(struct fatm_softc *sc)
{
	struct cmdqueue *q;
	int stat;

	/*
	 * Drain command queue
	 */
	for (;;) {
		q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if (stat != FATM_STAT_COMPLETE &&
		    stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
		    stat != FATM_STAT_ERROR)
			break;

		(*q->cb)(sc, q);

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
	}
}

/*
 * Drain the small buffer supply queue.
 */
static void
fatm_intr_drain_small_buffers(struct fatm_softc *sc)
{
	struct supqueue *q;
	int stat;

	for (;;) {
		q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if ((stat & FATM_STAT_COMPLETE) == 0)
			break;
		if (stat & FATM_STAT_ERROR)
			log(LOG_ERR, "%s: status %x\n", __func__, stat);

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		NEXT_QUEUE_ENTRY(sc->s1queue.tail, SMALL_SUPPLY_QLEN);
	}
}

/*
 * Drain the large buffer supply queue.
 */
static void
fatm_intr_drain_large_buffers(struct fatm_softc *sc)
{
	struct supqueue *q;
	int stat;

	for (;;) {
		q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if ((stat & FATM_STAT_COMPLETE) == 0)
			break;
		if (stat & FATM_STAT_ERROR)
			log(LOG_ERR, "%s: status %x\n", __func__, stat);

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		NEXT_QUEUE_ENTRY(sc->l1queue.tail, LARGE_SUPPLY_QLEN);
	}
}

/*
 * Check the receive queue. Send any received PDU up the protocol stack
 * (except when there was an error or the VCI appears to be closed - in
 * this case the PDU is discarded).
 */
static void
fatm_intr_drain_rx(struct fatm_softc *sc)
{
	struct rxqueue *q;
	int stat, mlen;
	u_int i;
	uint32_t h;
	struct mbuf *last, *m0;
	struct rpd *rpd;
	struct rbuf *rb;
	u_int vci, vpi, pt;
	struct atm_pseudohdr aph;
	struct ifnet *ifp;
	struct card_vcc *vc;

	for (;;) {
		q = GET_QUEUE(sc->rxqueue, struct rxqueue, sc->rxqueue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if ((stat & FATM_STAT_COMPLETE) == 0)
			break;

		rpd = (struct rpd *)q->q.ioblk;
		H_SYNCQ_POSTREAD(&sc->rxq_mem, rpd, RPD_SIZE);

		rpd->nseg = le32toh(rpd->nseg);
		mlen = 0;
		m0 = last = NULL;
		for (i = 0; i < rpd->nseg; i++) {
			rb = sc->rbufs + rpd->segment[i].handle;
			if (m0 == NULL) {
				m0 = last = rb->m;
			} else {
				last->m_next = rb->m;
				last = rb->m;
			}
			last->m_next = NULL;
			if (last->m_flags & M_EXT)
				sc->large_cnt--;
			else
				sc->small_cnt--;
			bus_dmamap_sync(sc->rbuf_tag, rb->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->rbuf_tag, rb->map);
			rb->m = NULL;

			LIST_REMOVE(rb, link);
			LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);

			last->m_len = le32toh(rpd->segment[i].length);
			mlen += last->m_len;
		}

		m0->m_pkthdr.len = mlen;
		m0->m_pkthdr.rcvif = sc->ifp;

		h = le32toh(rpd->atm_header);
		vpi = (h >> 20) & 0xff;
		vci = (h >> 4) & 0xffff;
		pt  = (h >> 1) & 0x7;

		/*
		 * Locate the VCC this packet belongs to
		 */
		if (!VC_OK(sc, vpi, vci))
			vc = NULL;
		else if ((vc = sc->vccs[vci]) == NULL ||
		    !(vc->vflags & FATM_VCC_OPEN)) {
			sc->istats.rx_closed++;
			vc = NULL;
		}

		DBG(sc, RCV, ("RCV: vc=%u.%u pt=%u mlen=%d %s", vpi, vci,
		    pt, mlen, vc == NULL ? "dropped" : ""));

		if (vc == NULL) {
			m_freem(m0);
		} else {
#ifdef ENABLE_BPF
			if (!(vc->param.flags & ATMIO_FLAG_NG) &&
			    vc->param.aal == ATMIO_AAL_5 &&
			    (vc->param.flags & ATM_PH_LLCSNAP))
				BPF_MTAP(sc->ifp, m0);
#endif

			ATM_PH_FLAGS(&aph) = vc->param.flags;
			ATM_PH_VPI(&aph) = vpi;
			ATM_PH_SETVCI(&aph, vci);

			ifp = sc->ifp;
			ifp->if_ipackets++;

			vc->ipackets++;
			vc->ibytes += m0->m_pkthdr.len;

			atm_input(ifp, &aph, m0, vc->rxhand);
		}

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		WRITE4(sc, q->q.card, q->q.card_ioblk);
		BARRIER_W(sc);

		NEXT_QUEUE_ENTRY(sc->rxqueue.tail, FATM_RX_QLEN);
	}
}

/*
 * Check the transmit queue. Free the mbuf chains that we were transmitting.
 */
static void
fatm_intr_drain_tx(struct fatm_softc *sc)
{
	struct txqueue *q;
	int stat;

	/*
	 * Drain tx queue
	 */
	for (;;) {
		q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if (stat != FATM_STAT_COMPLETE &&
		    stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
		    stat != FATM_STAT_ERROR)
			break;

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tx_tag, q->map);

		m_freem(q->m);
		q->m = NULL;
		sc->txcnt--;

		NEXT_QUEUE_ENTRY(sc->txqueue.tail, FATM_TX_QLEN);
	}
}

/*
 * Interrupt handler
 */
static void
fatm_intr(void *p)
{
	struct fatm_softc *sc = (struct fatm_softc *)p;

	FATM_LOCK(sc);
	if (!READ4(sc, FATMO_PSR)) {
		FATM_UNLOCK(sc);
		return;
	}
	WRITE4(sc, FATMO_HCR, FATM_HCR_CLRIRQ);

	if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		FATM_UNLOCK(sc);
		return;
	}
	fatm_intr_drain_cmd(sc);
	fatm_intr_drain_rx(sc);
	fatm_intr_drain_tx(sc);
	fatm_intr_drain_small_buffers(sc);
	fatm_intr_drain_large_buffers(sc);
	fatm_supply_small_buffers(sc);
	fatm_supply_large_buffers(sc);

	FATM_UNLOCK(sc);

	if (sc->retry_tx && _IF_QLEN(&sc->ifp->if_snd))
		(*sc->ifp->if_start)(sc->ifp);
}

/*
 * Get device statistics. This must be called with the softc locked.
 * We use a preallocated buffer, so we need to protect this buffer.
 * We do this by using a condition variable and a flag. If the flag is set
 * the buffer is in use by one thread (one thread is executing a GETSTAT
 * card command). In this case all other threads that are trying to get
 * statistics block on that condition variable. When the thread finishes
 * using the buffer it resets the flag and signals the condition variable.
 * This will wake up the next thread that is waiting for the buffer. If the
 * interface is stopped the stopping function will broadcast the cv. All
 * threads will find that the interface has been stopped and return.
 *
 * Acquiring the buffer is done by the fatm_getstat() function. The freeing
 * must be done by the caller when it has finished using the buffer.
 */
static void
fatm_getstat_complete(struct fatm_softc *sc, struct cmdqueue *q)
{

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.get_stat_errors++;
		q->error = EIO;
	}
	wakeup(&sc->sadi_mem);
}

static int
fatm_getstat(struct fatm_softc *sc)
{
	int error;
	struct cmdqueue *q;

	/*
	 * Wait until either the interface is stopped or we can get the
	 * statistics buffer
	 */
	for (;;) {
		if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
			return (EIO);
		if (!(sc->flags & FATM_STAT_INUSE))
			break;
		cv_wait(&sc->cv_stat, &sc->mtx);
	}
	sc->flags |= FATM_STAT_INUSE;

	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
		sc->istats.cmd_queue_full++;
		/* release the buffer again - no command was issued */
		sc->flags &= ~FATM_STAT_INUSE;
		cv_signal(&sc->cv_stat);
		return (EIO);
	}
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = fatm_getstat_complete;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
	    BUS_DMASYNC_PREREAD);

	WRITE4(sc, q->q.card + FATMOC_GSTAT_BUF,
	    sc->sadi_mem.paddr);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP,
	    FATM_OP_REQUEST_STATS | FATM_OP_INTERRUPT_SEL);
	BARRIER_W(sc);

	/*
	 * Wait for the command to complete
	 */
	error = msleep(&sc->sadi_mem, &sc->mtx, PZERO | PCATCH,
	    "fatm_stat", hz);

	switch (error) {

	  case EWOULDBLOCK:
		error = EIO;
		break;

	  case ERESTART:
		error = EINTR;
		break;

	  case 0:
		bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
		    BUS_DMASYNC_POSTREAD);
		error = q->error;
		break;
	}

	/*
	 * Swap statistics
	 */
	if (q->error == 0) {
		u_int i;
		uint32_t *p = (uint32_t *)sc->sadi_mem.mem;

		for (i = 0; i < sizeof(struct fatm_stats) / sizeof(uint32_t);
		    i++, p++)
			*p = be32toh(*p);
	}

	return (error);
}

/*
 * Create a copy of a single mbuf. It can have either internal or
 * external data, it may have a packet header. External data is really
 * copied, so the new buffer is writable.
 */
static struct mbuf *
copy_mbuf(struct mbuf *m)
{
	struct mbuf *new;

	MGET(new, M_DONTWAIT, MT_DATA);
	if (new == NULL)
		return (NULL);

	if (m->m_flags & M_PKTHDR) {
		M_MOVE_PKTHDR(new, m);
		if (m->m_len > MHLEN)
			MCLGET(new, M_WAIT);
	} else {
		if (m->m_len > MLEN)
			MCLGET(new, M_WAIT);
	}

	bcopy(m->m_data, new->m_data, m->m_len);
	new->m_len = m->m_len;
	new->m_flags &= ~M_RDONLY;

	return (new);
}

/*
 * All segments must have a four byte aligned buffer address and a four
 * byte aligned length. Step through an mbuf chain and check these conditions.
 * If the buffer address is not aligned and this is a normal mbuf, move
 * the data down. Else make a copy of the mbuf with aligned data.
 * If the buffer length is not aligned steal data from the next mbuf.
 * We don't need to check whether this has more than one external reference,
 * because stealing data doesn't change the external cluster.
 * If the last mbuf is not aligned, fill with zeroes.
 *
 * Return the packet length (we really should have this in the packet
 * header), but be careful not to count the zero fill at the end.
 *
 * If fixing fails free the chain and zero the pointer.
 *
 * We assume that aligning the virtual address also aligns the mapped bus
 * address.
 */
static u_int
fatm_fix_chain(struct fatm_softc *sc, struct mbuf **mp)
{
	struct mbuf *m = *mp, *prev = NULL, *next, *new;
	u_int mlen = 0, fill = 0;
	int first, off;
	u_char *d, *cp;

	do {
		next = m->m_next;

		if ((uintptr_t)mtod(m, void *) % 4 != 0 ||
		    (m->m_len % 4 != 0 && next)) {
			/*
			 * Needs fixing
			 */
			first = (m == *mp);

			d = mtod(m, u_char *);
			if ((off = (uintptr_t)(void *)d % 4) != 0) {
				if (M_WRITABLE(m)) {
					sc->istats.fix_addr_copy++;
					bcopy(d, d - off, m->m_len);
					m->m_data = (caddr_t)(d - off);
				} else {
					if ((new = copy_mbuf(m)) == NULL) {
						sc->istats.fix_addr_noext++;
						goto fail;
					}
					sc->istats.fix_addr_ext++;
					if (prev)
						prev->m_next = new;
					new->m_next = next;
					m_free(m);
					m = new;
				}
			}

			if ((off = m->m_len % 4) != 0) {
				if (!M_WRITABLE(m)) {
					if ((new = copy_mbuf(m)) == NULL) {
						sc->istats.fix_len_noext++;
						goto fail;
					}
					sc->istats.fix_len_copy++;
					if (prev)
						prev->m_next = new;
					new->m_next = next;
					m_free(m);
					m = new;
				} else
					sc->istats.fix_len++;
				d = mtod(m, u_char *) + m->m_len;
				off = 4 - off;
				while (off) {
					if (next == NULL) {
						*d++ = 0;
						fill++;
					} else if (next->m_len == 0) {
						sc->istats.fix_empty++;
						next = m_free(next);
						continue;
					} else {
						cp = mtod(next, u_char *);
						*d++ = *cp++;
						next->m_len--;
						next->m_data = (caddr_t)cp;
					}
					off--;
					m->m_len++;
				}
			}

			if (first)
				*mp = m;
		}

		mlen += m->m_len;
		prev = m;
	} while ((m = next) != NULL);

	return (mlen - fill);

  fail:
	m_freem(*mp);
	*mp = NULL;
	return (0);
}
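
/*
 * Example of the length fixup above: for a two-mbuf chain with lengths
 * 6 and 6 the first mbuf is 2 bytes short of alignment, so 2 bytes are
 * stolen from the second, giving lengths 8 and 4 (mlen = 12, fill = 0).
 * For a single mbuf of length 6 there is no successor, so 2 zero bytes
 * are appended (fill = 2) and the function returns 8 - 2 = 6.
 */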

/*
 * This helper function is used to load the computed physical addresses
 * into the transmit descriptor.
 */
static void
fatm_tpd_load(void *varg, bus_dma_segment_t *segs, int nsegs,
    bus_size_t mapsize, int error)
{
	struct tpd *tpd = varg;

	if (error)
		return;

	KASSERT(nsegs <= TPD_EXTENSIONS + TXD_FIXED, ("too many segments"));

	tpd->spec = 0;
	while (nsegs--) {
		H_SETDESC(tpd->segment[tpd->spec].buffer, segs->ds_addr);
		H_SETDESC(tpd->segment[tpd->spec].length, segs->ds_len);
		tpd->spec++;
		segs++;
	}
}
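
/*
 * Note that tpd->spec is used here merely as a scratch segment counter:
 * fatm_tx() reads it back as nsegs and then overwrites the field with
 * the real specification word via TDX_MKSPEC().
 */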

/*
 * Start output.
 *
 * Note that we update the internal statistics without the lock here.
 */
static int
fatm_tx(struct fatm_softc *sc, struct mbuf *m, struct card_vcc *vc, u_int mlen)
{
	struct txqueue *q;
	u_int nblks;
	int error, aal, nsegs;
	struct tpd *tpd;

	/*
	 * Get a queue element.
	 * If there isn't one, try to drain the transmit queue. We used
	 * to sleep here if that didn't help, but we must not sleep here,
	 * because we are called with locks held.
	 */
	q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
		fatm_intr_drain_tx(sc);
		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
			if (sc->retry_tx) {
				sc->istats.tx_retry++;
				IF_PREPEND(&sc->ifp->if_snd, m);
				return (1);
			}
			sc->istats.tx_queue_full++;
			m_freem(m);
			return (0);
		}
		sc->istats.tx_queue_almost_full++;
	}

	tpd = q->q.ioblk;

	m->m_data += sizeof(struct atm_pseudohdr);
	m->m_len -= sizeof(struct atm_pseudohdr);

#ifdef ENABLE_BPF
	if (!(vc->param.flags & ATMIO_FLAG_NG) &&
	    vc->param.aal == ATMIO_AAL_5 &&
	    (vc->param.flags & ATM_PH_LLCSNAP))
		BPF_MTAP(sc->ifp, m);
#endif

	/* map the mbuf */
	error = bus_dmamap_load_mbuf(sc->tx_tag, q->map, m,
	    fatm_tpd_load, tpd, BUS_DMA_NOWAIT);
	if (error) {
		sc->ifp->if_oerrors++;
		if_printf(sc->ifp, "mbuf load error=%d\n", error);
		m_freem(m);
		return (0);
	}
	nsegs = tpd->spec;

	bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_PREWRITE);

	/*
	 * OK. Now go and do it.
	 */
	aal = (vc->param.aal == ATMIO_AAL_5) ? 5 : 0;

	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
	q->m = m;

	/*
	 * If the transmit queue is almost full, schedule a
	 * transmit interrupt so that transmit descriptors can
	 * be recycled.
	 */
	H_SETDESC(tpd->spec, TDX_MKSPEC((sc->txcnt >=
	    (4 * FATM_TX_QLEN) / 5), aal, nsegs, mlen));
	H_SETDESC(tpd->atm_header, TDX_MKHDR(vc->param.vpi,
	    vc->param.vci, 0, 0));

	if (vc->param.traffic == ATMIO_TRAFFIC_UBR)
		H_SETDESC(tpd->stream, 0);
	else {
		u_int i;

		for (i = 0; i < RATE_TABLE_SIZE; i++)
			if (rate_table[i].cell_rate < vc->param.tparam.pcr)
				break;
		if (i > 0)
			i--;
		H_SETDESC(tpd->stream, rate_table[i].ratio);
	}
	H_SYNCQ_PREWRITE(&sc->txq_mem, tpd, TPD_SIZE);

	nblks = TDX_SEGS2BLKS(nsegs);

	DBG(sc, XMIT, ("XMIT: mlen=%d spec=0x%x nsegs=%d blocks=%d",
	    mlen, le32toh(tpd->spec), nsegs, nblks));

	WRITE4(sc, q->q.card + 0, q->q.card_ioblk | nblks);
	BARRIER_W(sc);

	sc->txcnt++;
	sc->ifp->if_opackets++;
	vc->obytes += m->m_pkthdr.len;
	vc->opackets++;

	NEXT_QUEUE_ENTRY(sc->txqueue.head, FATM_TX_QLEN);

	return (0);
}
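
/*
 * A note on the rate lookup above, assuming rate_table is sorted by
 * decreasing cell_rate: for a requested PCR of, say, 100000 the loop
 * stops at the first entry whose cell_rate drops below 100000 and then
 * steps back one entry, selecting the slowest table rate that still
 * satisfies the requested PCR.
 */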

static void
fatm_start(struct ifnet *ifp)
{
	struct atm_pseudohdr aph;
	struct fatm_softc *sc;
	struct mbuf *m;
	u_int mlen, vpi, vci;
	struct card_vcc *vc;

	sc = ifp->if_softc;

	while (1) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * Loop through the mbuf chain and compute the total length
		 * of the packet. Check that all data pointers are
		 * 4-byte aligned. If they are not, call fatm_fix_chain to
		 * fix that problem. This comes more or less from the
		 * en driver.
		 */
		mlen = fatm_fix_chain(sc, &m);
		if (m == NULL)
			continue;

		if (m->m_len < sizeof(struct atm_pseudohdr) &&
		    (m = m_pullup(m, sizeof(struct atm_pseudohdr))) == NULL)
			continue;

		aph = *mtod(m, struct atm_pseudohdr *);
		mlen -= sizeof(struct atm_pseudohdr);

		if (mlen == 0) {
			m_freem(m);
			continue;
		}
		if (mlen > FATM_MAXPDU) {
			sc->istats.tx_pdu2big++;
			m_freem(m);
			continue;
		}

		vci = ATM_PH_VCI(&aph);
		vpi = ATM_PH_VPI(&aph);

		/*
		 * From here on we need the softc
		 */
		FATM_LOCK(sc);
		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			FATM_UNLOCK(sc);
			m_freem(m);
			break;
		}
		if (!VC_OK(sc, vpi, vci) || (vc = sc->vccs[vci]) == NULL ||
		    !(vc->vflags & FATM_VCC_OPEN)) {
			FATM_UNLOCK(sc);
			m_freem(m);
			continue;
		}
		if (fatm_tx(sc, m, vc, mlen)) {
			FATM_UNLOCK(sc);
			break;
		}
		FATM_UNLOCK(sc);
	}
}
2107
2108/*
2109 * VCC managment
2110 *
2111 * This may seem complicated. The reason for this is, that we need an
2112 * asynchronuous open/close for the NATM VCCs because our ioctl handler
2113 * is called with the radix node head of the routing table locked. Therefor
2114 * we cannot sleep there and wait for the open/close to succeed. For this
2115 * reason we just initiate the operation from the ioctl.
2116 */
2117
2118/*
2119 * Command the card to open/close a VC.
2120 * Return the queue entry for waiting if we are succesful.
2121 */
2122static struct cmdqueue *
2123fatm_start_vcc(struct fatm_softc *sc, u_int vpi, u_int vci, uint32_t cmd,
2124    u_int mtu, void (*func)(struct fatm_softc *, struct cmdqueue *))
2125{
2126	struct cmdqueue *q;
2127
2128	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
2129
2130	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2131	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
2132		sc->istats.cmd_queue_full++;
2133		return (NULL);
2134	}
2135	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
2136
2137	q->error = 0;
2138	q->cb = func;
2139	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
2140	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
2141
2142	WRITE4(sc, q->q.card + FATMOC_ACTIN_VPVC, MKVPVC(vpi, vci));
2143	BARRIER_W(sc);
2144	WRITE4(sc, q->q.card + FATMOC_ACTIN_MTU, mtu);
2145	BARRIER_W(sc);
2146	WRITE4(sc, q->q.card + FATMOC_OP, cmd);
2147	BARRIER_W(sc);
2148
2149	return (q);
2150}
2151
2152/*
2153 * The VC has been opened/closed and somebody has been waiting for this.
2154 * Wake him up.
2155 */
2156static void
2157fatm_cmd_complete(struct fatm_softc *sc, struct cmdqueue *q)
2158{
2159
2160	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2161	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2162		sc->istats.get_stat_errors++;
2163		q->error = EIO;
2164	}
2165	wakeup(q);
2166}
2167
2168/*
2169 * Open complete
2170 */
2171static void
2172fatm_open_finish(struct fatm_softc *sc, struct card_vcc *vc)
2173{
2174	vc->vflags &= ~FATM_VCC_TRY_OPEN;
2175	vc->vflags |= FATM_VCC_OPEN;
2176
2177	if (vc->vflags & FATM_VCC_REOPEN) {
2178		vc->vflags &= ~FATM_VCC_REOPEN;
2179		return;
2180	}
2181
2182	/* inform management if this is not an NG
2183	 * VCC or it's an NG PVC. */
2184	if (!(vc->param.flags & ATMIO_FLAG_NG) ||
2185	    (vc->param.flags & ATMIO_FLAG_PVC))
2186		ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), 0, vc->param.vci, 1);
2187}
2188
2189/*
2190 * The VC that we have tried to open asynchronously has been opened.
2191 */
2192static void
2193fatm_open_complete(struct fatm_softc *sc, struct cmdqueue *q)
2194{
2195	u_int vci;
2196	struct card_vcc *vc;
2197
2198	vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
2199	vc = sc->vccs[vci];
2200	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2201	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2202		sc->istats.get_stat_errors++;
2203		sc->vccs[vci] = NULL;
		sc->open_vccs--;	/* undo the increment from fatm_open_vcc() */
2204		uma_zfree(sc->vcc_zone, vc);
2205		if_printf(sc->ifp, "opening VCI %u failed\n", vci);
2206		return;
2207	}
2208	fatm_open_finish(sc, vc);
2209}
2210
2211/*
2212 * Wait on the queue entry until the VCC is opened/closed.
2213 */
2214static int
2215fatm_waitvcc(struct fatm_softc *sc, struct cmdqueue *q)
2216{
2217	int error;
2218
2219	/*
2220	 * Wait for the command to complete
2221	 */
2222	error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_vci", hz);
2223
2224	if (error != 0)
2225		return (error);
2226	return (q->error);
2227}
2228
2229/*
2230 * Start to open a VCC. This just initiates the operation.
2231 */
2232static int
2233fatm_open_vcc(struct fatm_softc *sc, struct atmio_openvcc *op)
2234{
2235	int error;
2236	struct card_vcc *vc;
2237
2238	/*
2239	 * Check parameters
2240	 */
2241	if ((op->param.flags & ATMIO_FLAG_NOTX) &&
2242	    (op->param.flags & ATMIO_FLAG_NORX))
2243		return (EINVAL);
2244
2245	if (!VC_OK(sc, op->param.vpi, op->param.vci))
2246		return (EINVAL);
2247	if (op->param.aal != ATMIO_AAL_0 && op->param.aal != ATMIO_AAL_5)
2248		return (EINVAL);
2249
2250	vc = uma_zalloc(sc->vcc_zone, M_NOWAIT | M_ZERO);
2251	if (vc == NULL)
2252		return (ENOMEM);
2253
2254	error = 0;
2255
2256	FATM_LOCK(sc);
2257	if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2258		error = EIO;
2259		goto done;
2260	}
2261	if (sc->vccs[op->param.vci] != NULL) {
2262		error = EBUSY;
2263		goto done;
2264	}
2265	vc->param = op->param;
2266	vc->rxhand = op->rxhand;
2267
2268	switch (op->param.traffic) {
2269
2270	  case ATMIO_TRAFFIC_UBR:
2271		break;
2272
2273	  case ATMIO_TRAFFIC_CBR:
2274		if (op->param.tparam.pcr == 0 ||
2275		    op->param.tparam.pcr > IFP2IFATM(sc->ifp)->mib.pcr) {
2276			error = EINVAL;
2277			goto done;
2278		}
2279		break;
2280
2281	  default:
2282		error = EINVAL;
2283		goto done;
2284	}
2285	vc->ibytes = vc->obytes = 0;
2286	vc->ipackets = vc->opackets = 0;
2287
2288	vc->vflags = FATM_VCC_TRY_OPEN;
2289	sc->vccs[op->param.vci] = vc;
2290	sc->open_vccs++;
2291
2292	error = fatm_load_vc(sc, vc);
2293	if (error != 0) {
2294		sc->vccs[op->param.vci] = NULL;
2295		sc->open_vccs--;
2296		goto done;
2297	}
2298
2299	/* don't free below */
2300	vc = NULL;
2301
2302  done:
2303	FATM_UNLOCK(sc);
2304	if (vc != NULL)
2305		uma_zfree(sc->vcc_zone, vc);
2306	return (error);
2307}
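
/*
 * For illustration only: a kernel caller would open a VCC through the
 * ioctl path roughly like this (the vpi/vci/aal values and the receive
 * handle "cookie" are hypothetical):
 *
 *	struct atmio_openvcc op;
 *
 *	memset(&op, 0, sizeof(op));
 *	op.param.vpi = 0;
 *	op.param.vci = 42;
 *	op.param.aal = ATMIO_AAL_5;
 *	op.param.traffic = ATMIO_TRAFFIC_UBR;
 *	op.rxhand = cookie;
 *	error = (*ifp->if_ioctl)(ifp, SIOCATMOPENVCC, (caddr_t)&op);
 *
 * With ATMIO_FLAG_ASYNC set in op.param.flags the call returns as soon
 * as the command is queued; otherwise it sleeps in fatm_waitvcc().
 */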
2308
2309/*
2310 * Try to initialize the given VC
2311 */
2312static int
2313fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc)
2314{
2315	uint32_t cmd;
2316	struct cmdqueue *q;
2317	int error;
2318
2319	/* Command word: buffer strategy 0 in bits 16ff, AAL type in bits 8ff */
2320	cmd = FATM_OP_ACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL | (0 << 16);
2321	if (vc->param.aal == ATMIO_AAL_0)
2322		cmd |= (0 << 8);
2323	else
2324		cmd |= (5 << 8);
2325
2326	q = fatm_start_vcc(sc, vc->param.vpi, vc->param.vci, cmd, 1,
2327	    (vc->param.flags & ATMIO_FLAG_ASYNC) ?
2328	    fatm_open_complete : fatm_cmd_complete);
2329	if (q == NULL)
2330		return (EIO);
2331
2332	if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) {
2333		error = fatm_waitvcc(sc, q);
2334		if (error != 0)
2335			return (error);
2336		fatm_open_finish(sc, vc);
2337	}
2338	return (0);
2339}
2340
2341/*
2342 * Finish close
2343 */
2344static void
2345fatm_close_finish(struct fatm_softc *sc, struct card_vcc *vc)
2346{
2347	/* inform management if this is not an NG
2348	 * VCC or it's an NG PVC. */
2349	if (!(vc->param.flags & ATMIO_FLAG_NG) ||
2350	    (vc->param.flags & ATMIO_FLAG_PVC))
2351		ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), 0, vc->param.vci, 0);
2352
2353	sc->vccs[vc->param.vci] = NULL;
2354	sc->open_vccs--;
2355
2356	uma_zfree(sc->vcc_zone, vc);
2357}
2358
2359/*
2360 * The VC has been closed.
2361 */
2362static void
2363fatm_close_complete(struct fatm_softc *sc, struct cmdqueue *q)
2364{
2365	u_int vci;
2366	struct card_vcc *vc;
2367
2368	vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
2369	vc = sc->vccs[vci];
2370	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2371	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2372		sc->istats.get_stat_errors++;
2373		/* keep the VCC in that state */
2374		if_printf(sc->ifp, "closing VCI %u failed\n", vci);
2375		return;
2376	}
2377
2378	fatm_close_finish(sc, vc);
2379}
2380
2381/*
2382 * Initiate closing a VCC
2383 */
2384static int
2385fatm_close_vcc(struct fatm_softc *sc, struct atmio_closevcc *cl)
2386{
2387	int error;
2388	struct cmdqueue *q;
2389	struct card_vcc *vc;
2390
2391	if (!VC_OK(sc, cl->vpi, cl->vci))
2392		return (EINVAL);
2393
2394	error = 0;
2395
2396	FATM_LOCK(sc);
2397	if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2398		error = EIO;
2399		goto done;
2400	}
2401	vc = sc->vccs[cl->vci];
2402	if (vc == NULL || !(vc->vflags & (FATM_VCC_OPEN | FATM_VCC_TRY_OPEN))) {
2403		error = ENOENT;
2404		goto done;
2405	}
2406
2407	q = fatm_start_vcc(sc, cl->vpi, cl->vci,
2408	    FATM_OP_DEACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL, 1,
2409	    (vc->param.flags & ATMIO_FLAG_ASYNC) ?
2410	    fatm_close_complete : fatm_cmd_complete);
2411	if (q == NULL) {
2412		error = EIO;
2413		goto done;
2414	}
2415
2416	vc->vflags &= ~(FATM_VCC_OPEN | FATM_VCC_TRY_OPEN);
2417	vc->vflags |= FATM_VCC_TRY_CLOSE;
2418
2419	if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) {
2420		error = fatm_waitvcc(sc, q);
2421		if (error != 0)
2422			goto done;
2423
2424		fatm_close_finish(sc, vc);
2425	}
2426
2427  done:
2428	FATM_UNLOCK(sc);
2429	return (error);
2430}
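
/*
 * Closing mirrors the open path (illustrative, hypothetical values):
 *
 *	struct atmio_closevcc cl;
 *
 *	cl.vpi = 0;
 *	cl.vci = 42;
 *	error = (*ifp->if_ioctl)(ifp, SIOCATMCLOSEVCC, (caddr_t)&cl);
 */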
2431
2432/*
2433 * IOCTL handler
2434 */
2435static int
2436fatm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t arg)
2437{
2438	int error;
2439	struct fatm_softc *sc = ifp->if_softc;
2440	struct ifaddr *ifa = (struct ifaddr *)arg;
2441	struct ifreq *ifr = (struct ifreq *)arg;
2442	struct atmio_closevcc *cl = (struct atmio_closevcc *)arg;
2443	struct atmio_openvcc *op = (struct atmio_openvcc *)arg;
2444	struct atmio_vcctable *vtab;
2445
2446	error = 0;
2447	switch (cmd) {
2448
2449	  case SIOCATMOPENVCC:		/* kernel internal use */
2450		error = fatm_open_vcc(sc, op);
2451		break;
2452
2453	  case SIOCATMCLOSEVCC:		/* kernel internal use */
2454		error = fatm_close_vcc(sc, cl);
2455		break;
2456
2457	  case SIOCSIFADDR:
2458		FATM_LOCK(sc);
2459		ifp->if_flags |= IFF_UP;
2460		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2461			fatm_init_locked(sc);
2462		switch (ifa->ifa_addr->sa_family) {
2463#ifdef INET
2464		  case AF_INET:
2465		  case AF_INET6:
2466			ifa->ifa_rtrequest = atm_rtrequest;
2467			break;
2468#endif
2469		  default:
2470			break;
2471		}
2472		FATM_UNLOCK(sc);
2473		break;
2474
2475	  case SIOCSIFFLAGS:
2476		FATM_LOCK(sc);
2477		if (ifp->if_flags & IFF_UP) {
2478			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2479				fatm_init_locked(sc);
2480			}
2481		} else {
2482			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2483				fatm_stop(sc);
2484			}
2485		}
2486		FATM_UNLOCK(sc);
2487		break;
2488
2489	  case SIOCGIFMEDIA:
2490	  case SIOCSIFMEDIA:
2491		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2492			error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
2493		else
2494			error = EINVAL;
2495		break;
2496
2497	  case SIOCATMGVCCS:
2498		/* return vcc table */
2499		vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
2500		    FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 1);
2501		error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
2502		    vtab->count * sizeof(vtab->vccs[0]));
2503		free(vtab, M_DEVBUF);
2504		break;
2505
2506	  case SIOCATMGETVCCS:	/* internal netgraph use */
2507		vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
2508		    FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 0);
2509		if (vtab == NULL) {
2510			error = ENOMEM;
2511			break;
2512		}
2513		*(void **)arg = vtab;
2514		break;
2515
2516	  default:
2517		DBG(sc, IOCTL, ("+++ cmd=%08lx arg=%p", cmd, arg));
2518		error = EINVAL;
2519		break;
2520	}
2521
2522	return (error);
2523}
2524
2525/*
2526 * Detach from the interface and free all resources allocated during
2527 * initialisation and later.
2528 */
2529static int
2530fatm_detach(device_t dev)
2531{
2532	u_int i;
2533	struct rbuf *rb;
2534	struct fatm_softc *sc;
2535	struct txqueue *tx;
2536
2537	sc = device_get_softc(dev);
2538
2539	if (device_is_alive(dev)) {
2540		FATM_LOCK(sc);
2541		fatm_stop(sc);
2542		utopia_detach(&sc->utopia);
2543		FATM_UNLOCK(sc);
2544		atm_ifdetach(sc->ifp);		/* XXX race */
2545	}
2546
2547	if (sc->ih != NULL)
2548		bus_teardown_intr(dev, sc->irqres, sc->ih);
2549
2550	while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
2551		if_printf(sc->ifp, "rbuf %p still in use!\n", rb);
2552		bus_dmamap_unload(sc->rbuf_tag, rb->map);
2553		m_freem(rb->m);
2554		LIST_REMOVE(rb, link);
2555		LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
2556	}
2557
2558	if (sc->txqueue.chunk != NULL) {
2559		for (i = 0; i < FATM_TX_QLEN; i++) {
2560			tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
2561			bus_dmamap_destroy(sc->tx_tag, tx->map);
2562		}
2563	}
2564
2565	while ((rb = LIST_FIRST(&sc->rbuf_free)) != NULL) {
2566		bus_dmamap_destroy(sc->rbuf_tag, rb->map);
2567		LIST_REMOVE(rb, link);
2568	}
2569
2570	if (sc->rbufs != NULL)
2571		free(sc->rbufs, M_DEVBUF);
2572	if (sc->vccs != NULL) {
2573		for (i = 0; i < FORE_MAX_VCC + 1; i++)
2574			if (sc->vccs[i] != NULL) {
2575				uma_zfree(sc->vcc_zone, sc->vccs[i]);
2576				sc->vccs[i] = NULL;
2577			}
2578		free(sc->vccs, M_DEVBUF);
2579	}
2580	if (sc->vcc_zone != NULL)
2581		uma_zdestroy(sc->vcc_zone);
2582
2583	if (sc->l1queue.chunk != NULL)
2584		free(sc->l1queue.chunk, M_DEVBUF);
2585	if (sc->s1queue.chunk != NULL)
2586		free(sc->s1queue.chunk, M_DEVBUF);
2587	if (sc->rxqueue.chunk != NULL)
2588		free(sc->rxqueue.chunk, M_DEVBUF);
2589	if (sc->txqueue.chunk != NULL)
2590		free(sc->txqueue.chunk, M_DEVBUF);
2591	if (sc->cmdqueue.chunk != NULL)
2592		free(sc->cmdqueue.chunk, M_DEVBUF);
2593
2594	destroy_dma_memory(&sc->reg_mem);
2595	destroy_dma_memory(&sc->sadi_mem);
2596	destroy_dma_memory(&sc->prom_mem);
2597#ifdef TEST_DMA_SYNC
2598	destroy_dma_memoryX(&sc->s1q_mem);
2599	destroy_dma_memoryX(&sc->l1q_mem);
2600	destroy_dma_memoryX(&sc->rxq_mem);
2601	destroy_dma_memoryX(&sc->txq_mem);
2602	destroy_dma_memoryX(&sc->stat_mem);
2603#endif
2604
2605	if (sc->tx_tag != NULL)
2606		if (bus_dma_tag_destroy(sc->tx_tag))
2607			printf("tx DMA tag busy!\n");
2608
2609	if (sc->rbuf_tag != NULL)
2610		if (bus_dma_tag_destroy(sc->rbuf_tag))
2611			printf("rbuf DMA tag busy!\n");
2612
2613	if (sc->parent_dmat != NULL)
2614		if (bus_dma_tag_destroy(sc->parent_dmat))
2615			printf("parent DMA tag busy!\n");
2616
2617	if (sc->irqres != NULL)
2618		bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irqres);
2619
2620	if (sc->memres != NULL)
2621		bus_release_resource(dev, SYS_RES_MEMORY,
2622		    sc->memid, sc->memres);
2623
2624	(void)sysctl_ctx_free(&sc->sysctl_ctx);
2625
2626	cv_destroy(&sc->cv_stat);
2627	cv_destroy(&sc->cv_regs);
2628
2629	mtx_destroy(&sc->mtx);
2630
2631	if_free(sc->ifp);
2632
2633	return (0);
2634}
2635
2636/*
2637 * Sysctl handler
2638 */
2639static int
2640fatm_sysctl_istats(SYSCTL_HANDLER_ARGS)
2641{
2642	struct fatm_softc *sc = arg1;
2643	u_long *ret;
2644	int error;
2645
2646	ret = malloc(sizeof(sc->istats), M_TEMP, M_WAITOK);
2647
2648	FATM_LOCK(sc);
2649	bcopy(&sc->istats, ret, sizeof(sc->istats));
2650	FATM_UNLOCK(sc);
2651
2652	error = SYSCTL_OUT(req, ret, sizeof(sc->istats));
2653	free(ret, M_TEMP);
2654
2655	return (error);
2656}
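
/*
 * The oid is created under the per-device sysctl tree in fatm_attach(),
 * so the counters can be read from userland, e.g. (assuming unit 0):
 *
 *	sysctl hw.atm.fatm0.istats
 */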
2657
2658/*
2659 * Sysctl handler for card statistics
2660 * This is disabled because reading it destroys the PHY statistics.
2661 */
2662static int
2663fatm_sysctl_stats(SYSCTL_HANDLER_ARGS)
2664{
2665	struct fatm_softc *sc = arg1;
2666	int error;
2667	const struct fatm_stats *s;
2668	u_long *ret;
2669	u_int i;
2670
2671	ret = malloc(sizeof(u_long) * FATM_NSTATS, M_TEMP, M_WAITOK);
2672
2673	FATM_LOCK(sc);
2674
2675	if ((error = fatm_getstat(sc)) == 0) {
2676		s = sc->sadi_mem.mem;
2677		i = 0;
2678		ret[i++] = s->phy_4b5b.crc_header_errors;
2679		ret[i++] = s->phy_4b5b.framing_errors;
2680		ret[i++] = s->phy_oc3.section_bip8_errors;
2681		ret[i++] = s->phy_oc3.path_bip8_errors;
2682		ret[i++] = s->phy_oc3.line_bip24_errors;
2683		ret[i++] = s->phy_oc3.line_febe_errors;
2684		ret[i++] = s->phy_oc3.path_febe_errors;
2685		ret[i++] = s->phy_oc3.corr_hcs_errors;
2686		ret[i++] = s->phy_oc3.ucorr_hcs_errors;
2687		ret[i++] = s->atm.cells_transmitted;
2688		ret[i++] = s->atm.cells_received;
2689		ret[i++] = s->atm.vpi_bad_range;
2690		ret[i++] = s->atm.vpi_no_conn;
2691		ret[i++] = s->atm.vci_bad_range;
2692		ret[i++] = s->atm.vci_no_conn;
2693		ret[i++] = s->aal0.cells_transmitted;
2694		ret[i++] = s->aal0.cells_received;
2695		ret[i++] = s->aal0.cells_dropped;
2696		ret[i++] = s->aal4.cells_transmitted;
2697		ret[i++] = s->aal4.cells_received;
2698		ret[i++] = s->aal4.cells_crc_errors;
2699		ret[i++] = s->aal4.cels_protocol_errors;
2700		ret[i++] = s->aal4.cells_dropped;
2701		ret[i++] = s->aal4.cspdus_transmitted;
2702		ret[i++] = s->aal4.cspdus_received;
2703		ret[i++] = s->aal4.cspdus_protocol_errors;
2704		ret[i++] = s->aal4.cspdus_dropped;
2705		ret[i++] = s->aal5.cells_transmitted;
2706		ret[i++] = s->aal5.cells_received;
2707		ret[i++] = s->aal5.congestion_experienced;
2708		ret[i++] = s->aal5.cells_dropped;
2709		ret[i++] = s->aal5.cspdus_transmitted;
2710		ret[i++] = s->aal5.cspdus_received;
2711		ret[i++] = s->aal5.cspdus_crc_errors;
2712		ret[i++] = s->aal5.cspdus_protocol_errors;
2713		ret[i++] = s->aal5.cspdus_dropped;
2714		ret[i++] = s->aux.small_b1_failed;
2715		ret[i++] = s->aux.large_b1_failed;
2716		ret[i++] = s->aux.small_b2_failed;
2717		ret[i++] = s->aux.large_b2_failed;
2718		ret[i++] = s->aux.rpd_alloc_failed;
2719		ret[i++] = s->aux.receive_carrier;
2720	}
2721	/* declare the buffer free */
2722	sc->flags &= ~FATM_STAT_INUSE;
2723	cv_signal(&sc->cv_stat);
2724
2725	FATM_UNLOCK(sc);
2726
2727	if (error == 0)
2728		error = SYSCTL_OUT(req, ret, sizeof(u_long) * FATM_NSTATS);
2729	free(ret, M_TEMP);
2730
2731	return (error);
2732}
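
/*
 * Note on the locking above: the statistics buffer is marked busy with
 * FATM_STAT_INUSE (presumably set when fatm_getstat() starts the
 * request); clearing the flag and signalling cv_stat lets a concurrent
 * reader blocked on the buffer proceed.
 */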
2733
2734#define MAXDMASEGS 32		/* maximum number of DMA segments per request */
2735
2736/*
2737 * Attach to the device.
2738 *
2739 * We assume that there is a global lock (Giant in this case) that prevents
2740 * multiple threads from entering this function. This makes sense, doesn't it?
2741 */
2742static int
2743fatm_attach(device_t dev)
2744{
2745	struct ifnet *ifp;
2746	struct fatm_softc *sc;
2747	int unit;
2748	uint16_t cfg;
2749	int error = 0;
2750	struct rbuf *rb;
2751	u_int i;
2752	struct txqueue *tx;
2753
2754	sc = device_get_softc(dev);
2755	unit = device_get_unit(dev);
2756
2757	ifp = sc->ifp = if_alloc(IFT_ATM);
2758	if (ifp == NULL) {
2759		error = ENOSPC;
2760		goto fail;
2761	}
2762
2763	IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_PCA200E;
2764	IFP2IFATM(sc->ifp)->mib.serial = 0;
2765	IFP2IFATM(sc->ifp)->mib.hw_version = 0;
2766	IFP2IFATM(sc->ifp)->mib.sw_version = 0;
2767	IFP2IFATM(sc->ifp)->mib.vpi_bits = 0;
2768	IFP2IFATM(sc->ifp)->mib.vci_bits = FORE_VCIBITS;
2769	IFP2IFATM(sc->ifp)->mib.max_vpcs = 0;
2770	IFP2IFATM(sc->ifp)->mib.max_vccs = FORE_MAX_VCC;
2771	IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN;
2772	IFP2IFATM(sc->ifp)->phy = &sc->utopia;
2773
2774	LIST_INIT(&sc->rbuf_free);
2775	LIST_INIT(&sc->rbuf_used);
2776
2777	/*
2778	 * Initialize mutex and condition variables.
2779	 */
2780	mtx_init(&sc->mtx, device_get_nameunit(dev),
2781	    MTX_NETWORK_LOCK, MTX_DEF);
2782
2783	cv_init(&sc->cv_stat, "fatm_stat");
2784	cv_init(&sc->cv_regs, "fatm_regs");
2785
2786	sysctl_ctx_init(&sc->sysctl_ctx);
2787
2788	/*
2789	 * Make the sysctl tree
2790	 */
2791	if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
2792	    SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
2793	    device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
2794		goto fail;
2795
2796	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2797	    OID_AUTO, "istats", CTLFLAG_RD, sc, 0, fatm_sysctl_istats,
2798	    "LU", "internal statistics") == NULL)
2799		goto fail;
2800
2801	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2802	    OID_AUTO, "stats", CTLFLAG_RD, sc, 0, fatm_sysctl_stats,
2803	    "LU", "card statistics") == NULL)
2804		goto fail;
2805
2806	if (SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2807	    OID_AUTO, "retry_tx", CTLFLAG_RW, &sc->retry_tx, 0,
2808	    "retry flag") == NULL)
2809		goto fail;
2810
2811#ifdef FATM_DEBUG
2812	if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2813	    OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "debug flags")
2814	    == NULL)
2815		goto fail;
2816	sc->debug = FATM_DEBUG;
2817#endif
2818
2819	/*
2820	 * Network subsystem stuff
2821	 */
2822	ifp->if_softc = sc;
2823	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2824	ifp->if_flags = IFF_SIMPLEX;
2825	ifp->if_ioctl = fatm_ioctl;
2826	ifp->if_start = fatm_start;
2827	ifp->if_watchdog = fatm_watchdog;
2828	ifp->if_init = fatm_init;
2829	ifp->if_linkmib = &IFP2IFATM(sc->ifp)->mib;
2830	ifp->if_linkmiblen = sizeof(IFP2IFATM(sc->ifp)->mib);
2831
2832	/*
2833	 * Enable memory mapping and bus mastering
2834	 */
2835	cfg = pci_read_config(dev, PCIR_COMMAND, 2);
2836	cfg |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
2837	pci_write_config(dev, PCIR_COMMAND, cfg, 2);
2838
2839	/*
2840	 * Map memory
2841	 */
2842	cfg = pci_read_config(dev, PCIR_COMMAND, 2);
2843	if (!(cfg & PCIM_CMD_MEMEN)) {
2844		if_printf(ifp, "failed to enable memory mapping\n");
2845		error = ENXIO;
2846		goto fail;
2847	}
2848	sc->memid = 0x10;
2849	sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->memid,
2850	    RF_ACTIVE);
2851	if (sc->memres == NULL) {
2852		if_printf(ifp, "could not map memory\n");
2853		error = ENXIO;
2854		goto fail;
2855	}
2856	sc->memh = rman_get_bushandle(sc->memres);
2857	sc->memt = rman_get_bustag(sc->memres);
2858
2859	/*
2860	 * Convert endianness of slave accesses
2861	 */
2862	cfg = pci_read_config(dev, FATM_PCIR_MCTL, 1);
2863	cfg |= FATM_PCIM_SWAB;
2864	pci_write_config(dev, FATM_PCIR_MCTL, cfg, 1);
2865
2866	/*
2867	 * Allocate interrupt (activate at the end)
2868	 */
2869	sc->irqid = 0;
2870	sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
2871	    RF_SHAREABLE | RF_ACTIVE);
2872	if (sc->irqres == NULL) {
2873		if_printf(ifp, "could not allocate irq\n");
2874		error = ENXIO;
2875		goto fail;
2876	}
2877
2878	/*
2879	 * Allocate the parent DMA tag. This is used simply to hold overall
2880	 * restrictions for the controller (and PCI bus) and is never used
2881	 * to do anything.
2882	 */
2883	if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
2884	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2885	    NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, MAXDMASEGS,
2886	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
2887	    &sc->parent_dmat)) {
2888		if_printf(ifp, "could not allocate parent DMA tag\n");
2889		error = ENOMEM;
2890		goto fail;
2891	}
2892
2893	/*
2894	 * Allocate the receive buffer DMA tag. This tag must map at most
2895	 * an mbuf cluster.
2896	 */
2897	if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
2898	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2899	    NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
2900	    NULL, NULL, &sc->rbuf_tag)) {
2901		if_printf(ifp, "could not allocate rbuf DMA tag\n");
2902		error = ENOMEM;
2903		goto fail;
2904	}
2905
2906	/*
2907	 * Allocate the transmission DMA tag. Must add 1, because the
2908	 * rounded-up PDU will be 65536 bytes long.
2909	 */
2910	if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
2911	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2912	    NULL, NULL,
2913	    FATM_MAXPDU + 1, TPD_EXTENSIONS + TXD_FIXED, MCLBYTES, 0,
2914	    NULL, NULL, &sc->tx_tag)) {
2915		if_printf(ifp, "could not allocate tx DMA tag\n");
2916		error = ENOMEM;
2917		goto fail;
2918	}
2919
2920	/*
2921	 * Allocate DMAable memory.
2922	 */
2923	sc->stat_mem.size = sizeof(uint32_t) * (FATM_CMD_QLEN + FATM_TX_QLEN
2924	    + FATM_RX_QLEN + SMALL_SUPPLY_QLEN + LARGE_SUPPLY_QLEN);
2925	sc->stat_mem.align = 4;
2926
2927	sc->txq_mem.size = FATM_TX_QLEN * TPD_SIZE;
2928	sc->txq_mem.align = 32;
2929
2930	sc->rxq_mem.size = FATM_RX_QLEN * RPD_SIZE;
2931	sc->rxq_mem.align = 32;
2932
2933	sc->s1q_mem.size = SMALL_SUPPLY_QLEN *
2934	    BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE);
2935	sc->s1q_mem.align = 32;
2936
2937	sc->l1q_mem.size = LARGE_SUPPLY_QLEN *
2938	    BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE);
2939	sc->l1q_mem.align = 32;
2940
2941#ifdef TEST_DMA_SYNC
2942	if ((error = alloc_dma_memoryX(sc, "STATUS", &sc->stat_mem)) != 0 ||
2943	    (error = alloc_dma_memoryX(sc, "TXQ", &sc->txq_mem)) != 0 ||
2944	    (error = alloc_dma_memoryX(sc, "RXQ", &sc->rxq_mem)) != 0 ||
2945	    (error = alloc_dma_memoryX(sc, "S1Q", &sc->s1q_mem)) != 0 ||
2946	    (error = alloc_dma_memoryX(sc, "L1Q", &sc->l1q_mem)) != 0)
2947		goto fail;
2948#else
2949	if ((error = alloc_dma_memory(sc, "STATUS", &sc->stat_mem)) != 0 ||
2950	    (error = alloc_dma_memory(sc, "TXQ", &sc->txq_mem)) != 0 ||
2951	    (error = alloc_dma_memory(sc, "RXQ", &sc->rxq_mem)) != 0 ||
2952	    (error = alloc_dma_memory(sc, "S1Q", &sc->s1q_mem)) != 0 ||
2953	    (error = alloc_dma_memory(sc, "L1Q", &sc->l1q_mem)) != 0)
2954		goto fail;
2955#endif
2956
2957	sc->prom_mem.size = sizeof(struct prom);
2958	sc->prom_mem.align = 32;
2959	if ((error = alloc_dma_memory(sc, "PROM", &sc->prom_mem)) != 0)
2960		goto fail;
2961
2962	sc->sadi_mem.size = sizeof(struct fatm_stats);
2963	sc->sadi_mem.align = 32;
2964	if ((error = alloc_dma_memory(sc, "STATISTICS", &sc->sadi_mem)) != 0)
2965		goto fail;
2966
2967	sc->reg_mem.size = sizeof(uint32_t) * FATM_NREGS;
2968	sc->reg_mem.align = 32;
2969	if ((error = alloc_dma_memory(sc, "REGISTERS", &sc->reg_mem)) != 0)
2970		goto fail;
2971
2972	/*
2973	 * Allocate queues
2974	 */
2975	sc->cmdqueue.chunk = malloc(FATM_CMD_QLEN * sizeof(struct cmdqueue),
2976	    M_DEVBUF, M_ZERO | M_WAITOK);
2977	sc->txqueue.chunk = malloc(FATM_TX_QLEN * sizeof(struct txqueue),
2978	    M_DEVBUF, M_ZERO | M_WAITOK);
2979	sc->rxqueue.chunk = malloc(FATM_RX_QLEN * sizeof(struct rxqueue),
2980	    M_DEVBUF, M_ZERO | M_WAITOK);
2981	sc->s1queue.chunk = malloc(SMALL_SUPPLY_QLEN * sizeof(struct supqueue),
2982	    M_DEVBUF, M_ZERO | M_WAITOK);
2983	sc->l1queue.chunk = malloc(LARGE_SUPPLY_QLEN * sizeof(struct supqueue),
2984	    M_DEVBUF, M_ZERO | M_WAITOK);
2985
2986	sc->vccs = malloc((FORE_MAX_VCC + 1) * sizeof(sc->vccs[0]),
2987	    M_DEVBUF, M_ZERO | M_WAITOK);
2988	sc->vcc_zone = uma_zcreate("FATM vccs", sizeof(struct card_vcc),
2989	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
2990	if (sc->vcc_zone == NULL) {
2991		error = ENOMEM;
2992		goto fail;
2993	}
2994
2995	/*
2996	 * Allocate memory for the receive buffer headers. The total number
2997	 * of headers should probably also include the maximum number of
2998	 * buffers on the receive queue.
2999	 */
3000	sc->rbuf_total = SMALL_POOL_SIZE + LARGE_POOL_SIZE;
3001	sc->rbufs = malloc(sc->rbuf_total * sizeof(struct rbuf),
3002	    M_DEVBUF, M_ZERO | M_WAITOK);
3003
3004	/*
3005	 * Put all rbuf headers on the free list and create DMA maps.
3006	 */
3007	for (rb = sc->rbufs, i = 0; i < sc->rbuf_total; i++, rb++) {
3008		if ((error = bus_dmamap_create(sc->rbuf_tag, 0, &rb->map))) {
3009			if_printf(sc->ifp, "creating rx map: %d\n",
3010			    error);
3011			goto fail;
3012		}
3013		LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
3014	}
3015
3016	/*
3017	 * Create DMA maps for transmission. In case of an error, free the
3018	 * allocated DMA maps, because on some architectures maps are NULL
3019	 * and we cannot distinguish between a failure and a NULL map in
3020	 * the detach routine.
3021	 */
3022	for (i = 0; i < FATM_TX_QLEN; i++) {
3023		tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
3024		if ((error = bus_dmamap_create(sc->tx_tag, 0, &tx->map))) {
3025			if_printf(sc->ifp, "creating tx map: %d\n",
3026			    error);
3027			while (i > 0) {
3028				tx = GET_QUEUE(sc->txqueue, struct txqueue,
3029				    i - 1);
3030				bus_dmamap_destroy(sc->tx_tag, tx->map);
3031				i--;
3032			}
3033			goto fail;
3034		}
3035	}
3036
3037	utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->mtx,
3038	    &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
3039	    &fatm_utopia_methods);
3040	sc->utopia.flags |= UTP_FL_NORESET | UTP_FL_POLL_CARRIER;
3041
3042	/*
3043	 * Attach the interface
3044	 */
3045	atm_ifattach(ifp);
3046	ifp->if_snd.ifq_maxlen = 512;
3047
3048#ifdef ENABLE_BPF
3049	bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
3050#endif
3051
3052	error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET | INTR_MPSAFE,
3053	    NULL, fatm_intr, sc, &sc->ih);
3054	if (error) {
3055		if_printf(ifp, "couldn't setup irq\n");
3056		goto fail;
3057	}
3058
3059  fail:
3060	if (error)
3061		fatm_detach(dev);
3062
3063	return (error);
3064}
3065
3066#if defined(FATM_DEBUG) && 0
3067static void
3068dump_s1_queue(struct fatm_softc *sc)
3069{
3070	int i;
3071	struct supqueue *q;
3072
3073	for (i = 0; i < SMALL_SUPPLY_QLEN; i++) {
3074		q = GET_QUEUE(sc->s1queue, struct supqueue, i);
3075		printf("%2d: card=%x(%x,%x) stat=%x\n", i,
3076		    q->q.card,
3077		    READ4(sc, q->q.card),
3078		    READ4(sc, q->q.card + 4),
3079		    *q->q.statp);
3080	}
3081}
3082#endif
3083
3084/*
3085 * Driver infrastructure.
3086 */
3087static device_method_t fatm_methods[] = {
3088	DEVMETHOD(device_probe,		fatm_probe),
3089	DEVMETHOD(device_attach,	fatm_attach),
3090	DEVMETHOD(device_detach,	fatm_detach),
3091	{ 0, 0 }
3092};
3093static driver_t fatm_driver = {
3094	"fatm",
3095	fatm_methods,
3096	sizeof(struct fatm_softc),
3097};
3098
3099DRIVER_MODULE(fatm, pci, fatm_driver, fatm_devclass, 0, 0);
3100