if_fatm.c revision 147256
1/*-
2 * Copyright (c) 2001-2003
3 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
4 * 	All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * Author: Hartmut Brandt <harti@freebsd.org>
28 *
29 * Fore PCA200E driver for NATM
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: head/sys/dev/fatm/if_fatm.c 147256 2005-06-10 16:49:24Z brooks $");
34
35#include "opt_inet.h"
36#include "opt_natm.h"
37
38#include <sys/types.h>
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/malloc.h>
42#include <sys/kernel.h>
43#include <sys/bus.h>
44#include <sys/errno.h>
45#include <sys/conf.h>
46#include <sys/module.h>
47#include <sys/queue.h>
48#include <sys/syslog.h>
49#include <sys/endian.h>
50#include <sys/sysctl.h>
51#include <sys/condvar.h>
52#include <vm/uma.h>
53
54#include <sys/sockio.h>
55#include <sys/mbuf.h>
56#include <sys/socket.h>
57
58#include <net/if.h>
59#include <net/if_media.h>
60#include <net/if_types.h>
61#include <net/if_atm.h>
62#include <net/route.h>
63#ifdef INET
64#include <netinet/in.h>
65#include <netinet/if_atm.h>
66#endif
67
68#include <machine/bus.h>
69#include <machine/resource.h>
70#include <sys/bus.h>
71#include <sys/rman.h>
72#include <dev/pci/pcireg.h>
73#include <dev/pci/pcivar.h>
74
75#include <dev/utopia/utopia.h>
76
77#include <dev/fatm/if_fatmreg.h>
78#include <dev/fatm/if_fatmvar.h>
79
80#include <dev/fatm/firmware.h>
81
devclass_t fatm_devclass;

/* Table of PCI vendor/device id pairs recognized by this driver. */
static const struct {
	uint16_t	vid;
	uint16_t	did;
	const char	*name;
} fatm_devs[] = {
	{ 0x1127, 0x300,
	  "FORE PCA200E" },
	{ 0, 0, NULL }
};

/* Traffic-shaping rate table; the entries are generated into the header. */
static const struct rate {
	uint32_t	ratio;
	uint32_t	cell_rate;
} rate_table[] = {
#include <dev/fatm/if_fatm_rate.h>
};
#define RATE_TABLE_SIZE (sizeof(rate_table) / sizeof(rate_table[0]))

SYSCTL_DECL(_hw_atm);

MODULE_DEPEND(fatm, utopia, 1, 1, 1);

/* PHY (SUNI) register access methods handed to the utopia module. */
static int	fatm_utopia_readregs(struct ifatm *, u_int, uint8_t *, u_int *);
static int	fatm_utopia_writereg(struct ifatm *, u_int, u_int, u_int);

static const struct utopia_methods fatm_utopia_methods = {
	fatm_utopia_readregs,
	fatm_utopia_writereg
};

/*
 * Check that a VPI/VCI pair fits into the bit widths reported in the
 * interface MIB; VCI 0 is never valid.
 */
#define VC_OK(SC, VPI, VCI)						\
	(((VPI) & ~((1 << IFP2IFATM((SC)->ifp)->mib.vpi_bits) - 1)) == 0 &&	\
	 (VCI) != 0 && ((VCI) & ~((1 << IFP2IFATM((SC)->ifp)->mib.vci_bits) - 1)) == 0)

static int fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc);
119
120/*
121 * Probing is easy: step trough the list of known vendor and device
122 * ids and compare. If one is found - it's our.
123 */
124static int
125fatm_probe(device_t dev)
126{
127	int i;
128
129	for (i = 0; fatm_devs[i].name; i++)
130		if (pci_get_vendor(dev) == fatm_devs[i].vid &&
131		    pci_get_device(dev) == fatm_devs[i].did) {
132			device_set_desc(dev, fatm_devs[i].name);
133			return (BUS_PROBE_DEFAULT);
134		}
135	return (ENXIO);
136}
137
/*
 * Function called at completion of a SUNI writeregs command.
 * This is called from the interrupt handler while holding the softc lock.
 * We use the queue entry as the rendezvous point: the thread sleeping in
 * fatm_utopia_writereg() picks up q->error after the wakeup.
 */
static void
fatm_utopia_writeregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
{

	/* sync the status word from DMA memory before looking at it */
	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if(H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.suni_reg_errors++;
		q->error = EIO;		/* reported to the sleeping thread */
	}
	wakeup(q);
}
154
/*
 * Write a SUNI register. The bits that are 1 in mask are written from val
 * into register reg. We wait for the command to complete by sleeping on
 * the command queue entry (woken by fatm_utopia_writeregs_complete()).
 *
 * We assume, that we already hold the softc mutex.
 */
static int
fatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
{
	int error;
	struct cmdqueue *q;
	struct fatm_softc *sc;

	sc = ifatm->ifp->if_softc;
	FATM_CHECKLOCK(sc);
	if (!(ifatm->ifp->if_flags & IFF_RUNNING))
		return (EIO);

	/* get queue element and fill it */
	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
		sc->istats.cmd_queue_full++;
		return (EIO);
	}
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = fatm_utopia_writeregs_complete;	/* runs from interrupt handler */
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	/* issue the SETOC3 operation to the card */
	WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, 0);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP,
	    FATM_MAKE_SETOC3(reg, val, mask) | FATM_OP_INTERRUPT_SEL);
	BARRIER_W(sc);

	/*
	 * Wait for the command to complete (1 second timeout)
	 */
	error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_setreg", hz);

	switch(error) {

	  case EWOULDBLOCK:	/* timed out */
		error = EIO;
		break;

	  case ERESTART:	/* interrupted by a signal */
		error = EINTR;
		break;

	  case 0:
		error = q->error;	/* set by the completion callback */
		break;
	}

	return (error);
}
217
/*
 * Function called at completion of a SUNI readregs command.
 * This is called from the interrupt handler while holding the softc lock.
 * We use reg_mem as the rendezvous point: fatm_utopia_readregs_internal()
 * sleeps on its address and picks up q->error after the wakeup.
 */
static void
fatm_utopia_readregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
{

	/* sync the status word from DMA memory before looking at it */
	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.suni_reg_errors++;
		q->error = EIO;		/* reported to the sleeping thread */
	}
	wakeup(&sc->reg_mem);
}
234
235/*
236 * Read SUNI registers
237 *
238 * We use a preallocated buffer to read the registers. Therefor we need
239 * to protect against multiple threads trying to read registers. We do this
240 * with a condition variable and a flag. We wait for the command to complete by sleeping on
241 * the register memory.
242 *
243 * We assume, that we already hold the softc mutex.
244 */
245static int
246fatm_utopia_readregs_internal(struct fatm_softc *sc)
247{
248	int error, i;
249	uint32_t *ptr;
250	struct cmdqueue *q;
251
252	/* get the buffer */
253	for (;;) {
254		if (!(sc->ifp->if_flags & IFF_RUNNING))
255			return (EIO);
256		if (!(sc->flags & FATM_REGS_INUSE))
257			break;
258		cv_wait(&sc->cv_regs, &sc->mtx);
259	}
260	sc->flags |= FATM_REGS_INUSE;
261
262	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
263
264	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
265	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
266		sc->istats.cmd_queue_full++;
267		return (EIO);
268	}
269	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
270
271	q->error = 0;
272	q->cb = fatm_utopia_readregs_complete;
273	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
274	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
275
276	bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map, BUS_DMASYNC_PREREAD);
277
278	WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, sc->reg_mem.paddr);
279	BARRIER_W(sc);
280	WRITE4(sc, q->q.card + FATMOC_OP,
281	    FATM_OP_OC3_GET_REG | FATM_OP_INTERRUPT_SEL);
282	BARRIER_W(sc);
283
284	/*
285	 * Wait for the command to complete
286	 */
287	error = msleep(&sc->reg_mem, &sc->mtx, PZERO | PCATCH,
288	    "fatm_getreg", hz);
289
290	switch(error) {
291
292	  case EWOULDBLOCK:
293		error = EIO;
294		break;
295
296	  case ERESTART:
297		error = EINTR;
298		break;
299
300	  case 0:
301		bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map,
302		    BUS_DMASYNC_POSTREAD);
303		error = q->error;
304		break;
305	}
306
307	if (error != 0) {
308		/* declare buffer to be free */
309		sc->flags &= ~FATM_REGS_INUSE;
310		cv_signal(&sc->cv_regs);
311		return (error);
312	}
313
314	/* swap if needed */
315	ptr = (uint32_t *)sc->reg_mem.mem;
316	for (i = 0; i < FATM_NREGS; i++)
317		ptr[i] = le32toh(ptr[i]) & 0xff;
318
319	return (0);
320}
321
322/*
323 * Read SUNI registers for the SUNI module.
324 *
325 * We assume, that we already hold the mutex.
326 */
327static int
328fatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *valp, u_int *np)
329{
330	int err;
331	int i;
332	struct fatm_softc *sc;
333
334	if (reg >= FATM_NREGS)
335		return (EINVAL);
336	if (reg + *np > FATM_NREGS)
337		*np = FATM_NREGS - reg;
338	sc = ifatm->ifp->if_softc;
339	FATM_CHECKLOCK(sc);
340
341	err = fatm_utopia_readregs_internal(sc);
342	if (err != 0)
343		return (err);
344
345	for (i = 0; i < *np; i++)
346		valp[i] = ((uint32_t *)sc->reg_mem.mem)[reg + i];
347
348	/* declare buffer to be free */
349	sc->flags &= ~FATM_REGS_INUSE;
350	cv_signal(&sc->cv_regs);
351
352	return (0);
353}
354
/*
 * Check whether the heart is beating. We remember the last heart beat and
 * compare it to the current one. If it appears stuck for 10 times, we have
 * a problem.
 *
 * Assume we hold the lock.
 */
static void
fatm_check_heartbeat(struct fatm_softc *sc)
{
	uint32_t h;

	FATM_CHECKLOCK(sc);

	h = READ4(sc, FATMO_HEARTBEAT);
	DBG(sc, BEAT, ("heartbeat %08x", h));

	/* already declared dead - don't log again */
	if (sc->stop_cnt == 10)
		return;

	if (h == sc->heartbeat) {
		if (++sc->stop_cnt == 10) {
			log(LOG_ERR, "i960 stopped???\n");
			/* NOTE(review): writing 1 to HIMR presumably masks the
			 * card's host interrupt - confirm against hw docs */
			WRITE4(sc, FATMO_HIMR, 1);
		}
		return;
	}

	/* the heartbeat advanced - the processor is alive */
	sc->stop_cnt = 0;
	sc->heartbeat = h;
}
386
387/*
388 * Ensure that the heart is still beating.
389 */
390static void
391fatm_watchdog(struct ifnet *ifp)
392{
393	struct fatm_softc *sc = ifp->if_softc;
394
395	FATM_LOCK(sc);
396	if (ifp->if_flags & IFF_RUNNING) {
397		fatm_check_heartbeat(sc);
398		ifp->if_timer = 5;
399	}
400	FATM_UNLOCK(sc);
401}
402
/*
 * Hard reset the i960 on the board. This is done by initializing registers,
 * clearing interrupts and waiting for the selftest to finish. Not sure,
 * whether all these barriers are actually needed.
 *
 * Returns 0 when the self-test reports success, EIO on self-test failure
 * or when the card does not answer within ~100ms.
 *
 * Assumes that we hold the lock.
 */
static int
fatm_reset(struct fatm_softc *sc)
{
	int w;
	uint32_t val;

	FATM_CHECKLOCK(sc);

	WRITE4(sc, FATMO_APP_BASE, FATMO_COMMON_ORIGIN);
	BARRIER_W(sc);

	/* mark both directions of the virtual UART as idle */
	WRITE4(sc, FATMO_UART_TO_960, XMIT_READY);
	BARRIER_W(sc);

	WRITE4(sc, FATMO_UART_TO_HOST, XMIT_READY);
	BARRIER_W(sc);

	WRITE4(sc, FATMO_BOOT_STATUS, COLD_START);
	BARRIER_W(sc);

	/* pulse the reset bit in the host control register */
	WRITE1(sc, FATMO_HCR, FATM_HCR_RESET);
	BARRIER_W(sc);

	DELAY(1000);

	WRITE1(sc, FATMO_HCR, 0);
	BARRIER_RW(sc);

	DELAY(1000);

	/* poll the boot status word for up to 100 x 1ms */
	for (w = 100; w; w--) {
		BARRIER_R(sc);
		val = READ4(sc, FATMO_BOOT_STATUS);
		switch (val) {
		  case SELF_TEST_OK:
			return (0);
		  case SELF_TEST_FAIL:
			return (EIO);
		}
		DELAY(1000);
	}
	return (EIO);	/* timed out */
}
453
/*
 * Stop the card. Must be called WITH the lock held
 * Reset, free transmit and receive buffers. Wakeup everybody who may sleep.
 */
static void
fatm_stop(struct fatm_softc *sc)
{
	int i;
	struct cmdqueue *q;
	struct rbuf *rb;
	struct txqueue *tx;
	uint32_t stat;

	FATM_CHECKLOCK(sc);

	/* Stop the board */
	utopia_stop(&sc->utopia);
	(void)fatm_reset(sc);

	/* stop watchdog */
	sc->ifp->if_timer = 0;

	if (sc->ifp->if_flags & IFF_RUNNING) {
		sc->ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
		    sc->utopia.carrier == UTP_CARR_OK);

		/*
		 * Collect transmit mbufs, partial receive mbufs and
		 * supplied mbufs
		 */
		for (i = 0; i < FATM_TX_QLEN; i++) {
			tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
			if (tx->m) {
				bus_dmamap_unload(sc->tx_tag, tx->map);
				m_freem(tx->m);
				tx->m = NULL;
			}
		}

		/* Collect supplied mbufs */
		while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
			LIST_REMOVE(rb, link);
			bus_dmamap_unload(sc->rbuf_tag, rb->map);
			m_free(rb->m);
			rb->m = NULL;
			LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
		}

		/* Unwait any waiters */
		wakeup(&sc->sadi_mem);

		/* wakeup all threads waiting for STAT or REG buffers */
		cv_broadcast(&sc->cv_stat);
		cv_broadcast(&sc->cv_regs);

		sc->flags &= ~(FATM_STAT_INUSE | FATM_REGS_INUSE);

		/* wakeup all threads waiting on commands */
		for (i = 0; i < FATM_CMD_QLEN; i++) {
			q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, i);

			H_SYNCSTAT_POSTREAD(sc, q->q.statp);
			if ((stat = H_GETSTAT(q->q.statp)) != FATM_STAT_FREE) {
				/* flag the command as failed so woken
				 * sleepers see an error status */
				H_SETSTAT(q->q.statp, stat | FATM_STAT_ERROR);
				H_SYNCSTAT_PREWRITE(sc, q->q.statp);
				wakeup(q);
			}
		}
		utopia_reset_media(&sc->utopia);
	}
	sc->small_cnt = sc->large_cnt = 0;

	/* Reset vcc info */
	if (sc->vccs != NULL) {
		sc->open_vccs = 0;
		for (i = 0; i < FORE_MAX_VCC + 1; i++) {
			if (sc->vccs[i] != NULL) {
				/* VCCs that are neither open nor being opened
				 * are freed; open ones are kept allocated with
				 * cleared flags and re-counted */
				if ((sc->vccs[i]->vflags & (FATM_VCC_OPEN |
				    FATM_VCC_TRY_OPEN)) == 0) {
					uma_zfree(sc->vcc_zone, sc->vccs[i]);
					sc->vccs[i] = NULL;
				} else {
					sc->vccs[i]->vflags = 0;
					sc->open_vccs++;
				}
			}
		}
	}

}
545
/*
 * Load the firmware into the board and save the entry point.
 * The image (array 'firmware' from firmware.h) starts with a struct
 * firmware header that holds the load offset and the entry address.
 */
static uint32_t
firmware_load(struct fatm_softc *sc)
{
	struct firmware *fw = (struct firmware *)firmware;

	DBG(sc, INIT, ("loading - entry=%x", fw->entry));
	/* copy the whole image word-wise into card memory */
	bus_space_write_region_4(sc->memt, sc->memh, fw->offset, firmware,
	    sizeof(firmware) / sizeof(firmware[0]));
	BARRIER_RW(sc);

	return (fw->entry);
}
561
562/*
563 * Read a character from the virtual UART. The availability of a character
564 * is signaled by a non-null value of the 32 bit register. The eating of
565 * the character by us is signalled to the card by setting that register
566 * to zero.
567 */
568static int
569rx_getc(struct fatm_softc *sc)
570{
571	int w = 50;
572	int c;
573
574	while (w--) {
575		c = READ4(sc, FATMO_UART_TO_HOST);
576		BARRIER_RW(sc);
577		if (c != 0) {
578			WRITE4(sc, FATMO_UART_TO_HOST, 0);
579			DBGC(sc, UART, ("%c", c & 0xff));
580			return (c & 0xff);
581		}
582		DELAY(1000);
583	}
584	return (-1);
585}
586
/*
 * Discard any characters pending in the board's virtual UART.
 */
static void
rx_flush(struct fatm_softc *sc)
{
	int tries;

	for (tries = 10000; tries > 0; tries--) {
		if (rx_getc(sc) < 0)
			break;
	}
}
598
/*
 * Write a character to the card. The UART is available if the register
 * is zero. Gives up after roughly 10ms and returns -1.
 */
static int
tx_putc(struct fatm_softc *sc, u_char c)
{
	int w = 10;
	int c1;

	while (w--) {
		c1 = READ4(sc, FATMO_UART_TO_960);
		BARRIER_RW(sc);
		if (c1 == 0) {
			/* CHAR_AVAIL marks the byte as valid for the card */
			WRITE4(sc, FATMO_UART_TO_960, c | CHAR_AVAIL);
			DBGC(sc, UART, ("%c", c & 0xff));
			return (0);
		}
		DELAY(1000);
	}
	return (-1);	/* card never became ready */
}
621
/*
 * Start the firmware. This is done by issuing a 'go' command with
 * the hex entry address of the firmware. Then we wait for the self-test to
 * succeed.
 *
 * NOTE(review): only four hex digits (the low 16 bits) of the entry
 * address are transmitted - presumably all entry points fit into 16
 * bits; confirm against the firmware image.
 */
static int
fatm_start_firmware(struct fatm_softc *sc, uint32_t start)
{
	static char hex[] = "0123456789abcdef";
	u_int w, val;

	DBG(sc, INIT, ("starting"));
	rx_flush(sc);
	tx_putc(sc, '\r');
	DELAY(1000);

	rx_flush(sc);

	/* type 'go ' - the card echoes each character */
	tx_putc(sc, 'g');
	(void)rx_getc(sc);
	tx_putc(sc, 'o');
	(void)rx_getc(sc);
	tx_putc(sc, ' ');
	(void)rx_getc(sc);

	/* the entry address as four hex digits, most significant first */
	tx_putc(sc, hex[(start >> 12) & 0xf]);
	(void)rx_getc(sc);
	tx_putc(sc, hex[(start >>  8) & 0xf]);
	(void)rx_getc(sc);
	tx_putc(sc, hex[(start >>  4) & 0xf]);
	(void)rx_getc(sc);
	tx_putc(sc, hex[(start >>  0) & 0xf]);
	(void)rx_getc(sc);

	tx_putc(sc, '\r');
	rx_flush(sc);

	/* wait up to 100 x 1ms for the firmware to come up */
	for (w = 100; w; w--) {
		BARRIER_R(sc);
		val = READ4(sc, FATMO_BOOT_STATUS);
		switch (val) {
		  case CP_RUNNING:
			return (0);
		  case SELF_TEST_FAIL:
			return (EIO);
		}
		DELAY(1000);
	}
	return (EIO);	/* timed out */
}
672
/*
 * Initialize one card and host queue.
 *
 * qlen      - number of queue entries
 * qel_size  - size of one host queue element (a struct fqelem is embedded
 *             at its start)
 * desc_size - size of the I/O descriptor block of one entry
 * off       - offset of the queue structure on the card
 * statpp    - running pointer into host status memory (advanced here)
 * cardstat  - running card DMA address of the status words (advanced here)
 * descp     - host address of the first descriptor block
 * carddesc  - card DMA address of the first descriptor block
 */
static void
init_card_queue(struct fatm_softc *sc, struct fqueue *queue, int qlen,
    size_t qel_size, size_t desc_size, cardoff_t off,
    u_char **statpp, uint32_t *cardstat, u_char *descp, uint32_t carddesc)
{
	struct fqelem *el = queue->chunk;

	while (qlen--) {
		el->card = off;
		off += 8;	/* size of card entry */

		/* carve one status word out of host status memory */
		el->statp = (uint32_t *)(*statpp);
		(*statpp) += sizeof(uint32_t);
		H_SETSTAT(el->statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, el->statp);

		/* tell the card where the status word lives */
		WRITE4(sc, el->card + FATMOS_STATP, (*cardstat));
		(*cardstat) += sizeof(uint32_t);

		el->ioblk = descp;
		descp += desc_size;
		el->card_ioblk = carddesc;
		carddesc += desc_size;

		/* advance to the next host element of this queue */
		el = (struct fqelem *)((u_char *)el + qel_size);
	}
	queue->tail = queue->head = 0;
}
704
705/*
706 * Issue the initialize operation to the card, wait for completion and
707 * initialize the on-board and host queue structures with offsets and
708 * addresses.
709 */
710static int
711fatm_init_cmd(struct fatm_softc *sc)
712{
713	int w, c;
714	u_char *statp;
715	uint32_t card_stat;
716	u_int cnt;
717	struct fqelem *el;
718	cardoff_t off;
719
720	DBG(sc, INIT, ("command"));
721	WRITE4(sc, FATMO_ISTAT, 0);
722	WRITE4(sc, FATMO_IMASK, 1);
723	WRITE4(sc, FATMO_HLOGGER, 0);
724
725	WRITE4(sc, FATMO_INIT + FATMOI_RECEIVE_TRESHOLD, 0);
726	WRITE4(sc, FATMO_INIT + FATMOI_NUM_CONNECT, FORE_MAX_VCC);
727	WRITE4(sc, FATMO_INIT + FATMOI_CQUEUE_LEN, FATM_CMD_QLEN);
728	WRITE4(sc, FATMO_INIT + FATMOI_TQUEUE_LEN, FATM_TX_QLEN);
729	WRITE4(sc, FATMO_INIT + FATMOI_RQUEUE_LEN, FATM_RX_QLEN);
730	WRITE4(sc, FATMO_INIT + FATMOI_RPD_EXTENSION, RPD_EXTENSIONS);
731	WRITE4(sc, FATMO_INIT + FATMOI_TPD_EXTENSION, TPD_EXTENSIONS);
732
733	/*
734	 * initialize buffer descriptors
735	 */
736	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_QUEUE_LENGTH,
737	    SMALL_SUPPLY_QLEN);
738	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_BUFFER_SIZE,
739	    SMALL_BUFFER_LEN);
740	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_POOL_SIZE,
741	    SMALL_POOL_SIZE);
742	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_SUPPLY_BLKSIZE,
743	    SMALL_SUPPLY_BLKSIZE);
744
745	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_QUEUE_LENGTH,
746	    LARGE_SUPPLY_QLEN);
747	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_BUFFER_SIZE,
748	    LARGE_BUFFER_LEN);
749	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_POOL_SIZE,
750	    LARGE_POOL_SIZE);
751	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_SUPPLY_BLKSIZE,
752	    LARGE_SUPPLY_BLKSIZE);
753
754	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_QUEUE_LENGTH, 0);
755	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_BUFFER_SIZE, 0);
756	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_POOL_SIZE, 0);
757	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_SUPPLY_BLKSIZE, 0);
758
759	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_QUEUE_LENGTH, 0);
760	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_BUFFER_SIZE, 0);
761	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_POOL_SIZE, 0);
762	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_SUPPLY_BLKSIZE, 0);
763
764	/*
765	 * Start the command
766	 */
767	BARRIER_W(sc);
768	WRITE4(sc, FATMO_INIT + FATMOI_STATUS, FATM_STAT_PENDING);
769	BARRIER_W(sc);
770	WRITE4(sc, FATMO_INIT + FATMOI_OP, FATM_OP_INITIALIZE);
771	BARRIER_W(sc);
772
773	/*
774	 * Busy wait for completion
775	 */
776	w = 100;
777	while (w--) {
778		c = READ4(sc, FATMO_INIT + FATMOI_STATUS);
779		BARRIER_R(sc);
780		if (c & FATM_STAT_COMPLETE)
781			break;
782		DELAY(1000);
783	}
784
785	if (c & FATM_STAT_ERROR)
786		return (EIO);
787
788	/*
789	 * Initialize the queues
790	 */
791	statp = sc->stat_mem.mem;
792	card_stat = sc->stat_mem.paddr;
793
794	/*
795	 * Command queue. This is special in that it's on the card.
796	 */
797	el = sc->cmdqueue.chunk;
798	off = READ4(sc, FATMO_COMMAND_QUEUE);
799	DBG(sc, INIT, ("cmd queue=%x", off));
800	for (cnt = 0; cnt < FATM_CMD_QLEN; cnt++) {
801		el = &((struct cmdqueue *)sc->cmdqueue.chunk + cnt)->q;
802
803		el->card = off;
804		off += 32;		/* size of card structure */
805
806		el->statp = (uint32_t *)statp;
807		statp += sizeof(uint32_t);
808		H_SETSTAT(el->statp, FATM_STAT_FREE);
809		H_SYNCSTAT_PREWRITE(sc, el->statp);
810
811		WRITE4(sc, el->card + FATMOC_STATP, card_stat);
812		card_stat += sizeof(uint32_t);
813	}
814	sc->cmdqueue.tail = sc->cmdqueue.head = 0;
815
816	/*
817	 * Now the other queues. These are in memory
818	 */
819	init_card_queue(sc, &sc->txqueue, FATM_TX_QLEN,
820	    sizeof(struct txqueue), TPD_SIZE,
821	    READ4(sc, FATMO_TRANSMIT_QUEUE),
822	    &statp, &card_stat, sc->txq_mem.mem, sc->txq_mem.paddr);
823
824	init_card_queue(sc, &sc->rxqueue, FATM_RX_QLEN,
825	    sizeof(struct rxqueue), RPD_SIZE,
826	    READ4(sc, FATMO_RECEIVE_QUEUE),
827	    &statp, &card_stat, sc->rxq_mem.mem, sc->rxq_mem.paddr);
828
829	init_card_queue(sc, &sc->s1queue, SMALL_SUPPLY_QLEN,
830	    sizeof(struct supqueue), BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE),
831	    READ4(sc, FATMO_SMALL_B1_QUEUE),
832	    &statp, &card_stat, sc->s1q_mem.mem, sc->s1q_mem.paddr);
833
834	init_card_queue(sc, &sc->l1queue, LARGE_SUPPLY_QLEN,
835	    sizeof(struct supqueue), BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE),
836	    READ4(sc, FATMO_LARGE_B1_QUEUE),
837	    &statp, &card_stat, sc->l1q_mem.mem, sc->l1q_mem.paddr);
838
839	sc->txcnt = 0;
840
841	return (0);
842}
843
844/*
845 * Read PROM. Called only from attach code. Here we spin because the interrupt
846 * handler is not yet set up.
847 */
848static int
849fatm_getprom(struct fatm_softc *sc)
850{
851	int i;
852	struct prom *prom;
853	struct cmdqueue *q;
854
855	DBG(sc, INIT, ("reading prom"));
856	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
857	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
858
859	q->error = 0;
860	q->cb = NULL;;
861	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
862	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
863
864	bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
865	    BUS_DMASYNC_PREREAD);
866
867	WRITE4(sc, q->q.card + FATMOC_GPROM_BUF, sc->prom_mem.paddr);
868	BARRIER_W(sc);
869	WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_GET_PROM_DATA);
870	BARRIER_W(sc);
871
872	for (i = 0; i < 1000; i++) {
873		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
874		if (H_GETSTAT(q->q.statp) &
875		    (FATM_STAT_COMPLETE | FATM_STAT_ERROR))
876			break;
877		DELAY(1000);
878	}
879	if (i == 1000) {
880		if_printf(sc->ifp, "getprom timeout\n");
881		return (EIO);
882	}
883	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
884	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
885		if_printf(sc->ifp, "getprom error\n");
886		return (EIO);
887	}
888	H_SETSTAT(q->q.statp, FATM_STAT_FREE);
889	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
890	NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
891
892	bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
893	    BUS_DMASYNC_POSTREAD);
894
895
896#ifdef notdef
897	{
898		u_int i;
899
900		printf("PROM: ");
901		u_char *ptr = (u_char *)sc->prom_mem.mem;
902		for (i = 0; i < sizeof(struct prom); i++)
903			printf("%02x ", *ptr++);
904		printf("\n");
905	}
906#endif
907
908	prom = (struct prom *)sc->prom_mem.mem;
909
910	bcopy(prom->mac + 2, IFP2IFATM(sc->ifp)->mib.esi, 6);
911	IFP2IFATM(sc->ifp)->mib.serial = le32toh(prom->serial);
912	IFP2IFATM(sc->ifp)->mib.hw_version = le32toh(prom->version);
913	IFP2IFATM(sc->ifp)->mib.sw_version = READ4(sc, FATMO_FIRMWARE_RELEASE);
914
915	if_printf(sc->ifp, "ESI=%02x:%02x:%02x:%02x:%02x:%02x "
916	    "serial=%u hw=0x%x sw=0x%x\n", IFP2IFATM(sc->ifp)->mib.esi[0],
917	    IFP2IFATM(sc->ifp)->mib.esi[1], IFP2IFATM(sc->ifp)->mib.esi[2], IFP2IFATM(sc->ifp)->mib.esi[3],
918	    IFP2IFATM(sc->ifp)->mib.esi[4], IFP2IFATM(sc->ifp)->mib.esi[5], IFP2IFATM(sc->ifp)->mib.serial,
919	    IFP2IFATM(sc->ifp)->mib.hw_version, IFP2IFATM(sc->ifp)->mib.sw_version);
920
921	return (0);
922}
923
924/*
925 * This is the callback function for bus_dmamap_load. We assume, that we
926 * have a 32-bit bus and so have always one segment.
927 */
928static void
929dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
930{
931	bus_addr_t *ptr = (bus_addr_t *)arg;
932
933	if (error != 0) {
934		printf("%s: error=%d\n", __func__, error);
935		return;
936	}
937	KASSERT(nsegs == 1, ("too many DMA segments"));
938	KASSERT(segs[0].ds_addr <= 0xffffffff, ("DMA address too large %lx",
939	    (u_long)segs[0].ds_addr));
940
941	*ptr = segs[0].ds_addr;
942}
943
/*
 * Allocate a chunk of DMA-able memory and map it.
 * mem->size and mem->align must be set by the caller; on success
 * mem->mem, mem->paddr, mem->dmat and mem->map are valid. On any
 * failure mem->mem is left NULL so destroy_dma_memory() is safe.
 */
static int
alloc_dma_memory(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
{
	int error;

	mem->mem = NULL;

	/* 32-bit addressable, one contiguous segment */
	if (bus_dma_tag_create(sc->parent_dmat, mem->align, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, mem->size, 1, BUS_SPACE_MAXSIZE_32BIT,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
		if_printf(sc->ifp, "could not allocate %s DMA tag\n",
		    nm);
		return (ENOMEM);
	}

	error = bus_dmamem_alloc(mem->dmat, &mem->mem, 0, &mem->map);
	if (error) {
		if_printf(sc->ifp, "could not allocate %s DMA memory: "
		    "%d\n", nm, error);
		/* unwind: only the tag exists at this point */
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
	if (error) {
		if_printf(sc->ifp, "could not load %s DMA memory: "
		    "%d\n", nm, error);
		/* unwind in reverse order of construction */
		bus_dmamem_free(mem->dmat, mem->mem, mem->map);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	DBG(sc, DMA, ("DMA %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
	    (u_long)mem->paddr, mem->size, mem->align));

	return (0);
}
988
989#ifdef TEST_DMA_SYNC
990static int
991alloc_dma_memoryX(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
992{
993	int error;
994
995	mem->mem = NULL;
996
997	if (bus_dma_tag_create(NULL, mem->align, 0,
998	    BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR,
999	    NULL, NULL, mem->size, 1, mem->size,
1000	    BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
1001		if_printf(sc->ifp, "could not allocate %s DMA tag\n",
1002		    nm);
1003		return (ENOMEM);
1004	}
1005
1006	mem->mem = contigmalloc(mem->size, M_DEVBUF, M_WAITOK,
1007	    BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR_32BIT, mem->align, 0);
1008
1009	error = bus_dmamap_create(mem->dmat, 0, &mem->map);
1010	if (error) {
1011		if_printf(sc->ifp, "could not allocate %s DMA map: "
1012		    "%d\n", nm, error);
1013		contigfree(mem->mem, mem->size, M_DEVBUF);
1014		bus_dma_tag_destroy(mem->dmat);
1015		mem->mem = NULL;
1016		return (error);
1017	}
1018
1019	error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
1020	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
1021	if (error) {
1022		if_printf(sc->ifp, "could not load %s DMA memory: "
1023		    "%d\n", nm, error);
1024		bus_dmamap_destroy(mem->dmat, mem->map);
1025		contigfree(mem->mem, mem->size, M_DEVBUF);
1026		bus_dma_tag_destroy(mem->dmat);
1027		mem->mem = NULL;
1028		return (error);
1029	}
1030
1031	DBG(sc, DMA, ("DMAX %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
1032	    (u_long)mem->paddr, mem->size, mem->align));
1033
1034	printf("DMAX: %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
1035	    (u_long)mem->paddr, mem->size, mem->align);
1036
1037	return (0);
1038}
1039#endif /* TEST_DMA_SYNC */
1040
1041/*
1042 * Destroy all resources of an dma-able memory chunk
1043 */
1044static void
1045destroy_dma_memory(struct fatm_mem *mem)
1046{
1047	if (mem->mem != NULL) {
1048		bus_dmamap_unload(mem->dmat, mem->map);
1049		bus_dmamem_free(mem->dmat, mem->mem, mem->map);
1050		bus_dma_tag_destroy(mem->dmat);
1051		mem->mem = NULL;
1052	}
1053}
1054#ifdef TEST_DMA_SYNC
1055static void
1056destroy_dma_memoryX(struct fatm_mem *mem)
1057{
1058	if (mem->mem != NULL) {
1059		bus_dmamap_unload(mem->dmat, mem->map);
1060		bus_dmamap_destroy(mem->dmat, mem->map);
1061		contigfree(mem->mem, mem->size, M_DEVBUF);
1062		bus_dma_tag_destroy(mem->dmat);
1063		mem->mem = NULL;
1064	}
1065}
1066#endif /* TEST_DMA_SYNC */
1067
1068/*
1069 * Try to supply buffers to the card if there are free entries in the queues
1070 */
1071static void
1072fatm_supply_small_buffers(struct fatm_softc *sc)
1073{
1074	int nblocks, nbufs;
1075	struct supqueue *q;
1076	struct rbd *bd;
1077	int i, j, error, cnt;
1078	struct mbuf *m;
1079	struct rbuf *rb;
1080	bus_addr_t phys;
1081
1082	nbufs = max(4 * sc->open_vccs, 32);
1083	nbufs = min(nbufs, SMALL_POOL_SIZE);
1084	nbufs -= sc->small_cnt;
1085
1086	nblocks = (nbufs + SMALL_SUPPLY_BLKSIZE - 1) / SMALL_SUPPLY_BLKSIZE;
1087	for (cnt = 0; cnt < nblocks; cnt++) {
1088		q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.head);
1089
1090		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1091		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
1092			break;
1093
1094		bd = (struct rbd *)q->q.ioblk;
1095
1096		for (i = 0; i < SMALL_SUPPLY_BLKSIZE; i++) {
1097			if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
1098				if_printf(sc->ifp, "out of rbufs\n");
1099				break;
1100			}
1101			MGETHDR(m, M_DONTWAIT, MT_DATA);
1102			if (m == NULL) {
1103				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1104				break;
1105			}
1106			MH_ALIGN(m, SMALL_BUFFER_LEN);
1107			error = bus_dmamap_load(sc->rbuf_tag, rb->map,
1108			    m->m_data, SMALL_BUFFER_LEN, dmaload_helper,
1109			    &phys, BUS_DMA_NOWAIT);
1110			if (error) {
1111				if_printf(sc->ifp,
1112				    "dmamap_load mbuf failed %d", error);
1113				m_freem(m);
1114				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1115				break;
1116			}
1117			bus_dmamap_sync(sc->rbuf_tag, rb->map,
1118			    BUS_DMASYNC_PREREAD);
1119
1120			LIST_REMOVE(rb, link);
1121			LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);
1122
1123			rb->m = m;
1124			bd[i].handle = rb - sc->rbufs;
1125			H_SETDESC(bd[i].buffer, phys);
1126		}
1127
1128		if (i < SMALL_SUPPLY_BLKSIZE) {
1129			for (j = 0; j < i; j++) {
1130				rb = sc->rbufs + bd[j].handle;
1131				bus_dmamap_unload(sc->rbuf_tag, rb->map);
1132				m_free(rb->m);
1133				rb->m = NULL;
1134
1135				LIST_REMOVE(rb, link);
1136				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1137			}
1138			break;
1139		}
1140		H_SYNCQ_PREWRITE(&sc->s1q_mem, bd,
1141		    sizeof(struct rbd) * SMALL_SUPPLY_BLKSIZE);
1142
1143		H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1144		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1145
1146		WRITE4(sc, q->q.card, q->q.card_ioblk);
1147		BARRIER_W(sc);
1148
1149		sc->small_cnt += SMALL_SUPPLY_BLKSIZE;
1150
1151		NEXT_QUEUE_ENTRY(sc->s1queue.head, SMALL_SUPPLY_QLEN);
1152	}
1153}
1154
1155/*
1156 * Try to supply buffers to the card if there are free entries in the queues
1157 * We assume that all buffers are within the address space accessible by the
1158 * card (32-bit), so we don't need bounce buffers.
1159 */
1160static void
1161fatm_supply_large_buffers(struct fatm_softc *sc)
1162{
1163	int nbufs, nblocks, cnt;
1164	struct supqueue *q;
1165	struct rbd *bd;
1166	int i, j, error;
1167	struct mbuf *m;
1168	struct rbuf *rb;
1169	bus_addr_t phys;
1170
1171	nbufs = max(4 * sc->open_vccs, 32);
1172	nbufs = min(nbufs, LARGE_POOL_SIZE);
1173	nbufs -= sc->large_cnt;
1174
1175	nblocks = (nbufs + LARGE_SUPPLY_BLKSIZE - 1) / LARGE_SUPPLY_BLKSIZE;
1176
1177	for (cnt = 0; cnt < nblocks; cnt++) {
1178		q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.head);
1179
1180		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1181		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
1182			break;
1183
1184		bd = (struct rbd *)q->q.ioblk;
1185
1186		for (i = 0; i < LARGE_SUPPLY_BLKSIZE; i++) {
1187			if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
1188				if_printf(sc->ifp, "out of rbufs\n");
1189				break;
1190			}
1191			if ((m = m_getcl(M_DONTWAIT, MT_DATA,
1192			    M_PKTHDR)) == NULL) {
1193				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1194				break;
1195			}
1196			/* No MEXT_ALIGN */
1197			m->m_data += MCLBYTES - LARGE_BUFFER_LEN;
1198			error = bus_dmamap_load(sc->rbuf_tag, rb->map,
1199			    m->m_data, LARGE_BUFFER_LEN, dmaload_helper,
1200			    &phys, BUS_DMA_NOWAIT);
1201			if (error) {
1202				if_printf(sc->ifp,
1203				    "dmamap_load mbuf failed %d", error);
1204				m_freem(m);
1205				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1206				break;
1207			}
1208
1209			bus_dmamap_sync(sc->rbuf_tag, rb->map,
1210			    BUS_DMASYNC_PREREAD);
1211
1212			LIST_REMOVE(rb, link);
1213			LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);
1214
1215			rb->m = m;
1216			bd[i].handle = rb - sc->rbufs;
1217			H_SETDESC(bd[i].buffer, phys);
1218		}
1219
1220		if (i < LARGE_SUPPLY_BLKSIZE) {
1221			for (j = 0; j < i; j++) {
1222				rb = sc->rbufs + bd[j].handle;
1223				bus_dmamap_unload(sc->rbuf_tag, rb->map);
1224				m_free(rb->m);
1225				rb->m = NULL;
1226
1227				LIST_REMOVE(rb, link);
1228				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1229			}
1230			break;
1231		}
1232		H_SYNCQ_PREWRITE(&sc->l1q_mem, bd,
1233		    sizeof(struct rbd) * LARGE_SUPPLY_BLKSIZE);
1234
1235		H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1236		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1237		WRITE4(sc, q->q.card, q->q.card_ioblk);
1238		BARRIER_W(sc);
1239
1240		sc->large_cnt += LARGE_SUPPLY_BLKSIZE;
1241
1242		NEXT_QUEUE_ENTRY(sc->l1queue.head, LARGE_SUPPLY_QLEN);
1243	}
1244}
1245
1246
1247/*
1248 * Actually start the card. The lock must be held here.
1249 * Reset, load the firmware, start it, initializes queues, read the PROM
1250 * and supply receive buffers to the card.
1251 */
static void
fatm_init_locked(struct fatm_softc *sc)
{
	struct rxqueue *q;
	int i, c, error;
	uint32_t start;

	DBG(sc, INIT, ("initialize"));
	/* restart from a clean state if the interface is already up */
	if (sc->ifp->if_flags & IFF_RUNNING)
		fatm_stop(sc);

	/*
	 * Hard reset the board
	 */
	if (fatm_reset(sc))
		return;

	/*
	 * Download and boot the firmware, set up the command queue and
	 * read the PROM. Any failure leaves the card reset.
	 */
	start = firmware_load(sc);
	if (fatm_start_firmware(sc, start) || fatm_init_cmd(sc) ||
	    fatm_getprom(sc)) {
		fatm_reset(sc);
		return;
	}

	/*
	 * Handle media: map the card's media type to the MIB media code
	 * and the peak cell rate (cells/second).
	 */
	c = READ4(sc, FATMO_MEDIA_TYPE);
	switch (c) {

	  case FORE_MT_TAXI_100:
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_TAXI_100;
		IFP2IFATM(sc->ifp)->mib.pcr = 227273;
		break;

	  case FORE_MT_TAXI_140:
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_TAXI_140;
		IFP2IFATM(sc->ifp)->mib.pcr = 318181;
		break;

	  case FORE_MT_UTP_SONET:
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155;
		IFP2IFATM(sc->ifp)->mib.pcr = 353207;
		break;

	  case FORE_MT_MM_OC3_ST:
	  case FORE_MT_MM_OC3_SC:
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155;
		IFP2IFATM(sc->ifp)->mib.pcr = 353207;
		break;

	  case FORE_MT_SM_OC3_ST:
	  case FORE_MT_SM_OC3_SC:
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_155;
		IFP2IFATM(sc->ifp)->mib.pcr = 353207;
		break;

	  default:
		log(LOG_ERR, "fatm: unknown media type %d\n", c);
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN;
		IFP2IFATM(sc->ifp)->mib.pcr = 353207;
		break;
	}
	/* 53 bytes per cell, 8 bits per byte */
	sc->ifp->if_baudrate = 53 * 8 * IFP2IFATM(sc->ifp)->mib.pcr;
	utopia_init_media(&sc->utopia);

	/*
	 * Initialize the RBDs
	 */
	for (i = 0; i < FATM_RX_QLEN; i++) {
		q = GET_QUEUE(sc->rxqueue, struct rxqueue, i);
		WRITE4(sc, q->q.card + 0, q->q.card_ioblk);
	}
	BARRIER_W(sc);

	/*
	 * Supply buffers to the card
	 */
	fatm_supply_small_buffers(sc);
	fatm_supply_large_buffers(sc);

	/*
	 * Now set flags, that we are ready
	 */
	sc->ifp->if_flags |= IFF_RUNNING;

	/*
	 * Start the watchdog timer
	 */
	sc->ifp->if_timer = 5;

	/* start SUNI */
	utopia_start(&sc->utopia);

	ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
	    sc->utopia.carrier == UTP_CARR_OK);

	/*
	 * Re-open all channels that were open before the (re-)init.
	 * A failed re-open just clears the flag for that VCC.
	 */
	for (i = 0; i < FORE_MAX_VCC + 1; i++)
		if (sc->vccs[i] != NULL) {
			sc->vccs[i]->vflags |= FATM_VCC_REOPEN;
			error = fatm_load_vc(sc, sc->vccs[i]);
			if (error != 0) {
				if_printf(sc->ifp, "reopening %u "
				    "failed: %d\n", i, error);
				sc->vccs[i]->vflags &= ~FATM_VCC_REOPEN;
			}
		}

	DBG(sc, INIT, ("done"));
}
1363
1364/*
1365 * This is the exported as initialisation function.
1366 */
1367static void
1368fatm_init(void *p)
1369{
1370	struct fatm_softc *sc = p;
1371
1372	FATM_LOCK(sc);
1373	fatm_init_locked(sc);
1374	FATM_UNLOCK(sc);
1375}
1376
1377/************************************************************/
1378/*
1379 * The INTERRUPT handling
1380 */
1381/*
1382 * Check the command queue. If a command was completed, call the completion
1383 * function for that command.
1384 */
1385static void
1386fatm_intr_drain_cmd(struct fatm_softc *sc)
1387{
1388	struct cmdqueue *q;
1389	int stat;
1390
1391	/*
1392	 * Drain command queue
1393	 */
1394	for (;;) {
1395		q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.tail);
1396
1397		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1398		stat = H_GETSTAT(q->q.statp);
1399
1400		if (stat != FATM_STAT_COMPLETE &&
1401		   stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
1402		   stat != FATM_STAT_ERROR)
1403			break;
1404
1405		(*q->cb)(sc, q);
1406
1407		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1408		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1409
1410		NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
1411	}
1412}
1413
1414/*
1415 * Drain the small buffer supply queue.
1416 */
1417static void
1418fatm_intr_drain_small_buffers(struct fatm_softc *sc)
1419{
1420	struct supqueue *q;
1421	int stat;
1422
1423	for (;;) {
1424		q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.tail);
1425
1426		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1427		stat = H_GETSTAT(q->q.statp);
1428
1429		if ((stat & FATM_STAT_COMPLETE) == 0)
1430			break;
1431		if (stat & FATM_STAT_ERROR)
1432			log(LOG_ERR, "%s: status %x\n", __func__, stat);
1433
1434		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1435		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1436
1437		NEXT_QUEUE_ENTRY(sc->s1queue.tail, SMALL_SUPPLY_QLEN);
1438	}
1439}
1440
1441/*
1442 * Drain the large buffer supply queue.
1443 */
1444static void
1445fatm_intr_drain_large_buffers(struct fatm_softc *sc)
1446{
1447	struct supqueue *q;
1448	int stat;
1449
1450	for (;;) {
1451		q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.tail);
1452
1453		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1454		stat = H_GETSTAT(q->q.statp);
1455
1456		if ((stat & FATM_STAT_COMPLETE) == 0)
1457			break;
1458		if (stat & FATM_STAT_ERROR)
1459			log(LOG_ERR, "%s status %x\n", __func__, stat);
1460
1461		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1462		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1463
1464		NEXT_QUEUE_ENTRY(sc->l1queue.tail, LARGE_SUPPLY_QLEN);
1465	}
1466}
1467
1468/*
1469 * Check the receive queue. Send any received PDU up the protocol stack
1470 * (except when there was an error or the VCI appears to be closed. In this
1471 * case discard the PDU).
1472 */
1473static void
1474fatm_intr_drain_rx(struct fatm_softc *sc)
1475{
1476	struct rxqueue *q;
1477	int stat, mlen;
1478	u_int i;
1479	uint32_t h;
1480	struct mbuf *last, *m0;
1481	struct rpd *rpd;
1482	struct rbuf *rb;
1483	u_int vci, vpi, pt;
1484	struct atm_pseudohdr aph;
1485	struct ifnet *ifp;
1486	struct card_vcc *vc;
1487
1488	for (;;) {
1489		q = GET_QUEUE(sc->rxqueue, struct rxqueue, sc->rxqueue.tail);
1490
1491		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1492		stat = H_GETSTAT(q->q.statp);
1493
1494		if ((stat & FATM_STAT_COMPLETE) == 0)
1495			break;
1496
1497		rpd = (struct rpd *)q->q.ioblk;
1498		H_SYNCQ_POSTREAD(&sc->rxq_mem, rpd, RPD_SIZE);
1499
1500		rpd->nseg = le32toh(rpd->nseg);
1501		mlen = 0;
1502		m0 = last = 0;
1503		for (i = 0; i < rpd->nseg; i++) {
1504			rb = sc->rbufs + rpd->segment[i].handle;
1505			if (m0 == NULL) {
1506				m0 = last = rb->m;
1507			} else {
1508				last->m_next = rb->m;
1509				last = rb->m;
1510			}
1511			last->m_next = NULL;
1512			if (last->m_flags & M_EXT)
1513				sc->large_cnt--;
1514			else
1515				sc->small_cnt--;
1516			bus_dmamap_sync(sc->rbuf_tag, rb->map,
1517			    BUS_DMASYNC_POSTREAD);
1518			bus_dmamap_unload(sc->rbuf_tag, rb->map);
1519			rb->m = NULL;
1520
1521			LIST_REMOVE(rb, link);
1522			LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
1523
1524			last->m_len = le32toh(rpd->segment[i].length);
1525			mlen += last->m_len;
1526		}
1527
1528		m0->m_pkthdr.len = mlen;
1529		m0->m_pkthdr.rcvif = sc->ifp;
1530
1531		h = le32toh(rpd->atm_header);
1532		vpi = (h >> 20) & 0xff;
1533		vci = (h >> 4 ) & 0xffff;
1534		pt  = (h >> 1 ) & 0x7;
1535
1536		/*
1537		 * Locate the VCC this packet belongs to
1538		 */
1539		if (!VC_OK(sc, vpi, vci))
1540			vc = NULL;
1541		else if ((vc = sc->vccs[vci]) == NULL ||
1542		    !(sc->vccs[vci]->vflags & FATM_VCC_OPEN)) {
1543			sc->istats.rx_closed++;
1544			vc = NULL;
1545		}
1546
1547		DBG(sc, RCV, ("RCV: vc=%u.%u pt=%u mlen=%d %s", vpi, vci,
1548		    pt, mlen, vc == NULL ? "dropped" : ""));
1549
1550		if (vc == NULL) {
1551			m_freem(m0);
1552		} else {
1553			ATM_PH_FLAGS(&aph) = vc->param.flags;
1554			ATM_PH_VPI(&aph) = vpi;
1555			ATM_PH_SETVCI(&aph, vci);
1556
1557			ifp = sc->ifp;
1558			ifp->if_ipackets++;
1559
1560			vc->ipackets++;
1561			vc->ibytes += m0->m_pkthdr.len;
1562
1563			atm_input(ifp, &aph, m0, vc->rxhand);
1564		}
1565
1566		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1567		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1568
1569		WRITE4(sc, q->q.card, q->q.card_ioblk);
1570		BARRIER_W(sc);
1571
1572		NEXT_QUEUE_ENTRY(sc->rxqueue.tail, FATM_RX_QLEN);
1573	}
1574}
1575
1576/*
1577 * Check the transmit queue. Free the mbuf chains that we were transmitting.
1578 */
1579static void
1580fatm_intr_drain_tx(struct fatm_softc *sc)
1581{
1582	struct txqueue *q;
1583	int stat;
1584
1585	/*
1586	 * Drain tx queue
1587	 */
1588	for (;;) {
1589		q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.tail);
1590
1591		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1592		stat = H_GETSTAT(q->q.statp);
1593
1594		if (stat != FATM_STAT_COMPLETE &&
1595		    stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
1596		    stat != FATM_STAT_ERROR)
1597			break;
1598
1599		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
1600		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1601
1602		bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_POSTWRITE);
1603		bus_dmamap_unload(sc->tx_tag, q->map);
1604
1605		m_freem(q->m);
1606		q->m = NULL;
1607		sc->txcnt--;
1608
1609		NEXT_QUEUE_ENTRY(sc->txqueue.tail, FATM_TX_QLEN);
1610	}
1611}
1612
1613/*
1614 * Interrupt handler
1615 */
1616static void
1617fatm_intr(void *p)
1618{
1619	struct fatm_softc *sc = (struct fatm_softc *)p;
1620
1621	FATM_LOCK(sc);
1622	if (!READ4(sc, FATMO_PSR)) {
1623		FATM_UNLOCK(sc);
1624		return;
1625	}
1626	WRITE4(sc, FATMO_HCR, FATM_HCR_CLRIRQ);
1627
1628	if (!(sc->ifp->if_flags & IFF_RUNNING)) {
1629		FATM_UNLOCK(sc);
1630		return;
1631	}
1632	fatm_intr_drain_cmd(sc);
1633	fatm_intr_drain_rx(sc);
1634	fatm_intr_drain_tx(sc);
1635	fatm_intr_drain_small_buffers(sc);
1636	fatm_intr_drain_large_buffers(sc);
1637	fatm_supply_small_buffers(sc);
1638	fatm_supply_large_buffers(sc);
1639
1640	FATM_UNLOCK(sc);
1641
1642	if (sc->retry_tx && _IF_QLEN(&sc->ifp->if_snd))
1643		(*sc->ifp->if_start)(sc->ifp);
1644}
1645
1646/*
1647 * Get device statistics. This must be called with the softc locked.
1648 * We use a preallocated buffer, so we need to protect this buffer.
1649 * We do this by using a condition variable and a flag. If the flag is set
1650 * the buffer is in use by one thread (one thread is executing a GETSTAT
1651 * card command). In this case all other threads that are trying to get
1652 * statistics block on that condition variable. When the thread finishes
1653 * using the buffer it resets the flag and signals the condition variable. This
1654 * will wakeup the next thread that is waiting for the buffer. If the interface
1655 * is stopped the stopping function will broadcast the cv. All threads will
1656 * find that the interface has been stopped and return.
1657 *
 * Acquiring of the buffer is done by the fatm_getstat() function. The freeing
1659 * must be done by the caller when he has finished using the buffer.
1660 */
1661static void
1662fatm_getstat_complete(struct fatm_softc *sc, struct cmdqueue *q)
1663{
1664
1665	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1666	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
1667		sc->istats.get_stat_errors++;
1668		q->error = EIO;
1669	}
1670	wakeup(&sc->sadi_mem);
1671}
static int
fatm_getstat(struct fatm_softc *sc)
{
	int error;
	struct cmdqueue *q;

	/*
	 * Wait until either the interface is stopped or we can get the
	 * statistics buffer
	 */
	for (;;) {
		if (!(sc->ifp->if_flags & IFF_RUNNING))
			return (EIO);
		if (!(sc->flags & FATM_STAT_INUSE))
			break;
		cv_wait(&sc->cv_stat, &sc->mtx);
	}
	sc->flags |= FATM_STAT_INUSE;

	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);

	/* NOTE(review): on this error return FATM_STAT_INUSE stays set;
	 * per the protocol described above the caller must release the
	 * buffer - confirm all callers do so on error */
	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
		sc->istats.cmd_queue_full++;
		return (EIO);
	}
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = fatm_getstat_complete;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	/* the card DMAs the statistics into sadi_mem */
	bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
	    BUS_DMASYNC_PREREAD);

	WRITE4(sc, q->q.card + FATMOC_GSTAT_BUF,
	    sc->sadi_mem.paddr);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP,
	    FATM_OP_REQUEST_STATS | FATM_OP_INTERRUPT_SEL);
	BARRIER_W(sc);

	/*
	 * Wait for the command to complete
	 */
	error = msleep(&sc->sadi_mem, &sc->mtx, PZERO | PCATCH,
	    "fatm_stat", hz);

	switch (error) {

	  case EWOULDBLOCK:
		/* timed out after one second - card did not answer */
		error = EIO;
		break;

	  case ERESTART:
		error = EINTR;
		break;

	  case 0:
		bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
		    BUS_DMASYNC_POSTREAD);
		error = q->error;
		break;
	}

	/*
	 * Swap statistics from the card's big-endian layout to host order
	 */
	if (q->error == 0) {
		u_int i;
		uint32_t *p = (uint32_t *)sc->sadi_mem.mem;

		for (i = 0; i < sizeof(struct fatm_stats) / sizeof(uint32_t);
		    i++, p++)
			*p = be32toh(*p);
	}

	return (error);
}
1752
1753/*
1754 * Create a copy of a single mbuf. It can have either internal or
1755 * external data, it may have a packet header. External data is really
1756 * copied, so the new buffer is writeable.
1757 */
1758static struct mbuf *
1759copy_mbuf(struct mbuf *m)
1760{
1761	struct mbuf *new;
1762
1763	MGET(new, M_DONTWAIT, MT_DATA);
1764	if (new == NULL)
1765		return (NULL);
1766
1767	if (m->m_flags & M_PKTHDR) {
1768		M_MOVE_PKTHDR(new, m);
1769		if (m->m_len > MHLEN) {
1770			MCLGET(new, M_TRYWAIT);
1771			if ((m->m_flags & M_EXT) == 0) {
1772				m_free(new);
1773				return (NULL);
1774			}
1775		}
1776	} else {
1777		if (m->m_len > MLEN) {
1778			MCLGET(new, M_TRYWAIT);
1779			if ((m->m_flags & M_EXT) == 0) {
1780				m_free(new);
1781				return (NULL);
1782			}
1783		}
1784	}
1785
1786	bcopy(m->m_data, new->m_data, m->m_len);
1787	new->m_len = m->m_len;
1788	new->m_flags &= ~M_RDONLY;
1789
1790	return (new);
1791}
1792
1793/*
1794 * All segments must have a four byte aligned buffer address and a four
1795 * byte aligned length. Step through an mbuf chain and check these conditions.
1796 * If the buffer address is not aligned and this is a normal mbuf, move
1797 * the data down. Else make a copy of the mbuf with aligned data.
 * If the buffer length is not aligned steal data from the next mbuf.
 * We don't need to check whether this has more than one external reference,
 * because stealing data doesn't change the external cluster.
1801 * If the last mbuf is not aligned, fill with zeroes.
1802 *
1803 * Return packet length (well we should have this in the packet header),
1804 * but be careful not to count the zero fill at the end.
1805 *
1806 * If fixing fails free the chain and zero the pointer.
1807 *
1808 * We assume, that aligning the virtual address also aligns the mapped bus
1809 * address.
1810 */
static u_int
fatm_fix_chain(struct fatm_softc *sc, struct mbuf **mp)
{
	struct mbuf *m = *mp, *prev = NULL, *next, *new;
	u_int mlen = 0, fill = 0;
	int first, off;
	u_char *d, *cp;

	do {
		next = m->m_next;

		/* an mbuf needs fixing if its data pointer is misaligned,
		 * or its length is unaligned and it is not the last one */
		if ((uintptr_t)mtod(m, void *) % 4 != 0 ||
		   (m->m_len % 4 != 0 && next)) {
			/*
			 * Needs fixing
			 */
			first = (m == *mp);

			/*
			 * Fix a misaligned data pointer: move the data
			 * down in place if the buffer is writeable,
			 * otherwise replace the mbuf by a copy.
			 */
			d = mtod(m, u_char *);
			if ((off = (uintptr_t)(void *)d % 4) != 0) {
				if (!(m->m_flags & M_EXT) || !MEXT_IS_REF(m)) {
					sc->istats.fix_addr_copy++;
					bcopy(d, d - off, m->m_len);
					m->m_data = (caddr_t)(d - off);
				} else {
					if ((new = copy_mbuf(m)) == NULL) {
						sc->istats.fix_addr_noext++;
						goto fail;
					}
					sc->istats.fix_addr_ext++;
					if (prev)
						prev->m_next = new;
					new->m_next = next;
					m_free(m);
					m = new;
				}
			}

			/*
			 * Fix an unaligned length: steal bytes from the
			 * following mbufs (or zero-fill at the end of the
			 * chain) until the length is a multiple of four.
			 */
			if ((off = m->m_len % 4) != 0) {
				/* shared cluster - must copy before we may
				 * append stolen bytes to it */
				if ((m->m_flags & M_EXT) && MEXT_IS_REF(m)) {
					if ((new = copy_mbuf(m)) == NULL) {
						sc->istats.fix_len_noext++;
						goto fail;
					}
					sc->istats.fix_len_copy++;
					if (prev)
						prev->m_next = new;
					new->m_next = next;
					m_free(m);
					m = new;
				} else
					sc->istats.fix_len++;
				d = mtod(m, u_char *) + m->m_len;
				off = 4 - off;
				while (off) {
					if (next == NULL) {
						/* end of chain - pad; padding
						 * is not counted in mlen */
						*d++ = 0;
						fill++;
					} else if (next->m_len == 0) {
						sc->istats.fix_empty++;
						next = m_free(next);
						continue;
					} else {
						cp = mtod(next, u_char *);
						*d++ = *cp++;
						next->m_len--;
						next->m_data = (caddr_t)cp;
					}
					off--;
					m->m_len++;
				}
			}

			/* the head of the chain may have been replaced */
			if (first)
				*mp = m;
		}

		mlen += m->m_len;
		prev = m;
	} while ((m = next) != NULL);

	return (mlen - fill);

  fail:
	m_freem(*mp);
	*mp = NULL;
	return (0);
}
1899
1900/*
1901 * The helper function is used to load the computed physical addresses
1902 * into the transmit descriptor.
1903 */
1904static void
1905fatm_tpd_load(void *varg, bus_dma_segment_t *segs, int nsegs,
1906    bus_size_t mapsize, int error)
1907{
1908	struct tpd *tpd = varg;
1909
1910	if (error)
1911		return;
1912
1913	KASSERT(nsegs <= TPD_EXTENSIONS + TXD_FIXED, ("too many segments"));
1914
1915	tpd->spec = 0;
1916	while (nsegs--) {
1917		H_SETDESC(tpd->segment[tpd->spec].buffer, segs->ds_addr);
1918		H_SETDESC(tpd->segment[tpd->spec].length, segs->ds_len);
1919		tpd->spec++;
1920		segs++;
1921	}
1922}
1923
1924/*
1925 * Start output.
1926 *
1927 * Note, that we update the internal statistics without the lock here.
1928 */
1929static int
1930fatm_tx(struct fatm_softc *sc, struct mbuf *m, struct card_vcc *vc, u_int mlen)
1931{
1932	struct txqueue *q;
1933	u_int nblks;
1934	int error, aal, nsegs;
1935	struct tpd *tpd;
1936
1937	/*
1938	 * Get a queue element.
1939	 * If there isn't one - try to drain the transmit queue
1940	 * We used to sleep here if that doesn't help, but we
1941	 * should not sleep here, because we are called with locks.
1942	 */
1943	q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.head);
1944
1945	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1946	if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
1947		fatm_intr_drain_tx(sc);
1948		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
1949		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
1950			if (sc->retry_tx) {
1951				sc->istats.tx_retry++;
1952				IF_PREPEND(&sc->ifp->if_snd, m);
1953				return (1);
1954			}
1955			sc->istats.tx_queue_full++;
1956			m_freem(m);
1957			return (0);
1958		}
1959		sc->istats.tx_queue_almost_full++;
1960	}
1961
1962	tpd = q->q.ioblk;
1963
1964	m->m_data += sizeof(struct atm_pseudohdr);
1965	m->m_len -= sizeof(struct atm_pseudohdr);
1966
1967	/* map the mbuf */
1968	error = bus_dmamap_load_mbuf(sc->tx_tag, q->map, m,
1969	    fatm_tpd_load, tpd, BUS_DMA_NOWAIT);
1970	if(error) {
1971		sc->ifp->if_oerrors++;
1972		if_printf(sc->ifp, "mbuf loaded error=%d\n", error);
1973		m_freem(m);
1974		return (0);
1975	}
1976	nsegs = tpd->spec;
1977
1978	bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_PREWRITE);
1979
1980	/*
1981	 * OK. Now go and do it.
1982	 */
1983	aal = (vc->param.aal == ATMIO_AAL_5) ? 5 : 0;
1984
1985	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
1986	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
1987	q->m = m;
1988
1989	/*
1990	 * If the transmit queue is almost full, schedule a
1991	 * transmit interrupt so that transmit descriptors can
1992	 * be recycled.
1993	 */
1994	H_SETDESC(tpd->spec, TDX_MKSPEC((sc->txcnt >=
1995	    (4 * FATM_TX_QLEN) / 5), aal, nsegs, mlen));
1996	H_SETDESC(tpd->atm_header, TDX_MKHDR(vc->param.vpi,
1997	    vc->param.vci, 0, 0));
1998
1999	if (vc->param.traffic == ATMIO_TRAFFIC_UBR)
2000		H_SETDESC(tpd->stream, 0);
2001	else {
2002		u_int i;
2003
2004		for (i = 0; i < RATE_TABLE_SIZE; i++)
2005			if (rate_table[i].cell_rate < vc->param.tparam.pcr)
2006				break;
2007		if (i > 0)
2008			i--;
2009		H_SETDESC(tpd->stream, rate_table[i].ratio);
2010	}
2011	H_SYNCQ_PREWRITE(&sc->txq_mem, tpd, TPD_SIZE);
2012
2013	nblks = TDX_SEGS2BLKS(nsegs);
2014
2015	DBG(sc, XMIT, ("XMIT: mlen=%d spec=0x%x nsegs=%d blocks=%d",
2016	    mlen, le32toh(tpd->spec), nsegs, nblks));
2017
2018	WRITE4(sc, q->q.card + 0, q->q.card_ioblk | nblks);
2019	BARRIER_W(sc);
2020
2021	sc->txcnt++;
2022	sc->ifp->if_opackets++;
2023	vc->obytes += m->m_pkthdr.len;
2024	vc->opackets++;
2025
2026	NEXT_QUEUE_ENTRY(sc->txqueue.head, FATM_TX_QLEN);
2027
2028	return (0);
2029}
2030
2031static void
2032fatm_start(struct ifnet *ifp)
2033{
2034	struct atm_pseudohdr aph;
2035	struct fatm_softc *sc;
2036	struct mbuf *m;
2037	u_int mlen, vpi, vci;
2038	struct card_vcc *vc;
2039
2040	sc = (struct fatm_softc *)ifp->if_softc;
2041
2042	while (1) {
2043		IF_DEQUEUE(&ifp->if_snd, m);
2044		if (m == NULL)
2045			break;
2046
2047		/*
2048		 * Loop through the mbuf chain and compute the total length
2049		 * of the packet. Check that all data pointer are
2050		 * 4 byte aligned. If they are not, call fatm_mfix to
2051		 * fix that problem. This comes more or less from the
2052		 * en driver.
2053		 */
2054		mlen = fatm_fix_chain(sc, &m);
2055		if (m == NULL)
2056			continue;
2057
2058		if (m->m_len < sizeof(struct atm_pseudohdr) &&
2059		    (m = m_pullup(m, sizeof(struct atm_pseudohdr))) == NULL)
2060			continue;
2061
2062		aph = *mtod(m, struct atm_pseudohdr *);
2063		mlen -= sizeof(struct atm_pseudohdr);
2064
2065		if (mlen == 0) {
2066			m_freem(m);
2067			continue;
2068		}
2069		if (mlen > FATM_MAXPDU) {
2070			sc->istats.tx_pdu2big++;
2071			m_freem(m);
2072			continue;
2073		}
2074
2075		vci = ATM_PH_VCI(&aph);
2076		vpi = ATM_PH_VPI(&aph);
2077
2078		/*
2079		 * From here on we need the softc
2080		 */
2081		FATM_LOCK(sc);
2082		if (!(ifp->if_flags & IFF_RUNNING)) {
2083			FATM_UNLOCK(sc);
2084			m_freem(m);
2085			break;
2086		}
2087		if (!VC_OK(sc, vpi, vci) || (vc = sc->vccs[vci]) == NULL ||
2088		    !(vc->vflags & FATM_VCC_OPEN)) {
2089			FATM_UNLOCK(sc);
2090			m_freem(m);
2091			continue;
2092		}
2093		if (fatm_tx(sc, m, vc, mlen)) {
2094			FATM_UNLOCK(sc);
2095			break;
2096		}
2097		FATM_UNLOCK(sc);
2098	}
2099}
2100
2101/*
 * VCC management
 *
 * This may seem complicated. The reason for this is that we need an
 * asynchronous open/close for the NATM VCCs because our ioctl handler
 * is called with the radix node head of the routing table locked. Therefore
 * we cannot sleep there and wait for the open/close to succeed. For this
 * reason we just initiate the operation from the ioctl.
2109 */
2110
2111/*
2112 * Command the card to open/close a VC.
 * Return the queue entry for waiting if we are successful.
2114 */
2115static struct cmdqueue *
2116fatm_start_vcc(struct fatm_softc *sc, u_int vpi, u_int vci, uint32_t cmd,
2117    u_int mtu, void (*func)(struct fatm_softc *, struct cmdqueue *))
2118{
2119	struct cmdqueue *q;
2120
2121	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
2122
2123	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2124	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
2125		sc->istats.cmd_queue_full++;
2126		return (NULL);
2127	}
2128	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
2129
2130	q->error = 0;
2131	q->cb = func;
2132	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
2133	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
2134
2135	WRITE4(sc, q->q.card + FATMOC_ACTIN_VPVC, MKVPVC(vpi, vci));
2136	BARRIER_W(sc);
2137	WRITE4(sc, q->q.card + FATMOC_ACTIN_MTU, mtu);
2138	BARRIER_W(sc);
2139	WRITE4(sc, q->q.card + FATMOC_OP, cmd);
2140	BARRIER_W(sc);
2141
2142	return (q);
2143}
2144
2145/*
2146 * The VC has been opened/closed and somebody has been waiting for this.
2147 * Wake him up.
2148 */
2149static void
2150fatm_cmd_complete(struct fatm_softc *sc, struct cmdqueue *q)
2151{
2152
2153	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2154	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2155		sc->istats.get_stat_errors++;
2156		q->error = EIO;
2157	}
2158	wakeup(q);
2159}
2160
2161/*
2162 * Open complete
2163 */
2164static void
2165fatm_open_finish(struct fatm_softc *sc, struct card_vcc *vc)
2166{
2167	vc->vflags &= ~FATM_VCC_TRY_OPEN;
2168	vc->vflags |= FATM_VCC_OPEN;
2169
2170	if (vc->vflags & FATM_VCC_REOPEN) {
2171		vc->vflags &= ~FATM_VCC_REOPEN;
2172		return;
2173	}
2174
2175	/* inform management if this is not an NG
2176	 * VCC or it's an NG PVC. */
2177	if (!(vc->param.flags & ATMIO_FLAG_NG) ||
2178	    (vc->param.flags & ATMIO_FLAG_PVC))
2179		ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), 0, vc->param.vci, 1);
2180}
2181
2182/*
 * The VC that we have tried to open asynchronously has been opened.
2184 */
2185static void
2186fatm_open_complete(struct fatm_softc *sc, struct cmdqueue *q)
2187{
2188	u_int vci;
2189	struct card_vcc *vc;
2190
2191	vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
2192	vc = sc->vccs[vci];
2193	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2194	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2195		sc->istats.get_stat_errors++;
2196		sc->vccs[vci] = NULL;
2197		uma_zfree(sc->vcc_zone, vc);
2198		if_printf(sc->ifp, "opening VCI %u failed\n", vci);
2199		return;
2200	}
2201	fatm_open_finish(sc, vc);
2202}
2203
2204/*
2205 * Wait on the queue entry until the VCC is opened/closed.
2206 */
2207static int
2208fatm_waitvcc(struct fatm_softc *sc, struct cmdqueue *q)
2209{
2210	int error;
2211
2212	/*
2213	 * Wait for the command to complete
2214	 */
2215	error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_vci", hz);
2216
2217	if (error != 0)
2218		return (error);
2219	return (q->error);
2220}
2221
2222/*
2223 * Start to open a VCC. This just initiates the operation.
2224 */
2225static int
2226fatm_open_vcc(struct fatm_softc *sc, struct atmio_openvcc *op)
2227{
2228	int error;
2229	struct card_vcc *vc;
2230
2231	/*
2232	 * Check parameters
2233	 */
2234	if ((op->param.flags & ATMIO_FLAG_NOTX) &&
2235	    (op->param.flags & ATMIO_FLAG_NORX))
2236		return (EINVAL);
2237
2238	if (!VC_OK(sc, op->param.vpi, op->param.vci))
2239		return (EINVAL);
2240	if (op->param.aal != ATMIO_AAL_0 && op->param.aal != ATMIO_AAL_5)
2241		return (EINVAL);
2242
2243	vc = uma_zalloc(sc->vcc_zone, M_NOWAIT | M_ZERO);
2244	if (vc == NULL)
2245		return (ENOMEM);
2246
2247	error = 0;
2248
2249	FATM_LOCK(sc);
2250	if (!(sc->ifp->if_flags & IFF_RUNNING)) {
2251		error = EIO;
2252		goto done;
2253	}
2254	if (sc->vccs[op->param.vci] != NULL) {
2255		error = EBUSY;
2256		goto done;
2257	}
2258	vc->param = op->param;
2259	vc->rxhand = op->rxhand;
2260
2261	switch (op->param.traffic) {
2262
2263	  case ATMIO_TRAFFIC_UBR:
2264		break;
2265
2266	  case ATMIO_TRAFFIC_CBR:
2267		if (op->param.tparam.pcr == 0 ||
2268		    op->param.tparam.pcr > IFP2IFATM(sc->ifp)->mib.pcr) {
2269			error = EINVAL;
2270			goto done;
2271		}
2272		break;
2273
2274	  default:
2275		error = EINVAL;
2276		goto done;
2277	}
2278	vc->ibytes = vc->obytes = 0;
2279	vc->ipackets = vc->opackets = 0;
2280
2281	vc->vflags = FATM_VCC_TRY_OPEN;
2282	sc->vccs[op->param.vci] = vc;
2283	sc->open_vccs++;
2284
2285	error = fatm_load_vc(sc, vc);
2286	if (error != 0) {
2287		sc->vccs[op->param.vci] = NULL;
2288		sc->open_vccs--;
2289		goto done;
2290	}
2291
2292	/* don't free below */
2293	vc = NULL;
2294
2295  done:
2296	FATM_UNLOCK(sc);
2297	if (vc != NULL)
2298		uma_zfree(sc->vcc_zone, vc);
2299	return (error);
2300}
2301
2302/*
2303 * Try to initialize the given VC
2304 */
2305static int
2306fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc)
2307{
2308	uint32_t cmd;
2309	struct cmdqueue *q;
2310	int error;
2311
2312	/* Command and buffer strategy */
2313	cmd = FATM_OP_ACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL | (0 << 16);
2314	if (vc->param.aal == ATMIO_AAL_0)
2315		cmd |= (0 << 8);
2316	else
2317		cmd |= (5 << 8);
2318
2319	q = fatm_start_vcc(sc, vc->param.vpi, vc->param.vci, cmd, 1,
2320	    (vc->param.flags & ATMIO_FLAG_ASYNC) ?
2321	    fatm_open_complete : fatm_cmd_complete);
2322	if (q == NULL)
2323		return (EIO);
2324
2325	if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) {
2326		error = fatm_waitvcc(sc, q);
2327		if (error != 0)
2328			return (error);
2329		fatm_open_finish(sc, vc);
2330	}
2331	return (0);
2332}
2333
2334/*
2335 * Finish close
2336 */
2337static void
2338fatm_close_finish(struct fatm_softc *sc, struct card_vcc *vc)
2339{
2340	/* inform management of this is not an NG
2341	 * VCC or it's an NG PVC. */
2342	if (!(vc->param.flags & ATMIO_FLAG_NG) ||
2343	    (vc->param.flags & ATMIO_FLAG_PVC))
2344		ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), 0, vc->param.vci, 0);
2345
2346	sc->vccs[vc->param.vci] = NULL;
2347	sc->open_vccs--;
2348
2349	uma_zfree(sc->vcc_zone, vc);
2350}
2351
2352/*
2353 * The VC has been closed.
2354 */
2355static void
2356fatm_close_complete(struct fatm_softc *sc, struct cmdqueue *q)
2357{
2358	u_int vci;
2359	struct card_vcc *vc;
2360
2361	vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
2362	vc = sc->vccs[vci];
2363	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
2364	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
2365		sc->istats.get_stat_errors++;
2366		/* keep the VCC in that state */
2367		if_printf(sc->ifp, "closing VCI %u failed\n", vci);
2368		return;
2369	}
2370
2371	fatm_close_finish(sc, vc);
2372}
2373
2374/*
2375 * Initiate closing a VCC
2376 */
2377static int
2378fatm_close_vcc(struct fatm_softc *sc, struct atmio_closevcc *cl)
2379{
2380	int error;
2381	struct cmdqueue *q;
2382	struct card_vcc *vc;
2383
2384	if (!VC_OK(sc, cl->vpi, cl->vci))
2385		return (EINVAL);
2386
2387	error = 0;
2388
2389	FATM_LOCK(sc);
2390	if (!(sc->ifp->if_flags & IFF_RUNNING)) {
2391		error = EIO;
2392		goto done;
2393	}
2394	vc = sc->vccs[cl->vci];
2395	if (vc == NULL || !(vc->vflags & (FATM_VCC_OPEN | FATM_VCC_TRY_OPEN))) {
2396		error = ENOENT;
2397		goto done;
2398	}
2399
2400	q = fatm_start_vcc(sc, cl->vpi, cl->vci,
2401	    FATM_OP_DEACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL, 1,
2402	    (vc->param.flags & ATMIO_FLAG_ASYNC) ?
2403	    fatm_close_complete : fatm_cmd_complete);
2404	if (q == NULL) {
2405		error = EIO;
2406		goto done;
2407	}
2408
2409	vc->vflags &= ~(FATM_VCC_OPEN | FATM_VCC_TRY_OPEN);
2410	vc->vflags |= FATM_VCC_TRY_CLOSE;
2411
2412	if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) {
2413		error = fatm_waitvcc(sc, q);
2414		if (error != 0)
2415			goto done;
2416
2417		fatm_close_finish(sc, vc);
2418	}
2419
2420  done:
2421	FATM_UNLOCK(sc);
2422	return (error);
2423}
2424
2425/*
2426 * IOCTL handler
2427 */
2428static int
2429fatm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t arg)
2430{
2431	int error;
2432	struct fatm_softc *sc = ifp->if_softc;
2433	struct ifaddr *ifa = (struct ifaddr *)arg;
2434	struct ifreq *ifr = (struct ifreq *)arg;
2435	struct atmio_closevcc *cl = (struct atmio_closevcc *)arg;
2436	struct atmio_openvcc *op = (struct atmio_openvcc *)arg;
2437	struct atmio_vcctable *vtab;
2438
2439	error = 0;
2440	switch (cmd) {
2441
2442	  case SIOCATMOPENVCC:		/* kernel internal use */
2443		error = fatm_open_vcc(sc, op);
2444		break;
2445
2446	  case SIOCATMCLOSEVCC:		/* kernel internal use */
2447		error = fatm_close_vcc(sc, cl);
2448		break;
2449
2450	  case SIOCSIFADDR:
2451		FATM_LOCK(sc);
2452		ifp->if_flags |= IFF_UP;
2453		if (!(ifp->if_flags & IFF_RUNNING))
2454			fatm_init_locked(sc);
2455		switch (ifa->ifa_addr->sa_family) {
2456#ifdef INET
2457		  case AF_INET:
2458		  case AF_INET6:
2459			ifa->ifa_rtrequest = atm_rtrequest;
2460			break;
2461#endif
2462		  default:
2463			break;
2464		}
2465		FATM_UNLOCK(sc);
2466		break;
2467
2468	  case SIOCSIFFLAGS:
2469		FATM_LOCK(sc);
2470		if (ifp->if_flags & IFF_UP) {
2471			if (!(ifp->if_flags & IFF_RUNNING)) {
2472				fatm_init_locked(sc);
2473			}
2474		} else {
2475			if (ifp->if_flags & IFF_RUNNING) {
2476				fatm_stop(sc);
2477			}
2478		}
2479		FATM_UNLOCK(sc);
2480		break;
2481
2482	  case SIOCGIFMEDIA:
2483	  case SIOCSIFMEDIA:
2484		if (ifp->if_flags & IFF_RUNNING)
2485			error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
2486		else
2487			error = EINVAL;
2488		break;
2489
2490	  case SIOCATMGVCCS:
2491		/* return vcc table */
2492		vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
2493		    FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 1);
2494		error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
2495		    vtab->count * sizeof(vtab->vccs[0]));
2496		free(vtab, M_DEVBUF);
2497		break;
2498
2499	  case SIOCATMGETVCCS:	/* internal netgraph use */
2500		vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
2501		    FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 0);
2502		if (vtab == NULL) {
2503			error = ENOMEM;
2504			break;
2505		}
2506		*(void **)arg = vtab;
2507		break;
2508
2509	  default:
2510		DBG(sc, IOCTL, ("+++ cmd=%08lx arg=%p", cmd, arg));
2511		error = EINVAL;
2512		break;
2513	}
2514
2515	return (error);
2516}
2517
2518/*
2519 * Detach from the interface and free all resources allocated during
2520 * initialisation and later.
2521 */
2522static int
2523fatm_detach(device_t dev)
2524{
2525	u_int i;
2526	struct rbuf *rb;
2527	struct fatm_softc *sc;
2528	struct txqueue *tx;
2529
2530	sc = (struct fatm_softc *)device_get_softc(dev);
2531
2532	if (device_is_alive(dev)) {
2533		FATM_LOCK(sc);
2534		fatm_stop(sc);
2535		utopia_detach(&sc->utopia);
2536		FATM_UNLOCK(sc);
2537		atm_ifdetach(sc->ifp);		/* XXX race */
2538	}
2539
2540	if (sc->ih != NULL)
2541		bus_teardown_intr(dev, sc->irqres, sc->ih);
2542
2543	while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
2544		if_printf(sc->ifp, "rbuf %p still in use!\n", rb);
2545		bus_dmamap_unload(sc->rbuf_tag, rb->map);
2546		m_freem(rb->m);
2547		LIST_REMOVE(rb, link);
2548		LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
2549	}
2550
2551	if (sc->txqueue.chunk != NULL) {
2552		for (i = 0; i < FATM_TX_QLEN; i++) {
2553			tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
2554			bus_dmamap_destroy(sc->tx_tag, tx->map);
2555		}
2556	}
2557
2558	while ((rb = LIST_FIRST(&sc->rbuf_free)) != NULL) {
2559		bus_dmamap_destroy(sc->rbuf_tag, rb->map);
2560		LIST_REMOVE(rb, link);
2561	}
2562
2563	if (sc->rbufs != NULL)
2564		free(sc->rbufs, M_DEVBUF);
2565	if (sc->vccs != NULL) {
2566		for (i = 0; i < FORE_MAX_VCC + 1; i++)
2567			if (sc->vccs[i] != NULL) {
2568				uma_zfree(sc->vcc_zone, sc->vccs[i]);
2569				sc->vccs[i] = NULL;
2570			}
2571		free(sc->vccs, M_DEVBUF);
2572	}
2573	if (sc->vcc_zone != NULL)
2574		uma_zdestroy(sc->vcc_zone);
2575
2576	if (sc->l1queue.chunk != NULL)
2577		free(sc->l1queue.chunk, M_DEVBUF);
2578	if (sc->s1queue.chunk != NULL)
2579		free(sc->s1queue.chunk, M_DEVBUF);
2580	if (sc->rxqueue.chunk != NULL)
2581		free(sc->rxqueue.chunk, M_DEVBUF);
2582	if (sc->txqueue.chunk != NULL)
2583		free(sc->txqueue.chunk, M_DEVBUF);
2584	if (sc->cmdqueue.chunk != NULL)
2585		free(sc->cmdqueue.chunk, M_DEVBUF);
2586
2587	destroy_dma_memory(&sc->reg_mem);
2588	destroy_dma_memory(&sc->sadi_mem);
2589	destroy_dma_memory(&sc->prom_mem);
2590#ifdef TEST_DMA_SYNC
2591	destroy_dma_memoryX(&sc->s1q_mem);
2592	destroy_dma_memoryX(&sc->l1q_mem);
2593	destroy_dma_memoryX(&sc->rxq_mem);
2594	destroy_dma_memoryX(&sc->txq_mem);
2595	destroy_dma_memoryX(&sc->stat_mem);
2596#endif
2597
2598	if (sc->tx_tag != NULL)
2599		if (bus_dma_tag_destroy(sc->tx_tag))
2600			printf("tx DMA tag busy!\n");
2601
2602	if (sc->rbuf_tag != NULL)
2603		if (bus_dma_tag_destroy(sc->rbuf_tag))
2604			printf("rbuf DMA tag busy!\n");
2605
2606	if (sc->parent_dmat != NULL)
2607		if (bus_dma_tag_destroy(sc->parent_dmat))
2608			printf("parent DMA tag busy!\n");
2609
2610	if (sc->irqres != NULL)
2611		bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irqres);
2612
2613	if (sc->memres != NULL)
2614		bus_release_resource(dev, SYS_RES_MEMORY,
2615		    sc->memid, sc->memres);
2616
2617	(void)sysctl_ctx_free(&sc->sysctl_ctx);
2618
2619	cv_destroy(&sc->cv_stat);
2620	cv_destroy(&sc->cv_regs);
2621
2622	mtx_destroy(&sc->mtx);
2623
2624	return (0);
2625}
2626
2627/*
2628 * Sysctl handler
2629 */
2630static int
2631fatm_sysctl_istats(SYSCTL_HANDLER_ARGS)
2632{
2633	struct fatm_softc *sc = arg1;
2634	u_long *ret;
2635	int error;
2636
2637	ret = malloc(sizeof(sc->istats), M_TEMP, M_WAITOK);
2638
2639	FATM_LOCK(sc);
2640	bcopy(&sc->istats, ret, sizeof(sc->istats));
2641	FATM_UNLOCK(sc);
2642
2643	error = SYSCTL_OUT(req, ret, sizeof(sc->istats));
2644	free(ret, M_TEMP);
2645
2646	return (error);
2647}
2648
2649/*
2650 * Sysctl handler for card statistics
2651 * This is disable because it destroys the PHY statistics.
2652 */
2653static int
2654fatm_sysctl_stats(SYSCTL_HANDLER_ARGS)
2655{
2656	struct fatm_softc *sc = arg1;
2657	int error;
2658	const struct fatm_stats *s;
2659	u_long *ret;
2660	u_int i;
2661
2662	ret = malloc(sizeof(u_long) * FATM_NSTATS, M_TEMP, M_WAITOK);
2663
2664	FATM_LOCK(sc);
2665
2666	if ((error = fatm_getstat(sc)) == 0) {
2667		s = sc->sadi_mem.mem;
2668		i = 0;
2669		ret[i++] = s->phy_4b5b.crc_header_errors;
2670		ret[i++] = s->phy_4b5b.framing_errors;
2671		ret[i++] = s->phy_oc3.section_bip8_errors;
2672		ret[i++] = s->phy_oc3.path_bip8_errors;
2673		ret[i++] = s->phy_oc3.line_bip24_errors;
2674		ret[i++] = s->phy_oc3.line_febe_errors;
2675		ret[i++] = s->phy_oc3.path_febe_errors;
2676		ret[i++] = s->phy_oc3.corr_hcs_errors;
2677		ret[i++] = s->phy_oc3.ucorr_hcs_errors;
2678		ret[i++] = s->atm.cells_transmitted;
2679		ret[i++] = s->atm.cells_received;
2680		ret[i++] = s->atm.vpi_bad_range;
2681		ret[i++] = s->atm.vpi_no_conn;
2682		ret[i++] = s->atm.vci_bad_range;
2683		ret[i++] = s->atm.vci_no_conn;
2684		ret[i++] = s->aal0.cells_transmitted;
2685		ret[i++] = s->aal0.cells_received;
2686		ret[i++] = s->aal0.cells_dropped;
2687		ret[i++] = s->aal4.cells_transmitted;
2688		ret[i++] = s->aal4.cells_received;
2689		ret[i++] = s->aal4.cells_crc_errors;
2690		ret[i++] = s->aal4.cels_protocol_errors;
2691		ret[i++] = s->aal4.cells_dropped;
2692		ret[i++] = s->aal4.cspdus_transmitted;
2693		ret[i++] = s->aal4.cspdus_received;
2694		ret[i++] = s->aal4.cspdus_protocol_errors;
2695		ret[i++] = s->aal4.cspdus_dropped;
2696		ret[i++] = s->aal5.cells_transmitted;
2697		ret[i++] = s->aal5.cells_received;
2698		ret[i++] = s->aal5.congestion_experienced;
2699		ret[i++] = s->aal5.cells_dropped;
2700		ret[i++] = s->aal5.cspdus_transmitted;
2701		ret[i++] = s->aal5.cspdus_received;
2702		ret[i++] = s->aal5.cspdus_crc_errors;
2703		ret[i++] = s->aal5.cspdus_protocol_errors;
2704		ret[i++] = s->aal5.cspdus_dropped;
2705		ret[i++] = s->aux.small_b1_failed;
2706		ret[i++] = s->aux.large_b1_failed;
2707		ret[i++] = s->aux.small_b2_failed;
2708		ret[i++] = s->aux.large_b2_failed;
2709		ret[i++] = s->aux.rpd_alloc_failed;
2710		ret[i++] = s->aux.receive_carrier;
2711	}
2712	/* declare the buffer free */
2713	sc->flags &= ~FATM_STAT_INUSE;
2714	cv_signal(&sc->cv_stat);
2715
2716	FATM_UNLOCK(sc);
2717
2718	if (error == 0)
2719		error = SYSCTL_OUT(req, ret, sizeof(u_long) * FATM_NSTATS);
2720	free(ret, M_TEMP);
2721
2722	return (error);
2723}
2724
2725#define MAXDMASEGS 32		/* maximum number of receive descriptors */
2726
2727/*
2728 * Attach to the device.
2729 *
2730 * We assume, that there is a global lock (Giant in this case) that protects
2731 * multiple threads from entering this function. This makes sense, doesn't it?
2732 */
2733static int
2734fatm_attach(device_t dev)
2735{
2736	struct ifnet *ifp;
2737	struct fatm_softc *sc;
2738	int unit;
2739	uint16_t cfg;
2740	int error = 0;
2741	struct rbuf *rb;
2742	u_int i;
2743	struct txqueue *tx;
2744
2745	sc = device_get_softc(dev);
2746	unit = device_get_unit(dev);
2747
2748	ifp = sc->ifp = if_alloc(IFT_ATM);
2749	if (ifp == NULL) {
2750		error = ENOSPC;
2751		goto fail;
2752	}
2753
2754	IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_PCA200E;
2755	IFP2IFATM(sc->ifp)->mib.serial = 0;
2756	IFP2IFATM(sc->ifp)->mib.hw_version = 0;
2757	IFP2IFATM(sc->ifp)->mib.sw_version = 0;
2758	IFP2IFATM(sc->ifp)->mib.vpi_bits = 0;
2759	IFP2IFATM(sc->ifp)->mib.vci_bits = FORE_VCIBITS;
2760	IFP2IFATM(sc->ifp)->mib.max_vpcs = 0;
2761	IFP2IFATM(sc->ifp)->mib.max_vccs = FORE_MAX_VCC;
2762	IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN;
2763	IFP2IFATM(sc->ifp)->phy = &sc->utopia;
2764
2765	LIST_INIT(&sc->rbuf_free);
2766	LIST_INIT(&sc->rbuf_used);
2767
2768	/*
2769	 * Initialize mutex and condition variables.
2770	 */
2771	mtx_init(&sc->mtx, device_get_nameunit(dev),
2772	    MTX_NETWORK_LOCK, MTX_DEF);
2773
2774	cv_init(&sc->cv_stat, "fatm_stat");
2775	cv_init(&sc->cv_regs, "fatm_regs");
2776
2777	sysctl_ctx_init(&sc->sysctl_ctx);
2778
2779	/*
2780	 * Make the sysctl tree
2781	 */
2782	if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
2783	    SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
2784	    device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
2785		goto fail;
2786
2787	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2788	    OID_AUTO, "istats", CTLFLAG_RD, sc, 0, fatm_sysctl_istats,
2789	    "LU", "internal statistics") == NULL)
2790		goto fail;
2791
2792	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2793	    OID_AUTO, "stats", CTLFLAG_RD, sc, 0, fatm_sysctl_stats,
2794	    "LU", "card statistics") == NULL)
2795		goto fail;
2796
2797	if (SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2798	    OID_AUTO, "retry_tx", CTLFLAG_RW, &sc->retry_tx, 0,
2799	    "retry flag") == NULL)
2800		goto fail;
2801
2802#ifdef FATM_DEBUG
2803	if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2804	    OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "debug flags")
2805	    == NULL)
2806		goto fail;
2807	sc->debug = FATM_DEBUG;
2808#endif
2809
2810	/*
2811	 * Network subsystem stuff
2812	 */
2813	ifp->if_softc = sc;
2814	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2815	ifp->if_flags = IFF_SIMPLEX;
2816	ifp->if_ioctl = fatm_ioctl;
2817	ifp->if_start = fatm_start;
2818	ifp->if_watchdog = fatm_watchdog;
2819	ifp->if_init = fatm_init;
2820	ifp->if_linkmib = &IFP2IFATM(sc->ifp)->mib;
2821	ifp->if_linkmiblen = sizeof(IFP2IFATM(sc->ifp)->mib);
2822
2823	/*
2824	 * Enable memory and bustmaster
2825	 */
2826	cfg = pci_read_config(dev, PCIR_COMMAND, 2);
2827	cfg |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
2828	pci_write_config(dev, PCIR_COMMAND, cfg, 2);
2829
2830	/*
2831	 * Map memory
2832	 */
2833	cfg = pci_read_config(dev, PCIR_COMMAND, 2);
2834	if (!(cfg & PCIM_CMD_MEMEN)) {
2835		if_printf(ifp, "failed to enable memory mapping\n");
2836		error = ENXIO;
2837		goto fail;
2838	}
2839	sc->memid = 0x10;
2840	sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->memid,
2841	    RF_ACTIVE);
2842	if (sc->memres == NULL) {
2843		if_printf(ifp, "could not map memory\n");
2844		error = ENXIO;
2845		goto fail;
2846	}
2847	sc->memh = rman_get_bushandle(sc->memres);
2848	sc->memt = rman_get_bustag(sc->memres);
2849
2850	/*
2851	 * Convert endianess of slave access
2852	 */
2853	cfg = pci_read_config(dev, FATM_PCIR_MCTL, 1);
2854	cfg |= FATM_PCIM_SWAB;
2855	pci_write_config(dev, FATM_PCIR_MCTL, cfg, 1);
2856
2857	/*
2858	 * Allocate interrupt (activate at the end)
2859	 */
2860	sc->irqid = 0;
2861	sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
2862	    RF_SHAREABLE | RF_ACTIVE);
2863	if (sc->irqres == NULL) {
2864		if_printf(ifp, "could not allocate irq\n");
2865		error = ENXIO;
2866		goto fail;
2867	}
2868
2869	/*
2870	 * Allocate the parent DMA tag. This is used simply to hold overall
2871	 * restrictions for the controller (and PCI bus) and is never used
2872	 * to do anything.
2873	 */
2874	if (bus_dma_tag_create(NULL, 1, 0,
2875	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2876	    NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, MAXDMASEGS,
2877	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
2878	    &sc->parent_dmat)) {
2879		if_printf(ifp, "could not allocate parent DMA tag\n");
2880		error = ENOMEM;
2881		goto fail;
2882	}
2883
2884	/*
2885	 * Allocate the receive buffer DMA tag. This tag must map a maximum of
2886	 * a mbuf cluster.
2887	 */
2888	if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
2889	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2890	    NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
2891	    NULL, NULL, &sc->rbuf_tag)) {
2892		if_printf(ifp, "could not allocate rbuf DMA tag\n");
2893		error = ENOMEM;
2894		goto fail;
2895	}
2896
2897	/*
2898	 * Allocate the transmission DMA tag. Must add 1, because
2899	 * rounded up PDU will be 65536 bytes long.
2900	 */
2901	if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
2902	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2903	    NULL, NULL,
2904	    FATM_MAXPDU + 1, TPD_EXTENSIONS + TXD_FIXED, MCLBYTES, 0,
2905	    NULL, NULL, &sc->tx_tag)) {
2906		if_printf(ifp, "could not allocate tx DMA tag\n");
2907		error = ENOMEM;
2908		goto fail;
2909	}
2910
2911	/*
2912	 * Allocate DMAable memory.
2913	 */
2914	sc->stat_mem.size = sizeof(uint32_t) * (FATM_CMD_QLEN + FATM_TX_QLEN
2915	    + FATM_RX_QLEN + SMALL_SUPPLY_QLEN + LARGE_SUPPLY_QLEN);
2916	sc->stat_mem.align = 4;
2917
2918	sc->txq_mem.size = FATM_TX_QLEN * TPD_SIZE;
2919	sc->txq_mem.align = 32;
2920
2921	sc->rxq_mem.size = FATM_RX_QLEN * RPD_SIZE;
2922	sc->rxq_mem.align = 32;
2923
2924	sc->s1q_mem.size = SMALL_SUPPLY_QLEN *
2925	    BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE);
2926	sc->s1q_mem.align = 32;
2927
2928	sc->l1q_mem.size = LARGE_SUPPLY_QLEN *
2929	    BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE);
2930	sc->l1q_mem.align = 32;
2931
2932#ifdef TEST_DMA_SYNC
2933	if ((error = alloc_dma_memoryX(sc, "STATUS", &sc->stat_mem)) != 0 ||
2934	    (error = alloc_dma_memoryX(sc, "TXQ", &sc->txq_mem)) != 0 ||
2935	    (error = alloc_dma_memoryX(sc, "RXQ", &sc->rxq_mem)) != 0 ||
2936	    (error = alloc_dma_memoryX(sc, "S1Q", &sc->s1q_mem)) != 0 ||
2937	    (error = alloc_dma_memoryX(sc, "L1Q", &sc->l1q_mem)) != 0)
2938		goto fail;
2939#else
2940	if ((error = alloc_dma_memory(sc, "STATUS", &sc->stat_mem)) != 0 ||
2941	    (error = alloc_dma_memory(sc, "TXQ", &sc->txq_mem)) != 0 ||
2942	    (error = alloc_dma_memory(sc, "RXQ", &sc->rxq_mem)) != 0 ||
2943	    (error = alloc_dma_memory(sc, "S1Q", &sc->s1q_mem)) != 0 ||
2944	    (error = alloc_dma_memory(sc, "L1Q", &sc->l1q_mem)) != 0)
2945		goto fail;
2946#endif
2947
2948	sc->prom_mem.size = sizeof(struct prom);
2949	sc->prom_mem.align = 32;
2950	if ((error = alloc_dma_memory(sc, "PROM", &sc->prom_mem)) != 0)
2951		goto fail;
2952
2953	sc->sadi_mem.size = sizeof(struct fatm_stats);
2954	sc->sadi_mem.align = 32;
2955	if ((error = alloc_dma_memory(sc, "STATISTICS", &sc->sadi_mem)) != 0)
2956		goto fail;
2957
2958	sc->reg_mem.size = sizeof(uint32_t) * FATM_NREGS;
2959	sc->reg_mem.align = 32;
2960	if ((error = alloc_dma_memory(sc, "REGISTERS", &sc->reg_mem)) != 0)
2961		goto fail;
2962
2963	/*
2964	 * Allocate queues
2965	 */
2966	sc->cmdqueue.chunk = malloc(FATM_CMD_QLEN * sizeof(struct cmdqueue),
2967	    M_DEVBUF, M_ZERO | M_WAITOK);
2968	sc->txqueue.chunk = malloc(FATM_TX_QLEN * sizeof(struct txqueue),
2969	    M_DEVBUF, M_ZERO | M_WAITOK);
2970	sc->rxqueue.chunk = malloc(FATM_RX_QLEN * sizeof(struct rxqueue),
2971	    M_DEVBUF, M_ZERO | M_WAITOK);
2972	sc->s1queue.chunk = malloc(SMALL_SUPPLY_QLEN * sizeof(struct supqueue),
2973	    M_DEVBUF, M_ZERO | M_WAITOK);
2974	sc->l1queue.chunk = malloc(LARGE_SUPPLY_QLEN * sizeof(struct supqueue),
2975	    M_DEVBUF, M_ZERO | M_WAITOK);
2976
2977	sc->vccs = malloc((FORE_MAX_VCC + 1) * sizeof(sc->vccs[0]),
2978	    M_DEVBUF, M_ZERO | M_WAITOK);
2979	sc->vcc_zone = uma_zcreate("FATM vccs", sizeof(struct card_vcc),
2980	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
2981	if (sc->vcc_zone == NULL) {
2982		error = ENOMEM;
2983		goto fail;
2984	}
2985
2986	/*
2987	 * Allocate memory for the receive buffer headers. The total number
2988	 * of headers should probably also include the maximum number of
2989	 * buffers on the receive queue.
2990	 */
2991	sc->rbuf_total = SMALL_POOL_SIZE + LARGE_POOL_SIZE;
2992	sc->rbufs = malloc(sc->rbuf_total * sizeof(struct rbuf),
2993	    M_DEVBUF, M_ZERO | M_WAITOK);
2994
2995	/*
2996	 * Put all rbuf headers on the free list and create DMA maps.
2997	 */
2998	for (rb = sc->rbufs, i = 0; i < sc->rbuf_total; i++, rb++) {
2999		if ((error = bus_dmamap_create(sc->rbuf_tag, 0, &rb->map))) {
3000			if_printf(sc->ifp, "creating rx map: %d\n",
3001			    error);
3002			goto fail;
3003		}
3004		LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
3005	}
3006
3007	/*
3008	 * Create dma maps for transmission. In case of an error, free the
3009	 * allocated DMA maps, because on some architectures maps are NULL
3010	 * and we cannot distinguish between a failure and a NULL map in
3011	 * the detach routine.
3012	 */
3013	for (i = 0; i < FATM_TX_QLEN; i++) {
3014		tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
3015		if ((error = bus_dmamap_create(sc->tx_tag, 0, &tx->map))) {
3016			if_printf(sc->ifp, "creating tx map: %d\n",
3017			    error);
3018			while (i > 0) {
3019				tx = GET_QUEUE(sc->txqueue, struct txqueue,
3020				    i - 1);
3021				bus_dmamap_destroy(sc->tx_tag, tx->map);
3022				i--;
3023			}
3024			goto fail;
3025		}
3026	}
3027
3028	utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->mtx,
3029	    &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
3030	    &fatm_utopia_methods);
3031	sc->utopia.flags |= UTP_FL_NORESET | UTP_FL_POLL_CARRIER;
3032
3033	/*
3034	 * Attach the interface
3035	 */
3036	atm_ifattach(ifp);
3037	ifp->if_snd.ifq_maxlen = 512;
3038
3039	error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET,
3040	    fatm_intr, sc, &sc->ih);
3041	if (error) {
3042		if_printf(ifp, "couldn't setup irq\n");
3043		goto fail;
3044	}
3045
3046  fail:
3047	if (error)
3048		fatm_detach(dev);
3049
3050	return (error);
3051}
3052
#if defined(FATM_DEBUG) && 0
/*
 * Debug helper: dump the small-buffer supply queue. Compiled out
 * (the "&& 0" above); kept for hand-enabling during bring-up.
 * NOTE(review): the %x specifiers assume q->q.card and *q->q.statp
 * are int-sized - confirm against the queue structure if re-enabled.
 */
static void
dump_s1_queue(struct fatm_softc *sc)
{
	int i;
	struct supqueue *q;

	for(i = 0; i < SMALL_SUPPLY_QLEN; i++) {
		q = GET_QUEUE(sc->s1queue, struct supqueue, i);
		printf("%2d: card=%x(%x,%x) stat=%x\n", i,
		    q->q.card,
		    READ4(sc, q->q.card),
		    READ4(sc, q->q.card + 4),
		    *q->q.statp);
	}
}
#endif
3070
3071/*
3072 * Driver infrastructure.
3073 */
3074static device_method_t fatm_methods[] = {
3075	DEVMETHOD(device_probe,		fatm_probe),
3076	DEVMETHOD(device_attach,	fatm_attach),
3077	DEVMETHOD(device_detach,	fatm_detach),
3078	{ 0, 0 }
3079};
3080static driver_t fatm_driver = {
3081	"fatm",
3082	fatm_methods,
3083	sizeof(struct fatm_softc),
3084};
3085
3086DRIVER_MODULE(fatm, pci, fatm_driver, fatm_devclass, 0, 0);
3087