/*-
 * Copyright (c) 2001-2003
 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
 * 	All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Author: Hartmut Brandt <harti@freebsd.org>
 *
 * Fore PCA200E driver for NATM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/fatm/if_fatm.c 139749 2005-01-06 01:43:34Z imp $");

#include "opt_inet.h"
#include "opt_natm.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <sys/condvar.h>
#include <vm/uma.h>

#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_atm.h>
#include <net/route.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_atm.h>
#endif

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/utopia/utopia.h>

#include <dev/fatm/if_fatmreg.h>
#include <dev/fatm/if_fatmvar.h>

#include <dev/fatm/firmware.h>

devclass_t fatm_devclass;

static const struct {
	uint16_t	vid;
	uint16_t	did;
	const char	*name;
} fatm_devs[] = {
	{ 0x1127, 0x300,
	  "FORE PCA200E" },
	{ 0, 0, NULL }
};

static const struct rate {
	uint32_t	ratio;
	uint32_t	cell_rate;
} rate_table[] = {
#include <dev/fatm/if_fatm_rate.h>
};
#define RATE_TABLE_SIZE (sizeof(rate_table) / sizeof(rate_table[0]))
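
/*
 * Derived from fatm_tx() below: the rate table entries are expected in
 * order of decreasing cell rate; the transmit path picks the last entry
 * whose cell rate is still at least the requested PCR.
 */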

SYSCTL_DECL(_hw_atm);

MODULE_DEPEND(fatm, utopia, 1, 1, 1);

static int	fatm_utopia_readregs(struct ifatm *, u_int, uint8_t *, u_int *);
static int	fatm_utopia_writereg(struct ifatm *, u_int, u_int, u_int);

static const struct utopia_methods fatm_utopia_methods = {
	fatm_utopia_readregs,
	fatm_utopia_writereg
};

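/*
 * A VPI/VCI pair is usable when the VPI fits into the card's VPI address
 * bits and the VCI is non-zero and fits into the VCI address bits.
 */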
#define VC_OK(SC, VPI, VCI)						\
	(((VPI) & ~((1 << (SC)->ifatm.mib.vpi_bits) - 1)) == 0 &&	\
	 (VCI) != 0 && ((VCI) & ~((1 << (SC)->ifatm.mib.vci_bits) - 1)) == 0)

static int fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc);

/*
 * Probing is easy: step through the list of known vendor and device
 * ids and compare. If one is found - it's ours.
 */
static int
fatm_probe(device_t dev)
{
	int i;

	for (i = 0; fatm_devs[i].name; i++)
		if (pci_get_vendor(dev) == fatm_devs[i].vid &&
		    pci_get_device(dev) == fatm_devs[i].did) {
			device_set_desc(dev, fatm_devs[i].name);
			return (0);
		}
	return (ENXIO);
}

/*
 * Function called at completion of a SUNI writeregs/readregs command.
 * This is called from the interrupt handler while holding the softc lock.
 * We use the queue entry as the rendezvous point.
 */
static void
fatm_utopia_writeregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
{

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.suni_reg_errors++;
		q->error = EIO;
	}
	wakeup(q);
}

/*
 * Write a SUNI register. The bits that are 1 in mask are written from val
 * into register reg. We wait for the command to complete by sleeping on
 * the register memory.
 *
 * We assume that we already hold the softc mutex.
 */
static int
fatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
{
	int error;
	struct cmdqueue *q;
	struct fatm_softc *sc;

	sc = ifatm->ifnet.if_softc;
	FATM_CHECKLOCK(sc);
	if (!(ifatm->ifnet.if_flags & IFF_RUNNING))
		return (EIO);

	/* get queue element and fill it */
	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
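	/*
	 * GET_QUEUE returns the element at the given index; NEXT_QUEUE_ENTRY
	 * below advances that index modulo the queue length.
	 */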

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
		sc->istats.cmd_queue_full++;
		return (EIO);
	}
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = fatm_utopia_writeregs_complete;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, 0);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP,
	    FATM_MAKE_SETOC3(reg, val, mask) | FATM_OP_INTERRUPT_SEL);
	BARRIER_W(sc);

	/*
	 * Wait for the command to complete
	 */
	error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_setreg", hz);

	switch (error) {

	  case EWOULDBLOCK:
		error = EIO;
		break;

	  case ERESTART:
		error = EINTR;
		break;

	  case 0:
		error = q->error;
		break;
	}

	return (error);
}

/*
 * Function called at completion of a SUNI readregs command.
 * This is called from the interrupt handler while holding the softc lock.
 * We use reg_mem as the rendezvous point.
 */
static void
fatm_utopia_readregs_complete(struct fatm_softc *sc, struct cmdqueue *q)
{

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.suni_reg_errors++;
		q->error = EIO;
	}
	wakeup(&sc->reg_mem);
}

/*
 * Read SUNI registers
 *
 * We use a preallocated buffer to read the registers. Therefore we need
 * to protect against multiple threads trying to read registers. We do this
 * with a condition variable and a flag. We wait for the command to complete
 * by sleeping on the register memory.
 *
 * We assume that we already hold the softc mutex.
 */
static int
fatm_utopia_readregs_internal(struct fatm_softc *sc)
{
	int error, i;
	uint32_t *ptr;
	struct cmdqueue *q;

	/* get the buffer */
	for (;;) {
		if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
			return (EIO);
		if (!(sc->flags & FATM_REGS_INUSE))
			break;
		cv_wait(&sc->cv_regs, &sc->mtx);
	}
	sc->flags |= FATM_REGS_INUSE;

	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
		sc->istats.cmd_queue_full++;
		return (EIO);
	}
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = fatm_utopia_readregs_complete;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map, BUS_DMASYNC_PREREAD);

	WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, sc->reg_mem.paddr);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP,
	    FATM_OP_OC3_GET_REG | FATM_OP_INTERRUPT_SEL);
	BARRIER_W(sc);

	/*
	 * Wait for the command to complete
	 */
	error = msleep(&sc->reg_mem, &sc->mtx, PZERO | PCATCH,
	    "fatm_getreg", hz);

	switch (error) {

	  case EWOULDBLOCK:
		error = EIO;
		break;

	  case ERESTART:
		error = EINTR;
		break;

	  case 0:
		bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map,
		    BUS_DMASYNC_POSTREAD);
		error = q->error;
		break;
	}

	if (error != 0) {
		/* declare buffer to be free */
		sc->flags &= ~FATM_REGS_INUSE;
		cv_signal(&sc->cv_regs);
		return (error);
	}

	/* swap if needed */
	ptr = (uint32_t *)sc->reg_mem.mem;
	for (i = 0; i < FATM_NREGS; i++)
		ptr[i] = le32toh(ptr[i]) & 0xff;

	return (0);
}

/*
 * Read SUNI registers for the SUNI module.
 *
 * We assume that we already hold the mutex.
 */
static int
fatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *valp, u_int *np)
{
	int err;
	int i;
	struct fatm_softc *sc;

	if (reg >= FATM_NREGS)
		return (EINVAL);
	if (reg + *np > FATM_NREGS)
		*np = FATM_NREGS - reg;
	sc = ifatm->ifnet.if_softc;
	FATM_CHECKLOCK(sc);

	err = fatm_utopia_readregs_internal(sc);
	if (err != 0)
		return (err);

	for (i = 0; i < *np; i++)
		valp[i] = ((uint32_t *)sc->reg_mem.mem)[reg + i];

	/* declare buffer to be free */
	sc->flags &= ~FATM_REGS_INUSE;
	cv_signal(&sc->cv_regs);

	return (0);
}

/*
 * Check whether the heart is beating. We remember the last heartbeat and
 * compare it to the current one. If it appears stuck for 10 checks in a
 * row, we have a problem.
 *
 * Assume we hold the lock.
 */
static void
fatm_check_heartbeat(struct fatm_softc *sc)
{
	uint32_t h;

	FATM_CHECKLOCK(sc);

	h = READ4(sc, FATMO_HEARTBEAT);
	DBG(sc, BEAT, ("heartbeat %08x", h));

	if (sc->stop_cnt == 10)
		return;

	if (h == sc->heartbeat) {
		if (++sc->stop_cnt == 10) {
			log(LOG_ERR, "i960 stopped???\n");
			WRITE4(sc, FATMO_HIMR, 1);
		}
		return;
	}

	sc->stop_cnt = 0;
	sc->heartbeat = h;
}

/*
 * Ensure that the heart is still beating.
 */
static void
fatm_watchdog(struct ifnet *ifp)
{
	struct fatm_softc *sc = ifp->if_softc;

	FATM_LOCK(sc);
	if (ifp->if_flags & IFF_RUNNING) {
		fatm_check_heartbeat(sc);
		ifp->if_timer = 5;
	}
	FATM_UNLOCK(sc);
}

/*
 * Hard reset the i960 on the board. This is done by initializing registers,
 * clearing interrupts and waiting for the selftest to finish. Not sure
 * whether all these barriers are actually needed.
 *
 * Assumes that we hold the lock.
 */
static int
fatm_reset(struct fatm_softc *sc)
{
	int w;
	uint32_t val;

	FATM_CHECKLOCK(sc);

	WRITE4(sc, FATMO_APP_BASE, FATMO_COMMON_ORIGIN);
	BARRIER_W(sc);

	WRITE4(sc, FATMO_UART_TO_960, XMIT_READY);
	BARRIER_W(sc);

	WRITE4(sc, FATMO_UART_TO_HOST, XMIT_READY);
	BARRIER_W(sc);

	WRITE4(sc, FATMO_BOOT_STATUS, COLD_START);
	BARRIER_W(sc);

	WRITE1(sc, FATMO_HCR, FATM_HCR_RESET);
	BARRIER_W(sc);

	DELAY(1000);

	WRITE1(sc, FATMO_HCR, 0);
	BARRIER_RW(sc);

	DELAY(1000);

	for (w = 100; w; w--) {
		BARRIER_R(sc);
		val = READ4(sc, FATMO_BOOT_STATUS);
		switch (val) {
		  case SELF_TEST_OK:
			return (0);
		  case SELF_TEST_FAIL:
			return (EIO);
		}
		DELAY(1000);
	}
	return (EIO);
}

/*
 * Stop the card. Must be called WITH the lock held.
 * Reset, free transmit and receive buffers. Wakeup everybody who may sleep.
 */
static void
fatm_stop(struct fatm_softc *sc)
{
	int i;
	struct cmdqueue *q;
	struct rbuf *rb;
	struct txqueue *tx;
	uint32_t stat;

	FATM_CHECKLOCK(sc);

	/* Stop the board */
	utopia_stop(&sc->utopia);
	(void)fatm_reset(sc);

	/* stop watchdog */
	sc->ifatm.ifnet.if_timer = 0;

	if (sc->ifatm.ifnet.if_flags & IFF_RUNNING) {
		sc->ifatm.ifnet.if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm,
		    sc->utopia.carrier == UTP_CARR_OK);

		/*
		 * Collect transmit mbufs, partial receive mbufs and
		 * supplied mbufs
		 */
		for (i = 0; i < FATM_TX_QLEN; i++) {
			tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
			if (tx->m) {
				bus_dmamap_unload(sc->tx_tag, tx->map);
				m_freem(tx->m);
				tx->m = NULL;
			}
		}

		/* Collect supplied mbufs */
		while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
			LIST_REMOVE(rb, link);
			bus_dmamap_unload(sc->rbuf_tag, rb->map);
			m_free(rb->m);
			rb->m = NULL;
			LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
		}

		/* Unwait any waiters */
		wakeup(&sc->sadi_mem);

		/* wakeup all threads waiting for STAT or REG buffers */
		cv_broadcast(&sc->cv_stat);
		cv_broadcast(&sc->cv_regs);

		sc->flags &= ~(FATM_STAT_INUSE | FATM_REGS_INUSE);

		/* wakeup all threads waiting on commands */
		for (i = 0; i < FATM_CMD_QLEN; i++) {
			q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, i);

			H_SYNCSTAT_POSTREAD(sc, q->q.statp);
			if ((stat = H_GETSTAT(q->q.statp)) != FATM_STAT_FREE) {
				H_SETSTAT(q->q.statp, stat | FATM_STAT_ERROR);
				H_SYNCSTAT_PREWRITE(sc, q->q.statp);
				wakeup(q);
			}
		}
		utopia_reset_media(&sc->utopia);
	}
	sc->small_cnt = sc->large_cnt = 0;

	/* Reset vcc info */
	if (sc->vccs != NULL) {
		sc->open_vccs = 0;
		for (i = 0; i < FORE_MAX_VCC + 1; i++) {
			if (sc->vccs[i] != NULL) {
				if ((sc->vccs[i]->vflags & (FATM_VCC_OPEN |
				    FATM_VCC_TRY_OPEN)) == 0) {
					uma_zfree(sc->vcc_zone, sc->vccs[i]);
					sc->vccs[i] = NULL;
				} else {
					sc->vccs[i]->vflags = 0;
					sc->open_vccs++;
				}
			}
		}
	}
}

/*
 * Load the firmware into the board and save the entry point.
 */
static uint32_t
firmware_load(struct fatm_softc *sc)
{
	struct firmware *fw = (struct firmware *)firmware;
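
	/* The image starts with a header giving its load offset and entry point. */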

	DBG(sc, INIT, ("loading - entry=%x", fw->entry));
	bus_space_write_region_4(sc->memt, sc->memh, fw->offset, firmware,
	    sizeof(firmware) / sizeof(firmware[0]));
	BARRIER_RW(sc);

	return (fw->entry);
}

/*
 * Read a character from the virtual UART. The availability of a character
 * is signaled by a non-null value of the 32 bit register. The eating of
 * the character by us is signaled to the card by setting that register
 * to zero.
 */
static int
rx_getc(struct fatm_softc *sc)
{
	int w = 50;
	int c;

	while (w--) {
		c = READ4(sc, FATMO_UART_TO_HOST);
		BARRIER_RW(sc);
		if (c != 0) {
			WRITE4(sc, FATMO_UART_TO_HOST, 0);
			DBGC(sc, UART, ("%c", c & 0xff));
			return (c & 0xff);
		}
		DELAY(1000);
	}
	return (-1);
}

/*
 * Eat up characters from the board and stuff them in the bit-bucket.
 */
static void
rx_flush(struct fatm_softc *sc)
{
	int w = 10000;

	while (w-- && rx_getc(sc) >= 0)
		;
}

/*
 * Write a character to the card. The UART is available if the register
 * is zero.
 */
static int
tx_putc(struct fatm_softc *sc, u_char c)
{
	int w = 10;
	int c1;

	while (w--) {
		c1 = READ4(sc, FATMO_UART_TO_960);
		BARRIER_RW(sc);
		if (c1 == 0) {
			WRITE4(sc, FATMO_UART_TO_960, c | CHAR_AVAIL);
			DBGC(sc, UART, ("%c", c & 0xff));
			return (0);
		}
		DELAY(1000);
	}
	return (-1);
}

/*
 * Start the firmware. This is done by issuing a 'go' command with
 * the hex entry address of the firmware. Then we wait for the self-test to
 * succeed.
 */
static int
fatm_start_firmware(struct fatm_softc *sc, uint32_t start)
{
	static char hex[] = "0123456789abcdef";
	u_int w, val;

	DBG(sc, INIT, ("starting"));
	rx_flush(sc);
	tx_putc(sc, '\r');
	DELAY(1000);

	rx_flush(sc);

	tx_putc(sc, 'g');
	(void)rx_getc(sc);
	tx_putc(sc, 'o');
	(void)rx_getc(sc);
	tx_putc(sc, ' ');
	(void)rx_getc(sc);

	tx_putc(sc, hex[(start >> 12) & 0xf]);
	(void)rx_getc(sc);
	tx_putc(sc, hex[(start >>  8) & 0xf]);
	(void)rx_getc(sc);
	tx_putc(sc, hex[(start >>  4) & 0xf]);
	(void)rx_getc(sc);
	tx_putc(sc, hex[(start >>  0) & 0xf]);
	(void)rx_getc(sc);
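
	/* At this point, e.g. for start == 0x4000, "go 4000" has been typed. */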

	tx_putc(sc, '\r');
	rx_flush(sc);

	for (w = 100; w; w--) {
		BARRIER_R(sc);
		val = READ4(sc, FATMO_BOOT_STATUS);
		switch (val) {
		  case CP_RUNNING:
			return (0);
		  case SELF_TEST_FAIL:
			return (EIO);
		}
		DELAY(1000);
	}
	return (EIO);
}

/*
 * Initialize one card and host queue.
 */
static void
init_card_queue(struct fatm_softc *sc, struct fqueue *queue, int qlen,
    size_t qel_size, size_t desc_size, cardoff_t off,
    u_char **statpp, uint32_t *cardstat, u_char *descp, uint32_t carddesc)
{
	struct fqelem *el = queue->chunk;

	while (qlen--) {
		el->card = off;
		off += 8;	/* size of card entry */

		el->statp = (uint32_t *)(*statpp);
		(*statpp) += sizeof(uint32_t);
		H_SETSTAT(el->statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, el->statp);

		WRITE4(sc, el->card + FATMOS_STATP, (*cardstat));
		(*cardstat) += sizeof(uint32_t);

		el->ioblk = descp;
		descp += desc_size;
		el->card_ioblk = carddesc;
		carddesc += desc_size;

		el = (struct fqelem *)((u_char *)el + qel_size);
	}
	queue->tail = queue->head = 0;
}

/*
 * Issue the initialize operation to the card, wait for completion and
 * initialize the on-board and host queue structures with offsets and
 * addresses.
 */
static int
fatm_init_cmd(struct fatm_softc *sc)
{
	int w, c;
	u_char *statp;
	uint32_t card_stat;
	u_int cnt;
	struct fqelem *el;
	cardoff_t off;

	DBG(sc, INIT, ("command"));
	WRITE4(sc, FATMO_ISTAT, 0);
	WRITE4(sc, FATMO_IMASK, 1);
	WRITE4(sc, FATMO_HLOGGER, 0);

	WRITE4(sc, FATMO_INIT + FATMOI_RECEIVE_TRESHOLD, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_NUM_CONNECT, FORE_MAX_VCC);
	WRITE4(sc, FATMO_INIT + FATMOI_CQUEUE_LEN, FATM_CMD_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_TQUEUE_LEN, FATM_TX_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_RQUEUE_LEN, FATM_RX_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_RPD_EXTENSION, RPD_EXTENSIONS);
	WRITE4(sc, FATMO_INIT + FATMOI_TPD_EXTENSION, TPD_EXTENSIONS);

	/*
	 * initialize buffer descriptors
	 */
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_QUEUE_LENGTH,
	    SMALL_SUPPLY_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_BUFFER_SIZE,
	    SMALL_BUFFER_LEN);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_POOL_SIZE,
	    SMALL_POOL_SIZE);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_SUPPLY_BLKSIZE,
	    SMALL_SUPPLY_BLKSIZE);

	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_QUEUE_LENGTH,
	    LARGE_SUPPLY_QLEN);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_BUFFER_SIZE,
	    LARGE_BUFFER_LEN);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_POOL_SIZE,
	    LARGE_POOL_SIZE);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_SUPPLY_BLKSIZE,
	    LARGE_SUPPLY_BLKSIZE);

	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_QUEUE_LENGTH, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_BUFFER_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_POOL_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_SUPPLY_BLKSIZE, 0);

	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_QUEUE_LENGTH, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_BUFFER_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_POOL_SIZE, 0);
	WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_SUPPLY_BLKSIZE, 0);

	/*
	 * Start the command
	 */
	BARRIER_W(sc);
	WRITE4(sc, FATMO_INIT + FATMOI_STATUS, FATM_STAT_PENDING);
	BARRIER_W(sc);
	WRITE4(sc, FATMO_INIT + FATMOI_OP, FATM_OP_INITIALIZE);
	BARRIER_W(sc);

	/*
	 * Busy wait for completion
	 */
	w = 100;
	while (w--) {
		c = READ4(sc, FATMO_INIT + FATMOI_STATUS);
		BARRIER_R(sc);
		if (c & FATM_STAT_COMPLETE)
			break;
		DELAY(1000);
	}

	if (c & FATM_STAT_ERROR)
		return (EIO);

	/*
	 * Initialize the queues
	 */
	statp = sc->stat_mem.mem;
	card_stat = sc->stat_mem.paddr;

	/*
	 * Command queue. This is special in that it's on the card.
	 */
	el = sc->cmdqueue.chunk;
	off = READ4(sc, FATMO_COMMAND_QUEUE);
	DBG(sc, INIT, ("cmd queue=%x", off));
	for (cnt = 0; cnt < FATM_CMD_QLEN; cnt++) {
		el = &((struct cmdqueue *)sc->cmdqueue.chunk + cnt)->q;

		el->card = off;
		off += 32;		/* size of card structure */

		el->statp = (uint32_t *)statp;
		statp += sizeof(uint32_t);
		H_SETSTAT(el->statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, el->statp);

		WRITE4(sc, el->card + FATMOC_STATP, card_stat);
		card_stat += sizeof(uint32_t);
	}
	sc->cmdqueue.tail = sc->cmdqueue.head = 0;

	/*
	 * Now the other queues. These are in memory
	 */
	init_card_queue(sc, &sc->txqueue, FATM_TX_QLEN,
	    sizeof(struct txqueue), TPD_SIZE,
	    READ4(sc, FATMO_TRANSMIT_QUEUE),
	    &statp, &card_stat, sc->txq_mem.mem, sc->txq_mem.paddr);

	init_card_queue(sc, &sc->rxqueue, FATM_RX_QLEN,
	    sizeof(struct rxqueue), RPD_SIZE,
	    READ4(sc, FATMO_RECEIVE_QUEUE),
	    &statp, &card_stat, sc->rxq_mem.mem, sc->rxq_mem.paddr);

	init_card_queue(sc, &sc->s1queue, SMALL_SUPPLY_QLEN,
	    sizeof(struct supqueue), BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE),
	    READ4(sc, FATMO_SMALL_B1_QUEUE),
	    &statp, &card_stat, sc->s1q_mem.mem, sc->s1q_mem.paddr);

	init_card_queue(sc, &sc->l1queue, LARGE_SUPPLY_QLEN,
	    sizeof(struct supqueue), BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE),
	    READ4(sc, FATMO_LARGE_B1_QUEUE),
	    &statp, &card_stat, sc->l1q_mem.mem, sc->l1q_mem.paddr);

	sc->txcnt = 0;

	return (0);
}

/*
 * Read PROM. Called only from attach code. Here we spin because the interrupt
 * handler is not yet set up.
 */
static int
fatm_getprom(struct fatm_softc *sc)
{
	int i;
	struct prom *prom;
	struct cmdqueue *q;

	DBG(sc, INIT, ("reading prom"));
	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = NULL;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
	    BUS_DMASYNC_PREREAD);

	WRITE4(sc, q->q.card + FATMOC_GPROM_BUF, sc->prom_mem.paddr);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_GET_PROM_DATA);
	BARRIER_W(sc);

	for (i = 0; i < 1000; i++) {
		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) &
		    (FATM_STAT_COMPLETE | FATM_STAT_ERROR))
			break;
		DELAY(1000);
	}
	if (i == 1000) {
		if_printf(&sc->ifatm.ifnet, "getprom timeout\n");
		return (EIO);
	}
	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		if_printf(&sc->ifatm.ifnet, "getprom error\n");
		return (EIO);
	}
	H_SETSTAT(q->q.statp, FATM_STAT_FREE);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
	NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);

	bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map,
	    BUS_DMASYNC_POSTREAD);

#ifdef notdef
	{
		u_int i;
		u_char *ptr;

		printf("PROM: ");
		ptr = (u_char *)sc->prom_mem.mem;
		for (i = 0; i < sizeof(struct prom); i++)
			printf("%02x ", *ptr++);
		printf("\n");
	}
#endif

	prom = (struct prom *)sc->prom_mem.mem;

	bcopy(prom->mac + 2, sc->ifatm.mib.esi, 6);
	sc->ifatm.mib.serial = le32toh(prom->serial);
	sc->ifatm.mib.hw_version = le32toh(prom->version);
	sc->ifatm.mib.sw_version = READ4(sc, FATMO_FIRMWARE_RELEASE);

	if_printf(&sc->ifatm.ifnet, "ESI=%02x:%02x:%02x:%02x:%02x:%02x "
	    "serial=%u hw=0x%x sw=0x%x\n", sc->ifatm.mib.esi[0],
	    sc->ifatm.mib.esi[1], sc->ifatm.mib.esi[2], sc->ifatm.mib.esi[3],
	    sc->ifatm.mib.esi[4], sc->ifatm.mib.esi[5], sc->ifatm.mib.serial,
	    sc->ifatm.mib.hw_version, sc->ifatm.mib.sw_version);

	return (0);
}

/*
 * This is the callback function for bus_dmamap_load. We assume that we
 * have a 32-bit bus and so always have one segment.
 */
static void
dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *ptr = (bus_addr_t *)arg;

	if (error != 0) {
		printf("%s: error=%d\n", __func__, error);
		return;
	}
	KASSERT(nsegs == 1, ("too many DMA segments"));
	KASSERT(segs[0].ds_addr <= 0xffffffff, ("DMA address too large %lx",
	    (u_long)segs[0].ds_addr));

	*ptr = segs[0].ds_addr;
}

/*
 * Allocate a chunk of DMA-able memory and map it.
 */
static int
alloc_dma_memory(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
{
	int error;

	mem->mem = NULL;

	if (bus_dma_tag_create(sc->parent_dmat, mem->align, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, mem->size, 1, BUS_SPACE_MAXSIZE_32BIT,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
		if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA tag\n",
		    nm);
		return (ENOMEM);
	}

	error = bus_dmamem_alloc(mem->dmat, &mem->mem, 0, &mem->map);
	if (error) {
		if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA memory: "
		    "%d\n", nm, error);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
	if (error) {
		if_printf(&sc->ifatm.ifnet, "could not load %s DMA memory: "
		    "%d\n", nm, error);
		bus_dmamem_free(mem->dmat, mem->mem, mem->map);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	DBG(sc, DMA, ("DMA %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
	    (u_long)mem->paddr, mem->size, mem->align));

	return (0);
}
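
/*
 * A minimal usage sketch (an illustration, not taken from the driver's
 * attach code): the caller presets the size and alignment fields of the
 * fatm_mem structure before calling alloc_dma_memory().
 *
 *	mem->size = FATM_NREGS * sizeof(uint32_t);
 *	mem->align = 4;
 *	if (alloc_dma_memory(sc, "regs", mem) != 0)
 *		return (ENOMEM);
 */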

#ifdef TEST_DMA_SYNC
static int
alloc_dma_memoryX(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem)
{
	int error;

	mem->mem = NULL;

	if (bus_dma_tag_create(NULL, mem->align, 0,
	    BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, mem->size, 1, mem->size,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) {
		if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA tag\n",
		    nm);
		return (ENOMEM);
	}

	mem->mem = contigmalloc(mem->size, M_DEVBUF, M_WAITOK,
	    BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR_32BIT, mem->align, 0);

	error = bus_dmamap_create(mem->dmat, 0, &mem->map);
	if (error) {
		if_printf(&sc->ifatm.ifnet, "could not allocate %s DMA map: "
		    "%d\n", nm, error);
		contigfree(mem->mem, mem->size, M_DEVBUF);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
	if (error) {
		if_printf(&sc->ifatm.ifnet, "could not load %s DMA memory: "
		    "%d\n", nm, error);
		bus_dmamap_destroy(mem->dmat, mem->map);
		contigfree(mem->mem, mem->size, M_DEVBUF);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
		return (error);
	}

	DBG(sc, DMA, ("DMAX %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem,
	    (u_long)mem->paddr, mem->size, mem->align));

	printf("DMAX: %s V/P/S/Z %p/%lx/%x/%x\n", nm, mem->mem,
	    (u_long)mem->paddr, mem->size, mem->align);

	return (0);
}
#endif /* TEST_DMA_SYNC */

/*
 * Destroy all resources of a DMA-able memory chunk
 */
static void
destroy_dma_memory(struct fatm_mem *mem)
{
	if (mem->mem != NULL) {
		bus_dmamap_unload(mem->dmat, mem->map);
		bus_dmamem_free(mem->dmat, mem->mem, mem->map);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
	}
}

#ifdef TEST_DMA_SYNC
static void
destroy_dma_memoryX(struct fatm_mem *mem)
{
	if (mem->mem != NULL) {
		bus_dmamap_unload(mem->dmat, mem->map);
		bus_dmamap_destroy(mem->dmat, mem->map);
		contigfree(mem->mem, mem->size, M_DEVBUF);
		bus_dma_tag_destroy(mem->dmat);
		mem->mem = NULL;
	}
}
#endif /* TEST_DMA_SYNC */

/*
 * Try to supply buffers to the card if there are free entries in the queues.
 */
static void
fatm_supply_small_buffers(struct fatm_softc *sc)
{
	int nblocks, nbufs;
	struct supqueue *q;
	struct rbd *bd;
	int i, j, error, cnt;
	struct mbuf *m;
	struct rbuf *rb;
	bus_addr_t phys;

	nbufs = max(4 * sc->open_vccs, 32);
	nbufs = min(nbufs, SMALL_POOL_SIZE);
	nbufs -= sc->small_cnt;

	nblocks = (nbufs + SMALL_SUPPLY_BLKSIZE - 1) / SMALL_SUPPLY_BLKSIZE;
	for (cnt = 0; cnt < nblocks; cnt++) {
		q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.head);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
			break;

		bd = (struct rbd *)q->q.ioblk;

		for (i = 0; i < SMALL_SUPPLY_BLKSIZE; i++) {
			if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
				if_printf(&sc->ifatm.ifnet, "out of rbufs\n");
				break;
			}
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
				break;
			}
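			/*
			 * MH_ALIGN places the data pointer so that
			 * SMALL_BUFFER_LEN bytes end at the end of the
			 * mbuf, longword aligned.
			 */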
			MH_ALIGN(m, SMALL_BUFFER_LEN);
			error = bus_dmamap_load(sc->rbuf_tag, rb->map,
			    m->m_data, SMALL_BUFFER_LEN, dmaload_helper,
			    &phys, BUS_DMA_NOWAIT);
			if (error) {
				if_printf(&sc->ifatm.ifnet,
				    "dmamap_load mbuf failed %d", error);
				m_freem(m);
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
				break;
			}
			bus_dmamap_sync(sc->rbuf_tag, rb->map,
			    BUS_DMASYNC_PREREAD);

			LIST_REMOVE(rb, link);
			LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);

			rb->m = m;
			bd[i].handle = rb - sc->rbufs;
			H_SETDESC(bd[i].buffer, phys);
		}

		if (i < SMALL_SUPPLY_BLKSIZE) {
			for (j = 0; j < i; j++) {
				rb = sc->rbufs + bd[j].handle;
				bus_dmamap_unload(sc->rbuf_tag, rb->map);
				m_free(rb->m);
				rb->m = NULL;

				LIST_REMOVE(rb, link);
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
			}
			break;
		}
		H_SYNCQ_PREWRITE(&sc->s1q_mem, bd,
		    sizeof(struct rbd) * SMALL_SUPPLY_BLKSIZE);

		H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		WRITE4(sc, q->q.card, q->q.card_ioblk);
		BARRIER_W(sc);

		sc->small_cnt += SMALL_SUPPLY_BLKSIZE;

		NEXT_QUEUE_ENTRY(sc->s1queue.head, SMALL_SUPPLY_QLEN);
	}
}

/*
 * Try to supply buffers to the card if there are free entries in the queues.
 * We assume that all buffers are within the address space accessible by the
 * card (32-bit), so we don't need bounce buffers.
 */
static void
fatm_supply_large_buffers(struct fatm_softc *sc)
{
	int nbufs, nblocks, cnt;
	struct supqueue *q;
	struct rbd *bd;
	int i, j, error;
	struct mbuf *m;
	struct rbuf *rb;
	bus_addr_t phys;

	nbufs = max(4 * sc->open_vccs, 32);
	nbufs = min(nbufs, LARGE_POOL_SIZE);
	nbufs -= sc->large_cnt;

	nblocks = (nbufs + LARGE_SUPPLY_BLKSIZE - 1) / LARGE_SUPPLY_BLKSIZE;

	for (cnt = 0; cnt < nblocks; cnt++) {
		q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.head);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE)
			break;

		bd = (struct rbd *)q->q.ioblk;

		for (i = 0; i < LARGE_SUPPLY_BLKSIZE; i++) {
			if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) {
				if_printf(&sc->ifatm.ifnet, "out of rbufs\n");
				break;
			}
			if ((m = m_getcl(M_DONTWAIT, MT_DATA,
			    M_PKTHDR)) == NULL) {
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
				break;
			}
			/* No MEXT_ALIGN; place the buffer at the end of
			 * the cluster by hand. */
			m->m_data += MCLBYTES - LARGE_BUFFER_LEN;
			error = bus_dmamap_load(sc->rbuf_tag, rb->map,
			    m->m_data, LARGE_BUFFER_LEN, dmaload_helper,
			    &phys, BUS_DMA_NOWAIT);
			if (error) {
				if_printf(&sc->ifatm.ifnet,
				    "dmamap_load mbuf failed %d", error);
				m_freem(m);
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
				break;
			}

			bus_dmamap_sync(sc->rbuf_tag, rb->map,
			    BUS_DMASYNC_PREREAD);

			LIST_REMOVE(rb, link);
			LIST_INSERT_HEAD(&sc->rbuf_used, rb, link);

			rb->m = m;
			bd[i].handle = rb - sc->rbufs;
			H_SETDESC(bd[i].buffer, phys);
		}

		if (i < LARGE_SUPPLY_BLKSIZE) {
			for (j = 0; j < i; j++) {
				rb = sc->rbufs + bd[j].handle;
				bus_dmamap_unload(sc->rbuf_tag, rb->map);
				m_free(rb->m);
				rb->m = NULL;

				LIST_REMOVE(rb, link);
				LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
			}
			break;
		}
		H_SYNCQ_PREWRITE(&sc->l1q_mem, bd,
		    sizeof(struct rbd) * LARGE_SUPPLY_BLKSIZE);

		H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);
		WRITE4(sc, q->q.card, q->q.card_ioblk);
		BARRIER_W(sc);

		sc->large_cnt += LARGE_SUPPLY_BLKSIZE;

		NEXT_QUEUE_ENTRY(sc->l1queue.head, LARGE_SUPPLY_QLEN);
	}
}

/*
 * Actually start the card. The lock must be held here.
 * Reset, load the firmware, start it, initialize the queues, read the PROM
 * and supply receive buffers to the card.
 */
static void
fatm_init_locked(struct fatm_softc *sc)
{
	struct rxqueue *q;
	int i, c, error;
	uint32_t start;

	DBG(sc, INIT, ("initialize"));
	if (sc->ifatm.ifnet.if_flags & IFF_RUNNING)
		fatm_stop(sc);

	/*
	 * Hard reset the board
	 */
	if (fatm_reset(sc))
		return;

	start = firmware_load(sc);
	if (fatm_start_firmware(sc, start) || fatm_init_cmd(sc) ||
	    fatm_getprom(sc)) {
		fatm_reset(sc);
		return;
	}

	/*
	 * Handle media
	 */
	c = READ4(sc, FATMO_MEDIA_TYPE);
	switch (c) {

	  case FORE_MT_TAXI_100:
		sc->ifatm.mib.media = IFM_ATM_TAXI_100;
		sc->ifatm.mib.pcr = 227273;
		break;

	  case FORE_MT_TAXI_140:
		sc->ifatm.mib.media = IFM_ATM_TAXI_140;
		sc->ifatm.mib.pcr = 318181;
		break;

	  case FORE_MT_UTP_SONET:
		sc->ifatm.mib.media = IFM_ATM_UTP_155;
		sc->ifatm.mib.pcr = 353207;
		break;

	  case FORE_MT_MM_OC3_ST:
	  case FORE_MT_MM_OC3_SC:
		sc->ifatm.mib.media = IFM_ATM_MM_155;
		sc->ifatm.mib.pcr = 353207;
		break;

	  case FORE_MT_SM_OC3_ST:
	  case FORE_MT_SM_OC3_SC:
		sc->ifatm.mib.media = IFM_ATM_SM_155;
		sc->ifatm.mib.pcr = 353207;
		break;

	  default:
		log(LOG_ERR, "fatm: unknown media type %d\n", c);
		sc->ifatm.mib.media = IFM_ATM_UNKNOWN;
		sc->ifatm.mib.pcr = 353207;
		break;
	}
	sc->ifatm.ifnet.if_baudrate = 53 * 8 * sc->ifatm.mib.pcr;
	utopia_init_media(&sc->utopia);

	/*
	 * Initialize the RBDs
	 */
	for (i = 0; i < FATM_RX_QLEN; i++) {
		q = GET_QUEUE(sc->rxqueue, struct rxqueue, i);
		WRITE4(sc, q->q.card + 0, q->q.card_ioblk);
	}
	BARRIER_W(sc);

	/*
	 * Supply buffers to the card
	 */
	fatm_supply_small_buffers(sc);
	fatm_supply_large_buffers(sc);

	/*
	 * Now set the flag that says we are ready
	 */
	sc->ifatm.ifnet.if_flags |= IFF_RUNNING;

	/*
	 * Start the watchdog timer
	 */
	sc->ifatm.ifnet.if_timer = 5;

	/* start SUNI */
	utopia_start(&sc->utopia);

	ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm,
	    sc->utopia.carrier == UTP_CARR_OK);

	/* start all channels */
	for (i = 0; i < FORE_MAX_VCC + 1; i++)
		if (sc->vccs[i] != NULL) {
			sc->vccs[i]->vflags |= FATM_VCC_REOPEN;
			error = fatm_load_vc(sc, sc->vccs[i]);
			if (error != 0) {
				if_printf(&sc->ifatm.ifnet, "reopening %u "
				    "failed: %d\n", i, error);
				sc->vccs[i]->vflags &= ~FATM_VCC_REOPEN;
			}
		}

	DBG(sc, INIT, ("done"));
}

/*
 * This is exported as the initialization function.
 */
static void
fatm_init(void *p)
{
	struct fatm_softc *sc = p;

	FATM_LOCK(sc);
	fatm_init_locked(sc);
	FATM_UNLOCK(sc);
}

/************************************************************/
/*
 * The INTERRUPT handling
 */
/*
 * Check the command queue. If a command was completed, call the completion
 * function for that command.
 */
static void
fatm_intr_drain_cmd(struct fatm_softc *sc)
{
	struct cmdqueue *q;
	int stat;

	/*
	 * Drain command queue
	 */
	for (;;) {
		q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if (stat != FATM_STAT_COMPLETE &&
		    stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
		    stat != FATM_STAT_ERROR)
			break;

		(*q->cb)(sc, q);

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN);
	}
}

/*
 * Drain the small buffer supply queue.
 */
static void
fatm_intr_drain_small_buffers(struct fatm_softc *sc)
{
	struct supqueue *q;
	int stat;

	for (;;) {
		q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if ((stat & FATM_STAT_COMPLETE) == 0)
			break;
		if (stat & FATM_STAT_ERROR)
			log(LOG_ERR, "%s: status %x\n", __func__, stat);

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		NEXT_QUEUE_ENTRY(sc->s1queue.tail, SMALL_SUPPLY_QLEN);
	}
}

/*
 * Drain the large buffer supply queue.
 */
static void
fatm_intr_drain_large_buffers(struct fatm_softc *sc)
{
	struct supqueue *q;
	int stat;

	for (;;) {
		q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if ((stat & FATM_STAT_COMPLETE) == 0)
			break;
		if (stat & FATM_STAT_ERROR)
			log(LOG_ERR, "%s: status %x\n", __func__, stat);

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		NEXT_QUEUE_ENTRY(sc->l1queue.tail, LARGE_SUPPLY_QLEN);
	}
}

/*
 * Check the receive queue. Send any received PDU up the protocol stack
 * (except when there was an error or the VCI appears to be closed; in
 * this case the PDU is discarded).
 */
static void
fatm_intr_drain_rx(struct fatm_softc *sc)
{
	struct rxqueue *q;
	int stat, mlen;
	u_int i;
	uint32_t h;
	struct mbuf *last, *m0;
	struct rpd *rpd;
	struct rbuf *rb;
	u_int vci, vpi, pt;
	struct atm_pseudohdr aph;
	struct ifnet *ifp;
	struct card_vcc *vc;

	for (;;) {
		q = GET_QUEUE(sc->rxqueue, struct rxqueue, sc->rxqueue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if ((stat & FATM_STAT_COMPLETE) == 0)
			break;

		rpd = (struct rpd *)q->q.ioblk;
		H_SYNCQ_POSTREAD(&sc->rxq_mem, rpd, RPD_SIZE);

		rpd->nseg = le32toh(rpd->nseg);
		mlen = 0;
		m0 = last = NULL;
		for (i = 0; i < rpd->nseg; i++) {
			rb = sc->rbufs + rpd->segment[i].handle;
			if (m0 == NULL) {
				m0 = last = rb->m;
			} else {
				last->m_next = rb->m;
				last = rb->m;
			}
			last->m_next = NULL;
			if (last->m_flags & M_EXT)
				sc->large_cnt--;
			else
				sc->small_cnt--;
			bus_dmamap_sync(sc->rbuf_tag, rb->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->rbuf_tag, rb->map);
			rb->m = NULL;

			LIST_REMOVE(rb, link);
			LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);

			last->m_len = le32toh(rpd->segment[i].length);
			mlen += last->m_len;
		}

		m0->m_pkthdr.len = mlen;
		m0->m_pkthdr.rcvif = &sc->ifatm.ifnet;

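		/*
		 * The 32-bit header word is in ATM UNI cell header layout:
		 * GFC(4) / VPI(8) / VCI(16) / PT(3) / CLP(1).
		 */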
		h = le32toh(rpd->atm_header);
		vpi = (h >> 20) & 0xff;
		vci = (h >> 4 ) & 0xffff;
		pt  = (h >> 1 ) & 0x7;

		/*
		 * Locate the VCC this packet belongs to
		 */
		if (!VC_OK(sc, vpi, vci))
			vc = NULL;
		else if ((vc = sc->vccs[vci]) == NULL ||
		    !(sc->vccs[vci]->vflags & FATM_VCC_OPEN)) {
			sc->istats.rx_closed++;
			vc = NULL;
		}

		DBG(sc, RCV, ("RCV: vc=%u.%u pt=%u mlen=%d %s", vpi, vci,
		    pt, mlen, vc == NULL ? "dropped" : ""));

		if (vc == NULL) {
			m_freem(m0);
		} else {
			ATM_PH_FLAGS(&aph) = vc->param.flags;
			ATM_PH_VPI(&aph) = vpi;
			ATM_PH_SETVCI(&aph, vci);

			ifp = &sc->ifatm.ifnet;
			ifp->if_ipackets++;

			vc->ipackets++;
			vc->ibytes += m0->m_pkthdr.len;

			atm_input(ifp, &aph, m0, vc->rxhand);
		}

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		WRITE4(sc, q->q.card, q->q.card_ioblk);
		BARRIER_W(sc);

		NEXT_QUEUE_ENTRY(sc->rxqueue.tail, FATM_RX_QLEN);
	}
}

/*
 * Check the transmit queue. Free the mbuf chains that we were transmitting.
 */
static void
fatm_intr_drain_tx(struct fatm_softc *sc)
{
	struct txqueue *q;
	int stat;

	/*
	 * Drain tx queue
	 */
	for (;;) {
		q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.tail);

		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		stat = H_GETSTAT(q->q.statp);

		if (stat != FATM_STAT_COMPLETE &&
		    stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) &&
		    stat != FATM_STAT_ERROR)
			break;

		H_SETSTAT(q->q.statp, FATM_STAT_FREE);
		H_SYNCSTAT_PREWRITE(sc, q->q.statp);

		bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tx_tag, q->map);

		m_freem(q->m);
		q->m = NULL;
		sc->txcnt--;

		NEXT_QUEUE_ENTRY(sc->txqueue.tail, FATM_TX_QLEN);
	}
}

/*
 * Interrupt handler
 */
static void
fatm_intr(void *p)
{
	struct fatm_softc *sc = (struct fatm_softc *)p;

	FATM_LOCK(sc);
	if (!READ4(sc, FATMO_PSR)) {
		FATM_UNLOCK(sc);
		return;
	}
	WRITE4(sc, FATMO_HCR, FATM_HCR_CLRIRQ);

	if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) {
		FATM_UNLOCK(sc);
		return;
	}
	fatm_intr_drain_cmd(sc);
	fatm_intr_drain_rx(sc);
	fatm_intr_drain_tx(sc);
	fatm_intr_drain_small_buffers(sc);
	fatm_intr_drain_large_buffers(sc);
	fatm_supply_small_buffers(sc);
	fatm_supply_large_buffers(sc);

	FATM_UNLOCK(sc);

	if (sc->retry_tx && _IF_QLEN(&sc->ifatm.ifnet.if_snd))
		(*sc->ifatm.ifnet.if_start)(&sc->ifatm.ifnet);
}

/*
 * Get device statistics. This must be called with the softc locked.
 * We use a preallocated buffer, so we need to protect this buffer.
 * We do this by using a condition variable and a flag. If the flag is set
 * the buffer is in use by one thread (one thread is executing a GETSTAT
 * card command). In this case all other threads that are trying to get
 * statistics block on that condition variable. When the thread finishes
 * using the buffer it resets the flag and signals the condition variable.
 * This will wakeup the next thread that is waiting for the buffer. If the
 * interface is stopped the stopping function will broadcast the cv. All
 * threads will find that the interface has been stopped and return.
 *
 * Acquiring the buffer is done by the fatm_getstat() function. The freeing
 * must be done by the caller when it has finished using the buffer.
 */
static void
fatm_getstat_complete(struct fatm_softc *sc, struct cmdqueue *q)
{

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.get_stat_errors++;
		q->error = EIO;
	}
	wakeup(&sc->sadi_mem);
}

static int
fatm_getstat(struct fatm_softc *sc)
{
	int error;
	struct cmdqueue *q;

	/*
	 * Wait until either the interface is stopped or we can get the
	 * statistics buffer
	 */
	for (;;) {
		if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
			return (EIO);
		if (!(sc->flags & FATM_STAT_INUSE))
			break;
		cv_wait(&sc->cv_stat, &sc->mtx);
	}
	sc->flags |= FATM_STAT_INUSE;

	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
		sc->istats.cmd_queue_full++;
		return (EIO);
	}
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = fatm_getstat_complete;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

	bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
	    BUS_DMASYNC_PREREAD);

	WRITE4(sc, q->q.card + FATMOC_GSTAT_BUF,
	    sc->sadi_mem.paddr);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP,
	    FATM_OP_REQUEST_STATS | FATM_OP_INTERRUPT_SEL);
	BARRIER_W(sc);

	/*
	 * Wait for the command to complete
	 */
	error = msleep(&sc->sadi_mem, &sc->mtx, PZERO | PCATCH,
	    "fatm_stat", hz);

	switch (error) {

	  case EWOULDBLOCK:
		error = EIO;
		break;

	  case ERESTART:
		error = EINTR;
		break;

	  case 0:
		bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map,
		    BUS_DMASYNC_POSTREAD);
		error = q->error;
		break;
	}

	/*
	 * Swap statistics (the card delivers them in big-endian order)
	 */
	if (q->error == 0) {
		u_int i;
		uint32_t *p = (uint32_t *)sc->sadi_mem.mem;

		for (i = 0; i < sizeof(struct fatm_stats) / sizeof(uint32_t);
		    i++, p++)
			*p = be32toh(*p);
	}

	return (error);
}

/*
 * Create a copy of a single mbuf. It can have either internal or
 * external data, it may have a packet header. External data is really
 * copied, so the new buffer is writeable.
 */
static struct mbuf *
copy_mbuf(struct mbuf *m)
{
	struct mbuf *new;

	MGET(new, M_DONTWAIT, MT_DATA);
	if (new == NULL)
		return (NULL);

	if (m->m_flags & M_PKTHDR) {
		M_MOVE_PKTHDR(new, m);
		if (m->m_len > MHLEN) {
			MCLGET(new, M_TRYWAIT);
			/* check that the cluster was actually attached */
			if ((new->m_flags & M_EXT) == 0) {
				m_free(new);
				return (NULL);
			}
		}
	} else {
		if (m->m_len > MLEN) {
			MCLGET(new, M_TRYWAIT);
			/* check that the cluster was actually attached */
			if ((new->m_flags & M_EXT) == 0) {
				m_free(new);
				return (NULL);
			}
		}
	}

	bcopy(m->m_data, new->m_data, m->m_len);
	new->m_len = m->m_len;
	new->m_flags &= ~M_RDONLY;

	return (new);
}

/*
 * All segments must have a four byte aligned buffer address and a four
 * byte aligned length. Step through an mbuf chain and check these conditions.
 * If the buffer address is not aligned and this is a normal mbuf, move
 * the data down. Else make a copy of the mbuf with aligned data.
 * If the buffer length is not aligned, steal data from the next mbuf.
 * We don't need to check whether this has more than one external reference,
 * because stealing data doesn't change the external cluster.
 * If the last mbuf is not aligned, fill with zeroes.
 *
 * Return packet length (well we should have this in the packet header),
 * but be careful not to count the zero fill at the end.
 *
 * If fixing fails free the chain and zero the pointer.
 *
 * We assume that aligning the virtual address also aligns the mapped bus
 * address.
 */
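/*
 * Example (assuming aligned buffer addresses): a 10-byte mbuf followed by
 * a 6-byte mbuf leaves this function as a 12-byte mbuf (two bytes stolen
 * from the successor) followed by a 4-byte mbuf; the returned length is
 * 16 and no zero fill is needed.
 */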
static u_int
fatm_fix_chain(struct fatm_softc *sc, struct mbuf **mp)
{
	struct mbuf *m = *mp, *prev = NULL, *next, *new;
	u_int mlen = 0, fill = 0;
	int first, off;
	u_char *d, *cp;

	do {
		next = m->m_next;

		if ((uintptr_t)mtod(m, void *) % 4 != 0 ||
		    (m->m_len % 4 != 0 && next)) {
			/*
			 * Needs fixing
			 */
			first = (m == *mp);

			d = mtod(m, u_char *);
			if ((off = (uintptr_t)(void *)d % 4) != 0) {
				if (!(m->m_flags & M_EXT) || !MEXT_IS_REF(m)) {
					sc->istats.fix_addr_copy++;
					bcopy(d, d - off, m->m_len);
					m->m_data = (caddr_t)(d - off);
				} else {
					if ((new = copy_mbuf(m)) == NULL) {
						sc->istats.fix_addr_noext++;
						goto fail;
					}
					sc->istats.fix_addr_ext++;
					if (prev)
						prev->m_next = new;
					new->m_next = next;
					m_free(m);
					m = new;
				}
			}

			if ((off = m->m_len % 4) != 0) {
				if ((m->m_flags & M_EXT) && MEXT_IS_REF(m)) {
					if ((new = copy_mbuf(m)) == NULL) {
						sc->istats.fix_len_noext++;
						goto fail;
					}
					sc->istats.fix_len_copy++;
					if (prev)
						prev->m_next = new;
					new->m_next = next;
					m_free(m);
					m = new;
				} else
					sc->istats.fix_len++;
				d = mtod(m, u_char *) + m->m_len;
				off = 4 - off;
				while (off) {
					if (next == NULL) {
						*d++ = 0;
						fill++;
					} else if (next->m_len == 0) {
						sc->istats.fix_empty++;
						next = m_free(next);
						continue;
					} else {
						cp = mtod(next, u_char *);
						*d++ = *cp++;
						next->m_len--;
						next->m_data = (caddr_t)cp;
					}
					off--;
					m->m_len++;
				}
			}

			if (first)
				*mp = m;
		}

		mlen += m->m_len;
		prev = m;
	} while ((m = next) != NULL);

	return (mlen - fill);

  fail:
	m_freem(*mp);
	*mp = NULL;
	return (0);
}

/*
 * The helper function is used to load the computed physical addresses
 * into the transmit descriptor.
 */
static void
fatm_tpd_load(void *varg, bus_dma_segment_t *segs, int nsegs,
    bus_size_t mapsize, int error)
{
	struct tpd *tpd = varg;

	if (error)
		return;

	KASSERT(nsegs <= TPD_EXTENSIONS + TXD_FIXED, ("too many segments"));

	tpd->spec = 0;
	while (nsegs--) {
		H_SETDESC(tpd->segment[tpd->spec].buffer, segs->ds_addr);
		H_SETDESC(tpd->segment[tpd->spec].length, segs->ds_len);
		tpd->spec++;
		segs++;
	}
}

/*
 * Start output.
 *
 * Note that we update the internal statistics without the lock here.
 */
static int
fatm_tx(struct fatm_softc *sc, struct mbuf *m, struct card_vcc *vc, u_int mlen)
{
	struct txqueue *q;
	u_int nblks;
	int error, aal, nsegs;
	struct tpd *tpd;

	/*
	 * Get a queue element.
	 * If there isn't one - try to drain the transmit queue.
	 * We used to sleep here if that didn't help, but we
	 * must not sleep here, because we are called with locks held.
	 */
	q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
		fatm_intr_drain_tx(sc);
		H_SYNCSTAT_POSTREAD(sc, q->q.statp);
		if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) {
			if (sc->retry_tx) {
				sc->istats.tx_retry++;
				IF_PREPEND(&sc->ifatm.ifnet.if_snd, m);
				return (1);
			}
			sc->istats.tx_queue_full++;
			m_freem(m);
			return (0);
		}
		sc->istats.tx_queue_almost_full++;
	}

	tpd = q->q.ioblk;

	m->m_data += sizeof(struct atm_pseudohdr);
	m->m_len -= sizeof(struct atm_pseudohdr);

	/* map the mbuf */
	error = bus_dmamap_load_mbuf(sc->tx_tag, q->map, m,
	    fatm_tpd_load, tpd, BUS_DMA_NOWAIT);
	if (error) {
		sc->ifatm.ifnet.if_oerrors++;
		if_printf(&sc->ifatm.ifnet, "mbuf load error=%d\n", error);
		m_freem(m);
		return (0);
	}
	nsegs = tpd->spec;

	bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_PREWRITE);

	/*
	 * OK. Now go and do it.
	 */
	aal = (vc->param.aal == ATMIO_AAL_5) ? 5 : 0;

	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);
	q->m = m;

	/*
	 * If the transmit queue is almost full, schedule a
	 * transmit interrupt so that transmit descriptors can
	 * be recycled.
	 */
	H_SETDESC(tpd->spec, TDX_MKSPEC((sc->txcnt >=
	    (4 * FATM_TX_QLEN) / 5), aal, nsegs, mlen));
	H_SETDESC(tpd->atm_header, TDX_MKHDR(vc->param.vpi,
	    vc->param.vci, 0, 0));

	if (vc->param.traffic == ATMIO_TRAFFIC_UBR)
		H_SETDESC(tpd->stream, 0);
	else {
		u_int i;

		for (i = 0; i < RATE_TABLE_SIZE; i++)
			if (rate_table[i].cell_rate < vc->param.tparam.pcr)
				break;
		if (i > 0)
			i--;
		H_SETDESC(tpd->stream, rate_table[i].ratio);
	}
	H_SYNCQ_PREWRITE(&sc->txq_mem, tpd, TPD_SIZE);

	nblks = TDX_SEGS2BLKS(nsegs);

	DBG(sc, XMIT, ("XMIT: mlen=%d spec=0x%x nsegs=%d blocks=%d",
	    mlen, le32toh(tpd->spec), nsegs, nblks));

	WRITE4(sc, q->q.card + 0, q->q.card_ioblk | nblks);
	BARRIER_W(sc);

	sc->txcnt++;
	sc->ifatm.ifnet.if_opackets++;
	vc->obytes += m->m_pkthdr.len;
	vc->opackets++;

	NEXT_QUEUE_ENTRY(sc->txqueue.head, FATM_TX_QLEN);

	return (0);
}
2029
2030static void
2031fatm_start(struct ifnet *ifp)
2032{
2033	struct atm_pseudohdr aph;
2034	struct fatm_softc *sc;
2035	struct mbuf *m;
2036	u_int mlen, vpi, vci;
2037	struct card_vcc *vc;
2038
2039	sc = (struct fatm_softc *)ifp->if_softc;
2040
2041	while (1) {
2042		IF_DEQUEUE(&ifp->if_snd, m);
2043		if (m == NULL)
2044			break;
2045
2046		/*
2047		 * Loop through the mbuf chain and compute the total length
2048		 * of the packet. Check that all data pointer are
2049		 * 4 byte aligned. If they are not, call fatm_mfix to
2050		 * fix that problem. This comes more or less from the
2051		 * en driver.
2052		 */
		mlen = fatm_fix_chain(sc, &m);
		if (m == NULL)
			continue;

		if (m->m_len < sizeof(struct atm_pseudohdr) &&
		    (m = m_pullup(m, sizeof(struct atm_pseudohdr))) == NULL)
			continue;

		aph = *mtod(m, struct atm_pseudohdr *);
		mlen -= sizeof(struct atm_pseudohdr);

		if (mlen == 0) {
			m_freem(m);
			continue;
		}
		if (mlen > FATM_MAXPDU) {
			sc->istats.tx_pdu2big++;
			m_freem(m);
			continue;
		}

		vci = ATM_PH_VCI(&aph);
		vpi = ATM_PH_VPI(&aph);

		/*
		 * From here on we need the softc
		 */
		FATM_LOCK(sc);
		if (!(ifp->if_flags & IFF_RUNNING)) {
			FATM_UNLOCK(sc);
			m_freem(m);
			break;
		}
		if (!VC_OK(sc, vpi, vci) || (vc = sc->vccs[vci]) == NULL ||
		    !(vc->vflags & FATM_VCC_OPEN)) {
			FATM_UNLOCK(sc);
			m_freem(m);
			continue;
		}
		if (fatm_tx(sc, m, vc, mlen)) {
			FATM_UNLOCK(sc);
			break;
		}
		FATM_UNLOCK(sc);
	}
}
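
#if 0
/*
 * Illustrative only (never compiled): a minimal sketch of a sender
 * that hands a packet to the queue drained by fatm_start() above.
 * Normally the pseudo header is prepended by the ATM output path
 * (e.g. NATM); the pseudo header macros are assumed to be the ones
 * from <net/if_atm.h>, and the function and its arguments are made
 * up for the example.
 */
static void
example_send(struct ifnet *ifp, struct mbuf *m, u_int vpi, u_int vci)
{
	struct atm_pseudohdr aph;

	ATM_PH_FLAGS(&aph) = 0;
	ATM_PH_VPI(&aph) = vpi;
	ATM_PH_SETVCI(&aph, vci);

	/* prepend the pseudo header that fatm_start() strips again */
	M_PREPEND(m, sizeof(aph), M_DONTWAIT);
	if (m == NULL)
		return;
	*mtod(m, struct atm_pseudohdr *) = aph;

	IF_ENQUEUE(&ifp->if_snd, m);
	(*ifp->if_start)(ifp);
}
#endif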

/*
 * VCC management
 *
 * This may seem complicated. The reason is that we need an
 * asynchronous open/close for the NATM VCCs because our ioctl handler
 * is called with the radix node head of the routing table locked. Therefore
 * we cannot sleep there and wait for the open/close to succeed. For this
 * reason we just initiate the operation from the ioctl.
 */

/*
 * Command the card to open/close a VC.
 * Return the queue entry for waiting if we are successful.
 */
static struct cmdqueue *
fatm_start_vcc(struct fatm_softc *sc, u_int vpi, u_int vci, uint32_t cmd,
    u_int mtu, void (*func)(struct fatm_softc *, struct cmdqueue *))
{
	struct cmdqueue *q;

	q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head);

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) {
		sc->istats.cmd_queue_full++;
		return (NULL);
	}
	NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

	q->error = 0;
	q->cb = func;
	H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
	H_SYNCSTAT_PREWRITE(sc, q->q.statp);

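	/*
	 * Hand the request to the card: first the VPI/VCI pair and the
	 * MTU, then the opcode. The opcode is written last so that the
	 * card sees complete arguments; the barriers enforce that
	 * ordering.
	 */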
	WRITE4(sc, q->q.card + FATMOC_ACTIN_VPVC, MKVPVC(vpi, vci));
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_ACTIN_MTU, mtu);
	BARRIER_W(sc);
	WRITE4(sc, q->q.card + FATMOC_OP, cmd);
	BARRIER_W(sc);

	return (q);
}
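
/*
 * Note that fatm_start_vcc() only queues the request: completion is
 * reported through the queue entry's status word and delivered by the
 * interrupt handler, which invokes the callback given as 'func'.
 */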

/*
 * The VC has been opened/closed and somebody is waiting for this in
 * fatm_waitvcc(). Wake up the waiter.
 */
static void
fatm_cmd_complete(struct fatm_softc *sc, struct cmdqueue *q)
{

	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.get_stat_errors++;
		q->error = EIO;
	}
	wakeup(q);
}

/*
 * Open complete
 */
static void
fatm_open_finish(struct fatm_softc *sc, struct card_vcc *vc)
{
	vc->vflags &= ~FATM_VCC_TRY_OPEN;
	vc->vflags |= FATM_VCC_OPEN;

	if (vc->vflags & FATM_VCC_REOPEN) {
		vc->vflags &= ~FATM_VCC_REOPEN;
		return;
	}

	/* inform management if this is not an NG
	 * VCC or it's an NG PVC. */
	if (!(vc->param.flags & ATMIO_FLAG_NG) ||
	    (vc->param.flags & ATMIO_FLAG_PVC))
		ATMEV_SEND_VCC_CHANGED(&sc->ifatm, 0, vc->param.vci, 1);
}

/*
 * The VC that we have tried to open asynchronously has been opened.
 */
static void
fatm_open_complete(struct fatm_softc *sc, struct cmdqueue *q)
{
	u_int vci;
	struct card_vcc *vc;

	vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
	vc = sc->vccs[vci];
	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.get_stat_errors++;
		sc->vccs[vci] = NULL;
		uma_zfree(sc->vcc_zone, vc);
		if_printf(&sc->ifatm.ifnet, "opening VCI %u failed\n", vci);
		return;
	}
	fatm_open_finish(sc, vc);
}

/*
 * Wait on the queue entry until the VCC is opened/closed.
 */
static int
fatm_waitvcc(struct fatm_softc *sc, struct cmdqueue *q)
{
	int error;

	/*
	 * Wait for the command to complete (at most one second;
	 * on timeout msleep returns EWOULDBLOCK)
	 */
	error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_vci", hz);

	if (error != 0)
		return (error);
	return (q->error);
}

/*
 * Start to open a VCC. This just initiates the operation.
 */
static int
fatm_open_vcc(struct fatm_softc *sc, struct atmio_openvcc *op)
{
	int error;
	struct card_vcc *vc;

	/*
	 * Check parameters
	 */
	if ((op->param.flags & ATMIO_FLAG_NOTX) &&
	    (op->param.flags & ATMIO_FLAG_NORX))
		return (EINVAL);

	if (!VC_OK(sc, op->param.vpi, op->param.vci))
		return (EINVAL);
	if (op->param.aal != ATMIO_AAL_0 && op->param.aal != ATMIO_AAL_5)
		return (EINVAL);

	vc = uma_zalloc(sc->vcc_zone, M_NOWAIT | M_ZERO);
	if (vc == NULL)
		return (ENOMEM);

	error = 0;

	FATM_LOCK(sc);
	if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) {
		error = EIO;
		goto done;
	}
	if (sc->vccs[op->param.vci] != NULL) {
		error = EBUSY;
		goto done;
	}
	vc->param = op->param;
	vc->rxhand = op->rxhand;

	switch (op->param.traffic) {

	  case ATMIO_TRAFFIC_UBR:
		break;

	  case ATMIO_TRAFFIC_CBR:
		if (op->param.tparam.pcr == 0 ||
		    op->param.tparam.pcr > sc->ifatm.mib.pcr) {
			error = EINVAL;
			goto done;
		}
		break;

	  default:
		error = EINVAL;
		goto done;
	}
	vc->ibytes = vc->obytes = 0;
	vc->ipackets = vc->opackets = 0;

	vc->vflags = FATM_VCC_TRY_OPEN;
	sc->vccs[op->param.vci] = vc;
	sc->open_vccs++;

	error = fatm_load_vc(sc, vc);
	if (error != 0) {
		sc->vccs[op->param.vci] = NULL;
		sc->open_vccs--;
		goto done;
	}

	/* don't free below */
	vc = NULL;

  done:
	FATM_UNLOCK(sc);
	if (vc != NULL)
		uma_zfree(sc->vcc_zone, vc);
	return (error);
}
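
#if 0
/*
 * Illustrative only (never compiled): a minimal sketch of the open
 * request a caller might pass down via SIOCATMOPENVCC. The field
 * names are the ones checked above; the concrete values are made up.
 */
static int
example_open(struct fatm_softc *sc)
{
	struct atmio_openvcc op;

	bzero(&op, sizeof(op));
	op.param.vpi = 0;
	op.param.vci = 42;
	op.param.aal = ATMIO_AAL_5;
	op.param.traffic = ATMIO_TRAFFIC_UBR;	/* CBR also needs tparam.pcr */
	op.rxhand = NULL;			/* handed back with received PDUs */

	return (fatm_open_vcc(sc, &op));
}
#endif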

/*
 * Try to initialize the given VC
 */
static int
fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc)
{
	uint32_t cmd;
	struct cmdqueue *q;
	int error;

	/*
	 * Command and buffer strategy. The AAL type (0 or 5) is encoded
	 * at bit 8 of the command word; the field at bit 16 selects the
	 * buffer strategy (always strategy 0 here).
	 */
	cmd = FATM_OP_ACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL | (0 << 16);
	if (vc->param.aal == ATMIO_AAL_0)
		cmd |= (0 << 8);
	else
		cmd |= (5 << 8);

	q = fatm_start_vcc(sc, vc->param.vpi, vc->param.vci, cmd, 1,
	    (vc->param.flags & ATMIO_FLAG_ASYNC) ?
	    fatm_open_complete : fatm_cmd_complete);
	if (q == NULL)
		return (EIO);

	if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) {
		error = fatm_waitvcc(sc, q);
		if (error != 0)
			return (error);
		fatm_open_finish(sc, vc);
	}
	return (0);
}
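
/*
 * Design note: a synchronous open (no ATMIO_FLAG_ASYNC) sleeps in
 * fatm_waitvcc() and finishes the open itself; an asynchronous open
 * returns immediately and the open is finished by fatm_open_complete()
 * from the interrupt path.
 */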

/*
 * Finish close
 */
static void
fatm_close_finish(struct fatm_softc *sc, struct card_vcc *vc)
{
	/* inform management if this is not an NG
	 * VCC or it's an NG PVC. */
	if (!(vc->param.flags & ATMIO_FLAG_NG) ||
	    (vc->param.flags & ATMIO_FLAG_PVC))
		ATMEV_SEND_VCC_CHANGED(&sc->ifatm, 0, vc->param.vci, 0);

	sc->vccs[vc->param.vci] = NULL;
	sc->open_vccs--;

	uma_zfree(sc->vcc_zone, vc);
}

/*
 * The VC has been closed.
 */
static void
fatm_close_complete(struct fatm_softc *sc, struct cmdqueue *q)
{
	u_int vci;
	struct card_vcc *vc;

	vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC));
	vc = sc->vccs[vci];
	H_SYNCSTAT_POSTREAD(sc, q->q.statp);
	if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) {
		sc->istats.get_stat_errors++;
		/* keep the VCC in that state */
		if_printf(&sc->ifatm.ifnet, "closing VCI %u failed\n", vci);
		return;
	}

	fatm_close_finish(sc, vc);
}

/*
 * Initiate closing a VCC
 */
static int
fatm_close_vcc(struct fatm_softc *sc, struct atmio_closevcc *cl)
{
	int error;
	struct cmdqueue *q;
	struct card_vcc *vc;

	if (!VC_OK(sc, cl->vpi, cl->vci))
		return (EINVAL);

	error = 0;

	FATM_LOCK(sc);
	if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) {
		error = EIO;
		goto done;
	}
	vc = sc->vccs[cl->vci];
	if (vc == NULL || !(vc->vflags & (FATM_VCC_OPEN | FATM_VCC_TRY_OPEN))) {
		error = ENOENT;
		goto done;
	}

	q = fatm_start_vcc(sc, cl->vpi, cl->vci,
	    FATM_OP_DEACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL, 1,
	    (vc->param.flags & ATMIO_FLAG_ASYNC) ?
	    fatm_close_complete : fatm_cmd_complete);
	if (q == NULL) {
		error = EIO;
		goto done;
	}

	vc->vflags &= ~(FATM_VCC_OPEN | FATM_VCC_TRY_OPEN);
	vc->vflags |= FATM_VCC_TRY_CLOSE;

	if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) {
		error = fatm_waitvcc(sc, q);
		if (error != 0)
			goto done;

		fatm_close_finish(sc, vc);
	}

  done:
	FATM_UNLOCK(sc);
	return (error);
}
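
#if 0
/*
 * Illustrative only (never compiled): the matching close request for
 * the example above; atmio_closevcc carries just the VPI/VCI pair.
 */
static int
example_close(struct fatm_softc *sc)
{
	struct atmio_closevcc cl;

	cl.vpi = 0;
	cl.vci = 42;

	return (fatm_close_vcc(sc, &cl));
}
#endif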

/*
 * IOCTL handler
 */
static int
fatm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t arg)
{
	int error;
	struct fatm_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)arg;
	struct ifreq *ifr = (struct ifreq *)arg;
	struct atmio_closevcc *cl = (struct atmio_closevcc *)arg;
	struct atmio_openvcc *op = (struct atmio_openvcc *)arg;
	struct atmio_vcctable *vtab;

	error = 0;
	switch (cmd) {

	  case SIOCATMOPENVCC:		/* kernel internal use */
		error = fatm_open_vcc(sc, op);
		break;

	  case SIOCATMCLOSEVCC:		/* kernel internal use */
		error = fatm_close_vcc(sc, cl);
		break;

	  case SIOCSIFADDR:
		FATM_LOCK(sc);
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			fatm_init_locked(sc);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		  case AF_INET:
		  case AF_INET6:
			ifa->ifa_rtrequest = atm_rtrequest;
			break;
#endif
		  default:
			break;
		}
		FATM_UNLOCK(sc);
		break;

	  case SIOCSIFFLAGS:
		FATM_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				fatm_init_locked(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				fatm_stop(sc);
			}
		}
		FATM_UNLOCK(sc);
		break;

	  case SIOCGIFMEDIA:
	  case SIOCSIFMEDIA:
		if (ifp->if_flags & IFF_RUNNING)
			error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		else
			error = EINVAL;
		break;

	  case SIOCATMGVCCS:
		/* return vcc table */
		vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
		    FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 1);
		error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
		    vtab->count * sizeof(vtab->vccs[0]));
		free(vtab, M_DEVBUF);
		break;

	  case SIOCATMGETVCCS:	/* internal netgraph use */
		vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
		    FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 0);
		if (vtab == NULL) {
			error = ENOMEM;
			break;
		}
		*(void **)arg = vtab;
		break;

	  default:
		DBG(sc, IOCTL, ("+++ cmd=%08lx arg=%p", cmd, arg));
		error = EINVAL;
		break;
	}

	return (error);
}

/*
 * Detach from the interface and free all resources allocated during
 * initialisation and later.
 */
static int
fatm_detach(device_t dev)
{
	u_int i;
	struct rbuf *rb;
	struct fatm_softc *sc;
	struct txqueue *tx;

	sc = (struct fatm_softc *)device_get_softc(dev);

	if (device_is_alive(dev)) {
		FATM_LOCK(sc);
		fatm_stop(sc);
		utopia_detach(&sc->utopia);
		FATM_UNLOCK(sc);
		atm_ifdetach(&sc->ifatm.ifnet);		/* XXX race */
	}

	if (sc->ih != NULL)
		bus_teardown_intr(dev, sc->irqres, sc->ih);

	while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) {
		if_printf(&sc->ifatm.ifnet, "rbuf %p still in use!\n", rb);
		bus_dmamap_unload(sc->rbuf_tag, rb->map);
		m_freem(rb->m);
		LIST_REMOVE(rb, link);
		LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
	}

	if (sc->txqueue.chunk != NULL) {
		for (i = 0; i < FATM_TX_QLEN; i++) {
			tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
			bus_dmamap_destroy(sc->tx_tag, tx->map);
		}
	}

	while ((rb = LIST_FIRST(&sc->rbuf_free)) != NULL) {
		bus_dmamap_destroy(sc->rbuf_tag, rb->map);
		LIST_REMOVE(rb, link);
	}

	if (sc->rbufs != NULL)
		free(sc->rbufs, M_DEVBUF);
	if (sc->vccs != NULL) {
		for (i = 0; i < FORE_MAX_VCC + 1; i++)
			if (sc->vccs[i] != NULL) {
				uma_zfree(sc->vcc_zone, sc->vccs[i]);
				sc->vccs[i] = NULL;
			}
		free(sc->vccs, M_DEVBUF);
	}
	if (sc->vcc_zone != NULL)
		uma_zdestroy(sc->vcc_zone);

	if (sc->l1queue.chunk != NULL)
		free(sc->l1queue.chunk, M_DEVBUF);
	if (sc->s1queue.chunk != NULL)
		free(sc->s1queue.chunk, M_DEVBUF);
	if (sc->rxqueue.chunk != NULL)
		free(sc->rxqueue.chunk, M_DEVBUF);
	if (sc->txqueue.chunk != NULL)
		free(sc->txqueue.chunk, M_DEVBUF);
	if (sc->cmdqueue.chunk != NULL)
		free(sc->cmdqueue.chunk, M_DEVBUF);

	destroy_dma_memory(&sc->reg_mem);
	destroy_dma_memory(&sc->sadi_mem);
	destroy_dma_memory(&sc->prom_mem);
#ifdef TEST_DMA_SYNC
	destroy_dma_memoryX(&sc->s1q_mem);
	destroy_dma_memoryX(&sc->l1q_mem);
	destroy_dma_memoryX(&sc->rxq_mem);
	destroy_dma_memoryX(&sc->txq_mem);
	destroy_dma_memoryX(&sc->stat_mem);
#endif

	if (sc->tx_tag != NULL)
		if (bus_dma_tag_destroy(sc->tx_tag))
			printf("tx DMA tag busy!\n");

	if (sc->rbuf_tag != NULL)
		if (bus_dma_tag_destroy(sc->rbuf_tag))
			printf("rbuf DMA tag busy!\n");

	if (sc->parent_dmat != NULL)
		if (bus_dma_tag_destroy(sc->parent_dmat))
			printf("parent DMA tag busy!\n");

	if (sc->irqres != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irqres);

	if (sc->memres != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    sc->memid, sc->memres);

	(void)sysctl_ctx_free(&sc->sysctl_ctx);

	cv_destroy(&sc->cv_stat);
	cv_destroy(&sc->cv_regs);

	mtx_destroy(&sc->mtx);

	return (0);
}

/*
 * Sysctl handler
 */
static int
fatm_sysctl_istats(SYSCTL_HANDLER_ARGS)
{
	struct fatm_softc *sc = arg1;
	u_long *ret;
	int error;

	ret = malloc(sizeof(sc->istats), M_TEMP, M_WAITOK);
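
	/*
	 * Snapshot the counters into a temporary buffer under the lock;
	 * SYSCTL_OUT may sleep, so it is called only after the lock has
	 * been dropped.
	 */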

	FATM_LOCK(sc);
	bcopy(&sc->istats, ret, sizeof(sc->istats));
	FATM_UNLOCK(sc);

	error = SYSCTL_OUT(req, ret, sizeof(sc->istats));
	free(ret, M_TEMP);

	return (error);
}

/*
 * Sysctl handler for card statistics
 * This is disabled because it destroys the PHY statistics.
 */
static int
fatm_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct fatm_softc *sc = arg1;
	int error;
	const struct fatm_stats *s;
	u_long *ret;
	u_int i;

	ret = malloc(sizeof(u_long) * FATM_NSTATS, M_TEMP, M_WAITOK);

	FATM_LOCK(sc);

	if ((error = fatm_getstat(sc)) == 0) {
		s = sc->sadi_mem.mem;
		i = 0;
		ret[i++] = s->phy_4b5b.crc_header_errors;
		ret[i++] = s->phy_4b5b.framing_errors;
		ret[i++] = s->phy_oc3.section_bip8_errors;
		ret[i++] = s->phy_oc3.path_bip8_errors;
		ret[i++] = s->phy_oc3.line_bip24_errors;
		ret[i++] = s->phy_oc3.line_febe_errors;
		ret[i++] = s->phy_oc3.path_febe_errors;
		ret[i++] = s->phy_oc3.corr_hcs_errors;
		ret[i++] = s->phy_oc3.ucorr_hcs_errors;
		ret[i++] = s->atm.cells_transmitted;
		ret[i++] = s->atm.cells_received;
		ret[i++] = s->atm.vpi_bad_range;
		ret[i++] = s->atm.vpi_no_conn;
		ret[i++] = s->atm.vci_bad_range;
		ret[i++] = s->atm.vci_no_conn;
		ret[i++] = s->aal0.cells_transmitted;
		ret[i++] = s->aal0.cells_received;
		ret[i++] = s->aal0.cells_dropped;
		ret[i++] = s->aal4.cells_transmitted;
		ret[i++] = s->aal4.cells_received;
		ret[i++] = s->aal4.cells_crc_errors;
		ret[i++] = s->aal4.cels_protocol_errors;
		ret[i++] = s->aal4.cells_dropped;
		ret[i++] = s->aal4.cspdus_transmitted;
		ret[i++] = s->aal4.cspdus_received;
		ret[i++] = s->aal4.cspdus_protocol_errors;
		ret[i++] = s->aal4.cspdus_dropped;
		ret[i++] = s->aal5.cells_transmitted;
		ret[i++] = s->aal5.cells_received;
		ret[i++] = s->aal5.congestion_experienced;
		ret[i++] = s->aal5.cells_dropped;
		ret[i++] = s->aal5.cspdus_transmitted;
		ret[i++] = s->aal5.cspdus_received;
		ret[i++] = s->aal5.cspdus_crc_errors;
		ret[i++] = s->aal5.cspdus_protocol_errors;
		ret[i++] = s->aal5.cspdus_dropped;
		ret[i++] = s->aux.small_b1_failed;
		ret[i++] = s->aux.large_b1_failed;
		ret[i++] = s->aux.small_b2_failed;
		ret[i++] = s->aux.large_b2_failed;
		ret[i++] = s->aux.rpd_alloc_failed;
		ret[i++] = s->aux.receive_carrier;
	}
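	/*
	 * Note: the number of words stored above must match FATM_NSTATS,
	 * both for the allocation and for the SYSCTL_OUT below.
	 */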
	/* declare the buffer free */
	sc->flags &= ~FATM_STAT_INUSE;
	cv_signal(&sc->cv_stat);

	FATM_UNLOCK(sc);

	if (error == 0)
		error = SYSCTL_OUT(req, ret, sizeof(u_long) * FATM_NSTATS);
	free(ret, M_TEMP);

	return (error);
}

#define MAXDMASEGS 32		/* maximum number of DMA segments */

/*
 * Attach to the device.
 *
 * We assume that there is a global lock (Giant in this case) that protects
 * multiple threads from entering this function. This makes sense, doesn't it?
 */
static int
fatm_attach(device_t dev)
{
	struct ifnet *ifp;
	struct fatm_softc *sc;
	int unit;
	uint16_t cfg;
	int error = 0;
	struct rbuf *rb;
	u_int i;
	struct txqueue *tx;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	sc->ifatm.mib.device = ATM_DEVICE_PCA200E;
	sc->ifatm.mib.serial = 0;
	sc->ifatm.mib.hw_version = 0;
	sc->ifatm.mib.sw_version = 0;
	sc->ifatm.mib.vpi_bits = 0;
	sc->ifatm.mib.vci_bits = FORE_VCIBITS;
	sc->ifatm.mib.max_vpcs = 0;
	sc->ifatm.mib.max_vccs = FORE_MAX_VCC;
	sc->ifatm.mib.media = IFM_ATM_UNKNOWN;
	sc->ifatm.phy = &sc->utopia;

	LIST_INIT(&sc->rbuf_free);
	LIST_INIT(&sc->rbuf_used);

	/*
	 * Initialize mutex and condition variables.
	 */
	mtx_init(&sc->mtx, device_get_nameunit(dev),
	    MTX_NETWORK_LOCK, MTX_DEF);

	cv_init(&sc->cv_stat, "fatm_stat");
	cv_init(&sc->cv_regs, "fatm_regs");

	sysctl_ctx_init(&sc->sysctl_ctx);

	/*
	 * Make the sysctl tree
	 */
	if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
	    device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
		goto fail;

	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "istats", CTLFLAG_RD, sc, 0, fatm_sysctl_istats,
	    "LU", "internal statistics") == NULL)
		goto fail;

	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "stats", CTLFLAG_RD, sc, 0, fatm_sysctl_stats,
	    "LU", "card statistics") == NULL)
		goto fail;

	if (SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "retry_tx", CTLFLAG_RW, &sc->retry_tx, 0,
	    "retry flag") == NULL)
		goto fail;

#ifdef FATM_DEBUG
	if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "debug flags")
	    == NULL)
		goto fail;
	sc->debug = FATM_DEBUG;
#endif

	/*
	 * Network subsystem stuff
	 */
	ifp = &sc->ifatm.ifnet;
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_SIMPLEX;
	ifp->if_ioctl = fatm_ioctl;
	ifp->if_start = fatm_start;
	ifp->if_watchdog = fatm_watchdog;
	ifp->if_init = fatm_init;
	ifp->if_linkmib = &sc->ifatm.mib;
	ifp->if_linkmiblen = sizeof(sc->ifatm.mib);

	/*
	 * Enable memory mapping and bus mastering
	 */
	cfg = pci_read_config(dev, PCIR_COMMAND, 2);
	cfg |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cfg, 2);

	/*
	 * Map memory
	 */
	cfg = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!(cfg & PCIM_CMD_MEMEN)) {
		if_printf(ifp, "failed to enable memory mapping\n");
		error = ENXIO;
		goto fail;
	}
	sc->memid = 0x10;
	sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->memid,
	    RF_ACTIVE);
	if (sc->memres == NULL) {
		if_printf(ifp, "could not map memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->memh = rman_get_bushandle(sc->memres);
	sc->memt = rman_get_bustag(sc->memres);

	/*
	 * Convert endianness of slave access
	 */
	cfg = pci_read_config(dev, FATM_PCIR_MCTL, 1);
	cfg |= FATM_PCIM_SWAB;
	pci_write_config(dev, FATM_PCIR_MCTL, cfg, 1);

	/*
	 * Allocate interrupt (activate at the end)
	 */
	sc->irqid = 0;
	sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->irqres == NULL) {
		if_printf(ifp, "could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Allocate the parent DMA tag. This is used simply to hold overall
	 * restrictions for the controller (and PCI bus) and is never used
	 * to do anything.
	 */
	if (bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, MAXDMASEGS,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->parent_dmat)) {
		if_printf(ifp, "could not allocate parent DMA tag\n");
		error = ENOMEM;
		goto fail;
	}

	/*
	 * Allocate the receive buffer DMA tag. This tag must be able to
	 * map at most an mbuf cluster.
	 */
	if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
	    NULL, NULL, &sc->rbuf_tag)) {
		if_printf(ifp, "could not allocate rbuf DMA tag\n");
		error = ENOMEM;
		goto fail;
	}

	/*
	 * Allocate the transmission DMA tag. Must add 1 because the
	 * rounded-up PDU can be 65536 bytes long.
	 */
	if (bus_dma_tag_create(sc->parent_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    FATM_MAXPDU + 1, TPD_EXTENSIONS + TXD_FIXED, MCLBYTES, 0,
	    NULL, NULL, &sc->tx_tag)) {
		if_printf(ifp, "could not allocate tx DMA tag\n");
		error = ENOMEM;
		goto fail;
	}

	/*
	 * Allocate DMAable memory.
	 */
	sc->stat_mem.size = sizeof(uint32_t) * (FATM_CMD_QLEN + FATM_TX_QLEN
	    + FATM_RX_QLEN + SMALL_SUPPLY_QLEN + LARGE_SUPPLY_QLEN);
	sc->stat_mem.align = 4;

	sc->txq_mem.size = FATM_TX_QLEN * TPD_SIZE;
	sc->txq_mem.align = 32;

	sc->rxq_mem.size = FATM_RX_QLEN * RPD_SIZE;
	sc->rxq_mem.align = 32;

	sc->s1q_mem.size = SMALL_SUPPLY_QLEN *
	    BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE);
	sc->s1q_mem.align = 32;

	sc->l1q_mem.size = LARGE_SUPPLY_QLEN *
	    BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE);
	sc->l1q_mem.align = 32;

#ifdef TEST_DMA_SYNC
	if ((error = alloc_dma_memoryX(sc, "STATUS", &sc->stat_mem)) != 0 ||
	    (error = alloc_dma_memoryX(sc, "TXQ", &sc->txq_mem)) != 0 ||
	    (error = alloc_dma_memoryX(sc, "RXQ", &sc->rxq_mem)) != 0 ||
	    (error = alloc_dma_memoryX(sc, "S1Q", &sc->s1q_mem)) != 0 ||
	    (error = alloc_dma_memoryX(sc, "L1Q", &sc->l1q_mem)) != 0)
		goto fail;
#else
	if ((error = alloc_dma_memory(sc, "STATUS", &sc->stat_mem)) != 0 ||
	    (error = alloc_dma_memory(sc, "TXQ", &sc->txq_mem)) != 0 ||
	    (error = alloc_dma_memory(sc, "RXQ", &sc->rxq_mem)) != 0 ||
	    (error = alloc_dma_memory(sc, "S1Q", &sc->s1q_mem)) != 0 ||
	    (error = alloc_dma_memory(sc, "L1Q", &sc->l1q_mem)) != 0)
		goto fail;
#endif

	sc->prom_mem.size = sizeof(struct prom);
	sc->prom_mem.align = 32;
	if ((error = alloc_dma_memory(sc, "PROM", &sc->prom_mem)) != 0)
		goto fail;

	sc->sadi_mem.size = sizeof(struct fatm_stats);
	sc->sadi_mem.align = 32;
	if ((error = alloc_dma_memory(sc, "STATISTICS", &sc->sadi_mem)) != 0)
		goto fail;

	sc->reg_mem.size = sizeof(uint32_t) * FATM_NREGS;
	sc->reg_mem.align = 32;
	if ((error = alloc_dma_memory(sc, "REGISTERS", &sc->reg_mem)) != 0)
		goto fail;

	/*
	 * Allocate queues
	 */
	sc->cmdqueue.chunk = malloc(FATM_CMD_QLEN * sizeof(struct cmdqueue),
	    M_DEVBUF, M_ZERO | M_WAITOK);
	sc->txqueue.chunk = malloc(FATM_TX_QLEN * sizeof(struct txqueue),
	    M_DEVBUF, M_ZERO | M_WAITOK);
	sc->rxqueue.chunk = malloc(FATM_RX_QLEN * sizeof(struct rxqueue),
	    M_DEVBUF, M_ZERO | M_WAITOK);
	sc->s1queue.chunk = malloc(SMALL_SUPPLY_QLEN * sizeof(struct supqueue),
	    M_DEVBUF, M_ZERO | M_WAITOK);
	sc->l1queue.chunk = malloc(LARGE_SUPPLY_QLEN * sizeof(struct supqueue),
	    M_DEVBUF, M_ZERO | M_WAITOK);

	sc->vccs = malloc((FORE_MAX_VCC + 1) * sizeof(sc->vccs[0]),
	    M_DEVBUF, M_ZERO | M_WAITOK);
	sc->vcc_zone = uma_zcreate("FATM vccs", sizeof(struct card_vcc),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	if (sc->vcc_zone == NULL) {
		error = ENOMEM;
		goto fail;
	}

	/*
	 * Allocate memory for the receive buffer headers. The total number
	 * of headers should probably also include the maximum number of
	 * buffers on the receive queue.
	 */
	sc->rbuf_total = SMALL_POOL_SIZE + LARGE_POOL_SIZE;
	sc->rbufs = malloc(sc->rbuf_total * sizeof(struct rbuf),
	    M_DEVBUF, M_ZERO | M_WAITOK);

	/*
	 * Put all rbuf headers on the free list and create DMA maps.
	 */
	for (rb = sc->rbufs, i = 0; i < sc->rbuf_total; i++, rb++) {
		if ((error = bus_dmamap_create(sc->rbuf_tag, 0, &rb->map))) {
			if_printf(&sc->ifatm.ifnet, "creating rx map: %d\n",
			    error);
			goto fail;
		}
		LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
	}

	/*
	 * Create DMA maps for transmission. In case of an error, free the
	 * already allocated maps, because on some architectures maps are
	 * NULL and we cannot distinguish between a failure and a NULL map
	 * in the detach routine.
	 */
	for (i = 0; i < FATM_TX_QLEN; i++) {
		tx = GET_QUEUE(sc->txqueue, struct txqueue, i);
		if ((error = bus_dmamap_create(sc->tx_tag, 0, &tx->map))) {
			if_printf(&sc->ifatm.ifnet, "creating tx map: %d\n",
			    error);
			while (i > 0) {
				tx = GET_QUEUE(sc->txqueue, struct txqueue,
				    i - 1);
				bus_dmamap_destroy(sc->tx_tag, tx->map);
				i--;
			}
			goto fail;
		}
	}

	utopia_attach(&sc->utopia, &sc->ifatm, &sc->media, &sc->mtx,
	    &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    &fatm_utopia_methods);
	sc->utopia.flags |= UTP_FL_NORESET | UTP_FL_POLL_CARRIER;

	/*
	 * Attach the interface
	 */
	atm_ifattach(ifp);
	ifp->if_snd.ifq_maxlen = 512;

	error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET,
	    fatm_intr, sc, &sc->ih);
	if (error) {
		if_printf(ifp, "couldn't setup irq\n");
		goto fail;
	}

  fail:
	if (error)
		fatm_detach(dev);

	return (error);
}

#if defined(FATM_DEBUG) && 0
static void
dump_s1_queue(struct fatm_softc *sc)
{
	int i;
	struct supqueue *q;

	for (i = 0; i < SMALL_SUPPLY_QLEN; i++) {
		q = GET_QUEUE(sc->s1queue, struct supqueue, i);
		printf("%2d: card=%x(%x,%x) stat=%x\n", i,
		    q->q.card,
		    READ4(sc, q->q.card),
		    READ4(sc, q->q.card + 4),
		    *q->q.statp);
	}
}
#endif

/*
 * Driver infrastructure.
 */
static device_method_t fatm_methods[] = {
	DEVMETHOD(device_probe,		fatm_probe),
	DEVMETHOD(device_attach,	fatm_attach),
	DEVMETHOD(device_detach,	fatm_detach),
	{ 0, 0 }
};
static driver_t fatm_driver = {
	"fatm",
	fatm_methods,
	sizeof(struct fatm_softc),
};

DRIVER_MODULE(fatm, pci, fatm_driver, fatm_devclass, 0, 0);
