1/*	$NetBSD: midway.c,v 1.30 1997/09/29 17:40:38 chuck Exp $	*/
2/*	(sync'd to midway.c 1.68)	*/
3
4/*-
5 * Copyright (c) 1996 Charles D. Cranor and Washington University.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *      This product includes software developed by Charles D. Cranor and
19 *	Washington University.
20 * 4. The name of the author may not be used to endorse or promote products
21 *    derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD$");
36
37/*
38 *
39 * m i d w a y . c   e n i 1 5 5   d r i v e r
40 *
41 * author: Chuck Cranor <chuck@ccrc.wustl.edu>
42 * started: spring, 1996 (written from scratch).
43 *
44 * notes from the author:
45 *   Extra special thanks go to Werner Almesberger, EPFL LRC.   Werner's
46 *   ENI driver was especially useful in figuring out how this card works.
47 *   I would also like to thank Werner for promptly answering email and being
48 *   generally helpful.
49 */
50
51#define	EN_DIAG
52#define EN_DDBHOOK	1	/* compile in ddb functions */
53
54/*
55 * Note on EN_ENIDMAFIX: the byte aligner on the ENI version of the card
56 * appears to be broken.   it works just fine if there is no load... however
57 * when the card is loaded the data get corrupted.   to see this, one only
58 * has to use "telnet" over ATM.   do the following command in "telnet":
59 * 	cat /usr/share/misc/termcap
60 * "telnet" seems to generate lots of 1023 byte mbufs (which make great
61 * use of the byte aligner).   watch "netstat -s" for checksum errors.
62 *
63 * I further tested this by adding a function that compared the transmit
64 * data on the card's SRAM with the data in the mbuf chain _after_ the
65 * "transmit DMA complete" interrupt.   using the "telnet" test I got data
66 * mismatches where the byte-aligned data should have been.   using ddb
67 * and en_dumpmem() I verified that the DTQs fed into the card were
 * absolutely correct.   thus, we are forced to conclude that the ENI
69 * hardware is buggy.   note that the Adaptec version of the card works
70 * just fine with byte DMA.
71 *
72 * bottom line: we set EN_ENIDMAFIX to 1 to avoid byte DMAs on the ENI
73 * card.
74 */
75
76#if defined(DIAGNOSTIC) && !defined(EN_DIAG)
77#define EN_DIAG			/* link in with master DIAG option */
78#endif
79
80#define EN_COUNT(X) (X)++
81
82#ifdef EN_DEBUG
83
84#undef	EN_DDBHOOK
85#define	EN_DDBHOOK	1
86
87/*
 * This macro removes almost all the EN_DEBUG conditionals in the code that
 * make the code a good deal less readable.
90 */
91#define DBG(SC, FL, PRINT) do {						\
92	if ((SC)->debug & DBG_##FL) {					\
93		device_printf((SC)->dev, "%s: "#FL": ", __func__);	\
94		printf PRINT;						\
95		printf("\n");						\
96	}								\
97    } while (0)
98
99enum {
100	DBG_INIT	= 0x0001,	/* debug attach/detach */
101	DBG_TX		= 0x0002,	/* debug transmitting */
102	DBG_SERV	= 0x0004,	/* debug service interrupts */
103	DBG_IOCTL	= 0x0008,	/* debug ioctls */
104	DBG_VC		= 0x0010,	/* debug VC handling */
105	DBG_INTR	= 0x0020,	/* debug interrupts */
106	DBG_DMA		= 0x0040,	/* debug DMA probing */
107	DBG_IPACKETS	= 0x0080,	/* print input packets */
108	DBG_REG		= 0x0100,	/* print all register access */
109	DBG_LOCK	= 0x0200,	/* debug locking */
110};
111
112#else /* EN_DEBUG */
113
114#define DBG(SC, FL, PRINT) do { } while (0)
115
116#endif /* EN_DEBUG */
117
118#include "opt_inet.h"
119#include "opt_natm.h"
120#include "opt_ddb.h"
121
122#ifdef DDB
123#undef	EN_DDBHOOK
124#define	EN_DDBHOOK	1
125#endif
126
127#include <sys/param.h>
128#include <sys/systm.h>
129#include <sys/queue.h>
130#include <sys/sockio.h>
131#include <sys/socket.h>
132#include <sys/mbuf.h>
133#include <sys/endian.h>
134#include <sys/stdint.h>
135#include <sys/lock.h>
136#include <sys/mutex.h>
137#include <sys/condvar.h>
138#include <vm/uma.h>
139
140#include <net/if.h>
141#include <net/if_media.h>
142#include <net/if_atm.h>
143
144#if defined(NATM) || defined(INET) || defined(INET6)
145#include <netinet/in.h>
146#if defined(INET) || defined(INET6)
147#include <netinet/if_atm.h>
148#endif
149#endif
150
151#ifdef NATM
152#include <netnatm/natm.h>
153#endif
154
155#include <sys/bus.h>
156#include <machine/bus.h>
157#include <sys/rman.h>
158#include <sys/module.h>
159#include <sys/sysctl.h>
160#include <sys/malloc.h>
161#include <machine/resource.h>
162#include <dev/utopia/utopia.h>
163#include <dev/en/midwayreg.h>
164#include <dev/en/midwayvar.h>
165
166#include <net/bpf.h>
167
168/*
169 * params
170 */
171#ifndef EN_TXHIWAT
172#define EN_TXHIWAT	(64 * 1024)	/* max 64 KB waiting to be DMAd out */
173#endif
174
175SYSCTL_DECL(_hw_atm);
176
177/*
178 * dma tables
179 *
180 * The plan is indexed by the number of words to transfer.
181 * The maximum index is 15 for 60 words.
182 */
struct en_dmatab {
	uint8_t bcode;		/* DMA burst code to program */
	uint8_t divshift;	/* log2 divisor: bytes >> divshift = count */
};

/* best burst for each word count 0..16; non-power-of-two counts fall
 * back to the largest burst that divides them (see table comment above) */
static const struct en_dmatab en_dmaplan[] = {
  { 0, 0 },		/* 0 */		{ MIDDMA_WORD, 2},	/* 1 */
  { MIDDMA_2WORD, 3},	/* 2 */		{ MIDDMA_WORD, 2},	/* 3 */
  { MIDDMA_4WORD, 4},	/* 4 */		{ MIDDMA_WORD, 2},	/* 5 */
  { MIDDMA_2WORD, 3},	/* 6 */		{ MIDDMA_WORD, 2},	/* 7 */
  { MIDDMA_8WORD, 5},   /* 8 */		{ MIDDMA_WORD, 2},	/* 9 */
  { MIDDMA_2WORD, 3},	/* 10 */	{ MIDDMA_WORD, 2},	/* 11 */
  { MIDDMA_4WORD, 4},	/* 12 */	{ MIDDMA_WORD, 2},	/* 13 */
  { MIDDMA_2WORD, 3},	/* 14 */	{ MIDDMA_WORD, 2},	/* 15 */
  { MIDDMA_16WORD,6},	/* 16 */
};
199
200/*
201 * prototypes
202 */
203#ifdef EN_DDBHOOK
204int en_dump(int unit, int level);
205int en_dumpmem(int,int,int);
206#endif
207static void en_close_finish(struct en_softc *sc, struct en_vcc *vc);
208
209#define EN_LOCK(SC)	do {				\
210	DBG(SC, LOCK, ("ENLOCK %d\n", __LINE__));	\
211	mtx_lock(&sc->en_mtx);				\
212    } while (0)
213#define EN_UNLOCK(SC)	do {				\
214	DBG(SC, LOCK, ("ENUNLOCK %d\n", __LINE__));	\
215	mtx_unlock(&sc->en_mtx);			\
216    } while (0)
217#define EN_CHECKLOCK(sc)	mtx_assert(&sc->en_mtx, MA_OWNED)
218
219/*
220 * While a transmit mbuf is waiting to get transmit DMA resources we
221 * need to keep some information with it. We don't want to allocate
222 * additional memory for this so we stuff it into free fields in the
223 * mbuf packet header. Neither the checksum fields nor the rcvif field are used
224 * so use these.
225 */
226#define TX_AAL5		0x1	/* transmit AAL5 PDU */
227#define TX_HAS_TBD	0x2	/* TBD did fit into mbuf */
228#define TX_HAS_PAD	0x4	/* padding did fit into mbuf */
229#define TX_HAS_PDU	0x8	/* PDU trailer did fit into mbuf */
230
231#define MBUF_SET_TX(M, VCI, FLAGS, DATALEN, PAD, MAP) do {		\
232	(M)->m_pkthdr.csum_data = (VCI) | ((FLAGS) << MID_VCI_BITS);	\
233	(M)->m_pkthdr.csum_flags = ((DATALEN) & 0xffff) |		\
234	    ((PAD & 0x3f) << 16);					\
235	(M)->m_pkthdr.rcvif = (void *)(MAP);				\
236    } while (0)
237
238#define MBUF_GET_TX(M, VCI, FLAGS, DATALEN, PAD, MAP) do {		\
239	(VCI) = (M)->m_pkthdr.csum_data & ((1 << MID_VCI_BITS) - 1);	\
240	(FLAGS) = ((M)->m_pkthdr.csum_data >> MID_VCI_BITS) & 0xf;	\
241	(DATALEN) = (M)->m_pkthdr.csum_flags & 0xffff;			\
242	(PAD) = ((M)->m_pkthdr.csum_flags >> 16) & 0x3f;		\
243	(MAP) = (void *)((M)->m_pkthdr.rcvif);				\
244    } while (0)
245
246
247#define EN_WRAPADD(START, STOP, CUR, VAL) do {			\
248	(CUR) = (CUR) + (VAL);					\
249	if ((CUR) >= (STOP))					\
250		(CUR) = (START) + ((CUR) - (STOP));		\
251    } while (0)
252
253#define WORD_IDX(START, X) (((X) - (START)) / sizeof(uint32_t))
254
255#define SETQ_END(SC, VAL) ((SC)->is_adaptec ?			\
256	((VAL) | (MID_DMA_END >> 4)) :				\
257	((VAL) | (MID_DMA_END)))
258
259/*
260 * The dtq and drq members are set for each END entry in the corresponding
261 * card queue entry. It is used to find out, when a buffer has been
262 * finished DMAing and can be freed.
263 *
264 * We store sc->dtq and sc->drq data in the following format...
265 * the 0x80000 ensures we != 0
266 */
267#define EN_DQ_MK(SLOT, LEN)	(((SLOT) << 20) | (LEN) | (0x80000))
268#define EN_DQ_SLOT(X)		((X) >> 20)
269#define EN_DQ_LEN(X)		((X) & 0x3ffff)
270
271/*
272 * Variables
273 */
274static uma_zone_t en_vcc_zone;
275
276/***********************************************************************/
277
278/*
279 * en_read{x}: read a word from the card. These are the only functions
280 * that read from the card.
281 */
282static __inline uint32_t
283en_readx(struct en_softc *sc, uint32_t r)
284{
285	uint32_t v;
286
287#ifdef EN_DIAG
288	if (r > MID_MAXOFF || (r % 4))
289		panic("en_read out of range, r=0x%x", r);
290#endif
291	v = bus_space_read_4(sc->en_memt, sc->en_base, r);
292	return (v);
293}
294
295static __inline uint32_t
296en_read(struct en_softc *sc, uint32_t r)
297{
298	uint32_t v;
299
300#ifdef EN_DIAG
301	if (r > MID_MAXOFF || (r % 4))
302		panic("en_read out of range, r=0x%x", r);
303#endif
304	v = bus_space_read_4(sc->en_memt, sc->en_base, r);
305	DBG(sc, REG, ("en_read(%#x) -> %08x", r, v));
306	return (v);
307}
308
309/*
310 * en_write: write a word to the card. This is the only function that
311 * writes to the card.
312 */
313static __inline void
314en_write(struct en_softc *sc, uint32_t r, uint32_t v)
315{
316#ifdef EN_DIAG
317	if (r > MID_MAXOFF || (r % 4))
318		panic("en_write out of range, r=0x%x", r);
319#endif
320	DBG(sc, REG, ("en_write(%#x) <- %08x", r, v));
321	bus_space_write_4(sc->en_memt, sc->en_base, r, v);
322}
323
/*
 * en_k2sz: convert a KByte count (a power of two between 1 and 128)
 * into the card's size parameter, which is simply log2 of the count.
 * Any other value is a driver bug and panics.
 */
static __inline int
en_k2sz(int k)
{
	int log2;

	for (log2 = 0; log2 <= 7; log2++)
		if (k == (1 << log2))
			return (log2);
	panic("en_k2sz");
	return (0);
}
344#define en_log2(X) en_k2sz(X)
345
346#if 0
347/*
348 * en_b2sz: convert a DMA burst code to its byte size
349 */
350static __inline int
351en_b2sz(int b)
352{
353	switch (b) {
354	  case MIDDMA_WORD:   return (1*4);
355	  case MIDDMA_2WMAYBE:
356	  case MIDDMA_2WORD:  return (2*4);
357	  case MIDDMA_4WMAYBE:
358	  case MIDDMA_4WORD:  return (4*4);
359	  case MIDDMA_8WMAYBE:
360	  case MIDDMA_8WORD:  return (8*4);
361	  case MIDDMA_16WMAYBE:
362	  case MIDDMA_16WORD: return (16*4);
363	  default:
364		panic("en_b2sz");
365	}
366	return (0);
367}
368#endif
369
370/*
371 * en_sz2b: convert a burst size (bytes) to DMA burst code
372 */
373static __inline int
374en_sz2b(int sz)
375{
376	switch (sz) {
377	  case 1*4:  return (MIDDMA_WORD);
378	  case 2*4:  return (MIDDMA_2WORD);
379	  case 4*4:  return (MIDDMA_4WORD);
380	  case 8*4:  return (MIDDMA_8WORD);
381	  case 16*4: return (MIDDMA_16WORD);
382	  default:
383		panic("en_sz2b");
384	}
385	return(0);
386}
387
388#ifdef EN_DEBUG
389/*
390 * Dump a packet
391 */
392static void
393en_dump_packet(struct en_softc *sc, struct mbuf *m)
394{
395	int plen = m->m_pkthdr.len;
396	u_int pos = 0;
397	u_int totlen = 0;
398	int len;
399	u_char *ptr;
400
401	device_printf(sc->dev, "packet len=%d", plen);
402	while (m != NULL) {
403		totlen += m->m_len;
404		ptr = mtod(m, u_char *);
405		for (len = 0; len < m->m_len; len++, pos++, ptr++) {
406			if (pos % 16 == 8)
407				printf(" ");
408			if (pos % 16 == 0)
409				printf("\n");
410			printf(" %02x", *ptr);
411		}
412		m = m->m_next;
413	}
414	printf("\n");
415	if (totlen != plen)
416		printf("sum of m_len=%u\n", totlen);
417}
418#endif
419
420/*********************************************************************/
421/*
422 * DMA maps
423 */
424
425/*
426 * Map constructor for a MAP.
427 *
428 * This is called each time when a map is allocated
429 * from the pool and about to be returned to the user. Here we actually
430 * allocate the map if there isn't one. The problem is that we may fail
 * to allocate the DMA map yet have no means to signal this error. Therefore,
432 * when allocating a map, the call must check that there is a map. An
433 * additional problem is, that i386 maps will be NULL, yet are ok and must
434 * be freed so let's use a flag to signal allocation.
435 *
436 * Caveat: we have no way to know that we are called from an interrupt context
437 * here. We rely on the fact, that bus_dmamap_create uses M_NOWAIT in all
438 * its allocations.
439 *
440 * LOCK: any, not needed
441 */
442static int
443en_map_ctor(void *mem, int size, void *arg, int flags)
444{
445	struct en_softc *sc = arg;
446	struct en_map *map = mem;
447	int err;
448
449	err = bus_dmamap_create(sc->txtag, 0, &map->map);
450	if (err != 0) {
451		device_printf(sc->dev, "cannot create DMA map %d\n", err);
452		return (err);
453	}
454	map->flags = ENMAP_ALLOC;
455	map->sc = sc;
456	return (0);
457}
458
459/*
460 * Map destructor.
461 *
462 * Called when a map is disposed into the zone. If the map is loaded, unload
463 * it.
464 *
465 * LOCK: any, not needed
466 */
467static void
468en_map_dtor(void *mem, int size, void *arg)
469{
470	struct en_map *map = mem;
471
472	if (map->flags & ENMAP_LOADED) {
473		bus_dmamap_unload(map->sc->txtag, map->map);
474		map->flags &= ~ENMAP_LOADED;
475	}
476}
477
478/*
479 * Map finializer.
480 *
481 * This is called each time a map is returned from the zone to the system.
482 * Get rid of the dmamap here.
483 *
484 * LOCK: any, not needed
485 */
486static void
487en_map_fini(void *mem, int size)
488{
489	struct en_map *map = mem;
490
491	bus_dmamap_destroy(map->sc->txtag, map->map);
492}
493
494/*********************************************************************/
495/*
496 * Transmission
497 */
498
/*
 * Argument structure to load a transmit DMA map
 */
struct txarg {
	struct en_softc *sc;	/* driver softc */
	struct mbuf *m;		/* mbuf chain being transmitted */
	u_int vci;		/* VCI to transmit on */
	u_int chan;		/* transmit channel */
	u_int datalen;		/* length of user data */
	u_int flags;		/* TX_* flags recovered via MBUF_GET_TX */
	u_int wait;		/* return: out of resources */
};
511
/*
 * TX DMA map loader helper. This is the callback invoked by
 * bus_dmamap_load_mbuf() once the chain is mapped. It fills DTQ (DMA
 * transmit queue) entries into the card for every DMA operation needed,
 * prepending the TBD and appending the AAL5 PDU trailer directly into
 * the on-card transmit buffer when they did not fit into the mbufs.
 *
 * On DTQ exhaustion tx->wait is set and we return early: the card's
 * write pointer (MID_DMA_WRTX) and the softc/slot state are only
 * updated at the very end, so an early return leaves the card untold
 * of any partially written entries.
 *
 * LOCK: locked, needed
 */
static void
en_txdma_load(void *uarg, bus_dma_segment_t *segs, int nseg, bus_size_t mapsize,
    int error)
{
	struct txarg *tx = uarg;
	struct en_softc *sc = tx->sc;
	struct en_txslot *slot = &sc->txslot[tx->chan];
	uint32_t cur;		/* on-card buffer position (bytes offset) */
	uint32_t dtq;		/* on-card queue position (byte offset) */
	uint32_t last_dtq;	/* last DTQ we have written */
	uint32_t tmp;
	u_int free;		/* free queue entries on card */
	u_int needalign, cnt;
	bus_size_t rest;	/* remaining bytes in current segment */
	bus_addr_t addr;
	bus_dma_segment_t *s;
	uint32_t count, bcode;
	int i;

	if (error != 0)
		return;

	/* work on local copies; committed back only on success (below) */
	cur = slot->cur;
	dtq = sc->dtq_us;
	free = sc->dtq_free;

	last_dtq = 0;		/* make gcc happy */

	/*
	 * Local macro to add an entry to the transmit DMA area. If there
	 * are no entries left, return. Save the byte offset of the entry
	 * in last_dtq for later use.
	 */
#define PUT_DTQ_ENTRY(ENI, BCODE, COUNT, ADDR)				\
	if (free == 0) {						\
		EN_COUNT(sc->stats.txdtqout);				\
		tx->wait = 1;						\
		return;							\
	}								\
	last_dtq = dtq;							\
	en_write(sc, dtq + 0, (ENI || !sc->is_adaptec) ?		\
	    MID_MK_TXQ_ENI(COUNT, tx->chan, 0, BCODE) :			\
	    MID_MK_TXQ_ADP(COUNT, tx->chan, 0, BCODE));			\
	en_write(sc, dtq + 4, ADDR);					\
									\
	EN_WRAPADD(MID_DTQOFF, MID_DTQEND, dtq, 8);			\
	free--;

	/*
	 * Local macro to generate a DMA entry to DMA cnt bytes. Updates
	 * the current buffer byte offset accordingly.
	 */
#define DO_DTQ(TYPE) do {						\
	rest -= cnt;							\
	EN_WRAPADD(slot->start, slot->stop, cur, cnt);			\
	DBG(sc, TX, ("tx%d: "TYPE" %u bytes, %ju left, cur %#x",	\
	    tx->chan, cnt, (uintmax_t)rest, cur));			\
									\
	PUT_DTQ_ENTRY(1, bcode, count, addr);				\
									\
	addr += cnt;							\
    } while (0)

	if (!(tx->flags & TX_HAS_TBD)) {
		/*
		 * Prepend the TBD - it did not fit into the first mbuf.
		 * It is written directly into the on-card buffer; the
		 * MIDDMA_JK entry below only advances the card's notion
		 * of the buffer position (no host memory is DMAed).
		 */
		tmp = MID_TBD_MK1((tx->flags & TX_AAL5) ?
		    MID_TBD_AAL5 : MID_TBD_NOAAL5,
		    sc->vccs[tx->vci]->txspeed,
		    tx->m->m_pkthdr.len / MID_ATMDATASZ);
		en_write(sc, cur, tmp);
		EN_WRAPADD(slot->start, slot->stop, cur, 4);

		tmp = MID_TBD_MK2(tx->vci, 0, 0);
		en_write(sc, cur, tmp);
		EN_WRAPADD(slot->start, slot->stop, cur, 4);

		/* update DMA address */
		PUT_DTQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
	}

	for (i = 0, s = segs; i < nseg; i++, s++) {
		rest = s->ds_len;
		addr = s->ds_addr;

		if (sc->is_adaptec) {
			/* adaptec card - simple: one entry per segment */

			/* advance the on-card buffer pointer */
			EN_WRAPADD(slot->start, slot->stop, cur, rest);
			DBG(sc, TX, ("tx%d: adp %ju bytes %#jx (cur now 0x%x)",
			    tx->chan, (uintmax_t)rest, (uintmax_t)addr, cur));

			PUT_DTQ_ENTRY(0, 0, rest, addr);

			continue;
		}

		/*
		 * do we need to do a DMA op to align to the maximum
		 * burst? Note, that we are always 32-bit aligned.
		 */
		if (sc->alburst &&
		    (needalign = (addr & sc->bestburstmask)) != 0) {
			/* compute number of bytes, words and code */
			cnt = sc->bestburstlen - needalign;
			if (cnt > rest)
				cnt = rest;
			count = cnt / sizeof(uint32_t);
			if (sc->noalbursts) {
				bcode = MIDDMA_WORD;
			} else {
				bcode = en_dmaplan[count].bcode;
				count = cnt >> en_dmaplan[count].divshift;
			}
			DO_DTQ("al_dma");
		}

		/* do we need to do a max-sized burst? */
		if (rest >= sc->bestburstlen) {
			count = rest >> sc->bestburstshift;
			cnt = count << sc->bestburstshift;
			bcode = sc->bestburstcode;
			DO_DTQ("best_dma");
		}

		/* do we need to do a cleanup burst? */
		if (rest != 0) {
			cnt = rest;
			count = rest / sizeof(uint32_t);
			if (sc->noalbursts) {
				bcode = MIDDMA_WORD;
			} else {
				bcode = en_dmaplan[count].bcode;
				count = cnt >> en_dmaplan[count].divshift;
			}
			DO_DTQ("clean_dma");
		}
	}

	KASSERT (tx->flags & TX_HAS_PAD, ("PDU not padded"));

	if ((tx->flags & TX_AAL5) && !(tx->flags & TX_HAS_PDU)) {
		/*
		 * Append the AAL5 PDU trailer - again written directly
		 * into the on-card buffer, followed by a JK entry.
		 */
		tmp = MID_PDU_MK1(0, 0, tx->datalen);
		en_write(sc, cur, tmp);
		EN_WRAPADD(slot->start, slot->stop, cur, 4);

		en_write(sc, cur, 0);
		EN_WRAPADD(slot->start, slot->stop, cur, 4);

		/* update DMA address */
		PUT_DTQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
	}

	/* record the end for the interrupt routine */
	sc->dtq[MID_DTQ_A2REG(last_dtq)] =
	    EN_DQ_MK(tx->chan, tx->m->m_pkthdr.len);

	/* set the end flag in the last descriptor */
	en_write(sc, last_dtq + 0, SETQ_END(sc, en_read(sc, last_dtq + 0)));

#undef PUT_DTQ_ENTRY
#undef DO_DTQ

	/* commit */
	slot->cur = cur;
	sc->dtq_free = free;
	sc->dtq_us = dtq;

	/* tell card */
	en_write(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_us));
}
694
/*
 * en_txdma: start transmit DMA on the given channel, if possible
 *
 * This is called from two places: when we got new packets from the upper
 * layer or when we found that buffer space has freed up during interrupt
 * processing.
 *
 * Loops until the channel's wait queue is drained. A packet that cannot
 * be started for lack of resources is prepended back onto the queue
 * (waitres); a packet that cannot be mapped is dropped (dequeue_drop).
 *
 * LOCK: locked, needed
 */
static void
en_txdma(struct en_softc *sc, struct en_txslot *slot)
{
	struct en_map *map;
	struct mbuf *lastm;
	struct txarg tx;
	u_int pad;
	int error;

	DBG(sc, TX, ("tx%td: starting ...", slot - sc->txslot));
  again:
	bzero(&tx, sizeof(tx));
	tx.chan = slot - sc->txslot;
	tx.sc = sc;

	/*
	 * get an mbuf waiting for DMA
	 */
	_IF_DEQUEUE(&slot->q, tx.m);
	if (tx.m == NULL) {
		DBG(sc, TX, ("tx%td: ...done!", slot - sc->txslot));
		return;
	}
	/* recover the parameters en_start() stashed in the pkthdr */
	MBUF_GET_TX(tx.m, tx.vci, tx.flags, tx.datalen, pad, map);

	/*
	 * note: don't use the entire buffer space.  if WRTX becomes equal
	 * to RDTX, the transmitter stops assuming the buffer is empty!  --kjc
	 */
	if (tx.m->m_pkthdr.len >= slot->bfree) {
		EN_COUNT(sc->stats.txoutspace);
		DBG(sc, TX, ("tx%td: out of transmit space", slot - sc->txslot));
		goto waitres;
	}

	lastm = NULL;
	if (!(tx.flags & TX_HAS_PAD)) {
		if (pad != 0) {
			/* Append the shared padding buffer; it is detached
			 * again right after the map load below. */
			(void)m_length(tx.m, &lastm);
			lastm->m_next = sc->padbuf;
			sc->padbuf->m_len = pad;
		}
		tx.flags |= TX_HAS_PAD;
	}

	/*
	 * Try to load that map. en_txdma_load() runs synchronously with
	 * BUS_DMA_NOWAIT and writes the DTQ entries itself.
	 */
	error = bus_dmamap_load_mbuf(sc->txtag, map->map, tx.m,
	    en_txdma_load, &tx, BUS_DMA_NOWAIT);

	if (lastm != NULL)
		lastm->m_next = NULL;

	if (error != 0) {
		device_printf(sc->dev, "loading TX map failed %d\n",
		    error);
		goto dequeue_drop;
	}
	map->flags |= ENMAP_LOADED;
	if (tx.wait) {
		/* probably not enough space - unload and retry later */
		bus_dmamap_unload(map->sc->txtag, map->map);
		map->flags &= ~ENMAP_LOADED;

		sc->need_dtqs = 1;
		DBG(sc, TX, ("tx%td: out of transmit DTQs", slot - sc->txslot));
		goto waitres;
	}

	EN_COUNT(sc->stats.launch);
	sc->ifp->if_opackets++;

	sc->vccs[tx.vci]->opackets++;
	sc->vccs[tx.vci]->obytes += tx.datalen;

#ifdef ENABLE_BPF
	if (bpf_peers_present(sc->ifp->if_bpf)) {
		/*
		 * adjust the top of the mbuf to skip the TBD if present
		 * before passing the packet to bpf.
		 * Also remove padding and the PDU trailer. Assume both of
		 * them to be in the same mbuf. pktlen, m_len and m_data
		 * are not needed anymore so we can change them.
		 */
		if (tx.flags & TX_HAS_TBD) {
			tx.m->m_data += MID_TBD_SIZE;
			tx.m->m_len -= MID_TBD_SIZE;
		}
		tx.m->m_pkthdr.len = m_length(tx.m, &lastm);
		if (tx.m->m_pkthdr.len > tx.datalen) {
			lastm->m_len -= tx.m->m_pkthdr.len - tx.datalen;
			tx.m->m_pkthdr.len = tx.datalen;
		}

		bpf_mtap(sc->ifp->if_bpf, tx.m);
	}
#endif

	/*
	 * do some housekeeping and get the next packet
	 */
	slot->bfree -= tx.m->m_pkthdr.len;
	_IF_ENQUEUE(&slot->indma, tx.m);

	goto again;

	/*
	 * error handling. This is jumped to when we just want to drop
	 * the packet. Must be unlocked here.
	 * (NOTE(review): the "unlocked" remark looks stale - this path
	 * runs with the same lock state as the rest of the function;
	 * confirm against the callers.)
	 */
  dequeue_drop:
	if (map != NULL)
		uma_zfree(sc->map_zone, map);

	slot->mbsize -= tx.m->m_pkthdr.len;

	m_freem(tx.m);

	goto again;

  waitres:
	/* resources exhausted: requeue at the head and stop for now */
	_IF_PREPEND(&slot->q, tx.m);
}
829
830/*
831 * Create a copy of a single mbuf. It can have either internal or
832 * external data, it may have a packet header. External data is really
833 * copied, so the new buffer is writeable.
834 *
835 * LOCK: any, not needed
836 */
837static struct mbuf *
838copy_mbuf(struct mbuf *m)
839{
840	struct mbuf *new;
841
842	MGET(new, M_WAITOK, MT_DATA);
843
844	if (m->m_flags & M_PKTHDR) {
845		M_MOVE_PKTHDR(new, m);
846		if (m->m_len > MHLEN)
847			MCLGET(new, M_WAITOK);
848	} else {
849		if (m->m_len > MLEN)
850			MCLGET(new, M_WAITOK);
851	}
852
853	bcopy(m->m_data, new->m_data, m->m_len);
854	new->m_len = m->m_len;
855	new->m_flags &= ~M_RDONLY;
856
857	return (new);
858}
859
/*
 * This function is called when we have an ENI adapter. It fixes the
 * mbuf chain, so that all addresses and lengths are 4 byte aligned.
 * The overall length is already padded to multiple of cells plus the
 * TBD so this must always succeed. The routine can fail, when it
 * needs to copy an mbuf (this may happen if an mbuf is readonly).
 *
 * We assume here, that aligning the virtual addresses to 4 bytes also
 * aligns the physical addresses.
 *
 * On failure the whole chain is freed and NULL returned; *pad is
 * decremented for every zero byte used as end-of-chain filler.
 *
 * LOCK: locked, needed
 */
static struct mbuf *
en_fix_mchain(struct en_softc *sc, struct mbuf *m0, u_int *pad)
{
	struct mbuf **prev = &m0;
	struct mbuf *m = m0;
	struct mbuf *new;
	u_char *d;
	int off;

	while (m != NULL) {
		/* misaligned data start: shift the data down in place if
		 * writeable, otherwise replace the mbuf by a copy */
		d = mtod(m, u_char *);
		if ((off = (uintptr_t)d % sizeof(uint32_t)) != 0) {
			EN_COUNT(sc->stats.mfixaddr);
			if (M_WRITABLE(m)) {
				bcopy(d, d - off, m->m_len);
				m->m_data -= off;
			} else {
				if ((new = copy_mbuf(m)) == NULL) {
					EN_COUNT(sc->stats.mfixfail);
					m_freem(m0);
					return (NULL);
				}
				new->m_next = m_free(m);
				*prev = m = new;
			}
		}

		/* length not a multiple of 4: pull the missing bytes from
		 * the following mbufs, or zero-fill (consuming *pad) when
		 * the chain ends here */
		if ((off = m->m_len % sizeof(uint32_t)) != 0) {
			EN_COUNT(sc->stats.mfixlen);
			if (!M_WRITABLE(m)) {
				if ((new = copy_mbuf(m)) == NULL) {
					EN_COUNT(sc->stats.mfixfail);
					m_freem(m0);
					return (NULL);
				}
				new->m_next = m_free(m);
				*prev = m = new;
			}
			d = mtod(m, u_char *) + m->m_len;
			off = 4 - off;
			while (off) {
				/* drop now-empty mbufs from the chain */
				while (m->m_next && m->m_next->m_len == 0)
					m->m_next = m_free(m->m_next);

				if (m->m_next == NULL) {
					*d++ = 0;
					KASSERT(*pad > 0, ("no padding space"));
					(*pad)--;
				} else {
					*d++ = *mtod(m->m_next, u_char *);
					m->m_next->m_len--;
					m->m_next->m_data++;
				}
				m->m_len++;
				off--;
			}
		}

		prev = &m->m_next;
		m = m->m_next;
	}

	return (m0);
}
936
/*
 * en_start: start transmitting the next packet that needs to go out
 * if there is one. We take off all packets from the interface's queue and
 * put them into the channel's queue.
 *
 * Here we also prepend the transmit packet descriptor and append the padding
 * and (for aal5) the PDU trailer. This is different from the original driver:
 * we assume, that allocating one or two additional mbufs is actually cheaper
 * than all this algorithmic fiddling we would need otherwise.
 *
 * While the packet is on the channel's wait queue we use the csum_* fields
 * in the packet header to hold the original datalen, the AAL5 flag and the
 * VCI. The packet length field in the header holds the needed buffer space.
 * This may actually be more than the length of the current mbuf chain (when
 * one or more of TBD, padding and PDU do not fit).
 *
 * LOCK: unlocked, needed (taken and dropped per packet inside the loop)
 */
static void
en_start(struct ifnet *ifp)
{
	struct en_softc *sc = (struct en_softc *)ifp->if_softc;
	struct mbuf *m, *lastm;
	struct atm_pseudohdr *ap;
	u_int pad;		/* 0-bytes to pad at PDU end */
	u_int datalen;		/* length of user data */
	u_int vci;		/* the VCI we are transmitting on */
	u_int flags;
	uint32_t tbd[2];
	uint32_t pdu[2];
	struct en_vcc *vc;
	struct en_map *map;
	struct en_txslot *tx;

	while (1) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			return;

		flags = 0;

		/* validate the pseudo header: VPI must be 0, the VCI must
		 * be open and not closing */
		ap = mtod(m, struct atm_pseudohdr *);
		vci = ATM_PH_VCI(ap);

		if (ATM_PH_VPI(ap) != 0 || vci >= MID_N_VC ||
		    (vc = sc->vccs[vci]) == NULL ||
		    (vc->vflags & VCC_CLOSE_RX)) {
			DBG(sc, TX, ("output vpi=%u, vci=%u -- drop",
			    ATM_PH_VPI(ap), vci));
			m_freem(m);
			continue;
		}
		if (vc->vcc.aal == ATMIO_AAL_5)
			flags |= TX_AAL5;
		m_adj(m, sizeof(struct atm_pseudohdr));

		/*
		 * (re-)calculate size of packet (in bytes)
		 */
		m->m_pkthdr.len = datalen = m_length(m, &lastm);

		/*
		 * computing how much padding we need on the end of the mbuf,
		 * then see if we can put the TBD at the front of the mbuf
		 * where the link header goes (well behaved protocols will
		 * reserve room for us). Last, check if room for PDU tail.
		 */
		if (flags & TX_AAL5)
			m->m_pkthdr.len += MID_PDU_SIZE;
		m->m_pkthdr.len = roundup(m->m_pkthdr.len, MID_ATMDATASZ);
		pad = m->m_pkthdr.len - datalen;
		if (flags & TX_AAL5)
			pad -= MID_PDU_SIZE;
		m->m_pkthdr.len += MID_TBD_SIZE;

		DBG(sc, TX, ("txvci%d: buflen=%u datalen=%u lead=%d trail=%d",
		    vci, m->m_pkthdr.len, datalen, (int)M_LEADINGSPACE(m),
		    (int)M_TRAILINGSPACE(lastm)));

		/*
		 * From here on we need access to sc
		 */
		EN_LOCK(sc);

		/*
		 * Allocate a map. We do this here rather then in en_txdma,
		 * because en_txdma is also called from the interrupt handler
		 * and we are going to have a locking problem then. We must
		 * use NOWAIT here, because the ip_output path holds various
		 * locks.
		 */
		map = uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT);
		if (map == NULL) {
			/* drop that packet */
			EN_COUNT(sc->stats.txnomap);
			EN_UNLOCK(sc);
			m_freem(m);
			continue;
		}

		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
			EN_UNLOCK(sc);
			uma_zfree(sc->map_zone, map);
			m_freem(m);
			continue;
		}

		/*
		 * Look, whether we can prepend the TBD (8 byte)
		 */
		if (M_WRITABLE(m) && M_LEADINGSPACE(m) >= MID_TBD_SIZE) {
			tbd[0] = htobe32(MID_TBD_MK1((flags & TX_AAL5) ?
			    MID_TBD_AAL5 : MID_TBD_NOAAL5,
			    vc->txspeed, m->m_pkthdr.len / MID_ATMDATASZ));
			tbd[1] = htobe32(MID_TBD_MK2(vci, 0, 0));

			m->m_data -= MID_TBD_SIZE;
			bcopy(tbd, m->m_data, MID_TBD_SIZE);
			m->m_len += MID_TBD_SIZE;
			flags |= TX_HAS_TBD;
		}

		/*
		 * Check whether the padding fits (must be writeable -
		 * we pad with zero).
		 */
		if (M_WRITABLE(lastm) && M_TRAILINGSPACE(lastm) >= pad) {
			bzero(lastm->m_data + lastm->m_len, pad);
			lastm->m_len += pad;
			flags |= TX_HAS_PAD;

			/* and the PDU trailer right behind the padding */
			if ((flags & TX_AAL5) &&
			    M_TRAILINGSPACE(lastm) > MID_PDU_SIZE) {
				pdu[0] = htobe32(MID_PDU_MK1(0, 0, datalen));
				pdu[1] = 0;
				bcopy(pdu, lastm->m_data + lastm->m_len,
				    MID_PDU_SIZE);
				lastm->m_len += MID_PDU_SIZE;
				flags |= TX_HAS_PDU;
			}
		}

		/* ENI cards need 4-byte aligned addresses and lengths */
		if (!sc->is_adaptec &&
		    (m = en_fix_mchain(sc, m, &pad)) == NULL) {
			EN_UNLOCK(sc);
			uma_zfree(sc->map_zone, map);
			continue;
		}

		/*
		 * get assigned channel (will be zero unless txspeed is set)
		 */
		tx = vc->txslot;

		if (m->m_pkthdr.len > EN_TXSZ * 1024) {
			DBG(sc, TX, ("tx%td: packet larger than xmit buffer "
			    "(%d > %d)\n", tx - sc->txslot, m->m_pkthdr.len,
			    EN_TXSZ * 1024));
			EN_UNLOCK(sc);
			m_freem(m);
			uma_zfree(sc->map_zone, map);
			continue;
		}

		if (tx->mbsize > EN_TXHIWAT) {
			EN_COUNT(sc->stats.txmbovr);
			DBG(sc, TX, ("tx%td: buffer space shortage",
			    tx - sc->txslot));
			EN_UNLOCK(sc);
			m_freem(m);
			uma_zfree(sc->map_zone, map);
			continue;
		}

		/* commit */
		tx->mbsize += m->m_pkthdr.len;

		DBG(sc, TX, ("tx%td: VCI=%d, speed=0x%x, buflen=%d, mbsize=%d",
		    tx - sc->txslot, vci, sc->vccs[vci]->txspeed,
		    m->m_pkthdr.len, tx->mbsize));

		/* stash the transmit parameters in the pkthdr for en_txdma */
		MBUF_SET_TX(m, vci, flags, datalen, pad, map);

		_IF_ENQUEUE(&tx->q, m);

		en_txdma(sc, tx);

		EN_UNLOCK(sc);
	}
}
1127
1128/*********************************************************************/
1129/*
1130 * VCs
1131 */
1132
1133/*
1134 * en_loadvc: load a vc tab entry from a slot
1135 *
1136 * LOCK: locked, needed
1137 */
1138static void
1139en_loadvc(struct en_softc *sc, struct en_vcc *vc)
1140{
1141	uint32_t reg = en_read(sc, MID_VC(vc->vcc.vci));
1142
1143	reg = MIDV_SETMODE(reg, MIDV_TRASH);
1144	en_write(sc, MID_VC(vc->vcc.vci), reg);
1145	DELAY(27);
1146
1147	/* no need to set CRC */
1148
1149	/* read pointer = 0, desc. start = 0 */
1150	en_write(sc, MID_DST_RP(vc->vcc.vci), 0);
1151	/* write pointer = 0 */
1152	en_write(sc, MID_WP_ST_CNT(vc->vcc.vci), 0);
1153	/* set mode, size, loc */
1154	en_write(sc, MID_VC(vc->vcc.vci), vc->rxslot->mode);
1155
1156	vc->rxslot->cur = vc->rxslot->start;
1157
1158	DBG(sc, VC, ("rx%td: assigned to VCI %d", vc->rxslot - sc->rxslot,
1159	    vc->vcc.vci));
1160}
1161
1162/*
1163 * Open the given vcc.
1164 *
1165 * LOCK: unlocked, needed
1166 */
1167static int
1168en_open_vcc(struct en_softc *sc, struct atmio_openvcc *op)
1169{
1170	uint32_t oldmode, newmode;
1171	struct en_rxslot *slot;
1172	struct en_vcc *vc;
1173	int error = 0;
1174
1175	DBG(sc, IOCTL, ("enable vpi=%d, vci=%d, flags=%#x",
1176	    op->param.vpi, op->param.vci, op->param.flags));
1177
1178	if (op->param.vpi != 0 || op->param.vci >= MID_N_VC)
1179		return (EINVAL);
1180
1181	vc = uma_zalloc(en_vcc_zone, M_NOWAIT | M_ZERO);
1182	if (vc == NULL)
1183		return (ENOMEM);
1184
1185	EN_LOCK(sc);
1186
1187	if (sc->vccs[op->param.vci] != NULL) {
1188		error = EBUSY;
1189		goto done;
1190	}
1191
1192	/* find a free receive slot */
1193	for (slot = sc->rxslot; slot < &sc->rxslot[sc->en_nrx]; slot++)
1194		if (slot->vcc == NULL)
1195			break;
1196	if (slot == &sc->rxslot[sc->en_nrx]) {
1197		error = ENOSPC;
1198		goto done;
1199	}
1200
1201	vc->rxslot = slot;
1202	vc->rxhand = op->rxhand;
1203	vc->vcc = op->param;
1204
1205	oldmode = slot->mode;
1206	newmode = (op->param.aal == ATMIO_AAL_5) ? MIDV_AAL5 : MIDV_NOAAL;
1207	slot->mode = MIDV_SETMODE(oldmode, newmode);
1208	slot->vcc = vc;
1209
1210	KASSERT (_IF_QLEN(&slot->indma) == 0 && _IF_QLEN(&slot->q) == 0,
1211	    ("en_rxctl: left over mbufs on enable slot=%td",
1212	    vc->rxslot - sc->rxslot));
1213
1214	vc->txspeed = 0;
1215	vc->txslot = sc->txslot;
1216	vc->txslot->nref++;	/* bump reference count */
1217
1218	en_loadvc(sc, vc);	/* does debug printf for us */
1219
1220	/* don't free below */
1221	sc->vccs[vc->vcc.vci] = vc;
1222	vc = NULL;
1223	sc->vccs_open++;
1224
1225  done:
1226	if (vc != NULL)
1227		uma_zfree(en_vcc_zone, vc);
1228
1229	EN_UNLOCK(sc);
1230	return (error);
1231}
1232
1233/*
1234 * Close finished
1235 */
1236static void
1237en_close_finish(struct en_softc *sc, struct en_vcc *vc)
1238{
1239
1240	if (vc->rxslot != NULL)
1241		vc->rxslot->vcc = NULL;
1242
1243	DBG(sc, VC, ("vci: %u free (%p)", vc->vcc.vci, vc));
1244
1245	sc->vccs[vc->vcc.vci] = NULL;
1246	uma_zfree(en_vcc_zone, vc);
1247	sc->vccs_open--;
1248}
1249
1250/*
1251 * LOCK: unlocked, needed
1252 */
static int
en_close_vcc(struct en_softc *sc, struct atmio_closevcc *cl)
{
	uint32_t oldmode, newmode;
	struct en_vcc *vc;
	int error = 0;

	DBG(sc, IOCTL, ("disable vpi=%d, vci=%d", cl->vpi, cl->vci));

	/* this adapter supports VPI 0 only and a limited VCI range */
	if (cl->vpi != 0 || cl->vci >= MID_N_VC)
		return (EINVAL);

	EN_LOCK(sc);
	if ((vc = sc->vccs[cl->vci]) == NULL) {
		error = ENOTCONN;
		goto done;
	}

	/*
	 * turn off VCI
	 */
	if (vc->rxslot == NULL) {
		error = ENOTCONN;
		goto done;
	}
	/* a close is already in progress on this VC */
	if (vc->vflags & VCC_DRAIN) {
		error = EINVAL;
		goto done;
	}

	/* switch the VC to trash mode and mark it out of service */
	oldmode = en_read(sc, MID_VC(cl->vci));
	newmode = MIDV_SETMODE(oldmode, MIDV_TRASH) & ~MIDV_INSERVICE;
	en_write(sc, MID_VC(cl->vci), (newmode | (oldmode & MIDV_INSERVICE)));

	/* halt in tracks, be careful to preserve inservice bit */
	DELAY(27);
	vc->rxslot->mode = newmode;

	vc->txslot->nref--;

	/* if stuff is still going on we are going to have to drain it out */
	if (_IF_QLEN(&vc->rxslot->indma) == 0 &&
	    _IF_QLEN(&vc->rxslot->q) == 0 &&
	    (vc->vflags & VCC_SWSL) == 0) {
		en_close_finish(sc, vc);
		goto done;
	}

	vc->vflags |= VCC_DRAIN;
	DBG(sc, IOCTL, ("VCI %u now draining", cl->vci));

	/* async close: the receive path completes the close for us */
	if (vc->vcc.flags & ATMIO_FLAG_ASYNC)
		goto done;

	/* sync close: sleep until the receive path has drained everything */
	vc->vflags |= VCC_CLOSE_RX;
	while ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) &&
	    (vc->vflags & VCC_DRAIN))
		cv_wait(&sc->cv_close, &sc->en_mtx);

	en_close_finish(sc, vc);
	/* the interface went down while we were sleeping */
	if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		error = EIO;
		goto done;
	}


  done:
	EN_UNLOCK(sc);
	return (error);
}
1323
1324/*********************************************************************/
1325/*
1326 * starting/stopping the card
1327 */
1328
1329/*
1330 * en_reset_ul: reset the board, throw away work in progress.
1331 * must en_init to recover.
1332 *
1333 * LOCK: locked, needed
1334 */
1335static void
1336en_reset_ul(struct en_softc *sc)
1337{
1338	struct en_map *map;
1339	struct mbuf *m;
1340	struct en_rxslot *rx;
1341	int lcv;
1342
1343	device_printf(sc->dev, "reset\n");
1344	sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1345
1346	if (sc->en_busreset)
1347		sc->en_busreset(sc);
1348	en_write(sc, MID_RESID, 0x0);	/* reset hardware */
1349
1350	/*
1351	 * recv: dump any mbufs we are dma'ing into, if DRAINing, then a reset
1352	 * will free us! Don't release the rxslot from the channel.
1353	 */
1354	for (lcv = 0 ; lcv < MID_N_VC ; lcv++) {
1355		if (sc->vccs[lcv] == NULL)
1356			continue;
1357		rx = sc->vccs[lcv]->rxslot;
1358
1359		for (;;) {
1360			_IF_DEQUEUE(&rx->indma, m);
1361			if (m == NULL)
1362				break;
1363			map = (void *)m->m_pkthdr.rcvif;
1364			uma_zfree(sc->map_zone, map);
1365			m_freem(m);
1366		}
1367		for (;;) {
1368			_IF_DEQUEUE(&rx->q, m);
1369			if (m == NULL)
1370				break;
1371			m_freem(m);
1372		}
1373		sc->vccs[lcv]->vflags = 0;
1374	}
1375
1376	/*
1377	 * xmit: dump everything
1378	 */
1379	for (lcv = 0 ; lcv < EN_NTX ; lcv++) {
1380		for (;;) {
1381			_IF_DEQUEUE(&sc->txslot[lcv].indma, m);
1382			if (m == NULL)
1383				break;
1384			map = (void *)m->m_pkthdr.rcvif;
1385			uma_zfree(sc->map_zone, map);
1386			m_freem(m);
1387		}
1388		for (;;) {
1389			_IF_DEQUEUE(&sc->txslot[lcv].q, m);
1390			if (m == NULL)
1391				break;
1392			map = (void *)m->m_pkthdr.rcvif;
1393			uma_zfree(sc->map_zone, map);
1394			m_freem(m);
1395		}
1396		sc->txslot[lcv].mbsize = 0;
1397	}
1398
1399	/*
1400	 * Unstop all waiters
1401	 */
1402	cv_broadcast(&sc->cv_close);
1403}
1404
1405/*
1406 * en_reset: reset the board, throw away work in progress.
1407 * must en_init to recover.
1408 *
1409 * LOCK: unlocked, needed
1410 *
1411 * Use en_reset_ul if you alreay have the lock
1412 */
void
en_reset(struct en_softc *sc)
{
	/* locked wrapper: take the softc lock, en_reset_ul() does the work */
	EN_LOCK(sc);
	en_reset_ul(sc);
	EN_UNLOCK(sc);
}
1420
1421
1422/*
1423 * en_init: init board and sync the card with the data in the softc.
1424 *
1425 * LOCK: locked, needed
1426 */
1427static void
1428en_init(struct en_softc *sc)
1429{
1430	int vc, slot;
1431	uint32_t loc;
1432
1433	if ((sc->ifp->if_flags & IFF_UP) == 0) {
1434		DBG(sc, INIT, ("going down"));
1435		en_reset(sc);				/* to be safe */
1436		return;
1437	}
1438
1439	DBG(sc, INIT, ("going up"));
1440	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;	/* enable */
1441
1442	if (sc->en_busreset)
1443		sc->en_busreset(sc);
1444	en_write(sc, MID_RESID, 0x0);		/* reset */
1445
1446	/* zero memory */
1447	bus_space_set_region_4(sc->en_memt, sc->en_base,
1448	    MID_RAMOFF, 0, sc->en_obmemsz / 4);
1449
1450	/*
1451	 * init obmem data structures: vc tab, dma q's, slist.
1452	 *
1453	 * note that we set drq_free/dtq_free to one less than the total number
1454	 * of DTQ/DRQs present.   we do this because the card uses the condition
1455	 * (drq_chip == drq_us) to mean "list is empty"... but if you allow the
1456	 * circular list to be completely full then (drq_chip == drq_us) [i.e.
1457	 * the drq_us pointer will wrap all the way around].   by restricting
1458	 * the number of active requests to (N - 1) we prevent the list from
1459	 * becoming completely full.    note that the card will sometimes give
1460	 * us an interrupt for a DTQ/DRQ we have already processes... this helps
1461	 * keep that interrupt from messing us up.
1462	 */
1463	bzero(&sc->drq, sizeof(sc->drq));
1464	sc->drq_free = MID_DRQ_N - 1;
1465	sc->drq_chip = MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX));
1466	en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
1467	sc->drq_us = sc->drq_chip;
1468
1469	bzero(&sc->dtq, sizeof(sc->dtq));
1470	sc->dtq_free = MID_DTQ_N - 1;
1471	sc->dtq_chip = MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX));
1472	en_write(sc, MID_DMA_WRTX, MID_DRQ_A2REG(sc->dtq_chip));
1473	sc->dtq_us = sc->dtq_chip;
1474
1475	sc->hwslistp = MID_SL_REG2A(en_read(sc, MID_SERV_WRITE));
1476	sc->swsl_size = sc->swsl_head = sc->swsl_tail = 0;
1477
1478	DBG(sc, INIT, ("drq free/chip: %d/0x%x, dtq free/chip: %d/0x%x, "
1479	    "hwslist: 0x%x", sc->drq_free, sc->drq_chip, sc->dtq_free,
1480	    sc->dtq_chip, sc->hwslistp));
1481
1482	for (slot = 0 ; slot < EN_NTX ; slot++) {
1483		sc->txslot[slot].bfree = EN_TXSZ * 1024;
1484		en_write(sc, MIDX_READPTR(slot), 0);
1485		en_write(sc, MIDX_DESCSTART(slot), 0);
1486		loc = sc->txslot[slot].cur = sc->txslot[slot].start;
1487		loc = loc - MID_RAMOFF;
1488		/* mask, cvt to words */
1489		loc = (loc & ~((EN_TXSZ * 1024) - 1)) >> 2;
1490		/* top 11 bits */
1491		loc = loc >> MIDV_LOCTOPSHFT;
1492		en_write(sc, MIDX_PLACE(slot), MIDX_MKPLACE(en_k2sz(EN_TXSZ),
1493		    loc));
1494		DBG(sc, INIT, ("tx%d: place 0x%x", slot,
1495		    (u_int)en_read(sc, MIDX_PLACE(slot))));
1496	}
1497
1498	for (vc = 0; vc < MID_N_VC; vc++)
1499		if (sc->vccs[vc] != NULL)
1500			en_loadvc(sc, sc->vccs[vc]);
1501
1502	/*
1503	 * enable!
1504	 */
1505	en_write(sc, MID_INTENA, MID_INT_TX | MID_INT_DMA_OVR | MID_INT_IDENT |
1506	    MID_INT_LERR | MID_INT_DMA_ERR | MID_INT_DMA_RX | MID_INT_DMA_TX |
1507	    MID_INT_SERVICE | MID_INT_SUNI | MID_INT_STATS);
1508	en_write(sc, MID_MAST_CSR, MID_SETIPL(sc->ipl) | MID_MCSR_ENDMA |
1509	    MID_MCSR_ENTX | MID_MCSR_ENRX);
1510}
1511
1512/*********************************************************************/
1513/*
1514 * Ioctls
1515 */
1516/*
1517 * en_ioctl: handle ioctl requests
1518 *
1519 * NOTE: if you add an ioctl to set txspeed, you should choose a new
1520 * TX channel/slot.   Choose the one with the lowest sc->txslot[slot].nref
1521 * value, subtract one from sc->txslot[0].nref, add one to the
1522 * sc->txslot[slot].nref, set sc->txvc2slot[vci] = slot, and then set
1523 * txspeed[vci].
1524 *
1525 * LOCK: unlocked, needed
1526 */
static int
en_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct en_softc *sc = (struct en_softc *)ifp->if_softc;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	struct ifreq *ifr = (struct ifreq *)data;
	struct atmio_vcctable *vtab;
	int error = 0;

	switch (cmd) {

	  case SIOCSIFADDR:
		EN_LOCK(sc);
		/* setting an address implicitly brings the interface up */
		ifp->if_flags |= IFF_UP;
#if defined(INET) || defined(INET6)
		if (ifa->ifa_addr->sa_family == AF_INET
		    || ifa->ifa_addr->sa_family == AF_INET6) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				en_reset_ul(sc);
				en_init(sc);
			}
			ifa->ifa_rtrequest = atm_rtrequest; /* ??? */
			EN_UNLOCK(sc);
			break;
		}
#endif /* INET */
		/* non-INET address families: just make sure we are running */
		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			en_reset_ul(sc);
			en_init(sc);
		}
		EN_UNLOCK(sc);
		break;

	case SIOCSIFFLAGS:
		EN_LOCK(sc);
		/* bring hardware state in sync with the UP flag */
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				en_init(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				en_reset_ul(sc);
		}
		EN_UNLOCK(sc);
		break;

	  case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		if (ifr->ifr_mtu > ATMMTU) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;

	  case SIOCSIFMEDIA:
	  case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		break;

	  case SIOCATMOPENVCC:		/* kernel internal use */
		error = en_open_vcc(sc, (struct atmio_openvcc *)data);
		break;

	  case SIOCATMCLOSEVCC:		/* kernel internal use */
		error = en_close_vcc(sc, (struct atmio_closevcc *)data);
		break;

	  case SIOCATMGETVCCS:	/* internal netgraph use */
		/* last arg 0: no-wait allocation, so NULL is possible */
		vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
		    MID_N_VC, sc->vccs_open, &sc->en_mtx, 0);
		if (vtab == NULL) {
			error = ENOMEM;
			break;
		}
		*(void **)data = vtab;
		break;

	  case SIOCATMGVCCS:	/* return vcc table */
		/* last arg 1: waiting allocation, copied out to user space */
		vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
		    MID_N_VC, sc->vccs_open, &sc->en_mtx, 1);
		error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
		    vtab->count * sizeof(vtab->vccs[0]));
		free(vtab, M_DEVBUF);
		break;

	  default:
		error = EINVAL;
		break;
	}
	return (error);
}
1622
1623/*********************************************************************/
1624/*
1625 * Sysctl's
1626 */
1627
1628/*
1629 * Sysctl handler for internal statistics
1630 *
1631 * LOCK: unlocked, needed
1632 */
1633static int
1634en_sysctl_istats(SYSCTL_HANDLER_ARGS)
1635{
1636	struct en_softc *sc = arg1;
1637	uint32_t *ret;
1638	int error;
1639
1640	ret = malloc(sizeof(sc->stats), M_TEMP, M_WAITOK);
1641
1642	EN_LOCK(sc);
1643	bcopy(&sc->stats, ret, sizeof(sc->stats));
1644	EN_UNLOCK(sc);
1645
1646	error = SYSCTL_OUT(req, ret, sizeof(sc->stats));
1647	free(ret, M_TEMP);
1648
1649	return (error);
1650}
1651
1652/*********************************************************************/
1653/*
1654 * Interrupts
1655 */
1656
1657/*
1658 * Transmit interrupt handler
1659 *
1660 * check for tx complete, if detected then this means that some space
1661 * has come free on the card.   we must account for it and arrange to
1662 * kick the channel to life (in case it is stalled waiting on the card).
1663 *
1664 * LOCK: locked, needed
1665 */
1666static uint32_t
1667en_intr_tx(struct en_softc *sc, uint32_t reg)
1668{
1669	uint32_t kick;
1670	uint32_t mask;
1671	uint32_t val;
1672	int chan;
1673
1674	kick = 0;		/* bitmask of channels to kick */
1675
1676	for (mask = 1, chan = 0; chan < EN_NTX; chan++, mask *= 2) {
1677		if (!(reg & MID_TXCHAN(chan)))
1678			continue;
1679
1680		kick = kick | mask;
1681
1682		/* current read pointer */
1683		val = en_read(sc, MIDX_READPTR(chan));
1684		/* as offset */
1685		val = (val * sizeof(uint32_t)) + sc->txslot[chan].start;
1686		if (val > sc->txslot[chan].cur)
1687			sc->txslot[chan].bfree = val - sc->txslot[chan].cur;
1688		else
1689			sc->txslot[chan].bfree = (val + (EN_TXSZ * 1024)) -
1690			    sc->txslot[chan].cur;
1691		DBG(sc, INTR, ("tx%d: transmit done. %d bytes now free in "
1692		    "buffer", chan, sc->txslot[chan].bfree));
1693	}
1694	return (kick);
1695}
1696
1697/*
1698 * TX DMA interrupt
1699 *
1700 * check for TX DMA complete, if detected then this means
1701 * that some DTQs are now free.   it also means some indma
1702 * mbufs can be freed. if we needed DTQs, kick all channels.
1703 *
1704 * LOCK: locked, needed
1705 */
static uint32_t
en_intr_tx_dma(struct en_softc *sc)
{
	uint32_t kick = 0;
	uint32_t val;
	uint32_t idx;
	uint32_t slot;
	uint32_t dtq;
	struct en_map *map;
	struct mbuf *m;

	val = en_read(sc, MID_DMA_RDTX); 	/* chip's current location */
	idx = MID_DTQ_A2REG(sc->dtq_chip);	/* where we last saw chip */

	if (sc->need_dtqs) {
		kick = MID_NTX_CH - 1;	/* assume power of 2, kick all! */
		sc->need_dtqs = 0;	/* recalculated in "kick" loop below */
		DBG(sc, INTR, ("cleared need DTQ condition"));
	}

	/* walk the DTQ ring from our last position up to the chip's */
	while (idx != val) {
		sc->dtq_free++;
		if ((dtq = sc->dtq[idx]) != 0) {
			/* don't forget to zero it out when done */
			sc->dtq[idx] = 0;
			slot = EN_DQ_SLOT(dtq);

			/* this DTQ completed a packet - release mbuf and map */
			_IF_DEQUEUE(&sc->txslot[slot].indma, m);
			if (m == NULL)
				panic("enintr: dtqsync");
			map = (void *)m->m_pkthdr.rcvif;
			uma_zfree(sc->map_zone, map);
			m_freem(m);

			/* account the completed bytes back to the slot */
			sc->txslot[slot].mbsize -= EN_DQ_LEN(dtq);
			DBG(sc, INTR, ("tx%d: free %d dma bytes, mbsize now "
			    "%d", slot, EN_DQ_LEN(dtq),
			    sc->txslot[slot].mbsize));
		}
		EN_WRAPADD(0, MID_DTQ_N, idx, 1);
	}
	sc->dtq_chip = MID_DTQ_REG2A(val);	/* sync softc */

	return (kick);
}
1751
1752/*
1753 * Service interrupt
1754 *
1755 * LOCK: locked, needed
1756 */
static int
en_intr_service(struct en_softc *sc)
{
	uint32_t chip;
	uint32_t vci;
	int need_softserv = 0;
	struct en_vcc *vc;

	/* where the chip will write its next service list entry */
	chip = MID_SL_REG2A(en_read(sc, MID_SERV_WRITE));

	while (sc->hwslistp != chip) {
		/* fetch and remove it from hardware service list */
		vci = en_read(sc, sc->hwslistp);
		EN_WRAPADD(MID_SLOFF, MID_SLEND, sc->hwslistp, 4);

		/* VC not open (or RX disabled): trash further cells */
		if ((vc = sc->vccs[vci]) == NULL ||
		    (vc->vcc.flags & ATMIO_FLAG_NORX)) {
			DBG(sc, INTR, ("unexpected rx interrupt VCI %d", vci));
			en_write(sc, MID_VC(vci), MIDV_TRASH);  /* rx off */
			continue;
		}

		/* remove from hwsl */
		en_write(sc, MID_VC(vci), vc->rxslot->mode);
		EN_COUNT(sc->stats.hwpull);

		DBG(sc, INTR, ("pulled VCI %d off hwslist", vci));

		/* add it to the software service list (if needed) */
		if ((vc->vflags & VCC_SWSL) == 0) {
			EN_COUNT(sc->stats.swadd);
			need_softserv = 1;
			vc->vflags |= VCC_SWSL;
			sc->swslist[sc->swsl_tail] = vci;
			EN_WRAPADD(0, MID_SL_N, sc->swsl_tail, 1);
			sc->swsl_size++;
			DBG(sc, INTR, ("added VCI %d to swslist", vci));
		}
	}
	return (need_softserv);
}
1798
1799/*
1800 * Handle a receive DMA completion
1801 */
static void
en_rx_drain(struct en_softc *sc, u_int drq)
{
	struct en_rxslot *slot;
	struct en_vcc *vc;
	struct mbuf *m;
	struct atm_pseudohdr ah;

	slot = &sc->rxslot[EN_DQ_SLOT(drq)];

	m = NULL;	/* assume "JK" trash DMA */
	if (EN_DQ_LEN(drq) != 0) {
		/* a real DMA - the mbuf must be waiting in the indma queue */
		_IF_DEQUEUE(&slot->indma, m);
		KASSERT(m != NULL, ("drqsync: %s: lost mbuf in slot %td!",
		    sc->ifp->if_xname, slot - sc->rxslot));
		uma_zfree(sc->map_zone, (struct en_map *)m->m_pkthdr.rcvif);
	}
	if ((vc = slot->vcc) == NULL) {
		/* ups */
		if (m != NULL)
			m_freem(m);
		return;
	}

	/* do something with this mbuf */
	if (vc->vflags & VCC_DRAIN) {
		/* drain? */
		if (m != NULL)
			m_freem(m);
		/*
		 * When all queues are empty, the card no longer has the
		 * VC in service and it is off the software service list,
		 * the pending close can complete.
		 */
		if (_IF_QLEN(&slot->indma) == 0 && _IF_QLEN(&slot->q) == 0 &&
		    (en_read(sc, MID_VC(vc->vcc.vci)) & MIDV_INSERVICE) == 0 &&
		    (vc->vflags & VCC_SWSL) == 0) {
			vc->vflags &= ~VCC_CLOSE_RX;
			if (vc->vcc.flags & ATMIO_FLAG_ASYNC)
				en_close_finish(sc, vc);
			else
				/* wake the sleeper in en_close_vcc() */
				cv_signal(&sc->cv_close);
		}
		return;
	}

	if (m != NULL) {
		/* build the ATM pseudo header for the upper layer */
		ATM_PH_FLAGS(&ah) = vc->vcc.flags;
		ATM_PH_VPI(&ah) = 0;
		ATM_PH_SETVCI(&ah, vc->vcc.vci);

		DBG(sc, INTR, ("rx%td: rxvci%d: atm_input, mbuf %p, len %d, "
		    "hand %p", slot - sc->rxslot, vc->vcc.vci, m,
		    EN_DQ_LEN(drq), vc->rxhand));

		m->m_pkthdr.rcvif = sc->ifp;
		sc->ifp->if_ipackets++;

		vc->ipackets++;
		vc->ibytes += m->m_pkthdr.len;

#ifdef EN_DEBUG
		if (sc->debug & DBG_IPACKETS)
			en_dump_packet(sc, m);
#endif
#ifdef ENABLE_BPF
		BPF_MTAP(sc->ifp, m);
#endif
		/* drop the softc lock around the call into the ATM stack */
		EN_UNLOCK(sc);
		atm_input(sc->ifp, &ah, m, vc->rxhand);
		EN_LOCK(sc);
	}
}
1870
1871/*
1872 * check for RX DMA complete, and pass the data "upstairs"
1873 *
1874 * LOCK: locked, needed
1875 */
1876static int
1877en_intr_rx_dma(struct en_softc *sc)
1878{
1879	uint32_t val;
1880	uint32_t idx;
1881	uint32_t drq;
1882
1883	val = en_read(sc, MID_DMA_RDRX); 	/* chip's current location */
1884	idx = MID_DRQ_A2REG(sc->drq_chip);	/* where we last saw chip */
1885
1886	while (idx != val) {
1887		sc->drq_free++;
1888		if ((drq = sc->drq[idx]) != 0) {
1889			/* don't forget to zero it out when done */
1890			sc->drq[idx] = 0;
1891			en_rx_drain(sc, drq);
1892		}
1893		EN_WRAPADD(0, MID_DRQ_N, idx, 1);
1894	}
1895	sc->drq_chip = MID_DRQ_REG2A(val);	/* sync softc */
1896
1897	if (sc->need_drqs) {
1898		/* true if we had a DRQ shortage */
1899		sc->need_drqs = 0;
1900		DBG(sc, INTR, ("cleared need DRQ condition"));
1901		return (1);
1902	} else
1903		return (0);
1904}
1905
1906/*
1907 * en_mget: get an mbuf chain that can hold totlen bytes and return it
1908 * (for recv). For the actual allocation totlen is rounded up to a multiple
1909 * of 4. We also ensure, that each mbuf has a multiple of 4 bytes.
1910 *
1911 * After this call the sum of all the m_len's in the chain will be totlen.
1912 * This is called at interrupt time, so we can't wait here.
1913 *
1914 * LOCK: any, not needed
1915 */
1916static struct mbuf *
1917en_mget(struct en_softc *sc, u_int pktlen)
1918{
1919	struct mbuf *m, *tmp;
1920	u_int totlen, pad;
1921
1922	totlen = roundup(pktlen, sizeof(uint32_t));
1923	pad = totlen - pktlen;
1924
1925	/*
1926	 * First get an mbuf with header. Keep space for a couple of
1927	 * words at the begin.
1928	 */
1929	/* called from interrupt context */
1930	MGETHDR(m, M_NOWAIT, MT_DATA);
1931	if (m == NULL)
1932		return (NULL);
1933
1934	m->m_pkthdr.rcvif = NULL;
1935	m->m_pkthdr.len = pktlen;
1936	m->m_len = EN_RX1BUF;
1937	MH_ALIGN(m, EN_RX1BUF);
1938	if (m->m_len >= totlen) {
1939		m->m_len = totlen;
1940
1941	} else {
1942		totlen -= m->m_len;
1943
1944		/* called from interrupt context */
1945		tmp = m_getm(m, totlen, M_NOWAIT, MT_DATA);
1946		if (tmp == NULL) {
1947			m_free(m);
1948			return (NULL);
1949		}
1950		tmp = m->m_next;
1951		/* m_getm could do this for us */
1952		while (tmp != NULL) {
1953			tmp->m_len = min(MCLBYTES, totlen);
1954			totlen -= tmp->m_len;
1955			tmp = tmp->m_next;
1956		}
1957	}
1958
1959	return (m);
1960}
1961
1962/*
1963 * Argument for RX DMAMAP loader.
1964 */
struct rxarg {
	struct en_softc *sc;	/* softc of the receiving device */
	struct mbuf *m;		/* mbuf chain the data is DMAed into */
	u_int pre_skip;		/* number of bytes to skip at begin */
	u_int post_skip;	/* number of bytes to skip at end */
	struct en_vcc *vc;	/* vc we are receiving on */
	int wait;		/* wait for DRQ entries */
};
1973
1974/*
1975 * Copy the segment table to the buffer for later use. And compute the
1976 * number of dma queue entries we need.
1977 *
1978 * LOCK: locked, needed
1979 */
static void
en_rxdma_load(void *uarg, bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsize, int error)
{
	struct rxarg *rx = uarg;
	struct en_softc *sc = rx->sc;
	struct en_rxslot *slot = rx->vc->rxslot;
	u_int		free;		/* number of free DRQ entries */
	uint32_t	cur;		/* current buffer offset */
	uint32_t	drq;		/* DRQ entry pointer */
	uint32_t	last_drq;	/* where we have written last */
	u_int		needalign, cnt, count, bcode;
	bus_addr_t	addr;
	bus_size_t	rest;
	int		i;

	if (error != 0)
		return;
	if (nseg > EN_MAX_DMASEG)
		panic("too many DMA segments");

	rx->wait = 0;

	/* work on local copies; committed to the softc only at the end */
	free = sc->drq_free;
	drq = sc->drq_us;
	cur = slot->cur;

	last_drq = 0;

	/*
	 * Local macro to add an entry to the receive DMA area. If there
	 * are no entries left, return. Save the byte offset of the entry
	 * in last_drq for later use.
	 */
#define PUT_DRQ_ENTRY(ENI, BCODE, COUNT, ADDR)				\
	if (free == 0) {						\
		EN_COUNT(sc->stats.rxdrqout);				\
		rx->wait = 1;						\
		return;							\
	}								\
	last_drq = drq;							\
	en_write(sc, drq + 0, (ENI || !sc->is_adaptec) ?		\
	    MID_MK_RXQ_ENI(COUNT, rx->vc->vcc.vci, 0, BCODE) :		\
	    MID_MK_RXQ_ADP(COUNT, rx->vc->vcc.vci, 0, BCODE));		\
	en_write(sc, drq + 4, ADDR);					\
									\
	EN_WRAPADD(MID_DRQOFF, MID_DRQEND, drq, 8);			\
	free--;

	/*
	 * Local macro to generate a DMA entry to DMA cnt bytes. Updates
	 * the current buffer byte offset accordingly.
	 */
#define DO_DRQ(TYPE) do {						\
	rest -= cnt;							\
	EN_WRAPADD(slot->start, slot->stop, cur, cnt);			\
	DBG(sc, SERV, ("rx%td: "TYPE" %u bytes, %ju left, cur %#x",	\
	    slot - sc->rxslot, cnt, (uintmax_t)rest, cur));		\
									\
	PUT_DRQ_ENTRY(1, bcode, count, addr);				\
									\
	addr += cnt;							\
    } while (0)

	/*
	 * Skip the RBD at the beginning
	 */
	if (rx->pre_skip > 0) {
		/* update DMA address */
		EN_WRAPADD(slot->start, slot->stop, cur, rx->pre_skip);

		PUT_DRQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
	}

	/* one or more DRQ entries per DMA segment */
	for (i = 0; i < nseg; i++, segs++) {
		addr = segs->ds_addr;
		rest = segs->ds_len;

		if (sc->is_adaptec) {
			/* adaptec card - simple */

			/* advance the on-card buffer pointer */
			EN_WRAPADD(slot->start, slot->stop, cur, rest);
			DBG(sc, SERV, ("rx%td: adp %ju bytes %#jx "
			    "(cur now 0x%x)", slot - sc->rxslot,
			    (uintmax_t)rest, (uintmax_t)addr, cur));

			PUT_DRQ_ENTRY(0, 0, rest, addr);

			continue;
		}

		/*
		 * do we need to do a DMA op to align to the maximum
		 * burst? Note, that we are alway 32-bit aligned.
		 */
		if (sc->alburst &&
		    (needalign = (addr & sc->bestburstmask)) != 0) {
			/* compute number of bytes, words and code */
			cnt = sc->bestburstlen - needalign;
			if (cnt > rest)
				cnt = rest;
			count = cnt / sizeof(uint32_t);
			if (sc->noalbursts) {
				bcode = MIDDMA_WORD;
			} else {
				bcode = en_dmaplan[count].bcode;
				count = cnt >> en_dmaplan[count].divshift;
			}
			DO_DRQ("al_dma");
		}

		/* do we need to do a max-sized burst? */
		if (rest >= sc->bestburstlen) {
			count = rest >> sc->bestburstshift;
			cnt = count << sc->bestburstshift;
			bcode = sc->bestburstcode;
			DO_DRQ("best_dma");
		}

		/* do we need to do a cleanup burst? */
		if (rest != 0) {
			cnt = rest;
			count = rest / sizeof(uint32_t);
			if (sc->noalbursts) {
				bcode = MIDDMA_WORD;
			} else {
				bcode = en_dmaplan[count].bcode;
				count = cnt >> en_dmaplan[count].divshift;
			}
			DO_DRQ("clean_dma");
		}
	}

	/*
	 * Skip stuff at the end
	 */
	if (rx->post_skip > 0) {
		/* update DMA address */
		EN_WRAPADD(slot->start, slot->stop, cur, rx->post_skip);

		PUT_DRQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
	}

	/* record the end for the interrupt routine */
	sc->drq[MID_DRQ_A2REG(last_drq)] =
	    EN_DQ_MK(slot - sc->rxslot, rx->m->m_pkthdr.len);

	/* set the end flag in the last descriptor */
	en_write(sc, last_drq + 0, SETQ_END(sc, en_read(sc, last_drq + 0)));

#undef PUT_DRQ_ENTRY
#undef DO_DRQ

	/* commit */
	slot->cur = cur;
	sc->drq_free = free;
	sc->drq_us = drq;

	/* signal to card */
	en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_us));
}
2142
2143/*
2144 * en_service: handle a service interrupt
2145 *
2146 * Q: why do we need a software service list?
2147 *
2148 * A: if we remove a VCI from the hardware list and we find that we are
2149 *    out of DRQs we must defer processing until some DRQs become free.
2150 *    so we must remember to look at this RX VCI/slot later, but we can't
2151 *    put it back on the hardware service list (since that isn't allowed).
2152 *    so we instead save it on the software service list.   it would be nice
2153 *    if we could peek at the VCI on top of the hwservice list without removing
2154 *    it, however this leads to a race condition: if we peek at it and
2155 *    decide we are done with it new data could come in before we have a
2156 *    chance to remove it from the hwslist.   by the time we get it out of
2157 *    the list the interrupt for the new data will be lost.   oops!
2158 *
2159 * LOCK: locked, needed
2160 */
2161static void
2162en_service(struct en_softc *sc)
2163{
2164	struct mbuf	*m, *lastm;
2165	struct en_map	*map;
2166	struct rxarg	rx;
2167	uint32_t	cur;
2168	uint32_t	dstart;		/* data start (as reported by card) */
2169	uint32_t	rbd;		/* receive buffer descriptor */
2170	uint32_t	pdu;		/* AAL5 trailer */
2171	int		mlen;
2172	int		error;
2173	struct en_rxslot *slot;
2174	struct en_vcc *vc;
2175
2176	rx.sc = sc;
2177
2178  next_vci:
2179	if (sc->swsl_size == 0) {
2180		DBG(sc, SERV, ("en_service done"));
2181		return;
2182	}
2183
2184	/*
2185	 * get vcc to service
2186	 */
2187	rx.vc = vc = sc->vccs[sc->swslist[sc->swsl_head]];
2188	slot = vc->rxslot;
2189	KASSERT (slot->vcc->rxslot == slot, ("en_service: rx slot/vci sync"));
2190
2191	/*
2192	 * determine our mode and if we've got any work to do
2193	 */
2194	DBG(sc, SERV, ("rx%td: service vci=%d start/stop/cur=0x%x 0x%x "
2195	    "0x%x", slot - sc->rxslot, vc->vcc.vci, slot->start,
2196	    slot->stop, slot->cur));
2197
2198  same_vci:
2199	cur = slot->cur;
2200
2201	dstart = MIDV_DSTART(en_read(sc, MID_DST_RP(vc->vcc.vci)));
2202	dstart = (dstart * sizeof(uint32_t)) + slot->start;
2203
2204	/* check to see if there is any data at all */
2205	if (dstart == cur) {
2206		EN_WRAPADD(0, MID_SL_N, sc->swsl_head, 1);
2207		/* remove from swslist */
2208		vc->vflags &= ~VCC_SWSL;
2209		sc->swsl_size--;
2210		DBG(sc, SERV, ("rx%td: remove vci %d from swslist",
2211		    slot - sc->rxslot, vc->vcc.vci));
2212		goto next_vci;
2213	}
2214
2215	/*
2216	 * figure out how many bytes we need
2217	 * [mlen = # bytes to go in mbufs]
2218	 */
2219	rbd = en_read(sc, cur);
2220	if (MID_RBD_ID(rbd) != MID_RBD_STDID)
2221		panic("en_service: id mismatch");
2222
2223	if (rbd & MID_RBD_T) {
2224		mlen = 0;		/* we've got trash */
2225		rx.pre_skip = MID_RBD_SIZE;
2226		rx.post_skip = 0;
2227		EN_COUNT(sc->stats.ttrash);
2228		DBG(sc, SERV, ("RX overflow lost %d cells!", MID_RBD_CNT(rbd)));
2229
2230	} else if (vc->vcc.aal != ATMIO_AAL_5) {
2231		/* 1 cell (ick!) */
2232		mlen = MID_CHDR_SIZE + MID_ATMDATASZ;
2233		rx.pre_skip = MID_RBD_SIZE;
2234		rx.post_skip = 0;
2235
2236	} else {
2237		rx.pre_skip = MID_RBD_SIZE;
2238
2239		/* get PDU trailer in correct byte order */
2240		pdu = cur + MID_RBD_CNT(rbd) * MID_ATMDATASZ +
2241		    MID_RBD_SIZE - MID_PDU_SIZE;
2242		if (pdu >= slot->stop)
2243			pdu -= EN_RXSZ * 1024;
2244		pdu = en_read(sc, pdu);
2245
2246		if (MID_RBD_CNT(rbd) * MID_ATMDATASZ <
2247		    MID_PDU_LEN(pdu)) {
2248			device_printf(sc->dev, "invalid AAL5 length\n");
2249			rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ;
2250			mlen = 0;
2251			sc->ifp->if_ierrors++;
2252
2253		} else if (rbd & MID_RBD_CRCERR) {
2254			device_printf(sc->dev, "CRC error\n");
2255			rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ;
2256			mlen = 0;
2257			sc->ifp->if_ierrors++;
2258
2259		} else {
2260			mlen = MID_PDU_LEN(pdu);
2261			rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ - mlen;
2262		}
2263	}
2264
2265	/*
2266	 * now allocate mbufs for mlen bytes of data, if out of mbufs, trash all
2267	 *
2268	 * notes:
2269	 *  1. it is possible that we've already allocated an mbuf for this pkt
2270	 *     but ran out of DRQs, in which case we saved the allocated mbuf
2271	 *     on "q".
2272	 *  2. if we save an buf in "q" we store the "cur" (pointer) in the
2273	 *     buf as an identity (that we can check later).
2274	 *  3. after this block of code, if m is still NULL then we ran out of
2275	 *     mbufs
2276	 */
2277	_IF_DEQUEUE(&slot->q, m);
2278	if (m != NULL) {
2279		if (m->m_pkthdr.csum_data != cur) {
2280			/* wasn't ours */
2281			DBG(sc, SERV, ("rx%td: q'ed buf %p not ours",
2282			    slot - sc->rxslot, m));
2283			_IF_PREPEND(&slot->q, m);
2284			m = NULL;
2285			EN_COUNT(sc->stats.rxqnotus);
2286		} else {
2287			EN_COUNT(sc->stats.rxqus);
2288			DBG(sc, SERV, ("rx%td: recovered q'ed buf %p",
2289			    slot - sc->rxslot, m));
2290		}
2291	}
2292	if (mlen == 0 && m != NULL) {
2293		/* should not happen */
2294		m_freem(m);
2295		m = NULL;
2296	}
2297
2298	if (mlen != 0 && m == NULL) {
2299		m = en_mget(sc, mlen);
2300		if (m == NULL) {
2301			rx.post_skip += mlen;
2302			mlen = 0;
2303			EN_COUNT(sc->stats.rxmbufout);
2304			DBG(sc, SERV, ("rx%td: out of mbufs",
2305			    slot - sc->rxslot));
2306		} else
2307			rx.post_skip -= roundup(mlen, sizeof(uint32_t)) - mlen;
2308
2309		DBG(sc, SERV, ("rx%td: allocate buf %p, mlen=%d",
2310		    slot - sc->rxslot, m, mlen));
2311	}
2312
2313	DBG(sc, SERV, ("rx%td: VCI %d, rbuf %p, mlen %d, skip %u/%u",
2314	    slot - sc->rxslot, vc->vcc.vci, m, mlen, rx.pre_skip,
2315	    rx.post_skip));
2316
2317	if (m != NULL) {
2318		/* M_NOWAIT - called from interrupt context */
2319		map = uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT);
2320		if (map == NULL) {
2321			rx.post_skip += mlen;
2322			m_freem(m);
2323			DBG(sc, SERV, ("rx%td: out of maps",
2324			    slot - sc->rxslot));
2325			goto skip;
2326		}
2327		rx.m = m;
2328		error = bus_dmamap_load_mbuf(sc->txtag, map->map, m,
2329		    en_rxdma_load, &rx, BUS_DMA_NOWAIT);
2330
2331		if (error != 0) {
2332			device_printf(sc->dev, "loading RX map failed "
2333			    "%d\n", error);
2334			uma_zfree(sc->map_zone, map);
2335			m_freem(m);
2336			rx.post_skip += mlen;
2337			goto skip;
2338
2339		}
2340		map->flags |= ENMAP_LOADED;
2341
2342		if (rx.wait) {
2343			/* out of DRQs - wait */
2344			uma_zfree(sc->map_zone, map);
2345
2346			m->m_pkthdr.csum_data = cur;
2347			_IF_ENQUEUE(&slot->q, m);
2348			EN_COUNT(sc->stats.rxdrqout);
2349
2350			sc->need_drqs = 1;	/* flag condition */
2351			return;
2352
2353		}
2354		(void)m_length(m, &lastm);
2355		lastm->m_len -= roundup(mlen, sizeof(uint32_t)) - mlen;
2356
2357		m->m_pkthdr.rcvif = (void *)map;
2358		_IF_ENQUEUE(&slot->indma, m);
2359
2360		/* get next packet in this slot */
2361		goto same_vci;
2362	}
2363  skip:
2364	/*
2365	 * Here we end if we should drop the packet from the receive buffer.
2366	 * The number of bytes to drop is in fill. We can do this with on
2367	 * JK entry. If we don't even have that one - wait.
2368	 */
2369	if (sc->drq_free == 0) {
2370		sc->need_drqs = 1;	/* flag condition */
2371		return;
2372	}
2373	rx.post_skip += rx.pre_skip;
2374	DBG(sc, SERV, ("rx%td: skipping %u", slot - sc->rxslot, rx.post_skip));
2375
2376	/* advance buffer address */
2377	EN_WRAPADD(slot->start, slot->stop, cur, rx.post_skip);
2378
2379	/* write DRQ entry */
2380	if (sc->is_adaptec)
2381		en_write(sc, sc->drq_us,
2382		    MID_MK_RXQ_ADP(WORD_IDX(slot->start, cur),
2383		    vc->vcc.vci, MID_DMA_END, MIDDMA_JK));
2384	else
2385	  	en_write(sc, sc->drq_us,
2386		    MID_MK_RXQ_ENI(WORD_IDX(slot->start, cur),
2387		    vc->vcc.vci, MID_DMA_END, MIDDMA_JK));
2388	en_write(sc, sc->drq_us + 4, 0);
2389	EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_us, 8);
2390	sc->drq_free--;
2391
2392	/* signal to RX interrupt */
2393	sc->drq[MID_DRQ_A2REG(sc->drq_us)] = EN_DQ_MK(slot - sc->rxslot, 0);
2394	slot->cur = cur;
2395
2396	/* signal to card */
2397	en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_us));
2398
2399	goto same_vci;
2400}
2401
2402/*
2403 * interrupt handler
2404 *
2405 * LOCK: unlocked, needed
2406 */
void
en_intr(void *arg)
{
	struct en_softc *sc = arg;
	uint32_t reg, kick, mask;
	int lcv, need_softserv;

	EN_LOCK(sc);

	/* reading MID_INTACK also acknowledges the pending interrupts */
	reg = en_read(sc, MID_INTACK);
	DBG(sc, INTR, ("interrupt=0x%b", reg, MID_INTBITS));

	/* possibly a shared line - nothing pending means not our interrupt */
	if ((reg & MID_INT_ANY) == 0) {
		EN_UNLOCK(sc);
		return;
	}

	/*
	 * unexpected errors that need a reset
	 */
	if ((reg & (MID_INT_IDENT | MID_INT_LERR | MID_INT_DMA_ERR)) != 0) {
		device_printf(sc->dev, "unexpected interrupt=0x%b, "
		    "resetting\n", reg, MID_INTBITS);
#ifdef EN_DEBUG
		/* in debug kernels stop hard so the state can be inspected */
		panic("en: unexpected error");
#else
		en_reset_ul(sc);
		en_init(sc);
#endif
		EN_UNLOCK(sc);
		return;
	}

	/* PHY interrupt - hand it to the utopia framework */
	if (reg & MID_INT_SUNI)
		utopia_intr(&sc->utopia);

	/* collect a bitmask of TX channels that have room again */
	kick = 0;
	if (reg & MID_INT_TX)
		kick |= en_intr_tx(sc, reg);

	if (reg & MID_INT_DMA_TX)
		kick |= en_intr_tx_dma(sc);

	/*
	 * kick xmit channels as needed.
	 */
	if (kick) {
		DBG(sc, INTR, ("tx kick mask = 0x%x", kick));
		for (mask = 1, lcv = 0 ; lcv < EN_NTX ; lcv++, mask = mask * 2)
			if ((kick & mask) && _IF_QLEN(&sc->txslot[lcv].q) != 0)
				en_txdma(sc, &sc->txslot[lcv]);
	}

	/* RX work is gathered here and then done in one en_service() pass */
	need_softserv = 0;
	if (reg & MID_INT_DMA_RX)
		need_softserv |= en_intr_rx_dma(sc);

	if (reg & MID_INT_SERVICE)
		need_softserv |= en_intr_service(sc);

	if (need_softserv)
		en_service(sc);

	/*
	 * keep our stats
	 */
	if (reg & MID_INT_DMA_OVR) {
		EN_COUNT(sc->stats.dmaovr);
		DBG(sc, INTR, ("MID_INT_DMA_OVR"));
	}
	reg = en_read(sc, MID_STAT);
	sc->stats.otrash += MID_OTRASH(reg);
	sc->stats.vtrash += MID_VTRASH(reg);

	EN_UNLOCK(sc);
}
2483
2484/*
2485 * Read at most n SUNI regs starting at reg into val
2486 */
2487static int
2488en_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n)
2489{
2490	struct en_softc *sc = ifatm->ifp->if_softc;
2491	u_int i;
2492
2493	EN_CHECKLOCK(sc);
2494	if (reg >= MID_NSUNI)
2495		return (EINVAL);
2496	if (reg + *n > MID_NSUNI)
2497		*n = MID_NSUNI - reg;
2498
2499	for (i = 0; i < *n; i++)
2500		val[i] = en_read(sc, MID_SUNIOFF + 4 * (reg + i));
2501
2502	return (0);
2503}
2504
2505/*
2506 * change the bits given by mask to them in val in register reg
2507 */
2508static int
2509en_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
2510{
2511	struct en_softc *sc = ifatm->ifp->if_softc;
2512	uint32_t regval;
2513
2514	EN_CHECKLOCK(sc);
2515	if (reg >= MID_NSUNI)
2516		return (EINVAL);
2517	regval = en_read(sc, MID_SUNIOFF + 4 * reg);
2518	regval = (regval & ~mask) | (val & mask);
2519	en_write(sc, MID_SUNIOFF + 4 * reg, regval);
2520	return (0);
2521}
2522
/* PHY access vtable handed to utopia_attach() in en_attach() */
static const struct utopia_methods en_utopia_methods = {
	en_utopia_readregs,
	en_utopia_writereg
};
2527
2528/*********************************************************************/
2529/*
2530 * Probing the DMA brokeness of the card
2531 */
2532
2533/*
2534 * Physical address load helper function for DMA probe
2535 *
2536 * LOCK: unlocked, not needed
2537 */
2538static void
2539en_dmaprobe_load(void *uarg, bus_dma_segment_t *segs, int nseg, int error)
2540{
2541	if (error == 0)
2542		*(bus_addr_t *)uarg = segs[0].ds_addr;
2543}
2544
2545/*
2546 * en_dmaprobe: helper function for en_attach.
2547 *
2548 * see how the card handles DMA by running a few DMA tests.   we need
2549 * to figure out the largest number of bytes we can DMA in one burst
2550 * ("bestburstlen"), and if the starting address for a burst needs to
2551 * be aligned on any sort of boundary or not ("alburst").
2552 *
2553 * Things turn out more complex than that, because on my (harti) brand
2554 * new motherboard (2.4GHz) we can do 64byte aligned DMAs, but everything
2555 * we more than 4 bytes fails (with an RX DMA timeout) for physical
2556 * addresses that end with 0xc. Therefor we search not only the largest
2557 * burst that is supported (hopefully 64) but also check what is the largerst
2558 * unaligned supported size. If that appears to be lesser than 4 words,
2559 * set the noalbursts flag. That will be set only if also alburst is set.
2560 */
2561
2562/*
2563 * en_dmaprobe_doit: do actual testing for the DMA test.
2564 * Cycle through all bursts sizes from 8 up to 64 and try whether it works.
2565 * Return the largest one that works.
2566 *
2567 * LOCK: unlocked, not needed
2568 */
2569static int
2570en_dmaprobe_doit(struct en_softc *sc, uint8_t *sp, bus_addr_t psp)
2571{
2572	uint8_t *dp = sp + MIDDMA_MAXBURST;
2573	bus_addr_t pdp = psp + MIDDMA_MAXBURST;
2574	int lcv, retval = 4, cnt;
2575	uint32_t reg, bcode, midvloc;
2576
2577	if (sc->en_busreset)
2578		sc->en_busreset(sc);
2579	en_write(sc, MID_RESID, 0x0);	/* reset card before touching RAM */
2580
2581	/*
2582	 * set up a 1k buffer at MID_BUFOFF
2583	 */
2584	midvloc = ((MID_BUFOFF - MID_RAMOFF) / sizeof(uint32_t))
2585	    >> MIDV_LOCTOPSHFT;
2586	en_write(sc, MIDX_PLACE(0), MIDX_MKPLACE(en_k2sz(1), midvloc));
2587	en_write(sc, MID_VC(0), (midvloc << MIDV_LOCSHIFT)
2588	    | (en_k2sz(1) << MIDV_SZSHIFT) | MIDV_TRASH);
2589	en_write(sc, MID_DST_RP(0), 0);
2590	en_write(sc, MID_WP_ST_CNT(0), 0);
2591
2592 	/* set up sample data */
2593	for (lcv = 0 ; lcv < MIDDMA_MAXBURST; lcv++)
2594		sp[lcv] = lcv + 1;
2595
2596	/* enable DMA (only) */
2597	en_write(sc, MID_MAST_CSR, MID_MCSR_ENDMA);
2598
2599	sc->drq_chip = MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX));
2600	sc->dtq_chip = MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX));
2601
2602	/*
2603	 * try it now . . .  DMA it out, then DMA it back in and compare
2604	 *
2605	 * note: in order to get the dma stuff to reverse directions it wants
2606	 * the "end" flag set!   since we are not dma'ing valid data we may
2607	 * get an ident mismatch interrupt (which we will ignore).
2608	 */
2609	DBG(sc, DMA, ("test sp=%p/%#lx, dp=%p/%#lx",
2610	    sp, (u_long)psp, dp, (u_long)pdp));
2611	for (lcv = 8 ; lcv <= MIDDMA_MAXBURST ; lcv = lcv * 2) {
2612		DBG(sc, DMA, ("test lcv=%d", lcv));
2613
2614		/* zero SRAM and dest buffer */
2615		bus_space_set_region_4(sc->en_memt, sc->en_base,
2616		    MID_BUFOFF, 0, 1024 / 4);
2617		bzero(dp, MIDDMA_MAXBURST);
2618
2619		bcode = en_sz2b(lcv);
2620
2621		/* build lcv-byte-DMA x NBURSTS */
2622		if (sc->is_adaptec)
2623			en_write(sc, sc->dtq_chip,
2624			    MID_MK_TXQ_ADP(lcv, 0, MID_DMA_END, 0));
2625		else
2626			en_write(sc, sc->dtq_chip,
2627			    MID_MK_TXQ_ENI(1, 0, MID_DMA_END, bcode));
2628		en_write(sc, sc->dtq_chip + 4, psp);
2629		EN_WRAPADD(MID_DTQOFF, MID_DTQEND, sc->dtq_chip, 8);
2630		en_write(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_chip));
2631
2632		cnt = 1000;
2633		while ((reg = en_readx(sc, MID_DMA_RDTX)) !=
2634		    MID_DTQ_A2REG(sc->dtq_chip)) {
2635			DELAY(1);
2636			if (--cnt == 0) {
2637				DBG(sc, DMA, ("unexpected timeout in tx "
2638				    "DMA test\n  alignment=0x%lx, burst size=%d"
2639				    ", dma addr reg=%#x, rdtx=%#x, stat=%#x\n",
2640				    (u_long)sp & 63, lcv,
2641				    en_read(sc, MID_DMA_ADDR), reg,
2642				    en_read(sc, MID_INTSTAT)));
2643				return (retval);
2644			}
2645		}
2646
2647		reg = en_read(sc, MID_INTACK);
2648		if ((reg & MID_INT_DMA_TX) != MID_INT_DMA_TX) {
2649			DBG(sc, DMA, ("unexpected status in tx DMA test: %#x\n",
2650			    reg));
2651			return (retval);
2652		}
2653		/* re-enable DMA (only) */
2654		en_write(sc, MID_MAST_CSR, MID_MCSR_ENDMA);
2655
2656		/* "return to sender..."  address is known ... */
2657
2658		/* build lcv-byte-DMA x NBURSTS */
2659		if (sc->is_adaptec)
2660			en_write(sc, sc->drq_chip,
2661			    MID_MK_RXQ_ADP(lcv, 0, MID_DMA_END, 0));
2662		else
2663			en_write(sc, sc->drq_chip,
2664			    MID_MK_RXQ_ENI(1, 0, MID_DMA_END, bcode));
2665		en_write(sc, sc->drq_chip + 4, pdp);
2666		EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_chip, 8);
2667		en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
2668		cnt = 1000;
2669		while ((reg = en_readx(sc, MID_DMA_RDRX)) !=
2670		    MID_DRQ_A2REG(sc->drq_chip)) {
2671			DELAY(1);
2672			cnt--;
2673			if (--cnt == 0) {
2674				DBG(sc, DMA, ("unexpected timeout in rx "
2675				    "DMA test, rdrx=%#x\n", reg));
2676				return (retval);
2677			}
2678		}
2679		reg = en_read(sc, MID_INTACK);
2680		if ((reg & MID_INT_DMA_RX) != MID_INT_DMA_RX) {
2681			DBG(sc, DMA, ("unexpected status in rx DMA "
2682			    "test: 0x%x\n", reg));
2683			return (retval);
2684		}
2685		if (bcmp(sp, dp, lcv)) {
2686			DBG(sc, DMA, ("DMA test failed! lcv=%d, sp=%p, "
2687			    "dp=%p", lcv, sp, dp));
2688			return (retval);
2689		}
2690
2691		retval = lcv;
2692	}
2693	return (retval);	/* studly 64 byte DMA present!  oh baby!! */
2694}
2695
2696/*
2697 * Find the best DMA parameters
2698 *
2699 * LOCK: unlocked, not needed
2700 */
static void
en_dmaprobe(struct en_softc *sc)
{
	bus_dma_tag_t tag;
	bus_dmamap_t map;
	int err;
	void *buffer;
	int bestalgn, lcv, try, bestnoalgn;
	bus_addr_t phys;
	uint8_t *addr;

	/* start with the optimistic assumption: no alignment restrictions */
	sc->alburst = 0;
	sc->noalbursts = 0;

	/*
	 * Allocate some DMA-able memory.
	 * We need 3 times the max burst size aligned to the max burst size.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), MIDDMA_MAXBURST, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    3 * MIDDMA_MAXBURST, 1, 3 * MIDDMA_MAXBURST, 0,
	    NULL, NULL, &tag);
	if (err)
		panic("%s: cannot create test DMA tag %d", __func__, err);

	err = bus_dmamem_alloc(tag, &buffer, 0, &map);
	if (err)
		panic("%s: cannot allocate test DMA memory %d", __func__, err);

	/* en_dmaprobe_load stores the segment's bus address into phys */
	err = bus_dmamap_load(tag, map, buffer, 3 * MIDDMA_MAXBURST,
	    en_dmaprobe_load, &phys, BUS_DMA_NOWAIT);
	if (err)
		panic("%s: cannot load test DMA map %d", __func__, err);
	addr = buffer;
	DBG(sc, DMA, ("phys=%#lx addr=%p", (u_long)phys, addr));

	/*
	 * Now get the best burst size of the aligned case.
	 */
	bestalgn = bestnoalgn = en_dmaprobe_doit(sc, addr, phys);

	/*
	 * Now try unaligned.  Step through all word offsets within a burst
	 * and keep the worst (smallest) burst size that still works.
	 */
	for (lcv = 4; lcv < MIDDMA_MAXBURST; lcv += 4) {
		try = en_dmaprobe_doit(sc, addr + lcv, phys + lcv);

		if (try < bestnoalgn)
			bestnoalgn = try;
	}

	/* see the block comment above: flag restricted unaligned DMA */
	if (bestnoalgn < bestalgn) {
		sc->alburst = 1;
		if (bestnoalgn < 32)
			sc->noalbursts = 1;
	}

	sc->bestburstlen = bestalgn;
	sc->bestburstshift = en_log2(bestalgn);
	sc->bestburstmask = sc->bestburstlen - 1; /* must be power of 2 */
	sc->bestburstcode = en_sz2b(bestalgn);

	/*
	 * Reset the chip before freeing the buffer. It may still be trying
	 * to DMA.
	 */
	if (sc->en_busreset)
		sc->en_busreset(sc);
	en_write(sc, MID_RESID, 0x0);	/* reset card before touching RAM */

	DELAY(10000);			/* may still do DMA */

	/*
	 * Free the DMA stuff
	 */
	bus_dmamap_unload(tag, map);
	bus_dmamem_free(tag, buffer, map);
	bus_dma_tag_destroy(tag);
}
2780
2781/*********************************************************************/
2782/*
2783 * Attach/detach.
2784 */
2785
2786/*
2787 * Attach to the card.
2788 *
2789 * LOCK: unlocked, not needed (but initialized)
2790 */
int
en_attach(struct en_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	int sz;
	uint32_t reg, lcv, check, ptr, sav, midvloc;

#ifdef EN_DEBUG
	sc->debug = EN_DEBUG;
#endif

	/*
	 * Probe card to determine memory size.
	 *
	 * The stupid ENI card always reports to PCI that it needs 4MB of
	 * space (2MB regs and 2MB RAM). If it has less than 2MB RAM the
	 * addresses wrap in the RAM address space (i.e. on a 512KB card
	 * addresses 0x3ffffc, 0x37fffc, and 0x2ffffc are aliases for
	 * 0x27fffc  [note that RAM starts at offset 0x200000]).
	 */

	/* reset card before touching RAM */
	if (sc->en_busreset)
		sc->en_busreset(sc);
	en_write(sc, MID_RESID, 0x0);

	/*
	 * Write each probe location's own address into it, then re-read all
	 * lower probe locations: if one changed, the last write wrapped
	 * around and we have found the top of RAM.
	 */
	for (lcv = MID_PROBEOFF; lcv <= MID_MAXOFF ; lcv += MID_PROBSIZE) {
		en_write(sc, lcv, lcv);	/* data[address] = address */
		for (check = MID_PROBEOFF; check < lcv ;check += MID_PROBSIZE) {
			reg = en_read(sc, check);
			if (reg != check)
				/* found an alias! - quit */
				goto done_probe;
		}
	}
  done_probe:
	lcv -= MID_PROBSIZE;			/* take one step back */
	sc->en_obmemsz = (lcv + 4) - MID_RAMOFF;

	/*
	 * determine the largest DMA burst supported
	 */
	en_dmaprobe(sc);

	/*
	 * "hello world"
	 */

	/* reset */
	if (sc->en_busreset)
		sc->en_busreset(sc);
	en_write(sc, MID_RESID, 0x0);		/* reset */

	/* zero memory */
	bus_space_set_region_4(sc->en_memt, sc->en_base,
	    MID_RAMOFF, 0, sc->en_obmemsz / 4);

	reg = en_read(sc, MID_RESID);

	device_printf(sc->dev, "ATM midway v%d, board IDs %d.%d, %s%s%s, "
	    "%ldKB on-board RAM\n", MID_VER(reg), MID_MID(reg), MID_DID(reg),
	    (MID_IS_SABRE(reg)) ? "sabre controller, " : "",
	    (MID_IS_SUNI(reg)) ? "SUNI" : "Utopia",
	    (!MID_IS_SUNI(reg) && MID_IS_UPIPE(reg)) ? " (pipelined)" : "",
	    (long)sc->en_obmemsz / 1024);

	/*
	 * fill in common ATM interface stuff
	 */
	IFP2IFATM(sc->ifp)->mib.hw_version = (MID_VER(reg) << 16) |
	    (MID_MID(reg) << 8) | MID_DID(reg);
	/* device-ID bit 2 distinguishes UTP from multi-mode fiber boards */
	if (MID_DID(reg) & 0x4)
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155;
	else
		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155;

	IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M;
	IFP2IFATM(sc->ifp)->mib.vpi_bits = 0;
	IFP2IFATM(sc->ifp)->mib.vci_bits = MID_VCI_BITS;
	IFP2IFATM(sc->ifp)->mib.max_vccs = MID_N_VC;
	IFP2IFATM(sc->ifp)->mib.max_vpcs = 0;

	/* report the DMA capabilities found by en_dmaprobe() */
	if (sc->is_adaptec) {
		IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_ADP155P;
		if (sc->bestburstlen == 64 && sc->alburst == 0)
			device_printf(sc->dev,
			    "passed 64 byte DMA test\n");
		else
			device_printf(sc->dev, "FAILED DMA TEST: "
			    "burst=%d, alburst=%d\n", sc->bestburstlen,
			    sc->alburst);
	} else {
		IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_ENI155P;
		device_printf(sc->dev, "maximum DMA burst length = %d "
		    "bytes%s\n", sc->bestburstlen, sc->alburst ?
		    sc->noalbursts ?  " (no large bursts)" : " (must align)" :
		    "");
	}

	/*
	 * link into network subsystem and prepare card
	 */
	sc->ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX;
	ifp->if_ioctl = en_ioctl;
	ifp->if_start = en_start;

	mtx_init(&sc->en_mtx, device_get_nameunit(sc->dev),
	    MTX_NETWORK_LOCK, MTX_DEF);
	cv_init(&sc->cv_close, "VC close");

	/*
	 * Make the sysctl tree
	 */
	sysctl_ctx_init(&sc->sysctl_ctx);

	if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
	    device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "")) == NULL)
		goto fail;

	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "istats", CTLTYPE_OPAQUE | CTLFLAG_RD, sc, 0,
	    en_sysctl_istats, "S", "internal statistics") == NULL)
		goto fail;

#ifdef EN_DEBUG
	if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "debug", CTLFLAG_RW , &sc->debug, 0, "") == NULL)
		goto fail;
#endif

	/* attach the PHY layer; uses en_utopia_methods for register access */
	IFP2IFATM(sc->ifp)->phy = &sc->utopia;
	utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->en_mtx,
	    &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    &en_utopia_methods);
	utopia_init_media(&sc->utopia);

	/* a zeroed mbuf used for padding transmitted frames */
	MGET(sc->padbuf, M_WAITOK, MT_DATA);
	bzero(sc->padbuf->m_data, MLEN);

	if (bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    EN_TXSZ * 1024, EN_MAX_DMASEG, EN_TXSZ * 1024, 0,
	    NULL, NULL, &sc->txtag))
		goto fail;

	sc->map_zone = uma_zcreate("en dma maps", sizeof(struct en_map),
	    en_map_ctor, en_map_dtor, NULL, en_map_fini, UMA_ALIGN_PTR,
	    UMA_ZONE_ZINIT);
	if (sc->map_zone == NULL)
		goto fail;
	uma_zone_set_max(sc->map_zone, EN_MAX_MAPS);

	/*
	 * init softc
	 */
	sc->vccs = malloc(MID_N_VC * sizeof(sc->vccs[0]),
	    M_DEVBUF, M_ZERO | M_WAITOK);

	/*
	 * Carve the on-board RAM above MID_BUFOFF into EN_NTX transmit
	 * buffers of EN_TXSZ KB each, then as many EN_RXSZ KB receive
	 * buffers as fit into the remainder.
	 */
	sz = sc->en_obmemsz - (MID_BUFOFF - MID_RAMOFF);
	ptr = sav = MID_BUFOFF;
	ptr = roundup(ptr, EN_TXSZ * 1024);	/* align */
	sz = sz - (ptr - sav);
	if (EN_TXSZ*1024 * EN_NTX > sz) {
		device_printf(sc->dev, "EN_NTX/EN_TXSZ too big\n");
		goto fail;
	}
	for (lcv = 0 ;lcv < EN_NTX ;lcv++) {
		sc->txslot[lcv].mbsize = 0;
		sc->txslot[lcv].start = ptr;
		ptr += (EN_TXSZ * 1024);
		sz -= (EN_TXSZ * 1024);
		sc->txslot[lcv].stop = ptr;
		sc->txslot[lcv].nref = 0;
		DBG(sc, INIT, ("tx%d: start 0x%x, stop 0x%x", lcv,
		    sc->txslot[lcv].start, sc->txslot[lcv].stop));
	}

	sav = ptr;
	ptr = roundup(ptr, EN_RXSZ * 1024);	/* align */
	sz = sz - (ptr - sav);
	sc->en_nrx = sz / (EN_RXSZ * 1024);
	if (sc->en_nrx <= 0) {
		device_printf(sc->dev, "EN_NTX/EN_TXSZ/EN_RXSZ too big\n");
		goto fail;
	}

	/*
	 * ensure that there is always one VC slot on the service list free
	 * so that we can tell the difference between a full and empty list.
	 */
	if (sc->en_nrx >= MID_N_VC)
		sc->en_nrx = MID_N_VC - 1;

	for (lcv = 0 ; lcv < sc->en_nrx ; lcv++) {
		sc->rxslot[lcv].vcc = NULL;
		midvloc = sc->rxslot[lcv].start = ptr;
		ptr += (EN_RXSZ * 1024);
		sz -= (EN_RXSZ * 1024);
		sc->rxslot[lcv].stop = ptr;
		midvloc = midvloc - MID_RAMOFF;
		/* mask, cvt to words */
		midvloc = (midvloc & ~((EN_RXSZ*1024) - 1)) >> 2;
		/* we only want the top 11 bits */
		midvloc = midvloc >> MIDV_LOCTOPSHFT;
		midvloc = (midvloc & MIDV_LOCMASK) << MIDV_LOCSHIFT;
		sc->rxslot[lcv].mode = midvloc |
		    (en_k2sz(EN_RXSZ) << MIDV_SZSHIFT) | MIDV_TRASH;

		DBG(sc, INIT, ("rx%d: start 0x%x, stop 0x%x, mode 0x%x", lcv,
		    sc->rxslot[lcv].start, sc->rxslot[lcv].stop,
		    sc->rxslot[lcv].mode));
	}

	device_printf(sc->dev, "%d %dKB receive buffers, %d %dKB transmit "
	    "buffers\n", sc->en_nrx, EN_RXSZ, EN_NTX, EN_TXSZ);
	device_printf(sc->dev, "end station identifier (mac address) "
	    "%6D\n", IFP2IFATM(sc->ifp)->mib.esi, ":");

	/*
	 * Start SUNI stuff. This will call our readregs/writeregs
	 * functions and these assume the lock to be held so we must get it
	 * here.
	 */
	EN_LOCK(sc);
	utopia_start(&sc->utopia);
	utopia_reset(&sc->utopia);
	EN_UNLOCK(sc);

	/*
	 * final commit
	 */
	atm_ifattach(ifp);

#ifdef ENABLE_BPF
	bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
#endif

	return (0);

 fail:
	/* en_destroy copes with partially initialized state */
	en_destroy(sc);
	return (-1);
}
3036
3037/*
3038 * Free all internal resources. No access to bus resources here.
3039 * No locking required here (interrupt is already disabled).
3040 *
3041 * LOCK: unlocked, needed (but destroyed)
3042 */
void
en_destroy(struct en_softc *sc)
{
	u_int i;

	/*
	 * May be reached from a failed en_attach(), so every resource is
	 * checked for existence before it is released.
	 */
	if (sc->utopia.state & UTP_ST_ATTACHED) {
		/* these assume the lock to be held */
		EN_LOCK(sc);
		utopia_stop(&sc->utopia);
		utopia_detach(&sc->utopia);
		EN_UNLOCK(sc);
	}

	if (sc->vccs != NULL) {
		/* get rid of sticky VCCs */
		for (i = 0; i < MID_N_VC; i++)
			if (sc->vccs[i] != NULL)
				uma_zfree(en_vcc_zone, sc->vccs[i]);
		free(sc->vccs, M_DEVBUF);
	}

	if (sc->padbuf != NULL)
		m_free(sc->padbuf);

	/*
	 * Destroy the map zone before the tag (the fini function will
	 * destroy the DMA maps using the tag)
	 */
	if (sc->map_zone != NULL)
		uma_zdestroy(sc->map_zone);

	if (sc->txtag != NULL)
		bus_dma_tag_destroy(sc->txtag);

	(void)sysctl_ctx_free(&sc->sysctl_ctx);

	cv_destroy(&sc->cv_close);
	mtx_destroy(&sc->en_mtx);
}
3082
3083/*
3084 * Module loaded/unloaded
3085 */
3086int
3087en_modevent(module_t mod __unused, int event, void *arg __unused)
3088{
3089
3090	switch (event) {
3091
3092	  case MOD_LOAD:
3093		en_vcc_zone = uma_zcreate("EN vccs", sizeof(struct en_vcc),
3094		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
3095		if (en_vcc_zone == NULL)
3096			return (ENOMEM);
3097		break;
3098
3099	  case MOD_UNLOAD:
3100		uma_zdestroy(en_vcc_zone);
3101		break;
3102	}
3103	return (0);
3104}
3105
3106/*********************************************************************/
3107/*
3108 * Debugging support
3109 */
3110
3111#ifdef EN_DDBHOOK
3112/*
3113 * functions we can call from ddb
3114 */
3115
3116/*
3117 * en_dump: dump the state
3118 */
/* bit flags for the "level" argument of en_dump(); OR them to combine */
#define END_SWSL	0x00000040		/* swsl state */
#define END_DRQ		0x00000020		/* drq state */
#define END_DTQ		0x00000010		/* dtq state */
#define END_RX		0x00000008		/* rx state */
#define END_TX		0x00000004		/* tx state */
#define END_MREGS	0x00000002		/* registers */
#define END_STATS	0x00000001		/* dump stats */

/* %b-style format string describing the flags above */
#define END_BITS "\20\7SWSL\6DRQ\5DTQ\4RX\3TX\2MREGS\1STATS"
3128
/* print the software statistics counters (ddb helper for en_dump) */
static void
en_dump_stats(const struct en_stats *s)
{
	printf("en_stats:\n");
	printf("\t%d/%d mfix (%d failed)\n", s->mfixaddr, s->mfixlen,
	    s->mfixfail);
	printf("\t%d rx dma overflow interrupts\n", s->dmaovr);
	printf("\t%d times out of TX space and stalled\n", s->txoutspace);
	printf("\t%d times out of DTQs\n", s->txdtqout);
	printf("\t%d times launched a packet\n", s->launch);
	printf("\t%d times pulled the hw service list\n", s->hwpull);
	printf("\t%d times pushed a vci on the sw service list\n", s->swadd);
	printf("\t%d times RX pulled an mbuf from Q that wasn't ours\n",
	    s->rxqnotus);
	printf("\t%d times RX pulled a good mbuf from Q\n", s->rxqus);
	printf("\t%d times ran out of DRQs\n", s->rxdrqout);
	printf("\t%d transmit packets dropped due to mbsize\n", s->txmbovr);
	printf("\t%d cells trashed due to turned off rxvc\n", s->vtrash);
	printf("\t%d cells trashed due to totally full buffer\n", s->otrash);
	printf("\t%d cells trashed due almost full buffer\n", s->ttrash);
	printf("\t%d rx mbuf allocation failures\n", s->rxmbufout);
	printf("\t%d times out of tx maps\n", s->txnomap);
#ifdef NATM
#ifdef NATM_STAT
	printf("\tnatmintr so_rcv: ok/drop cnt: %d/%d, ok/drop bytes: %d/%d\n",
	    natm_sookcnt, natm_sodropcnt, natm_sookbytes, natm_sodropbytes);
#endif
#endif
}
3158
3159static void
3160en_dump_mregs(struct en_softc *sc)
3161{
3162	u_int cnt;
3163
3164	printf("mregs:\n");
3165	printf("resid = 0x%x\n", en_read(sc, MID_RESID));
3166	printf("interrupt status = 0x%b\n",
3167	    (int)en_read(sc, MID_INTSTAT), MID_INTBITS);
3168	printf("interrupt enable = 0x%b\n",
3169	     (int)en_read(sc, MID_INTENA), MID_INTBITS);
3170	printf("mcsr = 0x%b\n", (int)en_read(sc, MID_MAST_CSR), MID_MCSRBITS);
3171	printf("serv_write = [chip=%u] [us=%u]\n", en_read(sc, MID_SERV_WRITE),
3172	     MID_SL_A2REG(sc->hwslistp));
3173	printf("dma addr = 0x%x\n", en_read(sc, MID_DMA_ADDR));
3174	printf("DRQ: chip[rd=0x%x,wr=0x%x], sc[chip=0x%x,us=0x%x]\n",
3175	    MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX)),
3176	    MID_DRQ_REG2A(en_read(sc, MID_DMA_WRRX)), sc->drq_chip, sc->drq_us);
3177	printf("DTQ: chip[rd=0x%x,wr=0x%x], sc[chip=0x%x,us=0x%x]\n",
3178	    MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX)),
3179	    MID_DTQ_REG2A(en_read(sc, MID_DMA_WRTX)), sc->dtq_chip, sc->dtq_us);
3180
3181	printf("  unusal txspeeds:");
3182	for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
3183		if (sc->vccs[cnt]->txspeed)
3184			printf(" vci%d=0x%x", cnt, sc->vccs[cnt]->txspeed);
3185	printf("\n");
3186
3187	printf("  rxvc slot mappings:");
3188	for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
3189		if (sc->vccs[cnt]->rxslot != NULL)
3190			printf("  %d->%td", cnt,
3191			    sc->vccs[cnt]->rxslot - sc->rxslot);
3192	printf("\n");
3193}
3194
/* print software and hardware state of all transmit slots (ddb helper) */
static void
en_dump_tx(struct en_softc *sc)
{
	u_int slot;

	printf("tx:\n");
	for (slot = 0 ; slot < EN_NTX; slot++) {
		/* software view of the slot's buffer window */
		printf("tx%d: start/stop/cur=0x%x/0x%x/0x%x [%d]  ", slot,
		    sc->txslot[slot].start, sc->txslot[slot].stop,
		    sc->txslot[slot].cur,
		    (sc->txslot[slot].cur - sc->txslot[slot].start) / 4);
		printf("mbsize=%d, bfree=%d\n", sc->txslot[slot].mbsize,
		    sc->txslot[slot].bfree);
		/* hardware view, read back from the chip registers */
		printf("txhw: base_address=0x%x, size=%u, read=%u, "
		    "descstart=%u\n",
		    (u_int)MIDX_BASE(en_read(sc, MIDX_PLACE(slot))),
		    MIDX_SZ(en_read(sc, MIDX_PLACE(slot))),
		    en_read(sc, MIDX_READPTR(slot)),
		    en_read(sc, MIDX_DESCSTART(slot)));
	}
}
3216
/* print software and hardware state of all receive slots (ddb helper) */
static void
en_dump_rx(struct en_softc *sc)
{
	struct en_rxslot *slot;

	printf("  recv slots:\n");
	for (slot = sc->rxslot ; slot < &sc->rxslot[sc->en_nrx]; slot++) {
		printf("rx%td: start/stop/cur=0x%x/0x%x/0x%x mode=0x%x ",
		    slot - sc->rxslot, slot->start, slot->stop, slot->cur,
		    slot->mode);
		/* hardware registers only exist for slots bound to a VC */
		if (slot->vcc != NULL) {
			printf("vci=%u\n", slot->vcc->vcc.vci);
			printf("RXHW: mode=0x%x, DST_RP=0x%x, WP_ST_CNT=0x%x\n",
			    en_read(sc, MID_VC(slot->vcc->vcc.vci)),
			    en_read(sc, MID_DST_RP(slot->vcc->vcc.vci)),
			    en_read(sc, MID_WP_ST_CNT(slot->vcc->vcc.vci)));
		}
	}
}
3236
3237/*
3238 * This is only correct for non-adaptec adapters
3239 */
/*
 * Print the outstanding DMA transmit queue entries between the chip's
 * read pointer and our write pointer (ddb helper for en_dump).
 */
static void
en_dump_dtqs(struct en_softc *sc)
{
	uint32_t ptr, reg;

	printf("  dtq [need_dtqs=%d,dtq_free=%d]:\n", sc->need_dtqs,
	    sc->dtq_free);
	ptr = sc->dtq_chip;
	/* walk the ring from the chip position up to our own */
	while (ptr != sc->dtq_us) {
		reg = en_read(sc, ptr);
		printf("\t0x%x=[%#x cnt=%d, chan=%d, end=%d, type=%d @ 0x%x]\n",
		    sc->dtq[MID_DTQ_A2REG(ptr)], reg, MID_DMA_CNT(reg),
		    MID_DMA_TXCHAN(reg), (reg & MID_DMA_END) != 0,
		    MID_DMA_TYPE(reg), en_read(sc, ptr + 4));
		EN_WRAPADD(MID_DTQOFF, MID_DTQEND, ptr, 8);
	}
}
3257
/*
 * Print the outstanding DMA receive queue entries between the chip's
 * read pointer and our write pointer (ddb helper for en_dump).
 */
static void
en_dump_drqs(struct en_softc *sc)
{
	uint32_t ptr, reg;

	printf("  drq [need_drqs=%d,drq_free=%d]:\n", sc->need_drqs,
	    sc->drq_free);
	ptr = sc->drq_chip;
	/* walk the ring from the chip position up to our own */
	while (ptr != sc->drq_us) {
		reg = en_read(sc, ptr);
		printf("\t0x%x=[cnt=%d, chan=%d, end=%d, type=%d @ 0x%x]\n",
		    sc->drq[MID_DRQ_A2REG(ptr)], MID_DMA_CNT(reg),
		    MID_DMA_RXVCI(reg), (reg & MID_DMA_END) != 0,
		    MID_DMA_TYPE(reg), en_read(sc, ptr + 4));
		EN_WRAPADD(MID_DRQOFF, MID_DRQEND, ptr, 8);
	}
}
3275
3276/* Do not staticize - meant for calling from DDB! */
3277int
3278en_dump(int unit, int level)
3279{
3280	struct en_softc *sc;
3281	int lcv, cnt;
3282	devclass_t dc;
3283	int maxunit;
3284
3285	dc = devclass_find("en");
3286	if (dc == NULL) {
3287		printf("%s: can't find devclass!\n", __func__);
3288		return (0);
3289	}
3290	maxunit = devclass_get_maxunit(dc);
3291	for (lcv = 0 ; lcv < maxunit ; lcv++) {
3292		sc = devclass_get_softc(dc, lcv);
3293		if (sc == NULL)
3294			continue;
3295		if (unit != -1 && unit != lcv)
3296			continue;
3297
3298		device_printf(sc->dev, "dumping device at level 0x%b\n",
3299		    level, END_BITS);
3300
3301		if (sc->dtq_us == 0) {
3302			printf("<hasn't been en_init'd yet>\n");
3303			continue;
3304		}
3305
3306		if (level & END_STATS)
3307			en_dump_stats(&sc->stats);
3308		if (level & END_MREGS)
3309			en_dump_mregs(sc);
3310		if (level & END_TX)
3311			en_dump_tx(sc);
3312		if (level & END_RX)
3313			en_dump_rx(sc);
3314		if (level & END_DTQ)
3315			en_dump_dtqs(sc);
3316		if (level & END_DRQ)
3317			en_dump_drqs(sc);
3318
3319		if (level & END_SWSL) {
3320			printf(" swslist [size=%d]: ", sc->swsl_size);
3321			for (cnt = sc->swsl_head ; cnt != sc->swsl_tail ;
3322			    cnt = (cnt + 1) % MID_SL_N)
3323				printf("0x%x ", sc->swslist[cnt]);
3324			printf("\n");
3325		}
3326	}
3327	return (0);
3328}
3329
3330/*
3331 * en_dumpmem: dump the memory
3332 *
3333 * Do not staticize - meant for calling from DDB!
3334 */
3335int
3336en_dumpmem(int unit, int addr, int len)
3337{
3338	struct en_softc *sc;
3339	uint32_t reg;
3340	devclass_t dc;
3341
3342	dc = devclass_find("en");
3343	if (dc == NULL) {
3344		printf("%s: can't find devclass\n", __func__);
3345		return (0);
3346	}
3347	sc = devclass_get_softc(dc, unit);
3348	if (sc == NULL) {
3349		printf("%s: invalid unit number: %d\n", __func__, unit);
3350		return (0);
3351	}
3352
3353	addr = addr & ~3;
3354	if (addr < MID_RAMOFF || addr + len * 4 > MID_MAXOFF || len <= 0) {
3355		printf("invalid addr/len number: %d, %d\n", addr, len);
3356		return (0);
3357	}
3358	printf("dumping %d words starting at offset 0x%x\n", len, addr);
3359	while (len--) {
3360		reg = en_read(sc, addr);
3361		printf("mem[0x%x] = 0x%x\n", addr, reg);
3362		addr += 4;
3363	}
3364	return (0);
3365}
3366#endif
3367