Deleted Added
sdiff udiff text old ( 114201 ) new ( 114739 )
full compact
1/* $NetBSD: midway.c,v 1.30 1997/09/29 17:40:38 chuck Exp $ */
2/* (sync'd to midway.c 1.68) */
3
4/*
5 *
6 * Copyright (c) 1996 Charles D. Cranor and Washington University.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Charles D. Cranor and
20 * Washington University.
21 * 4. The name of the author may not be used to endorse or promote products
22 * derived from this software without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
25 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
26 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
27 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
29 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
30 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
31 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
33 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 *
35 * $FreeBSD: head/sys/dev/en/midway.c 114201 2003-04-29 08:07:44Z harti $
36 */
37
38/*
39 *
40 * m i d w a y . c e n i 1 5 5 d r i v e r
41 *
42 * author: Chuck Cranor <chuck@ccrc.wustl.edu>
43 * started: spring, 1996 (written from scratch).
44 *
45 * notes from the author:
46 * Extra special thanks go to Werner Almesberger, EPFL LRC. Werner's
47 * ENI driver was especially useful in figuring out how this card works.
48 * I would also like to thank Werner for promptly answering email and being
49 * generally helpful.
50 */
51
52#define EN_DIAG
53#define EN_DDBHOOK 1 /* compile in ddb functions */
54
55/*
56 * Note on EN_ENIDMAFIX: the byte aligner on the ENI version of the card
57 * appears to be broken. it works just fine if there is no load... however
58 * when the card is loaded the data get corrupted. to see this, one only
59 * has to use "telnet" over ATM. do the following command in "telnet":
60 * cat /usr/share/misc/termcap
61 * "telnet" seems to generate lots of 1023 byte mbufs (which make great
62 * use of the byte aligner). watch "netstat -s" for checksum errors.
63 *
64 * I further tested this by adding a function that compared the transmit
65 * data on the card's SRAM with the data in the mbuf chain _after_ the
66 * "transmit DMA complete" interrupt. using the "telnet" test I got data
67 * mismatches where the byte-aligned data should have been. using ddb
68 * and en_dumpmem() I verified that the DTQs fed into the card were
69 * absolutely correct. thus, we are forced to concluded that the ENI
70 * hardware is buggy. note that the Adaptec version of the card works
71 * just fine with byte DMA.
72 *
73 * bottom line: we set EN_ENIDMAFIX to 1 to avoid byte DMAs on the ENI
74 * card.
75 */
76
77#if defined(DIAGNOSTIC) && !defined(EN_DIAG)
78#define EN_DIAG /* link in with master DIAG option */
79#endif
80
81#define EN_COUNT(X) (X)++
82
83#ifdef EN_DEBUG
84
85#undef EN_DDBHOOK
86#define EN_DDBHOOK 1
87
88/*
89 * This macro removes almost all the EN_DEBUG conditionals in the code that make
90 * to code a good deal less readable.
91 */
92#define DBG(SC, FL, PRINT) do { \
93 if ((SC)->debug & DBG_##FL) { \
94 if_printf(&(SC)->enif, "%s: "#FL": ", __func__); \
95 printf PRINT; \
96 printf("\n"); \
97 } \
98 } while (0)
99
100enum {
101 DBG_INIT = 0x0001, /* debug attach/detach */
102 DBG_TX = 0x0002, /* debug transmitting */
103 DBG_SERV = 0x0004, /* debug service interrupts */
104 DBG_IOCTL = 0x0008, /* debug ioctls */
105 DBG_VC = 0x0010, /* debug VC handling */
106 DBG_INTR = 0x0020, /* debug interrupts */
107 DBG_DMA = 0x0040, /* debug DMA probing */
108 DBG_IPACKETS = 0x0080, /* print input packets */
109 DBG_REG = 0x0100, /* print all register access */
110 DBG_LOCK = 0x0200, /* debug locking */
111};
112
113#else /* EN_DEBUG */
114
115#define DBG(SC, FL, PRINT) do { } while (0)
116
117#endif /* EN_DEBUG */
118
119#include "opt_inet.h"
120#include "opt_natm.h"
121#include "opt_ddb.h"
122
123#ifdef DDB
124#undef EN_DDBHOOK
125#define EN_DDBHOOK 1
126#endif
127
128#include <sys/param.h>
129#include <sys/systm.h>
130#include <sys/queue.h>
131#include <sys/sockio.h>
132#include <sys/socket.h>
133#include <sys/mbuf.h>
134#include <sys/endian.h>
135#include <sys/sbuf.h>
136#include <sys/stdint.h>
137#include <vm/uma.h>
138
139#include <net/if.h>
140#include <net/if_atm.h>
141
142#if defined(INET) || defined(INET6)
143#include <netinet/in.h>
144#include <netinet/if_atm.h>
145#endif
146
147#ifdef NATM
148#include <netnatm/natm.h>
149#endif
150
151#include <sys/bus.h>
152#include <machine/bus.h>
153#include <sys/rman.h>
154#include <sys/module.h>
155#include <sys/sysctl.h>
156#include <sys/malloc.h>
157#include <machine/resource.h>
158#include <dev/en/midwayreg.h>
159#include <dev/en/midwayvar.h>
160
161#include <net/bpf.h>
162
163/*
164 * params
165 */
166#ifndef EN_TXHIWAT
167#define EN_TXHIWAT (64 * 1024) /* max 64 KB waiting to be DMAd out */
168#endif
169
170#define RX_NONE 0xffff /* recv VC not in use */
171
172#define ENOTHER_FREE 0x01 /* free rxslot */
173#define ENOTHER_DRAIN 0x02 /* almost free (drain DRQ dma) */
174#define ENOTHER_SWSL 0x08 /* in software service list */
175
176SYSCTL_DECL(_hw_atm);
177
/*
 * dma tables
 *
 * The plan is indexed by the number of words to transfer.
 * The maximum index is 15 for 60 words.
 *
 * NOTE(review): the array below actually carries entries for indices
 * 0..16 (16 words is the largest single burst, MIDDMA_16WORD); the
 * "maximum index is 15" wording above appears to predate the last
 * entry — confirm against the users in en_txdma_load().
 */
struct en_dmatab {
	uint8_t bcode;		/* DMA burst code programmed into the DTQ */
	uint8_t divshift;	/* log2 of the burst's byte size; the byte
				 * count is converted to a burst count by
				 * shifting right by this amount */
};

/*
 * For word counts that are not an exact burst multiple, fall back to
 * the largest burst that divides the count (e.g. 6 words -> 2-word
 * bursts, 5 words -> single-word bursts).
 */
static const struct en_dmatab en_dmaplan[] = {
  { 0, 0 },		/* 0 */		{ MIDDMA_WORD, 2},	/* 1 */
  { MIDDMA_2WORD, 3},	/* 2 */		{ MIDDMA_WORD, 2},	/* 3 */
  { MIDDMA_4WORD, 4},	/* 4 */		{ MIDDMA_WORD, 2},	/* 5 */
  { MIDDMA_2WORD, 3},	/* 6 */		{ MIDDMA_WORD, 2},	/* 7 */
  { MIDDMA_8WORD, 5},	/* 8 */		{ MIDDMA_WORD, 2},	/* 9 */
  { MIDDMA_2WORD, 3},	/* 10 */	{ MIDDMA_WORD, 2},	/* 11 */
  { MIDDMA_4WORD, 4},	/* 12 */	{ MIDDMA_WORD, 2},	/* 13 */
  { MIDDMA_2WORD, 3},	/* 14 */	{ MIDDMA_WORD, 2},	/* 15 */
  { MIDDMA_16WORD,6},	/* 16 */
};
200
201/*
202 * prototypes
203 */
204#ifdef EN_DDBHOOK
205int en_dump(int unit, int level);
206int en_dumpmem(int,int,int);
207#endif
208
/*
 * Softc lock helpers. Every call site passes the softc pointer, so use
 * the macro argument (SC) in the body instead of silently capturing a
 * variable named `sc` from the caller's scope (macro-hygiene fix; the
 * old form compiled only because all callers happened to name it `sc`).
 */
#define EN_LOCK(SC)	do {				\
	DBG(SC, LOCK, ("ENLOCK %d\n", __LINE__));	\
	mtx_lock(&(SC)->en_mtx);			\
    } while (0)
#define EN_UNLOCK(SC)	do {				\
	DBG(SC, LOCK, ("ENUNLOCK %d\n", __LINE__));	\
	mtx_unlock(&(SC)->en_mtx);			\
    } while (0)
217
218/*
219 * While a transmit mbuf is waiting to get transmit DMA resources we
220 * need to keep some information with it. We don't want to allocate
221 * additional memory for this so we stuff it into free fields in the
222 * mbuf packet header. Neither the checksum fields nor the rcvif field are used
223 * so use these.
224 */
225#define TX_AAL5 0x1 /* transmit AAL5 PDU */
226#define TX_HAS_TBD 0x2 /* TBD did fit into mbuf */
227#define TX_HAS_PAD 0x4 /* padding did fit into mbuf */
228#define TX_HAS_PDU 0x8 /* PDU trailer did fit into mbuf */
229
230#define MBUF_SET_TX(M, VCI, FLAGS, DATALEN, PAD, MAP) do { \
231 (M)->m_pkthdr.csum_data = (VCI) | ((FLAGS) << MID_VCI_BITS); \
232 (M)->m_pkthdr.csum_flags = ((DATALEN) & 0xffff) | \
233 ((PAD & 0x3f) << 16); \
234 (M)->m_pkthdr.rcvif = (void *)(MAP); \
235 } while (0)
236
237#define MBUF_GET_TX(M, VCI, FLAGS, DATALEN, PAD, MAP) do { \
238 (VCI) = (M)->m_pkthdr.csum_data & ((1 << MID_VCI_BITS) - 1); \
239 (FLAGS) = ((M)->m_pkthdr.csum_data >> MID_VCI_BITS) & 0xf; \
240 (DATALEN) = (M)->m_pkthdr.csum_flags & 0xffff; \
241 (PAD) = ((M)->m_pkthdr.csum_flags >> 16) & 0x3f; \
242 (MAP) = (void *)((M)->m_pkthdr.rcvif); \
243 } while (0)
244
245
246#define EN_WRAPADD(START, STOP, CUR, VAL) do { \
247 (CUR) = (CUR) + (VAL); \
248 if ((CUR) >= (STOP)) \
249 (CUR) = (START) + ((CUR) - (STOP)); \
250 } while (0)
251
252#define WORD_IDX(START, X) (((X) - (START)) / sizeof(uint32_t))
253
254#define SETQ_END(SC, VAL) ((SC)->is_adaptec ? \
255 ((VAL) | (MID_DMA_END >> 4)) : \
256 ((VAL) | (MID_DMA_END)))
257
258/*
259 * The dtq and drq members are set for each END entry in the corresponding
260 * card queue entry. It is used to find out, when a buffer has been
261 * finished DMAing and can be freed.
262 *
263 * We store sc->dtq and sc->drq data in the following format...
264 * the 0x80000 ensures we != 0
265 */
266#define EN_DQ_MK(SLOT, LEN) (((SLOT) << 20) | (LEN) | (0x80000))
267#define EN_DQ_SLOT(X) ((X) >> 20)
268#define EN_DQ_LEN(X) ((X) & 0x3ffff)
269
270/***********************************************************************/
271
272/*
273 * en_read{x}: read a word from the card. These are the only functions
274 * that read from the card.
275 */
276static __inline uint32_t
277en_readx(struct en_softc *sc, uint32_t r)
278{
279 uint32_t v;
280
281#ifdef EN_DIAG
282 if (r > MID_MAXOFF || (r % 4))
283 panic("en_read out of range, r=0x%x", r);
284#endif
285 v = bus_space_read_4(sc->en_memt, sc->en_base, r);
286 return (v);
287}
288
289static __inline uint32_t
290en_read(struct en_softc *sc, uint32_t r)
291{
292 uint32_t v;
293
294#ifdef EN_DIAG
295 if (r > MID_MAXOFF || (r % 4))
296 panic("en_read out of range, r=0x%x", r);
297#endif
298 v = bus_space_read_4(sc->en_memt, sc->en_base, r);
299 DBG(sc, REG, ("en_read(%#x) -> %08x", r, v));
300 return (v);
301}
302
/*
 * en_write: write a word to the card. This is the only function that
 * writes to the card.
 *
 * r must be a word-aligned offset within card memory (checked under
 * EN_DIAG). The DBG trace is emitted before the bus write so that the
 * log shows the intended store even if the write itself hangs.
 */
static __inline void
en_write(struct en_softc *sc, uint32_t r, uint32_t v)
{
#ifdef EN_DIAG
	if (r > MID_MAXOFF || (r % 4))
		panic("en_write out of range, r=0x%x", r);
#endif
	DBG(sc, REG, ("en_write(%#x) <- %08x", r, v));
	bus_space_write_4(sc->en_memt, sc->en_base, r, v);
}
317
/*
 * en_k2sz: convert KBytes to a size parameter (a log2).
 * Valid inputs are the powers of two 1..128; anything else panics,
 * exactly as the original switch-based version did.
 */
static __inline int
en_k2sz(int k)
{
	int log2;

	for (log2 = 0; log2 <= 7; log2++)
		if ((1 << log2) == k)
			return (log2);
	panic("en_k2sz");
	return (0);
}
338#define en_log2(X) en_k2sz(X)
339
340/*
341 * en_b2sz: convert a DMA burst code to its byte size
342 */
343static __inline int
344en_b2sz(int b)
345{
346 switch (b) {
347 case MIDDMA_WORD: return (1*4);
348 case MIDDMA_2WMAYBE:
349 case MIDDMA_2WORD: return (2*4);
350 case MIDDMA_4WMAYBE:
351 case MIDDMA_4WORD: return (4*4);
352 case MIDDMA_8WMAYBE:
353 case MIDDMA_8WORD: return (8*4);
354 case MIDDMA_16WMAYBE:
355 case MIDDMA_16WORD: return (16*4);
356 default:
357 panic("en_b2sz");
358 }
359 return (0);
360}
361
362/*
363 * en_sz2b: convert a burst size (bytes) to DMA burst code
364 */
365static __inline int
366en_sz2b(int sz)
367{
368 switch (sz) {
369 case 1*4: return (MIDDMA_WORD);
370 case 2*4: return (MIDDMA_2WORD);
371 case 4*4: return (MIDDMA_4WORD);
372 case 8*4: return (MIDDMA_8WORD);
373 case 16*4: return (MIDDMA_16WORD);
374 default:
375 panic("en_sz2b");
376 }
377 return(0);
378}
379
380#ifdef EN_DEBUG
381/*
382 * Dump a packet
383 */
384static void
385en_dump_packet(struct en_softc *sc, struct mbuf *m)
386{
387 int plen = m->m_pkthdr.len;
388 u_int pos = 0;
389 u_int totlen = 0;
390 int len;
391 u_char *ptr;
392
393 if_printf(&sc->enif, "packet len=%d", plen);
394 while (m != NULL) {
395 totlen += m->m_len;
396 ptr = mtod(m, u_char *);
397 for (len = 0; len < m->m_len; len++, pos++, ptr++) {
398 if (pos % 16 == 8)
399 printf(" ");
400 if (pos % 16 == 0)
401 printf("\n");
402 printf(" %02x", *ptr);
403 }
404 m = m->m_next;
405 }
406 printf("\n");
407 if (totlen != plen);
408 printf("sum of m_len=%u\n", totlen);
409}
410#endif
411
412/*********************************************************************/
413/*
414 * DMA maps
415 */
416
417/*
418 * Map constructor for a MAP.
419 *
420 * This is called each time when a map is allocated
421 * from the pool and about to be returned to the user. Here we actually
422 * allocate the map if there isn't one. The problem is that we may fail
423 * to allocate the DMA map yet have no means to signal this error. Therefor
424 * when allocating a map, the call must check that there is a map. An
425 * additional problem is, that i386 maps will be NULL, yet are ok and must
426 * be freed so let's use a flag to signal allocation.
427 *
428 * Caveat: we have no way to know that we are called from an interrupt context
429 * here. We rely on the fact, that bus_dmamap_create uses M_NOWAIT in all
430 * its allocations.
431 *
432 * LOCK: any, not needed
433 */
434static void
435en_map_ctor(void *mem, int size, void *arg)
436{
437 struct en_softc *sc = arg;
438 struct en_map *map = mem;
439 int err;
440
441 if (map->sc == NULL)
442 map->sc = sc;
443
444 if (!(map->flags & ENMAP_ALLOC)) {
445 err = bus_dmamap_create(sc->txtag, 0, &map->map);
446 if (err != 0)
447 if_printf(&sc->enif, "cannot create DMA map %d\n", err);
448 else
449 map->flags |= ENMAP_ALLOC;
450 }
451 map->flags &= ~ENMAP_LOADED;
452}
453
454/*
455 * Map destructor.
456 *
457 * Called when a map is disposed into the zone. If the map is loaded, unload
458 * it.
459 *
460 * LOCK: any, not needed
461 */
462static void
463en_map_dtor(void *mem, int size, void *arg)
464{
465 struct en_map *map = mem;
466
467 if (map->flags & ENMAP_LOADED) {
468 bus_dmamap_unload(map->sc->txtag, map->map);
469 map->flags &= ~ENMAP_LOADED;
470 }
471}
472
473/*
474 * Map finializer.
475 *
476 * This is called each time a map is returned from the zone to the system.
477 * Get rid of the dmamap here.
478 *
479 * LOCK: any, not needed
480 */
481static void
482en_map_fini(void *mem, int size)
483{
484 struct en_map *map = mem;
485
486 if (map->flags & ENMAP_ALLOC)
487 bus_dmamap_destroy(map->sc->txtag, map->map);
488}
489
490/*********************************************************************/
491/*
492 * Transmission
493 */
494
/*
 * Argument structure to load a transmit DMA map.
 *
 * Filled in by en_txdma() and handed to bus_dmamap_load_mbuf(), whose
 * callback (en_txdma_load) reads the parameters and reports back
 * through `wait'.
 */
struct txarg {
	struct en_softc *sc;	/* softc of the card being transmitted on */
	struct mbuf *m;		/* the mbuf chain being DMAd */
	u_int vci;		/* VCI the packet goes out on */
	u_int chan;		/* transmit channel */
	u_int datalen;		/* length of user data */
	u_int flags;		/* TX_AAL5/TX_HAS_* flags from MBUF_GET_TX */
	u_int wait;		/* return: out of resources */
};
507
/*
 * TX DMA map loader helper. This function is the callback when the map
 * is loaded. It should fill the DMA segment descriptors into the hardware.
 *
 * uarg is the struct txarg prepared by en_txdma(). segs/nseg describe
 * the physical segments of the mbuf chain. On DTQ exhaustion the
 * function sets tx->wait = 1 and returns WITHOUT committing anything
 * to the softc (slot->cur, dtq_free, dtq_us are only written at the
 * end), so the caller can unload the map and retry later. Note that
 * the card registers written before such an early return are not
 * rolled back; the card only acts once MID_DMA_WRTX is advanced at
 * the very end.
 *
 * LOCK: locked, needed
 */
static void
en_txdma_load(void *uarg, bus_dma_segment_t *segs, int nseg, bus_size_t mapsize,
    int error)
{
	struct txarg *tx = uarg;
	struct en_softc *sc = tx->sc;
	struct en_txslot *slot = &sc->txslot[tx->chan];
	uint32_t cur;		/* on-card buffer position (bytes offset) */
	uint32_t dtq;		/* on-card queue position (byte offset) */
	uint32_t last_dtq;	/* last DTQ we have written */
	uint32_t tmp;
	u_int free;		/* free queue entries on card */
	u_int needalign, cnt;
	bus_size_t rest;	/* remaining bytes in current segment */
	bus_addr_t addr;
	bus_dma_segment_t *s;
	uint32_t count, bcode;
	int i;

	if (error != 0)
		return;

	cur = slot->cur;
	dtq = sc->dtq_us;
	free = sc->dtq_free;

	last_dtq = 0;		/* make gcc happy */

	/*
	 * Local macro to add an entry to the transmit DMA area. If there
	 * are no entries left, return. Save the byte offset of the entry
	 * in last_dtq for later use.
	 *
	 * NB: this macro contains a `return' from the enclosing function.
	 */
#define PUT_DTQ_ENTRY(ENI, BCODE, COUNT, ADDR)				\
	if (free == 0) {						\
		EN_COUNT(sc->stats.txdtqout);				\
		tx->wait = 1;						\
		return;							\
	}								\
	last_dtq = dtq;							\
	en_write(sc, dtq + 0, (ENI || !sc->is_adaptec) ?		\
	    MID_MK_TXQ_ENI(COUNT, tx->chan, 0, BCODE) :			\
	    MID_MK_TXQ_ADP(COUNT, tx->chan, 0, BCODE));			\
	en_write(sc, dtq + 4, ADDR);					\
									\
	EN_WRAPADD(MID_DTQOFF, MID_DTQEND, dtq, 8);			\
	free--;

	/*
	 * Local macro to generate a DMA entry to DMA cnt bytes. Updates
	 * the current buffer byte offset accordingly.
	 */
#define DO_DTQ(TYPE) do {						\
	rest -= cnt;							\
	EN_WRAPADD(slot->start, slot->stop, cur, cnt);			\
	DBG(sc, TX, ("tx%d: "TYPE" %u bytes, %ju left, cur %#x",	\
	    tx->chan, cnt, (uintmax_t)rest, cur));			\
									\
	PUT_DTQ_ENTRY(1, bcode, count, addr);				\
									\
	addr += cnt;							\
    } while (0)

	if (!(tx->flags & TX_HAS_TBD)) {
		/*
		 * Prepend the TBD - it did not fit into the first mbuf
		 */
		tmp = MID_TBD_MK1((tx->flags & TX_AAL5) ?
		    MID_TBD_AAL5 : MID_TBD_NOAAL5,
		    sc->txspeed[tx->vci],
		    tx->m->m_pkthdr.len / MID_ATMDATASZ);
		en_write(sc, cur, tmp);
		EN_WRAPADD(slot->start, slot->stop, cur, 4);

		tmp = MID_TBD_MK2(tx->vci, 0, 0);
		en_write(sc, cur, tmp);
		EN_WRAPADD(slot->start, slot->stop, cur, 4);

		/* update DMA address (JK entry: no data, just move cur) */
		PUT_DTQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
	}

	for (i = 0, s = segs; i < nseg; i++, s++) {
		rest = s->ds_len;
		addr = s->ds_addr;

		if (sc->is_adaptec) {
			/* adaptec card - simple */

			/* advance the on-card buffer pointer */
			EN_WRAPADD(slot->start, slot->stop, cur, rest);
			DBG(sc, TX, ("tx%d: adp %ju bytes %#jx (cur now 0x%x)",
			    tx->chan, (uintmax_t)rest, (uintmax_t)addr, cur));

			PUT_DTQ_ENTRY(0, 0, rest, addr);

			continue;
		}

		/*
		 * do we need to do a DMA op to align to the maximum
		 * burst? Note, that we are alway 32-bit aligned.
		 */
		if (sc->alburst &&
		    (needalign = (addr & sc->bestburstmask)) != 0) {
			/* compute number of bytes, words and code */
			cnt = sc->bestburstlen - needalign;
			if (cnt > rest)
				cnt = rest;
			count = cnt / sizeof(uint32_t);
			if (sc->noalbursts) {
				bcode = MIDDMA_WORD;
			} else {
				bcode = en_dmaplan[count].bcode;
				count = cnt >> en_dmaplan[count].divshift;
			}
			DO_DTQ("al_dma");
		}

		/* do we need to do a max-sized burst? */
		if (rest >= sc->bestburstlen) {
			count = rest >> sc->bestburstshift;
			cnt = count << sc->bestburstshift;
			bcode = sc->bestburstcode;
			DO_DTQ("best_dma");
		}

		/* do we need to do a cleanup burst? */
		if (rest != 0) {
			cnt = rest;
			count = rest / sizeof(uint32_t);
			if (sc->noalbursts) {
				bcode = MIDDMA_WORD;
			} else {
				bcode = en_dmaplan[count].bcode;
				count = cnt >> en_dmaplan[count].divshift;
			}
			DO_DTQ("clean_dma");
		}
	}

	KASSERT (tx->flags & TX_HAS_PAD, ("PDU not padded"));

	if ((tx->flags & TX_AAL5) && !(tx->flags & TX_HAS_PDU)) {
		/*
		 * Append the AAL5 PDU trailer
		 */
		tmp = MID_PDU_MK1(0, 0, tx->datalen);
		en_write(sc, cur, tmp);
		EN_WRAPADD(slot->start, slot->stop, cur, 4);

		en_write(sc, cur, 0);
		EN_WRAPADD(slot->start, slot->stop, cur, 4);

		/* update DMA address (JK entry: no data, just move cur) */
		PUT_DTQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
	}

	/* record the end for the interrupt routine */
	sc->dtq[MID_DTQ_A2REG(last_dtq)] =
	    EN_DQ_MK(tx->chan, tx->m->m_pkthdr.len);

	/* set the end flag in the last descriptor */
	en_write(sc, last_dtq + 0, SETQ_END(sc, en_read(sc, last_dtq + 0)));

#undef PUT_DTQ_ENTRY
#undef DO_DTQ

	/* commit */
	slot->cur = cur;
	sc->dtq_free = free;
	sc->dtq_us = dtq;

	/* tell card */
	en_write(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_us));
}
690
/*
 * en_txdma: start transmit DMA on the given channel, if possible
 *
 * This is called from two places: when we got new packets from the upper
 * layer or when we found that buffer space has freed up during interrupt
 * processing.
 *
 * Loops over slot->q, loading each waiting mbuf's DMA map; the real
 * descriptor work happens synchronously in the en_txdma_load callback.
 * Outcomes per packet:
 *   - success: packet moved to slot->indma, bfree charged, loop again;
 *   - out of buffer space / DTQs: packet is PREPENDED back onto slot->q
 *     and the function returns (retried from the interrupt path);
 *   - load error: packet is dropped and its map freed.
 *
 * LOCK: locked, needed
 */
static void
en_txdma(struct en_softc *sc, struct en_txslot *slot)
{
	struct en_map *map;
	struct mbuf *lastm;
	struct txarg tx;
	u_int pad;
	int error;

	DBG(sc, TX, ("tx%td: starting ...", slot - sc->txslot));
  again:
	bzero(&tx, sizeof(tx));
	tx.chan = slot - sc->txslot;
	tx.sc = sc;

	/*
	 * get an mbuf waiting for DMA
	 */
	_IF_DEQUEUE(&slot->q, tx.m);
	if (tx.m == NULL) {
		DBG(sc, TX, ("tx%td: ...done!", slot - sc->txslot));
		return;
	}
	MBUF_GET_TX(tx.m, tx.vci, tx.flags, tx.datalen, pad, map);

	/*
	 * note: don't use the entire buffer space. if WRTX becomes equal
	 * to RDTX, the transmitter stops assuming the buffer is empty! --kjc
	 */
	if (tx.m->m_pkthdr.len >= slot->bfree) {
		EN_COUNT(sc->stats.txoutspace);
		DBG(sc, TX, ("tx%td: out of transmit space", slot - sc->txslot));
		goto waitres;
	}

	lastm = NULL;
	if (!(tx.flags & TX_HAS_PAD)) {
		if (pad != 0) {
			/*
			 * Append the shared padding buffer; it is detached
			 * again right after the map load below, so it is
			 * only ever linked into one chain at a time (we
			 * hold the softc lock throughout).
			 */
			(void)m_length(tx.m, &lastm);
			lastm->m_next = sc->padbuf;
			sc->padbuf->m_len = pad;
		}
		tx.flags |= TX_HAS_PAD;
	}

	/*
	 * Try to load that map. The callback (en_txdma_load) runs
	 * synchronously and programs the card's DTQs.
	 */
	error = bus_dmamap_load_mbuf(sc->txtag, map->map, tx.m,
	    en_txdma_load, &tx, 0);

	if (lastm != NULL)
		lastm->m_next = NULL;

	if (error != 0) {
		if_printf(&sc->enif, "loading TX map failed %d\n", error);
		goto dequeue_drop;
	}
	map->flags |= ENMAP_LOADED;
	if (tx.wait) {
		/* probably not enough space */
		bus_dmamap_unload(map->sc->txtag, map->map);
		map->flags &= ~ENMAP_LOADED;

		sc->need_dtqs = 1;
		DBG(sc, TX, ("tx%td: out of transmit DTQs", slot - sc->txslot));
		goto waitres;
	}

	EN_COUNT(sc->stats.launch);
	sc->enif.if_opackets++;

#ifdef ENABLE_BPF
	if (sc->enif.if_bpf != NULL) {
		/*
		 * adjust the top of the mbuf to skip the TBD if present
		 * before passing the packet to bpf.
		 * Also remove padding and the PDU trailer. Assume both of
		 * them to be in the same mbuf. pktlen, m_len and m_data
		 * are not needed anymore so we can change them.
		 */
		if (tx.flags & TX_HAS_TBD) {
			tx.m->m_data += MID_TBD_SIZE;
			tx.m->m_len -= MID_TBD_SIZE;
		}
		tx.m->m_pkthdr.len = m_length(tx.m, &lastm);
		if (tx.m->m_pkthdr.len > tx.datalen) {
			lastm->m_len -= tx.m->m_pkthdr.len - tx.datalen;
			tx.m->m_pkthdr.len = tx.datalen;
		}

		BPF_MTAP(&sc->enif, tx.m);
	}
#endif

	/*
	 * do some housekeeping and get the next packet
	 */
	slot->bfree -= tx.m->m_pkthdr.len;
	_IF_ENQUEUE(&slot->indma, tx.m);

	goto again;

	/*
	 * error handling. This is jumped to when we just want to drop
	 * the packet. Must be unlocked here.
	 */
  dequeue_drop:
	if (map != NULL)
		uma_zfree(sc->map_zone, map);

	slot->mbsize -= tx.m->m_pkthdr.len;

	m_freem(tx.m);

	goto again;

  waitres:
	/* put the packet back at the head of the queue for a later retry */
	_IF_PREPEND(&slot->q, tx.m);
}
821
822/*
823 * Create a copy of a single mbuf. It can have either internal or
824 * external data, it may have a packet header. External data is really
825 * copied, so the new buffer is writeable.
826 *
827 * LOCK: any, not needed
828 */
829static struct mbuf *
830copy_mbuf(struct mbuf *m)
831{
832 struct mbuf *new;
833
834 MGET(new, M_TRYWAIT, MT_DATA);
835 if (new == NULL)
836 return (NULL);
837
838 if (m->m_flags & M_PKTHDR) {
839 M_MOVE_PKTHDR(new, m);
840 if (m->m_len > MHLEN) {
841 MCLGET(new, M_TRYWAIT);
842 if ((m->m_flags & M_EXT) == 0) {
843 m_free(new);
844 return (NULL);
845 }
846 }
847 } else {
848 if (m->m_len > MLEN) {
849 MCLGET(new, M_TRYWAIT);
850 if ((m->m_flags & M_EXT) == 0) {
851 m_free(new);
852 return (NULL);
853 }
854 }
855 }
856
857 bcopy(m->m_data, new->m_data, m->m_len);
858 new->m_len = m->m_len;
859 new->m_flags &= ~M_RDONLY;
860
861 return (new);
862}
863
/*
 * This function is called when we have an ENI adapter. It fixes the
 * mbuf chain, so that all addresses and lengths are 4 byte aligned.
 * The overall length is already padded to multiple of cells plus the
 * TBD so this must always succeed. The routine can fail, when it
 * needs to copy an mbuf (this may happen if an mbuf is readonly).
 *
 * We assume here, that aligning the virtual addresses to 4 bytes also
 * aligns the physical addresses.
 *
 * On failure the whole chain m0 is freed and NULL returned; *pad is
 * decremented for every zero byte consumed from the padding budget.
 *
 * LOCK: locked, needed
 */
static struct mbuf *
en_fix_mchain(struct en_softc *sc, struct mbuf *m0, u_int *pad)
{
	struct mbuf **prev = &m0;
	struct mbuf *m = m0;
	struct mbuf *new;
	u_char *d;
	int off;

	while (m != NULL) {
		/* Step 1: make the data pointer 4-byte aligned. */
		d = mtod(m, u_char *);
		if ((off = (uintptr_t)d % sizeof(uint32_t)) != 0) {
			EN_COUNT(sc->stats.mfixaddr);
			if (M_WRITABLE(m)) {
				/* slide data down inside the same buffer */
				bcopy(d, d - off, m->m_len);
				m->m_data -= off;
			} else {
				if ((new = copy_mbuf(m)) == NULL) {
					EN_COUNT(sc->stats.mfixfail);
					m_freem(m0);
					return (NULL);
				}
				new->m_next = m_free(m);
				*prev = m = new;
			}
		}

		/* Step 2: make the length a multiple of 4 by pulling
		 * bytes up from the following mbufs (or padding with
		 * zeros at the end of the chain). */
		if ((off = m->m_len % sizeof(uint32_t)) != 0) {
			EN_COUNT(sc->stats.mfixlen);
			if (!M_WRITABLE(m)) {
				if ((new = copy_mbuf(m)) == NULL) {
					EN_COUNT(sc->stats.mfixfail);
					m_freem(m0);
					return (NULL);
				}
				new->m_next = m_free(m);
				*prev = m = new;
			}
			d = mtod(m, u_char *) + m->m_len;
			off = 4 - off;
			while (off) {
				/* drop empty mbufs so we can steal bytes */
				while (m->m_next && m->m_next->m_len == 0)
					m->m_next = m_free(m->m_next);

				if (m->m_next == NULL) {
					/* end of chain: use padding budget */
					*d++ = 0;
					KASSERT(*pad > 0, ("no padding space"));
					(*pad)--;
				} else {
					*d++ = *mtod(m->m_next, u_char *);
					m->m_next->m_len--;
					m->m_next->m_data++;
				}
				m->m_len++;
				off--;
			}
		}

		prev = &m->m_next;
		m = m->m_next;
	}

	return (m0);
}
940
941/*
942 * en_start: start transmitting the next packet that needs to go out
943 * if there is one. We take off all packets from the interface's queue and
944 * put them into the channels queue.
945 *
946 * Here we also prepend the transmit packet descriptor and append the padding
947 * and (for aal5) the PDU trailer. This is different from the original driver:
948 * we assume, that allocating one or two additional mbufs is actually cheaper
949 * than all this algorithmic fiddling we would need otherwise.
950 *
951 * While the packet is on the channels wait queue we use the csum_* fields
952 * in the packet header to hold the original datalen, the AAL5 flag and the
953 * VCI. The packet length field in the header holds the needed buffer space.
954 * This may actually be more than the length of the current mbuf chain (when
955 * one or more of TBD, padding and PDU do not fit).
956 *
957 * LOCK: unlocked, needed
958 */
959static void
960en_start(struct ifnet *ifp)
961{
962 struct en_softc *sc = (struct en_softc *)ifp->if_softc;
963 struct mbuf *m, *lastm;
964 struct atm_pseudohdr *ap;
965 u_int pad; /* 0-bytes to pad at PDU end */
966 u_int datalen; /* length of user data */
967 u_int vci; /* the VCI we are transmitting on */
968 u_int chan; /* the transmit channel */
969 u_int flags;
970 uint32_t tbd[2];
971 uint32_t pdu[2];
972 struct en_map *map;
973
974 while (1) {
975 IF_DEQUEUE(&ifp->if_snd, m);
976 if (m == NULL)
977 return;
978
979 flags = 0;
980
981 ap = mtod(m, struct atm_pseudohdr *);
982 vci = ATM_PH_VCI(ap);
983 if (ATM_PH_FLAGS(ap) & ATM_PH_AAL5)
984 flags |= TX_AAL5;
985
986 if (ATM_PH_VPI(ap) != 0 || vci > MID_N_VC) {
987 DBG(sc, TX, ("output vpi=%u, vci=%u -- drop",
988 ATM_PH_VPI(ap), vci));
989 m_freem(m);
990 continue;
991 }
992 m_adj(m, sizeof(struct atm_pseudohdr));
993
994 /*
995 * (re-)calculate size of packet (in bytes)
996 */
997 m->m_pkthdr.len = datalen = m_length(m, &lastm);
998
999 /*
1000 * computing how much padding we need on the end of the mbuf,
1001 * then see if we can put the TBD at the front of the mbuf
1002 * where the link header goes (well behaved protocols will
1003 * reserve room for us). Last, check if room for PDU tail.
1004 */
1005 if (flags & TX_AAL5)
1006 m->m_pkthdr.len += MID_PDU_SIZE;
1007 m->m_pkthdr.len = roundup(m->m_pkthdr.len, MID_ATMDATASZ);
1008 pad = m->m_pkthdr.len - datalen;
1009 if (flags & TX_AAL5)
1010 pad -= MID_PDU_SIZE;
1011 m->m_pkthdr.len += MID_TBD_SIZE;
1012
1013 DBG(sc, TX, ("txvci%d: buflen=%u datalen=%u lead=%d trail=%d",
1014 vci, m->m_pkthdr.len, datalen, (int)M_LEADINGSPACE(m),
1015 (int)M_TRAILINGSPACE(lastm)));
1016
1017 /*
1018 * Allocate a map. We do this here rather then in en_txdma,
1019 * because en_txdma is also called from the interrupt handler
1020 * and we are going to have a locking problem then. We must
1021 * use NOWAIT here, because the ip_output path holds various
1022 * locks.
1023 */
1024 map = uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT);
1025 if (map == NULL || !(map->flags & ENMAP_ALLOC)) {
1026 /* drop that packet */
1027 EN_COUNT(sc->stats.txnomap);
1028 if (map != NULL)
1029 uma_zfree(sc->map_zone, map);
1030 m_freem(m);
1031 continue;
1032 }
1033
1034 /*
1035 * From here on we need access to sc
1036 */
1037 EN_LOCK(sc);
1038 if ((ifp->if_flags & IFF_RUNNING) == 0) {
1039 EN_UNLOCK(sc);
1040 uma_zfree(sc->map_zone, map);
1041 m_freem(m);
1042 continue;
1043 }
1044
1045 /*
1046 * Look, whether we can prepend the TBD (8 byte)
1047 */
1048 if (M_WRITABLE(m) && M_LEADINGSPACE(m) >= MID_TBD_SIZE) {
1049 tbd[0] = htobe32(MID_TBD_MK1((flags & TX_AAL5) ?
1050 MID_TBD_AAL5 : MID_TBD_NOAAL5,
1051 sc->txspeed[vci],
1052 m->m_pkthdr.len / MID_ATMDATASZ));
1053 tbd[1] = htobe32(MID_TBD_MK2(vci, 0, 0));
1054
1055 m->m_data -= MID_TBD_SIZE;
1056 bcopy(tbd, m->m_data, MID_TBD_SIZE);
1057 m->m_len += MID_TBD_SIZE;
1058 flags |= TX_HAS_TBD;
1059 }
1060
1061 /*
1062 * Check whether the padding fits (must be writeable -
1063 * we pad with zero).
1064 */
1065 if (M_WRITABLE(lastm) && M_TRAILINGSPACE(lastm) >= pad) {
1066 bzero(lastm->m_data + lastm->m_len, pad);
1067 lastm->m_len += pad;
1068 flags |= TX_HAS_PAD;
1069
1070 if ((flags & TX_AAL5) &&
1071 M_TRAILINGSPACE(lastm) > MID_PDU_SIZE) {
1072 pdu[0] = htobe32(MID_PDU_MK1(0, 0, datalen));
1073 pdu[1] = 0;
1074 bcopy(pdu, lastm->m_data + lastm->m_len,
1075 MID_PDU_SIZE);
1076 lastm->m_len += MID_PDU_SIZE;
1077 flags |= TX_HAS_PDU;
1078 }
1079 }
1080
1081 if (!sc->is_adaptec &&
1082 (m = en_fix_mchain(sc, m, &pad)) == NULL) {
1083 EN_UNLOCK(sc);
1084 uma_zfree(sc->map_zone, map);
1085 continue;
1086 }
1087
1088 /*
1089 * get assigned channel (will be zero unless
1090 * txspeed[atm_vci] is set)
1091 */
1092 chan = sc->txvc2slot[vci];
1093
1094 if (m->m_pkthdr.len > EN_TXSZ * 1024) {
1095 DBG(sc, TX, ("tx%d: packet larger than xmit buffer "
1096 "(%d > %d)\n", chan, m->m_pkthdr.len,
1097 EN_TXSZ * 1024));
1098 EN_UNLOCK(sc);
1099 m_freem(m);
1100 uma_zfree(sc->map_zone, map);
1101 continue;
1102 }
1103
1104 if (sc->txslot[chan].mbsize > EN_TXHIWAT) {
1105 EN_COUNT(sc->stats.txmbovr);
1106 DBG(sc, TX, ("tx%d: buffer space shortage", chan));
1107 EN_UNLOCK(sc);
1108 m_freem(m);
1109 uma_zfree(sc->map_zone, map);
1110 continue;
1111 }
1112
1113 /* commit */
1114 sc->txslot[chan].mbsize += m->m_pkthdr.len;
1115
1116 DBG(sc, TX, ("tx%d: VCI=%d, speed=0x%x, buflen=%d, mbsize=%d",
1117 chan, vci, sc->txspeed[vci], m->m_pkthdr.len,
1118 sc->txslot[chan].mbsize));
1119
1120 MBUF_SET_TX(m, vci, flags, datalen, pad, map);
1121
1122 _IF_ENQUEUE(&sc->txslot[chan].q, m);
1123
1124 en_txdma(sc, &sc->txslot[chan]);
1125
1126 EN_UNLOCK(sc);
1127 }
1128}
1129
1130/*********************************************************************/
1131/*
1132 * VCs
1133 */
1134
1135/*
1136 * en_loadvc: load a vc tab entry from a slot
1137 *
1138 * LOCK: locked, needed
1139 */
1140static void
1141en_loadvc(struct en_softc *sc, int vc)
1142{
1143 int slot;
1144 uint32_t reg = en_read(sc, MID_VC(vc));
1145
1146 reg = MIDV_SETMODE(reg, MIDV_TRASH);
1147 en_write(sc, MID_VC(vc), reg);
1148 DELAY(27);
1149
1150 if ((slot = sc->rxvc2slot[vc]) == RX_NONE)
1151 return;
1152
1153 /* no need to set CRC */
1154
1155 /* read pointer = 0, desc. start = 0 */
1156 en_write(sc, MID_DST_RP(vc), 0);
1157 /* write pointer = 0 */
1158 en_write(sc, MID_WP_ST_CNT(vc), 0);
1159 /* set mode, size, loc */
1160 en_write(sc, MID_VC(vc), sc->rxslot[slot].mode);
1161
1162 sc->rxslot[slot].cur = sc->rxslot[slot].start;
1163
1164 DBG(sc, VC, ("rx%d: assigned to VCI %d", slot, vc));
1165}
1166
1167/*
1168 * en_rxctl: turn on and off VCs for recv.
1169 *
1170 * LOCK: unlocked, needed
1171 */
1172static int
1173en_rxctl(struct en_softc *sc, struct atm_pseudoioctl *pi, int on)
1174{
1175 u_int vci, flags, slot;
1176 uint32_t oldmode, newmode;
1177
1178 vci = ATM_PH_VCI(&pi->aph);
1179 flags = ATM_PH_FLAGS(&pi->aph);
1180
1181 DBG(sc, IOCTL, ("%s vpi=%d, vci=%d, flags=%#x",
1182 (on) ? "enable" : "disable", ATM_PH_VPI(&pi->aph), vci, flags));
1183
1184 if (ATM_PH_VPI(&pi->aph) || vci >= MID_N_VC)
1185 return (EINVAL);
1186
1187 EN_LOCK(sc);
1188
1189 if (on) {
1190 /*
1191 * turn on VCI!
1192 */
1193 if (sc->rxvc2slot[vci] != RX_NONE)
1194 return (EINVAL);
1195 for (slot = 0; slot < sc->en_nrx; slot++)
1196 if (sc->rxslot[slot].oth_flags & ENOTHER_FREE)
1197 break;
1198 if (slot == sc->en_nrx) {
1199 EN_UNLOCK(sc);
1200 return (ENOSPC);
1201 }
1202
1203 sc->rxvc2slot[vci] = slot;
1204 sc->rxslot[slot].rxhand = NULL;
1205 oldmode = sc->rxslot[slot].mode;
1206 newmode = (flags & ATM_PH_AAL5) ? MIDV_AAL5 : MIDV_NOAAL;
1207 sc->rxslot[slot].mode = MIDV_SETMODE(oldmode, newmode);
1208 sc->rxslot[slot].atm_vci = vci;
1209 sc->rxslot[slot].atm_flags = flags;
1210 sc->rxslot[slot].oth_flags = 0;
1211 sc->rxslot[slot].rxhand = pi->rxhand;
1212
1213 if (_IF_QLEN(&sc->rxslot[slot].indma) != 0 ||
1214 _IF_QLEN(&sc->rxslot[slot].q) != 0)
1215 panic("en_rxctl: left over mbufs on enable");
1216 sc->txspeed[vci] = 0; /* full speed to start */
1217 sc->txvc2slot[vci] = 0; /* init value */
1218 sc->txslot[0].nref++; /* bump reference count */
1219 en_loadvc(sc, vci); /* does debug printf for us */
1220
1221 EN_UNLOCK(sc);
1222 return (0);
1223 }
1224
1225 /*
1226 * turn off VCI
1227 */
1228 if (sc->rxvc2slot[vci] == RX_NONE) {
1229 EN_UNLOCK(sc);
1230 return (EINVAL);
1231 }
1232 slot = sc->rxvc2slot[vci];
1233 if ((sc->rxslot[slot].oth_flags & (ENOTHER_FREE|ENOTHER_DRAIN)) != 0) {
1234 EN_UNLOCK(sc);
1235 return (EINVAL);
1236 }
1237
1238 oldmode = en_read(sc, MID_VC(vci));
1239 newmode = MIDV_SETMODE(oldmode, MIDV_TRASH) & ~MIDV_INSERVICE;
1240 en_write(sc, MID_VC(vci), (newmode | (oldmode & MIDV_INSERVICE)));
1241
1242 /* halt in tracks, be careful to preserve inservice bit */
1243 DELAY(27);
1244 sc->rxslot[slot].rxhand = NULL;
1245 sc->rxslot[slot].mode = newmode;
1246
1247 sc->txslot[sc->txvc2slot[vci]].nref--;
1248 sc->txspeed[vci] = 0;
1249 sc->txvc2slot[vci] = 0;
1250
1251 /* if stuff is still going on we are going to have to drain it out */
1252 if (_IF_QLEN(&sc->rxslot[slot].indma) != 0 ||
1253 _IF_QLEN(&sc->rxslot[slot].q) != 0 ||
1254 (sc->rxslot[slot].oth_flags & ENOTHER_SWSL) != 0) {
1255 sc->rxslot[slot].oth_flags |= ENOTHER_DRAIN;
1256 } else {
1257 sc->rxslot[slot].oth_flags = ENOTHER_FREE;
1258 sc->rxslot[slot].atm_vci = RX_NONE;
1259 sc->rxvc2slot[vci] = RX_NONE;
1260 }
1261 EN_UNLOCK(sc);
1262
1263 DBG(sc, IOCTL, ("rx%d: VCI %d is now %s", slot, vci,
1264 (sc->rxslot[slot].oth_flags & ENOTHER_DRAIN) ? "draining" : "free"));
1265
1266 return (0);
1267}
1268
1269/*********************************************************************/
1270/*
1271 * starting/stopping the card
1272 */
1273
1274/*
1275 * en_reset_ul: reset the board, throw away work in progress.
1276 * must en_init to recover.
1277 *
1278 * LOCK: locked, needed
1279 */
1280static void
1281en_reset_ul(struct en_softc *sc)
1282{
1283 struct en_map *map;
1284 struct mbuf *m;
1285 int lcv, slot;
1286
1287 if_printf(&sc->enif, "reset\n");
1288
1289 if (sc->en_busreset)
1290 sc->en_busreset(sc);
1291 en_write(sc, MID_RESID, 0x0); /* reset hardware */
1292
1293 /*
1294 * recv: dump any mbufs we are dma'ing into, if DRAINing, then a reset
1295 * will free us!
1296 */
1297 for (lcv = 0 ; lcv < MID_N_VC ; lcv++) {
1298 if (sc->rxvc2slot[lcv] == RX_NONE)
1299 continue;
1300 slot = sc->rxvc2slot[lcv];
1301
1302 for (;;) {
1303 _IF_DEQUEUE(&sc->rxslot[slot].indma, m);
1304 if (m == NULL)
1305 break;
1306 map = (void *)m->m_pkthdr.rcvif;
1307 uma_zfree(sc->map_zone, map);
1308 m_freem(m);
1309 }
1310 for (;;) {
1311 _IF_DEQUEUE(&sc->rxslot[slot].q, m);
1312 if (m == NULL)
1313 break;
1314 m_freem(m);
1315 }
1316 sc->rxslot[slot].oth_flags &= ~ENOTHER_SWSL;
1317 if (sc->rxslot[slot].oth_flags & ENOTHER_DRAIN) {
1318 sc->rxslot[slot].oth_flags = ENOTHER_FREE;
1319 sc->rxvc2slot[lcv] = RX_NONE;
1320 DBG(sc, INIT, ("rx%d: VCI %d is now free", slot, lcv));
1321 }
1322 }
1323
1324 /*
1325 * xmit: dump everything
1326 */
1327 for (lcv = 0 ; lcv < EN_NTX ; lcv++) {
1328 for (;;) {
1329 _IF_DEQUEUE(&sc->txslot[lcv].indma, m);
1330 if (m == NULL)
1331 break;
1332 map = (void *)m->m_pkthdr.rcvif;
1333 uma_zfree(sc->map_zone, map);
1334 m_freem(m);
1335 }
1336 for (;;) {
1337 _IF_DEQUEUE(&sc->txslot[lcv].q, m);
1338 if (m == NULL)
1339 break;
1340 map = (void *)m->m_pkthdr.rcvif;
1341 uma_zfree(sc->map_zone, map);
1342 m_freem(m);
1343 }
1344 sc->txslot[lcv].mbsize = 0;
1345 }
1346}
1347
1348/*
1349 * en_reset: reset the board, throw away work in progress.
1350 * must en_init to recover.
1351 *
1352 * LOCK: unlocked, needed
1353 *
 * Use en_reset_ul if you already have the lock
1355 */
void
en_reset(struct en_softc *sc)
{
	/* take the softc lock for callers that do not hold it */
	EN_LOCK(sc);
	en_reset_ul(sc);
	EN_UNLOCK(sc);
}
1363
1364
1365/*
1366 * en_init: init board and sync the card with the data in the softc.
1367 *
1368 * LOCK: locked, needed
1369 */
1370static void
1371en_init(struct en_softc *sc)
1372{
1373 int vc, slot;
1374 uint32_t loc;
1375
1376 if ((sc->enif.if_flags & IFF_UP) == 0) {
1377 DBG(sc, INIT, ("going down"));
1378 en_reset(sc); /* to be safe */
1379 sc->enif.if_flags &= ~IFF_RUNNING; /* disable */
1380 return;
1381 }
1382
1383 DBG(sc, INIT, ("going up"));
1384 sc->enif.if_flags |= IFF_RUNNING; /* enable */
1385
1386 if (sc->en_busreset)
1387 sc->en_busreset(sc);
1388 en_write(sc, MID_RESID, 0x0); /* reset */
1389
1390 /*
1391 * init obmem data structures: vc tab, dma q's, slist.
1392 *
1393 * note that we set drq_free/dtq_free to one less than the total number
1394 * of DTQ/DRQs present. we do this because the card uses the condition
1395 * (drq_chip == drq_us) to mean "list is empty"... but if you allow the
1396 * circular list to be completely full then (drq_chip == drq_us) [i.e.
1397 * the drq_us pointer will wrap all the way around]. by restricting
1398 * the number of active requests to (N - 1) we prevent the list from
1399 * becoming completely full. note that the card will sometimes give
1400 * us an interrupt for a DTQ/DRQ we have already processes... this helps
1401 * keep that interrupt from messing us up.
1402 */
1403
1404 for (vc = 0; vc < MID_N_VC; vc++)
1405 en_loadvc(sc, vc);
1406
1407 bzero(&sc->drq, sizeof(sc->drq));
1408 sc->drq_free = MID_DRQ_N - 1;
1409 sc->drq_chip = MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX));
1410 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
1411 sc->drq_us = sc->drq_chip;
1412
1413 bzero(&sc->dtq, sizeof(sc->dtq));
1414 sc->dtq_free = MID_DTQ_N - 1;
1415 sc->dtq_chip = MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX));
1416 en_write(sc, MID_DMA_WRTX, MID_DRQ_A2REG(sc->dtq_chip));
1417 sc->dtq_us = sc->dtq_chip;
1418
1419 sc->hwslistp = MID_SL_REG2A(en_read(sc, MID_SERV_WRITE));
1420 sc->swsl_size = sc->swsl_head = sc->swsl_tail = 0;
1421
1422 DBG(sc, INIT, ("drq free/chip: %d/0x%x, dtq free/chip: %d/0x%x, "
1423 "hwslist: 0x%x", sc->drq_free, sc->drq_chip, sc->dtq_free,
1424 sc->dtq_chip, sc->hwslistp));
1425
1426 for (slot = 0 ; slot < EN_NTX ; slot++) {
1427 sc->txslot[slot].bfree = EN_TXSZ * 1024;
1428 en_write(sc, MIDX_READPTR(slot), 0);
1429 en_write(sc, MIDX_DESCSTART(slot), 0);
1430 loc = sc->txslot[slot].cur = sc->txslot[slot].start;
1431 loc = loc - MID_RAMOFF;
1432 /* mask, cvt to words */
1433 loc = (loc & ~((EN_TXSZ * 1024) - 1)) >> 2;
1434 /* top 11 bits */
1435 loc = loc >> MIDV_LOCTOPSHFT;
1436 en_write(sc, MIDX_PLACE(slot), MIDX_MKPLACE(en_k2sz(EN_TXSZ),
1437 loc));
1438 DBG(sc, INIT, ("tx%d: place 0x%x", slot,
1439 (u_int)en_read(sc, MIDX_PLACE(slot))));
1440 }
1441
1442 /*
1443 * enable!
1444 */
1445 en_write(sc, MID_INTENA, MID_INT_TX | MID_INT_DMA_OVR | MID_INT_IDENT |
1446 MID_INT_LERR | MID_INT_DMA_ERR | MID_INT_DMA_RX | MID_INT_DMA_TX |
1447 MID_INT_SERVICE | /* MID_INT_SUNI | */ MID_INT_STATS);
1448 en_write(sc, MID_MAST_CSR, MID_SETIPL(sc->ipl) | MID_MCSR_ENDMA |
1449 MID_MCSR_ENTX | MID_MCSR_ENRX);
1450}
1451
1452/*********************************************************************/
1453/*
1454 * Ioctls
1455 */
1456
1457/*
1458 * en_ioctl: handle ioctl requests
1459 *
1460 * NOTE: if you add an ioctl to set txspeed, you should choose a new
1461 * TX channel/slot. Choose the one with the lowest sc->txslot[slot].nref
1462 * value, subtract one from sc->txslot[0].nref, add one to the
1463 * sc->txslot[slot].nref, set sc->txvc2slot[vci] = slot, and then set
1464 * txspeed[vci].
1465 *
1466 * LOCK: unlocked, needed
1467 */
1468static int
1469en_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1470{
1471 struct en_softc *sc = (struct en_softc *)ifp->if_softc;
1472 struct ifaddr *ifa = (struct ifaddr *)data;
1473 struct ifreq *ifr = (struct ifreq *)data;
1474 struct atm_pseudoioctl *api = (struct atm_pseudoioctl *)data;
1475 int error = 0;
1476
1477 switch (cmd) {
1478
1479 case SIOCATMENA: /* enable circuit for recv */
1480 error = en_rxctl(sc, api, 1);
1481 break;
1482
1483 case SIOCATMDIS: /* disable circuit for recv */
1484 error = en_rxctl(sc, api, 0);
1485 break;
1486
1487 case SIOCSIFADDR:
1488 EN_LOCK(sc);
1489 ifp->if_flags |= IFF_UP;
1490#if defined(INET) || defined(INET6)
1491 if (ifa->ifa_addr->sa_family == AF_INET
1492 || ifa->ifa_addr->sa_family == AF_INET6) {
1493 if (!(ifp->if_flags & IFF_RUNNING)) {
1494 en_reset_ul(sc);
1495 en_init(sc);
1496 }
1497 ifa->ifa_rtrequest = atm_rtrequest; /* ??? */
1498 EN_UNLOCK(sc);
1499 break;
1500 }
1501#endif /* INET */
1502 if (!(ifp->if_flags & IFF_RUNNING)) {
1503 en_reset_ul(sc);
1504 en_init(sc);
1505 }
1506 EN_UNLOCK(sc);
1507 break;
1508
1509 case SIOCSIFFLAGS:
1510 EN_LOCK(sc);
1511 if (ifp->if_flags & IFF_UP) {
1512 if (!(ifp->if_flags & IFF_RUNNING))
1513 en_init(sc);
1514 } else {
1515 if (ifp->if_flags & IFF_RUNNING)
1516 en_reset_ul(sc);
1517 }
1518 EN_UNLOCK(sc);
1519 break;
1520
1521 case SIOCSIFMTU:
1522 /*
1523 * Set the interface MTU.
1524 */
1525 if (ifr->ifr_mtu > ATMMTU) {
1526 error = EINVAL;
1527 break;
1528 }
1529 ifp->if_mtu = ifr->ifr_mtu;
1530 break;
1531
1532 default:
1533 error = EINVAL;
1534 break;
1535 }
1536 return (error);
1537}
1538
1539/*********************************************************************/
1540/*
1541 * Sysctl's
1542 */
1543
1544/*
1545 * Sysctl handler for internal statistics
1546 *
1547 * LOCK: unlocked, needed
1548 */
1549static int
1550en_sysctl_istats(SYSCTL_HANDLER_ARGS)
1551{
1552 struct en_softc *sc = arg1;
1553 struct sbuf *sb;
1554 int error;
1555
1556 sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
1557 sbuf_clear(sb);
1558
1559 EN_LOCK(sc);
1560
1561#define DO(NAME) sbuf_printf(sb, #NAME": %u\n", sc->stats.NAME)
1562 DO(vtrash);
1563 DO(otrash);
1564 DO(ttrash);
1565 DO(mfixaddr);
1566 DO(mfixlen);
1567 DO(mfixfail);
1568 DO(txmbovr);
1569 DO(dmaovr);
1570 DO(txoutspace);
1571 DO(txdtqout);
1572 DO(launch);
1573 DO(hwpull);
1574 DO(swadd);
1575 DO(rxqnotus);
1576 DO(rxqus);
1577 DO(rxdrqout);
1578 DO(rxmbufout);
1579 DO(txnomap);
1580#undef DO
1581
1582 EN_UNLOCK(sc);
1583
1584 sbuf_finish(sb);
1585 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
1586 sbuf_delete(sb);
1587 return (error);
1588}
1589
1590/*********************************************************************/
1591/*
1592 * Interrupts
1593 */
1594
1595/*
1596 * Transmit interrupt handler
1597 *
1598 * check for tx complete, if detected then this means that some space
1599 * has come free on the card. we must account for it and arrange to
1600 * kick the channel to life (in case it is stalled waiting on the card).
1601 *
1602 * LOCK: locked, needed
1603 */
1604static uint32_t
1605en_intr_tx(struct en_softc *sc, uint32_t reg)
1606{
1607 uint32_t kick;
1608 uint32_t mask;
1609 uint32_t val;
1610 int chan;
1611
1612 kick = 0; /* bitmask of channels to kick */
1613
1614 for (mask = 1, chan = 0; chan < EN_NTX; chan++, mask *= 2) {
1615 if (!(reg & MID_TXCHAN(chan)))
1616 continue;
1617
1618 kick = kick | mask;
1619
1620 /* current read pointer */
1621 val = en_read(sc, MIDX_READPTR(chan));
1622 /* as offset */
1623 val = (val * sizeof(uint32_t)) + sc->txslot[chan].start;
1624 if (val > sc->txslot[chan].cur)
1625 sc->txslot[chan].bfree = val - sc->txslot[chan].cur;
1626 else
1627 sc->txslot[chan].bfree = (val + (EN_TXSZ * 1024)) -
1628 sc->txslot[chan].cur;
1629 DBG(sc, INTR, ("tx%d: transmit done. %d bytes now free in "
1630 "buffer", chan, sc->txslot[chan].bfree));
1631 }
1632 return (kick);
1633}
1634
1635/*
1636 * TX DMA interrupt
1637 *
1638 * check for TX DMA complete, if detected then this means
1639 * that some DTQs are now free. it also means some indma
1640 * mbufs can be freed. if we needed DTQs, kick all channels.
1641 *
1642 * LOCK: locked, needed
1643 */
1644static uint32_t
1645en_intr_tx_dma(struct en_softc *sc)
1646{
1647 uint32_t kick = 0;
1648 uint32_t val;
1649 uint32_t idx;
1650 uint32_t slot;
1651 uint32_t dtq;
1652 struct en_map *map;
1653 struct mbuf *m;
1654
1655 val = en_read(sc, MID_DMA_RDTX); /* chip's current location */
1656 idx = MID_DTQ_A2REG(sc->dtq_chip); /* where we last saw chip */
1657
1658 if (sc->need_dtqs) {
1659 kick = MID_NTX_CH - 1; /* assume power of 2, kick all! */
1660 sc->need_dtqs = 0; /* recalculated in "kick" loop below */
1661 DBG(sc, INTR, ("cleared need DTQ condition"));
1662 }
1663
1664 while (idx != val) {
1665 sc->dtq_free++;
1666 if ((dtq = sc->dtq[idx]) != 0) {
1667 /* don't forget to zero it out when done */
1668 sc->dtq[idx] = 0;
1669 slot = EN_DQ_SLOT(dtq);
1670
1671 _IF_DEQUEUE(&sc->txslot[slot].indma, m);
1672 if (m == NULL)
1673 panic("enintr: dtqsync");
1674 map = (void *)m->m_pkthdr.rcvif;
1675 uma_zfree(sc->map_zone, map);
1676 m_freem(m);
1677
1678 sc->txslot[slot].mbsize -= EN_DQ_LEN(dtq);
1679 DBG(sc, INTR, ("tx%d: free %d dma bytes, mbsize now "
1680 "%d", slot, EN_DQ_LEN(dtq),
1681 sc->txslot[slot].mbsize));
1682 }
1683 EN_WRAPADD(0, MID_DTQ_N, idx, 1);
1684 }
1685 sc->dtq_chip = MID_DTQ_REG2A(val); /* sync softc */
1686
1687 return (kick);
1688}
1689
1690/*
1691 * Service interrupt
1692 *
1693 * LOCK: locked, needed
1694 */
1695static int
1696en_intr_service(struct en_softc *sc)
1697{
1698 uint32_t chip;
1699 uint32_t slot;
1700 uint32_t vci;
1701 int need_softserv = 0;
1702
1703 chip = MID_SL_REG2A(en_read(sc, MID_SERV_WRITE));
1704
1705 while (sc->hwslistp != chip) {
1706 /* fetch and remove it from hardware service list */
1707 vci = en_read(sc, sc->hwslistp);
1708 EN_WRAPADD(MID_SLOFF, MID_SLEND, sc->hwslistp, 4);
1709
1710 slot = sc->rxvc2slot[vci];
1711 if (slot == RX_NONE) {
1712 DBG(sc, INTR, ("unexpected rx interrupt on VCI %d",
1713 vci));
1714 en_write(sc, MID_VC(vci), MIDV_TRASH); /* rx off */
1715 continue;
1716 }
1717
1718 /* remove from hwsl */
1719 en_write(sc, MID_VC(vci), sc->rxslot[slot].mode);
1720 EN_COUNT(sc->stats.hwpull);
1721
1722 DBG(sc, INTR, ("pulled VCI %d off hwslist", vci));
1723
1724 /* add it to the software service list (if needed) */
1725 if ((sc->rxslot[slot].oth_flags & ENOTHER_SWSL) == 0) {
1726 EN_COUNT(sc->stats.swadd);
1727 need_softserv = 1;
1728 sc->rxslot[slot].oth_flags |= ENOTHER_SWSL;
1729 sc->swslist[sc->swsl_tail] = slot;
1730 EN_WRAPADD(0, MID_SL_N, sc->swsl_tail, 1);
1731 sc->swsl_size++;
1732 DBG(sc, INTR, ("added VCI %d to swslist", vci));
1733 }
1734 }
1735 return (need_softserv);
1736}
1737
1738/*
1739 * check for RX DMA complete, and pass the data "upstairs"
1740 *
1741 * LOCK: locked, needed
1742 */
static int
en_intr_rx_dma(struct en_softc *sc)
{
	uint32_t val;
	uint32_t idx;
	uint32_t drq;
	uint32_t slot;
	uint32_t vci;
	struct atm_pseudohdr ah;
	struct mbuf *m;
	struct en_map *map;

	val = en_read(sc, MID_DMA_RDRX);	/* chip's current location */
	idx = MID_DRQ_A2REG(sc->drq_chip);	/* where we last saw chip */

	/* walk every DRQ the chip has finished since our last visit */
	while (idx != val) {
		sc->drq_free++;
		if ((drq = sc->drq[idx]) != 0) {
			/* don't forget to zero it out when done */
			sc->drq[idx] = 0;
			slot = EN_DQ_SLOT(drq);
			if (EN_DQ_LEN(drq) == 0) {  /* "JK" trash DMA? */
				/* zero-length entry: data was skipped,
				 * no mbuf or map is associated with it */
				m = NULL;
				map = NULL;
			} else {
				_IF_DEQUEUE(&sc->rxslot[slot].indma, m);
				if (m == NULL)
					panic("enintr: drqsync: %s%d: lost mbuf"
					    " in slot %d!", sc->enif.if_name,
					    sc->enif.if_unit, slot);
				/* the DMA map is stashed in rcvif (see
				 * en_service) - release it now */
				map = (void *)m->m_pkthdr.rcvif;
				uma_zfree(sc->map_zone, map);
			}
			/* do something with this mbuf */
			if (sc->rxslot[slot].oth_flags & ENOTHER_DRAIN) {
				/* drain? */
				if (m != NULL)
					m_freem(m);
				vci = sc->rxslot[slot].atm_vci;
				/* the slot becomes free only when no mbufs
				 * are pending and the card is done with it */
				if (!_IF_QLEN(&sc->rxslot[slot].indma) &&
				    !_IF_QLEN(&sc->rxslot[slot].q) &&
				    (en_read(sc, MID_VC(vci)) & MIDV_INSERVICE)
				    == 0 &&
				    (sc->rxslot[slot].oth_flags & ENOTHER_SWSL)
				    == 0) {
					sc->rxslot[slot].oth_flags =
					    ENOTHER_FREE; /* done drain */
					sc->rxslot[slot].atm_vci = RX_NONE;
					sc->rxvc2slot[vci] = RX_NONE;
					DBG(sc, INTR, ("rx%d: VCI %d now free",
					    slot, vci));
				}

			} else if (m != NULL) {
				/* normal case: hand the packet upstairs */
				ATM_PH_FLAGS(&ah) = sc->rxslot[slot].atm_flags;
				ATM_PH_VPI(&ah) = 0;
				ATM_PH_SETVCI(&ah, sc->rxslot[slot].atm_vci);
				DBG(sc, INTR, ("rx%d: rxvci%d: atm_input, "
				    "mbuf %p, len %d, hand %p", slot,
				    sc->rxslot[slot].atm_vci, m,
				    EN_DQ_LEN(drq), sc->rxslot[slot].rxhand));

				m->m_pkthdr.rcvif = &sc->enif;
				sc->enif.if_ipackets++;
#ifdef EN_DEBUG
				if (sc->debug & DBG_IPACKETS)
					en_dump_packet(sc, m);
#endif
#ifdef ENABLE_BPF
				BPF_MTAP(&sc->enif, m);
#endif
				atm_input(&sc->enif, &ah, m,
				    sc->rxslot[slot].rxhand);
			}
		}
		EN_WRAPADD(0, MID_DRQ_N, idx, 1);
	}
	sc->drq_chip = MID_DRQ_REG2A(val);	/* sync softc */

	if (sc->need_drqs) {
		/* true if we had a DRQ shortage */
		sc->need_drqs = 0;
		DBG(sc, INTR, ("cleared need DRQ condition"));
		return (1);
	} else
		return (0);
}
1830
1831/*
1832 * en_mget: get an mbuf chain that can hold totlen bytes and return it
1833 * (for recv). For the actual allocation totlen is rounded up to a multiple
1834 * of 4. We also ensure, that each mbuf has a multiple of 4 bytes.
1835 *
1836 * After this call the sum of all the m_len's in the chain will be totlen.
1837 * This is called at interrupt time, so we can't wait here.
1838 *
1839 * LOCK: any, not needed
1840 */
1841static struct mbuf *
1842en_mget(struct en_softc *sc, u_int pktlen)
1843{
1844 struct mbuf *m, *tmp;
1845 u_int totlen, pad;
1846
1847 totlen = roundup(pktlen, sizeof(uint32_t));
1848 pad = totlen - pktlen;
1849
1850 /*
1851 * First get an mbuf with header. Keep space for a couple of
1852 * words at the begin.
1853 */
1854 /* called from interrupt context */
1855 MGETHDR(m, M_DONTWAIT, MT_DATA);
1856 if (m == NULL)
1857 return (NULL);
1858
1859 m->m_pkthdr.rcvif = NULL;
1860 m->m_pkthdr.len = pktlen;
1861 m->m_len = EN_RX1BUF;
1862 MH_ALIGN(m, EN_RX1BUF);
1863 if (m->m_len >= totlen) {
1864 m->m_len = totlen;
1865
1866 } else {
1867 totlen -= m->m_len;
1868
1869 /* called from interrupt context */
1870 tmp = m_getm(m, totlen, M_DONTWAIT, MT_DATA);
1871 if (tmp == NULL) {
1872 m_free(m);
1873 return (NULL);
1874 }
1875 tmp = m->m_next;
1876 /* m_getm could do this for us */
1877 while (tmp != NULL) {
1878 tmp->m_len = min(MCLBYTES, totlen);
1879 totlen -= tmp->m_len;
1880 tmp = tmp->m_next;
1881 }
1882 }
1883
1884 return (m);
1885}
1886
1887/*
1888 * Argument for RX DMAMAP loader.
1889 */
struct rxarg {
	struct en_softc *sc;	/* our softc */
	struct mbuf *m;		/* mbuf chain the data is DMAd into */
	u_int pre_skip;		/* number of bytes to skip at begin */
	u_int post_skip;	/* number of bytes to skip at end */
	struct en_rxslot *slot;	/* slot we are receiving on */
	int wait;		/* wait for DRQ entries */
};
1898
1899/*
1900 * Copy the segment table to the buffer for later use. And compute the
1901 * number of dma queue entries we need.
1902 *
1903 * LOCK: locked, needed
1904 */
static void
en_rxdma_load(void *uarg, bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsize, int error)
{
	struct rxarg *rx = uarg;
	struct en_softc *sc = rx->sc;
	struct en_rxslot *slot = rx->slot;
	u_int free;		/* number of free DRQ entries */
	uint32_t cur;		/* current buffer offset */
	uint32_t drq;		/* DRQ entry pointer */
	uint32_t last_drq;	/* where we have written last */
	u_int needalign, cnt, count, bcode;
	bus_addr_t addr;
	bus_size_t rest;
	int i;

	if (error != 0)
		return;
	if (nseg > EN_MAX_DMASEG)
		panic("too many DMA segments");

	rx->wait = 0;

	/* work on local copies; committed to the softc only at the end */
	free = sc->drq_free;
	drq = sc->drq_us;
	cur = slot->cur;

	last_drq = 0;

	/*
	 * Local macro to add an entry to the receive DMA area. If there
	 * are no entries left, return. Save the byte offset of the entry
	 * in last_drq for later use.
	 */
	/* NOTE: this macro returns from en_rxdma_load() on DRQ shortage,
	 * leaving rx->wait set so the caller can queue the mbuf */
#define PUT_DRQ_ENTRY(ENI, BCODE, COUNT, ADDR)				\
	if (free == 0) {						\
		EN_COUNT(sc->stats.rxdrqout);				\
		rx->wait = 1;						\
		return;							\
	}								\
	last_drq = drq;							\
	en_write(sc, drq + 0, (ENI || !sc->is_adaptec) ?		\
	    MID_MK_RXQ_ENI(COUNT, slot->atm_vci, 0, BCODE) :		\
	    MID_MK_RXQ_ADP(COUNT, slot->atm_vci, 0, BCODE));		\
	en_write(sc, drq + 4, ADDR);					\
									\
	EN_WRAPADD(MID_DRQOFF, MID_DRQEND, drq, 8);			\
	free--;

	/*
	 * Local macro to generate a DMA entry to DMA cnt bytes. Updates
	 * the current buffer byte offset accordingly.
	 */
#define DO_DRQ(TYPE) do {						\
	rest -= cnt;							\
	EN_WRAPADD(slot->start, slot->stop, cur, cnt);			\
	DBG(sc, SERV, ("rx%td: "TYPE" %u bytes, %ju left, cur %#x",	\
	    slot - sc->rxslot, cnt, (uintmax_t)rest, cur));		\
									\
	PUT_DRQ_ENTRY(1, bcode, count, addr);				\
									\
	addr += cnt;							\
    } while (0)

	/*
	 * Skip the RBD at the beginning
	 */
	if (rx->pre_skip > 0) {
		/* update DMA address */
		EN_WRAPADD(slot->start, slot->stop, cur, rx->pre_skip);

		PUT_DRQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
	}

	for (i = 0; i < nseg; i++, segs++) {
		addr = segs->ds_addr;
		rest = segs->ds_len;

		if (sc->is_adaptec) {
			/* adaptec card - simple */

			/* advance the on-card buffer pointer */
			EN_WRAPADD(slot->start, slot->stop, cur, rest);
			DBG(sc, SERV, ("rx%td: adp %ju bytes %#jx "
			    "(cur now 0x%x)", slot - sc->rxslot,
			    (uintmax_t)rest, (uintmax_t)addr, cur));

			PUT_DRQ_ENTRY(0, 0, rest, addr);

			continue;
		}

		/*
		 * do we need to do a DMA op to align to the maximum
		 * burst? Note, that we are alway 32-bit aligned.
		 */
		if (sc->alburst &&
		    (needalign = (addr & sc->bestburstmask)) != 0) {
			/* compute number of bytes, words and code */
			cnt = sc->bestburstlen - needalign;
			if (cnt > rest)
				cnt = rest;
			count = cnt / sizeof(uint32_t);
			if (sc->noalbursts) {
				bcode = MIDDMA_WORD;
			} else {
				bcode = en_dmaplan[count].bcode;
				count = cnt >> en_dmaplan[count].divshift;
			}
			DO_DRQ("al_dma");
		}

		/* do we need to do a max-sized burst? */
		if (rest >= sc->bestburstlen) {
			count = rest >> sc->bestburstshift;
			cnt = count << sc->bestburstshift;
			bcode = sc->bestburstcode;
			DO_DRQ("best_dma");
		}

		/* do we need to do a cleanup burst? */
		if (rest != 0) {
			cnt = rest;
			count = rest / sizeof(uint32_t);
			if (sc->noalbursts) {
				bcode = MIDDMA_WORD;
			} else {
				bcode = en_dmaplan[count].bcode;
				count = cnt >> en_dmaplan[count].divshift;
			}
			DO_DRQ("clean_dma");
		}
	}

	/*
	 * Skip stuff at the end
	 */
	if (rx->post_skip > 0) {
		/* update DMA address */
		EN_WRAPADD(slot->start, slot->stop, cur, rx->post_skip);

		PUT_DRQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
	}

	/* record the end for the interrupt routine */
	sc->drq[MID_DRQ_A2REG(last_drq)] =
	    EN_DQ_MK(slot - sc->rxslot, rx->m->m_pkthdr.len);

	/* set the end flag in the last descriptor */
	en_write(sc, last_drq + 0, SETQ_END(sc, en_read(sc, last_drq + 0)));

#undef PUT_DRQ_ENTRY
#undef DO_DRQ

	/* commit */
	slot->cur = cur;
	sc->drq_free = free;
	sc->drq_us = drq;

	/* signal to card */
	en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_us));
}
2067
2068/*
2069 * en_service: handle a service interrupt
2070 *
2071 * Q: why do we need a software service list?
2072 *
2073 * A: if we remove a VCI from the hardware list and we find that we are
2074 * out of DRQs we must defer processing until some DRQs become free.
2075 * so we must remember to look at this RX VCI/slot later, but we can't
2076 * put it back on the hardware service list (since that isn't allowed).
2077 * so we instead save it on the software service list. it would be nice
2078 * if we could peek at the VCI on top of the hwservice list without removing
2079 * it, however this leads to a race condition: if we peek at it and
2080 * decide we are done with it new data could come in before we have a
2081 * chance to remove it from the hwslist. by the time we get it out of
2082 * the list the interrupt for the new data will be lost. oops!
2083 *
2084 * LOCK: locked, needed
2085 */
2086static void
2087en_service(struct en_softc *sc)
2088{
2089 struct mbuf *m, *lastm;
2090 struct en_map *map;
2091 struct rxarg rx;
2092 uint32_t cur;
2093 uint32_t dstart; /* data start (as reported by card) */
2094 uint32_t rbd; /* receive buffer descriptor */
2095 uint32_t pdu; /* AAL5 trailer */
2096 int mlen;
2097 struct en_rxslot *slot;
2098 int error;
2099
2100 rx.sc = sc;
2101
2102 next_vci:
2103 if (sc->swsl_size == 0) {
2104 DBG(sc, SERV, ("en_service done"));
2105 return;
2106 }
2107
2108 /*
2109 * get slot to service
2110 */
2111 rx.slot = slot = &sc->rxslot[sc->swslist[sc->swsl_head]];
2112
2113 KASSERT (sc->rxvc2slot[slot->atm_vci] == slot - sc->rxslot,
2114 ("en_service: rx slot/vci sync"));
2115
2116 /*
2117 * determine our mode and if we've got any work to do
2118 */
2119 DBG(sc, SERV, ("rx%td: service vci=%d start/stop/cur=0x%x 0x%x "
2120 "0x%x", slot - sc->rxslot, slot->atm_vci,
2121 slot->start, slot->stop, slot->cur));
2122
2123 same_vci:
2124 cur = slot->cur;
2125
2126 dstart = MIDV_DSTART(en_read(sc, MID_DST_RP(slot->atm_vci)));
2127 dstart = (dstart * sizeof(uint32_t)) + slot->start;
2128
2129 /* check to see if there is any data at all */
2130 if (dstart == cur) {
2131 EN_WRAPADD(0, MID_SL_N, sc->swsl_head, 1);
2132 /* remove from swslist */
2133 slot->oth_flags &= ~ENOTHER_SWSL;
2134 sc->swsl_size--;
2135 DBG(sc, SERV, ("rx%td: remove vci %d from swslist",
2136 slot - sc->rxslot, slot->atm_vci));
2137 goto next_vci;
2138 }
2139
2140 /*
2141 * figure out how many bytes we need
2142 * [mlen = # bytes to go in mbufs]
2143 */
2144 rbd = en_read(sc, cur);
2145 if (MID_RBD_ID(rbd) != MID_RBD_STDID)
2146 panic("en_service: id mismatch");
2147
2148 if (rbd & MID_RBD_T) {
2149 mlen = 0; /* we've got trash */
2150 rx.pre_skip = MID_RBD_SIZE;
2151 rx.post_skip = 0;
2152 EN_COUNT(sc->stats.ttrash);
2153 DBG(sc, SERV, ("RX overflow lost %d cells!", MID_RBD_CNT(rbd)));
2154
2155 } else if (!(slot->atm_flags & ATM_PH_AAL5)) {
2156 /* 1 cell (ick!) */
2157 mlen = MID_CHDR_SIZE + MID_ATMDATASZ;
2158 rx.pre_skip = MID_RBD_SIZE;
2159 rx.post_skip = 0;
2160
2161 } else {
2162 rx.pre_skip = MID_RBD_SIZE;
2163
2164 /* get PDU trailer in correct byte order */
2165 pdu = cur + MID_RBD_CNT(rbd) * MID_ATMDATASZ +
2166 MID_RBD_SIZE - MID_PDU_SIZE;
2167 if (pdu >= slot->stop)
2168 pdu -= EN_RXSZ * 1024;
2169 pdu = en_read(sc, pdu);
2170
2171 if (MID_RBD_CNT(rbd) * MID_ATMDATASZ <
2172 MID_PDU_LEN(pdu)) {
2173 if_printf(&sc->enif, "invalid AAL5 length\n");
2174 rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ;
2175 mlen = 0;
2176 sc->enif.if_ierrors++;
2177
2178 } else if (rbd & MID_RBD_CRCERR) {
2179 if_printf(&sc->enif, "CRC error\n");
2180 rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ;
2181 mlen = 0;
2182 sc->enif.if_ierrors++;
2183
2184 } else {
2185 mlen = MID_PDU_LEN(pdu);
2186 rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ - mlen;
2187 }
2188 }
2189
2190 /*
2191 * now allocate mbufs for mlen bytes of data, if out of mbufs, trash all
2192 *
2193 * notes:
2194 * 1. it is possible that we've already allocated an mbuf for this pkt
2195 * but ran out of DRQs, in which case we saved the allocated mbuf
2196 * on "q".
2197 * 2. if we save an buf in "q" we store the "cur" (pointer) in the
2198 * buf as an identity (that we can check later).
2199 * 3. after this block of code, if m is still NULL then we ran out of
2200 * mbufs
2201 */
2202 _IF_DEQUEUE(&slot->q, m);
2203 if (m != NULL) {
2204 if (m->m_pkthdr.csum_data != cur) {
2205 /* wasn't ours */
2206 DBG(sc, SERV, ("rx%td: q'ed buf %p not ours",
2207 slot - sc->rxslot, m));
2208 _IF_PREPEND(&slot->q, m);
2209 m = NULL;
2210 EN_COUNT(sc->stats.rxqnotus);
2211 } else {
2212 EN_COUNT(sc->stats.rxqus);
2213 DBG(sc, SERV, ("rx%td: recovered q'ed buf %p",
2214 slot - sc->rxslot, m));
2215 }
2216 }
2217 if (mlen == 0 && m != NULL) {
2218 /* should not happen */
2219 m_freem(m);
2220 m = NULL;
2221 }
2222
2223 if (mlen != 0 && m == NULL) {
2224 m = en_mget(sc, mlen);
2225 if (m == NULL) {
2226 rx.post_skip += mlen;
2227 mlen = 0;
2228 EN_COUNT(sc->stats.rxmbufout);
2229 DBG(sc, SERV, ("rx%td: out of mbufs",
2230 slot - sc->rxslot));
2231 } else
2232 rx.post_skip -= roundup(mlen, sizeof(uint32_t)) - mlen;
2233
2234 DBG(sc, SERV, ("rx%td: allocate buf %p, mlen=%d",
2235 slot - sc->rxslot, m, mlen));
2236 }
2237
2238 DBG(sc, SERV, ("rx%td: VCI %d, rbuf %p, mlen %d, skip %u/%u",
2239 slot - sc->rxslot, slot->atm_vci, m, mlen, rx.pre_skip,
2240 rx.post_skip));
2241
2242 if (m != NULL) {
2243 /* M_NOWAIT - called from interrupt context */
2244 map = uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT);
2245 if (map == NULL || !(map->flags & ENMAP_ALLOC)) {
2246 rx.post_skip += mlen;
2247 m_freem(m);
2248 DBG(sc, SERV, ("rx%td: out of maps",
2249 slot - sc->rxslot));
2250 if (map->map != NULL)
2251 uma_zfree(sc->map_zone, map);
2252 goto skip;
2253 }
2254 rx.m = m;
2255 error = bus_dmamap_load_mbuf(sc->txtag, map->map, m,
2256 en_rxdma_load, &rx, 0);
2257
2258 if (error != 0) {
2259 if_printf(&sc->enif, "loading RX map failed "
2260 "%d\n", error);
2261 uma_zfree(sc->map_zone, map);
2262 m_freem(m);
2263 rx.post_skip += mlen;
2264 goto skip;
2265
2266 }
2267 map->flags |= ENMAP_LOADED;
2268
2269 if (rx.wait) {
2270 /* out of DRQs - wait */
2271 uma_zfree(sc->map_zone, map);
2272
2273 m->m_pkthdr.csum_data = cur;
2274 _IF_ENQUEUE(&slot->q, m);
2275 EN_COUNT(sc->stats.rxdrqout);
2276
2277 sc->need_drqs = 1; /* flag condition */
2278 return;
2279
2280 }
2281 (void)m_length(m, &lastm);
2282 lastm->m_len -= roundup(mlen, sizeof(uint32_t)) - mlen;
2283
2284 m->m_pkthdr.rcvif = (void *)map;
2285 _IF_ENQUEUE(&slot->indma, m);
2286
2287 /* get next packet in this slot */
2288 goto same_vci;
2289 }
2290 skip:
2291 /*
2292 * Here we end if we should drop the packet from the receive buffer.
 * The number of bytes to drop is in fill. We can do this with one
2294 * JK entry. If we don't even have that one - wait.
2295 */
2296 if (sc->drq_free == 0) {
2297 sc->need_drqs = 1; /* flag condition */
2298 return;
2299 }
2300 rx.post_skip += rx.pre_skip;
2301 DBG(sc, SERV, ("rx%td: skipping %u", slot - sc->rxslot, rx.post_skip));
2302
2303 /* advance buffer address */
2304 EN_WRAPADD(slot->start, slot->stop, cur, rx.post_skip);
2305
2306 /* write DRQ entry */
2307 if (sc->is_adaptec)
2308 en_write(sc, sc->drq_us,
2309 MID_MK_RXQ_ADP(WORD_IDX(slot->start, cur),
2310 slot->atm_vci, MID_DMA_END, MIDDMA_JK));
2311 else
2312 en_write(sc, sc->drq_us,
2313 MID_MK_RXQ_ENI(WORD_IDX(slot->start, cur),
2314 slot->atm_vci, MID_DMA_END, MIDDMA_JK));
2315 en_write(sc, sc->drq_us + 4, 0);
2316 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_us, 8);
2317 sc->drq_free--;
2318
2319 /* signal to RX interrupt */
2320 sc->drq[MID_DRQ_A2REG(sc->drq_us)] = EN_DQ_MK(slot - sc->rxslot, 0);
2321 slot->cur = cur;
2322
2323 /* signal to card */
2324 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_us));
2325
2326 goto same_vci;
2327}
2328
2329/*
2330 * interrupt handler
2331 *
2332 * LOCK: unlocked, needed
2333 */
void
en_intr(void *arg)
{
	struct en_softc *sc = arg;
	uint32_t reg, kick, mask;
	int lcv, need_softserv;

	EN_LOCK(sc);

	/*
	 * Fetch the interrupt status.  NOTE(review): the register is named
	 * INTACK, so reading it presumably also acknowledges/clears the
	 * pending bits — confirm against the midway register reference.
	 */
	reg = en_read(sc, MID_INTACK);
	DBG(sc, INTR, ("interrupt=0x%b", reg, MID_INTBITS));

	/* nothing pending for us (the line may be shared) */
	if ((reg & MID_INT_ANY) == 0) {
		EN_UNLOCK(sc);
		return;
	}

	/*
	 * unexpected errors that need a reset
	 */
	if ((reg & (MID_INT_IDENT | MID_INT_LERR | MID_INT_DMA_ERR)) != 0) {
		if_printf(&sc->enif, "unexpected interrupt=0x%b, resetting\n",
		    reg, MID_INTBITS);
#ifdef EN_DEBUG
#ifdef DDB
		Debugger("en: unexpected error");
#endif	/* DDB */
		/* in debug builds: freeze the interface instead of resetting */
		sc->enif.if_flags &= ~IFF_RUNNING; /* FREEZE! */
#else
		en_reset_ul(sc);
		en_init(sc);
#endif
		EN_UNLOCK(sc);
		return;
	}

#if 0
	if (reg & MID_INT_SUNI)
		if_printf(&sc->enif, "interrupt from SUNI (probably carrier "
		    "change)\n");
#endif

	/*
	 * Transmit side: collect a bitmask of TX channels that may have
	 * space again (one bit per txslot, set by the helpers below).
	 */
	kick = 0;
	if (reg & MID_INT_TX)
		kick |= en_intr_tx(sc, reg);

	if (reg & MID_INT_DMA_TX)
		kick |= en_intr_tx_dma(sc);

	/*
	 * kick xmit channels as needed.
	 */
	if (kick) {
		DBG(sc, INTR, ("tx kick mask = 0x%x", kick));
		for (mask = 1, lcv = 0 ; lcv < EN_NTX ; lcv++, mask = mask * 2)
			if ((kick & mask) && _IF_QLEN(&sc->txslot[lcv].q) != 0)
				en_txdma(sc, &sc->txslot[lcv]);
	}

	/*
	 * Receive side: the helpers report whether the software service
	 * list needs processing; if so run it once after both checks.
	 */
	need_softserv = 0;
	if (reg & MID_INT_DMA_RX)
		need_softserv |= en_intr_rx_dma(sc);

	if (reg & MID_INT_SERVICE)
		need_softserv |= en_intr_service(sc);

	if (need_softserv)
		en_service(sc);

	/*
	 * keep our stats
	 */
	if (reg & MID_INT_DMA_OVR) {
		EN_COUNT(sc->stats.dmaovr);
		DBG(sc, INTR, ("MID_INT_DMA_OVR"));
	}
	/* accumulate the chip's trash counters from the status register */
	reg = en_read(sc, MID_STAT);
	sc->stats.otrash += MID_OTRASH(reg);
	sc->stats.vtrash += MID_VTRASH(reg);

	EN_UNLOCK(sc);
}
2416
2417/*********************************************************************/
2418/*
2419 * Probing the DMA brokeness of the card
2420 */
2421
2422/*
2423 * Physical address load helper function for DMA probe
2424 *
2425 * LOCK: unlocked, not needed
2426 */
2427static void
2428en_dmaprobe_load(void *uarg, bus_dma_segment_t *segs, int nseg, int error)
2429{
2430 if (error == 0)
2431 *(bus_addr_t *)uarg = segs[0].ds_addr;
2432}
2433
2434/*
2435 * en_dmaprobe: helper function for en_attach.
2436 *
2437 * see how the card handles DMA by running a few DMA tests. we need
2438 * to figure out the largest number of bytes we can DMA in one burst
2439 * ("bestburstlen"), and if the starting address for a burst needs to
2440 * be aligned on any sort of boundary or not ("alburst").
2441 *
2442 * Things turn out more complex than that, because on my (harti) brand
2443 * new motherboard (2.4GHz) we can do 64byte aligned DMAs, but everything
 * with more than 4 bytes fails (with an RX DMA timeout) for physical
 * addresses that end with 0xc. Therefore we search not only for the largest
 * burst that is supported (hopefully 64) but also check what is the largest
 * unaligned supported size. If that appears to be less than 4 words,
2448 * set the noalbursts flag. That will be set only if also alburst is set.
2449 */
2450
2451/*
2452 * en_dmaprobe_doit: do actual testing for the DMA test.
2453 * Cycle through all bursts sizes from 8 up to 64 and try whether it works.
2454 * Return the largest one that works.
2455 *
2456 * LOCK: unlocked, not needed
2457 */
2458static int
2459en_dmaprobe_doit(struct en_softc *sc, uint8_t *sp, bus_addr_t psp)
2460{
2461 uint8_t *dp = sp + MIDDMA_MAXBURST;
2462 bus_addr_t pdp = psp + MIDDMA_MAXBURST;
2463 int lcv, retval = 4, cnt;
2464 uint32_t reg, bcode, midvloc;
2465
2466 if (sc->en_busreset)
2467 sc->en_busreset(sc);
2468 en_write(sc, MID_RESID, 0x0); /* reset card before touching RAM */
2469
2470 /*
2471 * set up a 1k buffer at MID_BUFOFF
2472 */
2473 midvloc = ((MID_BUFOFF - MID_RAMOFF) / sizeof(uint32_t))
2474 >> MIDV_LOCTOPSHFT;
2475 en_write(sc, MIDX_PLACE(0), MIDX_MKPLACE(en_k2sz(1), midvloc));
2476 en_write(sc, MID_VC(0), (midvloc << MIDV_LOCSHIFT)
2477 | (en_k2sz(1) << MIDV_SZSHIFT) | MIDV_TRASH);
2478 en_write(sc, MID_DST_RP(0), 0);
2479 en_write(sc, MID_WP_ST_CNT(0), 0);
2480
2481 /* set up sample data */
2482 for (lcv = 0 ; lcv < MIDDMA_MAXBURST; lcv++)
2483 sp[lcv] = lcv + 1;
2484
2485 /* enable DMA (only) */
2486 en_write(sc, MID_MAST_CSR, MID_MCSR_ENDMA);
2487
2488 sc->drq_chip = MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX));
2489 sc->dtq_chip = MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX));
2490
2491 /*
2492 * try it now . . . DMA it out, then DMA it back in and compare
2493 *
2494 * note: in order to get the dma stuff to reverse directions it wants
2495 * the "end" flag set! since we are not dma'ing valid data we may
2496 * get an ident mismatch interrupt (which we will ignore).
2497 */
2498 DBG(sc, DMA, ("test sp=%p/%#lx, dp=%p/%#lx",
2499 sp, (u_long)psp, dp, (u_long)pdp));
2500 for (lcv = 8 ; lcv <= MIDDMA_MAXBURST ; lcv = lcv * 2) {
2501 DBG(sc, DMA, ("test lcv=%d", lcv));
2502
2503 /* zero SRAM and dest buffer */
2504 bus_space_set_region_4(sc->en_memt, sc->en_base,
2505 MID_BUFOFF, 0, 1024 / 4);
2506 bzero(dp, MIDDMA_MAXBURST);
2507
2508 bcode = en_sz2b(lcv);
2509
2510 /* build lcv-byte-DMA x NBURSTS */
2511 if (sc->is_adaptec)
2512 en_write(sc, sc->dtq_chip,
2513 MID_MK_TXQ_ADP(lcv, 0, MID_DMA_END, 0));
2514 else
2515 en_write(sc, sc->dtq_chip,
2516 MID_MK_TXQ_ENI(1, 0, MID_DMA_END, bcode));
2517 en_write(sc, sc->dtq_chip + 4, psp);
2518 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, sc->dtq_chip, 8);
2519 en_write(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_chip));
2520
2521 cnt = 1000;
2522 while ((reg = en_readx(sc, MID_DMA_RDTX)) !=
2523 MID_DTQ_A2REG(sc->dtq_chip)) {
2524 DELAY(1);
2525 if (--cnt == 0) {
2526 DBG(sc, DMA, ("unexpected timeout in tx "
2527 "DMA test\n alignment=0x%lx, burst size=%d"
2528 ", dma addr reg=%#x, rdtx=%#x, stat=%#x\n",
2529 (u_long)sp & 63, lcv,
2530 en_read(sc, MID_DMA_ADDR), reg,
2531 en_read(sc, MID_INTSTAT)));
2532 return (retval);
2533 }
2534 }
2535
2536 reg = en_read(sc, MID_INTACK);
2537 if ((reg & MID_INT_DMA_TX) != MID_INT_DMA_TX) {
2538 DBG(sc, DMA, ("unexpected status in tx DMA test: %#x\n",
2539 reg));
2540 return (retval);
2541 }
2542 /* re-enable DMA (only) */
2543 en_write(sc, MID_MAST_CSR, MID_MCSR_ENDMA);
2544
2545 /* "return to sender..." address is known ... */
2546
2547 /* build lcv-byte-DMA x NBURSTS */
2548 if (sc->is_adaptec)
2549 en_write(sc, sc->drq_chip,
2550 MID_MK_RXQ_ADP(lcv, 0, MID_DMA_END, 0));
2551 else
2552 en_write(sc, sc->drq_chip,
2553 MID_MK_RXQ_ENI(1, 0, MID_DMA_END, bcode));
2554 en_write(sc, sc->drq_chip + 4, pdp);
2555 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_chip, 8);
2556 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
2557 cnt = 1000;
2558 while ((reg = en_readx(sc, MID_DMA_RDRX)) !=
2559 MID_DRQ_A2REG(sc->drq_chip)) {
2560 DELAY(1);
2561 cnt--;
2562 if (--cnt == 0) {
2563 DBG(sc, DMA, ("unexpected timeout in rx "
2564 "DMA test, rdrx=%#x\n", reg));
2565 return (retval);
2566 }
2567 }
2568 reg = en_read(sc, MID_INTACK);
2569 if ((reg & MID_INT_DMA_RX) != MID_INT_DMA_RX) {
2570 DBG(sc, DMA, ("unexpected status in rx DMA "
2571 "test: 0x%x\n", reg));
2572 return (retval);
2573 }
2574 if (bcmp(sp, dp, lcv)) {
2575 DBG(sc, DMA, ("DMA test failed! lcv=%d, sp=%p, "
2576 "dp=%p", lcv, sp, dp));
2577 return (retval);
2578 }
2579
2580 retval = lcv;
2581 }
2582 return (retval); /* studly 64 byte DMA present! oh baby!! */
2583}
2584
2585/*
2586 * Find the best DMA parameters
2587 *
2588 * LOCK: unlocked, not needed
2589 */
2590static void
2591en_dmaprobe(struct en_softc *sc)
2592{
2593 bus_dma_tag_t tag;
2594 bus_dmamap_t map;
2595 int err;
2596 void *buffer;
2597 int bestalgn, lcv, try, bestnoalgn;
2598 bus_addr_t phys;
2599 uint8_t *addr;
2600
2601 sc->alburst = 0;
2602 sc->noalbursts = 0;
2603
2604 /*
2605 * Allocate some DMA-able memory.
2606 * We need 3 times the max burst size aligned to the max burst size.
2607 */
2608 err = bus_dma_tag_create(NULL, MIDDMA_MAXBURST, 0,
2609 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2610 3 * MIDDMA_MAXBURST, 1, 3 * MIDDMA_MAXBURST, 0, &tag);
2611 if (err)
2612 panic("%s: cannot create test DMA tag %d", __func__, err);
2613
2614 err = bus_dmamem_alloc(tag, &buffer, 0, &map);
2615 if (err)
2616 panic("%s: cannot allocate test DMA memory %d", __func__, err);
2617
2618 err = bus_dmamap_load(tag, map, buffer, 3 * MIDDMA_MAXBURST,
2619 en_dmaprobe_load, &phys, 0);
2620 if (err)
2621 panic("%s: cannot load test DMA map %d", __func__, err);
2622 addr = buffer;
2623 DBG(sc, DMA, ("phys=%#lx addr=%p", (u_long)phys, addr));
2624
2625 /*
2626 * Now get the best burst size of the aligned case.
2627 */
2628 bestalgn = bestnoalgn = en_dmaprobe_doit(sc, addr, phys);
2629
2630 /*
2631 * Now try unaligned.
2632 */
2633 for (lcv = 4; lcv < MIDDMA_MAXBURST; lcv += 4) {
2634 try = en_dmaprobe_doit(sc, addr + lcv, phys + lcv);
2635
2636 if (try < bestnoalgn)
2637 bestnoalgn = try;
2638 }
2639
2640 if (bestnoalgn < bestalgn) {
2641 sc->alburst = 1;
2642 if (bestnoalgn < 32)
2643 sc->noalbursts = 1;
2644 }
2645
2646 sc->bestburstlen = bestalgn;
2647 sc->bestburstshift = en_log2(bestalgn);
2648 sc->bestburstmask = sc->bestburstlen - 1; /* must be power of 2 */
2649 sc->bestburstcode = en_sz2b(bestalgn);
2650
2651 /*
2652 * Reset the chip before freeing the buffer. It may still be trying
2653 * to DMA.
2654 */
2655 if (sc->en_busreset)
2656 sc->en_busreset(sc);
2657 en_write(sc, MID_RESID, 0x0); /* reset card before touching RAM */
2658
2659 DELAY(10000); /* may still do DMA */
2660
2661 /*
2662 * Free the DMA stuff
2663 */
2664 bus_dmamap_unload(tag, map);
2665 bus_dmamem_free(tag, buffer, map);
2666 bus_dma_tag_destroy(tag);
2667}
2668
2669/*********************************************************************/
2670/*
2671 * Attach/detach.
2672 */
2673
2674/*
2675 * Attach to the card.
2676 *
2677 * LOCK: unlocked, not needed (but initialized)
2678 */
int
en_attach(struct en_softc *sc)
{
	struct ifnet *ifp = &sc->enif;
	int sz;
	uint32_t reg, lcv, check, ptr, sav, midvloc;

#ifdef EN_DEBUG
	sc->debug = EN_DEBUG;
#endif
	/*
	 * Probe card to determine memory size.
	 *
	 * The stupid ENI card always reports to PCI that it needs 4MB of
	 * space (2MB regs and 2MB RAM). If it has less than 2MB RAM the
	 * addresses wrap in the RAM address space (i.e. on a 512KB card
	 * addresses 0x3ffffc, 0x37fffc, and 0x2ffffc are aliases for
	 * 0x27fffc [note that RAM starts at offset 0x200000]).
	 */

	/* reset card before touching RAM */
	if (sc->en_busreset)
		sc->en_busreset(sc);
	en_write(sc, MID_RESID, 0x0);

	/*
	 * Write each probe offset's own address into it, then re-read all
	 * earlier offsets: the first one that no longer holds its address
	 * has been aliased, so the previous step is the true memory end.
	 */
	for (lcv = MID_PROBEOFF; lcv <= MID_MAXOFF ; lcv += MID_PROBSIZE) {
		en_write(sc, lcv, lcv);	/* data[address] = address */
		for (check = MID_PROBEOFF; check < lcv ;check += MID_PROBSIZE) {
			reg = en_read(sc, check);
			if (reg != check)
				/* found an alias! - quit */
				goto done_probe;
		}
	}
  done_probe:
	lcv -= MID_PROBSIZE;			/* take one step back */
	sc->en_obmemsz = (lcv + 4) - MID_RAMOFF;

	/*
	 * determine the largest DMA burst supported
	 */
	en_dmaprobe(sc);

	/*
	 * "hello world"
	 */

	/* reset */
	if (sc->en_busreset)
		sc->en_busreset(sc);
	en_write(sc, MID_RESID, 0x0);		/* reset */

	/* zero memory */
	bus_space_set_region_4(sc->en_memt, sc->en_base,
	    MID_RAMOFF, 0, sc->en_obmemsz / 4);

	reg = en_read(sc, MID_RESID);

	if_printf(&sc->enif, "ATM midway v%d, board IDs %d.%d, %s%s%s, "
	    "%ldKB on-board RAM\n", MID_VER(reg), MID_MID(reg), MID_DID(reg),
	    (MID_IS_SABRE(reg)) ? "sabre controller, " : "",
	    (MID_IS_SUNI(reg)) ? "SUNI" : "Utopia",
	    (!MID_IS_SUNI(reg) && MID_IS_UPIPE(reg)) ? " (pipelined)" : "",
	    (long)sc->en_obmemsz / 1024);

	/* report the DMA probe results */
	if (sc->is_adaptec) {
		if (sc->bestburstlen == 64 && sc->alburst == 0)
			if_printf(&sc->enif, "passed 64 byte DMA test\n");
		else
			if_printf(&sc->enif, "FAILED DMA TEST: burst=%d, "
			    "alburst=%d\n", sc->bestburstlen, sc->alburst);
	} else {
		if_printf(&sc->enif, "maximum DMA burst length = %d bytes%s\n",
		    sc->bestburstlen, sc->alburst ? sc->noalbursts ?
		    " (no large bursts)" : " (must align)" : "");
	}

	/*
	 * link into network subsystem and prepare card
	 */
	sc->enif.if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX;
	ifp->if_ioctl = en_ioctl;
	ifp->if_start = en_start;

	/*
	 * Make the sysctl tree
	 */
	sysctl_ctx_init(&sc->sysctl_ctx);

	if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
	    device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "")) == NULL)
		goto fail;

	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "istats", CTLFLAG_RD, sc, 0, en_sysctl_istats,
	    "A", "internal statistics") == NULL)
		goto fail;

#ifdef EN_DEBUG
	if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "debug", CTLFLAG_RW , &sc->debug, 0, "") == NULL)
		goto fail;
#endif

	mtx_init(&sc->en_mtx, device_get_nameunit(sc->dev),
	    MTX_NETWORK_LOCK, MTX_DEF);

	/* mbuf of zeroes, used to pad out transmits */
	MGET(sc->padbuf, M_TRYWAIT, MT_DATA);
	if (sc->padbuf == NULL)
		goto fail;
	bzero(sc->padbuf->m_data, MLEN);

	if (bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    EN_TXSZ * 1024, EN_MAX_DMASEG, EN_TXSZ * 1024, 0, &sc->txtag))
		goto fail;

	/* zone of DMA maps, capped at EN_MAX_MAPS */
	sc->map_zone = uma_zcreate("en dma maps", sizeof(struct en_map),
	    en_map_ctor, en_map_dtor, NULL, en_map_fini, UMA_ALIGN_PTR,
	    UMA_ZONE_ZINIT);
	if (sc->map_zone == NULL)
		goto fail;
	uma_zone_set_max(sc->map_zone, EN_MAX_MAPS);

	/*
	 * init softc
	 */
	for (lcv = 0 ; lcv < MID_N_VC ; lcv++) {
		sc->rxvc2slot[lcv] = RX_NONE;
		sc->txspeed[lcv] = 0;	/* full */
		sc->txvc2slot[lcv] = 0;	/* full speed == slot 0 */
	}

	/*
	 * Carve the on-board RAM above MID_BUFOFF into EN_NTX transmit
	 * buffers of EN_TXSZ KB each, then as many EN_RXSZ-KB receive
	 * buffers as fit in the remainder.
	 */
	sz = sc->en_obmemsz - (MID_BUFOFF - MID_RAMOFF);
	ptr = sav = MID_BUFOFF;
	ptr = roundup(ptr, EN_TXSZ * 1024);	/* align */
	sz = sz - (ptr - sav);
	if (EN_TXSZ*1024 * EN_NTX > sz) {
		if_printf(&sc->enif, "EN_NTX/EN_TXSZ too big\n");
		goto fail;
	}
	for (lcv = 0 ;lcv < EN_NTX ;lcv++) {
		sc->txslot[lcv].mbsize = 0;
		sc->txslot[lcv].start = ptr;
		ptr += (EN_TXSZ * 1024);
		sz -= (EN_TXSZ * 1024);
		sc->txslot[lcv].stop = ptr;
		sc->txslot[lcv].nref = 0;
		DBG(sc, INIT, ("tx%d: start 0x%x, stop 0x%x", lcv,
		    sc->txslot[lcv].start, sc->txslot[lcv].stop));
	}

	sav = ptr;
	ptr = roundup(ptr, EN_RXSZ * 1024);	/* align */
	sz = sz - (ptr - sav);
	sc->en_nrx = sz / (EN_RXSZ * 1024);
	if (sc->en_nrx <= 0) {
		if_printf(&sc->enif, "EN_NTX/EN_TXSZ/EN_RXSZ too big\n");
		goto fail;
	}

	/*
	 * ensure that there is always one VC slot on the service list free
	 * so that we can tell the difference between a full and empty list.
	 */
	if (sc->en_nrx >= MID_N_VC)
		sc->en_nrx = MID_N_VC - 1;

	for (lcv = 0 ; lcv < sc->en_nrx ; lcv++) {
		sc->rxslot[lcv].rxhand = NULL;
		sc->rxslot[lcv].oth_flags = ENOTHER_FREE;
		midvloc = sc->rxslot[lcv].start = ptr;
		ptr += (EN_RXSZ * 1024);
		sz -= (EN_RXSZ * 1024);
		sc->rxslot[lcv].stop = ptr;
		midvloc = midvloc - MID_RAMOFF;
		/* mask, cvt to words */
		midvloc = (midvloc & ~((EN_RXSZ*1024) - 1)) >> 2;
		/* we only want the top 11 bits */
		midvloc = midvloc >> MIDV_LOCTOPSHFT;
		midvloc = (midvloc & MIDV_LOCMASK) << MIDV_LOCSHIFT;
		/* precomputed VC mode word for this slot (starts trashing) */
		sc->rxslot[lcv].mode = midvloc |
		    (en_k2sz(EN_RXSZ) << MIDV_SZSHIFT) | MIDV_TRASH;

		DBG(sc, INIT, ("rx%d: start 0x%x, stop 0x%x, mode 0x%x", lcv,
		    sc->rxslot[lcv].start, sc->rxslot[lcv].stop,
		    sc->rxslot[lcv].mode));
	}

	bzero(&sc->stats, sizeof(sc->stats));

	if_printf(&sc->enif, "%d %dKB receive buffers, %d %dKB transmit "
	    "buffers\n", sc->en_nrx, EN_RXSZ, EN_NTX, EN_TXSZ);
	if_printf(&sc->enif, "end station identifier (mac address) %6D\n",
	    sc->macaddr, ":");

	/*
	 * final commit
	 */
	atm_ifattach(ifp);

#ifdef ENABLE_BPF
	bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
#endif

	return (0);

	/* failure: en_destroy() releases whatever was set up above */
  fail:
	en_destroy(sc);
	return (-1);
}
2892
2893/*
2894 * Free all internal resources. No access to bus resources here.
2895 * No locking required here (interrupt is already disabled).
2896 *
2897 * LOCK: unlocked, not needed (but destroyed)
2898 */
2899void
2900en_destroy(struct en_softc *sc)
2901{
2902 if (sc->padbuf != NULL)
2903 m_free(sc->padbuf);
2904
2905 /*
2906 * Destroy the map zone before the tag (the fini function will
2907 * destroy the DMA maps using the tag)
2908 */
2909 if (sc->map_zone != NULL)
2910 uma_zdestroy(sc->map_zone);
2911
2912 if (sc->txtag != NULL)
2913 bus_dma_tag_destroy(sc->txtag);
2914
2915 (void)sysctl_ctx_free(&sc->sysctl_ctx);
2916
2917 mtx_destroy(&sc->en_mtx);
2918}
2919
2920/*********************************************************************/
2921/*
2922 * Debugging support
2923 */
2924
2925#ifdef EN_DDBHOOK
2926/*
2927 * functions we can call from ddb
2928 */
2929
2930/*
2931 * en_dump: dump the state
2932 */
2933#define END_SWSL 0x00000040 /* swsl state */
2934#define END_DRQ 0x00000020 /* drq state */
2935#define END_DTQ 0x00000010 /* dtq state */
2936#define END_RX 0x00000008 /* rx state */
2937#define END_TX 0x00000004 /* tx state */
2938#define END_MREGS 0x00000002 /* registers */
2939#define END_STATS 0x00000001 /* dump stats */
2940
2941#define END_BITS "\20\7SWSL\6DRQ\5DTQ\4RX\3TX\2MREGS\1STATS"
2942
2943static void
2944en_dump_stats(const struct en_stats *s)
2945{
2946 printf("en_stats:\n");
2947 printf("\t%d/%d mfix (%d failed)\n", s->mfixaddr, s->mfixlen,
2948 s->mfixfail);
2949 printf("\t%d rx dma overflow interrupts\n", s->dmaovr);
2950 printf("\t%d times out of TX space and stalled\n", s->txoutspace);
2951 printf("\t%d times out of DTQs\n", s->txdtqout);
2952 printf("\t%d times launched a packet\n", s->launch);
2953 printf("\t%d times pulled the hw service list\n", s->hwpull);
2954 printf("\t%d times pushed a vci on the sw service list\n", s->swadd);
2955 printf("\t%d times RX pulled an mbuf from Q that wasn't ours\n",
2956 s->rxqnotus);
2957 printf("\t%d times RX pulled a good mbuf from Q\n", s->rxqus);
2958 printf("\t%d times ran out of DRQs\n", s->rxdrqout);
2959 printf("\t%d transmit packets dropped due to mbsize\n", s->txmbovr);
2960 printf("\t%d cells trashed due to turned off rxvc\n", s->vtrash);
2961 printf("\t%d cells trashed due to totally full buffer\n", s->otrash);
2962 printf("\t%d cells trashed due almost full buffer\n", s->ttrash);
2963 printf("\t%d rx mbuf allocation failures\n", s->rxmbufout);
2964 printf("\t%d times out of tx maps\n", s->txnomap);
2965#ifdef NATM
2966#ifdef NATM_STAT
2967 printf("\tnatmintr so_rcv: ok/drop cnt: %d/%d, ok/drop bytes: %d/%d\n",
2968 natm_sookcnt, natm_sodropcnt, natm_sookbytes, natm_sodropbytes);
2969#endif
2970#endif
2971}
2972
2973static void
2974en_dump_mregs(struct en_softc *sc)
2975{
2976 u_int cnt;
2977
2978 printf("mregs:\n");
2979 printf("resid = 0x%x\n", en_read(sc, MID_RESID));
2980 printf("interrupt status = 0x%b\n",
2981 (int)en_read(sc, MID_INTSTAT), MID_INTBITS);
2982 printf("interrupt enable = 0x%b\n",
2983 (int)en_read(sc, MID_INTENA), MID_INTBITS);
2984 printf("mcsr = 0x%b\n", (int)en_read(sc, MID_MAST_CSR), MID_MCSRBITS);
2985 printf("serv_write = [chip=%u] [us=%u]\n", en_read(sc, MID_SERV_WRITE),
2986 MID_SL_A2REG(sc->hwslistp));
2987 printf("dma addr = 0x%x\n", en_read(sc, MID_DMA_ADDR));
2988 printf("DRQ: chip[rd=0x%x,wr=0x%x], sc[chip=0x%x,us=0x%x]\n",
2989 MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX)),
2990 MID_DRQ_REG2A(en_read(sc, MID_DMA_WRRX)), sc->drq_chip, sc->drq_us);
2991 printf("DTQ: chip[rd=0x%x,wr=0x%x], sc[chip=0x%x,us=0x%x]\n",
2992 MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX)),
2993 MID_DTQ_REG2A(en_read(sc, MID_DMA_WRTX)), sc->dtq_chip, sc->dtq_us);
2994
2995 printf(" unusal txspeeds:");
2996 for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
2997 if (sc->txspeed[cnt])
2998 printf(" vci%d=0x%x", cnt, sc->txspeed[cnt]);
2999 printf("\n");
3000
3001 printf(" rxvc slot mappings:");
3002 for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
3003 if (sc->rxvc2slot[cnt] != RX_NONE)
3004 printf(" %d->%d", cnt, sc->rxvc2slot[cnt]);
3005 printf("\n");
3006}
3007
3008static void
3009en_dump_tx(struct en_softc *sc)
3010{
3011 u_int slot;
3012
3013 printf("tx:\n");
3014 for (slot = 0 ; slot < EN_NTX; slot++) {
3015 printf("tx%d: start/stop/cur=0x%x/0x%x/0x%x [%d] ", slot,
3016 sc->txslot[slot].start, sc->txslot[slot].stop,
3017 sc->txslot[slot].cur,
3018 (sc->txslot[slot].cur - sc->txslot[slot].start) / 4);
3019 printf("mbsize=%d, bfree=%d\n", sc->txslot[slot].mbsize,
3020 sc->txslot[slot].bfree);
3021 printf("txhw: base_address=0x%x, size=%u, read=%u, "
3022 "descstart=%u\n",
3023 (u_int)MIDX_BASE(en_read(sc, MIDX_PLACE(slot))),
3024 MIDX_SZ(en_read(sc, MIDX_PLACE(slot))),
3025 en_read(sc, MIDX_READPTR(slot)),
3026 en_read(sc, MIDX_DESCSTART(slot)));
3027 }
3028}
3029
3030static void
3031en_dump_rx(struct en_softc *sc)
3032{
3033 u_int slot;
3034
3035 printf(" recv slots:\n");
3036 for (slot = 0 ; slot < sc->en_nrx; slot++) {
3037 printf("rx%d: vci=%d: start/stop/cur=0x%x/0x%x/0x%x ",
3038 slot, sc->rxslot[slot].atm_vci,
3039 sc->rxslot[slot].start, sc->rxslot[slot].stop,
3040 sc->rxslot[slot].cur);
3041 printf("mode=0x%x, atm_flags=0x%x, oth_flags=0x%x\n",
3042 sc->rxslot[slot].mode, sc->rxslot[slot].atm_flags,
3043 sc->rxslot[slot].oth_flags);
3044 printf("RXHW: mode=0x%x, DST_RP=0x%x, WP_ST_CNT=0x%x\n",
3045 en_read(sc, MID_VC(sc->rxslot[slot].atm_vci)),
3046 en_read(sc, MID_DST_RP(sc->rxslot[slot].atm_vci)),
3047 en_read(sc,
3048 MID_WP_ST_CNT(sc->rxslot[slot].atm_vci)));
3049 }
3050}
3051
3052/*
3053 * This is only correct for non-adaptec adapters
3054 */
3055static void
3056en_dump_dtqs(struct en_softc *sc)
3057{
3058 uint32_t ptr, reg;
3059
3060 printf(" dtq [need_dtqs=%d,dtq_free=%d]:\n", sc->need_dtqs,
3061 sc->dtq_free);
3062 ptr = sc->dtq_chip;
3063 while (ptr != sc->dtq_us) {
3064 reg = en_read(sc, ptr);
3065 printf("\t0x%x=[%#x cnt=%d, chan=%d, end=%d, type=%d @ 0x%x]\n",
3066 sc->dtq[MID_DTQ_A2REG(ptr)], reg, MID_DMA_CNT(reg),
3067 MID_DMA_TXCHAN(reg), (reg & MID_DMA_END) != 0,
3068 MID_DMA_TYPE(reg), en_read(sc, ptr + 4));
3069 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, ptr, 8);
3070 }
3071}
3072
3073static void
3074en_dump_drqs(struct en_softc *sc)
3075{
3076 uint32_t ptr, reg;
3077
3078 printf(" drq [need_drqs=%d,drq_free=%d]:\n", sc->need_drqs,
3079 sc->drq_free);
3080 ptr = sc->drq_chip;
3081 while (ptr != sc->drq_us) {
3082 reg = en_read(sc, ptr);
3083 printf("\t0x%x=[cnt=%d, chan=%d, end=%d, type=%d @ 0x%x]\n",
3084 sc->drq[MID_DRQ_A2REG(ptr)], MID_DMA_CNT(reg),
3085 MID_DMA_RXVCI(reg), (reg & MID_DMA_END) != 0,
3086 MID_DMA_TYPE(reg), en_read(sc, ptr + 4));
3087 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, ptr, 8);
3088 }
3089}
3090
3091/* Do not staticize - meant for calling from DDB! */
3092int
3093en_dump(int unit, int level)
3094{
3095 struct en_softc *sc;
3096 int lcv, cnt;
3097 devclass_t dc;
3098 int maxunit;
3099
3100 dc = devclass_find("en");
3101 if (dc == NULL) {
3102 printf("%s: can't find devclass!\n", __func__);
3103 return (0);
3104 }
3105 maxunit = devclass_get_maxunit(dc);
3106 for (lcv = 0 ; lcv < maxunit ; lcv++) {
3107 sc = devclass_get_softc(dc, lcv);
3108 if (sc == NULL)
3109 continue;
3110 if (unit != -1 && unit != lcv)
3111 continue;
3112
3113 if_printf(&sc->enif, "dumping device at level 0x%b\n",
3114 level, END_BITS);
3115
3116 if (sc->dtq_us == 0) {
3117 printf("<hasn't been en_init'd yet>\n");
3118 continue;
3119 }
3120
3121 if (level & END_STATS)
3122 en_dump_stats(&sc->stats);
3123 if (level & END_MREGS)
3124 en_dump_mregs(sc);
3125 if (level & END_TX)
3126 en_dump_tx(sc);
3127 if (level & END_RX)
3128 en_dump_rx(sc);
3129 if (level & END_DTQ)
3130 en_dump_dtqs(sc);
3131 if (level & END_DRQ)
3132 en_dump_drqs(sc);
3133
3134 if (level & END_SWSL) {
3135 printf(" swslist [size=%d]: ", sc->swsl_size);
3136 for (cnt = sc->swsl_head ; cnt != sc->swsl_tail ;
3137 cnt = (cnt + 1) % MID_SL_N)
3138 printf("0x%x ", sc->swslist[cnt]);
3139 printf("\n");
3140 }
3141 }
3142 return (0);
3143}
3144
3145/*
3146 * en_dumpmem: dump the memory
3147 *
3148 * Do not staticize - meant for calling from DDB!
3149 */
3150int
3151en_dumpmem(int unit, int addr, int len)
3152{
3153 struct en_softc *sc;
3154 uint32_t reg;
3155 devclass_t dc;
3156
3157 dc = devclass_find("en");
3158 if (dc == NULL) {
3159 printf("%s: can't find devclass\n", __func__);
3160 return (0);
3161 }
3162 sc = devclass_get_softc(dc, unit);
3163 if (sc == NULL) {
3164 printf("%s: invalid unit number: %d\n", __func__, unit);
3165 return (0);
3166 }
3167
3168 addr = addr & ~3;
3169 if (addr < MID_RAMOFF || addr + len * 4 > MID_MAXOFF || len <= 0) {
3170 printf("invalid addr/len number: %d, %d\n", addr, len);
3171 return (0);
3172 }
3173 printf("dumping %d words starting at offset 0x%x\n", len, addr);
3174 while (len--) {
3175 reg = en_read(sc, addr);
3176 printf("mem[0x%x] = 0x%x\n", addr, reg);
3177 addr += 4;
3178 }
3179 return (0);
3180}
3181#endif