midway.c (170093) → midway.c (177599)
1/* $NetBSD: midway.c,v 1.30 1997/09/29 17:40:38 chuck Exp $ */
2/* (sync'd to midway.c 1.68) */
3
4/*-
5 * Copyright (c) 1996 Charles D. Cranor and Washington University.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Charles D. Cranor and
19 * Washington University.
20 * 4. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34#include <sys/cdefs.h>
-35 __FBSDID("$FreeBSD: head/sys/dev/en/midway.c 170093 2007-05-29 11:28:28Z rwatson $");
+35 __FBSDID("$FreeBSD: head/sys/dev/en/midway.c 177599 2008-03-25 09:39:02Z ru $");
36
37/*
38 *
39 * m i d w a y . c e n i 1 5 5 d r i v e r
40 *
41 * author: Chuck Cranor <chuck@ccrc.wustl.edu>
42 * started: spring, 1996 (written from scratch).
43 *
44 * notes from the author:
45 * Extra special thanks go to Werner Almesberger, EPFL LRC. Werner's
46 * ENI driver was especially useful in figuring out how this card works.
47 * I would also like to thank Werner for promptly answering email and being
48 * generally helpful.
49 */
50
51#define EN_DIAG
52#define EN_DDBHOOK 1 /* compile in ddb functions */
53
54/*
55 * Note on EN_ENIDMAFIX: the byte aligner on the ENI version of the card
56 * appears to be broken. it works just fine if there is no load... however
57 * when the card is loaded the data get corrupted. to see this, one only
58 * has to use "telnet" over ATM. do the following command in "telnet":
59 * cat /usr/share/misc/termcap
60 * "telnet" seems to generate lots of 1023 byte mbufs (which make great
61 * use of the byte aligner). watch "netstat -s" for checksum errors.
62 *
63 * I further tested this by adding a function that compared the transmit
64 * data on the card's SRAM with the data in the mbuf chain _after_ the
65 * "transmit DMA complete" interrupt. using the "telnet" test I got data
66 * mismatches where the byte-aligned data should have been. using ddb
67 * and en_dumpmem() I verified that the DTQs fed into the card were
 68 * absolutely correct. thus, we are forced to conclude that the ENI
69 * hardware is buggy. note that the Adaptec version of the card works
70 * just fine with byte DMA.
71 *
72 * bottom line: we set EN_ENIDMAFIX to 1 to avoid byte DMAs on the ENI
73 * card.
74 */
75
76#if defined(DIAGNOSTIC) && !defined(EN_DIAG)
77#define EN_DIAG /* link in with master DIAG option */
78#endif
79
80#define EN_COUNT(X) (X)++
81
82#ifdef EN_DEBUG
83
84#undef EN_DDBHOOK
85#define EN_DDBHOOK 1
86
87/*
 88 * This macro removes almost all the EN_DEBUG conditionals in the code
 89 * that make the code a good deal less readable.
90 */
91#define DBG(SC, FL, PRINT) do { \
92 if ((SC)->debug & DBG_##FL) { \
93 device_printf((SC)->dev, "%s: "#FL": ", __func__); \
94 printf PRINT; \
95 printf("\n"); \
96 } \
97 } while (0)
98
99enum {
100 DBG_INIT = 0x0001, /* debug attach/detach */
101 DBG_TX = 0x0002, /* debug transmitting */
102 DBG_SERV = 0x0004, /* debug service interrupts */
103 DBG_IOCTL = 0x0008, /* debug ioctls */
104 DBG_VC = 0x0010, /* debug VC handling */
105 DBG_INTR = 0x0020, /* debug interrupts */
106 DBG_DMA = 0x0040, /* debug DMA probing */
107 DBG_IPACKETS = 0x0080, /* print input packets */
108 DBG_REG = 0x0100, /* print all register access */
109 DBG_LOCK = 0x0200, /* debug locking */
110};
111
112#else /* EN_DEBUG */
113
114#define DBG(SC, FL, PRINT) do { } while (0)
115
116#endif /* EN_DEBUG */
117
118#include "opt_inet.h"
119#include "opt_natm.h"
120#include "opt_ddb.h"
121
122#ifdef DDB
123#undef EN_DDBHOOK
124#define EN_DDBHOOK 1
125#endif
126
127#include <sys/param.h>
128#include <sys/systm.h>
129#include <sys/queue.h>
130#include <sys/sockio.h>
131#include <sys/socket.h>
132#include <sys/mbuf.h>
133#include <sys/endian.h>
134#include <sys/stdint.h>
135#include <sys/lock.h>
136#include <sys/mutex.h>
137#include <sys/condvar.h>
138#include <vm/uma.h>
139
140#include <net/if.h>
141#include <net/if_media.h>
142#include <net/if_atm.h>
143
144#if defined(INET) || defined(INET6)
145#include <netinet/in.h>
146#include <netinet/if_atm.h>
147#endif
148
149#ifdef NATM
150#include <netnatm/natm.h>
151#endif
152
153#include <sys/bus.h>
154#include <machine/bus.h>
155#include <sys/rman.h>
156#include <sys/module.h>
157#include <sys/sysctl.h>
158#include <sys/malloc.h>
159#include <machine/resource.h>
160#include <dev/utopia/utopia.h>
161#include <dev/en/midwayreg.h>
162#include <dev/en/midwayvar.h>
163
164#include <net/bpf.h>
165
166/*
167 * params
168 */
169#ifndef EN_TXHIWAT
170#define EN_TXHIWAT (64 * 1024) /* max 64 KB waiting to be DMAd out */
171#endif
172
173SYSCTL_DECL(_hw_atm);
174
175/*
176 * dma tables
177 *
178 * The plan is indexed by the number of words to transfer.
 179 * The maximum index is 15, i.e. a 60-byte (15-word) transfer.
180 */
181struct en_dmatab {
182 uint8_t bcode; /* code */
183 uint8_t divshift; /* byte divisor */
184};
185
186static const struct en_dmatab en_dmaplan[] = {
187 { 0, 0 }, /* 0 */ { MIDDMA_WORD, 2}, /* 1 */
188 { MIDDMA_2WORD, 3}, /* 2 */ { MIDDMA_WORD, 2}, /* 3 */
189 { MIDDMA_4WORD, 4}, /* 4 */ { MIDDMA_WORD, 2}, /* 5 */
190 { MIDDMA_2WORD, 3}, /* 6 */ { MIDDMA_WORD, 2}, /* 7 */
191 { MIDDMA_8WORD, 5}, /* 8 */ { MIDDMA_WORD, 2}, /* 9 */
192 { MIDDMA_2WORD, 3}, /* 10 */ { MIDDMA_WORD, 2}, /* 11 */
193 { MIDDMA_4WORD, 4}, /* 12 */ { MIDDMA_WORD, 2}, /* 13 */
194 { MIDDMA_2WORD, 3}, /* 14 */ { MIDDMA_WORD, 2}, /* 15 */
195 { MIDDMA_16WORD,6}, /* 16 */
196};
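/*
 * Worked example (illustrative, not in the original source): with
 * sc->noalbursts clear, a 40-byte (10-word) cleanup transfer indexes
 * en_dmaplan[10] = { MIDDMA_2WORD, 3 }, so the DTQ entry is built with
 * burst code MIDDMA_2WORD and a count of 40 >> 3 = 5 two-word bursts.
 */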
197
198/*
199 * prototypes
200 */
201#ifdef EN_DDBHOOK
202int en_dump(int unit, int level);
203int en_dumpmem(int,int,int);
204#endif
205static void en_close_finish(struct en_softc *sc, struct en_vcc *vc);
206
207#define EN_LOCK(SC) do { \
208 DBG(SC, LOCK, ("ENLOCK %d\n", __LINE__)); \
209 mtx_lock(&sc->en_mtx); \
210 } while (0)
211#define EN_UNLOCK(SC) do { \
212 DBG(SC, LOCK, ("ENUNLOCK %d\n", __LINE__)); \
213 mtx_unlock(&sc->en_mtx); \
214 } while (0)
215#define EN_CHECKLOCK(sc) mtx_assert(&sc->en_mtx, MA_OWNED)
216
217/*
218 * While a transmit mbuf is waiting to get transmit DMA resources we
219 * need to keep some information with it. We don't want to allocate
220 * additional memory for this so we stuff it into free fields in the
 221 * mbuf packet header. Neither the checksum fields nor the rcvif field
 222 * is used on this path, so we use these.
223 */
224#define TX_AAL5 0x1 /* transmit AAL5 PDU */
225#define TX_HAS_TBD 0x2 /* TBD did fit into mbuf */
226#define TX_HAS_PAD 0x4 /* padding did fit into mbuf */
227#define TX_HAS_PDU 0x8 /* PDU trailer did fit into mbuf */
228
229#define MBUF_SET_TX(M, VCI, FLAGS, DATALEN, PAD, MAP) do { \
230 (M)->m_pkthdr.csum_data = (VCI) | ((FLAGS) << MID_VCI_BITS); \
231 (M)->m_pkthdr.csum_flags = ((DATALEN) & 0xffff) | \
232 ((PAD & 0x3f) << 16); \
233 (M)->m_pkthdr.rcvif = (void *)(MAP); \
234 } while (0)
235
236#define MBUF_GET_TX(M, VCI, FLAGS, DATALEN, PAD, MAP) do { \
237 (VCI) = (M)->m_pkthdr.csum_data & ((1 << MID_VCI_BITS) - 1); \
238 (FLAGS) = ((M)->m_pkthdr.csum_data >> MID_VCI_BITS) & 0xf; \
239 (DATALEN) = (M)->m_pkthdr.csum_flags & 0xffff; \
240 (PAD) = ((M)->m_pkthdr.csum_flags >> 16) & 0x3f; \
241 (MAP) = (void *)((M)->m_pkthdr.rcvif); \
242 } while (0)
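/*
 * Illustrative example: a 100-byte AAL5 PDU on VCI 5 that still needs
 * 4 pad bytes and whose TBD fit into the mbuf would be stamped with
 *
 *	MBUF_SET_TX(m, 5, TX_AAL5 | TX_HAS_TBD, 100, 4, map);
 *
 * i.e. csum_data = 5 | ((TX_AAL5 | TX_HAS_TBD) << MID_VCI_BITS),
 * csum_flags = 100 | (4 << 16) and rcvif = (void *)map; MBUF_GET_TX
 * later recovers all five values in en_txdma.
 */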
243
244
245#define EN_WRAPADD(START, STOP, CUR, VAL) do { \
246 (CUR) = (CUR) + (VAL); \
247 if ((CUR) >= (STOP)) \
248 (CUR) = (START) + ((CUR) - (STOP)); \
249 } while (0)
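/*
 * Example (illustrative): with START = 0x100, STOP = 0x200, CUR = 0x1f8
 * and VAL = 0x10, CUR first advances to 0x208, which is beyond STOP, so
 * it wraps to 0x100 + (0x208 - 0x200) = 0x108.
 */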
250
251#define WORD_IDX(START, X) (((X) - (START)) / sizeof(uint32_t))
252
253#define SETQ_END(SC, VAL) ((SC)->is_adaptec ? \
254 ((VAL) | (MID_DMA_END >> 4)) : \
255 ((VAL) | (MID_DMA_END)))
256
257/*
 258 * The dtq and drq members are set for each END entry in the corresponding
 259 * card queue entry. They are used to find out when a buffer has
 260 * finished DMAing and can be freed.
 261 *
 262 * We store sc->dtq and sc->drq data in the following format...
 263 * the 0x80000 ensures the value is != 0
264 */
265#define EN_DQ_MK(SLOT, LEN) (((SLOT) << 20) | (LEN) | (0x80000))
266#define EN_DQ_SLOT(X) ((X) >> 20)
267#define EN_DQ_LEN(X) ((X) & 0x3ffff)
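/*
 * Worked example (illustrative): EN_DQ_MK(2, 1000) yields
 * (2 << 20) | 0x80000 | 1000 = 0x2803e8; EN_DQ_SLOT() recovers 2 and
 * EN_DQ_LEN() recovers 1000 (0x80000 is bit 19 and thus outside the
 * 18-bit length mask). Even EN_DQ_MK(0, 0) is non-zero, so a zero
 * entry can safely mean "unused".
 */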
268
269/*
270 * Variables
271 */
272static uma_zone_t en_vcc_zone;
273
274/***********************************************************************/
275
276/*
277 * en_read{x}: read a word from the card. These are the only functions
278 * that read from the card.
279 */
280static __inline uint32_t
281en_readx(struct en_softc *sc, uint32_t r)
282{
283 uint32_t v;
284
285#ifdef EN_DIAG
286 if (r > MID_MAXOFF || (r % 4))
287 panic("en_read out of range, r=0x%x", r);
288#endif
289 v = bus_space_read_4(sc->en_memt, sc->en_base, r);
290 return (v);
291}
292
293static __inline uint32_t
294en_read(struct en_softc *sc, uint32_t r)
295{
296 uint32_t v;
297
298#ifdef EN_DIAG
299 if (r > MID_MAXOFF || (r % 4))
300 panic("en_read out of range, r=0x%x", r);
301#endif
302 v = bus_space_read_4(sc->en_memt, sc->en_base, r);
303 DBG(sc, REG, ("en_read(%#x) -> %08x", r, v));
304 return (v);
305}
306
307/*
308 * en_write: write a word to the card. This is the only function that
309 * writes to the card.
310 */
311static __inline void
312en_write(struct en_softc *sc, uint32_t r, uint32_t v)
313{
314#ifdef EN_DIAG
315 if (r > MID_MAXOFF || (r % 4))
316 panic("en_write out of range, r=0x%x", r);
317#endif
318 DBG(sc, REG, ("en_write(%#x) <- %08x", r, v));
319 bus_space_write_4(sc->en_memt, sc->en_base, r, v);
320}
321
322/*
323 * en_k2sz: convert KBytes to a size parameter (a log2)
324 */
325static __inline int
326en_k2sz(int k)
327{
328 switch(k) {
329 case 1: return (0);
330 case 2: return (1);
331 case 4: return (2);
332 case 8: return (3);
333 case 16: return (4);
334 case 32: return (5);
335 case 64: return (6);
336 case 128: return (7);
337 default:
338 panic("en_k2sz");
339 }
340 return (0);
341}
342#define en_log2(X) en_k2sz(X)
343
344/*
345 * en_b2sz: convert a DMA burst code to its byte size
346 */
347static __inline int
348en_b2sz(int b)
349{
350 switch (b) {
351 case MIDDMA_WORD: return (1*4);
352 case MIDDMA_2WMAYBE:
353 case MIDDMA_2WORD: return (2*4);
354 case MIDDMA_4WMAYBE:
355 case MIDDMA_4WORD: return (4*4);
356 case MIDDMA_8WMAYBE:
357 case MIDDMA_8WORD: return (8*4);
358 case MIDDMA_16WMAYBE:
359 case MIDDMA_16WORD: return (16*4);
360 default:
361 panic("en_b2sz");
362 }
363 return (0);
364}
365
366/*
367 * en_sz2b: convert a burst size (bytes) to DMA burst code
368 */
369static __inline int
370en_sz2b(int sz)
371{
372 switch (sz) {
373 case 1*4: return (MIDDMA_WORD);
374 case 2*4: return (MIDDMA_2WORD);
375 case 4*4: return (MIDDMA_4WORD);
376 case 8*4: return (MIDDMA_8WORD);
377 case 16*4: return (MIDDMA_16WORD);
378 default:
379 panic("en_sz2b");
380 }
381 return(0);
382}
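/*
 * Note (illustrative): these helpers are inverses where defined, e.g.
 * en_sz2b(en_b2sz(MIDDMA_8WORD)) == MIDDMA_8WORD and
 * en_b2sz(en_sz2b(32)) == 32; the *WMAYBE codes map to the same byte
 * sizes as their firm counterparts.
 */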
383
384#ifdef EN_DEBUG
385/*
386 * Dump a packet
387 */
388static void
389en_dump_packet(struct en_softc *sc, struct mbuf *m)
390{
391 int plen = m->m_pkthdr.len;
392 u_int pos = 0;
393 u_int totlen = 0;
394 int len;
395 u_char *ptr;
396
397 device_printf(sc->dev, "packet len=%d", plen);
398 while (m != NULL) {
399 totlen += m->m_len;
400 ptr = mtod(m, u_char *);
401 for (len = 0; len < m->m_len; len++, pos++, ptr++) {
402 if (pos % 16 == 8)
403 printf(" ");
404 if (pos % 16 == 0)
405 printf("\n");
406 printf(" %02x", *ptr);
407 }
408 m = m->m_next;
409 }
410 printf("\n");
411 if (totlen != plen)
412 printf("sum of m_len=%u\n", totlen);
413}
414#endif
415
416/*********************************************************************/
417/*
418 * DMA maps
419 */
420
421/*
422 * Map constructor for a MAP.
423 *
 424 * This is called each time a map is allocated
 425 * from the pool and about to be returned to the user. Here we actually
 426 * allocate the map if there isn't one. The problem is that we may fail
 427 * to allocate the DMA map yet have no means to signal this error. Therefore,
 428 * when allocating a map, the caller must check that there is a map. An
 429 * additional problem is that i386 maps will be NULL, yet are ok and must
 430 * be freed, so let's use a flag to signal allocation.
 431 *
 432 * Caveat: we have no way to know that we are called from an interrupt context
 433 * here. We rely on the fact that bus_dmamap_create uses M_NOWAIT in all
434 * its allocations.
435 *
436 * LOCK: any, not needed
437 */
438static int
439en_map_ctor(void *mem, int size, void *arg, int flags)
440{
441 struct en_softc *sc = arg;
442 struct en_map *map = mem;
443 int err;
444
445 err = bus_dmamap_create(sc->txtag, 0, &map->map);
446 if (err != 0) {
447 device_printf(sc->dev, "cannot create DMA map %d\n", err);
448 return (err);
449 }
450 map->flags = ENMAP_ALLOC;
451 map->sc = sc;
452 return (0);
453}
454
455/*
456 * Map destructor.
457 *
458 * Called when a map is disposed into the zone. If the map is loaded, unload
459 * it.
460 *
461 * LOCK: any, not needed
462 */
463static void
464en_map_dtor(void *mem, int size, void *arg)
465{
466 struct en_map *map = mem;
467
468 if (map->flags & ENMAP_LOADED) {
469 bus_dmamap_unload(map->sc->txtag, map->map);
470 map->flags &= ~ENMAP_LOADED;
471 }
472}
473
474/*
 475 * Map finalizer.
476 *
477 * This is called each time a map is returned from the zone to the system.
478 * Get rid of the dmamap here.
479 *
480 * LOCK: any, not needed
481 */
482static void
483en_map_fini(void *mem, int size)
484{
485 struct en_map *map = mem;
486
487 bus_dmamap_destroy(map->sc->txtag, map->map);
488}
489
490/*********************************************************************/
491/*
492 * Transmission
493 */
494
495/*
496 * Argument structure to load a transmit DMA map
497 */
498struct txarg {
499 struct en_softc *sc;
500 struct mbuf *m;
501 u_int vci;
502 u_int chan; /* transmit channel */
503 u_int datalen; /* length of user data */
504 u_int flags;
505 u_int wait; /* return: out of resources */
506};
507
508/*
509 * TX DMA map loader helper. This function is the callback when the map
510 * is loaded. It should fill the DMA segment descriptors into the hardware.
511 *
512 * LOCK: locked, needed
513 */
514static void
515en_txdma_load(void *uarg, bus_dma_segment_t *segs, int nseg, bus_size_t mapsize,
516 int error)
517{
518 struct txarg *tx = uarg;
519 struct en_softc *sc = tx->sc;
520 struct en_txslot *slot = &sc->txslot[tx->chan];
521 uint32_t cur; /* on-card buffer position (bytes offset) */
522 uint32_t dtq; /* on-card queue position (byte offset) */
523 uint32_t last_dtq; /* last DTQ we have written */
524 uint32_t tmp;
525 u_int free; /* free queue entries on card */
526 u_int needalign, cnt;
527 bus_size_t rest; /* remaining bytes in current segment */
528 bus_addr_t addr;
529 bus_dma_segment_t *s;
530 uint32_t count, bcode;
531 int i;
532
533 if (error != 0)
534 return;
535
536 cur = slot->cur;
537 dtq = sc->dtq_us;
538 free = sc->dtq_free;
539
540 last_dtq = 0; /* make gcc happy */
541
542 /*
543 * Local macro to add an entry to the transmit DMA area. If there
544 * are no entries left, return. Save the byte offset of the entry
545 * in last_dtq for later use.
546 */
547#define PUT_DTQ_ENTRY(ENI, BCODE, COUNT, ADDR) \
548 if (free == 0) { \
549 EN_COUNT(sc->stats.txdtqout); \
550 tx->wait = 1; \
551 return; \
552 } \
553 last_dtq = dtq; \
554 en_write(sc, dtq + 0, (ENI || !sc->is_adaptec) ? \
555 MID_MK_TXQ_ENI(COUNT, tx->chan, 0, BCODE) : \
556 MID_MK_TXQ_ADP(COUNT, tx->chan, 0, BCODE)); \
557 en_write(sc, dtq + 4, ADDR); \
558 \
559 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, dtq, 8); \
560 free--;
561
562 /*
563 * Local macro to generate a DMA entry to DMA cnt bytes. Updates
564 * the current buffer byte offset accordingly.
565 */
566#define DO_DTQ(TYPE) do { \
567 rest -= cnt; \
568 EN_WRAPADD(slot->start, slot->stop, cur, cnt); \
569 DBG(sc, TX, ("tx%d: "TYPE" %u bytes, %ju left, cur %#x", \
570 tx->chan, cnt, (uintmax_t)rest, cur)); \
571 \
572 PUT_DTQ_ENTRY(1, bcode, count, addr); \
573 \
574 addr += cnt; \
575 } while (0)
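	/*
	 * Illustrative walk-through of the three-step burst strategy
	 * below (assuming bestburstlen = 64, i.e. 16-word bursts): a
	 * 140-byte segment at bus address 0x1004 first gets a 60-byte
	 * aligning DMA (64 - (0x1004 & 63)), then one 64-byte best-sized
	 * burst, and finally a 16-byte cleanup burst via en_dmaplan[4]
	 * (MIDDMA_4WORD, count 16 >> 4 = 1).
	 */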
576
577 if (!(tx->flags & TX_HAS_TBD)) {
578 /*
579 * Prepend the TBD - it did not fit into the first mbuf
580 */
581 tmp = MID_TBD_MK1((tx->flags & TX_AAL5) ?
582 MID_TBD_AAL5 : MID_TBD_NOAAL5,
583 sc->vccs[tx->vci]->txspeed,
584 tx->m->m_pkthdr.len / MID_ATMDATASZ);
585 en_write(sc, cur, tmp);
586 EN_WRAPADD(slot->start, slot->stop, cur, 4);
587
588 tmp = MID_TBD_MK2(tx->vci, 0, 0);
589 en_write(sc, cur, tmp);
590 EN_WRAPADD(slot->start, slot->stop, cur, 4);
591
592 /* update DMA address */
593 PUT_DTQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
594 }
595
596 for (i = 0, s = segs; i < nseg; i++, s++) {
597 rest = s->ds_len;
598 addr = s->ds_addr;
599
600 if (sc->is_adaptec) {
601 /* adaptec card - simple */
602
603 /* advance the on-card buffer pointer */
604 EN_WRAPADD(slot->start, slot->stop, cur, rest);
605 DBG(sc, TX, ("tx%d: adp %ju bytes %#jx (cur now 0x%x)",
606 tx->chan, (uintmax_t)rest, (uintmax_t)addr, cur));
607
608 PUT_DTQ_ENTRY(0, 0, rest, addr);
609
610 continue;
611 }
612
613 /*
614 * do we need to do a DMA op to align to the maximum
 615 * burst? Note that we are always 32-bit aligned.
616 */
617 if (sc->alburst &&
618 (needalign = (addr & sc->bestburstmask)) != 0) {
619 /* compute number of bytes, words and code */
620 cnt = sc->bestburstlen - needalign;
621 if (cnt > rest)
622 cnt = rest;
623 count = cnt / sizeof(uint32_t);
624 if (sc->noalbursts) {
625 bcode = MIDDMA_WORD;
626 } else {
627 bcode = en_dmaplan[count].bcode;
628 count = cnt >> en_dmaplan[count].divshift;
629 }
630 DO_DTQ("al_dma");
631 }
632
633 /* do we need to do a max-sized burst? */
634 if (rest >= sc->bestburstlen) {
635 count = rest >> sc->bestburstshift;
636 cnt = count << sc->bestburstshift;
637 bcode = sc->bestburstcode;
638 DO_DTQ("best_dma");
639 }
640
641 /* do we need to do a cleanup burst? */
642 if (rest != 0) {
643 cnt = rest;
644 count = rest / sizeof(uint32_t);
645 if (sc->noalbursts) {
646 bcode = MIDDMA_WORD;
647 } else {
648 bcode = en_dmaplan[count].bcode;
649 count = cnt >> en_dmaplan[count].divshift;
650 }
651 DO_DTQ("clean_dma");
652 }
653 }
654
655 KASSERT (tx->flags & TX_HAS_PAD, ("PDU not padded"));
656
657 if ((tx->flags & TX_AAL5) && !(tx->flags & TX_HAS_PDU)) {
658 /*
659 * Append the AAL5 PDU trailer
660 */
661 tmp = MID_PDU_MK1(0, 0, tx->datalen);
662 en_write(sc, cur, tmp);
663 EN_WRAPADD(slot->start, slot->stop, cur, 4);
664
665 en_write(sc, cur, 0);
666 EN_WRAPADD(slot->start, slot->stop, cur, 4);
667
668 /* update DMA address */
669 PUT_DTQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
670 }
671
672 /* record the end for the interrupt routine */
673 sc->dtq[MID_DTQ_A2REG(last_dtq)] =
674 EN_DQ_MK(tx->chan, tx->m->m_pkthdr.len);
675
676 /* set the end flag in the last descriptor */
677 en_write(sc, last_dtq + 0, SETQ_END(sc, en_read(sc, last_dtq + 0)));
678
679#undef PUT_DTQ_ENTRY
680#undef DO_DTQ
681
682 /* commit */
683 slot->cur = cur;
684 sc->dtq_free = free;
685 sc->dtq_us = dtq;
686
687 /* tell card */
688 en_write(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_us));
689}
690
691/*
692 * en_txdma: start transmit DMA on the given channel, if possible
693 *
694 * This is called from two places: when we got new packets from the upper
695 * layer or when we found that buffer space has freed up during interrupt
696 * processing.
697 *
698 * LOCK: locked, needed
699 */
700static void
701en_txdma(struct en_softc *sc, struct en_txslot *slot)
702{
703 struct en_map *map;
704 struct mbuf *lastm;
705 struct txarg tx;
706 u_int pad;
707 int error;
708
709 DBG(sc, TX, ("tx%td: starting ...", slot - sc->txslot));
710 again:
711 bzero(&tx, sizeof(tx));
712 tx.chan = slot - sc->txslot;
713 tx.sc = sc;
714
715 /*
716 * get an mbuf waiting for DMA
717 */
718 _IF_DEQUEUE(&slot->q, tx.m);
719 if (tx.m == NULL) {
720 DBG(sc, TX, ("tx%td: ...done!", slot - sc->txslot));
721 return;
722 }
723 MBUF_GET_TX(tx.m, tx.vci, tx.flags, tx.datalen, pad, map);
724
725 /*
726 * note: don't use the entire buffer space. if WRTX becomes equal
 727 * to RDTX, the transmitter stops, assuming the buffer is empty! --kjc
728 */
729 if (tx.m->m_pkthdr.len >= slot->bfree) {
730 EN_COUNT(sc->stats.txoutspace);
731 DBG(sc, TX, ("tx%td: out of transmit space", slot - sc->txslot));
732 goto waitres;
733 }
734
735 lastm = NULL;
736 if (!(tx.flags & TX_HAS_PAD)) {
737 if (pad != 0) {
738 /* Append the padding buffer */
739 (void)m_length(tx.m, &lastm);
740 lastm->m_next = sc->padbuf;
741 sc->padbuf->m_len = pad;
742 }
743 tx.flags |= TX_HAS_PAD;
744 }
745
746 /*
747 * Try to load that map
748 */
749 error = bus_dmamap_load_mbuf(sc->txtag, map->map, tx.m,
750 en_txdma_load, &tx, BUS_DMA_NOWAIT);
751
752 if (lastm != NULL)
753 lastm->m_next = NULL;
754
755 if (error != 0) {
756 device_printf(sc->dev, "loading TX map failed %d\n",
757 error);
758 goto dequeue_drop;
759 }
760 map->flags |= ENMAP_LOADED;
761 if (tx.wait) {
762 /* probably not enough space */
763 bus_dmamap_unload(map->sc->txtag, map->map);
764 map->flags &= ~ENMAP_LOADED;
765
766 sc->need_dtqs = 1;
767 DBG(sc, TX, ("tx%td: out of transmit DTQs", slot - sc->txslot));
768 goto waitres;
769 }
770
771 EN_COUNT(sc->stats.launch);
772 sc->ifp->if_opackets++;
773
774 sc->vccs[tx.vci]->opackets++;
775 sc->vccs[tx.vci]->obytes += tx.datalen;
776
777#ifdef ENABLE_BPF
778 if (bpf_peers_present(sc->ifp->if_bpf)) {
779 /*
780 * adjust the top of the mbuf to skip the TBD if present
781 * before passing the packet to bpf.
 782 * Also remove padding and the PDU trailer, assuming both
 783 * of them are in the same mbuf. pktlen, m_len and m_data
784 * are not needed anymore so we can change them.
785 */
786 if (tx.flags & TX_HAS_TBD) {
787 tx.m->m_data += MID_TBD_SIZE;
788 tx.m->m_len -= MID_TBD_SIZE;
789 }
790 tx.m->m_pkthdr.len = m_length(tx.m, &lastm);
791 if (tx.m->m_pkthdr.len > tx.datalen) {
792 lastm->m_len -= tx.m->m_pkthdr.len - tx.datalen;
793 tx.m->m_pkthdr.len = tx.datalen;
794 }
795
796 bpf_mtap(sc->ifp->if_bpf, tx.m);
797 }
798#endif
799
800 /*
801 * do some housekeeping and get the next packet
802 */
803 slot->bfree -= tx.m->m_pkthdr.len;
804 _IF_ENQUEUE(&slot->indma, tx.m);
805
806 goto again;
807
808 /*
809 * error handling. This is jumped to when we just want to drop
810 * the packet. Must be unlocked here.
811 */
812 dequeue_drop:
813 if (map != NULL)
814 uma_zfree(sc->map_zone, map);
815
816 slot->mbsize -= tx.m->m_pkthdr.len;
817
818 m_freem(tx.m);
819
820 goto again;
821
822 waitres:
823 _IF_PREPEND(&slot->q, tx.m);
824}
825
826/*
827 * Create a copy of a single mbuf. It can have either internal or
828 * external data, it may have a packet header. External data is really
829 * copied, so the new buffer is writeable.
830 *
831 * LOCK: any, not needed
832 */
833static struct mbuf *
834copy_mbuf(struct mbuf *m)
835{
836 struct mbuf *new;
837
-838 	MGET(new, M_TRYWAIT, MT_DATA);
-839 	if (new == NULL)
-840 		return (NULL);
+838 	MGET(new, M_WAIT, MT_DATA);
 841
 842 	if (m->m_flags & M_PKTHDR) {
 843 		M_MOVE_PKTHDR(new, m);
-844 		if (m->m_len > MHLEN) {
-845 			MCLGET(new, M_TRYWAIT);
-846 			if ((m->m_flags & M_EXT) == 0) {
-847 				m_free(new);
-848 				return (NULL);
-849 			}
-850 		}
+842 		if (m->m_len > MHLEN)
+843 			MCLGET(new, M_WAIT);
 851 	} else {
-852 		if (m->m_len > MLEN) {
-853 			MCLGET(new, M_TRYWAIT);
-854 			if ((m->m_flags & M_EXT) == 0) {
-855 				m_free(new);
-856 				return (NULL);
-857 			}
-858 		}
+845 		if (m->m_len > MLEN)
+846 			MCLGET(new, M_WAIT);
859 }
860
861 bcopy(m->m_data, new->m_data, m->m_len);
862 new->m_len = m->m_len;
863 new->m_flags &= ~M_RDONLY;
864
865 return (new);
866}
867
868/*
 869 * This function is called when we have an ENI adapter. It fixes the
 870 * mbuf chain so that all addresses and lengths are 4-byte aligned.
 871 * The overall length is already padded to a multiple of cells plus the
 872 * TBD, so the padding step must always succeed. The routine can fail
 873 * when it needs to copy an mbuf (this may happen if an mbuf is readonly).
 874 *
 875 * We assume here that aligning the virtual addresses to 4 bytes also
 876 * aligns the physical addresses.
877 *
878 * LOCK: locked, needed
879 */
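/*
 * Example (illustrative): an mbuf with m_len == 5 has off = 5 % 4 = 1,
 * so 4 - 1 = 3 bytes are appended to it, either stolen from the front
 * of the next mbuf or, at the end of the chain, taken out of the
 * padding budget *pad.
 */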
880static struct mbuf *
881en_fix_mchain(struct en_softc *sc, struct mbuf *m0, u_int *pad)
882{
883 struct mbuf **prev = &m0;
884 struct mbuf *m = m0;
885 struct mbuf *new;
886 u_char *d;
887 int off;
888
889 while (m != NULL) {
890 d = mtod(m, u_char *);
891 if ((off = (uintptr_t)d % sizeof(uint32_t)) != 0) {
892 EN_COUNT(sc->stats.mfixaddr);
893 if (M_WRITABLE(m)) {
894 bcopy(d, d - off, m->m_len);
895 m->m_data -= off;
896 } else {
897 if ((new = copy_mbuf(m)) == NULL) {
898 EN_COUNT(sc->stats.mfixfail);
899 m_freem(m0);
900 return (NULL);
901 }
902 new->m_next = m_free(m);
903 *prev = m = new;
904 }
905 }
906
907 if ((off = m->m_len % sizeof(uint32_t)) != 0) {
908 EN_COUNT(sc->stats.mfixlen);
909 if (!M_WRITABLE(m)) {
910 if ((new = copy_mbuf(m)) == NULL) {
911 EN_COUNT(sc->stats.mfixfail);
912 m_freem(m0);
913 return (NULL);
914 }
915 new->m_next = m_free(m);
916 *prev = m = new;
917 }
918 d = mtod(m, u_char *) + m->m_len;
919 off = 4 - off;
920 while (off) {
921 while (m->m_next && m->m_next->m_len == 0)
922 m->m_next = m_free(m->m_next);
923
924 if (m->m_next == NULL) {
925 *d++ = 0;
926 KASSERT(*pad > 0, ("no padding space"));
927 (*pad)--;
928 } else {
929 *d++ = *mtod(m->m_next, u_char *);
930 m->m_next->m_len--;
931 m->m_next->m_data++;
932 }
933 m->m_len++;
934 off--;
935 }
936 }
937
938 prev = &m->m_next;
939 m = m->m_next;
940 }
941
942 return (m0);
943}
944
945/*
946 * en_start: start transmitting the next packet that needs to go out
947 * if there is one. We take off all packets from the interface's queue and
 948 * put them into the channel's queue.
949 *
950 * Here we also prepend the transmit packet descriptor and append the padding
951 * and (for aal5) the PDU trailer. This is different from the original driver:
 952 * we assume that allocating one or two additional mbufs is actually cheaper
953 * than all this algorithmic fiddling we would need otherwise.
954 *
 955 * While the packet is on the channel's wait queue we use the csum_* fields
956 * in the packet header to hold the original datalen, the AAL5 flag and the
957 * VCI. The packet length field in the header holds the needed buffer space.
958 * This may actually be more than the length of the current mbuf chain (when
959 * one or more of TBD, padding and PDU do not fit).
960 *
961 * LOCK: unlocked, needed
962 */
963static void
964en_start(struct ifnet *ifp)
965{
966 struct en_softc *sc = (struct en_softc *)ifp->if_softc;
967 struct mbuf *m, *lastm;
968 struct atm_pseudohdr *ap;
969 u_int pad; /* 0-bytes to pad at PDU end */
970 u_int datalen; /* length of user data */
971 u_int vci; /* the VCI we are transmitting on */
972 u_int flags;
973 uint32_t tbd[2];
974 uint32_t pdu[2];
975 struct en_vcc *vc;
976 struct en_map *map;
977 struct en_txslot *tx;
978
979 while (1) {
980 IF_DEQUEUE(&ifp->if_snd, m);
981 if (m == NULL)
982 return;
983
984 flags = 0;
985
986 ap = mtod(m, struct atm_pseudohdr *);
987 vci = ATM_PH_VCI(ap);
988
989 if (ATM_PH_VPI(ap) != 0 || vci >= MID_N_VC ||
990 (vc = sc->vccs[vci]) == NULL ||
991 (vc->vflags & VCC_CLOSE_RX)) {
992 DBG(sc, TX, ("output vpi=%u, vci=%u -- drop",
993 ATM_PH_VPI(ap), vci));
994 m_freem(m);
995 continue;
996 }
997 if (vc->vcc.aal == ATMIO_AAL_5)
998 flags |= TX_AAL5;
999 m_adj(m, sizeof(struct atm_pseudohdr));
1000
1001 /*
1002 * (re-)calculate size of packet (in bytes)
1003 */
1004 m->m_pkthdr.len = datalen = m_length(m, &lastm);
1005
1006 /*
 1007 * Compute how much padding we need on the end of the mbuf,
 1008 * then see if we can put the TBD at the front of the mbuf
 1009 * where the link header goes (well-behaved protocols will
 1010 * reserve room for us). Last, check if there is room for the PDU tail.
1011 */
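		/*
		 * Worked example (illustrative, assuming MID_ATMDATASZ == 48,
		 * MID_PDU_SIZE == 8 and MID_TBD_SIZE == 8): datalen = 100 on
		 * an AAL5 VC gives 100 + 8 = 108, rounded up to 144 (3 cells),
		 * so pad = 144 - 100 - 8 = 36; adding the TBD makes the needed
		 * buffer space m_pkthdr.len = 152.
		 */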
1012 if (flags & TX_AAL5)
1013 m->m_pkthdr.len += MID_PDU_SIZE;
1014 m->m_pkthdr.len = roundup(m->m_pkthdr.len, MID_ATMDATASZ);
1015 pad = m->m_pkthdr.len - datalen;
1016 if (flags & TX_AAL5)
1017 pad -= MID_PDU_SIZE;
1018 m->m_pkthdr.len += MID_TBD_SIZE;
1019
1020 DBG(sc, TX, ("txvci%d: buflen=%u datalen=%u lead=%d trail=%d",
1021 vci, m->m_pkthdr.len, datalen, (int)M_LEADINGSPACE(m),
1022 (int)M_TRAILINGSPACE(lastm)));
1023
1024 /*
1025 * From here on we need access to sc
1026 */
1027 EN_LOCK(sc);
1028
1029 /*
 1030 * Allocate a map. We do this here rather than in en_txdma,
 1031 * because en_txdma is also called from the interrupt handler
 1032 * and we would have a locking problem there. We must
1033 * use NOWAIT here, because the ip_output path holds various
1034 * locks.
1035 */
1036 map = uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT);
1037 if (map == NULL) {
1038 /* drop that packet */
1039 EN_COUNT(sc->stats.txnomap);
1040 EN_UNLOCK(sc);
1041 m_freem(m);
1042 continue;
1043 }
1044
1045 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1046 EN_UNLOCK(sc);
1047 uma_zfree(sc->map_zone, map);
1048 m_freem(m);
1049 continue;
1050 }
1051
1052 /*
 1053 * Check whether we can prepend the TBD (8 bytes)
1054 */
1055 if (M_WRITABLE(m) && M_LEADINGSPACE(m) >= MID_TBD_SIZE) {
1056 tbd[0] = htobe32(MID_TBD_MK1((flags & TX_AAL5) ?
1057 MID_TBD_AAL5 : MID_TBD_NOAAL5,
1058 vc->txspeed, m->m_pkthdr.len / MID_ATMDATASZ));
1059 tbd[1] = htobe32(MID_TBD_MK2(vci, 0, 0));
1060
1061 m->m_data -= MID_TBD_SIZE;
1062 bcopy(tbd, m->m_data, MID_TBD_SIZE);
1063 m->m_len += MID_TBD_SIZE;
1064 flags |= TX_HAS_TBD;
1065 }
1066
1067 /*
1068 * Check whether the padding fits (must be writeable -
1069 * we pad with zero).
1070 */
1071 if (M_WRITABLE(lastm) && M_TRAILINGSPACE(lastm) >= pad) {
1072 bzero(lastm->m_data + lastm->m_len, pad);
1073 lastm->m_len += pad;
1074 flags |= TX_HAS_PAD;
1075
1076 if ((flags & TX_AAL5) &&
1077 M_TRAILINGSPACE(lastm) > MID_PDU_SIZE) {
1078 pdu[0] = htobe32(MID_PDU_MK1(0, 0, datalen));
1079 pdu[1] = 0;
1080 bcopy(pdu, lastm->m_data + lastm->m_len,
1081 MID_PDU_SIZE);
1082 lastm->m_len += MID_PDU_SIZE;
1083 flags |= TX_HAS_PDU;
1084 }
1085 }
1086
1087 if (!sc->is_adaptec &&
1088 (m = en_fix_mchain(sc, m, &pad)) == NULL) {
1089 EN_UNLOCK(sc);
1090 uma_zfree(sc->map_zone, map);
1091 continue;
1092 }
1093
1094 /*
1095 * get assigned channel (will be zero unless txspeed is set)
1096 */
1097 tx = vc->txslot;
1098
1099 if (m->m_pkthdr.len > EN_TXSZ * 1024) {
1100 DBG(sc, TX, ("tx%td: packet larger than xmit buffer "
1101 "(%d > %d)\n", tx - sc->txslot, m->m_pkthdr.len,
1102 EN_TXSZ * 1024));
1103 EN_UNLOCK(sc);
1104 m_freem(m);
1105 uma_zfree(sc->map_zone, map);
1106 continue;
1107 }
1108
1109 if (tx->mbsize > EN_TXHIWAT) {
1110 EN_COUNT(sc->stats.txmbovr);
1111 DBG(sc, TX, ("tx%td: buffer space shortage",
1112 tx - sc->txslot));
1113 EN_UNLOCK(sc);
1114 m_freem(m);
1115 uma_zfree(sc->map_zone, map);
1116 continue;
1117 }
1118
1119 /* commit */
1120 tx->mbsize += m->m_pkthdr.len;
1121
1122 DBG(sc, TX, ("tx%td: VCI=%d, speed=0x%x, buflen=%d, mbsize=%d",
1123 tx - sc->txslot, vci, sc->vccs[vci]->txspeed,
1124 m->m_pkthdr.len, tx->mbsize));
1125
1126 MBUF_SET_TX(m, vci, flags, datalen, pad, map);
1127
1128 _IF_ENQUEUE(&tx->q, m);
1129
1130 en_txdma(sc, tx);
1131
1132 EN_UNLOCK(sc);
1133 }
1134}
1135
1136/*********************************************************************/
1137/*
1138 * VCs
1139 */
1140
1141/*
1142 * en_loadvc: load a vc tab entry from a slot
1143 *
1144 * LOCK: locked, needed
1145 */
1146static void
1147en_loadvc(struct en_softc *sc, struct en_vcc *vc)
1148{
1149 uint32_t reg = en_read(sc, MID_VC(vc->vcc.vci));
1150
1151 reg = MIDV_SETMODE(reg, MIDV_TRASH);
1152 en_write(sc, MID_VC(vc->vcc.vci), reg);
1153 DELAY(27);
1154
1155 /* no need to set CRC */
1156
1157 /* read pointer = 0, desc. start = 0 */
1158 en_write(sc, MID_DST_RP(vc->vcc.vci), 0);
1159 /* write pointer = 0 */
1160 en_write(sc, MID_WP_ST_CNT(vc->vcc.vci), 0);
1161 /* set mode, size, loc */
1162 en_write(sc, MID_VC(vc->vcc.vci), vc->rxslot->mode);
1163
1164 vc->rxslot->cur = vc->rxslot->start;
1165
1166 DBG(sc, VC, ("rx%td: assigned to VCI %d", vc->rxslot - sc->rxslot,
1167 vc->vcc.vci));
1168}
1169
1170/*
1171 * Open the given vcc.
1172 *
1173 * LOCK: unlocked, needed
1174 */
1175static int
1176en_open_vcc(struct en_softc *sc, struct atmio_openvcc *op)
1177{
1178 uint32_t oldmode, newmode;
1179 struct en_rxslot *slot;
1180 struct en_vcc *vc;
1181 int error = 0;
1182
1183 DBG(sc, IOCTL, ("enable vpi=%d, vci=%d, flags=%#x",
1184 op->param.vpi, op->param.vci, op->param.flags));
1185
1186 if (op->param.vpi != 0 || op->param.vci >= MID_N_VC)
1187 return (EINVAL);
1188
1189 vc = uma_zalloc(en_vcc_zone, M_NOWAIT | M_ZERO);
1190 if (vc == NULL)
1191 return (ENOMEM);
1192
1193 EN_LOCK(sc);
1194
1195 if (sc->vccs[op->param.vci] != NULL) {
1196 error = EBUSY;
1197 goto done;
1198 }
1199
1200 /* find a free receive slot */
1201 for (slot = sc->rxslot; slot < &sc->rxslot[sc->en_nrx]; slot++)
1202 if (slot->vcc == NULL)
1203 break;
1204 if (slot == &sc->rxslot[sc->en_nrx]) {
1205 error = ENOSPC;
1206 goto done;
1207 }
1208
1209 vc->rxslot = slot;
1210 vc->rxhand = op->rxhand;
1211 vc->vcc = op->param;
1212
1213 oldmode = slot->mode;
1214 newmode = (op->param.aal == ATMIO_AAL_5) ? MIDV_AAL5 : MIDV_NOAAL;
1215 slot->mode = MIDV_SETMODE(oldmode, newmode);
1216 slot->vcc = vc;
1217
1218 KASSERT (_IF_QLEN(&slot->indma) == 0 && _IF_QLEN(&slot->q) == 0,
1219	    ("en_open_vcc: left over mbufs on enable slot=%td",
1220 vc->rxslot - sc->rxslot));
1221
1222 vc->txspeed = 0;
1223 vc->txslot = sc->txslot;
1224 vc->txslot->nref++; /* bump reference count */
1225
1226 en_loadvc(sc, vc); /* does debug printf for us */
1227
1228 /* don't free below */
1229 sc->vccs[vc->vcc.vci] = vc;
1230 vc = NULL;
1231 sc->vccs_open++;
1232
1233 done:
1234 if (vc != NULL)
1235 uma_zfree(en_vcc_zone, vc);
1236
1237 EN_UNLOCK(sc);
1238 return (error);
1239}
1240
1241/*
1242 * Close finished
1243 */
1244static void
1245en_close_finish(struct en_softc *sc, struct en_vcc *vc)
1246{
1247
1248 if (vc->rxslot != NULL)
1249 vc->rxslot->vcc = NULL;
1250
1251 DBG(sc, VC, ("vci: %u free (%p)", vc->vcc.vci, vc));
1252
1253 sc->vccs[vc->vcc.vci] = NULL;
1254 uma_zfree(en_vcc_zone, vc);
1255 sc->vccs_open--;
1256}
1257
1258/*
1259 * LOCK: unlocked, needed
1260 */
1261static int
1262en_close_vcc(struct en_softc *sc, struct atmio_closevcc *cl)
1263{
1264 uint32_t oldmode, newmode;
1265 struct en_vcc *vc;
1266 int error = 0;
1267
1268 DBG(sc, IOCTL, ("disable vpi=%d, vci=%d", cl->vpi, cl->vci));
1269
1270 if (cl->vpi != 0 || cl->vci >= MID_N_VC)
1271 return (EINVAL);
1272
1273 EN_LOCK(sc);
1274 if ((vc = sc->vccs[cl->vci]) == NULL) {
1275 error = ENOTCONN;
1276 goto done;
1277 }
1278
1279 /*
1280 * turn off VCI
1281 */
1282 if (vc->rxslot == NULL) {
1283 error = ENOTCONN;
1284 goto done;
1285 }
1286 if (vc->vflags & VCC_DRAIN) {
1287 error = EINVAL;
1288 goto done;
1289 }
1290
1291 oldmode = en_read(sc, MID_VC(cl->vci));
1292 newmode = MIDV_SETMODE(oldmode, MIDV_TRASH) & ~MIDV_INSERVICE;
1293 en_write(sc, MID_VC(cl->vci), (newmode | (oldmode & MIDV_INSERVICE)));
1294
1295	/* halt it in its tracks, being careful to preserve the inservice bit */
1296 DELAY(27);
1297 vc->rxslot->mode = newmode;
1298
1299 vc->txslot->nref--;
1300
1301 /* if stuff is still going on we are going to have to drain it out */
1302 if (_IF_QLEN(&vc->rxslot->indma) == 0 &&
1303 _IF_QLEN(&vc->rxslot->q) == 0 &&
1304 (vc->vflags & VCC_SWSL) == 0) {
1305 en_close_finish(sc, vc);
1306 goto done;
1307 }
1308
1309 vc->vflags |= VCC_DRAIN;
1310 DBG(sc, IOCTL, ("VCI %u now draining", cl->vci));
1311
1312 if (vc->vcc.flags & ATMIO_FLAG_ASYNC)
1313 goto done;
1314
1315 vc->vflags |= VCC_CLOSE_RX;
1316 while ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1317 (vc->vflags & VCC_DRAIN))
1318 cv_wait(&sc->cv_close, &sc->en_mtx);
1319
1320 en_close_finish(sc, vc);
1321 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1322 error = EIO;
1323 goto done;
1324 }
1325
1327 done:
1328 EN_UNLOCK(sc);
1329 return (error);
1330}
1331
1332/*********************************************************************/
1333/*
1334 * starting/stopping the card
1335 */
1336
1337/*
1338 * en_reset_ul: reset the board, throw away work in progress.
1339 * must en_init to recover.
1340 *
1341 * LOCK: locked, needed
1342 */
1343static void
1344en_reset_ul(struct en_softc *sc)
1345{
1346 struct en_map *map;
1347 struct mbuf *m;
1348 struct en_rxslot *rx;
1349 int lcv;
1350
1351 device_printf(sc->dev, "reset\n");
1352 sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1353
1354 if (sc->en_busreset)
1355 sc->en_busreset(sc);
1356 en_write(sc, MID_RESID, 0x0); /* reset hardware */
1357
1358 /*
1359	 * recv: dump any mbufs we are dma'ing into. If DRAINing, the reset
1360	 * will free us! Don't release the rxslot from the channel.
1361 */
1362 for (lcv = 0 ; lcv < MID_N_VC ; lcv++) {
1363 if (sc->vccs[lcv] == NULL)
1364 continue;
1365 rx = sc->vccs[lcv]->rxslot;
1366
1367 for (;;) {
1368 _IF_DEQUEUE(&rx->indma, m);
1369 if (m == NULL)
1370 break;
1371 map = (void *)m->m_pkthdr.rcvif;
1372 uma_zfree(sc->map_zone, map);
1373 m_freem(m);
1374 }
1375 for (;;) {
1376 _IF_DEQUEUE(&rx->q, m);
1377 if (m == NULL)
1378 break;
1379 m_freem(m);
1380 }
1381 sc->vccs[lcv]->vflags = 0;
1382 }
1383
1384 /*
1385 * xmit: dump everything
1386 */
1387 for (lcv = 0 ; lcv < EN_NTX ; lcv++) {
1388 for (;;) {
1389 _IF_DEQUEUE(&sc->txslot[lcv].indma, m);
1390 if (m == NULL)
1391 break;
1392 map = (void *)m->m_pkthdr.rcvif;
1393 uma_zfree(sc->map_zone, map);
1394 m_freem(m);
1395 }
1396 for (;;) {
1397 _IF_DEQUEUE(&sc->txslot[lcv].q, m);
1398 if (m == NULL)
1399 break;
1400 map = (void *)m->m_pkthdr.rcvif;
1401 uma_zfree(sc->map_zone, map);
1402 m_freem(m);
1403 }
1404 sc->txslot[lcv].mbsize = 0;
1405 }
1406
1407 /*
1408 * Unstop all waiters
1409 */
1410 cv_broadcast(&sc->cv_close);
1411}
1412
1413/*
1414 * en_reset: reset the board, throw away work in progress.
1415 * must en_init to recover.
1416 *
1417 * LOCK: unlocked, needed
1418 *
1419	 * Use en_reset_ul if you already have the lock.
1420 */
1421void
1422en_reset(struct en_softc *sc)
1423{
1424 EN_LOCK(sc);
1425 en_reset_ul(sc);
1426 EN_UNLOCK(sc);
1427}
1428
1429
1430/*
1431 * en_init: init board and sync the card with the data in the softc.
1432 *
1433 * LOCK: locked, needed
1434 */
1435static void
1436en_init(struct en_softc *sc)
1437{
1438 int vc, slot;
1439 uint32_t loc;
1440
1441 if ((sc->ifp->if_flags & IFF_UP) == 0) {
1442 DBG(sc, INIT, ("going down"));
1443 en_reset(sc); /* to be safe */
1444 return;
1445 }
1446
1447 DBG(sc, INIT, ("going up"));
1448 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; /* enable */
1449
1450 if (sc->en_busreset)
1451 sc->en_busreset(sc);
1452 en_write(sc, MID_RESID, 0x0); /* reset */
1453
1454 /* zero memory */
1455 bus_space_set_region_4(sc->en_memt, sc->en_base,
1456 MID_RAMOFF, 0, sc->en_obmemsz / 4);
1457
1458 /*
1459 * init obmem data structures: vc tab, dma q's, slist.
1460 *
1461 * note that we set drq_free/dtq_free to one less than the total number
1462 * of DTQ/DRQs present. we do this because the card uses the condition
1463 * (drq_chip == drq_us) to mean "list is empty"... but if you allow the
1464	 * circular list to become completely full then (drq_chip == drq_us)
1465	 * would hold there as well [i.e. the drq_us pointer wraps all the way
1466	 * around]. by restricting the number of active requests to (N - 1) we
1467	 * prevent the list from becoming completely full. note that the card
1468	 * will sometimes give us an interrupt for a DTQ/DRQ we have already
1469	 * processed... this helps keep that interrupt from messing us up.
1470 */
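	/*
	 * Worked example (sizes illustrative): with a 512-entry DRQ ring
	 * at most 511 requests may ever be outstanding, so the test
	 * (drq_chip == drq_us) can only mean "empty" - it can never be
	 * the leftover of a completely full ring.
	 */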
1471 bzero(&sc->drq, sizeof(sc->drq));
1472 sc->drq_free = MID_DRQ_N - 1;
1473 sc->drq_chip = MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX));
1474 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
1475 sc->drq_us = sc->drq_chip;
1476
1477 bzero(&sc->dtq, sizeof(sc->dtq));
1478 sc->dtq_free = MID_DTQ_N - 1;
1479 sc->dtq_chip = MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX));
1480	en_write(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_chip));
1481 sc->dtq_us = sc->dtq_chip;
1482
1483 sc->hwslistp = MID_SL_REG2A(en_read(sc, MID_SERV_WRITE));
1484 sc->swsl_size = sc->swsl_head = sc->swsl_tail = 0;
1485
1486 DBG(sc, INIT, ("drq free/chip: %d/0x%x, dtq free/chip: %d/0x%x, "
1487 "hwslist: 0x%x", sc->drq_free, sc->drq_chip, sc->dtq_free,
1488 sc->dtq_chip, sc->hwslistp));
1489
1490 for (slot = 0 ; slot < EN_NTX ; slot++) {
1491 sc->txslot[slot].bfree = EN_TXSZ * 1024;
1492 en_write(sc, MIDX_READPTR(slot), 0);
1493 en_write(sc, MIDX_DESCSTART(slot), 0);
1494 loc = sc->txslot[slot].cur = sc->txslot[slot].start;
1495 loc = loc - MID_RAMOFF;
1496 /* mask, cvt to words */
1497 loc = (loc & ~((EN_TXSZ * 1024) - 1)) >> 2;
1498 /* top 11 bits */
1499 loc = loc >> MIDV_LOCTOPSHFT;
1500 en_write(sc, MIDX_PLACE(slot), MIDX_MKPLACE(en_k2sz(EN_TXSZ),
1501 loc));
1502 DBG(sc, INIT, ("tx%d: place 0x%x", slot,
1503 (u_int)en_read(sc, MIDX_PLACE(slot))));
1504 }
1505
1506 for (vc = 0; vc < MID_N_VC; vc++)
1507 if (sc->vccs[vc] != NULL)
1508 en_loadvc(sc, sc->vccs[vc]);
1509
1510 /*
1511 * enable!
1512 */
1513 en_write(sc, MID_INTENA, MID_INT_TX | MID_INT_DMA_OVR | MID_INT_IDENT |
1514 MID_INT_LERR | MID_INT_DMA_ERR | MID_INT_DMA_RX | MID_INT_DMA_TX |
1515 MID_INT_SERVICE | MID_INT_SUNI | MID_INT_STATS);
1516 en_write(sc, MID_MAST_CSR, MID_SETIPL(sc->ipl) | MID_MCSR_ENDMA |
1517 MID_MCSR_ENTX | MID_MCSR_ENRX);
1518}
1519
1520/*********************************************************************/
1521/*
1522 * Ioctls
1523 */
1524/*
1525 * en_ioctl: handle ioctl requests
1526 *
1527 * NOTE: if you add an ioctl to set txspeed, you should choose a new
1528 * TX channel/slot. Choose the one with the lowest sc->txslot[slot].nref
1529	 * value, subtract one from the old slot's nref, add one to the new
1530	 * sc->txslot[slot].nref, point vc->txslot at the new slot, and then
1531	 * set vc->txspeed.
1532 *
1533 * LOCK: unlocked, needed
1534 */
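/*
 * A sketch of that reassignment (hypothetical - no such ioctl exists in
 * this driver; names follow the structures used below):
 *
 *	slot = 0;
 *	for (i = 1; i < EN_NTX; i++)
 *		if (sc->txslot[i].nref < sc->txslot[slot].nref)
 *			slot = i;
 *	vc->txslot->nref--;
 *	vc->txslot = &sc->txslot[slot];
 *	vc->txslot->nref++;
 *	vc->txspeed = newspeed;
 */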
1535static int
1536en_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1537{
1538 struct en_softc *sc = (struct en_softc *)ifp->if_softc;
1539 struct ifaddr *ifa = (struct ifaddr *)data;
1540 struct ifreq *ifr = (struct ifreq *)data;
1541 struct atmio_vcctable *vtab;
1542 int error = 0;
1543
1544 switch (cmd) {
1545
1546 case SIOCSIFADDR:
1547 EN_LOCK(sc);
1548 ifp->if_flags |= IFF_UP;
1549#if defined(INET) || defined(INET6)
1550 if (ifa->ifa_addr->sa_family == AF_INET
1551 || ifa->ifa_addr->sa_family == AF_INET6) {
1552 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1553 en_reset_ul(sc);
1554 en_init(sc);
1555 }
1556 ifa->ifa_rtrequest = atm_rtrequest; /* ??? */
1557 EN_UNLOCK(sc);
1558 break;
1559 }
1560#endif /* INET || INET6 */
1561 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1562 en_reset_ul(sc);
1563 en_init(sc);
1564 }
1565 EN_UNLOCK(sc);
1566 break;
1567
1568 case SIOCSIFFLAGS:
1569 EN_LOCK(sc);
1570 if (ifp->if_flags & IFF_UP) {
1571 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1572 en_init(sc);
1573 } else {
1574 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1575 en_reset_ul(sc);
1576 }
1577 EN_UNLOCK(sc);
1578 break;
1579
1580 case SIOCSIFMTU:
1581 /*
1582 * Set the interface MTU.
1583 */
1584 if (ifr->ifr_mtu > ATMMTU) {
1585 error = EINVAL;
1586 break;
1587 }
1588 ifp->if_mtu = ifr->ifr_mtu;
1589 break;
1590
1591 case SIOCSIFMEDIA:
1592 case SIOCGIFMEDIA:
1593 error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
1594 break;
1595
1596 case SIOCATMOPENVCC: /* kernel internal use */
1597 error = en_open_vcc(sc, (struct atmio_openvcc *)data);
1598 break;
1599
1600 case SIOCATMCLOSEVCC: /* kernel internal use */
1601 error = en_close_vcc(sc, (struct atmio_closevcc *)data);
1602 break;
1603
1604 case SIOCATMGETVCCS: /* internal netgraph use */
1605 vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
1606 MID_N_VC, sc->vccs_open, &sc->en_mtx, 0);
1607 if (vtab == NULL) {
1608 error = ENOMEM;
1609 break;
1610 }
1611 *(void **)data = vtab;
1612 break;
1613
1614 case SIOCATMGVCCS: /* return vcc table */
1615 vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
1616 MID_N_VC, sc->vccs_open, &sc->en_mtx, 1);
1617 error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
1618 vtab->count * sizeof(vtab->vccs[0]));
1619 free(vtab, M_DEVBUF);
1620 break;
1621
1622 default:
1623 error = EINVAL;
1624 break;
1625 }
1626 return (error);
1627}
1628
1629/*********************************************************************/
1630/*
1631 * Sysctl's
1632 */
1633
1634/*
1635 * Sysctl handler for internal statistics
1636 *
1637 * LOCK: unlocked, needed
1638 */
1639static int
1640en_sysctl_istats(SYSCTL_HANDLER_ARGS)
1641{
1642 struct en_softc *sc = arg1;
1643 uint32_t *ret;
1644 int error;
1645
1646 ret = malloc(sizeof(sc->stats), M_TEMP, M_WAITOK);
1647
1648 EN_LOCK(sc);
1649 bcopy(&sc->stats, ret, sizeof(sc->stats));
1650 EN_UNLOCK(sc);
1651
1652 error = SYSCTL_OUT(req, ret, sizeof(sc->stats));
1653 free(ret, M_TEMP);
1654
1655 return (error);
1656}
1657
1658/*********************************************************************/
1659/*
1660 * Interrupts
1661 */
1662
1663/*
1664 * Transmit interrupt handler
1665 *
1666 * check for tx complete, if detected then this means that some space
1667 * has come free on the card. we must account for it and arrange to
1668 * kick the channel to life (in case it is stalled waiting on the card).
1669 *
1670 * LOCK: locked, needed
1671 */
1672static uint32_t
1673en_intr_tx(struct en_softc *sc, uint32_t reg)
1674{
1675 uint32_t kick;
1676 uint32_t mask;
1677 uint32_t val;
1678 int chan;
1679
1680 kick = 0; /* bitmask of channels to kick */
1681
1682 for (mask = 1, chan = 0; chan < EN_NTX; chan++, mask *= 2) {
1683 if (!(reg & MID_TXCHAN(chan)))
1684 continue;
1685
1686 kick = kick | mask;
1687
1688 /* current read pointer */
1689 val = en_read(sc, MIDX_READPTR(chan));
1690 /* as offset */
1691 val = (val * sizeof(uint32_t)) + sc->txslot[chan].start;
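		/*
		 * Free space is (readptr - cur) modulo the buffer size.
		 * Illustrative example with an 8K slot: cur at start+0x1f00
		 * and a read pointer at start+0x0100 wrap to 0x0200 free
		 * bytes.
		 */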
1692 if (val > sc->txslot[chan].cur)
1693 sc->txslot[chan].bfree = val - sc->txslot[chan].cur;
1694 else
1695 sc->txslot[chan].bfree = (val + (EN_TXSZ * 1024)) -
1696 sc->txslot[chan].cur;
1697 DBG(sc, INTR, ("tx%d: transmit done. %d bytes now free in "
1698 "buffer", chan, sc->txslot[chan].bfree));
1699 }
1700 return (kick);
1701}
1702
1703/*
1704 * TX DMA interrupt
1705 *
1706 * check for TX DMA complete, if detected then this means
1707 * that some DTQs are now free. it also means some indma
1708 * mbufs can be freed. if we needed DTQs, kick all channels.
1709 *
1710 * LOCK: locked, needed
1711 */
1712static uint32_t
1713en_intr_tx_dma(struct en_softc *sc)
1714{
1715 uint32_t kick = 0;
1716 uint32_t val;
1717 uint32_t idx;
1718 uint32_t slot;
1719 uint32_t dtq;
1720 struct en_map *map;
1721 struct mbuf *m;
1722
1723 val = en_read(sc, MID_DMA_RDTX); /* chip's current location */
1724 idx = MID_DTQ_A2REG(sc->dtq_chip); /* where we last saw chip */
1725
1726 if (sc->need_dtqs) {
1727 kick = MID_NTX_CH - 1; /* assume power of 2, kick all! */
1728 sc->need_dtqs = 0; /* recalculated in "kick" loop below */
1729 DBG(sc, INTR, ("cleared need DTQ condition"));
1730 }
1731
1732 while (idx != val) {
1733 sc->dtq_free++;
1734 if ((dtq = sc->dtq[idx]) != 0) {
1735 /* don't forget to zero it out when done */
1736 sc->dtq[idx] = 0;
1737 slot = EN_DQ_SLOT(dtq);
1738
1739 _IF_DEQUEUE(&sc->txslot[slot].indma, m);
1740 if (m == NULL)
1741 panic("enintr: dtqsync");
1742 map = (void *)m->m_pkthdr.rcvif;
1743 uma_zfree(sc->map_zone, map);
1744 m_freem(m);
1745
1746 sc->txslot[slot].mbsize -= EN_DQ_LEN(dtq);
1747 DBG(sc, INTR, ("tx%d: free %d dma bytes, mbsize now "
1748 "%d", slot, EN_DQ_LEN(dtq),
1749 sc->txslot[slot].mbsize));
1750 }
1751 EN_WRAPADD(0, MID_DTQ_N, idx, 1);
1752 }
1753 sc->dtq_chip = MID_DTQ_REG2A(val); /* sync softc */
1754
1755 return (kick);
1756}
1757
1758/*
1759 * Service interrupt
1760 *
1761 * LOCK: locked, needed
1762 */
1763static int
1764en_intr_service(struct en_softc *sc)
1765{
1766 uint32_t chip;
1767 uint32_t vci;
1768 int need_softserv = 0;
1769 struct en_vcc *vc;
1770
1771 chip = MID_SL_REG2A(en_read(sc, MID_SERV_WRITE));
1772
1773 while (sc->hwslistp != chip) {
1774 /* fetch and remove it from hardware service list */
1775 vci = en_read(sc, sc->hwslistp);
1776 EN_WRAPADD(MID_SLOFF, MID_SLEND, sc->hwslistp, 4);
1777
1778 if ((vc = sc->vccs[vci]) == NULL ||
1779 (vc->vcc.flags & ATMIO_FLAG_NORX)) {
1780 DBG(sc, INTR, ("unexpected rx interrupt VCI %d", vci));
1781 en_write(sc, MID_VC(vci), MIDV_TRASH); /* rx off */
1782 continue;
1783 }
1784
1785 /* remove from hwsl */
1786 en_write(sc, MID_VC(vci), vc->rxslot->mode);
1787 EN_COUNT(sc->stats.hwpull);
1788
1789 DBG(sc, INTR, ("pulled VCI %d off hwslist", vci));
1790
1791 /* add it to the software service list (if needed) */
1792 if ((vc->vflags & VCC_SWSL) == 0) {
1793 EN_COUNT(sc->stats.swadd);
1794 need_softserv = 1;
1795 vc->vflags |= VCC_SWSL;
1796 sc->swslist[sc->swsl_tail] = vci;
1797 EN_WRAPADD(0, MID_SL_N, sc->swsl_tail, 1);
1798 sc->swsl_size++;
1799 DBG(sc, INTR, ("added VCI %d to swslist", vci));
1800 }
1801 }
1802 return (need_softserv);
1803}
1804
1805/*
1806 * Handle a receive DMA completion
1807 */
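/*
 * The drq argument packs the receive slot number and the byte count
 * via EN_DQ_MK(); a zero length denotes a "JK" trash DMA that has no
 * mbuf attached.
 */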
1808static void
1809en_rx_drain(struct en_softc *sc, u_int drq)
1810{
1811 struct en_rxslot *slot;
1812 struct en_vcc *vc;
1813 struct mbuf *m;
1814 struct atm_pseudohdr ah;
1815
1816 slot = &sc->rxslot[EN_DQ_SLOT(drq)];
1817
1818 m = NULL; /* assume "JK" trash DMA */
1819 if (EN_DQ_LEN(drq) != 0) {
1820 _IF_DEQUEUE(&slot->indma, m);
1821 KASSERT(m != NULL, ("drqsync: %s: lost mbuf in slot %td!",
1822 sc->ifp->if_xname, slot - sc->rxslot));
1823 uma_zfree(sc->map_zone, (struct en_map *)m->m_pkthdr.rcvif);
1824 }
1825 if ((vc = slot->vcc) == NULL) {
1826		/* oops - no VCC on this slot anymore */
1827 if (m != NULL)
1828 m_freem(m);
1829 return;
1830 }
1831
1832 /* do something with this mbuf */
1833 if (vc->vflags & VCC_DRAIN) {
1834 /* drain? */
1835 if (m != NULL)
1836 m_freem(m);
1837 if (_IF_QLEN(&slot->indma) == 0 && _IF_QLEN(&slot->q) == 0 &&
1838 (en_read(sc, MID_VC(vc->vcc.vci)) & MIDV_INSERVICE) == 0 &&
1839 (vc->vflags & VCC_SWSL) == 0) {
1840 vc->vflags &= ~VCC_CLOSE_RX;
1841 if (vc->vcc.flags & ATMIO_FLAG_ASYNC)
1842 en_close_finish(sc, vc);
1843 else
1844 cv_signal(&sc->cv_close);
1845 }
1846 return;
1847 }
1848
1849 if (m != NULL) {
1850 ATM_PH_FLAGS(&ah) = vc->vcc.flags;
1851 ATM_PH_VPI(&ah) = 0;
1852 ATM_PH_SETVCI(&ah, vc->vcc.vci);
1853
1854 DBG(sc, INTR, ("rx%td: rxvci%d: atm_input, mbuf %p, len %d, "
1855 "hand %p", slot - sc->rxslot, vc->vcc.vci, m,
1856 EN_DQ_LEN(drq), vc->rxhand));
1857
1858 m->m_pkthdr.rcvif = sc->ifp;
1859 sc->ifp->if_ipackets++;
1860
1861 vc->ipackets++;
1862 vc->ibytes += m->m_pkthdr.len;
1863
1864#ifdef EN_DEBUG
1865 if (sc->debug & DBG_IPACKETS)
1866 en_dump_packet(sc, m);
1867#endif
1868#ifdef ENABLE_BPF
1869 BPF_MTAP(sc->ifp, m);
1870#endif
1871 EN_UNLOCK(sc);
1872 atm_input(sc->ifp, &ah, m, vc->rxhand);
1873 EN_LOCK(sc);
1874 }
1875}
1876
1877/*
1878 * check for RX DMA complete, and pass the data "upstairs"
1879 *
1880 * LOCK: locked, needed
1881 */
1882static int
1883en_intr_rx_dma(struct en_softc *sc)
1884{
1885 uint32_t val;
1886 uint32_t idx;
1887 uint32_t drq;
1888
1889 val = en_read(sc, MID_DMA_RDRX); /* chip's current location */
1890 idx = MID_DRQ_A2REG(sc->drq_chip); /* where we last saw chip */
1891
1892 while (idx != val) {
1893 sc->drq_free++;
1894 if ((drq = sc->drq[idx]) != 0) {
1895 /* don't forget to zero it out when done */
1896 sc->drq[idx] = 0;
1897 en_rx_drain(sc, drq);
1898 }
1899 EN_WRAPADD(0, MID_DRQ_N, idx, 1);
1900 }
1901 sc->drq_chip = MID_DRQ_REG2A(val); /* sync softc */
1902
1903 if (sc->need_drqs) {
1904 /* true if we had a DRQ shortage */
1905 sc->need_drqs = 0;
1906 DBG(sc, INTR, ("cleared need DRQ condition"));
1907 return (1);
1908 } else
1909 return (0);
1910}
1911
1912/*
1913	 * en_mget: get an mbuf chain that can hold pktlen bytes and return it
1914	 * (for recv). For the actual allocation pktlen is rounded up to a multiple
1915	 * of 4 (totlen). We also ensure that each mbuf's length is a multiple of 4.
1916 *
1917 * After this call the sum of all the m_len's in the chain will be totlen.
1918 * This is called at interrupt time, so we can't wait here.
1919 *
1920 * LOCK: any, not needed
1921 */
1922static struct mbuf *
1923en_mget(struct en_softc *sc, u_int pktlen)
1924{
1925 struct mbuf *m, *tmp;
1926 u_int totlen, pad;
1927
1928 totlen = roundup(pktlen, sizeof(uint32_t));
1929 pad = totlen - pktlen;
1930
1931 /*
1932 * First get an mbuf with header. Keep space for a couple of
1933	 * words at the beginning.
1934 */
1935 /* called from interrupt context */
1936 MGETHDR(m, M_DONTWAIT, MT_DATA);
1937 if (m == NULL)
1938 return (NULL);
1939
1940 m->m_pkthdr.rcvif = NULL;
1941 m->m_pkthdr.len = pktlen;
1942 m->m_len = EN_RX1BUF;
1943 MH_ALIGN(m, EN_RX1BUF);
1944 if (m->m_len >= totlen) {
1945 m->m_len = totlen;
1946
1947 } else {
1948 totlen -= m->m_len;
1949
1950 /* called from interrupt context */
1951 tmp = m_getm(m, totlen, M_DONTWAIT, MT_DATA);
1952 if (tmp == NULL) {
1953 m_free(m);
1954 return (NULL);
1955 }
1956 tmp = m->m_next;
1957 /* m_getm could do this for us */
1958 while (tmp != NULL) {
1959 tmp->m_len = min(MCLBYTES, totlen);
1960 totlen -= tmp->m_len;
1961 tmp = tmp->m_next;
1962 }
1963 }
1964
1965 return (m);
1966}
1967
1968/*
1969 * Argument for RX DMAMAP loader.
1970 */
1971struct rxarg {
1972 struct en_softc *sc;
1973 struct mbuf *m;
1974 u_int pre_skip; /* number of bytes to skip at begin */
1975 u_int post_skip; /* number of bytes to skip at end */
1976 struct en_vcc *vc; /* vc we are receiving on */
1977 int wait; /* wait for DRQ entries */
1978};
1979
1980/*
1981	 * Build receive DMA queue (DRQ) entries for the segment table we were
1982	 * handed and write them out to the card.
1983 *
1984 * LOCK: locked, needed
1985 */
1986static void
1987en_rxdma_load(void *uarg, bus_dma_segment_t *segs, int nseg,
1988 bus_size_t mapsize, int error)
1989{
1990 struct rxarg *rx = uarg;
1991 struct en_softc *sc = rx->sc;
1992 struct en_rxslot *slot = rx->vc->rxslot;
1993 u_int free; /* number of free DRQ entries */
1994 uint32_t cur; /* current buffer offset */
1995 uint32_t drq; /* DRQ entry pointer */
1996 uint32_t last_drq; /* where we have written last */
1997 u_int needalign, cnt, count, bcode;
1998 bus_addr_t addr;
1999 bus_size_t rest;
2000 int i;
2001
2002 if (error != 0)
2003 return;
2004 if (nseg > EN_MAX_DMASEG)
2005 panic("too many DMA segments");
2006
2007 rx->wait = 0;
2008
2009 free = sc->drq_free;
2010 drq = sc->drq_us;
2011 cur = slot->cur;
2012
2013 last_drq = 0;
2014
2015 /*
2016 * Local macro to add an entry to the receive DMA area. If there
2017 * are no entries left, return. Save the byte offset of the entry
2018 * in last_drq for later use.
2019 */
2020#define PUT_DRQ_ENTRY(ENI, BCODE, COUNT, ADDR) \
2021 if (free == 0) { \
2022 EN_COUNT(sc->stats.rxdrqout); \
2023 rx->wait = 1; \
2024 return; \
2025 } \
2026 last_drq = drq; \
2027 en_write(sc, drq + 0, (ENI || !sc->is_adaptec) ? \
2028 MID_MK_RXQ_ENI(COUNT, rx->vc->vcc.vci, 0, BCODE) : \
2029 MID_MK_RXQ_ADP(COUNT, rx->vc->vcc.vci, 0, BCODE)); \
2030 en_write(sc, drq + 4, ADDR); \
2031 \
2032 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, drq, 8); \
2033 free--;
2034
2035 /*
2036 * Local macro to generate a DMA entry to DMA cnt bytes. Updates
2037 * the current buffer byte offset accordingly.
2038 */
2039#define DO_DRQ(TYPE) do { \
2040 rest -= cnt; \
2041 EN_WRAPADD(slot->start, slot->stop, cur, cnt); \
2042 DBG(sc, SERV, ("rx%td: "TYPE" %u bytes, %ju left, cur %#x", \
2043 slot - sc->rxslot, cnt, (uintmax_t)rest, cur)); \
2044 \
2045 PUT_DRQ_ENTRY(1, bcode, count, addr); \
2046 \
2047 addr += cnt; \
2048 } while (0)
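	/*
	 * The segment loop below splits each DMA segment into up to three
	 * pieces: an aligning burst, a run of maximum-sized bursts and a
	 * cleanup burst. Illustrative example (assuming a best burst of
	 * 64 bytes): a 120-byte segment starting 8 bytes past a 64-byte
	 * boundary becomes a 56-byte aligning DMA followed by a single
	 * 64-byte burst.
	 */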
2049
2050 /*
2051 * Skip the RBD at the beginning
2052 */
2053 if (rx->pre_skip > 0) {
2054 /* update DMA address */
2055 EN_WRAPADD(slot->start, slot->stop, cur, rx->pre_skip);
2056
2057 PUT_DRQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
2058 }
2059
2060 for (i = 0; i < nseg; i++, segs++) {
2061 addr = segs->ds_addr;
2062 rest = segs->ds_len;
2063
2064 if (sc->is_adaptec) {
2065 /* adaptec card - simple */
2066
2067 /* advance the on-card buffer pointer */
2068 EN_WRAPADD(slot->start, slot->stop, cur, rest);
2069 DBG(sc, SERV, ("rx%td: adp %ju bytes %#jx "
2070 "(cur now 0x%x)", slot - sc->rxslot,
2071 (uintmax_t)rest, (uintmax_t)addr, cur));
2072
2073 PUT_DRQ_ENTRY(0, 0, rest, addr);
2074
2075 continue;
2076 }
2077
2078 /*
2079 * do we need to do a DMA op to align to the maximum
2080	 * burst? Note that we are always 32-bit aligned.
2081 */
2082 if (sc->alburst &&
2083 (needalign = (addr & sc->bestburstmask)) != 0) {
2084 /* compute number of bytes, words and code */
2085 cnt = sc->bestburstlen - needalign;
2086 if (cnt > rest)
2087 cnt = rest;
2088 count = cnt / sizeof(uint32_t);
2089 if (sc->noalbursts) {
2090 bcode = MIDDMA_WORD;
2091 } else {
2092 bcode = en_dmaplan[count].bcode;
2093 count = cnt >> en_dmaplan[count].divshift;
2094 }
2095 DO_DRQ("al_dma");
2096 }
2097
2098 /* do we need to do a max-sized burst? */
2099 if (rest >= sc->bestburstlen) {
2100 count = rest >> sc->bestburstshift;
2101 cnt = count << sc->bestburstshift;
2102 bcode = sc->bestburstcode;
2103 DO_DRQ("best_dma");
2104 }
2105
2106 /* do we need to do a cleanup burst? */
2107 if (rest != 0) {
2108 cnt = rest;
2109 count = rest / sizeof(uint32_t);
2110 if (sc->noalbursts) {
2111 bcode = MIDDMA_WORD;
2112 } else {
2113 bcode = en_dmaplan[count].bcode;
2114 count = cnt >> en_dmaplan[count].divshift;
2115 }
2116 DO_DRQ("clean_dma");
2117 }
2118 }
2119
2120 /*
2121 * Skip stuff at the end
2122 */
2123 if (rx->post_skip > 0) {
2124 /* update DMA address */
2125 EN_WRAPADD(slot->start, slot->stop, cur, rx->post_skip);
2126
2127 PUT_DRQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
2128 }
2129
2130 /* record the end for the interrupt routine */
2131 sc->drq[MID_DRQ_A2REG(last_drq)] =
2132 EN_DQ_MK(slot - sc->rxslot, rx->m->m_pkthdr.len);
2133
2134 /* set the end flag in the last descriptor */
2135 en_write(sc, last_drq + 0, SETQ_END(sc, en_read(sc, last_drq + 0)));
2136
2137#undef PUT_DRQ_ENTRY
2138#undef DO_DRQ
2139
2140 /* commit */
2141 slot->cur = cur;
2142 sc->drq_free = free;
2143 sc->drq_us = drq;
2144
2145 /* signal to card */
2146 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_us));
2147}
2148
2149/*
2150 * en_service: handle a service interrupt
2151 *
2152 * Q: why do we need a software service list?
2153 *
2154 * A: if we remove a VCI from the hardware list and we find that we are
2155 * out of DRQs we must defer processing until some DRQs become free.
2156 * so we must remember to look at this RX VCI/slot later, but we can't
2157 * put it back on the hardware service list (since that isn't allowed).
2158 * so we instead save it on the software service list. it would be nice
2159 * if we could peek at the VCI on top of the hwservice list without removing
2160 * it, however this leads to a race condition: if we peek at it and
2161 * decide we are done with it new data could come in before we have a
2162 * chance to remove it from the hwslist. by the time we get it out of
2163 * the list the interrupt for the new data will be lost. oops!
2164 *
2165 * LOCK: locked, needed
2166 */
2167static void
2168en_service(struct en_softc *sc)
2169{
2170 struct mbuf *m, *lastm;
2171 struct en_map *map;
2172 struct rxarg rx;
2173 uint32_t cur;
2174 uint32_t dstart; /* data start (as reported by card) */
2175 uint32_t rbd; /* receive buffer descriptor */
2176 uint32_t pdu; /* AAL5 trailer */
2177 int mlen;
2178 int error;
2179 struct en_rxslot *slot;
2180 struct en_vcc *vc;
2181
2182 rx.sc = sc;
2183
2184 next_vci:
2185 if (sc->swsl_size == 0) {
2186 DBG(sc, SERV, ("en_service done"));
2187 return;
2188 }
2189
2190 /*
2191 * get vcc to service
2192 */
2193 rx.vc = vc = sc->vccs[sc->swslist[sc->swsl_head]];
2194 slot = vc->rxslot;
2195 KASSERT (slot->vcc->rxslot == slot, ("en_service: rx slot/vci sync"));
2196
2197 /*
2198 * determine our mode and if we've got any work to do
2199 */
2200 DBG(sc, SERV, ("rx%td: service vci=%d start/stop/cur=0x%x 0x%x "
2201 "0x%x", slot - sc->rxslot, vc->vcc.vci, slot->start,
2202 slot->stop, slot->cur));
2203
2204 same_vci:
2205 cur = slot->cur;
2206
2207 dstart = MIDV_DSTART(en_read(sc, MID_DST_RP(vc->vcc.vci)));
2208 dstart = (dstart * sizeof(uint32_t)) + slot->start;
2209
2210 /* check to see if there is any data at all */
2211 if (dstart == cur) {
2212 EN_WRAPADD(0, MID_SL_N, sc->swsl_head, 1);
2213 /* remove from swslist */
2214 vc->vflags &= ~VCC_SWSL;
2215 sc->swsl_size--;
2216 DBG(sc, SERV, ("rx%td: remove vci %d from swslist",
2217 slot - sc->rxslot, vc->vcc.vci));
2218 goto next_vci;
2219 }
2220
2221 /*
2222 * figure out how many bytes we need
2223 * [mlen = # bytes to go in mbufs]
2224 */
2225 rbd = en_read(sc, cur);
2226 if (MID_RBD_ID(rbd) != MID_RBD_STDID)
2227 panic("en_service: id mismatch");
2228
2229 if (rbd & MID_RBD_T) {
2230 mlen = 0; /* we've got trash */
2231 rx.pre_skip = MID_RBD_SIZE;
2232 rx.post_skip = 0;
2233 EN_COUNT(sc->stats.ttrash);
2234 DBG(sc, SERV, ("RX overflow lost %d cells!", MID_RBD_CNT(rbd)));
2235
2236 } else if (vc->vcc.aal != ATMIO_AAL_5) {
2237 /* 1 cell (ick!) */
2238 mlen = MID_CHDR_SIZE + MID_ATMDATASZ;
2239 rx.pre_skip = MID_RBD_SIZE;
2240 rx.post_skip = 0;
2241
2242 } else {
2243 rx.pre_skip = MID_RBD_SIZE;
2244
2245 /* get PDU trailer in correct byte order */
2246 pdu = cur + MID_RBD_CNT(rbd) * MID_ATMDATASZ +
2247 MID_RBD_SIZE - MID_PDU_SIZE;
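		/*
		 * The trailer occupies the last MID_PDU_SIZE bytes of the
		 * packet's last cell; the on-card buffer is circular, so
		 * the computed offset may lie past the end and is wrapped
		 * below.
		 */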
2248 if (pdu >= slot->stop)
2249 pdu -= EN_RXSZ * 1024;
2250 pdu = en_read(sc, pdu);
2251
2252 if (MID_RBD_CNT(rbd) * MID_ATMDATASZ <
2253 MID_PDU_LEN(pdu)) {
2254 device_printf(sc->dev, "invalid AAL5 length\n");
2255 rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ;
2256 mlen = 0;
2257 sc->ifp->if_ierrors++;
2258
2259 } else if (rbd & MID_RBD_CRCERR) {
2260 device_printf(sc->dev, "CRC error\n");
2261 rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ;
2262 mlen = 0;
2263 sc->ifp->if_ierrors++;
2264
2265 } else {
2266 mlen = MID_PDU_LEN(pdu);
2267 rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ - mlen;
2268 }
2269 }
2270
2271 /*
2272 * now allocate mbufs for mlen bytes of data, if out of mbufs, trash all
2273 *
2274 * notes:
2275 * 1. it is possible that we've already allocated an mbuf for this pkt
2276 * but ran out of DRQs, in which case we saved the allocated mbuf
2277 * on "q".
2278	 * 2. if we save a buf in "q" we store the "cur" (pointer) in the
2279 * buf as an identity (that we can check later).
2280 * 3. after this block of code, if m is still NULL then we ran out of
2281 * mbufs
2282 */
2283 _IF_DEQUEUE(&slot->q, m);
2284 if (m != NULL) {
2285 if (m->m_pkthdr.csum_data != cur) {
2286 /* wasn't ours */
2287 DBG(sc, SERV, ("rx%td: q'ed buf %p not ours",
2288 slot - sc->rxslot, m));
2289 _IF_PREPEND(&slot->q, m);
2290 m = NULL;
2291 EN_COUNT(sc->stats.rxqnotus);
2292 } else {
2293 EN_COUNT(sc->stats.rxqus);
2294 DBG(sc, SERV, ("rx%td: recovered q'ed buf %p",
2295 slot - sc->rxslot, m));
2296 }
2297 }
2298 if (mlen == 0 && m != NULL) {
2299 /* should not happen */
2300 m_freem(m);
2301 m = NULL;
2302 }
2303
2304 if (mlen != 0 && m == NULL) {
2305 m = en_mget(sc, mlen);
2306 if (m == NULL) {
2307 rx.post_skip += mlen;
2308 mlen = 0;
2309 EN_COUNT(sc->stats.rxmbufout);
2310 DBG(sc, SERV, ("rx%td: out of mbufs",
2311 slot - sc->rxslot));
2312 } else
2313 rx.post_skip -= roundup(mlen, sizeof(uint32_t)) - mlen;
2314
2315 DBG(sc, SERV, ("rx%td: allocate buf %p, mlen=%d",
2316 slot - sc->rxslot, m, mlen));
2317 }
2318
2319 DBG(sc, SERV, ("rx%td: VCI %d, rbuf %p, mlen %d, skip %u/%u",
2320 slot - sc->rxslot, vc->vcc.vci, m, mlen, rx.pre_skip,
2321 rx.post_skip));
2322
2323 if (m != NULL) {
2324 /* M_NOWAIT - called from interrupt context */
2325 map = uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT);
2326 if (map == NULL) {
2327 rx.post_skip += mlen;
2328 m_freem(m);
2329 DBG(sc, SERV, ("rx%td: out of maps",
2330 slot - sc->rxslot));
2331 goto skip;
2332 }
2333 rx.m = m;
2334 error = bus_dmamap_load_mbuf(sc->txtag, map->map, m,
2335 en_rxdma_load, &rx, BUS_DMA_NOWAIT);
2336
2337 if (error != 0) {
2338 device_printf(sc->dev, "loading RX map failed "
2339 "%d\n", error);
2340 uma_zfree(sc->map_zone, map);
2341 m_freem(m);
2342 rx.post_skip += mlen;
2343 goto skip;
2344
2345 }
2346 map->flags |= ENMAP_LOADED;
2347
2348 if (rx.wait) {
2349 /* out of DRQs - wait */
2350 uma_zfree(sc->map_zone, map);
2351
2352 m->m_pkthdr.csum_data = cur;
2353 _IF_ENQUEUE(&slot->q, m);
2354 EN_COUNT(sc->stats.rxdrqout);
2355
2356 sc->need_drqs = 1; /* flag condition */
2357 return;
2358
2359 }
2360 (void)m_length(m, &lastm);
2361 lastm->m_len -= roundup(mlen, sizeof(uint32_t)) - mlen;
2362
2363 m->m_pkthdr.rcvif = (void *)map;
2364 _IF_ENQUEUE(&slot->indma, m);
2365
2366 /* get next packet in this slot */
2367 goto same_vci;
2368 }
2369 skip:
2370 /*
2371	 * Here we end up if we should drop the packet from the receive buffer.
2372	 * The number of bytes to drop is rx.pre_skip + rx.post_skip (summed
2373	 * below). We can do this with one JK entry; if we don't have even one - wait.
2374 */
2375 if (sc->drq_free == 0) {
2376 sc->need_drqs = 1; /* flag condition */
2377 return;
2378 }
2379 rx.post_skip += rx.pre_skip;
2380 DBG(sc, SERV, ("rx%td: skipping %u", slot - sc->rxslot, rx.post_skip));
2381
2382 /* advance buffer address */
2383 EN_WRAPADD(slot->start, slot->stop, cur, rx.post_skip);
2384
2385 /* write DRQ entry */
2386 if (sc->is_adaptec)
2387 en_write(sc, sc->drq_us,
2388 MID_MK_RXQ_ADP(WORD_IDX(slot->start, cur),
2389 vc->vcc.vci, MID_DMA_END, MIDDMA_JK));
2390 else
2391 en_write(sc, sc->drq_us,
2392 MID_MK_RXQ_ENI(WORD_IDX(slot->start, cur),
2393 vc->vcc.vci, MID_DMA_END, MIDDMA_JK));
2394 en_write(sc, sc->drq_us + 4, 0);
2395 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_us, 8);
2396 sc->drq_free--;
2397
2398 /* signal to RX interrupt */
2399 sc->drq[MID_DRQ_A2REG(sc->drq_us)] = EN_DQ_MK(slot - sc->rxslot, 0);
2400 slot->cur = cur;
2401
2402 /* signal to card */
2403 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_us));
2404
2405 goto same_vci;
2406}
2407
2408/*
2409 * interrupt handler
2410 *
2411 * LOCK: unlocked, needed
2412 */
2413void
2414en_intr(void *arg)
2415{
2416 struct en_softc *sc = arg;
2417 uint32_t reg, kick, mask;
2418 int lcv, need_softserv;
2419
2420 EN_LOCK(sc);
2421
2422 reg = en_read(sc, MID_INTACK);
2423 DBG(sc, INTR, ("interrupt=0x%b", reg, MID_INTBITS));
2424
2425 if ((reg & MID_INT_ANY) == 0) {
2426 EN_UNLOCK(sc);
2427 return;
2428 }
2429
2430 /*
2431 * unexpected errors that need a reset
2432 */
2433 if ((reg & (MID_INT_IDENT | MID_INT_LERR | MID_INT_DMA_ERR)) != 0) {
2434 device_printf(sc->dev, "unexpected interrupt=0x%b, "
2435 "resetting\n", reg, MID_INTBITS);
2436#ifdef EN_DEBUG
2437 panic("en: unexpected error");
2438#else
2439 en_reset_ul(sc);
2440 en_init(sc);
2441#endif
2442 EN_UNLOCK(sc);
2443 return;
2444 }
2445
2446 if (reg & MID_INT_SUNI)
2447 utopia_intr(&sc->utopia);
2448
2449 kick = 0;
2450 if (reg & MID_INT_TX)
2451 kick |= en_intr_tx(sc, reg);
2452
2453 if (reg & MID_INT_DMA_TX)
2454 kick |= en_intr_tx_dma(sc);
2455
2456 /*
2457 * kick xmit channels as needed.
2458 */
2459 if (kick) {
2460 DBG(sc, INTR, ("tx kick mask = 0x%x", kick));
2461 for (mask = 1, lcv = 0 ; lcv < EN_NTX ; lcv++, mask = mask * 2)
2462 if ((kick & mask) && _IF_QLEN(&sc->txslot[lcv].q) != 0)
2463 en_txdma(sc, &sc->txslot[lcv]);
2464 }
2465
2466 need_softserv = 0;
2467 if (reg & MID_INT_DMA_RX)
2468 need_softserv |= en_intr_rx_dma(sc);
2469
2470 if (reg & MID_INT_SERVICE)
2471 need_softserv |= en_intr_service(sc);
2472
2473 if (need_softserv)
2474 en_service(sc);
2475
2476 /*
2477 * keep our stats
2478 */
2479 if (reg & MID_INT_DMA_OVR) {
2480 EN_COUNT(sc->stats.dmaovr);
2481 DBG(sc, INTR, ("MID_INT_DMA_OVR"));
2482 }
2483 reg = en_read(sc, MID_STAT);
2484 sc->stats.otrash += MID_OTRASH(reg);
2485 sc->stats.vtrash += MID_VTRASH(reg);
2486
2487 EN_UNLOCK(sc);
2488}
2489
2490/*
2491 * Read at most n SUNI regs starting at reg into val
2492 */
2493static int
2494en_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n)
2495{
2496 struct en_softc *sc = ifatm->ifp->if_softc;
2497 u_int i;
2498
2499 EN_CHECKLOCK(sc);
2500 if (reg >= MID_NSUNI)
2501 return (EINVAL);
2502 if (reg + *n > MID_NSUNI)
2503 *n = MID_NSUNI - reg;
2504
2505 for (i = 0; i < *n; i++)
2506 val[i] = en_read(sc, MID_SUNIOFF + 4 * (reg + i));
2507
2508 return (0);
2509}
2510
2511/*
2512 * change the bits given by mask to them in val in register reg
2513 */
2514static int
2515en_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
2516{
2517 struct en_softc *sc = ifatm->ifp->if_softc;
2518 uint32_t regval;
2519
2520 EN_CHECKLOCK(sc);
2521 if (reg >= MID_NSUNI)
2522 return (EINVAL);
2523 regval = en_read(sc, MID_SUNIOFF + 4 * reg);
2524 regval = (regval & ~mask) | (val & mask);
2525 en_write(sc, MID_SUNIOFF + 4 * reg, regval);
2526 return (0);
2527}
2528
2529static const struct utopia_methods en_utopia_methods = {
2530 en_utopia_readregs,
2531 en_utopia_writereg
2532};
2533
2534/*********************************************************************/
2535/*
2536 * Probing the DMA brokeness of the card
2537 */
2538
2539/*
2540 * Physical address load helper function for DMA probe
2541 *
2542 * LOCK: unlocked, not needed
2543 */
2544static void
2545en_dmaprobe_load(void *uarg, bus_dma_segment_t *segs, int nseg, int error)
2546{
2547 if (error == 0)
2548 *(bus_addr_t *)uarg = segs[0].ds_addr;
2549}
2550
2551/*
2552 * en_dmaprobe: helper function for en_attach.
2553 *
2554 * see how the card handles DMA by running a few DMA tests. we need
2555 * to figure out the largest number of bytes we can DMA in one burst
2556 * ("bestburstlen"), and if the starting address for a burst needs to
2557 * be aligned on any sort of boundary or not ("alburst").
2558 *
2559 * Things turn out more complex than that, because on my (harti) brand
2560	 * new motherboard (2.4GHz) we can do 64-byte aligned DMAs, but anything
2561	 * larger than 4 bytes fails (with an RX DMA timeout) for physical
2562	 * addresses that end in 0xc. Therefore we search not only for the largest
2563	 * supported burst (hopefully 64) but also check the largest supported
2564	 * unaligned size. If that turns out to be less than 4 words, we set the
2565	 * noalbursts flag; it is set only when alburst is also set.
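 *
 * In terms of those flags: alburst means a DMA burst must start on a
 * bestburstlen boundary (en_rxdma_load emits an extra aligning DMA
 * first), and noalbursts additionally forces the aligning and cleanup
 * pieces down to single-word DMAs.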
2566 */
2567
2568/*
2569 * en_dmaprobe_doit: do actual testing for the DMA test.
2570	 * Cycle through all burst sizes from 8 up to 64 and test whether each works.
2571 * Return the largest one that works.
2572 *
2573 * LOCK: unlocked, not needed
2574 */
2575static int
2576en_dmaprobe_doit(struct en_softc *sc, uint8_t *sp, bus_addr_t psp)
2577{
2578 uint8_t *dp = sp + MIDDMA_MAXBURST;
2579 bus_addr_t pdp = psp + MIDDMA_MAXBURST;
2580 int lcv, retval = 4, cnt;
2581 uint32_t reg, bcode, midvloc;
2582
2583 if (sc->en_busreset)
2584 sc->en_busreset(sc);
2585 en_write(sc, MID_RESID, 0x0); /* reset card before touching RAM */
2586
2587 /*
2588 * set up a 1k buffer at MID_BUFOFF
2589 */
2590 midvloc = ((MID_BUFOFF - MID_RAMOFF) / sizeof(uint32_t))
2591 >> MIDV_LOCTOPSHFT;
2592 en_write(sc, MIDX_PLACE(0), MIDX_MKPLACE(en_k2sz(1), midvloc));
2593 en_write(sc, MID_VC(0), (midvloc << MIDV_LOCSHIFT)
2594 | (en_k2sz(1) << MIDV_SZSHIFT) | MIDV_TRASH);
2595 en_write(sc, MID_DST_RP(0), 0);
2596 en_write(sc, MID_WP_ST_CNT(0), 0);
2597
2598 /* set up sample data */
2599 for (lcv = 0 ; lcv < MIDDMA_MAXBURST; lcv++)
2600 sp[lcv] = lcv + 1;
2601
2602 /* enable DMA (only) */
2603 en_write(sc, MID_MAST_CSR, MID_MCSR_ENDMA);
2604
2605 sc->drq_chip = MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX));
2606 sc->dtq_chip = MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX));
2607
2608 /*
2609 * try it now . . . DMA it out, then DMA it back in and compare
2610 *
2611 * note: in order to get the dma stuff to reverse directions it wants
2612 * the "end" flag set! since we are not dma'ing valid data we may
2613 * get an ident mismatch interrupt (which we will ignore).
2614 */
2615 DBG(sc, DMA, ("test sp=%p/%#lx, dp=%p/%#lx",
2616 sp, (u_long)psp, dp, (u_long)pdp));
2617 for (lcv = 8 ; lcv <= MIDDMA_MAXBURST ; lcv = lcv * 2) {
2618 DBG(sc, DMA, ("test lcv=%d", lcv));
2619
2620 /* zero SRAM and dest buffer */
2621 bus_space_set_region_4(sc->en_memt, sc->en_base,
2622 MID_BUFOFF, 0, 1024 / 4);
2623 bzero(dp, MIDDMA_MAXBURST);
2624
2625 bcode = en_sz2b(lcv);
2626
2627 /* build lcv-byte-DMA x NBURSTS */
2628 if (sc->is_adaptec)
2629 en_write(sc, sc->dtq_chip,
2630 MID_MK_TXQ_ADP(lcv, 0, MID_DMA_END, 0));
2631 else
2632 en_write(sc, sc->dtq_chip,
2633 MID_MK_TXQ_ENI(1, 0, MID_DMA_END, bcode));
2634 en_write(sc, sc->dtq_chip + 4, psp);
2635 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, sc->dtq_chip, 8);
2636 en_write(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_chip));
2637
2638 cnt = 1000;
2639 while ((reg = en_readx(sc, MID_DMA_RDTX)) !=
2640 MID_DTQ_A2REG(sc->dtq_chip)) {
2641 DELAY(1);
2642 if (--cnt == 0) {
2643 DBG(sc, DMA, ("unexpected timeout in tx "
2644 "DMA test\n alignment=0x%lx, burst size=%d"
2645 ", dma addr reg=%#x, rdtx=%#x, stat=%#x\n",
2646 (u_long)sp & 63, lcv,
2647 en_read(sc, MID_DMA_ADDR), reg,
2648 en_read(sc, MID_INTSTAT)));
2649 return (retval);
2650 }
2651 }
2652
2653 reg = en_read(sc, MID_INTACK);
2654 if ((reg & MID_INT_DMA_TX) != MID_INT_DMA_TX) {
2655 DBG(sc, DMA, ("unexpected status in tx DMA test: %#x\n",
2656 reg));
2657 return (retval);
2658 }
2659 /* re-enable DMA (only) */
2660 en_write(sc, MID_MAST_CSR, MID_MCSR_ENDMA);
2661
2662 /* "return to sender..." address is known ... */
2663
2664 /* build lcv-byte-DMA x NBURSTS */
2665 if (sc->is_adaptec)
2666 en_write(sc, sc->drq_chip,
2667 MID_MK_RXQ_ADP(lcv, 0, MID_DMA_END, 0));
2668 else
2669 en_write(sc, sc->drq_chip,
2670 MID_MK_RXQ_ENI(1, 0, MID_DMA_END, bcode));
2671 en_write(sc, sc->drq_chip + 4, pdp);
2672 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_chip, 8);
2673 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
2674 cnt = 1000;
2675 while ((reg = en_readx(sc, MID_DMA_RDRX)) !=
2676 MID_DRQ_A2REG(sc->drq_chip)) {
2677 DELAY(1);
2679			if (--cnt == 0) {
2680 DBG(sc, DMA, ("unexpected timeout in rx "
2681 "DMA test, rdrx=%#x\n", reg));
2682 return (retval);
2683 }
2684 }
2685 reg = en_read(sc, MID_INTACK);
2686 if ((reg & MID_INT_DMA_RX) != MID_INT_DMA_RX) {
2687 DBG(sc, DMA, ("unexpected status in rx DMA "
2688 "test: 0x%x\n", reg));
2689 return (retval);
2690 }
2691 if (bcmp(sp, dp, lcv)) {
2692 DBG(sc, DMA, ("DMA test failed! lcv=%d, sp=%p, "
2693 "dp=%p", lcv, sp, dp));
2694 return (retval);
2695 }
2696
2697 retval = lcv;
2698 }
2699 return (retval); /* studly 64 byte DMA present! oh baby!! */
2700}
2701
2702/*
2703 * Find the best DMA parameters
2704 *
2705 * LOCK: unlocked, not needed
2706 */
2707static void
2708en_dmaprobe(struct en_softc *sc)
2709{
2710 bus_dma_tag_t tag;
2711 bus_dmamap_t map;
2712 int err;
2713 void *buffer;
2714 int bestalgn, lcv, try, bestnoalgn;
2715 bus_addr_t phys;
2716 uint8_t *addr;
2717
2718 sc->alburst = 0;
2719 sc->noalbursts = 0;
2720
2721 /*
2722 * Allocate some DMA-able memory.
2723 * We need 3 times the max burst size aligned to the max burst size.
2724 */
2725 err = bus_dma_tag_create(NULL, MIDDMA_MAXBURST, 0,
2726 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2727 3 * MIDDMA_MAXBURST, 1, 3 * MIDDMA_MAXBURST, 0,
2728 NULL, NULL, &tag);
2729 if (err)
2730 panic("%s: cannot create test DMA tag %d", __func__, err);
2731
2732 err = bus_dmamem_alloc(tag, &buffer, 0, &map);
2733 if (err)
2734 panic("%s: cannot allocate test DMA memory %d", __func__, err);
2735
2736 err = bus_dmamap_load(tag, map, buffer, 3 * MIDDMA_MAXBURST,
2737 en_dmaprobe_load, &phys, BUS_DMA_NOWAIT);
2738 if (err)
2739 panic("%s: cannot load test DMA map %d", __func__, err);
2740 addr = buffer;
2741 DBG(sc, DMA, ("phys=%#lx addr=%p", (u_long)phys, addr));
2742
2743 /*
2744 * Now get the best burst size of the aligned case.
2745 */
2746 bestalgn = bestnoalgn = en_dmaprobe_doit(sc, addr, phys);
2747
2748 /*
2749 * Now try unaligned.
2750 */
2751 for (lcv = 4; lcv < MIDDMA_MAXBURST; lcv += 4) {
2752 try = en_dmaprobe_doit(sc, addr + lcv, phys + lcv);
2753
2754 if (try < bestnoalgn)
2755 bestnoalgn = try;
2756 }
2757
2758 if (bestnoalgn < bestalgn) {
2759 sc->alburst = 1;
2760 if (bestnoalgn < 32)
2761 sc->noalbursts = 1;
2762 }
2763
2764 sc->bestburstlen = bestalgn;
2765 sc->bestburstshift = en_log2(bestalgn);
2766 sc->bestburstmask = sc->bestburstlen - 1; /* must be power of 2 */
2767 sc->bestburstcode = en_sz2b(bestalgn);
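	/*
	 * Example: a best aligned burst of 64 bytes gives
	 * bestburstshift = 6 and bestburstmask = 0x3f.
	 */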
2768
2769 /*
2770 * Reset the chip before freeing the buffer. It may still be trying
2771 * to DMA.
2772 */
2773 if (sc->en_busreset)
2774 sc->en_busreset(sc);
2775 en_write(sc, MID_RESID, 0x0); /* reset card before touching RAM */
2776
2777 DELAY(10000); /* may still do DMA */
2778
2779 /*
2780 * Free the DMA stuff
2781 */
2782 bus_dmamap_unload(tag, map);
2783 bus_dmamem_free(tag, buffer, map);
2784 bus_dma_tag_destroy(tag);
2785}
2786
2787/*********************************************************************/
2788/*
2789 * Attach/detach.
2790 */
2791
2792/*
2793 * Attach to the card.
2794 *
2795 * LOCK: unlocked, not needed (but initialized)
2796 */
2797int
2798en_attach(struct en_softc *sc)
2799{
2800 struct ifnet *ifp = sc->ifp;
2801 int sz;
2802 uint32_t reg, lcv, check, ptr, sav, midvloc;
2803
2804#ifdef EN_DEBUG
2805 sc->debug = EN_DEBUG;
2806#endif
2807
2808 /*
2809 * Probe card to determine memory size.
2810 *
2811 * The stupid ENI card always reports to PCI that it needs 4MB of
2812 * space (2MB regs and 2MB RAM). If it has less than 2MB RAM the
2813 * addresses wrap in the RAM address space (i.e. on a 512KB card
2814 * addresses 0x3ffffc, 0x37fffc, and 0x2ffffc are aliases for
2815 * 0x27fffc [note that RAM starts at offset 0x200000]).
2816 */
2817
2818 /* reset card before touching RAM */
2819 if (sc->en_busreset)
2820 sc->en_busreset(sc);
2821 en_write(sc, MID_RESID, 0x0);
2822
2823 for (lcv = MID_PROBEOFF; lcv <= MID_MAXOFF ; lcv += MID_PROBSIZE) {
2824 en_write(sc, lcv, lcv); /* data[address] = address */
2825 for (check = MID_PROBEOFF; check < lcv ;check += MID_PROBSIZE) {
2826 reg = en_read(sc, check);
2827 if (reg != check)
2828 /* found an alias! - quit */
2829 goto done_probe;
2830 }
2831 }
2832 done_probe:
2833 lcv -= MID_PROBSIZE; /* take one step back */
2834 sc->en_obmemsz = (lcv + 4) - MID_RAMOFF;
2835
2836 /*
2837 * determine the largest DMA burst supported
2838 */
2839 en_dmaprobe(sc);
2840
2841 /*
2842 * "hello world"
2843 */
2844
2845 /* reset */
2846 if (sc->en_busreset)
2847 sc->en_busreset(sc);
2848 en_write(sc, MID_RESID, 0x0); /* reset */
2849
2850 /* zero memory */
2851 bus_space_set_region_4(sc->en_memt, sc->en_base,
2852 MID_RAMOFF, 0, sc->en_obmemsz / 4);
2853
2854 reg = en_read(sc, MID_RESID);
2855
2856 device_printf(sc->dev, "ATM midway v%d, board IDs %d.%d, %s%s%s, "
2857 "%ldKB on-board RAM\n", MID_VER(reg), MID_MID(reg), MID_DID(reg),
2858 (MID_IS_SABRE(reg)) ? "sabre controller, " : "",
2859 (MID_IS_SUNI(reg)) ? "SUNI" : "Utopia",
2860 (!MID_IS_SUNI(reg) && MID_IS_UPIPE(reg)) ? " (pipelined)" : "",
2861 (long)sc->en_obmemsz / 1024);
2862
2863 /*
2864 * fill in common ATM interface stuff
2865 */
2866 IFP2IFATM(sc->ifp)->mib.hw_version = (MID_VER(reg) << 16) |
2867 (MID_MID(reg) << 8) | MID_DID(reg);
2868 if (MID_DID(reg) & 0x4)
2869 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155;
2870 else
2871 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155;
2872
2873 IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M;
2874 IFP2IFATM(sc->ifp)->mib.vpi_bits = 0;
2875 IFP2IFATM(sc->ifp)->mib.vci_bits = MID_VCI_BITS;
2876 IFP2IFATM(sc->ifp)->mib.max_vccs = MID_N_VC;
2877 IFP2IFATM(sc->ifp)->mib.max_vpcs = 0;
2878
2879 if (sc->is_adaptec) {
2880 IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_ADP155P;
2881 if (sc->bestburstlen == 64 && sc->alburst == 0)
2882 device_printf(sc->dev,
2883 "passed 64 byte DMA test\n");
2884 else
2885 device_printf(sc->dev, "FAILED DMA TEST: "
2886 "burst=%d, alburst=%d\n", sc->bestburstlen,
2887 sc->alburst);
2888 } else {
2889 IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_ENI155P;
2890 device_printf(sc->dev, "maximum DMA burst length = %d "
2891 "bytes%s\n", sc->bestburstlen, sc->alburst ?
2892 sc->noalbursts ? " (no large bursts)" : " (must align)" :
2893 "");
2894 }
2895
2896 /*
2897 * link into network subsystem and prepare card
2898 */
2899 sc->ifp->if_softc = sc;
2900 ifp->if_flags = IFF_SIMPLEX;
2901 ifp->if_ioctl = en_ioctl;
2902 ifp->if_start = en_start;
2903
2904 mtx_init(&sc->en_mtx, device_get_nameunit(sc->dev),
2905 MTX_NETWORK_LOCK, MTX_DEF);
2906 cv_init(&sc->cv_close, "VC close");
2907
2908 /*
2909 * Make the sysctl tree
2910 */
2911 sysctl_ctx_init(&sc->sysctl_ctx);
2912
2913 if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
2914 SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
2915 device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "")) == NULL)
2916 goto fail;
2917
2918 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2919 OID_AUTO, "istats", CTLFLAG_RD, sc, 0, en_sysctl_istats,
2920 "S", "internal statistics") == NULL)
2921 goto fail;
2922
2923#ifdef EN_DEBUG
2924 if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2925 OID_AUTO, "debug", CTLFLAG_RW , &sc->debug, 0, "") == NULL)
2926 goto fail;
2927#endif
2928
2929 IFP2IFATM(sc->ifp)->phy = &sc->utopia;
2930 utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->en_mtx,
2931 &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2932 &en_utopia_methods);
2933 utopia_init_media(&sc->utopia);
2934
1017 /*
1018 * Allocate a map. We do this here rather then in en_txdma,
1019 * because en_txdma is also called from the interrupt handler
1020 * and we are going to have a locking problem then. We must
1021 * use NOWAIT here, because the ip_output path holds various
1022 * locks.
1023 */
1024 map = uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT);
1025 if (map == NULL) {
1026 /* drop that packet */
1027 EN_COUNT(sc->stats.txnomap);
1028 EN_UNLOCK(sc);
1029 m_freem(m);
1030 continue;
1031 }
1032
1033 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1034 EN_UNLOCK(sc);
1035 uma_zfree(sc->map_zone, map);
1036 m_freem(m);
1037 continue;
1038 }
1039
1040 /*
1041 * Look, whether we can prepend the TBD (8 byte)
1042 */
1043 if (M_WRITABLE(m) && M_LEADINGSPACE(m) >= MID_TBD_SIZE) {
1044 tbd[0] = htobe32(MID_TBD_MK1((flags & TX_AAL5) ?
1045 MID_TBD_AAL5 : MID_TBD_NOAAL5,
1046 vc->txspeed, m->m_pkthdr.len / MID_ATMDATASZ));
1047 tbd[1] = htobe32(MID_TBD_MK2(vci, 0, 0));
1048
1049 m->m_data -= MID_TBD_SIZE;
1050 bcopy(tbd, m->m_data, MID_TBD_SIZE);
1051 m->m_len += MID_TBD_SIZE;
1052 flags |= TX_HAS_TBD;
1053 }
1054
1055 /*
1056 * Check whether the padding fits (must be writeable -
1057 * we pad with zero).
1058 */
1059 if (M_WRITABLE(lastm) && M_TRAILINGSPACE(lastm) >= pad) {
1060 bzero(lastm->m_data + lastm->m_len, pad);
1061 lastm->m_len += pad;
1062 flags |= TX_HAS_PAD;
1063
1064 if ((flags & TX_AAL5) &&
1065 M_TRAILINGSPACE(lastm) > MID_PDU_SIZE) {
1066 pdu[0] = htobe32(MID_PDU_MK1(0, 0, datalen));
1067 pdu[1] = 0;
1068 bcopy(pdu, lastm->m_data + lastm->m_len,
1069 MID_PDU_SIZE);
1070 lastm->m_len += MID_PDU_SIZE;
1071 flags |= TX_HAS_PDU;
1072 }
1073 }
1074
1075 if (!sc->is_adaptec &&
1076 (m = en_fix_mchain(sc, m, &pad)) == NULL) {
1077 EN_UNLOCK(sc);
1078 uma_zfree(sc->map_zone, map);
1079 continue;
1080 }
1081
1082 /*
1083 * get assigned channel (will be zero unless txspeed is set)
1084 */
1085 tx = vc->txslot;
1086
1087 if (m->m_pkthdr.len > EN_TXSZ * 1024) {
1088 DBG(sc, TX, ("tx%td: packet larger than xmit buffer "
1089 "(%d > %d)\n", tx - sc->txslot, m->m_pkthdr.len,
1090 EN_TXSZ * 1024));
1091 EN_UNLOCK(sc);
1092 m_freem(m);
1093 uma_zfree(sc->map_zone, map);
1094 continue;
1095 }
1096
1097 if (tx->mbsize > EN_TXHIWAT) {
1098 EN_COUNT(sc->stats.txmbovr);
1099 DBG(sc, TX, ("tx%td: buffer space shortage",
1100 tx - sc->txslot));
1101 EN_UNLOCK(sc);
1102 m_freem(m);
1103 uma_zfree(sc->map_zone, map);
1104 continue;
1105 }
1106
1107 /* commit */
1108 tx->mbsize += m->m_pkthdr.len;
1109
1110 DBG(sc, TX, ("tx%td: VCI=%d, speed=0x%x, buflen=%d, mbsize=%d",
1111 tx - sc->txslot, vci, sc->vccs[vci]->txspeed,
1112 m->m_pkthdr.len, tx->mbsize));
1113
1114 MBUF_SET_TX(m, vci, flags, datalen, pad, map);
1115
1116 _IF_ENQUEUE(&tx->q, m);
1117
1118 en_txdma(sc, tx);
1119
1120 EN_UNLOCK(sc);
1121 }
1122}
1123
1124/*********************************************************************/
1125/*
1126 * VCs
1127 */
1128
1129/*
1130 * en_loadvc: load a vc tab entry from a slot
1131 *
1132 * LOCK: locked, needed
1133 */
1134static void
1135en_loadvc(struct en_softc *sc, struct en_vcc *vc)
1136{
1137 uint32_t reg = en_read(sc, MID_VC(vc->vcc.vci));
1138
1139 reg = MIDV_SETMODE(reg, MIDV_TRASH);
1140 en_write(sc, MID_VC(vc->vcc.vci), reg);
1141 DELAY(27);
1142
1143 /* no need to set CRC */
1144
1145 /* read pointer = 0, desc. start = 0 */
1146 en_write(sc, MID_DST_RP(vc->vcc.vci), 0);
1147 /* write pointer = 0 */
1148 en_write(sc, MID_WP_ST_CNT(vc->vcc.vci), 0);
1149 /* set mode, size, loc */
1150 en_write(sc, MID_VC(vc->vcc.vci), vc->rxslot->mode);
1151
1152 vc->rxslot->cur = vc->rxslot->start;
1153
1154 DBG(sc, VC, ("rx%td: assigned to VCI %d", vc->rxslot - sc->rxslot,
1155 vc->vcc.vci));
1156}
1157
1158/*
1159 * Open the given vcc.
1160 *
1161 * LOCK: unlocked, needed
1162 */
1163static int
1164en_open_vcc(struct en_softc *sc, struct atmio_openvcc *op)
1165{
1166 uint32_t oldmode, newmode;
1167 struct en_rxslot *slot;
1168 struct en_vcc *vc;
1169 int error = 0;
1170
1171 DBG(sc, IOCTL, ("enable vpi=%d, vci=%d, flags=%#x",
1172 op->param.vpi, op->param.vci, op->param.flags));
1173
1174 if (op->param.vpi != 0 || op->param.vci >= MID_N_VC)
1175 return (EINVAL);
1176
1177 vc = uma_zalloc(en_vcc_zone, M_NOWAIT | M_ZERO);
1178 if (vc == NULL)
1179 return (ENOMEM);
1180
1181 EN_LOCK(sc);
1182
1183 if (sc->vccs[op->param.vci] != NULL) {
1184 error = EBUSY;
1185 goto done;
1186 }
1187
1188 /* find a free receive slot */
1189 for (slot = sc->rxslot; slot < &sc->rxslot[sc->en_nrx]; slot++)
1190 if (slot->vcc == NULL)
1191 break;
1192 if (slot == &sc->rxslot[sc->en_nrx]) {
1193 error = ENOSPC;
1194 goto done;
1195 }
1196
1197 vc->rxslot = slot;
1198 vc->rxhand = op->rxhand;
1199 vc->vcc = op->param;
1200
1201 oldmode = slot->mode;
1202 newmode = (op->param.aal == ATMIO_AAL_5) ? MIDV_AAL5 : MIDV_NOAAL;
1203 slot->mode = MIDV_SETMODE(oldmode, newmode);
1204 slot->vcc = vc;
1205
1206	KASSERT(_IF_QLEN(&slot->indma) == 0 && _IF_QLEN(&slot->q) == 0,
1207	    ("en_open_vcc: left over mbufs on enable slot=%td",
1208	    vc->rxslot - sc->rxslot));
1209
1210 vc->txspeed = 0;
1211 vc->txslot = sc->txslot;
1212 vc->txslot->nref++; /* bump reference count */
1213
1214 en_loadvc(sc, vc); /* does debug printf for us */
1215
1216 /* don't free below */
1217 sc->vccs[vc->vcc.vci] = vc;
1218 vc = NULL;
1219 sc->vccs_open++;
1220
1221 done:
1222 if (vc != NULL)
1223 uma_zfree(en_vcc_zone, vc);
1224
1225 EN_UNLOCK(sc);
1226 return (error);
1227}
1228
1229/*
1230 * Close finished
1231 */
1232static void
1233en_close_finish(struct en_softc *sc, struct en_vcc *vc)
1234{
1235
1236 if (vc->rxslot != NULL)
1237 vc->rxslot->vcc = NULL;
1238
1239 DBG(sc, VC, ("vci: %u free (%p)", vc->vcc.vci, vc));
1240
1241 sc->vccs[vc->vcc.vci] = NULL;
1242 uma_zfree(en_vcc_zone, vc);
1243 sc->vccs_open--;
1244}
1245
1246/*
1247 * LOCK: unlocked, needed
1248 */
1249static int
1250en_close_vcc(struct en_softc *sc, struct atmio_closevcc *cl)
1251{
1252 uint32_t oldmode, newmode;
1253 struct en_vcc *vc;
1254 int error = 0;
1255
1256 DBG(sc, IOCTL, ("disable vpi=%d, vci=%d", cl->vpi, cl->vci));
1257
1258 if (cl->vpi != 0 || cl->vci >= MID_N_VC)
1259 return (EINVAL);
1260
1261 EN_LOCK(sc);
1262 if ((vc = sc->vccs[cl->vci]) == NULL) {
1263 error = ENOTCONN;
1264 goto done;
1265 }
1266
1267 /*
1268 * turn off VCI
1269 */
1270 if (vc->rxslot == NULL) {
1271 error = ENOTCONN;
1272 goto done;
1273 }
1274 if (vc->vflags & VCC_DRAIN) {
1275 error = EINVAL;
1276 goto done;
1277 }
1278
1279 oldmode = en_read(sc, MID_VC(cl->vci));
1280 newmode = MIDV_SETMODE(oldmode, MIDV_TRASH) & ~MIDV_INSERVICE;
1281 en_write(sc, MID_VC(cl->vci), (newmode | (oldmode & MIDV_INSERVICE)));
1282
1283 /* halt in tracks, be careful to preserve inservice bit */
1284 DELAY(27);
1285 vc->rxslot->mode = newmode;
1286
1287 vc->txslot->nref--;
1288
1289 /* if stuff is still going on we are going to have to drain it out */
1290 if (_IF_QLEN(&vc->rxslot->indma) == 0 &&
1291 _IF_QLEN(&vc->rxslot->q) == 0 &&
1292 (vc->vflags & VCC_SWSL) == 0) {
1293 en_close_finish(sc, vc);
1294 goto done;
1295 }
1296
1297 vc->vflags |= VCC_DRAIN;
1298 DBG(sc, IOCTL, ("VCI %u now draining", cl->vci));
1299
1300 if (vc->vcc.flags & ATMIO_FLAG_ASYNC)
1301 goto done;
1302
1303 vc->vflags |= VCC_CLOSE_RX;
1304 while ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1305 (vc->vflags & VCC_DRAIN))
1306 cv_wait(&sc->cv_close, &sc->en_mtx);
1307
1308 en_close_finish(sc, vc);
1309 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1310 error = EIO;
1311 goto done;
1312 }
1313
1315 done:
1316 EN_UNLOCK(sc);
1317 return (error);
1318}
1319
1320/*********************************************************************/
1321/*
1322 * starting/stopping the card
1323 */
1324
1325/*
1326 * en_reset_ul: reset the board, throw away work in progress.
1327 * must en_init to recover.
1328 *
1329 * LOCK: locked, needed
1330 */
1331static void
1332en_reset_ul(struct en_softc *sc)
1333{
1334 struct en_map *map;
1335 struct mbuf *m;
1336 struct en_rxslot *rx;
1337 int lcv;
1338
1339 device_printf(sc->dev, "reset\n");
1340 sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1341
1342 if (sc->en_busreset)
1343 sc->en_busreset(sc);
1344 en_write(sc, MID_RESID, 0x0); /* reset hardware */
1345
1346 /*
1347 * recv: dump any mbufs we are dma'ing into, if DRAINing, then a reset
1348 * will free us! Don't release the rxslot from the channel.
1349 */
1350 for (lcv = 0 ; lcv < MID_N_VC ; lcv++) {
1351 if (sc->vccs[lcv] == NULL)
1352 continue;
1353 rx = sc->vccs[lcv]->rxslot;
1354
1355 for (;;) {
1356 _IF_DEQUEUE(&rx->indma, m);
1357 if (m == NULL)
1358 break;
1359 map = (void *)m->m_pkthdr.rcvif;
1360 uma_zfree(sc->map_zone, map);
1361 m_freem(m);
1362 }
1363 for (;;) {
1364 _IF_DEQUEUE(&rx->q, m);
1365 if (m == NULL)
1366 break;
1367 m_freem(m);
1368 }
1369 sc->vccs[lcv]->vflags = 0;
1370 }
1371
1372 /*
1373 * xmit: dump everything
1374 */
1375 for (lcv = 0 ; lcv < EN_NTX ; lcv++) {
1376 for (;;) {
1377 _IF_DEQUEUE(&sc->txslot[lcv].indma, m);
1378 if (m == NULL)
1379 break;
1380 map = (void *)m->m_pkthdr.rcvif;
1381 uma_zfree(sc->map_zone, map);
1382 m_freem(m);
1383 }
1384 for (;;) {
1385 _IF_DEQUEUE(&sc->txslot[lcv].q, m);
1386 if (m == NULL)
1387 break;
1388 map = (void *)m->m_pkthdr.rcvif;
1389 uma_zfree(sc->map_zone, map);
1390 m_freem(m);
1391 }
1392 sc->txslot[lcv].mbsize = 0;
1393 }
1394
1395 /*
1396 * Unstop all waiters
1397 */
1398 cv_broadcast(&sc->cv_close);
1399}
1400
1401/*
1402 * en_reset: reset the board, throw away work in progress.
1403 * must en_init to recover.
1404 *
1405 * LOCK: unlocked, needed
1406 *
1407	 * Use en_reset_ul if you already have the lock
1408 */
1409void
1410en_reset(struct en_softc *sc)
1411{
1412 EN_LOCK(sc);
1413 en_reset_ul(sc);
1414 EN_UNLOCK(sc);
1415}
1416
1417
1418/*
1419 * en_init: init board and sync the card with the data in the softc.
1420 *
1421 * LOCK: locked, needed
1422 */
1423static void
1424en_init(struct en_softc *sc)
1425{
1426 int vc, slot;
1427 uint32_t loc;
1428
1429 if ((sc->ifp->if_flags & IFF_UP) == 0) {
1430 DBG(sc, INIT, ("going down"));
1431 en_reset(sc); /* to be safe */
1432 return;
1433 }
1434
1435 DBG(sc, INIT, ("going up"));
1436 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; /* enable */
1437
1438 if (sc->en_busreset)
1439 sc->en_busreset(sc);
1440 en_write(sc, MID_RESID, 0x0); /* reset */
1441
1442 /* zero memory */
1443 bus_space_set_region_4(sc->en_memt, sc->en_base,
1444 MID_RAMOFF, 0, sc->en_obmemsz / 4);
1445
1446 /*
1447 * init obmem data structures: vc tab, dma q's, slist.
1448 *
1449 * note that we set drq_free/dtq_free to one less than the total number
1450 * of DTQ/DRQs present. we do this because the card uses the condition
1451 * (drq_chip == drq_us) to mean "list is empty"... but if you allow the
1452 * circular list to be completely full then (drq_chip == drq_us) [i.e.
1453 * the drq_us pointer will wrap all the way around]. by restricting
1454 * the number of active requests to (N - 1) we prevent the list from
1455 * becoming completely full. note that the card will sometimes give
1456	 * us an interrupt for a DTQ/DRQ we have already processed... this helps
1457 * keep that interrupt from messing us up.
1458 */
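	/*
	 * Illustration with a hypothetical 8-entry queue: read == write
	 * means "empty", so if all 8 slots could be active the write
	 * pointer would wrap onto the read pointer and a full queue
	 * would be indistinguishable from an empty one; with at most 7
	 * active entries the two states stay distinct.
	 */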
1459 bzero(&sc->drq, sizeof(sc->drq));
1460 sc->drq_free = MID_DRQ_N - 1;
1461 sc->drq_chip = MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX));
1462 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
1463 sc->drq_us = sc->drq_chip;
1464
1465 bzero(&sc->dtq, sizeof(sc->dtq));
1466 sc->dtq_free = MID_DTQ_N - 1;
1467 sc->dtq_chip = MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX));
1468 en_write(sc, MID_DMA_WRTX, MID_DRQ_A2REG(sc->dtq_chip));
1469 sc->dtq_us = sc->dtq_chip;
1470
1471 sc->hwslistp = MID_SL_REG2A(en_read(sc, MID_SERV_WRITE));
1472 sc->swsl_size = sc->swsl_head = sc->swsl_tail = 0;
1473
1474 DBG(sc, INIT, ("drq free/chip: %d/0x%x, dtq free/chip: %d/0x%x, "
1475 "hwslist: 0x%x", sc->drq_free, sc->drq_chip, sc->dtq_free,
1476 sc->dtq_chip, sc->hwslistp));
1477
1478 for (slot = 0 ; slot < EN_NTX ; slot++) {
1479 sc->txslot[slot].bfree = EN_TXSZ * 1024;
1480 en_write(sc, MIDX_READPTR(slot), 0);
1481 en_write(sc, MIDX_DESCSTART(slot), 0);
1482 loc = sc->txslot[slot].cur = sc->txslot[slot].start;
1483 loc = loc - MID_RAMOFF;
1484 /* mask, cvt to words */
1485 loc = (loc & ~((EN_TXSZ * 1024) - 1)) >> 2;
1486 /* top 11 bits */
1487 loc = loc >> MIDV_LOCTOPSHFT;
1488 en_write(sc, MIDX_PLACE(slot), MIDX_MKPLACE(en_k2sz(EN_TXSZ),
1489 loc));
1490 DBG(sc, INIT, ("tx%d: place 0x%x", slot,
1491 (u_int)en_read(sc, MIDX_PLACE(slot))));
1492 }
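	/*
	 * Note on the place-register math above: "loc" is the slot's
	 * word offset from the start of on-board RAM with the
	 * sub-buffer offset bits masked off; only its top 11 bits
	 * (after MIDV_LOCTOPSHFT) are encoded into MIDX_PLACE together
	 * with the buffer-size code from en_k2sz().
	 */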
1493
1494 for (vc = 0; vc < MID_N_VC; vc++)
1495 if (sc->vccs[vc] != NULL)
1496 en_loadvc(sc, sc->vccs[vc]);
1497
1498 /*
1499 * enable!
1500 */
1501 en_write(sc, MID_INTENA, MID_INT_TX | MID_INT_DMA_OVR | MID_INT_IDENT |
1502 MID_INT_LERR | MID_INT_DMA_ERR | MID_INT_DMA_RX | MID_INT_DMA_TX |
1503 MID_INT_SERVICE | MID_INT_SUNI | MID_INT_STATS);
1504 en_write(sc, MID_MAST_CSR, MID_SETIPL(sc->ipl) | MID_MCSR_ENDMA |
1505 MID_MCSR_ENTX | MID_MCSR_ENRX);
1506}
1507
1508/*********************************************************************/
1509/*
1510 * Ioctls
1511 */
1512/*
1513 * en_ioctl: handle ioctl requests
1514 *
1515 * NOTE: if you add an ioctl to set txspeed, you should choose a new
1516 * TX channel/slot. Choose the one with the lowest sc->txslot[slot].nref
1517 * value, subtract one from sc->txslot[0].nref, add one to the
1518 * sc->txslot[slot].nref, set sc->txvc2slot[vci] = slot, and then set
1519 * txspeed[vci].
1520 *
1521 * LOCK: unlocked, needed
1522 */
1523static int
1524en_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1525{
1526 struct en_softc *sc = (struct en_softc *)ifp->if_softc;
1527 struct ifaddr *ifa = (struct ifaddr *)data;
1528 struct ifreq *ifr = (struct ifreq *)data;
1529 struct atmio_vcctable *vtab;
1530 int error = 0;
1531
1532 switch (cmd) {
1533
1534 case SIOCSIFADDR:
1535 EN_LOCK(sc);
1536 ifp->if_flags |= IFF_UP;
1537#if defined(INET) || defined(INET6)
1538 if (ifa->ifa_addr->sa_family == AF_INET
1539 || ifa->ifa_addr->sa_family == AF_INET6) {
1540 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1541 en_reset_ul(sc);
1542 en_init(sc);
1543 }
1544 ifa->ifa_rtrequest = atm_rtrequest; /* ??? */
1545 EN_UNLOCK(sc);
1546 break;
1547 }
1548	#endif /* INET || INET6 */
1549 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1550 en_reset_ul(sc);
1551 en_init(sc);
1552 }
1553 EN_UNLOCK(sc);
1554 break;
1555
1556 case SIOCSIFFLAGS:
1557 EN_LOCK(sc);
1558 if (ifp->if_flags & IFF_UP) {
1559 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1560 en_init(sc);
1561 } else {
1562 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1563 en_reset_ul(sc);
1564 }
1565 EN_UNLOCK(sc);
1566 break;
1567
1568 case SIOCSIFMTU:
1569 /*
1570 * Set the interface MTU.
1571 */
1572 if (ifr->ifr_mtu > ATMMTU) {
1573 error = EINVAL;
1574 break;
1575 }
1576 ifp->if_mtu = ifr->ifr_mtu;
1577 break;
1578
1579 case SIOCSIFMEDIA:
1580 case SIOCGIFMEDIA:
1581 error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
1582 break;
1583
1584 case SIOCATMOPENVCC: /* kernel internal use */
1585 error = en_open_vcc(sc, (struct atmio_openvcc *)data);
1586 break;
1587
1588 case SIOCATMCLOSEVCC: /* kernel internal use */
1589 error = en_close_vcc(sc, (struct atmio_closevcc *)data);
1590 break;
1591
1592 case SIOCATMGETVCCS: /* internal netgraph use */
1593 vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
1594 MID_N_VC, sc->vccs_open, &sc->en_mtx, 0);
1595 if (vtab == NULL) {
1596 error = ENOMEM;
1597 break;
1598 }
1599 *(void **)data = vtab;
1600 break;
1601
1602 case SIOCATMGVCCS: /* return vcc table */
1603 vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
1604 MID_N_VC, sc->vccs_open, &sc->en_mtx, 1);
1605 error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
1606 vtab->count * sizeof(vtab->vccs[0]));
1607 free(vtab, M_DEVBUF);
1608 break;
1609
1610 default:
1611 error = EINVAL;
1612 break;
1613 }
1614 return (error);
1615}
1616
1617/*********************************************************************/
1618/*
1619 * Sysctl's
1620 */
1621
1622/*
1623 * Sysctl handler for internal statistics
1624 *
1625 * LOCK: unlocked, needed
1626 */
1627static int
1628en_sysctl_istats(SYSCTL_HANDLER_ARGS)
1629{
1630 struct en_softc *sc = arg1;
1631 uint32_t *ret;
1632 int error;
1633
1634 ret = malloc(sizeof(sc->stats), M_TEMP, M_WAITOK);
1635
1636 EN_LOCK(sc);
1637 bcopy(&sc->stats, ret, sizeof(sc->stats));
1638 EN_UNLOCK(sc);
1639
1640 error = SYSCTL_OUT(req, ret, sizeof(sc->stats));
1641 free(ret, M_TEMP);
1642
1643 return (error);
1644}
1645
1646/*********************************************************************/
1647/*
1648 * Interrupts
1649 */
1650
1651/*
1652 * Transmit interrupt handler
1653 *
1654 * check for tx complete, if detected then this means that some space
1655 * has come free on the card. we must account for it and arrange to
1656 * kick the channel to life (in case it is stalled waiting on the card).
1657 *
1658 * LOCK: locked, needed
1659 */
1660static uint32_t
1661en_intr_tx(struct en_softc *sc, uint32_t reg)
1662{
1663 uint32_t kick;
1664 uint32_t mask;
1665 uint32_t val;
1666 int chan;
1667
1668 kick = 0; /* bitmask of channels to kick */
1669
1670 for (mask = 1, chan = 0; chan < EN_NTX; chan++, mask *= 2) {
1671 if (!(reg & MID_TXCHAN(chan)))
1672 continue;
1673
1674 kick = kick | mask;
1675
1676 /* current read pointer */
1677 val = en_read(sc, MIDX_READPTR(chan));
1678 /* as offset */
1679 val = (val * sizeof(uint32_t)) + sc->txslot[chan].start;
1680 if (val > sc->txslot[chan].cur)
1681 sc->txslot[chan].bfree = val - sc->txslot[chan].cur;
1682 else
1683 sc->txslot[chan].bfree = (val + (EN_TXSZ * 1024)) -
1684 sc->txslot[chan].cur;
1685 DBG(sc, INTR, ("tx%d: transmit done. %d bytes now free in "
1686 "buffer", chan, sc->txslot[chan].bfree));
1687 }
1688 return (kick);
1689}
1690
1691/*
1692 * TX DMA interrupt
1693 *
1694 * check for TX DMA complete, if detected then this means
1695 * that some DTQs are now free. it also means some indma
1696 * mbufs can be freed. if we needed DTQs, kick all channels.
1697 *
1698 * LOCK: locked, needed
1699 */
1700static uint32_t
1701en_intr_tx_dma(struct en_softc *sc)
1702{
1703 uint32_t kick = 0;
1704 uint32_t val;
1705 uint32_t idx;
1706 uint32_t slot;
1707 uint32_t dtq;
1708 struct en_map *map;
1709 struct mbuf *m;
1710
1711 val = en_read(sc, MID_DMA_RDTX); /* chip's current location */
1712 idx = MID_DTQ_A2REG(sc->dtq_chip); /* where we last saw chip */
1713
1714 if (sc->need_dtqs) {
1715 kick = MID_NTX_CH - 1; /* assume power of 2, kick all! */
1716 sc->need_dtqs = 0; /* recalculated in "kick" loop below */
1717 DBG(sc, INTR, ("cleared need DTQ condition"));
1718 }
1719
1720 while (idx != val) {
1721 sc->dtq_free++;
1722 if ((dtq = sc->dtq[idx]) != 0) {
1723 /* don't forget to zero it out when done */
1724 sc->dtq[idx] = 0;
1725 slot = EN_DQ_SLOT(dtq);
1726
1727 _IF_DEQUEUE(&sc->txslot[slot].indma, m);
1728 if (m == NULL)
1729 panic("enintr: dtqsync");
1730 map = (void *)m->m_pkthdr.rcvif;
1731 uma_zfree(sc->map_zone, map);
1732 m_freem(m);
1733
1734 sc->txslot[slot].mbsize -= EN_DQ_LEN(dtq);
1735 DBG(sc, INTR, ("tx%d: free %d dma bytes, mbsize now "
1736 "%d", slot, EN_DQ_LEN(dtq),
1737 sc->txslot[slot].mbsize));
1738 }
1739 EN_WRAPADD(0, MID_DTQ_N, idx, 1);
1740 }
1741 sc->dtq_chip = MID_DTQ_REG2A(val); /* sync softc */
1742
1743 return (kick);
1744}
1745
1746/*
1747 * Service interrupt
1748 *
1749 * LOCK: locked, needed
1750 */
1751static int
1752en_intr_service(struct en_softc *sc)
1753{
1754 uint32_t chip;
1755 uint32_t vci;
1756 int need_softserv = 0;
1757 struct en_vcc *vc;
1758
1759 chip = MID_SL_REG2A(en_read(sc, MID_SERV_WRITE));
1760
1761 while (sc->hwslistp != chip) {
1762 /* fetch and remove it from hardware service list */
1763 vci = en_read(sc, sc->hwslistp);
1764 EN_WRAPADD(MID_SLOFF, MID_SLEND, sc->hwslistp, 4);
1765
1766 if ((vc = sc->vccs[vci]) == NULL ||
1767 (vc->vcc.flags & ATMIO_FLAG_NORX)) {
1768 DBG(sc, INTR, ("unexpected rx interrupt VCI %d", vci));
1769 en_write(sc, MID_VC(vci), MIDV_TRASH); /* rx off */
1770 continue;
1771 }
1772
1773 /* remove from hwsl */
1774 en_write(sc, MID_VC(vci), vc->rxslot->mode);
1775 EN_COUNT(sc->stats.hwpull);
1776
1777 DBG(sc, INTR, ("pulled VCI %d off hwslist", vci));
1778
1779 /* add it to the software service list (if needed) */
1780 if ((vc->vflags & VCC_SWSL) == 0) {
1781 EN_COUNT(sc->stats.swadd);
1782 need_softserv = 1;
1783 vc->vflags |= VCC_SWSL;
1784 sc->swslist[sc->swsl_tail] = vci;
1785 EN_WRAPADD(0, MID_SL_N, sc->swsl_tail, 1);
1786 sc->swsl_size++;
1787 DBG(sc, INTR, ("added VCI %d to swslist", vci));
1788 }
1789 }
1790 return (need_softserv);
1791}
1792
1793/*
1794 * Handle a receive DMA completion
1795 */
1796static void
1797en_rx_drain(struct en_softc *sc, u_int drq)
1798{
1799 struct en_rxslot *slot;
1800 struct en_vcc *vc;
1801 struct mbuf *m;
1802 struct atm_pseudohdr ah;
1803
1804 slot = &sc->rxslot[EN_DQ_SLOT(drq)];
1805
1806 m = NULL; /* assume "JK" trash DMA */
1807 if (EN_DQ_LEN(drq) != 0) {
1808 _IF_DEQUEUE(&slot->indma, m);
1809 KASSERT(m != NULL, ("drqsync: %s: lost mbuf in slot %td!",
1810 sc->ifp->if_xname, slot - sc->rxslot));
1811 uma_zfree(sc->map_zone, (struct en_map *)m->m_pkthdr.rcvif);
1812 }
1813 if ((vc = slot->vcc) == NULL) {
1814		/* oops */
1815 if (m != NULL)
1816 m_freem(m);
1817 return;
1818 }
1819
1820 /* do something with this mbuf */
1821 if (vc->vflags & VCC_DRAIN) {
1822 /* drain? */
1823 if (m != NULL)
1824 m_freem(m);
1825 if (_IF_QLEN(&slot->indma) == 0 && _IF_QLEN(&slot->q) == 0 &&
1826 (en_read(sc, MID_VC(vc->vcc.vci)) & MIDV_INSERVICE) == 0 &&
1827 (vc->vflags & VCC_SWSL) == 0) {
1828 vc->vflags &= ~VCC_CLOSE_RX;
1829 if (vc->vcc.flags & ATMIO_FLAG_ASYNC)
1830 en_close_finish(sc, vc);
1831 else
1832 cv_signal(&sc->cv_close);
1833 }
1834 return;
1835 }
1836
1837 if (m != NULL) {
1838 ATM_PH_FLAGS(&ah) = vc->vcc.flags;
1839 ATM_PH_VPI(&ah) = 0;
1840 ATM_PH_SETVCI(&ah, vc->vcc.vci);
1841
1842 DBG(sc, INTR, ("rx%td: rxvci%d: atm_input, mbuf %p, len %d, "
1843 "hand %p", slot - sc->rxslot, vc->vcc.vci, m,
1844 EN_DQ_LEN(drq), vc->rxhand));
1845
1846 m->m_pkthdr.rcvif = sc->ifp;
1847 sc->ifp->if_ipackets++;
1848
1849 vc->ipackets++;
1850 vc->ibytes += m->m_pkthdr.len;
1851
1852#ifdef EN_DEBUG
1853 if (sc->debug & DBG_IPACKETS)
1854 en_dump_packet(sc, m);
1855#endif
1856#ifdef ENABLE_BPF
1857 BPF_MTAP(sc->ifp, m);
1858#endif
1859 EN_UNLOCK(sc);
1860 atm_input(sc->ifp, &ah, m, vc->rxhand);
1861 EN_LOCK(sc);
1862 }
1863}
1864
1865/*
1866 * check for RX DMA complete, and pass the data "upstairs"
1867 *
1868 * LOCK: locked, needed
1869 */
1870static int
1871en_intr_rx_dma(struct en_softc *sc)
1872{
1873 uint32_t val;
1874 uint32_t idx;
1875 uint32_t drq;
1876
1877 val = en_read(sc, MID_DMA_RDRX); /* chip's current location */
1878 idx = MID_DRQ_A2REG(sc->drq_chip); /* where we last saw chip */
1879
1880 while (idx != val) {
1881 sc->drq_free++;
1882 if ((drq = sc->drq[idx]) != 0) {
1883 /* don't forget to zero it out when done */
1884 sc->drq[idx] = 0;
1885 en_rx_drain(sc, drq);
1886 }
1887 EN_WRAPADD(0, MID_DRQ_N, idx, 1);
1888 }
1889 sc->drq_chip = MID_DRQ_REG2A(val); /* sync softc */
1890
1891 if (sc->need_drqs) {
1892 /* true if we had a DRQ shortage */
1893 sc->need_drqs = 0;
1894 DBG(sc, INTR, ("cleared need DRQ condition"));
1895 return (1);
1896 } else
1897 return (0);
1898}
1899
1900/*
1901 * en_mget: get an mbuf chain that can hold totlen bytes and return it
1902 * (for recv). For the actual allocation totlen is rounded up to a multiple
1903 * of 4. We also ensure, that each mbuf has a multiple of 4 bytes.
1904 *
1905 * After this call the sum of all the m_len's in the chain will be totlen.
1906 * This is called at interrupt time, so we can't wait here.
1907 *
1908 * LOCK: any, not needed
1909 */
1910static struct mbuf *
1911en_mget(struct en_softc *sc, u_int pktlen)
1912{
1913 struct mbuf *m, *tmp;
1914 u_int totlen, pad;
1915
1916 totlen = roundup(pktlen, sizeof(uint32_t));
1917 pad = totlen - pktlen;
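	/* e.g. pktlen = 61 gives totlen = 64 and pad = 3 */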
1918
1919 /*
1920 * First get an mbuf with header. Keep space for a couple of
1921	 * words at the beginning.
1922 */
1923 /* called from interrupt context */
1924 MGETHDR(m, M_DONTWAIT, MT_DATA);
1925 if (m == NULL)
1926 return (NULL);
1927
1928 m->m_pkthdr.rcvif = NULL;
1929 m->m_pkthdr.len = pktlen;
1930 m->m_len = EN_RX1BUF;
1931 MH_ALIGN(m, EN_RX1BUF);
1932 if (m->m_len >= totlen) {
1933 m->m_len = totlen;
1934
1935 } else {
1936 totlen -= m->m_len;
1937
1938 /* called from interrupt context */
1939 tmp = m_getm(m, totlen, M_DONTWAIT, MT_DATA);
1940 if (tmp == NULL) {
1941 m_free(m);
1942 return (NULL);
1943 }
1944 tmp = m->m_next;
1945 /* m_getm could do this for us */
1946 while (tmp != NULL) {
1947 tmp->m_len = min(MCLBYTES, totlen);
1948 totlen -= tmp->m_len;
1949 tmp = tmp->m_next;
1950 }
1951 }
1952
1953 return (m);
1954}
1955
1956/*
1957 * Argument for RX DMAMAP loader.
1958 */
1959struct rxarg {
1960 struct en_softc *sc;
1961 struct mbuf *m;
1962	u_int pre_skip; /* number of bytes to skip at the start */
1963	u_int post_skip; /* number of bytes to skip at the end */
1964 struct en_vcc *vc; /* vc we are receiving on */
1965 int wait; /* wait for DRQ entries */
1966};
1967
1968/*
1969	 * Build the receive DMA queue entries for the segment table, write
1970	 * them to the card and keep track of how many DRQ entries we use.
1971 *
1972 * LOCK: locked, needed
1973 */
1974static void
1975en_rxdma_load(void *uarg, bus_dma_segment_t *segs, int nseg,
1976 bus_size_t mapsize, int error)
1977{
1978 struct rxarg *rx = uarg;
1979 struct en_softc *sc = rx->sc;
1980 struct en_rxslot *slot = rx->vc->rxslot;
1981 u_int free; /* number of free DRQ entries */
1982 uint32_t cur; /* current buffer offset */
1983 uint32_t drq; /* DRQ entry pointer */
1984 uint32_t last_drq; /* where we have written last */
1985 u_int needalign, cnt, count, bcode;
1986 bus_addr_t addr;
1987 bus_size_t rest;
1988 int i;
1989
1990 if (error != 0)
1991 return;
1992 if (nseg > EN_MAX_DMASEG)
1993 panic("too many DMA segments");
1994
1995 rx->wait = 0;
1996
1997 free = sc->drq_free;
1998 drq = sc->drq_us;
1999 cur = slot->cur;
2000
2001 last_drq = 0;
2002
2003 /*
2004 * Local macro to add an entry to the receive DMA area. If there
2005 * are no entries left, return. Save the byte offset of the entry
2006 * in last_drq for later use.
2007 */
2008#define PUT_DRQ_ENTRY(ENI, BCODE, COUNT, ADDR) \
2009 if (free == 0) { \
2010 EN_COUNT(sc->stats.rxdrqout); \
2011 rx->wait = 1; \
2012 return; \
2013 } \
2014 last_drq = drq; \
2015 en_write(sc, drq + 0, (ENI || !sc->is_adaptec) ? \
2016 MID_MK_RXQ_ENI(COUNT, rx->vc->vcc.vci, 0, BCODE) : \
2017 MID_MK_RXQ_ADP(COUNT, rx->vc->vcc.vci, 0, BCODE)); \
2018 en_write(sc, drq + 4, ADDR); \
2019 \
2020 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, drq, 8); \
2021 free--;
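/*
 * Note: PUT_DRQ_ENTRY expands inside en_rxdma_load(), so running out
 * of DRQs returns from the callback with rx->wait set; the caller then
 * parks the mbuf on the slot queue and retries once DRQs free up.
 */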
2022
2023 /*
2024 * Local macro to generate a DMA entry to DMA cnt bytes. Updates
2025 * the current buffer byte offset accordingly.
2026 */
2027#define DO_DRQ(TYPE) do { \
2028 rest -= cnt; \
2029 EN_WRAPADD(slot->start, slot->stop, cur, cnt); \
2030 DBG(sc, SERV, ("rx%td: "TYPE" %u bytes, %ju left, cur %#x", \
2031 slot - sc->rxslot, cnt, (uintmax_t)rest, cur)); \
2032 \
2033 PUT_DRQ_ENTRY(1, bcode, count, addr); \
2034 \
2035 addr += cnt; \
2036 } while (0)
2037
2038 /*
2039 * Skip the RBD at the beginning
2040 */
2041 if (rx->pre_skip > 0) {
2042 /* update DMA address */
2043 EN_WRAPADD(slot->start, slot->stop, cur, rx->pre_skip);
2044
2045 PUT_DRQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
2046 }
2047
2048 for (i = 0; i < nseg; i++, segs++) {
2049 addr = segs->ds_addr;
2050 rest = segs->ds_len;
2051
2052 if (sc->is_adaptec) {
2053 /* adaptec card - simple */
2054
2055 /* advance the on-card buffer pointer */
2056 EN_WRAPADD(slot->start, slot->stop, cur, rest);
2057 DBG(sc, SERV, ("rx%td: adp %ju bytes %#jx "
2058 "(cur now 0x%x)", slot - sc->rxslot,
2059 (uintmax_t)rest, (uintmax_t)addr, cur));
2060
2061 PUT_DRQ_ENTRY(0, 0, rest, addr);
2062
2063 continue;
2064 }
2065
2066 /*
2067	 * do we need to do a DMA op to align to the maximum
2068	 * burst? Note that we are always 32-bit aligned.
2069 */
2070 if (sc->alburst &&
2071 (needalign = (addr & sc->bestburstmask)) != 0) {
2072 /* compute number of bytes, words and code */
2073 cnt = sc->bestburstlen - needalign;
2074 if (cnt > rest)
2075 cnt = rest;
2076 count = cnt / sizeof(uint32_t);
2077 if (sc->noalbursts) {
2078 bcode = MIDDMA_WORD;
2079 } else {
2080 bcode = en_dmaplan[count].bcode;
2081 count = cnt >> en_dmaplan[count].divshift;
2082 }
2083 DO_DRQ("al_dma");
2084 }
2085
2086 /* do we need to do a max-sized burst? */
2087 if (rest >= sc->bestburstlen) {
2088 count = rest >> sc->bestburstshift;
2089 cnt = count << sc->bestburstshift;
2090 bcode = sc->bestburstcode;
2091 DO_DRQ("best_dma");
2092 }
2093
2094 /* do we need to do a cleanup burst? */
2095 if (rest != 0) {
2096 cnt = rest;
2097 count = rest / sizeof(uint32_t);
2098 if (sc->noalbursts) {
2099 bcode = MIDDMA_WORD;
2100 } else {
2101 bcode = en_dmaplan[count].bcode;
2102 count = cnt >> en_dmaplan[count].divshift;
2103 }
2104 DO_DRQ("clean_dma");
2105 }
2106 }
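	/*
	 * Example split (assuming bestburstlen = 64): a 152-byte segment
	 * starting 8 bytes past a burst boundary becomes a 56-byte
	 * alignment DMA, one full 64-byte burst, and a 32-byte cleanup
	 * DMA.
	 */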
2107
2108 /*
2109 * Skip stuff at the end
2110 */
2111 if (rx->post_skip > 0) {
2112 /* update DMA address */
2113 EN_WRAPADD(slot->start, slot->stop, cur, rx->post_skip);
2114
2115 PUT_DRQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
2116 }
2117
2118 /* record the end for the interrupt routine */
2119 sc->drq[MID_DRQ_A2REG(last_drq)] =
2120 EN_DQ_MK(slot - sc->rxslot, rx->m->m_pkthdr.len);
2121
2122 /* set the end flag in the last descriptor */
2123 en_write(sc, last_drq + 0, SETQ_END(sc, en_read(sc, last_drq + 0)));
2124
2125#undef PUT_DRQ_ENTRY
2126#undef DO_DRQ
2127
2128 /* commit */
2129 slot->cur = cur;
2130 sc->drq_free = free;
2131 sc->drq_us = drq;
2132
2133 /* signal to card */
2134 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_us));
2135}
2136
2137/*
2138 * en_service: handle a service interrupt
2139 *
2140 * Q: why do we need a software service list?
2141 *
2142 * A: if we remove a VCI from the hardware list and we find that we are
2143 * out of DRQs we must defer processing until some DRQs become free.
2144 * so we must remember to look at this RX VCI/slot later, but we can't
2145 * put it back on the hardware service list (since that isn't allowed).
2146 * so we instead save it on the software service list. it would be nice
2147 * if we could peek at the VCI on top of the hwservice list without removing
2148 * it, however this leads to a race condition: if we peek at it and
2149 * decide we are done with it new data could come in before we have a
2150 * chance to remove it from the hwslist. by the time we get it out of
2151 * the list the interrupt for the new data will be lost. oops!
2152 *
2153 * LOCK: locked, needed
2154 */
2155static void
2156en_service(struct en_softc *sc)
2157{
2158 struct mbuf *m, *lastm;
2159 struct en_map *map;
2160 struct rxarg rx;
2161 uint32_t cur;
2162 uint32_t dstart; /* data start (as reported by card) */
2163 uint32_t rbd; /* receive buffer descriptor */
2164 uint32_t pdu; /* AAL5 trailer */
2165 int mlen;
2166 int error;
2167 struct en_rxslot *slot;
2168 struct en_vcc *vc;
2169
2170 rx.sc = sc;
2171
2172 next_vci:
2173 if (sc->swsl_size == 0) {
2174 DBG(sc, SERV, ("en_service done"));
2175 return;
2176 }
2177
2178 /*
2179 * get vcc to service
2180 */
2181 rx.vc = vc = sc->vccs[sc->swslist[sc->swsl_head]];
2182 slot = vc->rxslot;
2183 KASSERT (slot->vcc->rxslot == slot, ("en_service: rx slot/vci sync"));
2184
2185 /*
2186 * determine our mode and if we've got any work to do
2187 */
2188 DBG(sc, SERV, ("rx%td: service vci=%d start/stop/cur=0x%x 0x%x "
2189 "0x%x", slot - sc->rxslot, vc->vcc.vci, slot->start,
2190 slot->stop, slot->cur));
2191
2192 same_vci:
2193 cur = slot->cur;
2194
2195 dstart = MIDV_DSTART(en_read(sc, MID_DST_RP(vc->vcc.vci)));
2196 dstart = (dstart * sizeof(uint32_t)) + slot->start;
2197
2198 /* check to see if there is any data at all */
2199 if (dstart == cur) {
2200 EN_WRAPADD(0, MID_SL_N, sc->swsl_head, 1);
2201 /* remove from swslist */
2202 vc->vflags &= ~VCC_SWSL;
2203 sc->swsl_size--;
2204 DBG(sc, SERV, ("rx%td: remove vci %d from swslist",
2205 slot - sc->rxslot, vc->vcc.vci));
2206 goto next_vci;
2207 }
2208
2209 /*
2210 * figure out how many bytes we need
2211 * [mlen = # bytes to go in mbufs]
2212 */
2213 rbd = en_read(sc, cur);
2214 if (MID_RBD_ID(rbd) != MID_RBD_STDID)
2215 panic("en_service: id mismatch");
2216
2217 if (rbd & MID_RBD_T) {
2218 mlen = 0; /* we've got trash */
2219 rx.pre_skip = MID_RBD_SIZE;
2220 rx.post_skip = 0;
2221 EN_COUNT(sc->stats.ttrash);
2222 DBG(sc, SERV, ("RX overflow lost %d cells!", MID_RBD_CNT(rbd)));
2223
2224 } else if (vc->vcc.aal != ATMIO_AAL_5) {
2225 /* 1 cell (ick!) */
2226 mlen = MID_CHDR_SIZE + MID_ATMDATASZ;
2227 rx.pre_skip = MID_RBD_SIZE;
2228 rx.post_skip = 0;
2229
2230 } else {
2231 rx.pre_skip = MID_RBD_SIZE;
2232
2233 /* get PDU trailer in correct byte order */
2234 pdu = cur + MID_RBD_CNT(rbd) * MID_ATMDATASZ +
2235 MID_RBD_SIZE - MID_PDU_SIZE;
2236 if (pdu >= slot->stop)
2237 pdu -= EN_RXSZ * 1024;
2238 pdu = en_read(sc, pdu);
2239
2240 if (MID_RBD_CNT(rbd) * MID_ATMDATASZ <
2241 MID_PDU_LEN(pdu)) {
2242 device_printf(sc->dev, "invalid AAL5 length\n");
2243 rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ;
2244 mlen = 0;
2245 sc->ifp->if_ierrors++;
2246
2247 } else if (rbd & MID_RBD_CRCERR) {
2248 device_printf(sc->dev, "CRC error\n");
2249 rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ;
2250 mlen = 0;
2251 sc->ifp->if_ierrors++;
2252
2253 } else {
2254 mlen = MID_PDU_LEN(pdu);
2255 rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ - mlen;
2256 }
2257 }
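	/*
	 * AAL5 example (assuming the usual 8-byte RBD and PDU trailer
	 * sizes): for a PDU spanning 3 cells, MID_RBD_CNT(rbd) = 3 and
	 * the trailer occupies the last 8 bytes of the 144-byte cell
	 * area, so post_skip ends up as 144 - MID_PDU_LEN(pdu).
	 */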
2258
2259 /*
2260 * now allocate mbufs for mlen bytes of data, if out of mbufs, trash all
2261 *
2262 * notes:
2263 * 1. it is possible that we've already allocated an mbuf for this pkt
2264 * but ran out of DRQs, in which case we saved the allocated mbuf
2265 * on "q".
2266	 * 2. if we save a buf in "q" we store the "cur" (pointer) in the
2267 * buf as an identity (that we can check later).
2268 * 3. after this block of code, if m is still NULL then we ran out of
2269 * mbufs
2270 */
2271 _IF_DEQUEUE(&slot->q, m);
2272 if (m != NULL) {
2273 if (m->m_pkthdr.csum_data != cur) {
2274 /* wasn't ours */
2275 DBG(sc, SERV, ("rx%td: q'ed buf %p not ours",
2276 slot - sc->rxslot, m));
2277 _IF_PREPEND(&slot->q, m);
2278 m = NULL;
2279 EN_COUNT(sc->stats.rxqnotus);
2280 } else {
2281 EN_COUNT(sc->stats.rxqus);
2282 DBG(sc, SERV, ("rx%td: recovered q'ed buf %p",
2283 slot - sc->rxslot, m));
2284 }
2285 }
2286 if (mlen == 0 && m != NULL) {
2287 /* should not happen */
2288 m_freem(m);
2289 m = NULL;
2290 }
2291
2292 if (mlen != 0 && m == NULL) {
2293 m = en_mget(sc, mlen);
2294 if (m == NULL) {
2295 rx.post_skip += mlen;
2296 mlen = 0;
2297 EN_COUNT(sc->stats.rxmbufout);
2298 DBG(sc, SERV, ("rx%td: out of mbufs",
2299 slot - sc->rxslot));
2300 } else
2301 rx.post_skip -= roundup(mlen, sizeof(uint32_t)) - mlen;
2302
2303 DBG(sc, SERV, ("rx%td: allocate buf %p, mlen=%d",
2304 slot - sc->rxslot, m, mlen));
2305 }
2306
2307 DBG(sc, SERV, ("rx%td: VCI %d, rbuf %p, mlen %d, skip %u/%u",
2308 slot - sc->rxslot, vc->vcc.vci, m, mlen, rx.pre_skip,
2309 rx.post_skip));
2310
2311 if (m != NULL) {
2312 /* M_NOWAIT - called from interrupt context */
2313 map = uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT);
2314 if (map == NULL) {
2315 rx.post_skip += mlen;
2316 m_freem(m);
2317 DBG(sc, SERV, ("rx%td: out of maps",
2318 slot - sc->rxslot));
2319 goto skip;
2320 }
2321 rx.m = m;
2322 error = bus_dmamap_load_mbuf(sc->txtag, map->map, m,
2323 en_rxdma_load, &rx, BUS_DMA_NOWAIT);
2324
2325 if (error != 0) {
2326 device_printf(sc->dev, "loading RX map failed "
2327 "%d\n", error);
2328 uma_zfree(sc->map_zone, map);
2329 m_freem(m);
2330 rx.post_skip += mlen;
2331 goto skip;
2332
2333 }
2334 map->flags |= ENMAP_LOADED;
2335
2336 if (rx.wait) {
2337 /* out of DRQs - wait */
2338 uma_zfree(sc->map_zone, map);
2339
2340 m->m_pkthdr.csum_data = cur;
2341 _IF_ENQUEUE(&slot->q, m);
2342 EN_COUNT(sc->stats.rxdrqout);
2343
2344 sc->need_drqs = 1; /* flag condition */
2345 return;
2346
2347 }
2348 (void)m_length(m, &lastm);
2349 lastm->m_len -= roundup(mlen, sizeof(uint32_t)) - mlen;
2350
2351 m->m_pkthdr.rcvif = (void *)map;
2352 _IF_ENQUEUE(&slot->indma, m);
2353
2354 /* get next packet in this slot */
2355 goto same_vci;
2356 }
2357 skip:
2358 /*
2359	 * Here we end up if we should drop the packet from the receive buffer.
2360	 * The number of bytes to drop is in rx.post_skip. We can do this with
2361	 * one JK entry. If we don't even have that one - wait.
2362 */
2363 if (sc->drq_free == 0) {
2364 sc->need_drqs = 1; /* flag condition */
2365 return;
2366 }
2367 rx.post_skip += rx.pre_skip;
2368 DBG(sc, SERV, ("rx%td: skipping %u", slot - sc->rxslot, rx.post_skip));
2369
2370 /* advance buffer address */
2371 EN_WRAPADD(slot->start, slot->stop, cur, rx.post_skip);
2372
2373 /* write DRQ entry */
2374 if (sc->is_adaptec)
2375 en_write(sc, sc->drq_us,
2376 MID_MK_RXQ_ADP(WORD_IDX(slot->start, cur),
2377 vc->vcc.vci, MID_DMA_END, MIDDMA_JK));
2378 else
2379 en_write(sc, sc->drq_us,
2380 MID_MK_RXQ_ENI(WORD_IDX(slot->start, cur),
2381 vc->vcc.vci, MID_DMA_END, MIDDMA_JK));
2382 en_write(sc, sc->drq_us + 4, 0);
2383 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_us, 8);
2384 sc->drq_free--;
2385
2386 /* signal to RX interrupt */
2387 sc->drq[MID_DRQ_A2REG(sc->drq_us)] = EN_DQ_MK(slot - sc->rxslot, 0);
2388 slot->cur = cur;
2389
2390 /* signal to card */
2391 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_us));
2392
2393 goto same_vci;
2394}
2395
2396/*
2397 * interrupt handler
2398 *
2399 * LOCK: unlocked, needed
2400 */
2401void
2402en_intr(void *arg)
2403{
2404 struct en_softc *sc = arg;
2405 uint32_t reg, kick, mask;
2406 int lcv, need_softserv;
2407
2408 EN_LOCK(sc);
2409
2410 reg = en_read(sc, MID_INTACK);
2411 DBG(sc, INTR, ("interrupt=0x%b", reg, MID_INTBITS));
2412
2413 if ((reg & MID_INT_ANY) == 0) {
2414 EN_UNLOCK(sc);
2415 return;
2416 }
2417
2418 /*
2419 * unexpected errors that need a reset
2420 */
2421 if ((reg & (MID_INT_IDENT | MID_INT_LERR | MID_INT_DMA_ERR)) != 0) {
2422 device_printf(sc->dev, "unexpected interrupt=0x%b, "
2423 "resetting\n", reg, MID_INTBITS);
2424#ifdef EN_DEBUG
2425 panic("en: unexpected error");
2426#else
2427 en_reset_ul(sc);
2428 en_init(sc);
2429#endif
2430 EN_UNLOCK(sc);
2431 return;
2432 }
2433
2434 if (reg & MID_INT_SUNI)
2435 utopia_intr(&sc->utopia);
2436
2437 kick = 0;
2438 if (reg & MID_INT_TX)
2439 kick |= en_intr_tx(sc, reg);
2440
2441 if (reg & MID_INT_DMA_TX)
2442 kick |= en_intr_tx_dma(sc);
2443
2444 /*
2445 * kick xmit channels as needed.
2446 */
2447 if (kick) {
2448 DBG(sc, INTR, ("tx kick mask = 0x%x", kick));
2449 for (mask = 1, lcv = 0 ; lcv < EN_NTX ; lcv++, mask = mask * 2)
2450 if ((kick & mask) && _IF_QLEN(&sc->txslot[lcv].q) != 0)
2451 en_txdma(sc, &sc->txslot[lcv]);
2452 }
2453
2454 need_softserv = 0;
2455 if (reg & MID_INT_DMA_RX)
2456 need_softserv |= en_intr_rx_dma(sc);
2457
2458 if (reg & MID_INT_SERVICE)
2459 need_softserv |= en_intr_service(sc);
2460
2461 if (need_softserv)
2462 en_service(sc);
2463
2464 /*
2465 * keep our stats
2466 */
2467 if (reg & MID_INT_DMA_OVR) {
2468 EN_COUNT(sc->stats.dmaovr);
2469 DBG(sc, INTR, ("MID_INT_DMA_OVR"));
2470 }
2471 reg = en_read(sc, MID_STAT);
2472 sc->stats.otrash += MID_OTRASH(reg);
2473 sc->stats.vtrash += MID_VTRASH(reg);
2474
2475 EN_UNLOCK(sc);
2476}
2477
2478/*
2479 * Read at most n SUNI regs starting at reg into val
2480 */
2481static int
2482en_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n)
2483{
2484 struct en_softc *sc = ifatm->ifp->if_softc;
2485 u_int i;
2486
2487 EN_CHECKLOCK(sc);
2488 if (reg >= MID_NSUNI)
2489 return (EINVAL);
2490 if (reg + *n > MID_NSUNI)
2491 *n = MID_NSUNI - reg;
2492
2493 for (i = 0; i < *n; i++)
2494 val[i] = en_read(sc, MID_SUNIOFF + 4 * (reg + i));
2495
2496 return (0);
2497}
2498
2499/*
2500 * change the bits given by mask to them in val in register reg
2501 */
2502static int
2503en_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
2504{
2505 struct en_softc *sc = ifatm->ifp->if_softc;
2506 uint32_t regval;
2507
2508 EN_CHECKLOCK(sc);
2509 if (reg >= MID_NSUNI)
2510 return (EINVAL);
2511 regval = en_read(sc, MID_SUNIOFF + 4 * reg);
2512 regval = (regval & ~mask) | (val & mask);
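	/* e.g. mask = 0x03, val = 0x01: clears bit 1 and sets bit 0 only */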
2513 en_write(sc, MID_SUNIOFF + 4 * reg, regval);
2514 return (0);
2515}
2516
2517static const struct utopia_methods en_utopia_methods = {
2518 en_utopia_readregs,
2519 en_utopia_writereg
2520};
2521
2522/*********************************************************************/
2523/*
2524 * Probing the DMA brokeness of the card
2525 */
2526
2527/*
2528 * Physical address load helper function for DMA probe
2529 *
2530 * LOCK: unlocked, not needed
2531 */
2532static void
2533en_dmaprobe_load(void *uarg, bus_dma_segment_t *segs, int nseg, int error)
2534{
2535 if (error == 0)
2536 *(bus_addr_t *)uarg = segs[0].ds_addr;
2537}
2538
2539/*
2540 * en_dmaprobe: helper function for en_attach.
2541 *
2542 * see how the card handles DMA by running a few DMA tests. we need
2543 * to figure out the largest number of bytes we can DMA in one burst
2544 * ("bestburstlen"), and if the starting address for a burst needs to
2545 * be aligned on any sort of boundary or not ("alburst").
2546 *
2547 * Things turn out more complex than that, because on my (harti) brand
2548	 * new motherboard (2.4GHz) we can do 64-byte aligned DMAs, but anything
2549	 * larger than 4 bytes fails (with an RX DMA timeout) for physical
2550	 * addresses that end with 0xc. Therefore we search not only for the
2551	 * largest supported burst (hopefully 64) but also for the largest
2552	 * supported unaligned size. If that turns out to be less than 32 bytes,
2553	 * set the noalbursts flag. That flag is only set if alburst is set, too.
2554 */
2555
2556/*
2557 * en_dmaprobe_doit: do actual testing for the DMA test.
2558 * Cycle through all bursts sizes from 8 up to 64 and try whether it works.
2559 * Return the largest one that works.
2560 *
2561 * LOCK: unlocked, not needed
2562 */
2563static int
2564en_dmaprobe_doit(struct en_softc *sc, uint8_t *sp, bus_addr_t psp)
2565{
2566 uint8_t *dp = sp + MIDDMA_MAXBURST;
2567 bus_addr_t pdp = psp + MIDDMA_MAXBURST;
2568 int lcv, retval = 4, cnt;
2569 uint32_t reg, bcode, midvloc;
2570
2571 if (sc->en_busreset)
2572 sc->en_busreset(sc);
2573 en_write(sc, MID_RESID, 0x0); /* reset card before touching RAM */
2574
2575 /*
2576 * set up a 1k buffer at MID_BUFOFF
2577 */
2578 midvloc = ((MID_BUFOFF - MID_RAMOFF) / sizeof(uint32_t))
2579 >> MIDV_LOCTOPSHFT;
2580 en_write(sc, MIDX_PLACE(0), MIDX_MKPLACE(en_k2sz(1), midvloc));
2581 en_write(sc, MID_VC(0), (midvloc << MIDV_LOCSHIFT)
2582 | (en_k2sz(1) << MIDV_SZSHIFT) | MIDV_TRASH);
2583 en_write(sc, MID_DST_RP(0), 0);
2584 en_write(sc, MID_WP_ST_CNT(0), 0);
2585
2586 /* set up sample data */
2587 for (lcv = 0 ; lcv < MIDDMA_MAXBURST; lcv++)
2588 sp[lcv] = lcv + 1;
2589
2590 /* enable DMA (only) */
2591 en_write(sc, MID_MAST_CSR, MID_MCSR_ENDMA);
2592
2593 sc->drq_chip = MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX));
2594 sc->dtq_chip = MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX));
2595
2596 /*
2597 * try it now . . . DMA it out, then DMA it back in and compare
2598 *
2599 * note: in order to get the dma stuff to reverse directions it wants
2600 * the "end" flag set! since we are not dma'ing valid data we may
2601 * get an ident mismatch interrupt (which we will ignore).
2602 */
2603 DBG(sc, DMA, ("test sp=%p/%#lx, dp=%p/%#lx",
2604 sp, (u_long)psp, dp, (u_long)pdp));
2605 for (lcv = 8 ; lcv <= MIDDMA_MAXBURST ; lcv = lcv * 2) {
2606 DBG(sc, DMA, ("test lcv=%d", lcv));
2607
2608 /* zero SRAM and dest buffer */
2609 bus_space_set_region_4(sc->en_memt, sc->en_base,
2610 MID_BUFOFF, 0, 1024 / 4);
2611 bzero(dp, MIDDMA_MAXBURST);
2612
2613 bcode = en_sz2b(lcv);
2614
2615 /* build lcv-byte-DMA x NBURSTS */
2616 if (sc->is_adaptec)
2617 en_write(sc, sc->dtq_chip,
2618 MID_MK_TXQ_ADP(lcv, 0, MID_DMA_END, 0));
2619 else
2620 en_write(sc, sc->dtq_chip,
2621 MID_MK_TXQ_ENI(1, 0, MID_DMA_END, bcode));
2622 en_write(sc, sc->dtq_chip + 4, psp);
2623 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, sc->dtq_chip, 8);
2624 en_write(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_chip));
2625
2626 cnt = 1000;
2627 while ((reg = en_readx(sc, MID_DMA_RDTX)) !=
2628 MID_DTQ_A2REG(sc->dtq_chip)) {
2629 DELAY(1);
2630 if (--cnt == 0) {
2631 DBG(sc, DMA, ("unexpected timeout in tx "
2632 "DMA test\n alignment=0x%lx, burst size=%d"
2633 ", dma addr reg=%#x, rdtx=%#x, stat=%#x\n",
2634 (u_long)sp & 63, lcv,
2635 en_read(sc, MID_DMA_ADDR), reg,
2636 en_read(sc, MID_INTSTAT)));
2637 return (retval);
2638 }
2639 }
2640
2641 reg = en_read(sc, MID_INTACK);
2642 if ((reg & MID_INT_DMA_TX) != MID_INT_DMA_TX) {
2643 DBG(sc, DMA, ("unexpected status in tx DMA test: %#x\n",
2644 reg));
2645 return (retval);
2646 }
2647 /* re-enable DMA (only) */
2648 en_write(sc, MID_MAST_CSR, MID_MCSR_ENDMA);
2649
2650 /* "return to sender..." address is known ... */
2651
2652 /* build lcv-byte-DMA x NBURSTS */
2653 if (sc->is_adaptec)
2654 en_write(sc, sc->drq_chip,
2655 MID_MK_RXQ_ADP(lcv, 0, MID_DMA_END, 0));
2656 else
2657 en_write(sc, sc->drq_chip,
2658 MID_MK_RXQ_ENI(1, 0, MID_DMA_END, bcode));
2659 en_write(sc, sc->drq_chip + 4, pdp);
2660 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_chip, 8);
2661 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
2662 cnt = 1000;
2663 while ((reg = en_readx(sc, MID_DMA_RDRX)) !=
2664 MID_DRQ_A2REG(sc->drq_chip)) {
2665 DELAY(1);
2667 if (--cnt == 0) {
2668 DBG(sc, DMA, ("unexpected timeout in rx "
2669 "DMA test, rdrx=%#x\n", reg));
2670 return (retval);
2671 }
2672 }
2673 reg = en_read(sc, MID_INTACK);
2674 if ((reg & MID_INT_DMA_RX) != MID_INT_DMA_RX) {
2675 DBG(sc, DMA, ("unexpected status in rx DMA "
2676 "test: 0x%x\n", reg));
2677 return (retval);
2678 }
2679 if (bcmp(sp, dp, lcv)) {
2680 DBG(sc, DMA, ("DMA test failed! lcv=%d, sp=%p, "
2681 "dp=%p", lcv, sp, dp));
2682 return (retval);
2683 }
2684
2685 retval = lcv;
2686 }
2687 return (retval); /* studly 64 byte DMA present! oh baby!! */
2688}
2689
2690/*
2691 * Find the best DMA parameters
2692 *
2693 * LOCK: unlocked, not needed
2694 */
2695static void
2696en_dmaprobe(struct en_softc *sc)
2697{
2698 bus_dma_tag_t tag;
2699 bus_dmamap_t map;
2700 int err;
2701 void *buffer;
2702 int bestalgn, lcv, try, bestnoalgn;
2703 bus_addr_t phys;
2704 uint8_t *addr;
2705
2706 sc->alburst = 0;
2707 sc->noalbursts = 0;
2708
2709 /*
2710 * Allocate some DMA-able memory.
2711 * We need 3 times the max burst size aligned to the max burst size.
2712 */
2713 err = bus_dma_tag_create(NULL, MIDDMA_MAXBURST, 0,
2714 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2715 3 * MIDDMA_MAXBURST, 1, 3 * MIDDMA_MAXBURST, 0,
2716 NULL, NULL, &tag);
2717 if (err)
2718 panic("%s: cannot create test DMA tag %d", __func__, err);
2719
2720 err = bus_dmamem_alloc(tag, &buffer, 0, &map);
2721 if (err)
2722 panic("%s: cannot allocate test DMA memory %d", __func__, err);
2723
2724 err = bus_dmamap_load(tag, map, buffer, 3 * MIDDMA_MAXBURST,
2725 en_dmaprobe_load, &phys, BUS_DMA_NOWAIT);
2726 if (err)
2727 panic("%s: cannot load test DMA map %d", __func__, err);
2728 addr = buffer;
2729 DBG(sc, DMA, ("phys=%#lx addr=%p", (u_long)phys, addr));
2730
2731 /*
2732 * Now get the best burst size of the aligned case.
2733 */
2734 bestalgn = bestnoalgn = en_dmaprobe_doit(sc, addr, phys);
2735
2736 /*
2737 * Now try unaligned.
2738 */
2739 for (lcv = 4; lcv < MIDDMA_MAXBURST; lcv += 4) {
2740 try = en_dmaprobe_doit(sc, addr + lcv, phys + lcv);
2741
2742 if (try < bestnoalgn)
2743 bestnoalgn = try;
2744 }
2745
2746 if (bestnoalgn < bestalgn) {
2747 sc->alburst = 1;
2748 if (bestnoalgn < 32)
2749 sc->noalbursts = 1;
2750 }
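	/*
	 * Example: if the aligned probe reaches 64 bytes but some
	 * unaligned start addresses only manage 16, alburst is set and,
	 * because 16 < 32, noalbursts is set as well.
	 */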
2751
2752 sc->bestburstlen = bestalgn;
2753 sc->bestburstshift = en_log2(bestalgn);
2754 sc->bestburstmask = sc->bestburstlen - 1; /* must be power of 2 */
2755 sc->bestburstcode = en_sz2b(bestalgn);
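	/* e.g. bestalgn = 64: bestburstshift = 6, bestburstmask = 0x3f */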
2756
2757 /*
2758 * Reset the chip before freeing the buffer. It may still be trying
2759 * to DMA.
2760 */
2761 if (sc->en_busreset)
2762 sc->en_busreset(sc);
2763 en_write(sc, MID_RESID, 0x0); /* reset card before touching RAM */
2764
2765 DELAY(10000); /* may still do DMA */
2766
2767 /*
2768 * Free the DMA stuff
2769 */
2770 bus_dmamap_unload(tag, map);
2771 bus_dmamem_free(tag, buffer, map);
2772 bus_dma_tag_destroy(tag);
2773}
2774
2775/*********************************************************************/
2776/*
2777 * Attach/detach.
2778 */
2779
2780/*
2781 * Attach to the card.
2782 *
2783 * LOCK: unlocked, not needed (but initialized)
2784 */
2785int
2786en_attach(struct en_softc *sc)
2787{
2788 struct ifnet *ifp = sc->ifp;
2789 int sz;
2790 uint32_t reg, lcv, check, ptr, sav, midvloc;
2791
2792#ifdef EN_DEBUG
2793 sc->debug = EN_DEBUG;
2794#endif
2795
2796 /*
2797 * Probe card to determine memory size.
2798 *
2799 * The stupid ENI card always reports to PCI that it needs 4MB of
2800 * space (2MB regs and 2MB RAM). If it has less than 2MB RAM the
2801 * addresses wrap in the RAM address space (i.e. on a 512KB card
2802 * addresses 0x3ffffc, 0x37fffc, and 0x2ffffc are aliases for
2803 * 0x27fffc [note that RAM starts at offset 0x200000]).
2804 */
2805
2806 /* reset card before touching RAM */
2807 if (sc->en_busreset)
2808 sc->en_busreset(sc);
2809 en_write(sc, MID_RESID, 0x0);
2810
2811 for (lcv = MID_PROBEOFF; lcv <= MID_MAXOFF ; lcv += MID_PROBSIZE) {
2812 en_write(sc, lcv, lcv); /* data[address] = address */
2813 for (check = MID_PROBEOFF; check < lcv ;check += MID_PROBSIZE) {
2814 reg = en_read(sc, check);
2815 if (reg != check)
2816 /* found an alias! - quit */
2817 goto done_probe;
2818 }
2819 }
2820 done_probe:
2821 lcv -= MID_PROBSIZE; /* take one step back */
2822 sc->en_obmemsz = (lcv + 4) - MID_RAMOFF;
2823
2824 /*
2825 * determine the largest DMA burst supported
2826 */
2827 en_dmaprobe(sc);
2828
2829 /*
2830 * "hello world"
2831 */
2832
2833 /* reset */
2834 if (sc->en_busreset)
2835 sc->en_busreset(sc);
2836 en_write(sc, MID_RESID, 0x0); /* reset */
2837
2838 /* zero memory */
2839 bus_space_set_region_4(sc->en_memt, sc->en_base,
2840 MID_RAMOFF, 0, sc->en_obmemsz / 4);
2841
2842 reg = en_read(sc, MID_RESID);
2843
2844 device_printf(sc->dev, "ATM midway v%d, board IDs %d.%d, %s%s%s, "
2845 "%ldKB on-board RAM\n", MID_VER(reg), MID_MID(reg), MID_DID(reg),
2846 (MID_IS_SABRE(reg)) ? "sabre controller, " : "",
2847 (MID_IS_SUNI(reg)) ? "SUNI" : "Utopia",
2848 (!MID_IS_SUNI(reg) && MID_IS_UPIPE(reg)) ? " (pipelined)" : "",
2849 (long)sc->en_obmemsz / 1024);
2850
2851 /*
2852 * fill in common ATM interface stuff
2853 */
2854 IFP2IFATM(sc->ifp)->mib.hw_version = (MID_VER(reg) << 16) |
2855 (MID_MID(reg) << 8) | MID_DID(reg);
2856 if (MID_DID(reg) & 0x4)
2857 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155;
2858 else
2859 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155;
2860
2861 IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M;
2862 IFP2IFATM(sc->ifp)->mib.vpi_bits = 0;
2863 IFP2IFATM(sc->ifp)->mib.vci_bits = MID_VCI_BITS;
2864 IFP2IFATM(sc->ifp)->mib.max_vccs = MID_N_VC;
2865 IFP2IFATM(sc->ifp)->mib.max_vpcs = 0;
2866
2867 if (sc->is_adaptec) {
2868 IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_ADP155P;
2869 if (sc->bestburstlen == 64 && sc->alburst == 0)
2870 device_printf(sc->dev,
2871 "passed 64 byte DMA test\n");
2872 else
2873 device_printf(sc->dev, "FAILED DMA TEST: "
2874 "burst=%d, alburst=%d\n", sc->bestburstlen,
2875 sc->alburst);
2876 } else {
2877 IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_ENI155P;
2878 device_printf(sc->dev, "maximum DMA burst length = %d "
2879 "bytes%s\n", sc->bestburstlen, sc->alburst ?
2880 sc->noalbursts ? " (no large bursts)" : " (must align)" :
2881 "");
2882 }
2883
2884 /*
2885 * link into network subsystem and prepare card
2886 */
2887 sc->ifp->if_softc = sc;
2888 ifp->if_flags = IFF_SIMPLEX;
2889 ifp->if_ioctl = en_ioctl;
2890 ifp->if_start = en_start;
2891
2892 mtx_init(&sc->en_mtx, device_get_nameunit(sc->dev),
2893 MTX_NETWORK_LOCK, MTX_DEF);
2894 cv_init(&sc->cv_close, "VC close");
2895
2896 /*
2897 * Make the sysctl tree
2898 */
2899 sysctl_ctx_init(&sc->sysctl_ctx);
2900
2901 if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
2902 SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
2903 device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "")) == NULL)
2904 goto fail;
2905
2906 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2907 OID_AUTO, "istats", CTLFLAG_RD, sc, 0, en_sysctl_istats,
2908 "S", "internal statistics") == NULL)
2909 goto fail;
2910
2911#ifdef EN_DEBUG
2912 if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2913 OID_AUTO, "debug", CTLFLAG_RW , &sc->debug, 0, "") == NULL)
2914 goto fail;
2915#endif
2916
2917 IFP2IFATM(sc->ifp)->phy = &sc->utopia;
2918 utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->en_mtx,
2919 &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2920 &en_utopia_methods);
2921 utopia_init_media(&sc->utopia);
2922
2923	MGET(sc->padbuf, M_WAIT, MT_DATA);
2938 bzero(sc->padbuf->m_data, MLEN);
2939
2940 if (bus_dma_tag_create(NULL, 1, 0,
2941 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2942 EN_TXSZ * 1024, EN_MAX_DMASEG, EN_TXSZ * 1024, 0,
2943 NULL, NULL, &sc->txtag))
2944 goto fail;
2945
2946 sc->map_zone = uma_zcreate("en dma maps", sizeof(struct en_map),
2947 en_map_ctor, en_map_dtor, NULL, en_map_fini, UMA_ALIGN_PTR,
2948 UMA_ZONE_ZINIT);
2949 if (sc->map_zone == NULL)
2950 goto fail;
2951 uma_zone_set_max(sc->map_zone, EN_MAX_MAPS);
2952
2953 /*
2954 * init softc
2955 */
2956 sc->vccs = malloc(MID_N_VC * sizeof(sc->vccs[0]),
2957 M_DEVBUF, M_ZERO | M_WAITOK);
2958
2959 sz = sc->en_obmemsz - (MID_BUFOFF - MID_RAMOFF);
2960 ptr = sav = MID_BUFOFF;
2961 ptr = roundup(ptr, EN_TXSZ * 1024); /* align */
2962 sz = sz - (ptr - sav);
2963 if (EN_TXSZ*1024 * EN_NTX > sz) {
2964 device_printf(sc->dev, "EN_NTX/EN_TXSZ too big\n");
2965 goto fail;
2966 }
2967 for (lcv = 0 ;lcv < EN_NTX ;lcv++) {
2968 sc->txslot[lcv].mbsize = 0;
2969 sc->txslot[lcv].start = ptr;
2970 ptr += (EN_TXSZ * 1024);
2971 sz -= (EN_TXSZ * 1024);
2972 sc->txslot[lcv].stop = ptr;
2973 sc->txslot[lcv].nref = 0;
2974 DBG(sc, INIT, ("tx%d: start 0x%x, stop 0x%x", lcv,
2975 sc->txslot[lcv].start, sc->txslot[lcv].stop));
2976 }
2977
2978 sav = ptr;
2979 ptr = roundup(ptr, EN_RXSZ * 1024); /* align */
2980 sz = sz - (ptr - sav);
2981 sc->en_nrx = sz / (EN_RXSZ * 1024);
2982 if (sc->en_nrx <= 0) {
2983 device_printf(sc->dev, "EN_NTX/EN_TXSZ/EN_RXSZ too big\n");
2984 goto fail;
2985 }
2986
2987 /*
2988 * ensure that there is always one VC slot on the service list free
2989 * so that we can tell the difference between a full and empty list.
2990 */
2991 if (sc->en_nrx >= MID_N_VC)
2992 sc->en_nrx = MID_N_VC - 1;
2993
2994 for (lcv = 0 ; lcv < sc->en_nrx ; lcv++) {
2995 sc->rxslot[lcv].vcc = NULL;
2996 midvloc = sc->rxslot[lcv].start = ptr;
2997 ptr += (EN_RXSZ * 1024);
2998 sz -= (EN_RXSZ * 1024);
2999 sc->rxslot[lcv].stop = ptr;
3000 midvloc = midvloc - MID_RAMOFF;
3001 /* mask, cvt to words */
3002 midvloc = (midvloc & ~((EN_RXSZ*1024) - 1)) >> 2;
3003 /* we only want the top 11 bits */
3004 midvloc = midvloc >> MIDV_LOCTOPSHFT;
3005 midvloc = (midvloc & MIDV_LOCMASK) << MIDV_LOCSHIFT;
3006 sc->rxslot[lcv].mode = midvloc |
3007 (en_k2sz(EN_RXSZ) << MIDV_SZSHIFT) | MIDV_TRASH;
3008
3009 DBG(sc, INIT, ("rx%d: start 0x%x, stop 0x%x, mode 0x%x", lcv,
3010 sc->rxslot[lcv].start, sc->rxslot[lcv].stop,
3011 sc->rxslot[lcv].mode));
3012 }
3013
3014 device_printf(sc->dev, "%d %dKB receive buffers, %d %dKB transmit "
3015 "buffers\n", sc->en_nrx, EN_RXSZ, EN_NTX, EN_TXSZ);
3016 device_printf(sc->dev, "end station identifier (mac address) "
3017 "%6D\n", IFP2IFATM(sc->ifp)->mib.esi, ":");
3018
3019 /*
3020 * Start SUNI stuff. This will call our readregs/writeregs
3021 * functions and these assume the lock to be held so we must get it
3022 * here.
3023 */
3024 EN_LOCK(sc);
3025 utopia_start(&sc->utopia);
3026 utopia_reset(&sc->utopia);
3027 EN_UNLOCK(sc);
3028
3029 /*
3030 * final commit
3031 */
3032 atm_ifattach(ifp);
3033
3034#ifdef ENABLE_BPF
3035 bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
3036#endif
3037
3038 return (0);
3039
3040 fail:
3041 en_destroy(sc);
3042 return (-1);
3043}
3044
3045/*
3046 * Free all internal resources. No access to bus resources here.
3047 * No locking required here (interrupt is already disabled).
3048 *
3049 * LOCK: unlocked, needed (but destroyed)
3050 */
3051void
3052en_destroy(struct en_softc *sc)
3053{
3054 u_int i;
3055
3056 if (sc->utopia.state & UTP_ST_ATTACHED) {
3057 /* these assume the lock to be held */
3058 EN_LOCK(sc);
3059 utopia_stop(&sc->utopia);
3060 utopia_detach(&sc->utopia);
3061 EN_UNLOCK(sc);
3062 }
3063
3064 if (sc->vccs != NULL) {
3065 /* get rid of sticky VCCs */
3066 for (i = 0; i < MID_N_VC; i++)
3067 if (sc->vccs[i] != NULL)
3068 uma_zfree(en_vcc_zone, sc->vccs[i]);
3069 free(sc->vccs, M_DEVBUF);
3070 }
3071
3072 if (sc->padbuf != NULL)
3073 m_free(sc->padbuf);
3074
3075 /*
3076 * Destroy the map zone before the tag (the fini function will
3077 * destroy the DMA maps using the tag)
3078 */
3079 if (sc->map_zone != NULL)
3080 uma_zdestroy(sc->map_zone);
3081
3082 if (sc->txtag != NULL)
3083 bus_dma_tag_destroy(sc->txtag);
3084
3085 (void)sysctl_ctx_free(&sc->sysctl_ctx);
3086
3087 cv_destroy(&sc->cv_close);
3088 mtx_destroy(&sc->en_mtx);
3089}
3090
3091/*
3092 * Module loaded/unloaded
3093 */
3094int
3095en_modevent(module_t mod __unused, int event, void *arg __unused)
3096{
3097
3098 switch (event) {
3099
3100 case MOD_LOAD:
3101 en_vcc_zone = uma_zcreate("EN vccs", sizeof(struct en_vcc),
3102 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
3103 if (en_vcc_zone == NULL)
3104 return (ENOMEM);
3105 break;
3106
3107 case MOD_UNLOAD:
3108 uma_zdestroy(en_vcc_zone);
3109 break;
3110 }
3111 return (0);
3112}
3113
3114/*********************************************************************/
3115/*
3116 * Debugging support
3117 */
3118
3119#ifdef EN_DDBHOOK
3120/*
3121 * functions we can call from ddb
3122 */
3123
3124/*
3125 * en_dump: dump the state
3126 */
3127#define END_SWSL 0x00000040 /* swsl state */
3128#define END_DRQ 0x00000020 /* drq state */
3129#define END_DTQ 0x00000010 /* dtq state */
3130#define END_RX 0x00000008 /* rx state */
3131#define END_TX 0x00000004 /* tx state */
3132#define END_MREGS 0x00000002 /* registers */
3133#define END_STATS 0x00000001 /* dump stats */
3134
3135#define END_BITS "\20\7SWSL\6DRQ\5DTQ\4RX\3TX\2MREGS\1STATS"
3136
3137static void
3138en_dump_stats(const struct en_stats *s)
3139{
3140 printf("en_stats:\n");
3141 printf("\t%d/%d mfix (%d failed)\n", s->mfixaddr, s->mfixlen,
3142 s->mfixfail);
3143 printf("\t%d rx dma overflow interrupts\n", s->dmaovr);
3144 printf("\t%d times out of TX space and stalled\n", s->txoutspace);
3145 printf("\t%d times out of DTQs\n", s->txdtqout);
3146 printf("\t%d times launched a packet\n", s->launch);
3147 printf("\t%d times pulled the hw service list\n", s->hwpull);
3148 printf("\t%d times pushed a vci on the sw service list\n", s->swadd);
3149 printf("\t%d times RX pulled an mbuf from Q that wasn't ours\n",
3150 s->rxqnotus);
3151 printf("\t%d times RX pulled a good mbuf from Q\n", s->rxqus);
3152 printf("\t%d times ran out of DRQs\n", s->rxdrqout);
3153 printf("\t%d transmit packets dropped due to mbsize\n", s->txmbovr);
3154 printf("\t%d cells trashed due to turned off rxvc\n", s->vtrash);
3155 printf("\t%d cells trashed due to totally full buffer\n", s->otrash);
3156 	printf("\t%d cells trashed due to almost full buffer\n", s->ttrash);
3157 printf("\t%d rx mbuf allocation failures\n", s->rxmbufout);
3158 printf("\t%d times out of tx maps\n", s->txnomap);
3159#ifdef NATM
3160#ifdef NATM_STAT
3161 printf("\tnatmintr so_rcv: ok/drop cnt: %d/%d, ok/drop bytes: %d/%d\n",
3162 natm_sookcnt, natm_sodropcnt, natm_sookbytes, natm_sodropbytes);
3163#endif
3164#endif
3165}
3166
3167static void
3168en_dump_mregs(struct en_softc *sc)
3169{
3170 u_int cnt;
3171
3172 printf("mregs:\n");
3173 printf("resid = 0x%x\n", en_read(sc, MID_RESID));
3174 printf("interrupt status = 0x%b\n",
3175 (int)en_read(sc, MID_INTSTAT), MID_INTBITS);
3176 printf("interrupt enable = 0x%b\n",
3177 (int)en_read(sc, MID_INTENA), MID_INTBITS);
3178 printf("mcsr = 0x%b\n", (int)en_read(sc, MID_MAST_CSR), MID_MCSRBITS);
3179 printf("serv_write = [chip=%u] [us=%u]\n", en_read(sc, MID_SERV_WRITE),
3180 MID_SL_A2REG(sc->hwslistp));
3181 printf("dma addr = 0x%x\n", en_read(sc, MID_DMA_ADDR));
3182 printf("DRQ: chip[rd=0x%x,wr=0x%x], sc[chip=0x%x,us=0x%x]\n",
3183 MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX)),
3184 MID_DRQ_REG2A(en_read(sc, MID_DMA_WRRX)), sc->drq_chip, sc->drq_us);
3185 printf("DTQ: chip[rd=0x%x,wr=0x%x], sc[chip=0x%x,us=0x%x]\n",
3186 MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX)),
3187 MID_DTQ_REG2A(en_read(sc, MID_DMA_WRTX)), sc->dtq_chip, sc->dtq_us);
3188
3189 	printf("  unusual txspeeds:");
3190 for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
3191 		if (sc->vccs[cnt] != NULL && sc->vccs[cnt]->txspeed)
3192 printf(" vci%d=0x%x", cnt, sc->vccs[cnt]->txspeed);
3193 printf("\n");
3194
3195 printf(" rxvc slot mappings:");
3196 for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
3197 		if (sc->vccs[cnt] != NULL && sc->vccs[cnt]->rxslot != NULL)
3198 printf(" %d->%td", cnt,
3199 sc->vccs[cnt]->rxslot - sc->rxslot);
3200 printf("\n");
3201}
3202
3203static void
3204en_dump_tx(struct en_softc *sc)
3205{
3206 u_int slot;
3207
3208 printf("tx:\n");
3209 for (slot = 0 ; slot < EN_NTX; slot++) {
3210 printf("tx%d: start/stop/cur=0x%x/0x%x/0x%x [%d] ", slot,
3211 sc->txslot[slot].start, sc->txslot[slot].stop,
3212 sc->txslot[slot].cur,
3213 (sc->txslot[slot].cur - sc->txslot[slot].start) / 4);
3214 printf("mbsize=%d, bfree=%d\n", sc->txslot[slot].mbsize,
3215 sc->txslot[slot].bfree);
3216 printf("txhw: base_address=0x%x, size=%u, read=%u, "
3217 "descstart=%u\n",
3218 (u_int)MIDX_BASE(en_read(sc, MIDX_PLACE(slot))),
3219 MIDX_SZ(en_read(sc, MIDX_PLACE(slot))),
3220 en_read(sc, MIDX_READPTR(slot)),
3221 en_read(sc, MIDX_DESCSTART(slot)));
3222 }
3223}
3224
3225static void
3226en_dump_rx(struct en_softc *sc)
3227{
3228 struct en_rxslot *slot;
3229
3230 printf(" recv slots:\n");
3231 for (slot = sc->rxslot ; slot < &sc->rxslot[sc->en_nrx]; slot++) {
3232 printf("rx%td: start/stop/cur=0x%x/0x%x/0x%x mode=0x%x ",
3233 slot - sc->rxslot, slot->start, slot->stop, slot->cur,
3234 slot->mode);
3235 if (slot->vcc != NULL) {
3236 printf("vci=%u\n", slot->vcc->vcc.vci);
3237 printf("RXHW: mode=0x%x, DST_RP=0x%x, WP_ST_CNT=0x%x\n",
3238 en_read(sc, MID_VC(slot->vcc->vcc.vci)),
3239 en_read(sc, MID_DST_RP(slot->vcc->vcc.vci)),
3240 en_read(sc, MID_WP_ST_CNT(slot->vcc->vcc.vci)));
3241 		} else printf("\n");	/* keep one slot per line */
3242 }
3243}
3244
3245/*
3246  * This is only correct for non-Adaptec adapters.
3247 */
3248static void
3249en_dump_dtqs(struct en_softc *sc)
3250{
3251 uint32_t ptr, reg;
3252
3253 printf(" dtq [need_dtqs=%d,dtq_free=%d]:\n", sc->need_dtqs,
3254 sc->dtq_free);
3255 ptr = sc->dtq_chip;
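	/*
	 * Walk from the chip's read pointer up to our write pointer;
	 * each queue entry is two words (a control word followed by the
	 * DMA address), hence the stride of 8 bytes.
	 */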
3256 while (ptr != sc->dtq_us) {
3257 reg = en_read(sc, ptr);
3258 printf("\t0x%x=[%#x cnt=%d, chan=%d, end=%d, type=%d @ 0x%x]\n",
3259 sc->dtq[MID_DTQ_A2REG(ptr)], reg, MID_DMA_CNT(reg),
3260 MID_DMA_TXCHAN(reg), (reg & MID_DMA_END) != 0,
3261 MID_DMA_TYPE(reg), en_read(sc, ptr + 4));
3262 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, ptr, 8);
3263 }
3264}
3265
3266static void
3267en_dump_drqs(struct en_softc *sc)
3268{
3269 uint32_t ptr, reg;
3270
3271 printf(" drq [need_drqs=%d,drq_free=%d]:\n", sc->need_drqs,
3272 sc->drq_free);
3273 ptr = sc->drq_chip;
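	/* same two-word entry layout as the DTQ ring, but keyed by RX VCI */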
3274 while (ptr != sc->drq_us) {
3275 reg = en_read(sc, ptr);
3276 printf("\t0x%x=[cnt=%d, chan=%d, end=%d, type=%d @ 0x%x]\n",
3277 sc->drq[MID_DRQ_A2REG(ptr)], MID_DMA_CNT(reg),
3278 MID_DMA_RXVCI(reg), (reg & MID_DMA_END) != 0,
3279 MID_DMA_TYPE(reg), en_read(sc, ptr + 4));
3280 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, ptr, 8);
3281 }
3282}
3283
3284/* Do not staticize - meant for calling from DDB! */
3285int
3286en_dump(int unit, int level)
3287{
3288 struct en_softc *sc;
3289 int lcv, cnt;
3290 devclass_t dc;
3291 int maxunit;
3292
3293 dc = devclass_find("en");
3294 if (dc == NULL) {
3295 printf("%s: can't find devclass!\n", __func__);
3296 return (0);
3297 }
3298 maxunit = devclass_get_maxunit(dc);
3299 for (lcv = 0 ; lcv < maxunit ; lcv++) {
3300 sc = devclass_get_softc(dc, lcv);
3301 if (sc == NULL)
3302 continue;
3303 if (unit != -1 && unit != lcv)
3304 continue;
3305
3306 device_printf(sc->dev, "dumping device at level 0x%b\n",
3307 level, END_BITS);
3308
3309 if (sc->dtq_us == 0) {
3310 printf("<hasn't been en_init'd yet>\n");
3311 continue;
3312 }
3313
3314 if (level & END_STATS)
3315 en_dump_stats(&sc->stats);
3316 if (level & END_MREGS)
3317 en_dump_mregs(sc);
3318 if (level & END_TX)
3319 en_dump_tx(sc);
3320 if (level & END_RX)
3321 en_dump_rx(sc);
3322 if (level & END_DTQ)
3323 en_dump_dtqs(sc);
3324 if (level & END_DRQ)
3325 en_dump_drqs(sc);
3326
3327 if (level & END_SWSL) {
3328 printf(" swslist [size=%d]: ", sc->swsl_size);
3329 for (cnt = sc->swsl_head ; cnt != sc->swsl_tail ;
3330 cnt = (cnt + 1) % MID_SL_N)
3331 printf("0x%x ", sc->swslist[cnt]);
3332 printf("\n");
3333 }
3334 }
3335 return (0);
3336}
3337
3338/*
3339 * en_dumpmem: dump the memory
3340 *
3341 * Do not staticize - meant for calling from DDB!
3342 */
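/*
 * addr is a byte offset into the card's RAM window (after word alignment
 * it must fall within MID_RAMOFF..MID_MAXOFF) and len counts 32-bit words.
 */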
3343int
3344en_dumpmem(int unit, int addr, int len)
3345{
3346 struct en_softc *sc;
3347 uint32_t reg;
3348 devclass_t dc;
3349
3350 dc = devclass_find("en");
3351 if (dc == NULL) {
3352 printf("%s: can't find devclass\n", __func__);
3353 return (0);
3354 }
3355 sc = devclass_get_softc(dc, unit);
3356 if (sc == NULL) {
3357 printf("%s: invalid unit number: %d\n", __func__, unit);
3358 return (0);
3359 }
3360
3361 addr = addr & ~3;
3362 if (addr < MID_RAMOFF || addr + len * 4 > MID_MAXOFF || len <= 0) {
3363 		printf("invalid addr/len: 0x%x, %d\n", addr, len);
3364 return (0);
3365 }
3366 printf("dumping %d words starting at offset 0x%x\n", len, addr);
3367 while (len--) {
3368 reg = en_read(sc, addr);
3369 printf("mem[0x%x] = 0x%x\n", addr, reg);
3370 addr += 4;
3371 }
3372 return (0);
3373}
3374#endif
2924 bzero(sc->padbuf->m_data, MLEN);
2925
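	/*
	 * One tag for all transmit DMA maps: byte-aligned, 32-bit
	 * addressable, at most EN_TXSZ KB per transfer, split over at
	 * most EN_MAX_DMASEG segments.
	 */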
2926 if (bus_dma_tag_create(NULL, 1, 0,
2927 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2928 EN_TXSZ * 1024, EN_MAX_DMASEG, EN_TXSZ * 1024, 0,
2929 NULL, NULL, &sc->txtag))
2930 goto fail;
2931
2932 sc->map_zone = uma_zcreate("en dma maps", sizeof(struct en_map),
2933 en_map_ctor, en_map_dtor, NULL, en_map_fini, UMA_ALIGN_PTR,
2934 UMA_ZONE_ZINIT);
2935 if (sc->map_zone == NULL)
2936 goto fail;
2937 uma_zone_set_max(sc->map_zone, EN_MAX_MAPS);
2938
2939 /*
2940 * init softc
2941 */
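	/* one pointer per possible VCI; VC structures come from en_vcc_zone */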
2942 sc->vccs = malloc(MID_N_VC * sizeof(sc->vccs[0]),
2943 M_DEVBUF, M_ZERO | M_WAITOK);
2944
2945 sz = sc->en_obmemsz - (MID_BUFOFF - MID_RAMOFF);
2946 ptr = sav = MID_BUFOFF;
2947 ptr = roundup(ptr, EN_TXSZ * 1024); /* align */
2948 sz = sz - (ptr - sav);
2949 	if (EN_TXSZ * 1024 * EN_NTX > sz) {
2950 device_printf(sc->dev, "EN_NTX/EN_TXSZ too big\n");
2951 goto fail;
2952 }
2953 	for (lcv = 0 ; lcv < EN_NTX ; lcv++) {
2954 sc->txslot[lcv].mbsize = 0;
2955 sc->txslot[lcv].start = ptr;
2956 ptr += (EN_TXSZ * 1024);
2957 sz -= (EN_TXSZ * 1024);
2958 sc->txslot[lcv].stop = ptr;
2959 sc->txslot[lcv].nref = 0;
2960 DBG(sc, INIT, ("tx%d: start 0x%x, stop 0x%x", lcv,
2961 sc->txslot[lcv].start, sc->txslot[lcv].stop));
2962 }
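	/*
	 * For example, with EN_NTX=8 and EN_TXSZ=32 (illustrative values)
	 * the transmit area claims 8 * 32 KB = 256 KB of card RAM; what
	 * remains is carved into receive slots below.
	 */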
2963
2964 sav = ptr;
2965 ptr = roundup(ptr, EN_RXSZ * 1024); /* align */
2966 sz = sz - (ptr - sav);
2967 sc->en_nrx = sz / (EN_RXSZ * 1024);
2968 if (sc->en_nrx <= 0) {
2969 device_printf(sc->dev, "EN_NTX/EN_TXSZ/EN_RXSZ too big\n");
2970 goto fail;
2971 }
2972
2973 /*
2974 * ensure that there is always one VC slot on the service list free
2975 * so that we can tell the difference between a full and empty list.
2976 */
2977 if (sc->en_nrx >= MID_N_VC)
2978 sc->en_nrx = MID_N_VC - 1;
2979
2980 for (lcv = 0 ; lcv < sc->en_nrx ; lcv++) {
2981 sc->rxslot[lcv].vcc = NULL;
2982 midvloc = sc->rxslot[lcv].start = ptr;
2983 ptr += (EN_RXSZ * 1024);
2984 sz -= (EN_RXSZ * 1024);
2985 sc->rxslot[lcv].stop = ptr;
2986 midvloc = midvloc - MID_RAMOFF;
2987 /* mask, cvt to words */
2988 midvloc = (midvloc & ~((EN_RXSZ*1024) - 1)) >> 2;
2989 /* we only want the top 11 bits */
2990 midvloc = midvloc >> MIDV_LOCTOPSHFT;
2991 midvloc = (midvloc & MIDV_LOCMASK) << MIDV_LOCSHIFT;
2992 sc->rxslot[lcv].mode = midvloc |
2993 (en_k2sz(EN_RXSZ) << MIDV_SZSHIFT) | MIDV_TRASH;
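		/* the slot stays in trash (discard) mode until a VC claims it */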
2994
2995 DBG(sc, INIT, ("rx%d: start 0x%x, stop 0x%x, mode 0x%x", lcv,
2996 sc->rxslot[lcv].start, sc->rxslot[lcv].stop,
2997 sc->rxslot[lcv].mode));
2998 }
2999
3000 device_printf(sc->dev, "%d %dKB receive buffers, %d %dKB transmit "
3001 "buffers\n", sc->en_nrx, EN_RXSZ, EN_NTX, EN_TXSZ);
3002 device_printf(sc->dev, "end station identifier (mac address) "
3003 "%6D\n", IFP2IFATM(sc->ifp)->mib.esi, ":");
3004
3005 /*
3006 * Start SUNI stuff. This will call our readregs/writeregs
3007 	 * functions, which expect the lock to be held, so we must take it
3008 * here.
3009 */
3010 EN_LOCK(sc);
3011 utopia_start(&sc->utopia);
3012 utopia_reset(&sc->utopia);
3013 EN_UNLOCK(sc);
3014
3015 /*
3016 * final commit
3017 */
3018 atm_ifattach(ifp);
3019
3020#ifdef ENABLE_BPF
3021 bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
3022#endif
3023
3024 return (0);
3025
3026 fail:
3027 en_destroy(sc);
3028 return (-1);
3029}
3030
3031/*
3032 * Free all internal resources. No access to bus resources here.
3033 * No locking required here (interrupt is already disabled).
3034 *
3035 * LOCK: unlocked, needed (but destroyed)
3036 */
3037void
3038en_destroy(struct en_softc *sc)
3039{
3040 u_int i;
3041
3042 if (sc->utopia.state & UTP_ST_ATTACHED) {
3043 /* these assume the lock to be held */
3044 EN_LOCK(sc);
3045 utopia_stop(&sc->utopia);
3046 utopia_detach(&sc->utopia);
3047 EN_UNLOCK(sc);
3048 }
3049
3050 if (sc->vccs != NULL) {
3051 /* get rid of sticky VCCs */
3052 for (i = 0; i < MID_N_VC; i++)
3053 if (sc->vccs[i] != NULL)
3054 uma_zfree(en_vcc_zone, sc->vccs[i]);
3055 free(sc->vccs, M_DEVBUF);
3056 }
3057
3058 if (sc->padbuf != NULL)
3059 m_free(sc->padbuf);
3060
3061 /*
3062 * Destroy the map zone before the tag (the fini function will
3063 * destroy the DMA maps using the tag)
3064 */
3065 if (sc->map_zone != NULL)
3066 uma_zdestroy(sc->map_zone);
3067
3068 if (sc->txtag != NULL)
3069 bus_dma_tag_destroy(sc->txtag);
3070
3071 (void)sysctl_ctx_free(&sc->sysctl_ctx);
3072
3073 cv_destroy(&sc->cv_close);
3074 mtx_destroy(&sc->en_mtx);
3075}
3076
3077/*
3078 * Module loaded/unloaded
3079 */
3080int
3081en_modevent(module_t mod __unused, int event, void *arg __unused)
3082{
3083
3084 switch (event) {
3085
3086 case MOD_LOAD:
3087 en_vcc_zone = uma_zcreate("EN vccs", sizeof(struct en_vcc),
3088 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
3089 if (en_vcc_zone == NULL)
3090 return (ENOMEM);
3091 break;
3092
3093 case MOD_UNLOAD:
3094 uma_zdestroy(en_vcc_zone);
3095 break;
3096 }
3097 return (0);
3098}
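/*
 * A minimal sketch of how a bus front-end might hook this handler; the
 * identifiers below are illustrative, not this driver's actual glue
 * (which lives with the bus attachment code):
 *
 *	DRIVER_MODULE(en, pci, en_pci_driver, en_devclass, en_modevent, 0);
 */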
3099
3100/*********************************************************************/
3101/*
3102 * Debugging support
3103 */
3104
3105#ifdef EN_DDBHOOK
3106/*
3107 * functions we can call from ddb
3108 */
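/*
 * Illustrative usage from the ddb prompt (unit and level values are
 * examples only; a unit of -1 makes en_dump visit every attached unit):
 *
 *	call en_dump(0, 0x7f)		everything for en0
 *	call en_dump(-1, 0x1)		stats only, all units
 */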
3109
3110/*
3111 * en_dump: dump the state
3112 */
3113#define END_SWSL 0x00000040 /* swsl state */
3114#define END_DRQ 0x00000020 /* drq state */
3115#define END_DTQ 0x00000010 /* dtq state */
3116#define END_RX 0x00000008 /* rx state */
3117#define END_TX 0x00000004 /* tx state */
3118#define END_MREGS 0x00000002 /* registers */
3119#define END_STATS 0x00000001 /* dump stats */
3120
3121#define END_BITS "\20\7SWSL\6DRQ\5DTQ\4RX\3TX\2MREGS\1STATS"
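/*
 * END_BITS is in the kernel's %b notation: the leading \20 (decimal 16)
 * selects hexadecimal output and each <bit number><name> pair labels one
 * bit.  With the "0x%b" format used below, a level of 0x41 prints as
 * 0x41<SWSL,STATS>.
 */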
3122
3123static void
3124en_dump_stats(const struct en_stats *s)
3125{
3126 printf("en_stats:\n");
3127 printf("\t%d/%d mfix (%d failed)\n", s->mfixaddr, s->mfixlen,
3128 s->mfixfail);
3129 printf("\t%d rx dma overflow interrupts\n", s->dmaovr);
3130 printf("\t%d times out of TX space and stalled\n", s->txoutspace);
3131 printf("\t%d times out of DTQs\n", s->txdtqout);
3132 printf("\t%d times launched a packet\n", s->launch);
3133 printf("\t%d times pulled the hw service list\n", s->hwpull);
3134 printf("\t%d times pushed a vci on the sw service list\n", s->swadd);
3135 printf("\t%d times RX pulled an mbuf from Q that wasn't ours\n",
3136 s->rxqnotus);
3137 printf("\t%d times RX pulled a good mbuf from Q\n", s->rxqus);
3138 printf("\t%d times ran out of DRQs\n", s->rxdrqout);
3139 printf("\t%d transmit packets dropped due to mbsize\n", s->txmbovr);
3140 printf("\t%d cells trashed due to turned off rxvc\n", s->vtrash);
3141 printf("\t%d cells trashed due to totally full buffer\n", s->otrash);
3142 	printf("\t%d cells trashed due to almost full buffer\n", s->ttrash);
3143 printf("\t%d rx mbuf allocation failures\n", s->rxmbufout);
3144 printf("\t%d times out of tx maps\n", s->txnomap);
3145#ifdef NATM
3146#ifdef NATM_STAT
3147 printf("\tnatmintr so_rcv: ok/drop cnt: %d/%d, ok/drop bytes: %d/%d\n",
3148 natm_sookcnt, natm_sodropcnt, natm_sookbytes, natm_sodropbytes);
3149#endif
3150#endif
3151}
3152
3153static void
3154en_dump_mregs(struct en_softc *sc)
3155{
3156 u_int cnt;
3157
3158 printf("mregs:\n");
3159 printf("resid = 0x%x\n", en_read(sc, MID_RESID));
3160 printf("interrupt status = 0x%b\n",
3161 (int)en_read(sc, MID_INTSTAT), MID_INTBITS);
3162 printf("interrupt enable = 0x%b\n",
3163 (int)en_read(sc, MID_INTENA), MID_INTBITS);
3164 printf("mcsr = 0x%b\n", (int)en_read(sc, MID_MAST_CSR), MID_MCSRBITS);
3165 printf("serv_write = [chip=%u] [us=%u]\n", en_read(sc, MID_SERV_WRITE),
3166 MID_SL_A2REG(sc->hwslistp));
3167 printf("dma addr = 0x%x\n", en_read(sc, MID_DMA_ADDR));
3168 printf("DRQ: chip[rd=0x%x,wr=0x%x], sc[chip=0x%x,us=0x%x]\n",
3169 MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX)),
3170 MID_DRQ_REG2A(en_read(sc, MID_DMA_WRRX)), sc->drq_chip, sc->drq_us);
3171 printf("DTQ: chip[rd=0x%x,wr=0x%x], sc[chip=0x%x,us=0x%x]\n",
3172 MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX)),
3173 MID_DTQ_REG2A(en_read(sc, MID_DMA_WRTX)), sc->dtq_chip, sc->dtq_us);
3174
3175 	printf("  unusual txspeeds:");
3176 for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
3177 		if (sc->vccs[cnt] != NULL && sc->vccs[cnt]->txspeed)
3178 printf(" vci%d=0x%x", cnt, sc->vccs[cnt]->txspeed);
3179 printf("\n");
3180
3181 printf(" rxvc slot mappings:");
3182 for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
3183 		if (sc->vccs[cnt] != NULL && sc->vccs[cnt]->rxslot != NULL)
3184 printf(" %d->%td", cnt,
3185 sc->vccs[cnt]->rxslot - sc->rxslot);
3186 printf("\n");
3187}
3188
3189static void
3190en_dump_tx(struct en_softc *sc)
3191{
3192 u_int slot;
3193
3194 printf("tx:\n");
3195 for (slot = 0 ; slot < EN_NTX; slot++) {
3196 printf("tx%d: start/stop/cur=0x%x/0x%x/0x%x [%d] ", slot,
3197 sc->txslot[slot].start, sc->txslot[slot].stop,
3198 sc->txslot[slot].cur,
3199 (sc->txslot[slot].cur - sc->txslot[slot].start) / 4);
3200 printf("mbsize=%d, bfree=%d\n", sc->txslot[slot].mbsize,
3201 sc->txslot[slot].bfree);
3202 printf("txhw: base_address=0x%x, size=%u, read=%u, "
3203 "descstart=%u\n",
3204 (u_int)MIDX_BASE(en_read(sc, MIDX_PLACE(slot))),
3205 MIDX_SZ(en_read(sc, MIDX_PLACE(slot))),
3206 en_read(sc, MIDX_READPTR(slot)),
3207 en_read(sc, MIDX_DESCSTART(slot)));
3208 }
3209}
3210
3211static void
3212en_dump_rx(struct en_softc *sc)
3213{
3214 struct en_rxslot *slot;
3215
3216 printf(" recv slots:\n");
3217 for (slot = sc->rxslot ; slot < &sc->rxslot[sc->en_nrx]; slot++) {
3218 printf("rx%td: start/stop/cur=0x%x/0x%x/0x%x mode=0x%x ",
3219 slot - sc->rxslot, slot->start, slot->stop, slot->cur,
3220 slot->mode);
3221 if (slot->vcc != NULL) {
3222 printf("vci=%u\n", slot->vcc->vcc.vci);
3223 printf("RXHW: mode=0x%x, DST_RP=0x%x, WP_ST_CNT=0x%x\n",
3224 en_read(sc, MID_VC(slot->vcc->vcc.vci)),
3225 en_read(sc, MID_DST_RP(slot->vcc->vcc.vci)),
3226 en_read(sc, MID_WP_ST_CNT(slot->vcc->vcc.vci)));
3227 		} else printf("\n");	/* keep one slot per line */
3228 }
3229}
3230
3231/*
3232  * This is only correct for non-Adaptec adapters.
3233 */
3234static void
3235en_dump_dtqs(struct en_softc *sc)
3236{
3237 uint32_t ptr, reg;
3238
3239 printf(" dtq [need_dtqs=%d,dtq_free=%d]:\n", sc->need_dtqs,
3240 sc->dtq_free);
3241 ptr = sc->dtq_chip;
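	/*
	 * Walk from the chip's read pointer up to our write pointer;
	 * each queue entry is two words (a control word followed by the
	 * DMA address), hence the stride of 8 bytes.
	 */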
3242 while (ptr != sc->dtq_us) {
3243 reg = en_read(sc, ptr);
3244 printf("\t0x%x=[%#x cnt=%d, chan=%d, end=%d, type=%d @ 0x%x]\n",
3245 sc->dtq[MID_DTQ_A2REG(ptr)], reg, MID_DMA_CNT(reg),
3246 MID_DMA_TXCHAN(reg), (reg & MID_DMA_END) != 0,
3247 MID_DMA_TYPE(reg), en_read(sc, ptr + 4));
3248 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, ptr, 8);
3249 }
3250}
3251
3252static void
3253en_dump_drqs(struct en_softc *sc)
3254{
3255 uint32_t ptr, reg;
3256
3257 printf(" drq [need_drqs=%d,drq_free=%d]:\n", sc->need_drqs,
3258 sc->drq_free);
3259 ptr = sc->drq_chip;
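	/* same two-word entry layout as the DTQ ring, but keyed by RX VCI */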
3260 while (ptr != sc->drq_us) {
3261 reg = en_read(sc, ptr);
3262 printf("\t0x%x=[cnt=%d, chan=%d, end=%d, type=%d @ 0x%x]\n",
3263 sc->drq[MID_DRQ_A2REG(ptr)], MID_DMA_CNT(reg),
3264 MID_DMA_RXVCI(reg), (reg & MID_DMA_END) != 0,
3265 MID_DMA_TYPE(reg), en_read(sc, ptr + 4));
3266 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, ptr, 8);
3267 }
3268}
3269
3270/* Do not staticize - meant for calling from DDB! */
3271int
3272en_dump(int unit, int level)
3273{
3274 struct en_softc *sc;
3275 int lcv, cnt;
3276 devclass_t dc;
3277 int maxunit;
3278
3279 dc = devclass_find("en");
3280 if (dc == NULL) {
3281 printf("%s: can't find devclass!\n", __func__);
3282 return (0);
3283 }
3284 maxunit = devclass_get_maxunit(dc);
3285 for (lcv = 0 ; lcv < maxunit ; lcv++) {
3286 sc = devclass_get_softc(dc, lcv);
3287 if (sc == NULL)
3288 continue;
3289 if (unit != -1 && unit != lcv)
3290 continue;
3291
3292 device_printf(sc->dev, "dumping device at level 0x%b\n",
3293 level, END_BITS);
3294
3295 if (sc->dtq_us == 0) {
3296 printf("<hasn't been en_init'd yet>\n");
3297 continue;
3298 }
3299
3300 if (level & END_STATS)
3301 en_dump_stats(&sc->stats);
3302 if (level & END_MREGS)
3303 en_dump_mregs(sc);
3304 if (level & END_TX)
3305 en_dump_tx(sc);
3306 if (level & END_RX)
3307 en_dump_rx(sc);
3308 if (level & END_DTQ)
3309 en_dump_dtqs(sc);
3310 if (level & END_DRQ)
3311 en_dump_drqs(sc);
3312
3313 if (level & END_SWSL) {
3314 printf(" swslist [size=%d]: ", sc->swsl_size);
3315 for (cnt = sc->swsl_head ; cnt != sc->swsl_tail ;
3316 cnt = (cnt + 1) % MID_SL_N)
3317 printf("0x%x ", sc->swslist[cnt]);
3318 printf("\n");
3319 }
3320 }
3321 return (0);
3322}
3323
3324/*
3325 * en_dumpmem: dump the memory
3326 *
3327 * Do not staticize - meant for calling from DDB!
3328 */
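/*
 * addr is a byte offset into the card's RAM window (after word alignment
 * it must fall within MID_RAMOFF..MID_MAXOFF) and len counts 32-bit words.
 */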
3329int
3330en_dumpmem(int unit, int addr, int len)
3331{
3332 struct en_softc *sc;
3333 uint32_t reg;
3334 devclass_t dc;
3335
3336 dc = devclass_find("en");
3337 if (dc == NULL) {
3338 printf("%s: can't find devclass\n", __func__);
3339 return (0);
3340 }
3341 sc = devclass_get_softc(dc, unit);
3342 if (sc == NULL) {
3343 printf("%s: invalid unit number: %d\n", __func__, unit);
3344 return (0);
3345 }
3346
3347 addr = addr & ~3;
3348 if (addr < MID_RAMOFF || addr + len * 4 > MID_MAXOFF || len <= 0) {
3349 		printf("invalid addr/len: 0x%x, %d\n", addr, len);
3350 return (0);
3351 }
3352 printf("dumping %d words starting at offset 0x%x\n", len, addr);
3353 while (len--) {
3354 reg = en_read(sc, addr);
3355 printf("mem[0x%x] = 0x%x\n", addr, reg);
3356 addr += 4;
3357 }
3358 return (0);
3359}
3360#endif