midway.c (revision 112135) vs. midway.c (revision 114018)
1/* $NetBSD: midway.c,v 1.30 1997/09/29 17:40:38 chuck Exp $ */
2/* (sync'd to midway.c 1.68) */
3
4/*
5 *
6 * Copyright (c) 1996 Charles D. Cranor and Washington University.
7 * All rights reserved.
8 *

--- 18 unchanged lines hidden ---

27 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
29 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
30 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
31 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
33 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 *
1/* $NetBSD: midway.c,v 1.30 1997/09/29 17:40:38 chuck Exp $ */
2/* (sync'd to midway.c 1.68) */
3
4/*
5 *
6 * Copyright (c) 1996 Charles D. Cranor and Washington University.
7 * All rights reserved.
8 *

--- 18 unchanged lines hidden ---

27 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
29 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
30 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
31 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
33 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 *
35 * $FreeBSD: head/sys/dev/en/midway.c 112135 2003-03-12 10:28:26Z kjc $
35 * $FreeBSD: head/sys/dev/en/midway.c 114018 2003-04-25 16:14:03Z harti $
36 */
37
38/*
39 *
40 * m i d w a y . c e n i 1 5 5 d r i v e r
41 *
42 * author: Chuck Cranor <chuck@ccrc.wustl.edu>
43 * started: spring, 1996 (written from scratch).
44 *
45 * notes from the author:
46 * Extra special thanks go to Werner Almesberger, EPFL LRC. Werner's
47 * ENI driver was especially useful in figuring out how this card works.
48 * I would also like to thank Werner for promptly answering email and being
49 * generally helpful.
50 */
51
36 */
37
38/*
39 *
40 * m i d w a y . c e n i 1 5 5 d r i v e r
41 *
42 * author: Chuck Cranor <chuck@ccrc.wustl.edu>
43 * started: spring, 1996 (written from scratch).
44 *
45 * notes from the author:
46 * Extra special thanks go to Werner Almesberger, EPFL LRC. Werner's
47 * ENI driver was especially useful in figuring out how this card works.
48 * I would also like to thank Werner for promptly answering email and being
49 * generally helpful.
50 */
51
52#undef EN_DEBUG
53#undef EN_DEBUG_RANGE /* check ranges on en_read/en_write's? */
54#define EN_MBUF_OPT /* try and put more stuff in mbuf? */
55#define EN_DIAG
52#define EN_DIAG
56#define EN_STAT
57#ifndef EN_DMA
58#define EN_DMA 1 /* use dma? */
59#endif
60#define EN_NOTXDMA 0 /* hook to disable tx dma only */
61#define EN_NORXDMA 0 /* hook to disable rx dma only */
62#define EN_DDBHOOK 1 /* compile in ddb functions */
53#define EN_DDBHOOK 1 /* compile in ddb functions */
63#if defined(MIDWAY_ADPONLY)
64#define EN_ENIDMAFIX 0 /* no ENI cards to worry about */
65#else
66#define EN_ENIDMAFIX 1 /* avoid byte DMA on the ENI card (see below) */
67#endif
68
69/*
54
55/*
70 * note on EN_ENIDMAFIX: the byte aligner on the ENI version of the card
56 * Note on EN_ENIDMAFIX: the byte aligner on the ENI version of the card
71 * appears to be broken. it works just fine if there is no load... however
72 * when the card is loaded the data get corrupted. to see this, one only
73 * has to use "telnet" over ATM. do the following command in "telnet":
74 * cat /usr/share/misc/termcap
75 * "telnet" seems to generate lots of 1023 byte mbufs (which make great
76 * use of the byte aligner). watch "netstat -s" for checksum errors.
77 *
78 * I further tested this by adding a function that compared the transmit

--- 7 unchanged lines hidden ---

86 *
87 * bottom line: we set EN_ENIDMAFIX to 1 to avoid byte DMAs on the ENI
88 * card.
89 */
90
91#if defined(DIAGNOSTIC) && !defined(EN_DIAG)
92#define EN_DIAG /* link in with master DIAG option */
93#endif
57 * appears to be broken. it works just fine if there is no load... however
58 * when the card is loaded the data get corrupted. to see this, one only
59 * has to use "telnet" over ATM. do the following command in "telnet":
60 * cat /usr/share/misc/termcap
61 * "telnet" seems to generate lots of 1023 byte mbufs (which make great
62 * use of the byte aligner). watch "netstat -s" for checksum errors.
63 *
64 * I further tested this by adding a function that compared the transmit

--- 7 unchanged lines hidden ---

72 *
73 * bottom line: we set EN_ENIDMAFIX to 1 to avoid byte DMAs on the ENI
74 * card.
75 */
76
77#if defined(DIAGNOSTIC) && !defined(EN_DIAG)
78#define EN_DIAG /* link in with master DIAG option */
79#endif
94#ifdef EN_STAT
80
95#define EN_COUNT(X) (X)++
81#define EN_COUNT(X) (X)++
96#else
97#define EN_COUNT(X) /* nothing */
98#endif
99
100#ifdef EN_DEBUG
82
83#ifdef EN_DEBUG
84
101#undef EN_DDBHOOK
102#define EN_DDBHOOK 1
85#undef EN_DDBHOOK
86#define EN_DDBHOOK 1
103#define STATIC /* nothing */
104#define INLINE /* nothing */
87
88/*
89 * This macro removes almost all the EN_DEBUG conditionals in the code that make
90 * the code a good deal less readable.
91 */
92#define DBG(SC, FL, PRINT) do { \
93 if ((SC)->debug & DBG_##FL) { \
94 if_printf(&(SC)->enif, "%s: "#FL": ", __func__); \
95 printf PRINT; \
96 printf("\n"); \
97 } \
98 } while (0)
99
100enum {
101 DBG_INIT = 0x0001, /* debug attach/detach */
102 DBG_TX = 0x0002, /* debug transmitting */
103 DBG_SERV = 0x0004, /* debug service interrupts */
104 DBG_IOCTL = 0x0008, /* debug ioctls */
105 DBG_VC = 0x0010, /* debug VC handling */
106 DBG_INTR = 0x0020, /* debug interrupts */
107 DBG_DMA = 0x0040, /* debug DMA probing */
108 DBG_IPACKETS = 0x0080, /* print input packets */
109 DBG_REG = 0x0100, /* print all register access */
110 DBG_LOCK = 0x0200, /* debug locking */
111};
112
105#else /* EN_DEBUG */
113#else /* EN_DEBUG */
106#define STATIC static
107#define INLINE __inline
114
115#define DBG(SC, FL, PRINT) do { } while (0)
116
108#endif /* EN_DEBUG */
109
117#endif /* EN_DEBUG */
118
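
For illustration only (not taken from the driver; sc, chan and len are hypothetical locals): with EN_DEBUG defined and the DBG_TX bit set in sc->debug, a call like

	DBG(sc, TX, ("launching %u bytes on channel %u", len, chan));

expands to roughly

	if (sc->debug & DBG_TX) {
		if_printf(&sc->enif, "%s: TX: ", __func__);
		printf("launching %u bytes on channel %u", len, chan);
		printf("\n");
	}

while with EN_DEBUG undefined the macro compiles to an empty do { } while (0), so the calls cost nothing in a production build.
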
110#ifdef __FreeBSD__
111#include "opt_inet.h"
112#include "opt_natm.h"
113#include "opt_ddb.h"
119#include "opt_inet.h"
120#include "opt_natm.h"
121#include "opt_ddb.h"
114/* enable DDBHOOK when DDB is available */
115#undef EN_DDBHOOK
122
116#ifdef DDB
123#ifdef DDB
124#undef EN_DDBHOOK
117#define EN_DDBHOOK 1
118#endif
125#define EN_DDBHOOK 1
126#endif
119#endif
120
121#include <sys/param.h>
122#include <sys/systm.h>
123#include <sys/queue.h>
127
128#include <sys/param.h>
129#include <sys/systm.h>
130#include <sys/queue.h>
124#if defined(__NetBSD__) || defined(__OpenBSD__) || defined(__bsdi__)
125#include <sys/device.h>
126#endif
127#include <sys/sockio.h>
131#include <sys/sockio.h>
128#include <sys/mbuf.h>
129#include <sys/socket.h>
132#include <sys/socket.h>
133#include <sys/mbuf.h>
134#include <sys/endian.h>
135#include <sys/sbuf.h>
136#include <sys/stdint.h>
137#include <vm/uma.h>
130
131#include <net/if.h>
132#include <net/if_atm.h>
133
138
139#include <net/if.h>
140#include <net/if_atm.h>
141
134#include <vm/vm.h>
135
136#if defined(INET) || defined(INET6)
137#include <netinet/in.h>
138#include <netinet/if_atm.h>
139#endif
140
141#ifdef NATM
142#include <netnatm/natm.h>
143#endif
144
142#if defined(INET) || defined(INET6)
143#include <netinet/in.h>
144#include <netinet/if_atm.h>
145#endif
146
147#ifdef NATM
148#include <netnatm/natm.h>
149#endif
150
145#if defined(__NetBSD__) || defined(__OpenBSD__)
146#include <machine/bus.h>
147#include <dev/ic/midwayreg.h>
148#include <dev/ic/midwayvar.h>
149#elif defined(__FreeBSD__)
150#include <sys/bus.h>
151#include <machine/bus.h>
152#include <sys/rman.h>
151#include <sys/bus.h>
152#include <machine/bus.h>
153#include <sys/rman.h>
154#include <sys/module.h>
155#include <sys/sysctl.h>
156#include <sys/malloc.h>
153#include <machine/resource.h>
154#include <dev/en/midwayreg.h>
155#include <dev/en/midwayvar.h>
157#include <machine/resource.h>
158#include <dev/en/midwayreg.h>
159#include <dev/en/midwayvar.h>
156#include <vm/pmap.h> /* for vtophys proto */
157
160
158#ifndef IFF_NOTRAILERS
159#define IFF_NOTRAILERS 0
160#endif
161
162#endif /* __FreeBSD__ */
163
164#if defined(__alpha__)
165/* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */
166#undef vtophys
167#define vtophys(va) alpha_XXX_dmamap((vm_offset_t)(va))
168#endif
169
170#ifdef __FreeBSD__
171#define NBPF 1
172#else
173#include "bpf.h"
174#endif
175#if NBPF > 0
176#include <net/bpf.h>
161#include <net/bpf.h>
177#ifdef __FreeBSD__
178#define BPFATTACH(ifp, dlt, hlen) bpfattach((ifp), (dlt), (hlen))
179#else
180#define BPFATTACH(ifp, dlt, hlen) bpfattach(&(ifp)->if_bpf, (ifp), (dlt), (hlen))
181#define BPF_MTAP(ifp, m) bpf_mtap((ifp)->if_bpf, (m))
182#endif
183#endif /* NBPF > 0 */
184
185/*
186 * params
187 */
162
163/*
164 * params
165 */
188
189#ifndef EN_TXHIWAT
166#ifndef EN_TXHIWAT
190#define EN_TXHIWAT (64*1024) /* max 64 KB waiting to be DMAd out */
167#define EN_TXHIWAT (64 * 1024) /* max 64 KB waiting to be DMAd out */
191#endif
192
168#endif
169
193#ifndef EN_MINDMA
194#define EN_MINDMA 32 /* don't DMA anything less than this (bytes) */
195#endif
170#define RX_NONE 0xffff /* recv VC not in use */
196
171
197#define RX_NONE 0xffff /* recv VC not in use */
198
199#define EN_OBHDR ATM_PH_DRIVER7 /* TBD in first mbuf ! */
200#define EN_OBTRL ATM_PH_DRIVER8 /* PDU trailer in last mbuf ! */
201
202#define ENOTHER_FREE 0x01 /* free rxslot */
203#define ENOTHER_DRAIN 0x02 /* almost free (drain DRQ dma) */
172#define ENOTHER_FREE 0x01 /* free rxslot */
173#define ENOTHER_DRAIN 0x02 /* almost free (drain DRQ dma) */
204#define ENOTHER_RAW 0x04 /* 'raw' access (aka boodi mode) */
205#define ENOTHER_SWSL 0x08 /* in software service list */
206
174#define ENOTHER_SWSL 0x08 /* in software service list */
175
207static int en_dma = EN_DMA; /* use DMA (switch off for dbg) */
176SYSCTL_NODE(_hw, OID_AUTO, en, CTLFLAG_RW, 0, "ENI 155p");
208
177
209#ifndef __FreeBSD__
210/*
178/*
211 * autoconfig attachments
212 */
213
214struct cfdriver en_cd = {
215 0, "en", DV_IFNET,
216};
217#endif
218
219/*
220 * local structures
221 */
222
223/*
224 * params to en_txlaunch() function
225 */
226
227struct en_launch {
228 u_int32_t tbd1; /* TBD 1 */
229 u_int32_t tbd2; /* TBD 2 */
230 u_int32_t pdu1; /* PDU 1 (aal5) */
231 int nodma; /* don't use DMA */
232 int need; /* total space we need (pad out if less data) */
233 int mlen; /* length of mbuf (for dtq) */
234 struct mbuf *t; /* data */
235 u_int32_t aal; /* aal code */
236 u_int32_t atm_vci; /* vci */
237 u_int8_t atm_flags; /* flags */
238};
239
240
241/*
242 * dma table (index by # of words)
179 * dma tables
243 *
180 *
244 * plan A: use WMAYBE (obsolete)
245 * plan B: avoid WMAYBE
181 * The plan is indexed by the number of words to transfer.
182 * The maximum index is 15 for 60 words.
246 */
183 */
247
248struct en_dmatab {
184struct en_dmatab {
249 u_int8_t bcode; /* code */
250 u_int8_t divshift; /* byte divisor */
185 uint8_t bcode; /* code */
186 uint8_t divshift; /* byte divisor */
251};
252
187};
188
253static struct en_dmatab en_dma_planB[] = {
189static const struct en_dmatab en_dmaplan[] = {
254 { 0, 0 }, /* 0 */ { MIDDMA_WORD, 2}, /* 1 */
255 { MIDDMA_2WORD, 3}, /* 2 */ { MIDDMA_WORD, 2}, /* 3 */
256 { MIDDMA_4WORD, 4}, /* 4 */ { MIDDMA_WORD, 2}, /* 5 */
257 { MIDDMA_2WORD, 3}, /* 6 */ { MIDDMA_WORD, 2}, /* 7 */
258 { MIDDMA_8WORD, 5}, /* 8 */ { MIDDMA_WORD, 2}, /* 9 */
259 { MIDDMA_2WORD, 3}, /* 10 */ { MIDDMA_WORD, 2}, /* 11 */
260 { MIDDMA_4WORD, 4}, /* 12 */ { MIDDMA_WORD, 2}, /* 13 */
261 { MIDDMA_2WORD, 3}, /* 14 */ { MIDDMA_WORD, 2}, /* 15 */
190 { 0, 0 }, /* 0 */ { MIDDMA_WORD, 2}, /* 1 */
191 { MIDDMA_2WORD, 3}, /* 2 */ { MIDDMA_WORD, 2}, /* 3 */
192 { MIDDMA_4WORD, 4}, /* 4 */ { MIDDMA_WORD, 2}, /* 5 */
193 { MIDDMA_2WORD, 3}, /* 6 */ { MIDDMA_WORD, 2}, /* 7 */
194 { MIDDMA_8WORD, 5}, /* 8 */ { MIDDMA_WORD, 2}, /* 9 */
195 { MIDDMA_2WORD, 3}, /* 10 */ { MIDDMA_WORD, 2}, /* 11 */
196 { MIDDMA_4WORD, 4}, /* 12 */ { MIDDMA_WORD, 2}, /* 13 */
197 { MIDDMA_2WORD, 3}, /* 14 */ { MIDDMA_WORD, 2}, /* 15 */
262 { MIDDMA_16WORD, 6}, /* 16 */
198 { MIDDMA_16WORD,6}, /* 16 */
263};
264
199};
200
265static struct en_dmatab *en_dmaplan = en_dma_planB;
266
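
As a concrete reading of the table above (the numbers are illustrative and follow the lookup pattern used later in en_txdma_load()): a residue of 48 bytes, i.e. 12 words, would be translated into a burst code and count roughly like

	u_int count = 48 / sizeof(uint32_t);          /* 12 words */
	uint8_t bcode = en_dmaplan[count].bcode;      /* MIDDMA_4WORD */
	count = 48 >> en_dmaplan[count].divshift;     /* 48 >> 4 == 3 bursts */

so the card is asked for three 4-word bursts instead of twelve single-word transfers.
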
267/*
268 * prototypes
269 */
201/*
202 * prototypes
203 */
270
271STATIC INLINE int en_b2sz(int) __attribute__ ((unused));
272#ifdef EN_DDBHOOK
204#ifdef EN_DDBHOOK
273 int en_dump(int,int);
274 int en_dumpmem(int,int,int);
205int en_dump(int unit, int level);
206int en_dumpmem(int,int,int);
275#endif
207#endif
276STATIC void en_dmaprobe(struct en_softc *);
277STATIC int en_dmaprobe_doit(struct en_softc *, u_int8_t *,
278 u_int8_t *, int);
279STATIC INLINE int en_dqneed(struct en_softc *, caddr_t, u_int,
280 u_int) __attribute__ ((unused));
281STATIC void en_init(struct en_softc *);
282STATIC int en_ioctl(struct ifnet *, EN_IOCTL_CMDT, caddr_t);
283STATIC INLINE int en_k2sz(int) __attribute__ ((unused));
284STATIC void en_loadvc(struct en_softc *, int);
285STATIC int en_mfix(struct en_softc *, struct mbuf **, struct mbuf *);
286STATIC INLINE struct mbuf *en_mget(struct en_softc *, u_int,
287 u_int *) __attribute__ ((unused));
288STATIC INLINE u_int32_t en_read(struct en_softc *,
289 u_int32_t) __attribute__ ((unused));
290STATIC int en_rxctl(struct en_softc *, struct atm_pseudoioctl *, int);
291STATIC void en_txdma(struct en_softc *, int);
292STATIC void en_txlaunch(struct en_softc *, int,
293 struct en_launch *);
294STATIC void en_service(struct en_softc *);
295STATIC void en_start(struct ifnet *);
296STATIC INLINE int en_sz2b(int) __attribute__ ((unused));
297STATIC INLINE void en_write(struct en_softc *, u_int32_t,
298 u_int32_t) __attribute__ ((unused));
299
208
300/*
301 * macros/inline
302 */
209#define EN_LOCK(SC) do { \
210 DBG(SC, LOCK, ("ENLOCK %d\n", __LINE__)); \
211 mtx_lock(&sc->en_mtx); \
212 } while (0)
213#define EN_UNLOCK(SC) do { \
214 DBG(SC, LOCK, ("ENUNLOCK %d\n", __LINE__)); \
215 mtx_unlock(&sc->en_mtx); \
216 } while (0)
303
304/*
217
218/*
305 * raw read/write macros
219 * While a transmit mbuf is waiting to get transmit DMA resources we
220 * need to keep some information with it. We don't want to allocate
221 * additional memory for this so we stuff it into free fields in the
222 * mbuf packet header. Neither the checksum fields nor the rcvif field are used
223 * so use these.
306 */
224 */
225#define TX_AAL5 0x1 /* transmit AAL5 PDU */
226#define TX_HAS_TBD 0x2 /* TBD did fit into mbuf */
227#define TX_HAS_PAD 0x4 /* padding did fit into mbuf */
228#define TX_HAS_PDU 0x8 /* PDU trailer did fit into mbuf */
307
229
308#define EN_READDAT(SC,R) en_read(SC,R)
309#define EN_WRITEDAT(SC,R,V) en_write(SC,R,V)
230#define MBUF_SET_TX(M, VCI, FLAGS, DATALEN, PAD, MAP) do { \
231 (M)->m_pkthdr.csum_data = (VCI) | ((FLAGS) << MID_VCI_BITS); \
232 (M)->m_pkthdr.csum_flags = ((DATALEN) & 0xffff) | \
233 ((PAD & 0x3f) << 16); \
234 (M)->m_pkthdr.rcvif = (void *)(MAP); \
235 } while (0)
310
236
311/*
312 * cooked read/write macros
313 */
237#define MBUF_GET_TX(M, VCI, FLAGS, DATALEN, PAD, MAP) do { \
238 (VCI) = (M)->m_pkthdr.csum_data & ((1 << MID_VCI_BITS) - 1); \
239 (FLAGS) = ((M)->m_pkthdr.csum_data >> MID_VCI_BITS) & 0xf; \
240 (DATALEN) = (M)->m_pkthdr.csum_flags & 0xffff; \
241 (PAD) = ((M)->m_pkthdr.csum_flags >> 16) & 0x3f; \
242 (MAP) = (void *)((M)->m_pkthdr.rcvif); \
243 } while (0)
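
A round-trip sketch of the two macros above (illustrative only; m, map and the unpacked locals are hypothetical, and the values must fit the field widths: VCI below 1 << MID_VCI_BITS, flags <= 0xf, data length <= 0xffff, pad <= 0x3f):

	MBUF_SET_TX(m, 5, TX_AAL5, 1234, 16, map);	/* stash while the mbuf waits for DTQs */
	/* ... later, when DMA resources become available ... */
	MBUF_GET_TX(m, vci, flags, datalen, pad, map2);
	/* vci == 5, flags == TX_AAL5, datalen == 1234, pad == 16, map2 == map */
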
314
244
315#define EN_READ(SC,R) (u_int32_t)ntohl(en_read(SC,R))
316#define EN_WRITE(SC,R,V) en_write(SC,R, htonl(V))
317
245
318#define EN_WRAPADD(START,STOP,CUR,VAL) { \
319 (CUR) = (CUR) + (VAL); \
320 if ((CUR) >= (STOP)) \
321 (CUR) = (START) + ((CUR) - (STOP)); \
322 }
246#define EN_WRAPADD(START, STOP, CUR, VAL) do { \
247 (CUR) = (CUR) + (VAL); \
248 if ((CUR) >= (STOP)) \
249 (CUR) = (START) + ((CUR) - (STOP)); \
250 } while (0)
323
251
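
A worked example of the wrap macro (numbers made up): with a circular on-card buffer running from START 0x1000 to STOP 0x2000,

	u_int cur = 0x1ff8;
	EN_WRAPADD(0x1000, 0x2000, cur, 0x10);
	/* 0x1ff8 + 0x10 == 0x2008 >= 0x2000, so cur wraps to 0x1000 + 0x8 == 0x1008 */
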
324#define WORD_IDX(START, X) (((X) - (START)) / sizeof(u_int32_t))
252#define WORD_IDX(START, X) (((X) - (START)) / sizeof(uint32_t))
325
253
326/* we store sc->dtq and sc->drq data in the following format... */
327#define EN_DQ_MK(SLOT,LEN) (((SLOT) << 20)|(LEN)|(0x80000))
328 /* the 0x80000 ensures we != 0 */
329#define EN_DQ_SLOT(X) ((X) >> 20)
330#define EN_DQ_LEN(X) ((X) & 0x3ffff)
254#define SETQ_END(SC, VAL) ((SC)->is_adaptec ? \
255 ((VAL) | (MID_DMA_END >> 4)) : \
256 ((VAL) | (MID_DMA_END)))
331
257
332/* format of DTQ/DRQ word 1 differs between ENI and ADP */
333#if defined(MIDWAY_ENIONLY)
334
335#define MID_MK_TXQ(SC,CNT,CHAN,END,BCODE) \
336 EN_WRITE((SC), (SC)->dtq_us, \
337 MID_MK_TXQ_ENI((CNT), (CHAN), (END), (BCODE)));
338
339#define MID_MK_RXQ(SC,CNT,VCI,END,BCODE) \
340 EN_WRITE((SC), (SC)->drq_us, \
341 MID_MK_RXQ_ENI((CNT), (VCI), (END), (BCODE)));
342
343#elif defined(MIDWAY_ADPONLY)
344
345#define MID_MK_TXQ(SC,CNT,CHAN,END,JK) \
346 EN_WRITE((SC), (SC)->dtq_us, \
347 MID_MK_TXQ_ADP((CNT), (CHAN), (END), (JK)));
348
349#define MID_MK_RXQ(SC,CNT,VCI,END,JK) \
350 EN_WRITE((SC), (SC)->drq_us, \
351 MID_MK_RXQ_ADP((CNT), (VCI), (END), (JK)));
352
353#else
354
355#define MID_MK_TXQ(SC,CNT,CHAN,END,JK_OR_BCODE) { \
356 if ((SC)->is_adaptec) \
357 EN_WRITE((SC), (SC)->dtq_us, \
358 MID_MK_TXQ_ADP((CNT), (CHAN), (END), (JK_OR_BCODE))); \
359 else \
360 EN_WRITE((SC), (SC)->dtq_us, \
361 MID_MK_TXQ_ENI((CNT), (CHAN), (END), (JK_OR_BCODE))); \
362 }
363
364#define MID_MK_RXQ(SC,CNT,VCI,END,JK_OR_BCODE) { \
365 if ((SC)->is_adaptec) \
366 EN_WRITE((SC), (SC)->drq_us, \
367 MID_MK_RXQ_ADP((CNT), (VCI), (END), (JK_OR_BCODE))); \
368 else \
369 EN_WRITE((SC), (SC)->drq_us, \
370 MID_MK_RXQ_ENI((CNT), (VCI), (END), (JK_OR_BCODE))); \
371 }
372
373#endif
374
375/* add an item to the DTQ */
376#define EN_DTQADD(SC,CNT,CHAN,JK_OR_BCODE,ADDR,LEN,END) { \
377 if (END) \
378 (SC)->dtq[MID_DTQ_A2REG((SC)->dtq_us)] = EN_DQ_MK(CHAN,LEN); \
379 MID_MK_TXQ(SC,CNT,CHAN,END,JK_OR_BCODE); \
380 (SC)->dtq_us += 4; \
381 EN_WRITE((SC), (SC)->dtq_us, (ADDR)); \
382 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, (SC)->dtq_us, 4); \
383 (SC)->dtq_free--; \
384 if (END) \
385 EN_WRITE((SC), MID_DMA_WRTX, MID_DTQ_A2REG((SC)->dtq_us)); \
386}
387
388/* DRQ add macro */
389#define EN_DRQADD(SC,CNT,VCI,JK_OR_BCODE,ADDR,LEN,SLOT,END) { \
390 if (END) \
391 (SC)->drq[MID_DRQ_A2REG((SC)->drq_us)] = EN_DQ_MK(SLOT,LEN); \
392 MID_MK_RXQ(SC,CNT,VCI,END,JK_OR_BCODE); \
393 (SC)->drq_us += 4; \
394 EN_WRITE((SC), (SC)->drq_us, (ADDR)); \
395 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, (SC)->drq_us, 4); \
396 (SC)->drq_free--; \
397 if (END) \
398 EN_WRITE((SC), MID_DMA_WRRX, MID_DRQ_A2REG((SC)->drq_us)); \
399}
400
401/*
258/*
402 * the driver code
259 * The dtq and drq members are set for each END entry in the corresponding
260 * card queue entry. It is used to find out when a buffer has
261 * finished DMAing and can be freed.
403 *
262 *
404 * the code is arranged in a specific way:
405 * [1] short/inline functions
406 * [2] autoconfig stuff
407 * [3] ioctl stuff
408 * [4] reset -> init -> transmit -> intr -> receive functions
409 *
263 * We store sc->dtq and sc->drq data in the following format...
264 * the 0x80000 ensures we != 0
410 */
265 */
266#define EN_DQ_MK(SLOT, LEN) (((SLOT) << 20) | (LEN) | (0x80000))
267#define EN_DQ_SLOT(X) ((X) >> 20)
268#define EN_DQ_LEN(X) ((X) & 0x3ffff)
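
For illustration (hypothetical values): packing receive slot 3 and a 1500-byte length yields a word that is guaranteed to be non-zero and that the interrupt handler can take apart again:

	uint32_t dq = EN_DQ_MK(3, 1500);	/* (3 << 20) | 1500 | 0x80000 == 0x3805dc */
	u_int slot = EN_DQ_SLOT(dq);		/* 3 */
	u_int len = EN_DQ_LEN(dq);		/* 1500 */
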
411
412/***********************************************************************/
413
414/*
269
270/***********************************************************************/
271
272/*
415 * en_read: read a word from the card. this is the only function
416 * that reads from the card.
273 * en_read{x}: read a word from the card. These are the only functions
274 * that read from the card.
417 */
275 */
276static __inline uint32_t
277en_readx(struct en_softc *sc, uint32_t r)
278{
279 uint32_t v;
418
280
419STATIC INLINE u_int32_t en_read(sc, r)
281#ifdef EN_DIAG
282 if (r > MID_MAXOFF || (r % 4))
283 panic("en_read out of range, r=0x%x", r);
284#endif
285 v = bus_space_read_4(sc->en_memt, sc->en_base, r);
286 return (v);
287}
420
288
421struct en_softc *sc;
422u_int32_t r;
423
289static __inline uint32_t
290en_read(struct en_softc *sc, uint32_t r)
424{
291{
292 uint32_t v;
425
293
426#ifdef EN_DEBUG_RANGE
427 if (r > MID_MAXOFF || (r % 4))
428 panic("en_read out of range, r=0x%x", r);
294#ifdef EN_DIAG
295 if (r > MID_MAXOFF || (r % 4))
296 panic("en_read out of range, r=0x%x", r);
429#endif
297#endif
430
431 return(bus_space_read_4(sc->en_memt, sc->en_base, r));
298 v = bus_space_read_4(sc->en_memt, sc->en_base, r);
299 DBG(sc, REG, ("en_read(%#x) -> %08x", r, v));
300 return (v);
432}
433
434/*
301}
302
303/*
435 * en_write: write a word to the card. this is the only function that
304 * en_write: write a word to the card. This is the only function that
436 * writes to the card.
437 */
305 * writes to the card.
306 */
438
439STATIC INLINE void en_write(sc, r, v)
440
441struct en_softc *sc;
442u_int32_t r, v;
443
307static __inline void
308en_write(struct en_softc *sc, uint32_t r, uint32_t v)
444{
309{
445#ifdef EN_DEBUG_RANGE
446 if (r > MID_MAXOFF || (r % 4))
447 panic("en_write out of range, r=0x%x", r);
310#ifdef EN_DIAG
311 if (r > MID_MAXOFF || (r % 4))
312 panic("en_write out of range, r=0x%x", r);
448#endif
313#endif
449
450 bus_space_write_4(sc->en_memt, sc->en_base, r, v);
314 DBG(sc, REG, ("en_write(%#x) <- %08x", r, v));
315 bus_space_write_4(sc->en_memt, sc->en_base, r, v);
451}
452
453/*
454 * en_k2sz: convert KBytes to a size parameter (a log2)
455 */
316}
317
318/*
319 * en_k2sz: convert KBytes to a size parameter (a log2)
320 */
456
457STATIC INLINE int en_k2sz(k)
458
459int k;
460
321static __inline int
322en_k2sz(int k)
461{
323{
462 switch(k) {
463 case 1: return(0);
464 case 2: return(1);
465 case 4: return(2);
466 case 8: return(3);
467 case 16: return(4);
468 case 32: return(5);
469 case 64: return(6);
470 case 128: return(7);
471 default: panic("en_k2sz");
472 }
473 return(0);
324 switch(k) {
325 case 1: return (0);
326 case 2: return (1);
327 case 4: return (2);
328 case 8: return (3);
329 case 16: return (4);
330 case 32: return (5);
331 case 64: return (6);
332 case 128: return (7);
333 default:
334 panic("en_k2sz");
335 }
336 return (0);
474}
475#define en_log2(X) en_k2sz(X)
476
337}
338#define en_log2(X) en_k2sz(X)
339
477
478/*
479 * en_b2sz: convert a DMA burst code to its byte size
480 */
340/*
341 * en_b2sz: convert a DMA burst code to its byte size
342 */
481
482STATIC INLINE int en_b2sz(b)
483
484int b;
485
343static __inline int
344en_b2sz(int b)
486{
345{
487 switch (b) {
488 case MIDDMA_WORD: return(1*4);
489 case MIDDMA_2WMAYBE:
490 case MIDDMA_2WORD: return(2*4);
491 case MIDDMA_4WMAYBE:
492 case MIDDMA_4WORD: return(4*4);
493 case MIDDMA_8WMAYBE:
494 case MIDDMA_8WORD: return(8*4);
495 case MIDDMA_16WMAYBE:
496 case MIDDMA_16WORD: return(16*4);
497 default: panic("en_b2sz");
498 }
499 return(0);
346 switch (b) {
347 case MIDDMA_WORD: return (1*4);
348 case MIDDMA_2WMAYBE:
349 case MIDDMA_2WORD: return (2*4);
350 case MIDDMA_4WMAYBE:
351 case MIDDMA_4WORD: return (4*4);
352 case MIDDMA_8WMAYBE:
353 case MIDDMA_8WORD: return (8*4);
354 case MIDDMA_16WMAYBE:
355 case MIDDMA_16WORD: return (16*4);
356 default:
357 panic("en_b2sz");
358 }
359 return (0);
500}
501
360}
361
502
503/*
504 * en_sz2b: convert a burst size (bytes) to DMA burst code
505 */
362/*
363 * en_sz2b: convert a burst size (bytes) to DMA burst code
364 */
506
507STATIC INLINE int en_sz2b(sz)
508
509int sz;
510
365static __inline int
366en_sz2b(int sz)
511{
367{
512 switch (sz) {
513 case 1*4: return(MIDDMA_WORD);
514 case 2*4: return(MIDDMA_2WORD);
515 case 4*4: return(MIDDMA_4WORD);
516 case 8*4: return(MIDDMA_8WORD);
517 case 16*4: return(MIDDMA_16WORD);
518 default: panic("en_sz2b");
519 }
520 return(0);
368 switch (sz) {
369 case 1*4: return (MIDDMA_WORD);
370 case 2*4: return (MIDDMA_2WORD);
371 case 4*4: return (MIDDMA_4WORD);
372 case 8*4: return (MIDDMA_8WORD);
373 case 16*4: return (MIDDMA_16WORD);
374 default:
375 panic("en_sz2b");
376 }
377 return(0);
521}
522
378}
379
523
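
Tying the three conversion helpers together (the values follow directly from the switch statements above; the snippet itself is only an illustration): for the largest burst the Midway supports,

	int bytes = en_b2sz(MIDDMA_16WORD);	/* 16 * 4 == 64 */
	int code = en_sz2b(bytes);		/* MIDDMA_16WORD again */
	int shift = en_log2(64);		/* 6, since en_log2() is an alias for en_k2sz() */
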
380#ifdef EN_DEBUG
524/*
381/*
525 * en_dqneed: calculate number of DTQ/DRQ's needed for a buffer
382 * Dump a packet
526 */
383 */
527
528STATIC INLINE int en_dqneed(sc, data, len, tx)
529
530struct en_softc *sc;
531caddr_t data;
532u_int len, tx;
533
384static void
385en_dump_packet(struct en_softc *sc, struct mbuf *m)
534{
386{
535 int result, needalign, sz;
387 int plen = m->m_pkthdr.len;
388 u_int pos = 0;
389 u_int totlen = 0;
390 int len;
391 u_char *ptr;
536
392
537#if !defined(MIDWAY_ENIONLY)
538#if !defined(MIDWAY_ADPONLY)
539 if (sc->is_adaptec)
540#endif /* !MIDWAY_ADPONLY */
541 return(1); /* adaptec can DMA anything in one go */
393 if_printf(&sc->enif, "packet len=%d", plen);
394 while (m != NULL) {
395 totlen += m->m_len;
396 ptr = mtod(m, u_char *);
397 for (len = 0; len < m->m_len; len++, pos++, ptr++) {
398 if (pos % 16 == 8)
399 printf(" ");
400 if (pos % 16 == 0)
401 printf("\n");
402 printf(" %02x", *ptr);
403 }
404 m = m->m_next;
405 }
406 printf("\n");
407 if (totlen != plen)
408 printf("sum of m_len=%u\n", totlen);
409}
542#endif
410#endif
543
544#if !defined(MIDWAY_ADPONLY)
545 result = 0;
546 if (len < EN_MINDMA) {
547 if (!tx) /* XXX: conservative */
548 return(1); /* will copy/DMA_JK */
549 }
550
411
551 if (tx) { /* byte burst? */
552 needalign = (((uintptr_t) (void *) data) % sizeof(u_int32_t));
553 if (needalign) {
554 result++;
555 sz = min(len, sizeof(u_int32_t) - needalign);
556 len -= sz;
557 data += sz;
558 }
559 }
412/*********************************************************************/
413/*
414 * DMA maps
415 */
560
416
561 if (sc->alburst && len) {
562 needalign = (((uintptr_t) (void *) data) & sc->bestburstmask);
563 if (needalign) {
564 result++; /* alburst */
565 sz = min(len, sc->bestburstlen - needalign);
566 len -= sz;
567 }
568 }
417/*
418 * Map constructor for a MAP.
419 *
420 * This is called each time a map is allocated
421 * from the pool and about to be returned to the user. Here we actually
422 * allocate the map if there isn't one. The problem is that we may fail
423 * to allocate the DMA map yet have no means to signal this error. Therefore,
424 * when allocating a map, the caller must check that there is a map. An
425 * additional problem is that i386 maps will be NULL, yet are OK and must
426 * be freed, so let's use a flag to signal allocation.
427 *
428 * Caveat: we have no way to know that we are called from an interrupt context
429 * here. We rely on the fact that bus_dmamap_create() uses M_NOWAIT in all
430 * its allocations.
431 *
432 * LOCK: any, not needed
433 */
434static void
435en_map_ctor(void *mem, int size, void *arg)
436{
437 struct en_softc *sc = arg;
438 struct en_map *map = mem;
439 int err;
569
440
570 if (len >= sc->bestburstlen) {
571 sz = len / sc->bestburstlen;
572 sz = sz * sc->bestburstlen;
573 len -= sz;
574 result++; /* best shot */
575 }
576
577 if (len) {
578 result++; /* clean up */
579 if (tx && (len % sizeof(u_int32_t)) != 0)
580 result++; /* byte cleanup */
581 }
441 if (map->sc == NULL)
442 map->sc = sc;
582
443
583 return(result);
584#endif /* !MIDWAY_ADPONLY */
444 if (!(map->flags & ENMAP_ALLOC)) {
445 err = bus_dmamap_create(sc->txtag, 0, &map->map);
446 if (err != 0)
447 if_printf(&sc->enif, "cannot create DMA map %d\n", err);
448 else
449 map->flags |= ENMAP_ALLOC;
450 }
451 map->flags &= ~ENMAP_LOADED;
585}
586
452}
453
587
588/*
454/*
589 * en_mget: get an mbuf chain that can hold totlen bytes and return it
590 * (for recv) [based on am7990_get from if_le and ieget from if_ie]
591 * after this call the sum of all the m_len's in the chain will be totlen.
455 * Map destructor.
456 *
457 * Called when a map is disposed into the zone. If the map is loaded, unload
458 * it.
459 *
460 * LOCK: any, not needed
592 */
461 */
462static void
463en_map_dtor(void *mem, int size, void *arg)
464{
465 struct en_map *map = mem;
593
466
594STATIC INLINE struct mbuf *en_mget(sc, totlen, drqneed)
467 if (map->flags & ENMAP_LOADED) {
468 bus_dmamap_unload(map->sc->txtag, map->map);
469 map->flags &= ~ENMAP_LOADED;
470 }
471}
595
472
596struct en_softc *sc;
597u_int totlen, *drqneed;
598
473/*
474 * Map finalizer.
475 *
476 * This is called each time a map is returned from the zone to the system.
477 * Get rid of the dmamap here.
478 *
479 * LOCK: any, not needed
480 */
481static void
482en_map_fini(void *mem, int size)
599{
483{
600 struct mbuf *m;
601 struct mbuf *top, **mp;
602 *drqneed = 0;
484 struct en_map *map = mem;
603
485
604 MGETHDR(m, M_DONTWAIT, MT_DATA);
605 if (m == NULL)
606 return(NULL);
607 m->m_pkthdr.rcvif = &sc->enif;
608 m->m_pkthdr.len = totlen;
609 m->m_len = MHLEN;
610 top = NULL;
611 mp = &top;
612
613 /* if (top != NULL) then we've already got 1 mbuf on the chain */
614 while (totlen > 0) {
615 if (top) {
616 MGET(m, M_DONTWAIT, MT_DATA);
617 if (!m) {
618 m_freem(top);
619 return(NULL); /* out of mbufs */
620 }
621 m->m_len = MLEN;
622 }
623 if (totlen >= MINCLSIZE) {
624 MCLGET(m, M_DONTWAIT);
625 if ((m->m_flags & M_EXT) == 0) {
626 m_free(m);
627 m_freem(top);
628 return(NULL); /* out of mbuf clusters */
629 }
630 m->m_len = MCLBYTES;
631 }
632 m->m_len = min(totlen, m->m_len);
633 totlen -= m->m_len;
634 *mp = m;
635 mp = &m->m_next;
636
637 *drqneed += en_dqneed(sc, m->m_data, m->m_len, 0);
638
639 }
640 return(top);
486 if (map->flags & ENMAP_ALLOC)
487 bus_dmamap_destroy(map->sc->txtag, map->map);
641}
642
488}
489
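
The three callbacks above are meant to be wired into a UMA zone; the zone itself is created in the attach code, which is outside this hunk. A rough sketch of what that creation presumably looks like (the zone name and flags here are assumptions, only the callback wiring is taken from this file):

	sc->map_zone = uma_zcreate("en dma maps", sizeof(struct en_map),
	    en_map_ctor, en_map_dtor, NULL, en_map_fini, UMA_ALIGN_PTR, 0);

en_start() then draws maps from this zone with uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT), passing the softc so that en_map_ctor() can reach the DMA tag.
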
643/***********************************************************************/
490/*********************************************************************/
491/*
492 * Transmission
493 */
644
645/*
494
495/*
646 * autoconfig stuff
496 * Argument structure to load a transmit DMA map
647 */
497 */
498struct txarg {
499 struct en_softc *sc;
500 struct mbuf *m;
501 u_int vci;
502 u_int chan; /* transmit channel */
503 u_int datalen; /* length of user data */
504 u_int flags;
505 u_int wait; /* return: out of resources */
506};
648
507
649void en_attach(sc)
650
651struct en_softc *sc;
652
508/*
509 * TX DMA map loader helper. This function is the callback when the map
510 * is loaded. It should fill the DMA segment descriptors into the hardware.
511 *
512 * LOCK: locked, needed
513 */
514static void
515en_txdma_load(void *uarg, bus_dma_segment_t *segs, int nseg, bus_size_t mapsize,
516 int error)
653{
517{
654 struct ifnet *ifp = &sc->enif;
655 int sz;
656 u_int32_t reg, lcv, check, ptr, sav, midvloc;
518 struct txarg *tx = uarg;
519 struct en_softc *sc = tx->sc;
520 struct en_txslot *slot = &sc->txslot[tx->chan];
521 uint32_t cur; /* on-card buffer position (bytes offset) */
522 uint32_t dtq; /* on-card queue position (byte offset) */
523 uint32_t last_dtq; /* last DTQ we have written */
524 uint32_t tmp;
525 u_int free; /* free queue entries on card */
526 u_int needalign, cnt;
527 bus_size_t rest; /* remaining bytes in current segment */
528 bus_addr_t addr;
529 bus_dma_segment_t *s;
530 uint32_t count, bcode;
531 int i;
657
532
658 /*
659 * probe card to determine memory size. the stupid ENI card always
660 * reports to PCI that it needs 4MB of space (2MB regs and 2MB RAM).
661 * if it has less than 2MB RAM the addresses wrap in the RAM address space.
662 * (i.e. on a 512KB card addresses 0x3ffffc, 0x37fffc, and 0x2ffffc
663 * are aliases for 0x27fffc [note that RAM starts at offset 0x200000]).
664 */
533 if (error != 0)
534 return;
665
535
666 if (sc->en_busreset)
667 sc->en_busreset(sc);
668 EN_WRITE(sc, MID_RESID, 0x0); /* reset card before touching RAM */
669 for (lcv = MID_PROBEOFF; lcv <= MID_MAXOFF ; lcv += MID_PROBSIZE) {
670 EN_WRITE(sc, lcv, lcv); /* data[address] = address */
671 for (check = MID_PROBEOFF ; check < lcv ; check += MID_PROBSIZE) {
672 reg = EN_READ(sc, check);
673 if (reg != check) { /* found an alias! */
674 goto done_probe; /* and quit */
675 }
676 }
677 }
678done_probe:
679 lcv -= MID_PROBSIZE; /* take one step back */
680 sc->en_obmemsz = (lcv + 4) - MID_RAMOFF;
536 cur = slot->cur;
537 dtq = sc->dtq_us;
538 free = sc->dtq_free;
681
539
682 /*
683 * determine the largest DMA burst supported
684 */
540 last_dtq = 0; /* make gcc happy */
685
541
686 en_dmaprobe(sc);
542 /*
543 * Local macro to add an entry to the transmit DMA area. If there
544 * are no entries left, return. Save the byte offset of the entry
545 * in last_dtq for later use.
546 */
547#define PUT_DTQ_ENTRY(ENI, BCODE, COUNT, ADDR) \
548 if (free == 0) { \
549 EN_COUNT(sc->stats.txdtqout); \
550 tx->wait = 1; \
551 return; \
552 } \
553 last_dtq = dtq; \
554 en_write(sc, dtq + 0, (ENI || !sc->is_adaptec) ? \
555 MID_MK_TXQ_ENI(COUNT, tx->chan, 0, BCODE) : \
556 MID_MK_TXQ_ADP(COUNT, tx->chan, 0, BCODE)); \
557 en_write(sc, dtq + 4, ADDR); \
558 \
559 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, dtq, 8); \
560 free--;
687
561
688 /*
689 * "hello world"
690 */
562 /*
563 * Local macro to generate a DMA entry to DMA cnt bytes. Updates
564 * the current buffer byte offset accordingly.
565 */
566#define DO_DTQ(TYPE) do { \
567 rest -= cnt; \
568 EN_WRAPADD(slot->start, slot->stop, cur, cnt); \
569 DBG(sc, TX, ("tx%d: "TYPE" %u bytes, %ju left, cur %#x", \
570 tx->chan, cnt, (uintmax_t)rest, cur)); \
571 \
572 PUT_DTQ_ENTRY(1, bcode, count, addr); \
573 \
574 addr += cnt; \
575 } while (0)
691
576
692 if (sc->en_busreset)
693 sc->en_busreset(sc);
694 EN_WRITE(sc, MID_RESID, 0x0); /* reset */
695 for (lcv = MID_RAMOFF ; lcv < MID_RAMOFF + sc->en_obmemsz ; lcv += 4)
696 EN_WRITE(sc, lcv, 0); /* zero memory */
577 if (!(tx->flags & TX_HAS_TBD)) {
578 /*
579 * Prepend the TBD - it did not fit into the first mbuf
580 */
581 tmp = MID_TBD_MK1((tx->flags & TX_AAL5) ?
582 MID_TBD_AAL5 : MID_TBD_NOAAL5,
583 sc->txspeed[tx->vci],
584 tx->m->m_pkthdr.len / MID_ATMDATASZ);
585 en_write(sc, cur, tmp);
586 EN_WRAPADD(slot->start, slot->stop, cur, 4);
697
587
698 reg = EN_READ(sc, MID_RESID);
588 tmp = MID_TBD_MK2(tx->vci, 0, 0);
589 en_write(sc, cur, tmp);
590 EN_WRAPADD(slot->start, slot->stop, cur, 4);
699
591
700 printf("%s: ATM midway v%d, board IDs %d.%d, %s%s%s, %ldKB on-board RAM\n",
701 sc->sc_dev.dv_xname, MID_VER(reg), MID_MID(reg), MID_DID(reg),
702 (MID_IS_SABRE(reg)) ? "sabre controller, " : "",
703 (MID_IS_SUNI(reg)) ? "SUNI" : "Utopia",
704 (!MID_IS_SUNI(reg) && MID_IS_UPIPE(reg)) ? " (pipelined)" : "",
705 (long)sc->en_obmemsz / 1024);
592 /* update DMA address */
593 PUT_DTQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
594 }
706
595
707 if (sc->is_adaptec) {
708 if (sc->bestburstlen == 64 && sc->alburst == 0)
709 printf("%s: passed 64 byte DMA test\n", sc->sc_dev.dv_xname);
710 else
711 printf("%s: FAILED DMA TEST: burst=%d, alburst=%d\n",
712 sc->sc_dev.dv_xname, sc->bestburstlen, sc->alburst);
713 } else {
714 printf("%s: maximum DMA burst length = %d bytes%s\n", sc->sc_dev.dv_xname,
715 sc->bestburstlen, (sc->alburst) ? " (must align)" : "");
716 }
596 for (i = 0, s = segs; i < nseg; i++, s++) {
597 rest = s->ds_len;
598 addr = s->ds_addr;
717
599
718 /*
719 * link into network subsystem and prepare card
720 */
600 if (sc->is_adaptec) {
601 /* adaptec card - simple */
721
602
722#if defined(__NetBSD__) || defined(__OpenBSD__)
723 bcopy(sc->sc_dev.dv_xname, sc->enif.if_xname, IFNAMSIZ);
724#endif
725 sc->enif.if_softc = sc;
726 ifp->if_flags = IFF_SIMPLEX|IFF_NOTRAILERS;
727 ifp->if_ioctl = en_ioctl;
728 ifp->if_output = atm_output;
729 ifp->if_start = en_start;
603 /* advance the on-card buffer pointer */
604 EN_WRAPADD(slot->start, slot->stop, cur, rest);
605 DBG(sc, TX, ("tx%d: adp %ju bytes %#jx (cur now 0x%x)",
606 tx->chan, (uintmax_t)rest, (uintmax_t)addr, cur));
730
607
731 /*
732 * init softc
733 */
608 PUT_DTQ_ENTRY(0, 0, rest, addr);
734
609
735 for (lcv = 0 ; lcv < MID_N_VC ; lcv++) {
736 sc->rxvc2slot[lcv] = RX_NONE;
737 sc->txspeed[lcv] = 0; /* full */
738 sc->txvc2slot[lcv] = 0; /* full speed == slot 0 */
739 }
610 continue;
611 }
740
612
741 sz = sc->en_obmemsz - (MID_BUFOFF - MID_RAMOFF);
742 ptr = sav = MID_BUFOFF;
743 ptr = roundup(ptr, EN_TXSZ * 1024); /* align */
744 sz = sz - (ptr - sav);
745 if (EN_TXSZ*1024 * EN_NTX > sz) {
746 printf("%s: EN_NTX/EN_TXSZ too big\n", sc->sc_dev.dv_xname);
747 return;
748 }
749 for (lcv = 0 ; lcv < EN_NTX ; lcv++) {
750 sc->txslot[lcv].mbsize = 0;
751 sc->txslot[lcv].start = ptr;
752 ptr += (EN_TXSZ * 1024);
753 sz -= (EN_TXSZ * 1024);
754 sc->txslot[lcv].stop = ptr;
755 sc->txslot[lcv].nref = 0;
756 bzero(&sc->txslot[lcv].indma, sizeof(sc->txslot[lcv].indma));
757 bzero(&sc->txslot[lcv].q, sizeof(sc->txslot[lcv].q));
758#ifdef EN_DEBUG
759 printf("%s: tx%d: start 0x%x, stop 0x%x\n", sc->sc_dev.dv_xname, lcv,
760 sc->txslot[lcv].start, sc->txslot[lcv].stop);
761#endif
762 }
613 /*
614 * do we need to do a DMA op to align to the maximum
615 * burst? Note that we are always 32-bit aligned.
616 */
617 if (sc->alburst &&
618 (needalign = (addr & sc->bestburstmask)) != 0) {
619 /* compute number of bytes, words and code */
620 cnt = sc->bestburstlen - needalign;
621 if (cnt > rest)
622 cnt = rest;
623 count = cnt / sizeof(uint32_t);
624 if (sc->noalbursts) {
625 bcode = MIDDMA_WORD;
626 } else {
627 bcode = en_dmaplan[count].bcode;
628 count = cnt >> en_dmaplan[count].divshift;
629 }
630 DO_DTQ("al_dma");
631 }
763
632
764 sav = ptr;
765 ptr = roundup(ptr, EN_RXSZ * 1024); /* align */
766 sz = sz - (ptr - sav);
767 sc->en_nrx = sz / (EN_RXSZ * 1024);
768 if (sc->en_nrx <= 0) {
769 printf("%s: EN_NTX/EN_TXSZ/EN_RXSZ too big\n", sc->sc_dev.dv_xname);
770 return;
771 }
633 /* do we need to do a max-sized burst? */
634 if (rest >= sc->bestburstlen) {
635 count = rest >> sc->bestburstshift;
636 cnt = count << sc->bestburstshift;
637 bcode = sc->bestburstcode;
638 DO_DTQ("best_dma");
639 }
772
640
773 /*
774 * ensure that there is always one VC slot on the service list free
775 * so that we can tell the difference between a full and empty list.
776 */
777 if (sc->en_nrx >= MID_N_VC)
778 sc->en_nrx = MID_N_VC - 1;
641 /* do we need to do a cleanup burst? */
642 if (rest != 0) {
643 cnt = rest;
644 count = rest / sizeof(uint32_t);
645 if (sc->noalbursts) {
646 bcode = MIDDMA_WORD;
647 } else {
648 bcode = en_dmaplan[count].bcode;
649 count = cnt >> en_dmaplan[count].divshift;
650 }
651 DO_DTQ("clean_dma");
652 }
653 }
779
654
780 for (lcv = 0 ; lcv < sc->en_nrx ; lcv++) {
781 sc->rxslot[lcv].rxhand = NULL;
782 sc->rxslot[lcv].oth_flags = ENOTHER_FREE;
783 bzero(&sc->rxslot[lcv].indma, sizeof(sc->rxslot[lcv].indma));
784 bzero(&sc->rxslot[lcv].q, sizeof(sc->rxslot[lcv].q));
785 midvloc = sc->rxslot[lcv].start = ptr;
786 ptr += (EN_RXSZ * 1024);
787 sz -= (EN_RXSZ * 1024);
788 sc->rxslot[lcv].stop = ptr;
789 midvloc = midvloc - MID_RAMOFF;
790 midvloc = (midvloc & ~((EN_RXSZ*1024) - 1)) >> 2; /* mask, cvt to words */
791 midvloc = midvloc >> MIDV_LOCTOPSHFT; /* we only want the top 11 bits */
792 midvloc = (midvloc & MIDV_LOCMASK) << MIDV_LOCSHIFT;
793 sc->rxslot[lcv].mode = midvloc |
794 (en_k2sz(EN_RXSZ) << MIDV_SZSHIFT) | MIDV_TRASH;
655 KASSERT (tx->flags & TX_HAS_PAD, ("PDU not padded"));
795
656
796#ifdef EN_DEBUG
797 printf("%s: rx%d: start 0x%x, stop 0x%x, mode 0x%x\n", sc->sc_dev.dv_xname,
798 lcv, sc->rxslot[lcv].start, sc->rxslot[lcv].stop, sc->rxslot[lcv].mode);
799#endif
800 }
657 if ((tx->flags & TX_AAL5) && !(tx->flags & TX_HAS_PDU)) {
658 /*
659 * Append the AAL5 PDU trailer
660 */
661 tmp = MID_PDU_MK1(0, 0, tx->datalen);
662 en_write(sc, cur, tmp);
663 EN_WRAPADD(slot->start, slot->stop, cur, 4);
801
664
802#ifdef EN_STAT
803 sc->vtrash = sc->otrash = sc->mfix = sc->txmbovr = sc->dmaovr = 0;
804 sc->txoutspace = sc->txdtqout = sc->launch = sc->lheader = sc->ltail = 0;
805 sc->hwpull = sc->swadd = sc->rxqnotus = sc->rxqus = sc->rxoutboth = 0;
806 sc->rxdrqout = sc->ttrash = sc->rxmbufout = sc->mfixfail = 0;
807 sc->headbyte = sc->tailbyte = sc->tailflush = 0;
808#endif
809 sc->need_drqs = sc->need_dtqs = 0;
665 en_write(sc, cur, 0);
666 EN_WRAPADD(slot->start, slot->stop, cur, 4);
810
667
811 printf("%s: %d %dKB receive buffers, %d %dKB transmit buffers allocated\n",
812 sc->sc_dev.dv_xname, sc->en_nrx, EN_RXSZ, EN_NTX, EN_TXSZ);
668 /* update DMA address */
669 PUT_DTQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
670 }
813
671
814 printf("%s: End Station Identifier (mac address) %6D\n",
815 sc->sc_dev.dv_xname, sc->macaddr, ":");
672 /* record the end for the interrupt routine */
673 sc->dtq[MID_DTQ_A2REG(last_dtq)] =
674 EN_DQ_MK(tx->chan, tx->m->m_pkthdr.len);
816
675
817 /*
818 * final commit
819 */
676 /* set the end flag in the last descriptor */
677 en_write(sc, last_dtq + 0, SETQ_END(sc, en_read(sc, last_dtq + 0)));
820
678
821 if_attach(ifp);
822 atm_ifattach(ifp);
679#undef PUT_DTQ_ENTRY
680#undef DO_DTQ
823
681
824#if NBPF > 0
825 BPFATTACH(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
826#endif
682 /* commit */
683 slot->cur = cur;
684 sc->dtq_free = free;
685 sc->dtq_us = dtq;
686
687 /* tell card */
688 en_write(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_us));
827}
828
689}
690
829
830/*
691/*
831 * en_dmaprobe: helper function for en_attach.
692 * en_txdma: start transmit DMA on the given channel, if possible
832 *
693 *
833 * see how the card handles DMA by running a few DMA tests. we need
834 * to figure out the largest number of bytes we can DMA in one burst
835 * ("bestburstlen"), and if the starting address for a burst needs to
836 * be aligned on any sort of boundary or not ("alburst").
694 * This is called from two places: when we got new packets from the upper
695 * layer or when we found that buffer space has freed up during interrupt
696 * processing.
837 *
697 *
838 * typical findings:
839 * sparc1: bestburstlen=4, alburst=0 (ick, broken DMA!)
840 * sparc2: bestburstlen=64, alburst=1
841 * p166: bestburstlen=64, alburst=0
698 * LOCK: locked, needed
842 */
699 */
843
844STATIC void en_dmaprobe(sc)
845
846struct en_softc *sc;
847
700static void
701en_txdma(struct en_softc *sc, struct en_txslot *slot)
848{
702{
849 u_int32_t srcbuf[64], dstbuf[64];
850 u_int8_t *sp, *dp;
851 int bestalgn, bestnotalgn, lcv, try;
703 struct en_map *map;
704 struct mbuf *lastm;
705 struct txarg tx;
706 u_int pad;
707 int error;
852
708
853 sc->alburst = 0;
709 DBG(sc, TX, ("tx%td: starting ...", slot - sc->txslot));
710 again:
711 bzero(&tx, sizeof(tx));
712 tx.chan = slot - sc->txslot;
713 tx.sc = sc;
854
714
855 sp = (u_int8_t *) srcbuf;
856 while ((((unsigned long) sp) % MIDDMA_MAXBURST) != 0)
857 sp += 4;
858 dp = (u_int8_t *) dstbuf;
859 while ((((unsigned long) dp) % MIDDMA_MAXBURST) != 0)
860 dp += 4;
715 /*
716 * get an mbuf waiting for DMA
717 */
718 _IF_DEQUEUE(&slot->q, tx.m);
719 if (tx.m == NULL) {
720 DBG(sc, TX, ("tx%td: ...done!", slot - sc->txslot));
721 return;
722 }
723 MBUF_GET_TX(tx.m, tx.vci, tx.flags, tx.datalen, pad, map);
861
724
862 bestalgn = bestnotalgn = en_dmaprobe_doit(sc, sp, dp, 0);
725 /*
726 * note: don't use the entire buffer space. if WRTX becomes equal
727 * to RDTX, the transmitter stops assuming the buffer is empty! --kjc
728 */
729 if (tx.m->m_pkthdr.len >= slot->bfree) {
730 EN_COUNT(sc->stats.txoutspace);
731 DBG(sc, TX, ("tx%td: out of transmit space", slot - sc->txslot));
732 goto waitres;
733 }
734
735 lastm = NULL;
736 if (!(tx.flags & TX_HAS_PAD)) {
737 if (pad != 0) {
738 /* Append the padding buffer */
739 (void)m_length(tx.m, &lastm);
740 lastm->m_next = sc->padbuf;
741 sc->padbuf->m_len = pad;
742 }
743 tx.flags |= TX_HAS_PAD;
744 }
863
745
864 for (lcv = 4 ; lcv < MIDDMA_MAXBURST ; lcv += 4) {
865 try = en_dmaprobe_doit(sc, sp+lcv, dp+lcv, 0);
866 if (try < bestnotalgn)
867 bestnotalgn = try;
868 }
746 /*
747 * Try to load that map
748 */
749 error = bus_dmamap_load_mbuf(sc->txtag, map->map, tx.m,
750 en_txdma_load, &tx, 0);
869
751
870 if (bestalgn != bestnotalgn) /* need bursts aligned */
871 sc->alburst = 1;
752 if (lastm != NULL)
753 lastm->m_next = NULL;
872
754
873 sc->bestburstlen = bestalgn;
874 sc->bestburstshift = en_log2(bestalgn);
875 sc->bestburstmask = sc->bestburstlen - 1; /* must be power of 2 */
876 sc->bestburstcode = en_sz2b(bestalgn);
755 if (error != 0) {
756 if_printf(&sc->enif, "loading TX map failed %d\n", error);
757 goto dequeue_drop;
758 }
759 map->flags |= ENMAP_LOADED;
760 if (tx.wait) {
761 /* probably not enough space */
762 bus_dmamap_unload(map->sc->txtag, map->map);
763 map->flags &= ~ENMAP_LOADED;
877
764
878#if 1 /* __FreeBSD__ */
879 /*
880 * correct pci chipsets should be able to handle misaligned-64-byte DMA.
881 * but there are too many broken chipsets around. we try to work around
882 * by finding the best workable dma size, but still some broken machines
883 * exhibit the problem later. so warn it here.
884 */
885 if (bestalgn != 64 || sc->alburst != 0) {
886 printf("%s: WARNING: DMA test detects a broken PCI chipset!\n",
887 sc->sc_dev.dv_xname);
888 printf(" trying to work around the problem... but if this doesn't\n");
889 printf(" work for you, you'd better switch to a newer motherboard.\n");
890 }
891#endif /* 1 */
892 return;
893}
765 sc->need_dtqs = 1;
766 DBG(sc, TX, ("tx%td: out of transmit DTQs", slot - sc->txslot));
767 goto waitres;
768 }
894
769
770 EN_COUNT(sc->stats.launch);
771 sc->enif.if_opackets++;
772
773#ifdef ENABLE_BPF
774 if (sc->enif.if_bpf != NULL) {
775 /*
776 * adjust the top of the mbuf to skip the TBD if present
777 * before passing the packet to bpf.
778 * Also remove padding and the PDU trailer. Assume both of
779 * them to be in the same mbuf. pktlen, m_len and m_data
780 * are not needed anymore so we can change them.
781 */
782 if (tx.flags & TX_HAS_TBD) {
783 tx.m->m_data += MID_TBD_SIZE;
784 tx.m->m_len -= MID_TBD_SIZE;
785 }
786 tx.m->m_pkthdr.len = m_length(tx.m, &lastm);
787 if (tx.m->m_pkthdr.len > tx.datalen) {
788 lastm->m_len -= tx.m->m_pkthdr.len - tx.datalen;
789 tx.m->m_pkthdr.len = tx.datalen;
790 }
895
791
896/*
897 * en_dmaprobe_doit: do actual testing
898 */
792 BPF_MTAP(&sc->enif, tx.m);
793 }
794#endif
899
795
900STATIC int
901en_dmaprobe_doit(sc, sp, dp, wmtry)
796 /*
797 * do some housekeeping and get the next packet
798 */
799 slot->bfree -= tx.m->m_pkthdr.len;
800 _IF_ENQUEUE(&slot->indma, tx.m);
902
801
903struct en_softc *sc;
904u_int8_t *sp, *dp;
905int wmtry;
802 goto again;
906
803
907{
908 int lcv, retval = 4, cnt, count;
909 u_int32_t reg, bcode, midvloc;
804 /*
805 * error handling. This is jumped to when we just want to drop
806 * the packet. Must be unlocked here.
807 */
808 dequeue_drop:
809 if (map != NULL)
810 uma_zfree(sc->map_zone, map);
910
811
911 /*
912 * set up a 1k buffer at MID_BUFOFF
913 */
812 slot->mbsize -= tx.m->m_pkthdr.len;
914
813
915 if (sc->en_busreset)
916 sc->en_busreset(sc);
917 EN_WRITE(sc, MID_RESID, 0x0); /* reset card before touching RAM */
814 m_freem(tx.m);
918
815
919 midvloc = ((MID_BUFOFF - MID_RAMOFF) / sizeof(u_int32_t)) >> MIDV_LOCTOPSHFT;
920 EN_WRITE(sc, MIDX_PLACE(0), MIDX_MKPLACE(en_k2sz(1), midvloc));
921 EN_WRITE(sc, MID_VC(0), (midvloc << MIDV_LOCSHIFT)
922 | (en_k2sz(1) << MIDV_SZSHIFT) | MIDV_TRASH);
923 EN_WRITE(sc, MID_DST_RP(0), 0);
924 EN_WRITE(sc, MID_WP_ST_CNT(0), 0);
816 goto again;
925
817
926 for (lcv = 0 ; lcv < 68 ; lcv++) /* set up sample data */
927 sp[lcv] = lcv+1;
928 EN_WRITE(sc, MID_MAST_CSR, MID_MCSR_ENDMA); /* enable DMA (only) */
818 waitres:
819 _IF_PREPEND(&slot->q, tx.m);
820}
929
821
930 sc->drq_chip = MID_DRQ_REG2A(EN_READ(sc, MID_DMA_RDRX));
931 sc->dtq_chip = MID_DTQ_REG2A(EN_READ(sc, MID_DMA_RDTX));
822/*
823 * Create a copy of a single mbuf. It can have either internal or
824 * external data, it may have a packet header. External data is really
825 * copied, so the new buffer is writeable.
826 *
827 * LOCK: any, not needed
828 */
829static struct mbuf *
830copy_mbuf(struct mbuf *m)
831{
832 struct mbuf *new;
932
833
933 /*
934 * try it now . . . DMA it out, then DMA it back in and compare
935 *
936 * note: in order to get the dma stuff to reverse directions it wants
937 * the "end" flag set! since we are not dma'ing valid data we may
938 * get an ident mismatch interrupt (which we will ignore).
939 *
940 * note: we've got two different tests rolled up in the same loop
941 * if (wmtry)
942 * then we are doing a wmaybe test and wmtry is a byte count
943 * else we are doing a burst test
944 */
834 MGET(new, M_TRYWAIT, MT_DATA);
835 if (new == NULL)
836 return (NULL);
945
837
946 for (lcv = 8 ; lcv <= MIDDMA_MAXBURST ; lcv = lcv * 2) {
838 if (m->m_flags & M_PKTHDR) {
839 M_MOVE_PKTHDR(new, m);
840 if (m->m_len > MHLEN) {
841 MCLGET(new, M_TRYWAIT);
842 if ((new->m_flags & M_EXT) == 0) {
843 m_free(new);
844 return (NULL);
845 }
846 }
847 } else {
848 if (m->m_len > MLEN) {
849 MCLGET(new, M_TRYWAIT);
850 if ((new->m_flags & M_EXT) == 0) {
851 m_free(new);
852 return (NULL);
853 }
854 }
855 }
947
856
948#ifdef EN_DEBUG
949 printf("DMA test lcv=%d, sp=0x%lx, dp=0x%lx, wmtry=%d\n",
950 lcv, (unsigned long)sp, (unsigned long)dp, wmtry);
951#endif
857 bcopy(m->m_data, new->m_data, m->m_len);
858 new->m_len = m->m_len;
859 new->m_flags &= ~M_RDONLY;
952
860
953 /* zero SRAM and dest buffer */
954 for (cnt = 0 ; cnt < 1024; cnt += 4)
955 EN_WRITE(sc, MID_BUFOFF+cnt, 0); /* zero memory */
956 for (cnt = 0 ; cnt < 68 ; cnt++)
957 dp[cnt] = 0;
861 return (new);
862}
958
863
959 if (wmtry) {
960 count = (sc->bestburstlen - sizeof(u_int32_t)) / sizeof(u_int32_t);
961 bcode = en_dmaplan[count].bcode;
962 count = wmtry >> en_dmaplan[count].divshift;
963 } else {
964 bcode = en_sz2b(lcv);
965 count = 1;
966 }
967 if (sc->is_adaptec)
968 EN_WRITE(sc, sc->dtq_chip, MID_MK_TXQ_ADP(lcv, 0, MID_DMA_END, 0));
969 else
970 EN_WRITE(sc, sc->dtq_chip, MID_MK_TXQ_ENI(count, 0, MID_DMA_END, bcode));
971 EN_WRITE(sc, sc->dtq_chip+4, vtophys(sp));
972 EN_WRITE(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_chip+8));
973 cnt = 1000;
974 while (EN_READ(sc, MID_DMA_RDTX) == MID_DTQ_A2REG(sc->dtq_chip)) {
975 DELAY(1);
976 cnt--;
977 if (cnt == 0) {
978 printf("%s: unexpected timeout in tx DMA test\n", sc->sc_dev.dv_xname);
979 return(retval); /* timeout, give up */
980 }
981 }
982 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, sc->dtq_chip, 8);
983 reg = EN_READ(sc, MID_INTACK);
984 if ((reg & MID_INT_DMA_TX) != MID_INT_DMA_TX) {
985 printf("%s: unexpected status in tx DMA test: 0x%x\n",
986 sc->sc_dev.dv_xname, reg);
987 return(retval);
988 }
989 EN_WRITE(sc, MID_MAST_CSR, MID_MCSR_ENDMA); /* re-enable DMA (only) */
864/*
865 * This function is called when we have an ENI adapter. It fixes the
866 * mbuf chain so that all addresses and lengths are 4-byte aligned.
867 * The overall length is already padded to a multiple of cells plus the
868 * TBD, so that part must always succeed. The routine can still fail when it
869 * needs to copy an mbuf (this may happen if an mbuf is readonly).
870 *
871 * We assume here, that aligning the virtual addresses to 4 bytes also
872 * aligns the physical addresses.
873 *
874 * LOCK: locked, needed
875 */
876static struct mbuf *
877en_fix_mchain(struct en_softc *sc, struct mbuf *m0, u_int *pad)
878{
879 struct mbuf **prev = &m0;
880 struct mbuf *m = m0;
881 struct mbuf *new;
882 u_char *d;
883 int off;
990
884
991 /* "return to sender..." address is known ... */
885 while (m != NULL) {
886 d = mtod(m, u_char *);
887 if ((off = (uintptr_t)d % sizeof(uint32_t)) != 0) {
888 EN_COUNT(sc->stats.mfixaddr);
889 if (M_WRITABLE(m)) {
890 bcopy(d, d - off, m->m_len);
891 m->m_data -= off;
892 } else {
893 if ((new = copy_mbuf(m)) == NULL) {
894 EN_COUNT(sc->stats.mfixfail);
895 m_freem(m0);
896 return (NULL);
897 }
898 new->m_next = m_free(m);
899 *prev = m = new;
900 }
901 }
992
902
993 if (sc->is_adaptec)
994 EN_WRITE(sc, sc->drq_chip, MID_MK_RXQ_ADP(lcv, 0, MID_DMA_END, 0));
995 else
996 EN_WRITE(sc, sc->drq_chip, MID_MK_RXQ_ENI(count, 0, MID_DMA_END, bcode));
997 EN_WRITE(sc, sc->drq_chip+4, vtophys(dp));
998 EN_WRITE(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip+8));
999 cnt = 1000;
1000 while (EN_READ(sc, MID_DMA_RDRX) == MID_DRQ_A2REG(sc->drq_chip)) {
1001 DELAY(1);
1002 cnt--;
1003 if (cnt == 0) {
1004 printf("%s: unexpected timeout in rx DMA test\n", sc->sc_dev.dv_xname);
1005 return(retval); /* timeout, give up */
1006 }
1007 }
1008 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_chip, 8);
1009 reg = EN_READ(sc, MID_INTACK);
1010 if ((reg & MID_INT_DMA_RX) != MID_INT_DMA_RX) {
1011 printf("%s: unexpected status in rx DMA test: 0x%x\n",
1012 sc->sc_dev.dv_xname, reg);
1013 return(retval);
1014 }
1015 EN_WRITE(sc, MID_MAST_CSR, MID_MCSR_ENDMA); /* re-enable DMA (only) */
903 if ((off = m->m_len % sizeof(uint32_t)) != 0) {
904 EN_COUNT(sc->stats.mfixlen);
905 if (!M_WRITABLE(m)) {
906 if ((new = copy_mbuf(m)) == NULL) {
907 EN_COUNT(sc->stats.mfixfail);
908 m_freem(m0);
909 return (NULL);
910 }
911 new->m_next = m_free(m);
912 *prev = m = new;
913 }
914 d = mtod(m, u_char *) + m->m_len;
915 off = 4 - off;
916 while (off) {
917 while (m->m_next && m->m_next->m_len == 0)
918 m->m_next = m_free(m->m_next);
1016
919
1017 if (wmtry) {
1018 return(bcmp(sp, dp, wmtry)); /* wmtry always exits here, no looping */
1019 }
1020
1021 if (bcmp(sp, dp, lcv))
1022 return(retval); /* failed, use last value */
920 if (m->m_next == NULL) {
921 *d++ = 0;
922 KASSERT(*pad > 0, ("no padding space"));
923 (*pad)--;
924 } else {
925 *d++ = *mtod(m->m_next, u_char *);
926 m->m_next->m_len--;
927 m->m_next->m_data++;
928 }
929 m->m_len++;
930 off--;
931 }
932 }
1023
933
1024 retval = lcv;
934 prev = &m->m_next;
935 m = m->m_next;
936 }
1025
937
1026 }
1027 return(retval); /* studly 64 byte DMA present! oh baby!! */
938 return (m0);
1028}
1029
939}
940
1030/***********************************************************************/
1031
1032/*
941/*
1033 * en_ioctl: handle ioctl requests
942 * en_start: start transmitting the next packet that needs to go out
943 * if there is one. We take off all packets from the interface's queue and
944 * put them into the channels queue.
1034 *
945 *
1035 * NOTE: if you add an ioctl to set txspeed, you should choose a new
1036 * TX channel/slot. Choose the one with the lowest sc->txslot[slot].nref
1037 * value, subtract one from sc->txslot[0].nref, add one to the
1038 * sc->txslot[slot].nref, set sc->txvc2slot[vci] = slot, and then set
1039 * txspeed[vci].
946 * Here we also prepend the transmit packet descriptor and append the padding
947 * and (for aal5) the PDU trailer. This is different from the original driver:
948 * we assume that allocating one or two additional mbufs is actually cheaper
949 * than all this algorithmic fiddling we would need otherwise.
950 *
951 * While the packet is on the channels wait queue we use the csum_* fields
952 * in the packet header to hold the original datalen, the AAL5 flag and the
953 * VCI. The packet length field in the header holds the needed buffer space.
954 * This may actually be more than the length of the current mbuf chain (when
955 * one or more of TBD, padding and PDU do not fit).
956 *
957 * LOCK: unlocked, needed
1040 */
958 */
1041
1042STATIC int en_ioctl(ifp, cmd, data)
1043
1044struct ifnet *ifp;
1045EN_IOCTL_CMDT cmd;
1046caddr_t data;
1047
959static void
960en_start(struct ifnet *ifp)
1048{
961{
1049 struct en_softc *sc = (struct en_softc *) ifp->if_softc;
1050 struct ifaddr *ifa = (struct ifaddr *) data;
1051 struct ifreq *ifr = (struct ifreq *) data;
1052 struct atm_pseudoioctl *api = (struct atm_pseudoioctl *)data;
1053#ifdef NATM
1054 struct atm_rawioctl *ario = (struct atm_rawioctl *)data;
1055 int slot;
1056#endif
1057 int s, error = 0;
962 struct en_softc *sc = (struct en_softc *)ifp->if_softc;
963 struct mbuf *m, *lastm;
964 struct atm_pseudohdr *ap;
965 u_int pad; /* 0-bytes to pad at PDU end */
966 u_int datalen; /* length of user data */
967 u_int vci; /* the VCI we are transmitting on */
968 u_int chan; /* the transmit channel */
969 u_int flags;
970 uint32_t tbd[2];
971 uint32_t pdu[2];
972 struct en_map *map;
1058
973
1059 s = splnet();
974 while (1) {
975 IF_DEQUEUE(&ifp->if_snd, m);
976 if (m == NULL)
977 return;
1060
978
1061 switch (cmd) {
1062 case SIOCATMENA: /* enable circuit for recv */
1063 error = en_rxctl(sc, api, 1);
1064 break;
979 flags = 0;
1065
980
1066 case SIOCATMDIS: /* disable circuit for recv */
1067 error = en_rxctl(sc, api, 0);
1068 break;
981 ap = mtod(m, struct atm_pseudohdr *);
982 vci = ATM_PH_VCI(ap);
983 if (ATM_PH_FLAGS(ap) & ATM_PH_AAL5)
984 flags |= TX_AAL5;
1069
985
1070#ifdef NATM
1071 case SIOCXRAWATM:
1072 if ((slot = sc->rxvc2slot[ario->npcb->npcb_vci]) == RX_NONE) {
1073 error = EINVAL;
1074 break;
986 if (ATM_PH_VPI(ap) != 0 || vci > MID_N_VC) {
987 DBG(sc, TX, ("output vpi=%u, vci=%u -- drop",
988 ATM_PH_VPI(ap), vci));
989 m_freem(m);
990 continue;
1075 }
991 }
1076 if (ario->rawvalue > EN_RXSZ*1024)
1077 ario->rawvalue = EN_RXSZ*1024;
1078 if (ario->rawvalue) {
1079 sc->rxslot[slot].oth_flags |= ENOTHER_RAW;
1080 sc->rxslot[slot].raw_threshold = ario->rawvalue;
1081 } else {
1082 sc->rxslot[slot].oth_flags &= (~ENOTHER_RAW);
1083 sc->rxslot[slot].raw_threshold = 0;
1084 }
1085#ifdef EN_DEBUG
1086 printf("%s: rxvci%d: turn %s raw (boodi) mode\n",
1087 sc->sc_dev.dv_xname, ario->npcb->npcb_vci,
1088 (ario->rawvalue) ? "on" : "off");
1089#endif
1090 break;
1091#endif
1092 case SIOCSIFADDR:
1093 ifp->if_flags |= IFF_UP;
1094#if defined(INET) || defined(INET6)
1095 if (ifa->ifa_addr->sa_family == AF_INET
1096 || ifa->ifa_addr->sa_family == AF_INET6) {
1097 en_reset(sc);
1098 en_init(sc);
1099 ifa->ifa_rtrequest = atm_rtrequest; /* ??? */
1100 break;
1101 }
1102#endif /* INET */
1103 /* what to do if not INET? */
1104 en_reset(sc);
1105 en_init(sc);
1106 break;
992 m_adj(m, sizeof(struct atm_pseudohdr));
1107
993
1108 case SIOCGIFADDR:
1109 error = EINVAL;
1110 break;
994 /*
995 * (re-)calculate size of packet (in bytes)
996 */
997 m->m_pkthdr.len = datalen = m_length(m, &lastm);
1111
998
1112 case SIOCSIFFLAGS:
1113 error = EINVAL;
1114 break;
999 /*
1000 * Compute how much padding we need at the end of the mbuf,
1001 * then see whether we can put the TBD at the front of the mbuf
1002 * where the link header goes (well-behaved protocols will
1003 * reserve room for us). Last, check for room for the PDU trailer.
1004 */
1005 if (flags & TX_AAL5)
1006 m->m_pkthdr.len += MID_PDU_SIZE;
1007 m->m_pkthdr.len = roundup(m->m_pkthdr.len, MID_ATMDATASZ);
1008 pad = m->m_pkthdr.len - datalen;
1009 if (flags & TX_AAL5)
1010 pad -= MID_PDU_SIZE;
1011 m->m_pkthdr.len += MID_TBD_SIZE;
1115
1012
1116#if defined(SIOCSIFMTU) /* ??? copied from if_de */
1117#if !defined(ifr_mtu)
1118#define ifr_mtu ifr_metric
1119#endif
1120 case SIOCSIFMTU:
1121 /*
1122 * Set the interface MTU.
1123 */
1124#ifdef notsure
1125 if (ifr->ifr_mtu > ATMMTU) {
1126 error = EINVAL;
1127 break;
1128 }
1129#endif
1130 ifp->if_mtu = ifr->ifr_mtu;
1131 /* XXXCDC: do we really need to reset on MTU size change? */
1132 en_reset(sc);
1133 en_init(sc);
1134 break;
1135#endif /* SIOCSIFMTU */
1013 DBG(sc, TX, ("txvci%d: buflen=%u datalen=%u lead=%d trail=%d",
1014 vci, m->m_pkthdr.len, datalen, (int)M_LEADINGSPACE(m),
1015 (int)M_TRAILINGSPACE(lastm)));
1136
1016
1137 default:
1138 error = EINVAL;
1139 break;
1140 }
1141 splx(s);
1142 return error;
1143}
1017 /*
1018 * Allocate a map. We do this here rather than in en_txdma,
1019 * because en_txdma is also called from the interrupt handler
1020 * and that would create a locking problem. We must
1021 * use NOWAIT here, because the ip_output path holds various
1022 * locks.
1023 */
1024 map = uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT);
1025 if (map == NULL || !(map->flags & ENMAP_ALLOC)) {
1026 /* drop that packet */
1027 EN_COUNT(sc->stats.txnomap);
1028 if (map != NULL)
1029 uma_zfree(sc->map_zone, map);
1030 m_freem(m);
1031 continue;
1032 }
1144
1033
1034 /*
1035 * From here on we need access to sc
1036 */
1037 EN_LOCK(sc);
1038 if ((ifp->if_flags & IFF_RUNNING) == 0) {
1039 EN_UNLOCK(sc);
1040 uma_zfree(sc->map_zone, map);
1041 m_freem(m);
1042 continue;
1043 }
1145
1044
1146/*
1147 * en_rxctl: turn on and off VCs for recv.
1148 */
1045 /*
1046 * Check whether we can prepend the TBD (8 bytes)
1047 */
1048 if (M_WRITABLE(m) && M_LEADINGSPACE(m) >= MID_TBD_SIZE) {
1049 tbd[0] = htobe32(MID_TBD_MK1((flags & TX_AAL5) ?
1050 MID_TBD_AAL5 : MID_TBD_NOAAL5,
1051 sc->txspeed[vci],
1052 m->m_pkthdr.len / MID_ATMDATASZ));
1053 tbd[1] = htobe32(MID_TBD_MK2(vci, 0, 0));
1149
1054
1150STATIC int en_rxctl(sc, pi, on)
1055 m->m_data -= MID_TBD_SIZE;
1056 bcopy(tbd, m->m_data, MID_TBD_SIZE);
1057 m->m_len += MID_TBD_SIZE;
1058 flags |= TX_HAS_TBD;
1059 }
1151
1060
1152struct en_softc *sc;
1153struct atm_pseudoioctl *pi;
1154int on;
1061 /*
1062 * Check whether the padding fits (must be writeable -
1063 * we pad with zero).
1064 */
1065 if (M_WRITABLE(lastm) && M_TRAILINGSPACE(lastm) >= pad) {
1066 bzero(lastm->m_data + lastm->m_len, pad);
1067 lastm->m_len += pad;
1068 flags |= TX_HAS_PAD;
1155
1069
1156{
1157 u_int s, vci, flags, slot;
1158 u_int32_t oldmode, newmode;
1070 if ((flags & TX_AAL5) &&
1071 M_TRAILINGSPACE(lastm) > MID_PDU_SIZE) {
1072 pdu[0] = htobe32(MID_PDU_MK1(0, 0, datalen));
1073 pdu[1] = 0;
1074 bcopy(pdu, lastm->m_data + lastm->m_len,
1075 MID_PDU_SIZE);
1076 lastm->m_len += MID_PDU_SIZE;
1077 flags |= TX_HAS_PDU;
1078 }
1079 }
1159
1080
1160 vci = ATM_PH_VCI(&pi->aph);
1161 flags = ATM_PH_FLAGS(&pi->aph);
1081 if (!sc->is_adaptec &&
1082 (m = en_fix_mchain(sc, m, &pad)) == NULL) {
1083 EN_UNLOCK(sc);
1084 uma_zfree(sc->map_zone, map);
1085 continue;
1086 }
1162
1087
1163#ifdef EN_DEBUG
1164 printf("%s: %s vpi=%d, vci=%d, flags=%d\n", sc->sc_dev.dv_xname,
1165 (on) ? "enable" : "disable", ATM_PH_VPI(&pi->aph), vci, flags);
1166#endif
1088 /*
1089 * get assigned channel (will be zero unless
1090 * txspeed[atm_vci] is set)
1091 */
1092 chan = sc->txvc2slot[vci];
1167
1093
1168 if (ATM_PH_VPI(&pi->aph) || vci >= MID_N_VC)
1169 return(EINVAL);
1094 if (m->m_pkthdr.len > EN_TXSZ * 1024) {
1095 DBG(sc, TX, ("tx%d: packet larger than xmit buffer "
1096 "(%d > %d)\n", chan, m->m_pkthdr.len,
1097 EN_TXSZ * 1024));
1098 EN_UNLOCK(sc);
1099 m_freem(m);
1100 uma_zfree(sc->map_zone, map);
1101 continue;
1102 }
1170
1103
1171 /*
1172 * turn on VCI!
1173 */
1104 if (sc->txslot[chan].mbsize > EN_TXHIWAT) {
1105 EN_COUNT(sc->stats.txmbovr);
1106 DBG(sc, TX, ("tx%d: buffer space shortage", chan));
1107 EN_UNLOCK(sc);
1108 m_freem(m);
1109 uma_zfree(sc->map_zone, map);
1110 continue;
1111 }
1174
1112
1175 if (on) {
1176 if (sc->rxvc2slot[vci] != RX_NONE)
1177 return(EINVAL);
1178 for (slot = 0 ; slot < sc->en_nrx ; slot++)
1179 if (sc->rxslot[slot].oth_flags & ENOTHER_FREE)
1180 break;
1181 if (slot == sc->en_nrx)
1182 return(ENOSPC);
1183 sc->rxvc2slot[vci] = slot;
1184 sc->rxslot[slot].rxhand = NULL;
1185 oldmode = sc->rxslot[slot].mode;
1186 newmode = (flags & ATM_PH_AAL5) ? MIDV_AAL5 : MIDV_NOAAL;
1187 sc->rxslot[slot].mode = MIDV_SETMODE(oldmode, newmode);
1188 sc->rxslot[slot].atm_vci = vci;
1189 sc->rxslot[slot].atm_flags = flags;
1190 sc->rxslot[slot].oth_flags = 0;
1191 sc->rxslot[slot].rxhand = pi->rxhand;
1192 if (sc->rxslot[slot].indma.ifq_head || sc->rxslot[slot].q.ifq_head)
1193 panic("en_rxctl: left over mbufs on enable");
1194 sc->txspeed[vci] = 0; /* full speed to start */
1195 sc->txvc2slot[vci] = 0; /* init value */
1196 sc->txslot[0].nref++; /* bump reference count */
1197 en_loadvc(sc, vci); /* does debug printf for us */
1198 return(0);
1199 }
1113 /* commit */
1114 sc->txslot[chan].mbsize += m->m_pkthdr.len;
1200
1115
1201 /*
1202 * turn off VCI
1203 */
1116 DBG(sc, TX, ("tx%d: VCI=%d, speed=0x%x, buflen=%d, mbsize=%d",
1117 chan, vci, sc->txspeed[vci], m->m_pkthdr.len,
1118 sc->txslot[chan].mbsize));
1204
1119
1205 if (sc->rxvc2slot[vci] == RX_NONE)
1206 return(EINVAL);
1207 slot = sc->rxvc2slot[vci];
1208 if ((sc->rxslot[slot].oth_flags & (ENOTHER_FREE|ENOTHER_DRAIN)) != 0)
1209 return(EINVAL);
1210 s = splimp(); /* block out enintr() */
1211 oldmode = EN_READ(sc, MID_VC(vci));
1212 newmode = MIDV_SETMODE(oldmode, MIDV_TRASH) & ~MIDV_INSERVICE;
1213 EN_WRITE(sc, MID_VC(vci), (newmode | (oldmode & MIDV_INSERVICE)));
1214 /* halt in tracks, be careful to preserve inservice bit */
1215 DELAY(27);
1216 sc->rxslot[slot].rxhand = NULL;
1217 sc->rxslot[slot].mode = newmode;
1120 MBUF_SET_TX(m, vci, flags, datalen, pad, map);
1218
1121
1219 sc->txslot[sc->txvc2slot[vci]].nref--;
1220 sc->txspeed[vci] = 0;
1221 sc->txvc2slot[vci] = 0;
1122 _IF_ENQUEUE(&sc->txslot[chan].q, m);
1222
1123
1223 /* if stuff is still going on we are going to have to drain it out */
1224 if (sc->rxslot[slot].indma.ifq_head ||
1225 sc->rxslot[slot].q.ifq_head ||
1226 (sc->rxslot[slot].oth_flags & ENOTHER_SWSL) != 0) {
1227 sc->rxslot[slot].oth_flags |= ENOTHER_DRAIN;
1228 } else {
1229 sc->rxslot[slot].oth_flags = ENOTHER_FREE;
1230 sc->rxslot[slot].atm_vci = RX_NONE;
1231 sc->rxvc2slot[vci] = RX_NONE;
1232 }
1233 splx(s); /* enable enintr() */
1234#ifdef EN_DEBUG
1235 printf("%s: rx%d: VCI %d is now %s\n", sc->sc_dev.dv_xname, slot, vci,
1236 (sc->rxslot[slot].oth_flags & ENOTHER_DRAIN) ? "draining" : "free");
1237#endif
1238 return(0);
1124 en_txdma(sc, &sc->txslot[chan]);
1125
1126 EN_UNLOCK(sc);
1127 }
1239}
1240
1128}
1129
1241/***********************************************************************/
1242
1130/*********************************************************************/
1243/*
1131/*
1244 * en_reset: reset the board, throw away work in progress.
1245 * must en_init to recover.
1132 * VCs
1246 */
1247
1133 */
1134
1248void en_reset(sc)
1249
1250struct en_softc *sc;
1251
1135/*
1136 * en_loadvc: load a vc tab entry from a slot
1137 *
1138 * LOCK: locked, needed
1139 */
1140static void
1141en_loadvc(struct en_softc *sc, int vc)
1252{
1142{
1253 struct mbuf *m;
1254 int lcv, slot;
1143 int slot;
1144 uint32_t reg = en_read(sc, MID_VC(vc));
1255
1145
1256#ifdef EN_DEBUG
1257 printf("%s: reset\n", sc->sc_dev.dv_xname);
1258#endif
1146 reg = MIDV_SETMODE(reg, MIDV_TRASH);
1147 en_write(sc, MID_VC(vc), reg);
1148 DELAY(27);
1259
1149
1260 if (sc->en_busreset)
1261 sc->en_busreset(sc);
1262 EN_WRITE(sc, MID_RESID, 0x0); /* reset hardware */
1150 if ((slot = sc->rxvc2slot[vc]) == RX_NONE)
1151 return;
1263
1152
1264 /*
1265 * recv: dump any mbufs we are dma'ing into, if DRAINing, then a reset
1266 * will free us!
1267 */
1153 /* no need to set CRC */
1268
1154
1269 for (lcv = 0 ; lcv < MID_N_VC ; lcv++) {
1270 if (sc->rxvc2slot[lcv] == RX_NONE)
1271 continue;
1272 slot = sc->rxvc2slot[lcv];
1273 while (1) {
1274 _IF_DEQUEUE(&sc->rxslot[slot].indma, m);
1275 if (m == NULL)
1276 break; /* >>> exit 'while(1)' here <<< */
1277 m_freem(m);
1278 }
1279 while (1) {
1280 _IF_DEQUEUE(&sc->rxslot[slot].q, m);
1281 if (m == NULL)
1282 break; /* >>> exit 'while(1)' here <<< */
1283 m_freem(m);
1284 }
1285 sc->rxslot[slot].oth_flags &= ~ENOTHER_SWSL;
1286 if (sc->rxslot[slot].oth_flags & ENOTHER_DRAIN) {
1287 sc->rxslot[slot].oth_flags = ENOTHER_FREE;
1288 sc->rxvc2slot[lcv] = RX_NONE;
1289#ifdef EN_DEBUG
1290 printf("%s: rx%d: VCI %d is now free\n", sc->sc_dev.dv_xname, slot, lcv);
1291#endif
1292 }
1293 }
1155 /* read pointer = 0, desc. start = 0 */
1156 en_write(sc, MID_DST_RP(vc), 0);
1157 /* write pointer = 0 */
1158 en_write(sc, MID_WP_ST_CNT(vc), 0);
1159 /* set mode, size, loc */
1160 en_write(sc, MID_VC(vc), sc->rxslot[slot].mode);
1294
1161
1295 /*
1296 * xmit: dump everything
1297 */
1162 sc->rxslot[slot].cur = sc->rxslot[slot].start;
1298
1163
1299 for (lcv = 0 ; lcv < EN_NTX ; lcv++) {
1300 while (1) {
1301 _IF_DEQUEUE(&sc->txslot[lcv].indma, m);
1302 if (m == NULL)
1303 break; /* >>> exit 'while(1)' here <<< */
1304 m_freem(m);
1305 }
1306 while (1) {
1307 _IF_DEQUEUE(&sc->txslot[lcv].q, m);
1308 if (m == NULL)
1309 break; /* >>> exit 'while(1)' here <<< */
1310 m_freem(m);
1311 }
1312
1313 sc->txslot[lcv].mbsize = 0;
1314 }
1315
1316 return;
1164 DBG(sc, VC, ("rx%d: assigned to VCI %d", slot, vc));
1317}
1318
1165}
1166
1319
1320/*
1167/*
1321 * en_init: init board and sync the card with the data in the softc.
1168 * en_rxctl: turn on and off VCs for recv.
1169 *
1170 * LOCK: unlocked, needed
1322 */
1171 */
1172static int
1173en_rxctl(struct en_softc *sc, struct atm_pseudoioctl *pi, int on)
1174{
1175 u_int vci, flags, slot;
1176 uint32_t oldmode, newmode;
1323
1177
1324STATIC void en_init(sc)
1178 vci = ATM_PH_VCI(&pi->aph);
1179 flags = ATM_PH_FLAGS(&pi->aph);
1325
1180
1326struct en_softc *sc;
1181 DBG(sc, IOCTL, ("%s vpi=%d, vci=%d, flags=%#x",
1182 (on) ? "enable" : "disable", ATM_PH_VPI(&pi->aph), vci, flags));
1327
1183
1328{
1329 int vc, slot;
1330 u_int32_t loc;
1184 if (ATM_PH_VPI(&pi->aph) || vci >= MID_N_VC)
1185 return (EINVAL);
1331
1186
1332 if ((sc->enif.if_flags & IFF_UP) == 0) {
1333#ifdef EN_DEBUG
1334 printf("%s: going down\n", sc->sc_dev.dv_xname);
1335#endif
1336 en_reset(sc); /* to be safe */
1337 sc->enif.if_flags &= ~IFF_RUNNING; /* disable */
1338 return;
1339 }
1187 EN_LOCK(sc);
1340
1188
1341#ifdef EN_DEBUG
1342 printf("%s: going up\n", sc->sc_dev.dv_xname);
1343#endif
1344 sc->enif.if_flags |= IFF_RUNNING; /* enable */
1189 if (on) {
1190 /*
1191 * turn on VCI!
1192 */
1193 if (sc->rxvc2slot[vci] != RX_NONE)
1194 return (EINVAL);
1195 for (slot = 0; slot < sc->en_nrx; slot++)
1196 if (sc->rxslot[slot].oth_flags & ENOTHER_FREE)
1197 break;
1198 if (slot == sc->en_nrx) {
1199 EN_UNLOCK(sc);
1200 return (ENOSPC);
1201 }
1345
1202
1346 if (sc->en_busreset)
1347 sc->en_busreset(sc);
1348 EN_WRITE(sc, MID_RESID, 0x0); /* reset */
1203 sc->rxvc2slot[vci] = slot;
1204 sc->rxslot[slot].rxhand = NULL;
1205 oldmode = sc->rxslot[slot].mode;
1206 newmode = (flags & ATM_PH_AAL5) ? MIDV_AAL5 : MIDV_NOAAL;
1207 sc->rxslot[slot].mode = MIDV_SETMODE(oldmode, newmode);
1208 sc->rxslot[slot].atm_vci = vci;
1209 sc->rxslot[slot].atm_flags = flags;
1210 sc->rxslot[slot].oth_flags = 0;
1211 sc->rxslot[slot].rxhand = pi->rxhand;
1349
1212
1350 /*
1351 * init obmem data structures: vc tab, dma q's, slist.
1352 *
1353 * note that we set drq_free/dtq_free to one less than the total number
1354 * of DTQ/DRQs present. we do this because the card uses the condition
1355 * (drq_chip == drq_us) to mean "list is empty"... but if you allow the
1356 * circular list to be completely full then (drq_chip == drq_us) [i.e.
1357 * the drq_us pointer will wrap all the way around]. by restricting
1358 * the number of active requests to (N - 1) we prevent the list from
1359 * becoming completely full. note that the card will sometimes give
1360 * us an interrupt for a DTQ/DRQ we have already processed... this helps
1361 * keep that interrupt from messing us up.
1362 */
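/*
 * Illustrative sketch (not driver code): why the free count starts at
 * N - 1. With one consumer index (drq_chip/dtq_chip, where the card is)
 * and one producer index (drq_us/dtq_us, where we write next), equality
 * must mean "empty"; if all N entries could be outstanding the producer
 * would wrap onto the consumer and "full" would look exactly like "empty".
 * The struct and names below are made up for the example.
 */
struct example_dq {
	u_int chip;		/* consumer: card's position */
	u_int us;		/* producer: our position */
	u_int free;		/* initialized to N - 1, never N */
};

static int
example_dq_put(struct example_dq *dq, u_int n)
{
	if (dq->free == 0)
		return (0);		/* adding would make us == chip */
	dq->us = (dq->us + 1) % n;
	dq->free--;
	return (1);
}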
1213 if (_IF_QLEN(&sc->rxslot[slot].indma) != 0 ||
1214 _IF_QLEN(&sc->rxslot[slot].q) != 0)
1215 panic("en_rxctl: left over mbufs on enable");
1216 sc->txspeed[vci] = 0; /* full speed to start */
1217 sc->txvc2slot[vci] = 0; /* init value */
1218 sc->txslot[0].nref++; /* bump reference count */
1219 en_loadvc(sc, vci); /* does debug printf for us */
1363
1220
1364 for (vc = 0 ; vc < MID_N_VC ; vc++)
1365 en_loadvc(sc, vc);
1221 EN_UNLOCK(sc);
1222 return (0);
1223 }
1366
1224
1367 bzero(&sc->drq, sizeof(sc->drq));
1368 sc->drq_free = MID_DRQ_N - 1; /* N - 1 */
1369 sc->drq_chip = MID_DRQ_REG2A(EN_READ(sc, MID_DMA_RDRX));
1370 EN_WRITE(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
1371 /* ensure zero queue */
1372 sc->drq_us = sc->drq_chip;
1225 /*
1226 * turn off VCI
1227 */
1228 if (sc->rxvc2slot[vci] == RX_NONE) {
1229 EN_UNLOCK(sc);
1230 return (EINVAL);
1231 }
1232 slot = sc->rxvc2slot[vci];
1233 if ((sc->rxslot[slot].oth_flags & (ENOTHER_FREE|ENOTHER_DRAIN)) != 0) {
1234 EN_UNLOCK(sc);
1235 return (EINVAL);
1236 }
1373
1237
1374 bzero(&sc->dtq, sizeof(sc->dtq));
1375 sc->dtq_free = MID_DTQ_N - 1; /* N - 1 */
1376 sc->dtq_chip = MID_DTQ_REG2A(EN_READ(sc, MID_DMA_RDTX));
1377 EN_WRITE(sc, MID_DMA_WRTX, MID_DRQ_A2REG(sc->dtq_chip));
1378 /* ensure zero queue */
1379 sc->dtq_us = sc->dtq_chip;
1238 oldmode = en_read(sc, MID_VC(vci));
1239 newmode = MIDV_SETMODE(oldmode, MIDV_TRASH) & ~MIDV_INSERVICE;
1240 en_write(sc, MID_VC(vci), (newmode | (oldmode & MIDV_INSERVICE)));
1380
1241
1381 sc->hwslistp = MID_SL_REG2A(EN_READ(sc, MID_SERV_WRITE));
1382 sc->swsl_size = sc->swsl_head = sc->swsl_tail = 0;
1242 /* halt in tracks, be careful to preserve inservice bit */
1243 DELAY(27);
1244 sc->rxslot[slot].rxhand = NULL;
1245 sc->rxslot[slot].mode = newmode;
1383
1246
1384#ifdef EN_DEBUG
1385 printf("%s: drq free/chip: %d/0x%x, dtq free/chip: %d/0x%x, hwslist: 0x%x\n",
1386 sc->sc_dev.dv_xname, sc->drq_free, sc->drq_chip,
1387 sc->dtq_free, sc->dtq_chip, sc->hwslistp);
1388#endif
1247 sc->txslot[sc->txvc2slot[vci]].nref--;
1248 sc->txspeed[vci] = 0;
1249 sc->txvc2slot[vci] = 0;
1389
1250
1390 for (slot = 0 ; slot < EN_NTX ; slot++) {
1391 sc->txslot[slot].bfree = EN_TXSZ * 1024;
1392 EN_WRITE(sc, MIDX_READPTR(slot), 0);
1393 EN_WRITE(sc, MIDX_DESCSTART(slot), 0);
1394 loc = sc->txslot[slot].cur = sc->txslot[slot].start;
1395 loc = loc - MID_RAMOFF;
1396 loc = (loc & ~((EN_TXSZ*1024) - 1)) >> 2; /* mask, cvt to words */
1397 loc = loc >> MIDV_LOCTOPSHFT; /* top 11 bits */
1398 EN_WRITE(sc, MIDX_PLACE(slot), MIDX_MKPLACE(en_k2sz(EN_TXSZ), loc));
1399#ifdef EN_DEBUG
1400 printf("%s: tx%d: place 0x%x\n", sc->sc_dev.dv_xname, slot,
1401 (u_int)EN_READ(sc, MIDX_PLACE(slot)));
1402#endif
1403 }
1251 /* if stuff is still going on we are going to have to drain it out */
1252 if (_IF_QLEN(&sc->rxslot[slot].indma) != 0 ||
1253 _IF_QLEN(&sc->rxslot[slot].q) != 0 ||
1254 (sc->rxslot[slot].oth_flags & ENOTHER_SWSL) != 0) {
1255 sc->rxslot[slot].oth_flags |= ENOTHER_DRAIN;
1256 } else {
1257 sc->rxslot[slot].oth_flags = ENOTHER_FREE;
1258 sc->rxslot[slot].atm_vci = RX_NONE;
1259 sc->rxvc2slot[vci] = RX_NONE;
1260 }
1261 EN_UNLOCK(sc);
1404
1262
1405 /*
1406 * enable!
1407 */
1263 DBG(sc, IOCTL, ("rx%d: VCI %d is now %s", slot, vci,
1264 (sc->rxslot[slot].oth_flags & ENOTHER_DRAIN) ? "draining" : "free"));
1408
1265
1409 EN_WRITE(sc, MID_INTENA, MID_INT_TX|MID_INT_DMA_OVR|MID_INT_IDENT|
1410 MID_INT_LERR|MID_INT_DMA_ERR|MID_INT_DMA_RX|MID_INT_DMA_TX|
1411 MID_INT_SERVICE| /* >>> MID_INT_SUNI| XXXCDC<<< */ MID_INT_STATS);
1412 EN_WRITE(sc, MID_MAST_CSR, MID_SETIPL(sc->ipl)|MID_MCSR_ENDMA|
1413 MID_MCSR_ENTX|MID_MCSR_ENRX);
1414
1266 return (0);
1415}
1416
1267}
1268
1269/*********************************************************************/
1270/*
1271 * starting/stopping the card
1272 */
1417
1418/*
1273
1274/*
1419 * en_loadvc: load a vc tab entry from a slot
1275 * en_reset_ul: reset the board, throw away work in progress.
1276 * must en_init to recover.
1277 *
1278 * LOCK: locked, needed
1420 */
1279 */
1280static void
1281en_reset_ul(struct en_softc *sc)
1282{
1283 struct en_map *map;
1284 struct mbuf *m;
1285 int lcv, slot;
1421
1286
1422STATIC void en_loadvc(sc, vc)
1287 if_printf(&sc->enif, "reset\n");
1423
1288
1424struct en_softc *sc;
1425int vc;
1289 if (sc->en_busreset)
1290 sc->en_busreset(sc);
1291 en_write(sc, MID_RESID, 0x0); /* reset hardware */
1426
1292
1427{
1428 int slot;
1429 u_int32_t reg = EN_READ(sc, MID_VC(vc));
1430
1431 reg = MIDV_SETMODE(reg, MIDV_TRASH);
1432 EN_WRITE(sc, MID_VC(vc), reg);
1433 DELAY(27);
1293 /*
1294 * recv: dump any mbufs we are dma'ing into, if DRAINing, then a reset
1295 * will free us!
1296 */
1297 for (lcv = 0 ; lcv < MID_N_VC ; lcv++) {
1298 if (sc->rxvc2slot[lcv] == RX_NONE)
1299 continue;
1300 slot = sc->rxvc2slot[lcv];
1434
1301
1435 if ((slot = sc->rxvc2slot[vc]) == RX_NONE)
1436 return;
1302 for (;;) {
1303 _IF_DEQUEUE(&sc->rxslot[slot].indma, m);
1304 if (m == NULL)
1305 break;
1306 map = (void *)m->m_pkthdr.rcvif;
1307 uma_zfree(sc->map_zone, map);
1308 m_freem(m);
1309 }
1310 for (;;) {
1311 _IF_DEQUEUE(&sc->rxslot[slot].q, m);
1312 if (m == NULL)
1313 break;
1314 m_freem(m);
1315 }
1316 sc->rxslot[slot].oth_flags &= ~ENOTHER_SWSL;
1317 if (sc->rxslot[slot].oth_flags & ENOTHER_DRAIN) {
1318 sc->rxslot[slot].oth_flags = ENOTHER_FREE;
1319 sc->rxvc2slot[lcv] = RX_NONE;
1320 DBG(sc, INIT, ("rx%d: VCI %d is now free", slot, lcv));
1321 }
1322 }
1437
1323
1438 /* no need to set CRC */
1439 EN_WRITE(sc, MID_DST_RP(vc), 0); /* read pointer = 0, desc. start = 0 */
1440 EN_WRITE(sc, MID_WP_ST_CNT(vc), 0); /* write pointer = 0 */
1441 EN_WRITE(sc, MID_VC(vc), sc->rxslot[slot].mode); /* set mode, size, loc */
1442 sc->rxslot[slot].cur = sc->rxslot[slot].start;
1324 /*
1325 * xmit: dump everything
1326 */
1327 for (lcv = 0 ; lcv < EN_NTX ; lcv++) {
1328 for (;;) {
1329 _IF_DEQUEUE(&sc->txslot[lcv].indma, m);
1330 if (m == NULL)
1331 break;
1332 map = (void *)m->m_pkthdr.rcvif;
1333 uma_zfree(sc->map_zone, map);
1334 m_freem(m);
1335 }
1336 for (;;) {
1337 _IF_DEQUEUE(&sc->txslot[lcv].q, m);
1338 if (m == NULL)
1339 break;
1340 map = (void *)m->m_pkthdr.rcvif;
1341 uma_zfree(sc->map_zone, map);
1342 m_freem(m);
1343 }
1344 sc->txslot[lcv].mbsize = 0;
1345 }
1346}
1443
1347
1444#ifdef EN_DEBUG
1445 printf("%s: rx%d: assigned to VCI %d\n", sc->sc_dev.dv_xname, slot, vc);
1446#endif
1348/*
1349 * en_reset: reset the board, throw away work in progress.
1350 * must en_init to recover.
1351 *
1352 * LOCK: unlocked, needed
1353 *
1354 * Use en_reset_ul if you already have the lock
1355 */
1356void
1357en_reset(struct en_softc *sc)
1358{
1359 EN_LOCK(sc);
1360 en_reset_ul(sc);
1361 EN_UNLOCK(sc);
1447}
1448
1449
1450/*
1362}
1363
1364
1365/*
1451 * en_start: start transmitting the next packet that needs to go out
1452 * if there is one. note that atm_output() has already splimp()'d us.
1366 * en_init: init board and sync the card with the data in the softc.
1367 *
1368 * LOCK: locked, needed
1453 */
1369 */
1370static void
1371en_init(struct en_softc *sc)
1372{
1373 int vc, slot;
1374 uint32_t loc;
1454
1375
1455STATIC void en_start(ifp)
1376 if ((sc->enif.if_flags & IFF_UP) == 0) {
1377 DBG(sc, INIT, ("going down"));
1378 en_reset(sc); /* to be safe */
1379 sc->enif.if_flags &= ~IFF_RUNNING; /* disable */
1380 return;
1381 }
1456
1382
1457struct ifnet *ifp;
1383 DBG(sc, INIT, ("going up"));
1384 sc->enif.if_flags |= IFF_RUNNING; /* enable */
1458
1385
1459{
1460 struct en_softc *sc = (struct en_softc *) ifp->if_softc;
1461 struct ifqueue *ifq = &ifp->if_snd; /* if INPUT QUEUE */
1462 struct mbuf *m, *lastm, *prev;
1463 struct atm_pseudohdr *ap, *new_ap;
1464 int txchan, mlen, got, need, toadd, cellcnt, first;
1465 u_int32_t atm_vpi, atm_vci, atm_flags, *dat, aal;
1466 u_int8_t *cp;
1386 if (sc->en_busreset)
1387 sc->en_busreset(sc);
1388 en_write(sc, MID_RESID, 0x0); /* reset */
1467
1389
1468 if ((ifp->if_flags & IFF_RUNNING) == 0)
1469 return;
1390 /*
1391 * init obmem data structures: vc tab, dma q's, slist.
1392 *
1393 * note that we set drq_free/dtq_free to one less than the total number
1394 * of DTQ/DRQs present. we do this because the card uses the condition
1395 * (drq_chip == drq_us) to mean "list is empty"... but if you allow the
1396 * circular list to be completely full then (drq_chip == drq_us) [i.e.
1397 * the drq_us pointer will wrap all the way around]. by restricting
1398 * the number of active requests to (N - 1) we prevent the list from
1399 * becoming completely full. note that the card will sometimes give
1400 * us an interrupt for a DTQ/DRQ we have already processed... this helps
1401 * keep that interrupt from messing us up.
1402 */
1470
1403
1471 /*
1472 * remove everything from interface queue since we handle all queueing
1473 * locally ...
1474 */
1404 for (vc = 0; vc < MID_N_VC; vc++)
1405 en_loadvc(sc, vc);
1475
1406
1476 while (1) {
1407 bzero(&sc->drq, sizeof(sc->drq));
1408 sc->drq_free = MID_DRQ_N - 1;
1409 sc->drq_chip = MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX));
1410 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
1411 sc->drq_us = sc->drq_chip;
1477
1412
1478 IF_DEQUEUE(ifq, m);
1479 if (m == NULL)
1480 return; /* EMPTY: >>> exit here <<< */
1481
1482 /*
1483 * calculate size of packet (in bytes)
1484 * also, if we are not doing transmit DMA we eliminate all stupid
1485 * (non-word) alignments here using en_mfix(). calls to en_mfix()
1486 * seem to be due to tcp retransmits for the most part.
1487 *
1488 * after this loop mlen is the total length of the mbuf chain (including atm_ph),
1489 * and lastm is a pointer to the last mbuf on the chain.
1490 */
1413 bzero(&sc->dtq, sizeof(sc->dtq));
1414 sc->dtq_free = MID_DTQ_N - 1;
1415 sc->dtq_chip = MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX));
1416 en_write(sc, MID_DMA_WRTX, MID_DRQ_A2REG(sc->dtq_chip));
1417 sc->dtq_us = sc->dtq_chip;
1491
1418
1492 lastm = m;
1493 mlen = 0;
1494 prev = NULL;
1495 while (1) {
1496 /* no DMA? */
1497 if ((!sc->is_adaptec && EN_ENIDMAFIX) || EN_NOTXDMA || !en_dma) {
1498 if ( ((uintptr_t)mtod(lastm, void *) % sizeof(u_int32_t)) != 0 ||
1499 ((lastm->m_len % sizeof(u_int32_t)) != 0 && lastm->m_next)) {
1500 first = (lastm == m);
1501 if (en_mfix(sc, &lastm, prev) == 0) { /* failed? */
1502 m_freem(m);
1503 m = NULL;
1504 break;
1505 }
1506 if (first)
1507 m = lastm; /* update */
1508 }
1509 prev = lastm;
1510 }
1419 sc->hwslistp = MID_SL_REG2A(en_read(sc, MID_SERV_WRITE));
1420 sc->swsl_size = sc->swsl_head = sc->swsl_tail = 0;
1511
1421
1512 mlen += lastm->m_len;
1513 if (lastm->m_next == NULL)
1514 break;
1515 lastm = lastm->m_next;
1516 }
1422 DBG(sc, INIT, ("drq free/chip: %d/0x%x, dtq free/chip: %d/0x%x, "
1423 "hwslist: 0x%x", sc->drq_free, sc->drq_chip, sc->dtq_free,
1424 sc->dtq_chip, sc->hwslistp));
1517
1425
1518 if (m == NULL) /* happens only if mfix fails */
1519 continue;
1426 for (slot = 0 ; slot < EN_NTX ; slot++) {
1427 sc->txslot[slot].bfree = EN_TXSZ * 1024;
1428 en_write(sc, MIDX_READPTR(slot), 0);
1429 en_write(sc, MIDX_DESCSTART(slot), 0);
1430 loc = sc->txslot[slot].cur = sc->txslot[slot].start;
1431 loc = loc - MID_RAMOFF;
1432 /* mask, cvt to words */
1433 loc = (loc & ~((EN_TXSZ * 1024) - 1)) >> 2;
1434 /* top 11 bits */
1435 loc = loc >> MIDV_LOCTOPSHFT;
1436 en_write(sc, MIDX_PLACE(slot), MIDX_MKPLACE(en_k2sz(EN_TXSZ),
1437 loc));
1438 DBG(sc, INIT, ("tx%d: place 0x%x", slot,
1439 (u_int)en_read(sc, MIDX_PLACE(slot))));
1440 }
1520
1441
1521 ap = mtod(m, struct atm_pseudohdr *);
1442 /*
1443 * enable!
1444 */
1445 en_write(sc, MID_INTENA, MID_INT_TX | MID_INT_DMA_OVR | MID_INT_IDENT |
1446 MID_INT_LERR | MID_INT_DMA_ERR | MID_INT_DMA_RX | MID_INT_DMA_TX |
1447 MID_INT_SERVICE | /* MID_INT_SUNI | */ MID_INT_STATS);
1448 en_write(sc, MID_MAST_CSR, MID_SETIPL(sc->ipl) | MID_MCSR_ENDMA |
1449 MID_MCSR_ENTX | MID_MCSR_ENRX);
1450}
1522
1451
1523 atm_vpi = ATM_PH_VPI(ap);
1524 atm_vci = ATM_PH_VCI(ap);
1525 atm_flags = ATM_PH_FLAGS(ap) & ~(EN_OBHDR|EN_OBTRL);
1526 aal = ((atm_flags & ATM_PH_AAL5) != 0)
1527 ? MID_TBD_AAL5 : MID_TBD_NOAAL5;
1452/*********************************************************************/
1453/*
1454 * Ioctls
1455 */
1528
1456
1529 /*
1530 * check that vpi/vci is one we can use
1531 */
1457/*
1458 * en_ioctl: handle ioctl requests
1459 *
1460 * NOTE: if you add an ioctl to set txspeed, you should choose a new
1461 * TX channel/slot. Choose the one with the lowest sc->txslot[slot].nref
1462 * value, subtract one from sc->txslot[0].nref, add one to the
1463 * sc->txslot[slot].nref, set sc->txvc2slot[vci] = slot, and then set
1464 * txspeed[vci].
1465 *
1466 * LOCK: unlocked, needed
1467 */
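/*
 * Illustrative sketch (not driver code): the slot selection the NOTE above
 * describes for a hypothetical "set txspeed" ioctl. It only uses fields
 * that already exist in the softc (txslot[].nref, txvc2slot[], txspeed[]);
 * the function itself is not part of the driver.
 */
static void
example_set_txspeed(struct en_softc *sc, u_int vci, u_int speed)
{
	u_int slot, best;

	best = 0;
	for (slot = 1; slot < EN_NTX; slot++)
		if (sc->txslot[slot].nref < sc->txslot[best].nref)
			best = slot;

	sc->txslot[sc->txvc2slot[vci]].nref--;	/* usually slot 0 */
	sc->txslot[best].nref++;
	sc->txvc2slot[vci] = best;
	sc->txspeed[vci] = speed;
}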
1468static int
1469en_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1470{
1471 struct en_softc *sc = (struct en_softc *)ifp->if_softc;
1472 struct ifaddr *ifa = (struct ifaddr *)data;
1473 struct ifreq *ifr = (struct ifreq *)data;
1474 struct atm_pseudoioctl *api = (struct atm_pseudoioctl *)data;
1475 int error = 0;
1532
1476
1533 if (atm_vpi || atm_vci > MID_N_VC) {
1534 printf("%s: output vpi=%d, vci=%d out of card range, dropping...\n",
1535 sc->sc_dev.dv_xname, atm_vpi, atm_vci);
1536 m_freem(m);
1537 continue;
1538 }
1477 switch (cmd) {
1539
1478
1540 /*
1541 * compute how much padding we need on the end of the mbuf, then
1542 * see if we can put the TBD at the front of the mbuf where the
1543 * link header goes (well behaved protocols will reserve room for us).
1544 * last, check if room for PDU tail.
1545 *
1546 * got = number of bytes of data we have
1547 * cellcnt = number of cells in this mbuf
1548 * need = number of bytes of data + padding we need (excludes TBD)
1549 * toadd = number of bytes of data we need to add to end of mbuf,
1550 * [including AAL5 PDU, if AAL5]
1551 */
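/*
 * Illustrative worked example (not driver code) of the computation above,
 * for 100 bytes of AAL5 data, assuming MID_PDU_SIZE = 8 and
 * MID_ATMDATASZ = 48:
 *
 *	got     = 100                      user data bytes
 *	toadd   = 8                        PDU trailer (first pass)
 *	cellcnt = (100 + 8 + 47) / 48 = 3  cells
 *	need    = 3 * 48 = 144             data + padding bytes
 *	toadd   = 144 - 100 = 44           bytes to append (36 bytes of zero
 *	                                   padding followed by the 8-byte PDU)
 */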
1479 case SIOCATMENA: /* enable circuit for recv */
1480 error = en_rxctl(sc, api, 1);
1481 break;
1552
1482
1553 got = mlen - sizeof(struct atm_pseudohdr);
1554 toadd = (aal == MID_TBD_AAL5) ? MID_PDU_SIZE : 0; /* PDU */
1555 cellcnt = (got + toadd + (MID_ATMDATASZ - 1)) / MID_ATMDATASZ;
1556 need = cellcnt * MID_ATMDATASZ;
1557 toadd = need - got; /* recompute, including zero padding */
1483 case SIOCATMDIS: /* disable circuit for recv */
1484 error = en_rxctl(sc, api, 0);
1485 break;
1558
1486
1559#ifdef EN_DEBUG
1560 printf("%s: txvci%d: mlen=%d, got=%d, need=%d, toadd=%d, cell#=%d\n",
1561 sc->sc_dev.dv_xname, atm_vci, mlen, got, need, toadd, cellcnt);
1562 printf(" leading_space=%d, trailing_space=%d\n",
1563 (int)M_LEADINGSPACE(m), (int)M_TRAILINGSPACE(lastm));
1564#endif
1487 case SIOCSIFADDR:
1488 EN_LOCK(sc);
1489 ifp->if_flags |= IFF_UP;
1490#if defined(INET) || defined(INET6)
1491 if (ifa->ifa_addr->sa_family == AF_INET
1492 || ifa->ifa_addr->sa_family == AF_INET6) {
1493 if (!(ifp->if_flags & IFF_RUNNING)) {
1494 en_reset_ul(sc);
1495 en_init(sc);
1496 }
1497 ifa->ifa_rtrequest = atm_rtrequest; /* ??? */
1498 EN_UNLOCK(sc);
1499 break;
1500 }
1501#endif /* INET */
1502 if (!(ifp->if_flags & IFF_RUNNING)) {
1503 en_reset_ul(sc);
1504 en_init(sc);
1505 }
1506 EN_UNLOCK(sc);
1507 break;
1565
1508
1566#ifdef EN_MBUF_OPT
1509 case SIOCSIFFLAGS:
1510 EN_LOCK(sc);
1511 if (ifp->if_flags & IFF_UP) {
1512 if (!(ifp->if_flags & IFF_RUNNING))
1513 en_init(sc);
1514 } else {
1515 if (ifp->if_flags & IFF_RUNNING)
1516 en_reset_ul(sc);
1517 }
1518 EN_UNLOCK(sc);
1519 break;
1567
1520
1568 /*
1569 * note: external storage (M_EXT) can be shared between mbufs
1570 * to avoid copying (see m_copym()). this means that the same
1571 * data buffer could be shared by several mbufs, and thus it isn't
1572 * a good idea to try and write TBDs or PDUs to M_EXT data areas.
1573 */
1521 case SIOCSIFMTU:
1522 /*
1523 * Set the interface MTU.
1524 */
1525 if (ifr->ifr_mtu > ATMMTU) {
1526 error = EINVAL;
1527 break;
1528 }
1529 ifp->if_mtu = ifr->ifr_mtu;
1530 break;
1574
1531
1575 if (M_LEADINGSPACE(m) >= MID_TBD_SIZE && (m->m_flags & M_EXT) == 0) {
1576 m->m_data -= MID_TBD_SIZE;
1577 m->m_len += MID_TBD_SIZE;
1578 mlen += MID_TBD_SIZE;
1579 new_ap = mtod(m, struct atm_pseudohdr *);
1580 *new_ap = *ap; /* move it back */
1581 ap = new_ap;
1582 dat = ((u_int32_t *) ap) + 1;
1583 /* make sure the TBD is in proper byte order */
1584 *dat++ = htonl(MID_TBD_MK1(aal, sc->txspeed[atm_vci], cellcnt));
1585 *dat = htonl(MID_TBD_MK2(atm_vci, 0, 0));
1586 atm_flags |= EN_OBHDR;
1587 }
1588
1589 if (toadd && (lastm->m_flags & M_EXT) == 0 &&
1590 M_TRAILINGSPACE(lastm) >= toadd) {
1591 cp = mtod(lastm, u_int8_t *) + lastm->m_len;
1592 lastm->m_len += toadd;
1593 mlen += toadd;
1594 if (aal == MID_TBD_AAL5) {
1595 bzero(cp, toadd - MID_PDU_SIZE);
1596 dat = (u_int32_t *)(cp + toadd - MID_PDU_SIZE);
1597 /* make sure the PDU is in proper byte order */
1598 *dat = htonl(MID_PDU_MK1(0, 0, got));
1599 } else {
1600 bzero(cp, toadd);
1532 default:
1533 error = EINVAL;
1534 break;
1601 }
1535 }
1602 atm_flags |= EN_OBTRL;
1603 }
1604 ATM_PH_FLAGS(ap) = atm_flags; /* update EN_OBHDR/EN_OBTRL bits */
1605#endif /* EN_MBUF_OPT */
1536 return (error);
1537}
1606
1538
1607 /*
1608 * get assigned channel (will be zero unless txspeed[atm_vci] is set)
1609 */
1539/*********************************************************************/
1540/*
1541 * Sysctl's
1542 */
1610
1543
1611 txchan = sc->txvc2slot[atm_vci];
1544/*
1545 * Sysctl handler for internal statistics
1546 *
1547 * LOCK: unlocked, needed
1548 */
1549static int
1550en_sysctl_istats(SYSCTL_HANDLER_ARGS)
1551{
1552 struct en_softc *sc = arg1;
1553 struct sbuf *sb;
1554 int error;
1612
1555
1613 if (sc->txslot[txchan].mbsize > EN_TXHIWAT) {
1614 EN_COUNT(sc->txmbovr);
1615 m_freem(m);
1616#ifdef EN_DEBUG
1617 printf("%s: tx%d: buffer space shortage\n", sc->sc_dev.dv_xname,
1618 txchan);
1619#endif
1620 continue;
1621 }
1556 sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
1557 sbuf_clear(sb);
1622
1558
1623 sc->txslot[txchan].mbsize += mlen;
1559 EN_LOCK(sc);
1624
1560
1625#ifdef EN_DEBUG
1626 printf("%s: tx%d: VPI=%d, VCI=%d, FLAGS=0x%x, speed=0x%x\n",
1627 sc->sc_dev.dv_xname, txchan, atm_vpi, atm_vci, atm_flags,
1628 sc->txspeed[atm_vci]);
1629 printf(" adjusted mlen=%d, mbsize=%d\n", mlen,
1630 sc->txslot[txchan].mbsize);
1631#endif
1561#define DO(NAME) sbuf_printf(sb, #NAME": %u\n", sc->stats.NAME)
1562 DO(vtrash);
1563 DO(otrash);
1564 DO(ttrash);
1565 DO(mfixaddr);
1566 DO(mfixlen);
1567 DO(mfixfail);
1568 DO(txmbovr);
1569 DO(dmaovr);
1570 DO(txoutspace);
1571 DO(txdtqout);
1572 DO(launch);
1573 DO(hwpull);
1574 DO(swadd);
1575 DO(rxqnotus);
1576 DO(rxqus);
1577 DO(rxdrqout);
1578 DO(rxmbufout);
1579 DO(txnomap);
1580#undef DO
1632
1581
1633 _IF_ENQUEUE(&sc->txslot[txchan].q, m);
1582 EN_UNLOCK(sc);
1634
1583
1635 en_txdma(sc, txchan);
1636
1637 }
1638 /*NOTREACHED*/
1584 sbuf_finish(sb);
1585 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
1586 sbuf_delete(sb);
1587 return (error);
1639}
1640
1588}
1589
1641
1590/*********************************************************************/
1642/*
1591/*
1643 * en_mfix: fix a stupid mbuf
1592 * Interrupts
1644 */
1645
1593 */
1594
1646#ifndef __FreeBSD__
1647
1648STATIC int en_mfix(sc, mm, prev)
1649
1650struct en_softc *sc;
1651struct mbuf **mm, *prev;
1652
1595/*
1596 * Transmit interrupt handler
1597 *
1598 * check for tx complete; if detected, this means that some space
1599 * has come free on the card. we must account for it and arrange to
1600 * kick the channel to life (in case it is stalled waiting on the card).
1601 *
1602 * LOCK: locked, needed
1603 */
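/*
 * Illustrative sketch (not driver code): the circular-buffer arithmetic
 * used below to recover the free byte count from the card's read pointer
 * and our write pointer. "bufsz" stands for EN_TXSZ * 1024.
 */
static u_int
example_tx_free(u_int readptr, u_int writeptr, u_int bufsz)
{
	if (readptr > writeptr)
		return (readptr - writeptr);
	return (readptr + bufsz - writeptr);
}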
1604static uint32_t
1605en_intr_tx(struct en_softc *sc, uint32_t reg)
1653{
1606{
1654 struct mbuf *m, *new;
1655 u_char *d, *cp;
1656 int off;
1657 struct mbuf *nxt;
1607 uint32_t kick;
1608 uint32_t mask;
1609 uint32_t val;
1610 int chan;
1658
1611
1659 m = *mm;
1612 kick = 0; /* bitmask of channels to kick */
1660
1613
1661 EN_COUNT(sc->mfix); /* count # of calls */
1662#ifdef EN_DEBUG
1663 printf("%s: mfix mbuf m_data=%p, m_len=%d\n", sc->sc_dev.dv_xname,
1664 m->m_data, m->m_len);
1665#endif
1614 for (mask = 1, chan = 0; chan < EN_NTX; chan++, mask *= 2) {
1615 if (!(reg & MID_TXCHAN(chan)))
1616 continue;
1666
1617
1667 d = mtod(m, u_char *);
1668 off = ((unsigned long) d) % sizeof(u_int32_t);
1618 kick = kick | mask;
1669
1619
1670 if (off) {
1671 if ((m->m_flags & M_EXT) == 0) {
1672 bcopy(d, d - off, m->m_len); /* ALIGN! (with costly data copy...) */
1673 d -= off;
1674 m->m_data = (caddr_t)d;
1675 } else {
1676 /* can't write to an M_EXT mbuf since it may be shared */
1677 MGET(new, M_DONTWAIT, MT_DATA);
1678 if (!new) {
1679 EN_COUNT(sc->mfixfail);
1680 return(0);
1681 }
1682 MCLGET(new, M_DONTWAIT);
1683 if ((new->m_flags & M_EXT) == 0) {
1684 m_free(new);
1685 EN_COUNT(sc->mfixfail);
1686 return(0);
1687 }
1688 bcopy(d, new->m_data, m->m_len); /* ALIGN! (with costly data copy...) */
1689 new->m_len = m->m_len;
1690 new->m_next = m->m_next;
1691 if (prev)
1692 prev->m_next = new;
1693 m_free(m);
1694 *mm = m = new; /* note: 'd' now invalid */
1695 }
1696 }
1620 /* current read pointer */
1621 val = en_read(sc, MIDX_READPTR(chan));
1622 /* as offset */
1623 val = (val * sizeof(uint32_t)) + sc->txslot[chan].start;
1624 if (val > sc->txslot[chan].cur)
1625 sc->txslot[chan].bfree = val - sc->txslot[chan].cur;
1626 else
1627 sc->txslot[chan].bfree = (val + (EN_TXSZ * 1024)) -
1628 sc->txslot[chan].cur;
1629 DBG(sc, INTR, ("tx%d: transmit done. %d bytes now free in "
1630 "buffer", chan, sc->txslot[chan].bfree));
1631 }
1632 return (kick);
1633}
1697
1634
1698 off = m->m_len % sizeof(u_int32_t);
1699 if (off == 0)
1700 return(1);
1635/*
1636 * TX DMA interrupt
1637 *
1638 * check for TX DMA complete; if detected, this means
1639 * that some DTQs are now free. it also means some indma
1640 * mbufs can be freed. if we needed DTQs, kick all channels.
1641 *
1642 * LOCK: locked, needed
1643 */
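/*
 * Illustrative sketch (not driver code): the retirement pattern shared by
 * the TX and RX DMA-complete handlers below -- walk from the index where
 * we last saw the chip up to the index it reports now, retiring one queue
 * entry per step. Names are made up for the example.
 */
static u_int
example_dq_retire(u_int last_seen, u_int chip_now, u_int qlen)
{
	u_int done = 0;

	while (last_seen != chip_now) {
		/* the entry at last_seen is finished: free its mbuf here */
		last_seen = (last_seen + 1) % qlen;
		done++;
	}
	return (done);
}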
1644static uint32_t
1645en_intr_tx_dma(struct en_softc *sc)
1646{
1647 uint32_t kick = 0;
1648 uint32_t val;
1649 uint32_t idx;
1650 uint32_t slot;
1651 uint32_t dtq;
1652 struct en_map *map;
1653 struct mbuf *m;
1701
1654
1702 d = mtod(m, u_char *) + m->m_len;
1703 off = sizeof(u_int32_t) - off;
1704
1705 nxt = m->m_next;
1706 while (off--) {
1707 for ( ; nxt != NULL && nxt->m_len == 0 ; nxt = nxt->m_next)
1708 /*null*/;
1709 if (nxt == NULL) { /* out of data, zero fill */
1710 *d++ = 0;
1711 continue; /* next "off" */
1712 }
1713 cp = mtod(nxt, u_char *);
1714 *d++ = *cp++;
1715 m->m_len++;
1716 nxt->m_len--;
1717 nxt->m_data = (caddr_t)cp;
1718 }
1719 return(1);
1720}
1655 val = en_read(sc, MID_DMA_RDTX); /* chip's current location */
1656 idx = MID_DTQ_A2REG(sc->dtq_chip); /* where we last saw chip */
1721
1657
1722#else /* __FreeBSD__ */
1658 if (sc->need_dtqs) {
1659 kick = MID_NTX_CH - 1; /* assume power of 2, kick all! */
1660 sc->need_dtqs = 0; /* recalculated in "kick" loop below */
1661 DBG(sc, INTR, ("cleared need DTQ condition"));
1662 }
1723
1663
1724STATIC int en_makeexclusive(struct en_softc *, struct mbuf **, struct mbuf *);
1664 while (idx != val) {
1665 sc->dtq_free++;
1666 if ((dtq = sc->dtq[idx]) != 0) {
1667 /* don't forget to zero it out when done */
1668 sc->dtq[idx] = 0;
1669 slot = EN_DQ_SLOT(dtq);
1725
1670
1726STATIC int en_makeexclusive(sc, mm, prev)
1727 struct en_softc *sc;
1728 struct mbuf **mm, *prev;
1729{
1730 struct mbuf *m, *new;
1671 _IF_DEQUEUE(&sc->txslot[slot].indma, m);
1672 if (m == NULL)
1673 panic("enintr: dtqsync");
1674 map = (void *)m->m_pkthdr.rcvif;
1675 uma_zfree(sc->map_zone, map);
1676 m_freem(m);
1731
1677
1732 m = *mm;
1733
1734 if (m->m_flags & M_EXT) {
1735 if (m->m_ext.ext_type != EXT_CLUSTER) {
1736 /* external buffer isn't an ordinary mbuf cluster! */
1737 printf("%s: mfix: special buffer! can't make a copy!\n",
1738 sc->sc_dev.dv_xname);
1739 return (0);
1678 sc->txslot[slot].mbsize -= EN_DQ_LEN(dtq);
1679 DBG(sc, INTR, ("tx%d: free %d dma bytes, mbsize now "
1680 "%d", slot, EN_DQ_LEN(dtq),
1681 sc->txslot[slot].mbsize));
1682 }
1683 EN_WRAPADD(0, MID_DTQ_N, idx, 1);
1740 }
1684 }
1741
1742 if (MEXT_IS_REF(m)) {
1743 /* make a real copy of the M_EXT mbuf since it is shared */
1744 MGET(new, M_DONTWAIT, MT_DATA);
1745 if (!new) {
1746 EN_COUNT(sc->mfixfail);
1747 return(0);
1748 }
1749 if (m->m_flags & M_PKTHDR)
1750 M_MOVE_PKTHDR(new, m);
1751 MCLGET(new, M_DONTWAIT);
1752 if ((new->m_flags & M_EXT) == 0) {
1753 m_free(new);
1754 EN_COUNT(sc->mfixfail);
1755 return(0);
1756 }
1757 bcopy(m->m_data, new->m_data, m->m_len);
1758 new->m_len = m->m_len;
1759 new->m_next = m->m_next;
1760 if (prev)
1761 prev->m_next = new;
1762 m_free(m);
1763 *mm = new;
1764 }
1765 else {
1766 /* the buffer is not shared, align the data offset using
1767 this buffer. */
1768 u_char *d = mtod(m, u_char *);
1769 int off = ((uintptr_t)(void *)d) % sizeof(u_int32_t);
1685 sc->dtq_chip = MID_DTQ_REG2A(val); /* sync softc */
1770
1686
1771 if (off > 0) {
1772 bcopy(d, d - off, m->m_len);
1773 m->m_data = (caddr_t)d - off;
1774 }
1775 }
1776 }
1777 return (1);
1687 return (kick);
1778}
1779
1688}
1689
1780STATIC int en_mfix(sc, mm, prev)
1781
1782struct en_softc *sc;
1783struct mbuf **mm, *prev;
1784
1690/*
1691 * Service interrupt
1692 *
1693 * LOCK: locked, needed
1694 */
1695static int
1696en_intr_service(struct en_softc *sc)
1785{
1697{
1786 struct mbuf *m;
1787 u_char *d, *cp;
1788 int off;
1789 struct mbuf *nxt;
1698 uint32_t chip;
1699 uint32_t slot;
1700 uint32_t vci;
1701 int need_softserv = 0;
1790
1702
1791 m = *mm;
1703 chip = MID_SL_REG2A(en_read(sc, MID_SERV_WRITE));
1792
1704
1793 EN_COUNT(sc->mfix); /* count # of calls */
1794#ifdef EN_DEBUG
1795 printf("%s: mfix mbuf m_data=%p, m_len=%d\n", sc->sc_dev.dv_xname,
1796 m->m_data, m->m_len);
1797#endif
1705 while (sc->hwslistp != chip) {
1706 /* fetch and remove it from hardware service list */
1707 vci = en_read(sc, sc->hwslistp);
1708 EN_WRAPADD(MID_SLOFF, MID_SLEND, sc->hwslistp, 4);
1798
1709
1799 d = mtod(m, u_char *);
1800 off = ((uintptr_t) (void *) d) % sizeof(u_int32_t);
1710 slot = sc->rxvc2slot[vci];
1711 if (slot == RX_NONE) {
1712 DBG(sc, INTR, ("unexpected rx interrupt on VCI %d",
1713 vci));
1714 en_write(sc, MID_VC(vci), MIDV_TRASH); /* rx off */
1715 continue;
1716 }
1801
1717
1802 if (off) {
1803 if ((m->m_flags & M_EXT) == 0) {
1804 bcopy(d, d - off, m->m_len); /* ALIGN! (with costly data copy...) */
1805 d -= off;
1806 m->m_data = (caddr_t)d;
1807 } else {
1808 /* can't write to an M_EXT mbuf since it may be shared */
1809 if (en_makeexclusive(sc, &m, prev) == 0)
1810 return (0);
1811 *mm = m; /* note: 'd' now invalid */
1812 }
1813 }
1718 /* remove from hwsl */
1719 en_write(sc, MID_VC(vci), sc->rxslot[slot].mode);
1720 EN_COUNT(sc->stats.hwpull);
1814
1721
1815 off = m->m_len % sizeof(u_int32_t);
1816 if (off == 0)
1817 return(1);
1722 DBG(sc, INTR, ("pulled VCI %d off hwslist", vci));
1818
1723
1819 if (m->m_flags & M_EXT) {
1820 /* can't write to an M_EXT mbuf since it may be shared */
1821 if (en_makeexclusive(sc, &m, prev) == 0)
1822 return (0);
1823 *mm = m; /* note: 'd' now invalid */
1824 }
1825
1826 d = mtod(m, u_char *) + m->m_len;
1827 off = sizeof(u_int32_t) - off;
1828
1829 nxt = m->m_next;
1830 while (off--) {
1831 if (nxt != NULL && nxt->m_len == 0) {
1832 /* remove an empty mbuf. this avoids odd byte padding to an empty
1833 last mbuf. */
1834 m->m_next = nxt = m_free(nxt);
1835 }
1836 if (nxt == NULL) { /* out of data, zero fill */
1837 *d++ = 0;
1838 continue; /* next "off" */
1839 }
1840 cp = mtod(nxt, u_char *);
1841 *d++ = *cp++;
1842 m->m_len++;
1843 nxt->m_len--;
1844 nxt->m_data = (caddr_t)cp;
1845 }
1846 if (nxt != NULL && nxt->m_len == 0)
1847 m->m_next = m_free(nxt);
1848 return(1);
1724 /* add it to the software service list (if needed) */
1725 if ((sc->rxslot[slot].oth_flags & ENOTHER_SWSL) == 0) {
1726 EN_COUNT(sc->stats.swadd);
1727 need_softserv = 1;
1728 sc->rxslot[slot].oth_flags |= ENOTHER_SWSL;
1729 sc->swslist[sc->swsl_tail] = slot;
1730 EN_WRAPADD(0, MID_SL_N, sc->swsl_tail, 1);
1731 sc->swsl_size++;
1732 DBG(sc, INTR, ("added VCI %d to swslist", vci));
1733 }
1734 }
1735 return (need_softserv);
1849}
1850
1736}
1737
1851#endif /* __FreeBSD__ */
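/*
 * Illustrative sketch (not driver code): the core of the alignment fix
 * that en_mfix()/en_makeexclusive() perform on a private (non-shared)
 * buffer -- slide the data down so it starts on a 32-bit boundary. This
 * is the "costly data copy" counted by the mfix statistics.
 */
static void
example_align_down(u_char **data, u_int len)
{
	u_int off = (uintptr_t)*data % sizeof(u_int32_t);

	if (off != 0) {
		bcopy(*data, *data - off, len);	/* overlapping copy is ok */
		*data -= off;
	}
}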
1852
1853/*
1738/*
1854 * en_txdma: start transmit DMA, if possible
1739 * check for RX DMA complete, and pass the data "upstairs"
1740 *
1741 * LOCK: locked, needed
1855 */
1742 */
1743static int
1744en_intr_rx_dma(struct en_softc *sc)
1745{
1746 uint32_t val;
1747 uint32_t idx;
1748 uint32_t drq;
1749 uint32_t slot;
1750 uint32_t vci;
1751 struct atm_pseudohdr ah;
1752 struct mbuf *m;
1753 struct en_map *map;
1856
1754
1857STATIC void en_txdma(sc, chan)
1755 val = en_read(sc, MID_DMA_RDRX); /* chip's current location */
1756 idx = MID_DRQ_A2REG(sc->drq_chip); /* where we last saw chip */
1858
1757
1859struct en_softc *sc;
1860int chan;
1758 while (idx != val) {
1759 sc->drq_free++;
1760 if ((drq = sc->drq[idx]) != 0) {
1761 /* don't forget to zero it out when done */
1762 sc->drq[idx] = 0;
1763 slot = EN_DQ_SLOT(drq);
1764 if (EN_DQ_LEN(drq) == 0) { /* "JK" trash DMA? */
1765 m = NULL;
1766 map = NULL;
1767 } else {
1768 _IF_DEQUEUE(&sc->rxslot[slot].indma, m);
1769 if (m == NULL)
1770 panic("enintr: drqsync: %s%d: lost mbuf"
1771 " in slot %d!", sc->enif.if_name,
1772 sc->enif.if_unit, slot);
1773 map = (void *)m->m_pkthdr.rcvif;
1774 uma_zfree(sc->map_zone, map);
1775 }
1776 /* do something with this mbuf */
1777 if (sc->rxslot[slot].oth_flags & ENOTHER_DRAIN) {
1778 /* drain? */
1779 if (m != NULL)
1780 m_freem(m);
1781 vci = sc->rxslot[slot].atm_vci;
1782 if (!_IF_QLEN(&sc->rxslot[slot].indma) &&
1783 !_IF_QLEN(&sc->rxslot[slot].q) &&
1784 (en_read(sc, MID_VC(vci)) & MIDV_INSERVICE)
1785 == 0 &&
1786 (sc->rxslot[slot].oth_flags & ENOTHER_SWSL)
1787 == 0) {
1788 sc->rxslot[slot].oth_flags =
1789 ENOTHER_FREE; /* done drain */
1790 sc->rxslot[slot].atm_vci = RX_NONE;
1791 sc->rxvc2slot[vci] = RX_NONE;
1792 DBG(sc, INTR, ("rx%d: VCI %d now free",
1793 slot, vci));
1794 }
1861
1795
1862{
1863 struct mbuf *tmp;
1864 struct atm_pseudohdr *ap;
1865 struct en_launch launch;
1866 int datalen = 0, dtqneed, len, ncells;
1867 u_int8_t *cp;
1868 struct ifnet *ifp;
1796 } else if (m != NULL) {
1797 ATM_PH_FLAGS(&ah) = sc->rxslot[slot].atm_flags;
1798 ATM_PH_VPI(&ah) = 0;
1799 ATM_PH_SETVCI(&ah, sc->rxslot[slot].atm_vci);
1800 DBG(sc, INTR, ("rx%d: rxvci%d: atm_input, "
1801 "mbuf %p, len %d, hand %p", slot,
1802 sc->rxslot[slot].atm_vci, m,
1803 EN_DQ_LEN(drq), sc->rxslot[slot].rxhand));
1869
1804
1805 m->m_pkthdr.rcvif = &sc->enif;
1806 sc->enif.if_ipackets++;
1870#ifdef EN_DEBUG
1807#ifdef EN_DEBUG
1871 printf("%s: tx%d: starting...\n", sc->sc_dev.dv_xname, chan);
1808 if (sc->debug & DBG_IPACKETS)
1809 en_dump_packet(sc, m);
1872#endif
1810#endif
1811#ifdef ENABLE_BPF
1812 BPF_MTAP(&sc->enif, m);
1813#endif
1814 atm_input(&sc->enif, &ah, m,
1815 sc->rxslot[slot].rxhand);
1816 }
1817 }
1818 EN_WRAPADD(0, MID_DRQ_N, idx, 1);
1819 }
1820 sc->drq_chip = MID_DRQ_REG2A(val); /* sync softc */
1873
1821
1874 /*
1875 * note: now that txlaunch handles non-word aligned/sized requests
1876 * the only time you can safely set launch.nodma is if you've en_mfix()'d
1877 * the mbuf chain. this happens only if EN_NOTXDMA || !en_dma.
1878 */
1822 if (sc->need_drqs) {
1823 /* true if we had a DRQ shortage */
1824 sc->need_drqs = 0;
1825 DBG(sc, INTR, ("cleared need DRQ condition"));
1826 return (1);
1827 } else
1828 return (0);
1829}
1879
1830
1880 launch.nodma = (EN_NOTXDMA || !en_dma);
1831/*
1832 * en_mget: get an mbuf chain that can hold totlen bytes and return it
1833 * (for recv). For the actual allocation totlen is rounded up to a multiple
1834 * of 4. We also ensure that each mbuf has a multiple of 4 bytes.
1835 *
1836 * After this call the sum of all the m_len's in the chain will be totlen.
1837 * This is called at interrupt time, so we can't wait here.
1838 *
1839 * LOCK: any, not needed
1840 */
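/*
 * Illustrative sketch (not driver code): the length rounding described
 * above. For pktlen = 61, totlen becomes 64 and pad = 3; the chain's
 * m_len's then sum to 64 while m_pkthdr.len stays 61.
 */
static u_int
example_rx_alloclen(u_int pktlen, u_int *pad)
{
	u_int totlen;

	totlen = roundup(pktlen, sizeof(uint32_t));
	*pad = totlen - pktlen;
	return (totlen);
}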
1841static struct mbuf *
1842en_mget(struct en_softc *sc, u_int pktlen)
1843{
1844 struct mbuf *m, *tmp;
1845 u_int totlen, pad;
1881
1846
1882again:
1847 totlen = roundup(pktlen, sizeof(uint32_t));
1848 pad = totlen - pktlen;
1883
1849
1884 /*
1885 * get an mbuf waiting for DMA
1886 */
1850 /*
1851 * First get an mbuf with header. Keep space for a couple of
1852 * words at the beginning.
1853 */
1854 /* called from interrupt context */
1855 MGETHDR(m, M_DONTWAIT, MT_DATA);
1856 if (m == NULL)
1857 return (NULL);
1887
1858
1888 launch.t = sc->txslot[chan].q.ifq_head; /* peek at head of queue */
1859 m->m_pkthdr.rcvif = NULL;
1860 m->m_pkthdr.len = pktlen;
1861 m->m_len = EN_RX1BUF;
1862 MH_ALIGN(m, EN_RX1BUF);
1863 if (m->m_len >= totlen) {
1864 m->m_len = totlen;
1889
1865
1890 if (launch.t == NULL) {
1891#ifdef EN_DEBUG
1892 printf("%s: tx%d: ...done!\n", sc->sc_dev.dv_xname, chan);
1893#endif
1894 return; /* >>> exit here if no data waiting for DMA <<< */
1895 }
1866 } else {
1867 totlen -= m->m_len;
1896
1868
1897 /*
1898 * get flags, vci
1899 *
1900 * note: launch.need = # bytes we need to get on the card
1901 * dtqneed = # of DTQs we need for this packet
1902 * launch.mlen = # of bytes in in mbuf chain (<= launch.need)
1903 */
1869 /* called from interrupt context */
1870 tmp = m_getm(m, totlen, M_DONTWAIT, MT_DATA);
1871 if (tmp == NULL) {
1872 m_free(m);
1873 return (NULL);
1874 }
1875 tmp = m->m_next;
1876 /* m_getm could do this for us */
1877 while (tmp != NULL) {
1878 tmp->m_len = min(MCLBYTES, totlen);
1879 totlen -= tmp->m_len;
1880 tmp = tmp->m_next;
1881 }
1882 }
1904
1883
1905 ap = mtod(launch.t, struct atm_pseudohdr *);
1906 launch.atm_vci = ATM_PH_VCI(ap);
1907 launch.atm_flags = ATM_PH_FLAGS(ap);
1908 launch.aal = ((launch.atm_flags & ATM_PH_AAL5) != 0) ?
1909 MID_TBD_AAL5 : MID_TBD_NOAAL5;
1884 return (m);
1885}
1910
1886
1911 /*
1912 * XXX: have to recompute the length again, even though we already did
1913 * it in en_start(). might as well compute dtqneed here as well, so
1914 * this isn't that bad.
1915 */
1887/*
1888 * Argument for RX DMAMAP loader.
1889 */
1890struct rxarg {
1891 struct en_softc *sc;
1892 struct mbuf *m;
1893 u_int pre_skip; /* number of bytes to skip at the beginning */
1894 u_int post_skip; /* number of bytes to skip at end */
1895 struct en_rxslot *slot; /* slot we are receiving on */
1896 int wait; /* wait for DRQ entries */
1897};
1916
1898
1917 if ((launch.atm_flags & EN_OBHDR) == 0) {
1918 dtqneed = 1; /* header still needs to be added */
1919 launch.need = MID_TBD_SIZE; /* not included with mbuf */
1920 } else {
1921 dtqneed = 0; /* header on-board, dma with mbuf */
1922 launch.need = 0;
1923 }
1899/*
1900 * Copy the segment table to the buffer for later use. And compute the
1901 * number of dma queue entries we need.
1902 *
1903 * LOCK: locked, needed
1904 */
1905static void
1906en_rxdma_load(void *uarg, bus_dma_segment_t *segs, int nseg,
1907 bus_size_t mapsize, int error)
1908{
1909 struct rxarg *rx = uarg;
1910 struct en_softc *sc = rx->sc;
1911 struct en_rxslot *slot = rx->slot;
1912 u_int free; /* number of free DRQ entries */
1913 uint32_t cur; /* current buffer offset */
1914 uint32_t drq; /* DRQ entry pointer */
1915 uint32_t last_drq; /* where we have written last */
1916 u_int needalign, cnt, count, bcode;
1917 bus_addr_t addr;
1918 bus_size_t rest;
1919 int i;
1924
1920
1925 launch.mlen = 0;
1926 for (tmp = launch.t ; tmp != NULL ; tmp = tmp->m_next) {
1927 len = tmp->m_len;
1928 launch.mlen += len;
1929 cp = mtod(tmp, u_int8_t *);
1930 if (tmp == launch.t) {
1931 len -= sizeof(struct atm_pseudohdr); /* don't count this! */
1932 cp += sizeof(struct atm_pseudohdr);
1933 }
1934 launch.need += len;
1935 if (len == 0)
1936 continue; /* atm_pseudohdr alone in first mbuf */
1921 if (error != 0)
1922 return;
1923 if (nseg > EN_MAX_DMASEG)
1924 panic("too many DMA segments");
1937
1925
1938 dtqneed += en_dqneed(sc, (caddr_t) cp, len, 1);
1939 }
1926 rx->wait = 0;
1940
1927
1941 if ((launch.need % sizeof(u_int32_t)) != 0)
1942 dtqneed++; /* need DTQ to FLUSH internal buffer */
1928 free = sc->drq_free;
1929 drq = sc->drq_us;
1930 cur = slot->cur;
1943
1931
1944 if ((launch.atm_flags & EN_OBTRL) == 0) {
1945 if (launch.aal == MID_TBD_AAL5) {
1946 datalen = launch.need - MID_TBD_SIZE;
1947 launch.need += MID_PDU_SIZE; /* AAL5: need PDU tail */
1948 }
1949 dtqneed++; /* need to work on the end a bit */
1950 }
1932 last_drq = 0;
1951
1933
1952 /*
1953 * finish calculation of launch.need (need to figure out how much padding
1954 * we will need). launch.need includes MID_TBD_SIZE, but we need to
1955 * remove that so we can round off properly. we have to add
1956 * MID_TBD_SIZE back in after calculating ncells.
1957 */
1934 /*
1935 * Local macro to add an entry to the receive DMA area. If there
1936 * are no entries left, return. Save the byte offset of the entry
1937 * in last_drq for later use.
1938 */
1939#define PUT_DRQ_ENTRY(ENI, BCODE, COUNT, ADDR) \
1940 if (free == 0) { \
1941 EN_COUNT(sc->stats.rxdrqout); \
1942 rx->wait = 1; \
1943 return; \
1944 } \
1945 last_drq = drq; \
1946 en_write(sc, drq + 0, (ENI || !sc->is_adaptec) ? \
1947 MID_MK_RXQ_ENI(COUNT, slot->atm_vci, 0, BCODE) : \
1948 MID_MK_RXQ_ADP(COUNT, slot->atm_vci, 0, BCODE)); \
1949 en_write(sc, drq + 4, ADDR); \
1950 \
1951 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, drq, 8); \
1952 free--;
1958
1953
1959 launch.need = roundup(launch.need - MID_TBD_SIZE, MID_ATMDATASZ);
1960 ncells = launch.need / MID_ATMDATASZ;
1961 launch.need += MID_TBD_SIZE;
1954 /*
1955 * Local macro to generate a DMA entry to DMA cnt bytes. Updates
1956 * the current buffer byte offset accordingly.
1957 */
1958#define DO_DRQ(TYPE) do { \
1959 rest -= cnt; \
1960 EN_WRAPADD(slot->start, slot->stop, cur, cnt); \
1961 DBG(sc, SERV, ("rx%td: "TYPE" %u bytes, %ju left, cur %#x", \
1962 slot - sc->rxslot, cnt, (uintmax_t)rest, cur)); \
1963 \
1964 PUT_DRQ_ENTRY(1, bcode, count, addr); \
1965 \
1966 addr += cnt; \
1967 } while (0)
1962
1968
1963 if (launch.need > EN_TXSZ * 1024) {
1964 printf("%s: tx%d: packet larger than xmit buffer (%d > %d)\n",
1965 sc->sc_dev.dv_xname, chan, launch.need, EN_TXSZ * 1024);
1966 goto dequeue_drop;
1967 }
1969 /*
1970 * Skip the RBD at the beginning
1971 */
1972 if (rx->pre_skip > 0) {
1973 /* update DMA address */
1974 EN_WRAPADD(slot->start, slot->stop, cur, rx->pre_skip);
1968
1975
1969 /*
1970 * note: don't use the entire buffer space. if WRTX becomes equal
1971 * to RDTX, the transmitter stops, assuming the buffer is empty! --kjc
1972 */
1973 if (launch.need >= sc->txslot[chan].bfree) {
1974 EN_COUNT(sc->txoutspace);
1975#ifdef EN_DEBUG
1976 printf("%s: tx%d: out of transmit space\n", sc->sc_dev.dv_xname, chan);
1977#endif
1978 return; /* >>> exit here if out of obmem buffer space <<< */
1979 }
1980
1981 /*
1982 * ensure we have enough dtqs to go, if not, wait for more.
1983 */
1976 PUT_DRQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
1977 }
1984
1978
1985 if (launch.nodma) {
1986 dtqneed = 1;
1987 }
1988 if (dtqneed > sc->dtq_free) {
1989 sc->need_dtqs = 1;
1990 EN_COUNT(sc->txdtqout);
1991#ifdef EN_DEBUG
1992 printf("%s: tx%d: out of transmit DTQs\n", sc->sc_dev.dv_xname, chan);
1993#endif
1994 return; /* >>> exit here if out of dtqs <<< */
1995 }
1979 for (i = 0; i < nseg; i++, segs++) {
1980 addr = segs->ds_addr;
1981 rest = segs->ds_len;
1996
1982
1997 /*
1998 * it is a go, commit! dequeue mbuf start working on the xfer.
1999 */
1983 if (sc->is_adaptec) {
1984 /* adaptec card - simple */
2000
1985
2001 _IF_DEQUEUE(&sc->txslot[chan].q, tmp);
2002#ifdef EN_DIAG
2003 if (launch.t != tmp)
2004 panic("en dequeue");
2005#endif /* EN_DIAG */
1986 /* advance the on-card buffer pointer */
1987 EN_WRAPADD(slot->start, slot->stop, cur, rest);
1988 DBG(sc, SERV, ("rx%td: adp %ju bytes %#jx "
1989 "(cur now 0x%x)", slot - sc->rxslot,
1990 (uintmax_t)rest, (uintmax_t)addr, cur));
2006
1991
2007 /*
2008 * launch!
2009 */
1992 PUT_DRQ_ENTRY(0, 0, rest, addr);
2010
1993
2011 EN_COUNT(sc->launch);
2012 ifp = &sc->enif;
2013 ifp->if_opackets++;
2014
2015 if ((launch.atm_flags & EN_OBHDR) == 0) {
2016 EN_COUNT(sc->lheader);
2017 /* store tbd1/tbd2 in host byte order */
2018 launch.tbd1 = MID_TBD_MK1(launch.aal, sc->txspeed[launch.atm_vci], ncells);
2019 launch.tbd2 = MID_TBD_MK2(launch.atm_vci, 0, 0);
2020 }
2021 if ((launch.atm_flags & EN_OBTRL) == 0 && launch.aal == MID_TBD_AAL5) {
2022 EN_COUNT(sc->ltail);
2023 launch.pdu1 = MID_PDU_MK1(0, 0, datalen); /* host byte order */
2024 }
1994 continue;
1995 }
2025
1996
2026 en_txlaunch(sc, chan, &launch);
1997 /*
1998 * do we need to do a DMA op to align to the maximum
1999 * burst? Note that we are always 32-bit aligned.
2000 */
2001 if (sc->alburst &&
2002 (needalign = (addr & sc->bestburstmask)) != 0) {
2003 /* compute number of bytes, words and code */
2004 cnt = sc->bestburstlen - needalign;
2005 if (cnt > rest)
2006 cnt = rest;
2007 count = cnt / sizeof(uint32_t);
2008 if (sc->noalbursts) {
2009 bcode = MIDDMA_WORD;
2010 } else {
2011 bcode = en_dmaplan[count].bcode;
2012 count = cnt >> en_dmaplan[count].divshift;
2013 }
2014 DO_DRQ("al_dma");
2015 }
2027
2016
2028#if NBPF > 0
2029 if (ifp->if_bpf) {
2030 /*
2031 * adjust the top of the mbuf to skip the pseudo atm header
2032 * (and TBD, if present) before passing the packet to bpf,
2033 * restore it afterwards.
2034 */
2035 int size = sizeof(struct atm_pseudohdr);
2036 if (launch.atm_flags & EN_OBHDR)
2037 size += MID_TBD_SIZE;
2017 /* do we need to do a max-sized burst? */
2018 if (rest >= sc->bestburstlen) {
2019 count = rest >> sc->bestburstshift;
2020 cnt = count << sc->bestburstshift;
2021 bcode = sc->bestburstcode;
2022 DO_DRQ("best_dma");
2023 }
2038
2024
2039 launch.t->m_data += size;
2040 launch.t->m_len -= size;
2025 /* do we need to do a cleanup burst? */
2026 if (rest != 0) {
2027 cnt = rest;
2028 count = rest / sizeof(uint32_t);
2029 if (sc->noalbursts) {
2030 bcode = MIDDMA_WORD;
2031 } else {
2032 bcode = en_dmaplan[count].bcode;
2033 count = cnt >> en_dmaplan[count].divshift;
2034 }
2035 DO_DRQ("clean_dma");
2036 }
2037 }
2041
2038
2042 BPF_MTAP(ifp, launch.t);
2039 /*
2040 * Skip stuff at the end
2041 */
2042 if (rx->post_skip > 0) {
2043 /* update DMA address */
2044 EN_WRAPADD(slot->start, slot->stop, cur, rx->post_skip);
2043
2045
2044 launch.t->m_data -= size;
2045 launch.t->m_len += size;
2046 }
2047#endif /* NBPF > 0 */
2048 /*
2049 * do some housekeeping and get the next packet
2050 */
2046 PUT_DRQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
2047 }
2051
2048
2052 sc->txslot[chan].bfree -= launch.need;
2053 _IF_ENQUEUE(&sc->txslot[chan].indma, launch.t);
2054 goto again;
2049 /* record the end for the interrupt routine */
2050 sc->drq[MID_DRQ_A2REG(last_drq)] =
2051 EN_DQ_MK(slot - sc->rxslot, rx->m->m_pkthdr.len);
2055
2052
2056 /*
2057 * END of txdma loop!
2058 */
2053 /* set the end flag in the last descriptor */
2054 en_write(sc, last_drq + 0, SETQ_END(sc, en_read(sc, last_drq + 0)));
2059
2055
2060 /*
2061 * error handling
2062 */
2056#undef PUT_DRQ_ENTRY
2057#undef DO_DRQ
2063
2058
2064dequeue_drop:
2065 _IF_DEQUEUE(&sc->txslot[chan].q, tmp);
2066 if (launch.t != tmp)
2067 panic("en dequeue drop");
2068 m_freem(launch.t);
2069 sc->txslot[chan].mbsize -= launch.mlen;
2070 goto again;
2059 /* commit */
2060 slot->cur = cur;
2061 sc->drq_free = free;
2062 sc->drq_us = drq;
2063
2064 /* signal to card */
2065 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_us));
2071}
2072
2066}
2067
2073
2074/*
2068/*
2075 * en_txlaunch: launch an mbuf into the dma pool!
2069 * en_service: handle a service interrupt
2070 *
2071 * Q: why do we need a software service list?
2072 *
2073 * A: if we remove a VCI from the hardware list and we find that we are
2074 * out of DRQs we must defer processing until some DRQs become free.
2075 * so we must remember to look at this RX VCI/slot later, but we can't
2076 * put it back on the hardware service list (since that isn't allowed).
2077 * so we instead save it on the software service list. it would be nice
2078 * if we could peek at the VCI on top of the hwservice list without removing
2079 * it, however this leads to a race condition: if we peek at it and
2080 * decide we are done with it new data could come in before we have a
2081 * chance to remove it from the hwslist. by the time we get it out of
2082 * the list the interrupt for the new data will be lost. oops!
2083 *
2084 * LOCK: locked, needed
2076 */
2085 */
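/*
 * Illustration only (not driver code): a minimal sketch of a software
 * service list like the swslist/swsl_head/swsl_tail ring used by
 * en_service() below.  The EX_* names are made up; the real driver
 * wraps its indices with EN_WRAPADD and stores RX slot numbers.
 */
#define EX_SL_N	32			/* ring capacity (assumed) */

struct ex_swsl {
	int		slots[EX_SL_N];	/* queued RX slot numbers */
	unsigned	head, tail, size;
};

/* append a slot; returns -1 if the ring is full */
static int
ex_swsl_put(struct ex_swsl *sl, int slot)
{
	if (sl->size == EX_SL_N)
		return (-1);
	sl->slots[sl->tail] = slot;
	sl->tail = (sl->tail + 1) % EX_SL_N;
	sl->size++;
	return (0);
}

/* remove the oldest slot; returns -1 if the ring is empty */
static int
ex_swsl_get(struct ex_swsl *sl)
{
	int slot;

	if (sl->size == 0)
		return (-1);
	slot = sl->slots[sl->head];
	sl->head = (sl->head + 1) % EX_SL_N;
	sl->size--;
	return (slot);
}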
2086static void
2087en_service(struct en_softc *sc)
2088{
2089 struct mbuf *m, *lastm;
2090 struct en_map *map;
2091 struct rxarg rx;
2092 uint32_t cur;
2093 uint32_t dstart; /* data start (as reported by card) */
2094 uint32_t rbd; /* receive buffer descriptor */
2095 uint32_t pdu; /* AAL5 trailer */
2096 int mlen;
2097 struct en_rxslot *slot;
2098 int error;
2077
2099
2078STATIC void en_txlaunch(sc, chan, l)
2100 rx.sc = sc;
2079
2101
2080struct en_softc *sc;
2081int chan;
2082struct en_launch *l;
2102 next_vci:
2103 if (sc->swsl_size == 0) {
2104 DBG(sc, SERV, ("en_service done"));
2105 return;
2106 }
2083
2107
2084{
2085 struct mbuf *tmp;
2086 u_int32_t cur = sc->txslot[chan].cur,
2087 start = sc->txslot[chan].start,
2088 stop = sc->txslot[chan].stop,
2089 dma, *data, *datastop, count, bcode;
2090 int pad, addtail, need, len, needalign, cnt, end, mx;
2108 /*
2109 * get slot to service
2110 */
2111 rx.slot = slot = &sc->rxslot[sc->swslist[sc->swsl_head]];
2091
2112
2113 KASSERT (sc->rxvc2slot[slot->atm_vci] == slot - sc->rxslot,
2114 ("en_service: rx slot/vci sync"));
2092
2115
2093 /*
2094 * vars:
2095 * need = # bytes card still needs (decr. to zero)
2096 * len = # of bytes left in current mbuf
2097 * cur = our current pointer
2098 * dma = last place we programmed into the DMA
2099 * data = pointer into data area of mbuf that needs to go next
2100 * cnt = # of bytes to transfer in this DTQ
2101 * bcode/count = DMA burst code, and chip's version of cnt
2102 *
2103 * a single buffer can require up to 5 DTQs depending on its size
2104 * and alignment requirements. the 5 possible requests are:
2105 * [1] 1, 2, or 3 byte DMA to align src data pointer to word boundary
2106 * [2] alburst DMA to align src data pointer to bestburstlen
2107 * [3] 1 or more bestburstlen DMAs
2108 * [4] clean up burst (to last word boundary)
2109 * [5] 1, 2, or 3 byte final clean up DMA
2110 */
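/*
 * Illustration only (not driver code): a rough sketch of how one
 * buffer could break into the up-to-five requests listed above,
 * assuming a hypothetical 64-byte best burst.  All names are made up.
 */
#include <stdio.h>
#include <stdint.h>

static void
example_split(uintptr_t addr, size_t len, size_t burst)
{
	size_t head, albst, big, clean, tail;

	head = (sizeof(uint32_t) - addr % sizeof(uint32_t)) %
	    sizeof(uint32_t);				/* [1] word align */
	if (head > len)
		head = len;
	addr += head;
	len -= head;

	albst = (burst - addr % burst) % burst;		/* [2] burst align */
	if (albst > (len & ~(size_t)3))
		albst = len & ~(size_t)3;
	addr += albst;
	len -= albst;

	big = (len / burst) * burst;			/* [3] full bursts */
	len -= big;

	clean = len & ~(size_t)3;			/* [4] word cleanup */
	tail = len - clean;				/* [5] 1-3 byte tail */

	printf("head=%zu align=%zu bursts=%zu cleanup=%zu tail=%zu\n",
	    head, albst, big, clean, tail);
}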
2116 /*
2117 * determine our mode and if we've got any work to do
2118 */
2119 DBG(sc, SERV, ("rx%td: service vci=%d start/stop/cur=0x%x 0x%x "
2120 "0x%x", slot - sc->rxslot, slot->atm_vci,
2121 slot->start, slot->stop, slot->cur));
2111
2122
2112 need = l->need;
2113 dma = cur;
2114 addtail = (l->atm_flags & EN_OBTRL) == 0; /* add a tail? */
2123 same_vci:
2124 cur = slot->cur;
2115
2125
2116#ifdef EN_DIAG
2117 if ((need - MID_TBD_SIZE) % MID_ATMDATASZ)
2118 printf("%s: tx%d: bogus transmit needs (%d)\n", sc->sc_dev.dv_xname, chan,
2119 need);
2120#endif
2121#ifdef EN_DEBUG
2122 printf("%s: tx%d: launch mbuf %p! cur=0x%x[%d], need=%d, addtail=%d\n",
2123 sc->sc_dev.dv_xname, chan, l->t, cur, (cur-start)/4, need, addtail);
2124 count = EN_READ(sc, MIDX_PLACE(chan));
2125 printf(" HW: base_address=0x%x, size=%d, read=%d, descstart=%d\n",
2126 (u_int)MIDX_BASE(count), MIDX_SZ(count),
2127 (int)EN_READ(sc, MIDX_READPTR(chan)),
2128 (int)EN_READ(sc, MIDX_DESCSTART(chan)));
2129#endif
2126 dstart = MIDV_DSTART(en_read(sc, MID_DST_RP(slot->atm_vci)));
2127 dstart = (dstart * sizeof(uint32_t)) + slot->start;
2130
2128
2131 /*
2132 * do we need to insert the TBD by hand?
2133 * note that tbd1/tbd2/pdu1 are in host byte order.
2134 */
2129 /* check to see if there is any data at all */
2130 if (dstart == cur) {
2131 EN_WRAPADD(0, MID_SL_N, sc->swsl_head, 1);
2132 /* remove from swslist */
2133 slot->oth_flags &= ~ENOTHER_SWSL;
2134 sc->swsl_size--;
2135 DBG(sc, SERV, ("rx%td: remove vci %d from swslist",
2136 slot - sc->rxslot, slot->atm_vci));
2137 goto next_vci;
2138 }
2135
2139
2136 if ((l->atm_flags & EN_OBHDR) == 0) {
2137#ifdef EN_DEBUG
2138 printf("%s: tx%d: insert header 0x%x 0x%x\n", sc->sc_dev.dv_xname,
2139 chan, l->tbd1, l->tbd2);
2140#endif
2141 EN_WRITE(sc, cur, l->tbd1);
2142 EN_WRAPADD(start, stop, cur, 4);
2143 EN_WRITE(sc, cur, l->tbd2);
2144 EN_WRAPADD(start, stop, cur, 4);
2145 need -= 8;
2146 }
2140 /*
2141 * figure out how many bytes we need
2142 * [mlen = # bytes to go in mbufs]
2143 */
2144 rbd = en_read(sc, cur);
2145 if (MID_RBD_ID(rbd) != MID_RBD_STDID)
2146 panic("en_service: id mismatch");
2147
2147
2148 /*
2149 * now do the mbufs...
2150 */
2148 if (rbd & MID_RBD_T) {
2149 mlen = 0; /* we've got trash */
2150 rx.pre_skip = MID_RBD_SIZE;
2151 rx.post_skip = 0;
2152 EN_COUNT(sc->stats.ttrash);
2153 DBG(sc, SERV, ("RX overflow lost %d cells!", MID_RBD_CNT(rbd)));
2151
2154
2152 for (tmp = l->t ; tmp != NULL ; tmp = tmp->m_next) {
2155 } else if (!(slot->atm_flags & ATM_PH_AAL5)) {
2156 /* 1 cell (ick!) */
2157 mlen = MID_CHDR_SIZE + MID_ATMDATASZ;
2158 rx.pre_skip = MID_RBD_SIZE;
2159 rx.post_skip = 0;
2153
2160
2154 /* get pointer to data and length */
2155 data = mtod(tmp, u_int32_t *);
2156 len = tmp->m_len;
2157 if (tmp == l->t) {
2158 data += sizeof(struct atm_pseudohdr)/sizeof(u_int32_t);
2159 len -= sizeof(struct atm_pseudohdr);
2160 }
2161 } else {
2162 rx.pre_skip = MID_RBD_SIZE;
2161
2163
2162 /* now, determine if we should copy it */
2163 if (l->nodma || (len < EN_MINDMA &&
2164 (len % 4) == 0 && ((uintptr_t) (void *) data % 4) == 0 &&
2165 (cur % 4) == 0)) {
2164 /* get PDU trailer in correct byte order */
2165 pdu = cur + MID_RBD_CNT(rbd) * MID_ATMDATASZ +
2166 MID_RBD_SIZE - MID_PDU_SIZE;
2167 if (pdu >= slot->stop)
2168 pdu -= EN_RXSZ * 1024;
2169 pdu = en_read(sc, pdu);
2166
2170
2167 /*
2168 * roundup len: the only time this will change the value of len
2169 * is when l->nodma is true, tmp is the last mbuf, and there is
2170 * a non-word number of bytes to transmit. in this case it is
2171 * safe to round up because we've en_mfix'd the mbuf (so the first
2172 * byte is word aligned and there must be enough free bytes at the end
2173 * to round off to the next word boundary)...
2174 */
2175 len = roundup(len, sizeof(u_int32_t));
2176 datastop = data + (len / sizeof(u_int32_t));
2177 /* copy loop: preserve byte order!!! use WRITEDAT */
2178 while (data != datastop) {
2179 EN_WRITEDAT(sc, cur, *data);
2180 data++;
2181 EN_WRAPADD(start, stop, cur, 4);
2182 }
2183 need -= len;
2184#ifdef EN_DEBUG
2185 printf("%s: tx%d: copied %d bytes (%d left, cur now 0x%x)\n",
2186 sc->sc_dev.dv_xname, chan, len, need, cur);
2187#endif
2188 continue; /* continue on to next mbuf */
2189 }
2171 if (MID_RBD_CNT(rbd) * MID_ATMDATASZ <
2172 MID_PDU_LEN(pdu)) {
2173 if_printf(&sc->enif, "invalid AAL5 length\n");
2174 rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ;
2175 mlen = 0;
2176 sc->enif.if_ierrors++;
2190
2177
2191 /* going to do DMA, first make sure the dtq is in sync. */
2192 if (dma != cur) {
2193 EN_DTQADD(sc, WORD_IDX(start,cur), chan, MIDDMA_JK, 0, 0, 0);
2194#ifdef EN_DEBUG
2195 printf("%s: tx%d: dtq_sync: advance pointer to %d\n",
2196 sc->sc_dev.dv_xname, chan, cur);
2197#endif
2198 }
2178 } else if (rbd & MID_RBD_CRCERR) {
2179 if_printf(&sc->enif, "CRC error\n");
2180 rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ;
2181 mlen = 0;
2182 sc->enif.if_ierrors++;
2199
2183
2200 /*
2201 * if this is the last buffer, and it looks like we are going to need to
2202 * flush the internal buffer, can we extend the length of this mbuf to
2203 * avoid the FLUSH?
2204 */
2184 } else {
2185 mlen = MID_PDU_LEN(pdu);
2186 rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ - mlen;
2187 }
2188 }
2205
2189
2206 if (tmp->m_next == NULL) {
2207 cnt = (need - len) % sizeof(u_int32_t);
2208 if (cnt && M_TRAILINGSPACE(tmp) >= cnt)
2209 len += cnt; /* pad for FLUSH */
2210 }
2211
2212#if !defined(MIDWAY_ENIONLY)
2190 /*
2191 * now allocate mbufs for mlen bytes of data, if out of mbufs, trash all
2192 *
2193 * notes:
2194 * 1. it is possible that we've already allocated an mbuf for this pkt
2195 * but ran out of DRQs, in which case we saved the allocated mbuf
2196 * on "q".
2197 * 2. if we save a buf in "q" we store the "cur" (pointer) in the
2198 * buf as an identity (that we can check later).
2199 * 3. after this block of code, if m is still NULL then we ran out of
2200 * mbufs
2201 */
2202 _IF_DEQUEUE(&slot->q, m);
2203 if (m != NULL) {
2204 if (m->m_pkthdr.csum_data != cur) {
2205 /* wasn't ours */
2206 DBG(sc, SERV, ("rx%td: q'ed buf %p not ours",
2207 slot - sc->rxslot, m));
2208 _IF_PREPEND(&slot->q, m);
2209 m = NULL;
2210 EN_COUNT(sc->stats.rxqnotus);
2211 } else {
2212 EN_COUNT(sc->stats.rxqus);
2213 DBG(sc, SERV, ("rx%td: recovered q'ed buf %p",
2214 slot - sc->rxslot, m));
2215 }
2216 }
2217 if (mlen == 0 && m != NULL) {
2218 /* should not happen */
2219 m_freem(m);
2220 m = NULL;
2221 }
2213
2222
2214 /*
2215 * the adaptec DMA engine is smart and handles everything for us.
2216 */
2223 if (mlen != 0 && m == NULL) {
2224 m = en_mget(sc, mlen);
2225 if (m == NULL) {
2226 rx.post_skip += mlen;
2227 mlen = 0;
2228 EN_COUNT(sc->stats.rxmbufout);
2229 DBG(sc, SERV, ("rx%td: out of mbufs",
2230 slot - sc->rxslot));
2231 } else
2232 rx.post_skip -= roundup(mlen, sizeof(uint32_t)) - mlen;
2217
2233
2218 if (sc->is_adaptec) {
2219 /* need to DMA "len" bytes out to card */
2220 need -= len;
2221 EN_WRAPADD(start, stop, cur, len);
2222#ifdef EN_DEBUG
2223 printf("%s: tx%d: adp_dma %d bytes (%d left, cur now 0x%x)\n",
2224 sc->sc_dev.dv_xname, chan, len, need, cur);
2225#endif
2226 end = (need == 0) ? MID_DMA_END : 0;
2227 EN_DTQADD(sc, len, chan, 0, vtophys(data), l->mlen, end);
2228 if (end)
2229 goto done;
2230 dma = cur; /* update dma pointer */
2231 continue;
2232 }
2233#endif /* !MIDWAY_ENIONLY */
2234 DBG(sc, SERV, ("rx%td: allocate buf %p, mlen=%d",
2235 slot - sc->rxslot, m, mlen));
2236 }
2234
2237
2235#if !defined(MIDWAY_ADPONLY)
2238 DBG(sc, SERV, ("rx%td: VCI %d, rbuf %p, mlen %d, skip %u/%u",
2239 slot - sc->rxslot, slot->atm_vci, m, mlen, rx.pre_skip,
2240 rx.post_skip));
2236
2241
2237 /*
2238 * the ENI DMA engine is not so smart and needs more help from us
2239 */
2242 if (m != NULL) {
2243 /* M_NOWAIT - called from interrupt context */
2244 map = uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT);
2245 if (map == NULL || !(map->flags & ENMAP_ALLOC)) {
2246 rx.post_skip += mlen;
2247 m_freem(m);
2248 DBG(sc, SERV, ("rx%td: out of maps",
2249 slot - sc->rxslot));
2250 if (map->map != NULL)
2251 uma_zfree(sc->map_zone, map);
2252 goto skip;
2253 }
2254 rx.m = m;
2255 error = bus_dmamap_load_mbuf(sc->txtag, map->map, m,
2256 en_rxdma_load, &rx, 0);
2240
2257
2241 /* do we need to do a DMA op to align to word boundary? */
2242 needalign = (uintptr_t) (void *) data % sizeof(u_int32_t);
2243 if (needalign) {
2244 EN_COUNT(sc->headbyte);
2245 cnt = sizeof(u_int32_t) - needalign;
2246 if (cnt == 2 && len >= cnt) {
2247 count = 1;
2248 bcode = MIDDMA_2BYTE;
2249 } else {
2250 cnt = min(cnt, len); /* prevent overflow */
2251 count = cnt;
2252 bcode = MIDDMA_BYTE;
2253 }
2254 need -= cnt;
2255 EN_WRAPADD(start, stop, cur, cnt);
2256#ifdef EN_DEBUG
2257 printf("%s: tx%d: small al_dma %d bytes (%d left, cur now 0x%x)\n",
2258 sc->sc_dev.dv_xname, chan, cnt, need, cur);
2259#endif
2260 len -= cnt;
2261 end = (need == 0) ? MID_DMA_END : 0;
2262 EN_DTQADD(sc, count, chan, bcode, vtophys(data), l->mlen, end);
2263 if (end)
2264 goto done;
2265 data = (u_int32_t *) ((u_char *)data + cnt);
2266 }
2258 if (error != 0) {
2259 if_printf(&sc->enif, "loading RX map failed "
2260 "%d\n", error);
2261 uma_zfree(sc->map_zone, map);
2262 m_freem(m);
2263 rx.post_skip += mlen;
2264 goto skip;
2267
2265
2268 /* do we need to do a DMA op to align? */
2269 if (sc->alburst &&
2270 (needalign = (((uintptr_t) (void *) data) & sc->bestburstmask)) != 0
2271 && len >= sizeof(u_int32_t)) {
2272 cnt = sc->bestburstlen - needalign;
2273 mx = len & ~(sizeof(u_int32_t)-1); /* don't go past end */
2274 if (cnt > mx) {
2275 cnt = mx;
2276 count = cnt / sizeof(u_int32_t);
2277 bcode = MIDDMA_WORD;
2278 } else {
2279 count = cnt / sizeof(u_int32_t);
2280 bcode = en_dmaplan[count].bcode;
2281 count = cnt >> en_dmaplan[count].divshift;
2282 }
2283 need -= cnt;
2284 EN_WRAPADD(start, stop, cur, cnt);
2285#ifdef EN_DEBUG
2286 printf("%s: tx%d: al_dma %d bytes (%d left, cur now 0x%x)\n",
2287 sc->sc_dev.dv_xname, chan, cnt, need, cur);
2288#endif
2289 len -= cnt;
2290 end = (need == 0) ? MID_DMA_END : 0;
2291 EN_DTQADD(sc, count, chan, bcode, vtophys(data), l->mlen, end);
2292 if (end)
2293 goto done;
2294 data = (u_int32_t *) ((u_char *)data + cnt);
2295 }
2266 }
2267 map->flags |= ENMAP_LOADED;
2296
2268
2297 /* do we need to do a max-sized burst? */
2298 if (len >= sc->bestburstlen) {
2299 count = len >> sc->bestburstshift;
2300 cnt = count << sc->bestburstshift;
2301 bcode = sc->bestburstcode;
2302 need -= cnt;
2303 EN_WRAPADD(start, stop, cur, cnt);
2304#ifdef EN_DEBUG
2305 printf("%s: tx%d: best_dma %d bytes (%d left, cur now 0x%x)\n",
2306 sc->sc_dev.dv_xname, chan, cnt, need, cur);
2307#endif
2308 len -= cnt;
2309 end = (need == 0) ? MID_DMA_END : 0;
2310 EN_DTQADD(sc, count, chan, bcode, vtophys(data), l->mlen, end);
2311 if (end)
2312 goto done;
2313 data = (u_int32_t *) ((u_char *)data + cnt);
2314 }
2269 if (rx.wait) {
2270 /* out of DRQs - wait */
2271 uma_zfree(sc->map_zone, map);
2315
2272
2316 /* do we need to do a cleanup burst? */
2317 cnt = len & ~(sizeof(u_int32_t)-1);
2318 if (cnt) {
2319 count = cnt / sizeof(u_int32_t);
2320 bcode = en_dmaplan[count].bcode;
2321 count = cnt >> en_dmaplan[count].divshift;
2322 need -= cnt;
2323 EN_WRAPADD(start, stop, cur, cnt);
2324#ifdef EN_DEBUG
2325 printf("%s: tx%d: cleanup_dma %d bytes (%d left, cur now 0x%x)\n",
2326 sc->sc_dev.dv_xname, chan, cnt, need, cur);
2327#endif
2328 len -= cnt;
2329 end = (need == 0) ? MID_DMA_END : 0;
2330 EN_DTQADD(sc, count, chan, bcode, vtophys(data), l->mlen, end);
2331 if (end)
2332 goto done;
2333 data = (u_int32_t *) ((u_char *)data + cnt);
2334 }
2273 m->m_pkthdr.csum_data = cur;
2274 _IF_ENQUEUE(&slot->q, m);
2275 EN_COUNT(sc->stats.rxdrqout);
2335
2276
2336 /* any word fragments left? */
2337 if (len) {
2338 EN_COUNT(sc->tailbyte);
2339 if (len == 2) {
2340 count = 1;
2341 bcode = MIDDMA_2BYTE; /* use 2byte mode */
2342 } else {
2343 count = len;
2344 bcode = MIDDMA_BYTE; /* use 1 byte mode */
2345 }
2346 need -= len;
2347 EN_WRAPADD(start, stop, cur, len);
2348#ifdef EN_DEBUG
2349 printf("%s: tx%d: byte cleanup_dma %d bytes (%d left, cur now 0x%x)\n",
2350 sc->sc_dev.dv_xname, chan, len, need, cur);
2351#endif
2352 end = (need == 0) ? MID_DMA_END : 0;
2353 EN_DTQADD(sc, count, chan, bcode, vtophys(data), l->mlen, end);
2354 if (end)
2355 goto done;
2356 }
2277 sc->need_drqs = 1; /* flag condition */
2278 return;
2357
2279
2358 dma = cur; /* update dma pointer */
2359#endif /* !MIDWAY_ADPONLY */
2280 }
2281 (void)m_length(m, &lastm);
2282 lastm->m_len -= roundup(mlen, sizeof(uint32_t)) - mlen;
2360
2283
2361 } /* next mbuf, please */
2284 m->m_pkthdr.rcvif = (void *)map;
2285 _IF_ENQUEUE(&slot->indma, m);
2362
2286
2363 /*
2364 * all mbuf data has been copied out to the obmem (or set up to be DMAd).
2365 * if the trailer or padding needs to be put in, do it now.
2366 *
2367 * NOTE: experimental results reveal the following fact:
2368 * if you DMA "X" bytes to the card, where X is not a multiple of 4,
2369 * then the card will internally buffer the last (X % 4) bytes (in
2370 * hopes of getting (4 - (X % 4)) more bytes to make a complete word).
2371 * it is important to make sure we don't leave any important data in
2372 * this internal buffer because it is discarded on the last (end) DTQ.
2373 * one way to do this is to DMA in (4 - (X % 4)) more bytes to flush
2374 * the darn thing out.
2375 */
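/*
 * Worked example of the note above (illustrative numbers): after
 * DMAing X = 53 bytes the card holds 53 % 4 = 1 byte internally;
 * DMAing 4 - 1 = 3 more bytes of pad pushes that byte out before the
 * closing DTQ would otherwise discard it.
 */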
2287 /* get next packet in this slot */
2288 goto same_vci;
2289 }
2290 skip:
2291 /*
2292 * Here we end if we should drop the packet from the receive buffer.
2293 * The number of bytes to drop is in fill. We can do this with one
2294 * JK entry. If we don't even have that one - wait.
2295 */
2296 if (sc->drq_free == 0) {
2297 sc->need_drqs = 1; /* flag condition */
2298 return;
2299 }
2300 rx.post_skip += rx.pre_skip;
2301 DBG(sc, SERV, ("rx%td: skipping %u", slot - sc->rxslot, rx.post_skip));
2376
2302
2377 if (addtail) {
2303 /* advance buffer address */
2304 EN_WRAPADD(slot->start, slot->stop, cur, rx.post_skip);
2378
2305
2379 pad = need % sizeof(u_int32_t);
2380 if (pad) {
2381 /*
2382 * FLUSH internal data buffer. pad out with random data from the front
2383 * of the mbuf chain...
2384 */
2385 bcode = (sc->is_adaptec) ? 0 : MIDDMA_BYTE;
2386 EN_COUNT(sc->tailflush);
2387 EN_WRAPADD(start, stop, cur, pad);
2388 EN_DTQADD(sc, pad, chan, bcode, vtophys(l->t->m_data), 0, 0);
2389 need -= pad;
2390#ifdef EN_DEBUG
2391 printf("%s: tx%d: pad/FLUSH dma %d bytes (%d left, cur now 0x%x)\n",
2392 sc->sc_dev.dv_xname, chan, pad, need, cur);
2393#endif
2394 }
2306 /* write DRQ entry */
2307 if (sc->is_adaptec)
2308 en_write(sc, sc->drq_us,
2309 MID_MK_RXQ_ADP(WORD_IDX(slot->start, cur),
2310 slot->atm_vci, MID_DMA_END, MIDDMA_JK));
2311 else
2312 en_write(sc, sc->drq_us,
2313 MID_MK_RXQ_ENI(WORD_IDX(slot->start, cur),
2314 slot->atm_vci, MID_DMA_END, MIDDMA_JK));
2315 en_write(sc, sc->drq_us + 4, 0);
2316 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_us, 8);
2317 sc->drq_free--;
2395
2318
2396 /* copy data */
2397 pad = need / sizeof(u_int32_t); /* round *down* */
2398 if (l->aal == MID_TBD_AAL5)
2399 pad -= 2;
2400#ifdef EN_DEBUG
2401 printf("%s: tx%d: padding %d bytes (cur now 0x%x)\n",
2402 sc->sc_dev.dv_xname, chan, (int)(pad * sizeof(u_int32_t)), cur);
2403#endif
2404 while (pad--) {
2405 EN_WRITEDAT(sc, cur, 0); /* no byte order issues with zero */
2406 EN_WRAPADD(start, stop, cur, 4);
2407 }
2408 if (l->aal == MID_TBD_AAL5) {
2409 EN_WRITE(sc, cur, l->pdu1); /* in host byte order */
2410 EN_WRAPADD(start, stop, cur, 8);
2411 }
2412 }
2319 /* signal to RX interrupt */
2320 sc->drq[MID_DRQ_A2REG(sc->drq_us)] = EN_DQ_MK(slot - sc->rxslot, 0);
2321 slot->cur = cur;
2413
2322
2414 if (addtail || dma != cur) {
2415 /* write final descriptor */
2416 EN_DTQADD(sc, WORD_IDX(start,cur), chan, MIDDMA_JK, 0,
2417 l->mlen, MID_DMA_END);
2418 /* dma = cur; */ /* not necessary since we are done */
2419 }
2323 /* signal to card */
2324 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_us));
2420
2325
2421done:
2422 /* update current pointer */
2423 sc->txslot[chan].cur = cur;
2424#ifdef EN_DEBUG
2425 printf("%s: tx%d: DONE! cur now = 0x%x\n",
2426 sc->sc_dev.dv_xname, chan, cur);
2427#endif
2428
2429 return;
2326 goto same_vci;
2430}
2431
2327}
2328
2432
2433/*
2434 * interrupt handler
2329/*
2330 * interrupt handler
2331 *
2332 * LOCK: unlocked, needed
2435 */
2333 */
2436
2437EN_INTR_TYPE en_intr(arg)
2438
2439void *arg;
2440
2334void
2335en_intr(void *arg)
2441{
2336{
2442 struct en_softc *sc = (struct en_softc *) arg;
2443 struct mbuf *m;
2444 struct atm_pseudohdr ah;
2445 struct ifnet *ifp;
2446 u_int32_t reg, kick, val, mask, chip, vci, slot, dtq, drq;
2447 int lcv, idx, need_softserv = 0;
2337 struct en_softc *sc = arg;
2338 uint32_t reg, kick, mask;
2339 int lcv, need_softserv;
2448
2340
2449 reg = EN_READ(sc, MID_INTACK);
2341 EN_LOCK(sc);
2450
2342
2451 if ((reg & MID_INT_ANY) == 0)
2452 EN_INTR_RET(0); /* not us */
2343 reg = en_read(sc, MID_INTACK);
2344 DBG(sc, INTR, ("interrupt=0x%b", reg, MID_INTBITS));
2453
2345
2454#ifdef EN_DEBUG
2455 printf("%s: interrupt=0x%b\n", sc->sc_dev.dv_xname, reg, MID_INTBITS);
2456#endif
2346 if ((reg & MID_INT_ANY) == 0) {
2347 EN_UNLOCK(sc);
2348 return;
2349 }
2457
2350
2458 /*
2459 * unexpected errors that need a reset
2460 */
2461
2462 if ((reg & (MID_INT_IDENT|MID_INT_LERR|MID_INT_DMA_ERR|MID_INT_SUNI)) != 0) {
2463 printf("%s: unexpected interrupt=0x%b, resetting card\n",
2464 sc->sc_dev.dv_xname, reg, MID_INTBITS);
2351 /*
2352 * unexpected errors that need a reset
2353 */
2354 if ((reg & (MID_INT_IDENT | MID_INT_LERR | MID_INT_DMA_ERR)) != 0) {
2355 if_printf(&sc->enif, "unexpected interrupt=0x%b, resetting\n",
2356 reg, MID_INTBITS);
2465#ifdef EN_DEBUG
2466#ifdef DDB
2357#ifdef EN_DEBUG
2358#ifdef DDB
2467#ifdef __FreeBSD__
2468 Debugger("en: unexpected error");
2469#else
2470 Debugger();
2471#endif
2359 Debugger("en: unexpected error");
2472#endif /* DDB */
2360#endif /* DDB */
2473 sc->enif.if_flags &= ~IFF_RUNNING; /* FREEZE! */
2361 sc->enif.if_flags &= ~IFF_RUNNING; /* FREEZE! */
2474#else
2362#else
2475 en_reset(sc);
2476 en_init(sc);
2363 en_reset_ul(sc);
2364 en_init(sc);
2477#endif
2365#endif
2478 EN_INTR_RET(1); /* for us */
2479 }
2366 EN_UNLOCK(sc);
2367 return;
2368 }
2480
2369
2481 /*******************
2482 * xmit interrupts *
2483 ******************/
2484
2485 kick = 0; /* bitmask of channels to kick */
2486 if (reg & MID_INT_TX) { /* TX done! */
2487
2488 /*
2489 * check for tx complete, if detected then this means that some space
2490 * has come free on the card. we must account for it and arrange to
2491 * kick the channel to life (in case it is stalled waiting on the card).
2492 */
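/*
 * Worked example of the free-space computation below (illustrative
 * numbers, assuming EN_TXSZ were 32, i.e. a 32768-byte slot): a read
 * pointer offset val = 0x1000 behind cur = 0x7000 gives
 * bfree = (0x1000 + 32768) - 0x7000 = 0x9000 - 0x7000 = 0x2000 bytes.
 */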
2493 for (mask = 1, lcv = 0 ; lcv < EN_NTX ; lcv++, mask = mask * 2) {
2494 if (reg & MID_TXCHAN(lcv)) {
2495 kick = kick | mask; /* want to kick later */
2496 val = EN_READ(sc, MIDX_READPTR(lcv)); /* current read pointer */
2497 val = (val * sizeof(u_int32_t)) + sc->txslot[lcv].start;
2498 /* convert to offset */
2499 if (val > sc->txslot[lcv].cur)
2500 sc->txslot[lcv].bfree = val - sc->txslot[lcv].cur;
2501 else
2502 sc->txslot[lcv].bfree = (val + (EN_TXSZ*1024)) - sc->txslot[lcv].cur;
2503#ifdef EN_DEBUG
2504 printf("%s: tx%d: transmit done. %d bytes now free in buffer\n",
2505 sc->sc_dev.dv_xname, lcv, sc->txslot[lcv].bfree);
2370#if 0
2371 if (reg & MID_INT_SUNI)
2372 if_printf(&sc->enif, "interrupt from SUNI (probably carrier "
2373 "change)\n");
2506#endif
2374#endif
2507 }
2508 }
2509 }
2510
2375
2511 if (reg & MID_INT_DMA_TX) { /* TX DMA done! */
2376 kick = 0;
2377 if (reg & MID_INT_TX)
2378 kick |= en_intr_tx(sc, reg);
2512
2379
2513 /*
2514 * check for TX DMA complete, if detected then this means that some DTQs
2515 * are now free. it also means some indma mbufs can be freed.
2516 * if we needed DTQs, kick all channels.
2517 */
2518 val = EN_READ(sc, MID_DMA_RDTX); /* chip's current location */
2519 idx = MID_DTQ_A2REG(sc->dtq_chip);/* where we last saw chip */
2520 if (sc->need_dtqs) {
2521 kick = MID_NTX_CH - 1; /* assume power of 2, kick all! */
2522 sc->need_dtqs = 0; /* recalculated in "kick" loop below */
2523#ifdef EN_DEBUG
2524 printf("%s: cleared need DTQ condition\n", sc->sc_dev.dv_xname);
2525#endif
2526 }
2527 while (idx != val) {
2528 sc->dtq_free++;
2529 if ((dtq = sc->dtq[idx]) != 0) {
2530 sc->dtq[idx] = 0; /* don't forget to zero it out when done */
2531 slot = EN_DQ_SLOT(dtq);
2532 _IF_DEQUEUE(&sc->txslot[slot].indma, m);
2533 if (!m) panic("enintr: dtqsync");
2534 sc->txslot[slot].mbsize -= EN_DQ_LEN(dtq);
2535#ifdef EN_DEBUG
2536 printf("%s: tx%d: free %d dma bytes, mbsize now %d\n",
2537 sc->sc_dev.dv_xname, slot, EN_DQ_LEN(dtq),
2538 sc->txslot[slot].mbsize);
2539#endif
2540 m_freem(m);
2541 }
2542 EN_WRAPADD(0, MID_DTQ_N, idx, 1);
2543 };
2544 sc->dtq_chip = MID_DTQ_REG2A(val); /* sync softc */
2545 }
2380 if (reg & MID_INT_DMA_TX)
2381 kick |= en_intr_tx_dma(sc);
2546
2382
2383 /*
2384 * kick xmit channels as needed.
2385 */
2386 if (kick) {
2387 DBG(sc, INTR, ("tx kick mask = 0x%x", kick));
2388 for (mask = 1, lcv = 0 ; lcv < EN_NTX ; lcv++, mask = mask * 2)
2389 if ((kick & mask) && _IF_QLEN(&sc->txslot[lcv].q) != 0)
2390 en_txdma(sc, &sc->txslot[lcv]);
2391 }
2547
2392
2548 /*
2549 * kick xmit channels as needed
2550 */
2393 need_softserv = 0;
2394 if (reg & MID_INT_DMA_RX)
2395 need_softserv |= en_intr_rx_dma(sc);
2551
2396
2552 if (kick) {
2553#ifdef EN_DEBUG
2554 printf("%s: tx kick mask = 0x%x\n", sc->sc_dev.dv_xname, kick);
2555#endif
2556 for (mask = 1, lcv = 0 ; lcv < EN_NTX ; lcv++, mask = mask * 2) {
2557 if ((kick & mask) && sc->txslot[lcv].q.ifq_head) {
2558 en_txdma(sc, lcv); /* kick it! */
2559 }
2560 } /* for each slot */
2561 } /* if kick */
2397 if (reg & MID_INT_SERVICE)
2398 need_softserv |= en_intr_service(sc);
2562
2399
2400 if (need_softserv)
2401 en_service(sc);
2563
2402
2564 /*******************
2565 * recv interrupts *
2566 ******************/
2403 /*
2404 * keep our stats
2405 */
2406 if (reg & MID_INT_DMA_OVR) {
2407 EN_COUNT(sc->stats.dmaovr);
2408 DBG(sc, INTR, ("MID_INT_DMA_OVR"));
2409 }
2410 reg = en_read(sc, MID_STAT);
2411 sc->stats.otrash += MID_OTRASH(reg);
2412 sc->stats.vtrash += MID_VTRASH(reg);
2567
2413
2568 /*
2569 * check for RX DMA complete, and pass the data "upstairs"
2570 */
2414 EN_UNLOCK(sc);
2415}
2571
2416
2572 if (reg & MID_INT_DMA_RX) {
2573 val = EN_READ(sc, MID_DMA_RDRX); /* chip's current location */
2574 idx = MID_DRQ_A2REG(sc->drq_chip);/* where we last saw chip */
2575 while (idx != val) {
2576 sc->drq_free++;
2577 if ((drq = sc->drq[idx]) != 0) {
2578 sc->drq[idx] = 0; /* don't forget to zero it out when done */
2579 slot = EN_DQ_SLOT(drq);
2580 if (EN_DQ_LEN(drq) == 0) { /* "JK" trash DMA? */
2581 m = NULL;
2582 } else {
2583 _IF_DEQUEUE(&sc->rxslot[slot].indma, m);
2584 if (!m)
2585 panic("enintr: drqsync: %s: lost mbuf in slot %d!",
2586 sc->sc_dev.dv_xname, slot);
2587 }
2588 /* do something with this mbuf */
2589 if (sc->rxslot[slot].oth_flags & ENOTHER_DRAIN) { /* drain? */
2590 if (m)
2591 m_freem(m);
2592 vci = sc->rxslot[slot].atm_vci;
2593 if (sc->rxslot[slot].indma.ifq_head == NULL &&
2594 sc->rxslot[slot].q.ifq_head == NULL &&
2595 (EN_READ(sc, MID_VC(vci)) & MIDV_INSERVICE) == 0 &&
2596 (sc->rxslot[slot].oth_flags & ENOTHER_SWSL) == 0) {
2597 sc->rxslot[slot].oth_flags = ENOTHER_FREE; /* done drain */
2598 sc->rxslot[slot].atm_vci = RX_NONE;
2599 sc->rxvc2slot[vci] = RX_NONE;
2600#ifdef EN_DEBUG
2601 printf("%s: rx%d: VCI %d now free\n", sc->sc_dev.dv_xname,
2602 slot, vci);
2603#endif
2604 }
2605 } else if (m != NULL) {
2606 ATM_PH_FLAGS(&ah) = sc->rxslot[slot].atm_flags;
2607 ATM_PH_VPI(&ah) = 0;
2608 ATM_PH_SETVCI(&ah, sc->rxslot[slot].atm_vci);
2609#ifdef EN_DEBUG
2610 printf("%s: rx%d: rxvci%d: atm_input, mbuf %p, len %d, hand %p\n",
2611 sc->sc_dev.dv_xname, slot, sc->rxslot[slot].atm_vci, m,
2612 EN_DQ_LEN(drq), sc->rxslot[slot].rxhand);
2613#endif
2417/*********************************************************************/
2418/*
2419 * Probing the DMA brokenness of the card
2420 */
2614
2421
2615 ifp = &sc->enif;
2616 ifp->if_ipackets++;
2422/*
2423 * Physical address load helper function for DMA probe
2424 *
2425 * LOCK: unlocked, not needed
2426 */
2427static void
2428en_dmaprobe_load(void *uarg, bus_dma_segment_t *segs, int nseg, int error)
2429{
2430 if (error == 0)
2431 *(bus_addr_t *)uarg = segs[0].ds_addr;
2432}
2617
2433
2618#if NBPF > 0
2619 if (ifp->if_bpf)
2620 BPF_MTAP(ifp, m);
2621#endif
2434/*
2435 * en_dmaprobe: helper function for en_attach.
2436 *
2437 * see how the card handles DMA by running a few DMA tests. we need
2438 * to figure out the largest number of bytes we can DMA in one burst
2439 * ("bestburstlen"), and if the starting address for a burst needs to
2440 * be aligned on any sort of boundary or not ("alburst").
2441 *
2442 * Things turn out more complex than that, because on my (harti) brand
2443 * new motherboard (2.4GHz) we can do 64-byte aligned DMAs, but everything
2444 * of more than 4 bytes fails (with an RX DMA timeout) for physical
2445 * addresses that end in 0xc. Therefore we search not only for the largest
2446 * burst that is supported (hopefully 64) but also check the largest
2447 * unaligned size that is supported. If that appears to be less than 4 words,
2448 * set the noalbursts flag. That flag is set only if alburst is also set.
2449 */
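/*
 * Illustration only (not driver code): how a power-of-two best burst
 * length yields the mask/shift pair used in the alignment tests later
 * on (a sketch with made-up names, assuming a 64-byte burst).
 */
#include <stdint.h>

struct ex_burst {
	unsigned	len;	/* e.g. 64 bytes */
	unsigned	shift;	/* log2(len) */
	unsigned	mask;	/* len - 1 */
};

static void
ex_burst_setup(struct ex_burst *b, unsigned burstlen)
{
	b->len = burstlen;
	b->mask = burstlen - 1;
	for (b->shift = 0; (1U << b->shift) < burstlen; b->shift++)
		continue;
}

/* bytes of short DMA needed before full bursts can start (0 if aligned) */
static unsigned
ex_burst_prefix(const struct ex_burst *b, uintptr_t addr)
{
	unsigned needalign = addr & b->mask;

	return (needalign ? b->len - needalign : 0);
}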
2622
2450
2623 atm_input(ifp, &ah, m, sc->rxslot[slot].rxhand);
2624 }
2451/*
2452 * en_dmaprobe_doit: do actual testing for the DMA test.
2453 * Cycle through all burst sizes from 8 up to 64 and check whether each works.
2454 * Return the largest one that works.
2455 *
2456 * LOCK: unlocked, not needed
2457 */
2458static int
2459en_dmaprobe_doit(struct en_softc *sc, uint8_t *sp, bus_addr_t psp)
2460{
2461 uint8_t *dp = sp + MIDDMA_MAXBURST;
2462 bus_addr_t pdp = psp + MIDDMA_MAXBURST;
2463 int lcv, retval = 4, cnt;
2464 uint32_t reg, bcode, midvloc;
2625
2465
2626 }
2627 EN_WRAPADD(0, MID_DRQ_N, idx, 1);
2628 };
2629 sc->drq_chip = MID_DRQ_REG2A(val); /* sync softc */
2466 if (sc->en_busreset)
2467 sc->en_busreset(sc);
2468 en_write(sc, MID_RESID, 0x0); /* reset card before touching RAM */
2630
2469
2631 if (sc->need_drqs) { /* true if we had a DRQ shortage */
2632 need_softserv = 1;
2633 sc->need_drqs = 0;
2634#ifdef EN_DEBUG
2635 printf("%s: cleared need DRQ condition\n", sc->sc_dev.dv_xname);
2636#endif
2637 }
2638 }
2470 /*
2471 * set up a 1k buffer at MID_BUFOFF
2472 */
2473 midvloc = ((MID_BUFOFF - MID_RAMOFF) / sizeof(uint32_t))
2474 >> MIDV_LOCTOPSHFT;
2475 en_write(sc, MIDX_PLACE(0), MIDX_MKPLACE(en_k2sz(1), midvloc));
2476 en_write(sc, MID_VC(0), (midvloc << MIDV_LOCSHIFT)
2477 | (en_k2sz(1) << MIDV_SZSHIFT) | MIDV_TRASH);
2478 en_write(sc, MID_DST_RP(0), 0);
2479 en_write(sc, MID_WP_ST_CNT(0), 0);
2639
2480
2640 /*
2641 * handle service interrupts
2642 */
2481 /* set up sample data */
2482 for (lcv = 0 ; lcv < MIDDMA_MAXBURST; lcv++)
2483 sp[lcv] = lcv + 1;
2643
2484
2644 if (reg & MID_INT_SERVICE) {
2645 chip = MID_SL_REG2A(EN_READ(sc, MID_SERV_WRITE));
2485 /* enable DMA (only) */
2486 en_write(sc, MID_MAST_CSR, MID_MCSR_ENDMA);
2646
2487
2647 while (sc->hwslistp != chip) {
2488 sc->drq_chip = MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX));
2489 sc->dtq_chip = MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX));
2648
2490
2649 /* fetch and remove it from hardware service list */
2650 vci = EN_READ(sc, sc->hwslistp);
2651 EN_WRAPADD(MID_SLOFF, MID_SLEND, sc->hwslistp, 4);/* advance hw ptr */
2652 slot = sc->rxvc2slot[vci];
2653 if (slot == RX_NONE) {
2654#ifdef EN_DEBUG
2655 printf("%s: unexpected rx interrupt on VCI %d\n",
2656 sc->sc_dev.dv_xname, vci);
2657#endif
2658 EN_WRITE(sc, MID_VC(vci), MIDV_TRASH); /* rx off, damn it! */
2659 continue; /* next */
2660 }
2661 EN_WRITE(sc, MID_VC(vci), sc->rxslot[slot].mode); /* remove from hwsl */
2662 EN_COUNT(sc->hwpull);
2491 /*
2492 * try it now . . . DMA it out, then DMA it back in and compare
2493 *
2494 * note: in order to get the dma stuff to reverse directions it wants
2495 * the "end" flag set! since we are not dma'ing valid data we may
2496 * get an ident mismatch interrupt (which we will ignore).
2497 */
2498 DBG(sc, DMA, ("test sp=%p/%#lx, dp=%p/%#lx",
2499 sp, (u_long)psp, dp, (u_long)pdp));
2500 for (lcv = 8 ; lcv <= MIDDMA_MAXBURST ; lcv = lcv * 2) {
2501 DBG(sc, DMA, ("test lcv=%d", lcv));
2663
2502
2664#ifdef EN_DEBUG
2665 printf("%s: pulled VCI %d off hwslist\n", sc->sc_dev.dv_xname, vci);
2666#endif
2503 /* zero SRAM and dest buffer */
2504 bus_space_set_region_4(sc->en_memt, sc->en_base,
2505 MID_BUFOFF, 0, 1024 / 4);
2506 bzero(dp, MIDDMA_MAXBURST);
2667
2507
2668 /* add it to the software service list (if needed) */
2669 if ((sc->rxslot[slot].oth_flags & ENOTHER_SWSL) == 0) {
2670 EN_COUNT(sc->swadd);
2671 need_softserv = 1;
2672 sc->rxslot[slot].oth_flags |= ENOTHER_SWSL;
2673 sc->swslist[sc->swsl_tail] = slot;
2674 EN_WRAPADD(0, MID_SL_N, sc->swsl_tail, 1);
2675 sc->swsl_size++;
2676#ifdef EN_DEBUG
2677 printf("%s: added VCI %d to swslist\n", sc->sc_dev.dv_xname, vci);
2678#endif
2679 }
2680 };
2681 }
2508 bcode = en_sz2b(lcv);
2682
2509
2683 /*
2684 * now service (function too big to include here)
2685 */
2510 /* build lcv-byte-DMA x NBURSTS */
2511 if (sc->is_adaptec)
2512 en_write(sc, sc->dtq_chip,
2513 MID_MK_TXQ_ADP(lcv, 0, MID_DMA_END, 0));
2514 else
2515 en_write(sc, sc->dtq_chip,
2516 MID_MK_TXQ_ENI(1, 0, MID_DMA_END, bcode));
2517 en_write(sc, sc->dtq_chip + 4, psp);
2518 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, sc->dtq_chip, 8);
2519 en_write(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_chip));
2686
2520
2687 if (need_softserv)
2688 en_service(sc);
2521 cnt = 1000;
2522 while ((reg = en_readx(sc, MID_DMA_RDTX)) !=
2523 MID_DTQ_A2REG(sc->dtq_chip)) {
2524 DELAY(1);
2525 if (--cnt == 0) {
2526 DBG(sc, DMA, ("unexpected timeout in tx "
2527 "DMA test\n alignment=0x%lx, burst size=%d"
2528 ", dma addr reg=%#x, rdtx=%#x, stat=%#x\n",
2529 (u_long)sp & 63, lcv,
2530 en_read(sc, MID_DMA_ADDR), reg,
2531 en_read(sc, MID_INTSTAT)));
2532 return (retval);
2533 }
2534 }
2689
2535
2690 /*
2691 * keep our stats
2692 */
2536 reg = en_read(sc, MID_INTACK);
2537 if ((reg & MID_INT_DMA_TX) != MID_INT_DMA_TX) {
2538 DBG(sc, DMA, ("unexpected status in tx DMA test: %#x\n",
2539 reg));
2540 return (retval);
2541 }
2542 /* re-enable DMA (only) */
2543 en_write(sc, MID_MAST_CSR, MID_MCSR_ENDMA);
2693
2544
2694 if (reg & MID_INT_DMA_OVR) {
2695 EN_COUNT(sc->dmaovr);
2696#ifdef EN_DEBUG
2697 printf("%s: MID_INT_DMA_OVR\n", sc->sc_dev.dv_xname);
2698#endif
2699 }
2700 reg = EN_READ(sc, MID_STAT);
2701#ifdef EN_STAT
2702 sc->otrash += MID_OTRASH(reg);
2703 sc->vtrash += MID_VTRASH(reg);
2704#endif
2545 /* "return to sender..." address is known ... */
2705
2546
2706 EN_INTR_RET(1); /* for us */
2547 /* build lcv-byte-DMA x NBURSTS */
2548 if (sc->is_adaptec)
2549 en_write(sc, sc->drq_chip,
2550 MID_MK_RXQ_ADP(lcv, 0, MID_DMA_END, 0));
2551 else
2552 en_write(sc, sc->drq_chip,
2553 MID_MK_RXQ_ENI(1, 0, MID_DMA_END, bcode));
2554 en_write(sc, sc->drq_chip + 4, pdp);
2555 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_chip, 8);
2556 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
2557 cnt = 1000;
2558 while ((reg = en_readx(sc, MID_DMA_RDRX)) !=
2559 MID_DRQ_A2REG(sc->drq_chip)) {
2560 DELAY(1);
2561 cnt--;
2562 if (cnt == 0) {
2563 DBG(sc, DMA, ("unexpected timeout in rx "
2564 "DMA test, rdrx=%#x\n", reg));
2565 return (retval);
2566 }
2567 }
2568 reg = en_read(sc, MID_INTACK);
2569 if ((reg & MID_INT_DMA_RX) != MID_INT_DMA_RX) {
2570 DBG(sc, DMA, ("unexpected status in rx DMA "
2571 "test: 0x%x\n", reg));
2572 return (retval);
2573 }
2574 if (bcmp(sp, dp, lcv)) {
2575 DBG(sc, DMA, ("DMA test failed! lcv=%d, sp=%p, "
2576 "dp=%p", lcv, sp, dp));
2577 return (retval);
2578 }
2579
2580 retval = lcv;
2581 }
2582 return (retval); /* studly 64 byte DMA present! oh baby!! */
2707}
2708
2583}
2584
2709
2710/*
2585/*
2711 * en_service: handle a service interrupt
2586 * Find the best DMA parameters
2712 *
2587 *
2713 * Q: why do we need a software service list?
2714 *
2715 * A: if we remove a VCI from the hardware list and we find that we are
2716 * out of DRQs we must defer processing until some DRQs become free.
2717 * so we must remember to look at this RX VCI/slot later, but we can't
2718 * put it back on the hardware service list (since that isn't allowed).
2719 * so we instead save it on the software service list. it would be nice
2720 * if we could peek at the VCI on top of the hwservice list without removing
2721 * it, however this leads to a race condition: if we peek at it and
2722 * decide we are done with it new data could come in before we have a
2723 * chance to remove it from the hwslist. by the time we get it out of
2724 * the list the interrupt for the new data will be lost. oops!
2725 *
2588 * LOCK: unlocked, not needed
2726 */
2589 */
2727
2728STATIC void en_service(sc)
2729
2730struct en_softc *sc;
2731
2590static void
2591en_dmaprobe(struct en_softc *sc)
2732{
2592{
2733 struct mbuf *m, *tmp;
2734 u_int32_t cur, dstart, rbd, pdu, *sav, dma, bcode, count, *data, *datastop;
2735 u_int32_t start, stop, cnt, needalign;
2736 int slot, raw, aal5, llc, vci, fill, mlen, tlen, drqneed, need, needfill, end;
2593 bus_dma_tag_t tag;
2594 bus_dmamap_t map;
2595 int err;
2596 void *buffer;
2597 int bestalgn, lcv, try, bestnoalgn;
2598 bus_addr_t phys;
2599 uint8_t *addr;
2737
2600
2738 aal5 = 0; /* Silence gcc */
2739next_vci:
2740 if (sc->swsl_size == 0) {
2741#ifdef EN_DEBUG
2742 printf("%s: en_service done\n", sc->sc_dev.dv_xname);
2743#endif
2744 return; /* >>> exit here if swsl now empty <<< */
2745 }
2601 sc->alburst = 0;
2602 sc->noalbursts = 0;
2746
2603
2747 /*
2748 * get slot/vci to service
2749 */
2604 /*
2605 * Allocate some DMA-able memory.
2606 * We need 3 times the max burst size aligned to the max burst size.
2607 */
2608 err = bus_dma_tag_create(NULL, MIDDMA_MAXBURST, 0,
2609 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2610 3 * MIDDMA_MAXBURST, 1, 3 * MIDDMA_MAXBURST, 0, &tag);
2611 if (err)
2612 panic("%s: cannot create test DMA tag %d", __func__, err);
2750
2613
2751 slot = sc->swslist[sc->swsl_head];
2752 vci = sc->rxslot[slot].atm_vci;
2753#ifdef EN_DIAG
2754 if (sc->rxvc2slot[vci] != slot) panic("en_service rx slot/vci sync");
2755#endif
2614 err = bus_dmamem_alloc(tag, &buffer, 0, &map);
2615 if (err)
2616 panic("%s: cannot allocate test DMA memory %d", __func__, err);
2756
2617
2757 /*
2758 * determine our mode and if we've got any work to do
2759 */
2618 err = bus_dmamap_load(tag, map, buffer, 3 * MIDDMA_MAXBURST,
2619 en_dmaprobe_load, &phys, 0);
2620 if (err)
2621 panic("%s: cannot load test DMA map %d", __func__, err);
2622 addr = buffer;
2623 DBG(sc, DMA, ("phys=%#lx addr=%p", (u_long)phys, addr));
2760
2624
2761 raw = sc->rxslot[slot].oth_flags & ENOTHER_RAW;
2762 start= sc->rxslot[slot].start;
2763 stop= sc->rxslot[slot].stop;
2764 cur = sc->rxslot[slot].cur;
2625 /*
2626 * Now get the best burst size of the aligned case.
2627 */
2628 bestalgn = bestnoalgn = en_dmaprobe_doit(sc, addr, phys);
2765
2629
2766#ifdef EN_DEBUG
2767 printf("%s: rx%d: service vci=%d raw=%d start/stop/cur=0x%x 0x%x 0x%x\n",
2768 sc->sc_dev.dv_xname, slot, vci, raw, start, stop, cur);
2769#endif
2630 /*
2631 * Now try unaligned.
2632 */
2633 for (lcv = 4; lcv < MIDDMA_MAXBURST; lcv += 4) {
2634 try = en_dmaprobe_doit(sc, addr + lcv, phys + lcv);
2770
2635
2771same_vci:
2772 dstart = MIDV_DSTART(EN_READ(sc, MID_DST_RP(vci)));
2773 dstart = (dstart * sizeof(u_int32_t)) + start;
2636 if (try < bestnoalgn)
2637 bestnoalgn = try;
2638 }
2774
2639
2775 /* check to see if there is any data at all */
2776 if (dstart == cur) {
2777defer: /* defer processing */
2778 EN_WRAPADD(0, MID_SL_N, sc->swsl_head, 1);
2779 sc->rxslot[slot].oth_flags &= ~ENOTHER_SWSL;
2780 sc->swsl_size--;
2781 /* >>> remove from swslist <<< */
2782#ifdef EN_DEBUG
2783 printf("%s: rx%d: remove vci %d from swslist\n",
2784 sc->sc_dev.dv_xname, slot, vci);
2785#endif
2786 goto next_vci;
2787 }
2640 if (bestnoalgn < bestalgn) {
2641 sc->alburst = 1;
2642 if (bestnoalgn < 32)
2643 sc->noalbursts = 1;
2644 }
2788
2645
2789 /*
2790 * figure out how many bytes we need
2791 * [mlen = # bytes to go in mbufs, fill = # bytes to dump (MIDDMA_JK)]
2792 */
2646 sc->bestburstlen = bestalgn;
2647 sc->bestburstshift = en_log2(bestalgn);
2648 sc->bestburstmask = sc->bestburstlen - 1; /* must be power of 2 */
2649 sc->bestburstcode = en_sz2b(bestalgn);
2793
2650
2794 if (raw) {
2651 /*
2652 * Reset the chip before freeing the buffer. It may still be trying
2653 * to DMA.
2654 */
2655 if (sc->en_busreset)
2656 sc->en_busreset(sc);
2657 en_write(sc, MID_RESID, 0x0); /* reset card before touching RAM */
2795
2658
2796 /* raw mode (aka boodi mode) */
2797 fill = 0;
2798 if (dstart > cur)
2799 mlen = dstart - cur;
2800 else
2801 mlen = (dstart + (EN_RXSZ*1024)) - cur;
2659 DELAY(10000); /* may still do DMA */
2802
2660
2803 if (mlen < sc->rxslot[slot].raw_threshold)
2804 goto defer; /* too little data to deal with */
2661 /*
2662 * Free the DMA stuff
2663 */
2664 bus_dmamap_unload(tag, map);
2665 bus_dmamem_free(tag, buffer, map);
2666 bus_dma_tag_destroy(tag);
2667}
2805
2668
2806 } else {
2669/*********************************************************************/
2670/*
2671 * Attach/detach.
2672 */
2807
2673
2808 /* normal mode */
2809 aal5 = (sc->rxslot[slot].atm_flags & ATM_PH_AAL5);
2810 llc = (aal5 && (sc->rxslot[slot].atm_flags & ATM_PH_LLCSNAP)) ? 1 : 0;
2811 rbd = EN_READ(sc, cur);
2812 if (MID_RBD_ID(rbd) != MID_RBD_STDID)
2813 panic("en_service: id mismatch");
2674/*
2675 * Attach to the card.
2676 *
2677 * LOCK: unlocked, not needed (but initialized)
2678 */
2679int
2680en_attach(struct en_softc *sc)
2681{
2682 struct ifnet *ifp = &sc->enif;
2683 int sz;
2684 uint32_t reg, lcv, check, ptr, sav, midvloc;
2814
2685
2815 if (rbd & MID_RBD_T) {
2816 mlen = 0; /* we've got trash */
2817 fill = MID_RBD_SIZE;
2818 EN_COUNT(sc->ttrash);
2819#ifdef EN_DEBUG
2686#ifdef EN_DEBUG
2820 printf("RX overflow lost %d cells!\n", MID_RBD_CNT(rbd));
2687 sc->debug = EN_DEBUG;
2821#endif
2688#endif
2822 } else if (!aal5) {
2823 mlen = MID_RBD_SIZE + MID_CHDR_SIZE + MID_ATMDATASZ; /* 1 cell (ick!) */
2824 fill = 0;
2825 } else {
2826 struct ifnet *ifp;
2689 /*
2690 * Probe card to determine memory size.
2691 *
2692 * The stupid ENI card always reports to PCI that it needs 4MB of
2693 * space (2MB regs and 2MB RAM). If it has less than 2MB RAM the
2694 * addresses wrap in the RAM address space (i.e. on a 512KB card
2695 * addresses 0x3ffffc, 0x37fffc, and 0x2ffffc are aliases for
2696 * 0x27fffc [note that RAM starts at offset 0x200000]).
2697 */
2827
2698
2828 tlen = (MID_RBD_CNT(rbd) * MID_ATMDATASZ) + MID_RBD_SIZE;
2829 pdu = cur + tlen - MID_PDU_SIZE;
2830 if (pdu >= stop)
2831 pdu -= (EN_RXSZ*1024);
2832 pdu = EN_READ(sc, pdu); /* get PDU in correct byte order */
2833 fill = tlen - MID_RBD_SIZE - MID_PDU_LEN(pdu);
2834 if (fill < 0 || (rbd & MID_RBD_CRCERR) != 0) {
2835 static int first = 1;
2699 /* reset card before touching RAM */
2700 if (sc->en_busreset)
2701 sc->en_busreset(sc);
2702 en_write(sc, MID_RESID, 0x0);
2836
2703
2837 if (first) {
2838 printf("%s: %s, dropping frame\n", sc->sc_dev.dv_xname,
2839 (rbd & MID_RBD_CRCERR) ?
2840 "CRC error" : "invalid AAL5 PDU length");
2841 printf("%s: got %d cells (%d bytes), AAL5 len is %d bytes (pdu=0x%x)\n",
2842 sc->sc_dev.dv_xname, MID_RBD_CNT(rbd),
2843 tlen - MID_RBD_SIZE, MID_PDU_LEN(pdu), pdu);
2844#ifndef EN_DEBUG
2845 printf("CRC error report disabled from now on!\n");
2846 first = 0;
2847#endif
2704 for (lcv = MID_PROBEOFF; lcv <= MID_MAXOFF ; lcv += MID_PROBSIZE) {
2705 en_write(sc, lcv, lcv); /* data[address] = address */
2706 for (check = MID_PROBEOFF; check < lcv ;check += MID_PROBSIZE) {
2707 reg = en_read(sc, check);
2708 if (reg != check)
2709 /* found an alias! - quit */
2710 goto done_probe;
2711 }
2848 }
2712 }
2849 fill = tlen;
2713 done_probe:
2714 lcv -= MID_PROBSIZE; /* take one step back */
2715 sc->en_obmemsz = (lcv + 4) - MID_RAMOFF;
2850
2716
2851 ifp = &sc->enif;
2852 ifp->if_ierrors++;
2717 /*
2718 * determine the largest DMA burst supported
2719 */
2720 en_dmaprobe(sc);
2853
2721
2854 }
2855 mlen = tlen - fill;
2856 }
2722 /*
2723 * "hello world"
2724 */
2857
2725
2858 }
2726 /* reset */
2727 if (sc->en_busreset)
2728 sc->en_busreset(sc);
2729 en_write(sc, MID_RESID, 0x0); /* reset */
2859
2730
2860 /*
2861 * now allocate mbufs for mlen bytes of data, if out of mbufs, trash all
2862 *
2863 * notes:
2864 * 1. it is possible that we've already allocated an mbuf for this pkt
2865 * but ran out of DRQs, in which case we saved the allocated mbuf on
2866 * "q".
2867 * 2. if we save an mbuf in "q" we store the "cur" (pointer) in the front
2868 * of the mbuf as an identity (that we can check later), and we also
2869 * store drqneed (so we don't have to recompute it).
2870 * 3. after this block of code, if m is still NULL then we ran out of mbufs
2871 */
2872
2873 m = sc->rxslot[slot].q.ifq_head;
2874 drqneed = 1;
2875 if (m) {
2876 sav = mtod(m, u_int32_t *);
2877 if (sav[0] != cur) {
2878#ifdef EN_DEBUG
2879 printf("%s: rx%d: q'ed mbuf %p not ours\n",
2880 sc->sc_dev.dv_xname, slot, m);
2881#endif
2882 m = NULL; /* wasn't ours */
2883 EN_COUNT(sc->rxqnotus);
2884 } else {
2885 EN_COUNT(sc->rxqus);
2886 _IF_DEQUEUE(&sc->rxslot[slot].q, m);
2887 drqneed = sav[1];
2888#ifdef EN_DEBUG
2889 printf("%s: rx%d: recovered q'ed mbuf %p (drqneed=%d)\n",
2890 sc->sc_dev.dv_xname, slot, m, drqneed);
2891#endif
2892 }
2893 }
2731 /* zero memory */
2732 bus_space_set_region_4(sc->en_memt, sc->en_base,
2733 MID_RAMOFF, 0, sc->en_obmemsz / 4);
2894
2734
2895 if (mlen != 0 && m == NULL) {
2896 m = en_mget(sc, mlen, &drqneed); /* allocate! */
2897 if (m == NULL) {
2898 fill += mlen;
2899 mlen = 0;
2900 EN_COUNT(sc->rxmbufout);
2901#ifdef EN_DEBUG
2902 printf("%s: rx%d: out of mbufs\n", sc->sc_dev.dv_xname, slot);
2903#endif
2904 }
2905#ifdef EN_DEBUG
2906 printf("%s: rx%d: allocate mbuf %p, mlen=%d, drqneed=%d\n",
2907 sc->sc_dev.dv_xname, slot, m, mlen, drqneed);
2908#endif
2909 }
2735 reg = en_read(sc, MID_RESID);
2910
2736
2911#ifdef EN_DEBUG
2912 printf("%s: rx%d: VCI %d, mbuf_chain %p, mlen %d, fill %d\n",
2913 sc->sc_dev.dv_xname, slot, vci, m, mlen, fill);
2914#endif
2737 if_printf(&sc->enif, "ATM midway v%d, board IDs %d.%d, %s%s%s, "
2738 "%ldKB on-board RAM\n", MID_VER(reg), MID_MID(reg), MID_DID(reg),
2739 (MID_IS_SABRE(reg)) ? "sabre controller, " : "",
2740 (MID_IS_SUNI(reg)) ? "SUNI" : "Utopia",
2741 (!MID_IS_SUNI(reg) && MID_IS_UPIPE(reg)) ? " (pipelined)" : "",
2742 (long)sc->en_obmemsz / 1024);
2915
2743
2916 /*
2917 * now check to see if we've got the DRQs needed. if we are out of
2918 * DRQs we must quit (saving our mbuf, if we've got one).
2919 */
2744 if (sc->is_adaptec) {
2745 if (sc->bestburstlen == 64 && sc->alburst == 0)
2746 if_printf(&sc->enif, "passed 64 byte DMA test\n");
2747 else
2748 if_printf(&sc->enif, "FAILED DMA TEST: burst=%d, "
2749 "alburst=%d\n", sc->bestburstlen, sc->alburst);
2750 } else {
2751 if_printf(&sc->enif, "maximum DMA burst length = %d bytes%s\n",
2752 sc->bestburstlen, sc->alburst ? sc->noalbursts ?
2753 " (no large bursts)" : " (must align)" : "");
2754 }
2920
2755
2921 needfill = (fill) ? 1 : 0;
2922 if (drqneed + needfill > sc->drq_free) {
2923 sc->need_drqs = 1; /* flag condition */
2924 if (m == NULL) {
2925 EN_COUNT(sc->rxoutboth);
2926#ifdef EN_DEBUG
2927 printf("%s: rx%d: out of DRQs *and* mbufs!\n", sc->sc_dev.dv_xname, slot);
2928#endif
2929 return; /* >>> exit here if out of both mbufs and DRQs <<< */
2930 }
2931 sav = mtod(m, u_int32_t *);
2932 sav[0] = cur;
2933 sav[1] = drqneed;
2934 _IF_ENQUEUE(&sc->rxslot[slot].q, m);
2935 EN_COUNT(sc->rxdrqout);
2936#ifdef EN_DEBUG
2937 printf("%s: rx%d: out of DRQs\n", sc->sc_dev.dv_xname, slot);
2938#endif
2939 return; /* >>> exit here if out of DRQs <<< */
2940 }
2756 /*
2757 * link into network subsystem and prepare card
2758 */
2759 sc->enif.if_softc = sc;
2760 ifp->if_flags = IFF_SIMPLEX;
2761 ifp->if_ioctl = en_ioctl;
2762 ifp->if_start = en_start;
2941
2763
2942 /*
2943 * at this point all resources have been allocated and we are committed
2944 * to servicing this slot.
2945 *
2946 * dma = last location we told chip about
2947 * cur = current location
2948 * mlen = space in the mbuf we want
2949 * need = bytes to xfer in (decrs to zero)
2950 * fill = how much fill we need
2951 * tlen = how much data to transfer to this mbuf
2952 * cnt/bcode/count = <same as xmit>
2953 *
2954 * 'needfill' not used after this point
2955 */
2764 /*
2765 * Make the sysctl tree
2766 */
2767 sysctl_ctx_init(&sc->sysctl_ctx);
2956
2768
2957 dma = cur; /* dma = last location we told chip about */
2958 need = roundup(mlen, sizeof(u_int32_t));
2959 fill = fill - (need - mlen); /* note: may invalidate 'needfill' */
2769 if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
2770 SYSCTL_STATIC_CHILDREN(_hw_en), OID_AUTO,
2771 device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "")) == NULL)
2772 goto fail;
2960
2773
2961 for (tmp = m ; tmp != NULL && need > 0 ; tmp = tmp->m_next) {
2962 tlen = roundup(tmp->m_len, sizeof(u_int32_t)); /* m_len set by en_mget */
2963 data = mtod(tmp, u_int32_t *);
2774 if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2775 OID_AUTO, "istats", CTLFLAG_RD, sc, 0, en_sysctl_istats,
2776 "A", "internal statistics") == NULL)
2777 goto fail;
2964
2965#ifdef EN_DEBUG
2778
2779#ifdef EN_DEBUG
2966 printf("%s: rx%d: load mbuf %p, m_len=%d, m_data=%p, tlen=%d\n",
2967 sc->sc_dev.dv_xname, slot, tmp, tmp->m_len, tmp->m_data, tlen);
2780 if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
2781 OID_AUTO, "debug", CTLFLAG_RW , &sc->debug, 0, "") == NULL)
2782 goto fail;
2968#endif
2783#endif
2969
2970 /* copy data */
2971 if (EN_NORXDMA || !en_dma || tlen < EN_MINDMA) {
2972 datastop = (u_int32_t *)((u_char *) data + tlen);
2973 /* copy loop: preserve byte order!!! use READDAT */
2974 while (data != datastop) {
2975 *data = EN_READDAT(sc, cur);
2976 data++;
2977 EN_WRAPADD(start, stop, cur, 4);
2978 }
2979 need -= tlen;
2980#ifdef EN_DEBUG
2981 printf("%s: rx%d: vci%d: copied %d bytes (%d left)\n",
2982 sc->sc_dev.dv_xname, slot, vci, tlen, need);
2983#endif
2984 continue;
2985 }
2986
2784
2987 /* DMA data (check to see if we need to sync DRQ first) */
2988 if (dma != cur) {
2989 EN_DRQADD(sc, WORD_IDX(start,cur), vci, MIDDMA_JK, 0, 0, 0, 0);
2990#ifdef EN_DEBUG
2991 printf("%s: rx%d: vci%d: drq_sync: advance pointer to %d\n",
2992 sc->sc_dev.dv_xname, slot, vci, cur);
2993#endif
2994 }
2785 mtx_init(&sc->en_mtx, device_get_nameunit(sc->dev),
2786 MTX_NETWORK_LOCK, MTX_DEF);
2995
2787
2996#if !defined(MIDWAY_ENIONLY)
2997
2998 /*
2999 * the adaptec DMA engine is smart and handles everything for us.
3000 */
3001
3002 if (sc->is_adaptec) {
3003 need -= tlen;
3004 EN_WRAPADD(start, stop, cur, tlen);
3005#ifdef EN_DEBUG
3006 printf("%s: rx%d: vci%d: adp_dma %d bytes (%d left)\n",
3007 sc->sc_dev.dv_xname, slot, vci, tlen, need);
3008#endif
3009 end = (need == 0 && !fill) ? MID_DMA_END : 0;
3010 EN_DRQADD(sc, tlen, vci, 0, vtophys(data), mlen, slot, end);
3011 if (end)
3012 goto done;
3013 dma = cur; /* update dma pointer */
3014 continue;
3015 }
3016#endif /* !MIDWAY_ENIONLY */
2788 MGET(sc->padbuf, M_TRYWAIT, MT_DATA);
2789 if (sc->padbuf == NULL)
2790 goto fail;
2791 bzero(sc->padbuf->m_data, MLEN);
3017
2792
2793 if (bus_dma_tag_create(NULL, 1, 0,
2794 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2795 EN_TXSZ * 1024, EN_MAX_DMASEG, EN_TXSZ * 1024, 0, &sc->txtag))
2796 goto fail;
3018
2797
3019#if !defined(MIDWAY_ADPONLY)
2798 sc->map_zone = uma_zcreate("en dma maps", sizeof(struct en_map),
2799 en_map_ctor, en_map_dtor, NULL, en_map_fini, UMA_ALIGN_PTR,
2800 UMA_ZONE_ZINIT);
2801 if (sc->map_zone == NULL)
2802 goto fail;
2803 uma_zone_set_max(sc->map_zone, EN_MAX_MAPS);
3020
2804
3021 /*
3022 * the ENI DMA engine is not so smart and needs more help from us
3023 */
2805 /*
2806 * init softc
2807 */
2808 for (lcv = 0 ; lcv < MID_N_VC ; lcv++) {
2809 sc->rxvc2slot[lcv] = RX_NONE;
2810 sc->txspeed[lcv] = 0; /* full */
2811 sc->txvc2slot[lcv] = 0; /* full speed == slot 0 */
2812 }
3024
2813
3025 /* do we need to do a DMA op to align? */
3026 if (sc->alburst &&
3027 (needalign = (((uintptr_t) (void *) data) & sc->bestburstmask)) != 0) {
3028 cnt = sc->bestburstlen - needalign;
3029 if (cnt > tlen) {
3030 cnt = tlen;
3031 count = cnt / sizeof(u_int32_t);
3032 bcode = MIDDMA_WORD;
3033 } else {
3034 count = cnt / sizeof(u_int32_t);
3035 bcode = en_dmaplan[count].bcode;
3036 count = cnt >> en_dmaplan[count].divshift;
3037 }
3038 need -= cnt;
3039 EN_WRAPADD(start, stop, cur, cnt);
3040#ifdef EN_DEBUG
3041 printf("%s: rx%d: vci%d: al_dma %d bytes (%d left)\n",
3042 sc->sc_dev.dv_xname, slot, vci, cnt, need);
3043#endif
3044 tlen -= cnt;
3045 end = (need == 0 && !fill) ? MID_DMA_END : 0;
3046 EN_DRQADD(sc, count, vci, bcode, vtophys(data), mlen, slot, end);
3047 if (end)
3048 goto done;
3049 data = (u_int32_t *)((u_char *) data + cnt);
3050 }
2814 sz = sc->en_obmemsz - (MID_BUFOFF - MID_RAMOFF);
2815 ptr = sav = MID_BUFOFF;
2816 ptr = roundup(ptr, EN_TXSZ * 1024); /* align */
2817 sz = sz - (ptr - sav);
2818 if (EN_TXSZ*1024 * EN_NTX > sz) {
2819 if_printf(&sc->enif, "EN_NTX/EN_TXSZ too big\n");
2820 goto fail;
2821 }
2822 for (lcv = 0 ;lcv < EN_NTX ;lcv++) {
2823 sc->txslot[lcv].mbsize = 0;
2824 sc->txslot[lcv].start = ptr;
2825 ptr += (EN_TXSZ * 1024);
2826 sz -= (EN_TXSZ * 1024);
2827 sc->txslot[lcv].stop = ptr;
2828 sc->txslot[lcv].nref = 0;
2829 DBG(sc, INIT, ("tx%d: start 0x%x, stop 0x%x", lcv,
2830 sc->txslot[lcv].start, sc->txslot[lcv].stop));
2831 }
3051
2832
3052 /* do we need a max-sized burst? */
3053 if (tlen >= sc->bestburstlen) {
3054 count = tlen >> sc->bestburstshift;
3055 cnt = count << sc->bestburstshift;
3056 bcode = sc->bestburstcode;
3057 need -= cnt;
3058 EN_WRAPADD(start, stop, cur, cnt);
3059#ifdef EN_DEBUG
3060 printf("%s: rx%d: vci%d: best_dma %d bytes (%d left)\n",
3061 sc->sc_dev.dv_xname, slot, vci, cnt, need);
3062#endif
3063 tlen -= cnt;
3064 end = (need == 0 && !fill) ? MID_DMA_END : 0;
3065 EN_DRQADD(sc, count, vci, bcode, vtophys(data), mlen, slot, end);
3066 if (end)
3067 goto done;
3068 data = (u_int32_t *)((u_char *) data + cnt);
3069 }
2833 sav = ptr;
2834 ptr = roundup(ptr, EN_RXSZ * 1024); /* align */
2835 sz = sz - (ptr - sav);
2836 sc->en_nrx = sz / (EN_RXSZ * 1024);
2837 if (sc->en_nrx <= 0) {
2838 if_printf(&sc->enif, "EN_NTX/EN_TXSZ/EN_RXSZ too big\n");
2839 goto fail;
2840 }
3070
2841
3071 /* do we need to do a cleanup burst? */
3072 if (tlen) {
3073 count = tlen / sizeof(u_int32_t);
3074 bcode = en_dmaplan[count].bcode;
3075 count = tlen >> en_dmaplan[count].divshift;
3076 need -= tlen;
3077 EN_WRAPADD(start, stop, cur, tlen);
3078#ifdef EN_DEBUG
3079 printf("%s: rx%d: vci%d: cleanup_dma %d bytes (%d left)\n",
3080 sc->sc_dev.dv_xname, slot, vci, tlen, need);
3081#endif
3082 end = (need == 0 && !fill) ? MID_DMA_END : 0;
3083 EN_DRQADD(sc, count, vci, bcode, vtophys(data), mlen, slot, end);
3084 if (end)
3085 goto done;
3086 }
2842 /*
2843 * ensure that there is always one VC slot on the service list free
2844 * so that we can tell the difference between a full and empty list.
2845 */
2846 if (sc->en_nrx >= MID_N_VC)
2847 sc->en_nrx = MID_N_VC - 1;
3087
2848
3088 dma = cur; /* update dma pointer */
2849 for (lcv = 0 ; lcv < sc->en_nrx ; lcv++) {
2850 sc->rxslot[lcv].rxhand = NULL;
2851 sc->rxslot[lcv].oth_flags = ENOTHER_FREE;
2852 midvloc = sc->rxslot[lcv].start = ptr;
2853 ptr += (EN_RXSZ * 1024);
2854 sz -= (EN_RXSZ * 1024);
2855 sc->rxslot[lcv].stop = ptr;
2856 midvloc = midvloc - MID_RAMOFF;
2857 /* mask, cvt to words */
2858 midvloc = (midvloc & ~((EN_RXSZ*1024) - 1)) >> 2;
2859 /* we only want the top 11 bits */
2860 midvloc = midvloc >> MIDV_LOCTOPSHFT;
2861 midvloc = (midvloc & MIDV_LOCMASK) << MIDV_LOCSHIFT;
2862 sc->rxslot[lcv].mode = midvloc |
2863 (en_k2sz(EN_RXSZ) << MIDV_SZSHIFT) | MIDV_TRASH;
3089
2864
3090#endif /* !MIDWAY_ADPONLY */
2865 DBG(sc, INIT, ("rx%d: start 0x%x, stop 0x%x, mode 0x%x", lcv,
2866 sc->rxslot[lcv].start, sc->rxslot[lcv].stop,
2867 sc->rxslot[lcv].mode));
2868 }
3091
2869
3092 }
2870 bzero(&sc->stats, sizeof(sc->stats));
3093
2871
3094 /* skip the end */
3095 if (fill || dma != cur) {
3096#ifdef EN_DEBUG
3097 if (fill)
3098 printf("%s: rx%d: vci%d: skipping %d bytes of fill\n",
3099 sc->sc_dev.dv_xname, slot, vci, fill);
3100 else
3101 printf("%s: rx%d: vci%d: syncing chip from 0x%x to 0x%x [cur]\n",
3102 sc->sc_dev.dv_xname, slot, vci, dma, cur);
2872 if_printf(&sc->enif, "%d %dKB receive buffers, %d %dKB transmit "
2873 "buffers\n", sc->en_nrx, EN_RXSZ, EN_NTX, EN_TXSZ);
2874 if_printf(&sc->enif, "end station identifier (mac address) %6D\n",
2875 sc->macaddr, ":");
2876
2877 /*
2878 * final commit
2879 */
2880 if_attach(ifp);
2881 atm_ifattach(ifp);
2882
2883#ifdef ENABLE_BPF
2884 bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
3103#endif
2885#endif
3104 EN_WRAPADD(start, stop, cur, fill);
3105 EN_DRQADD(sc, WORD_IDX(start,cur), vci, MIDDMA_JK, 0, mlen,
3106 slot, MID_DMA_END);
3107 /* dma = cur; */ /* not necessary since we are done */
3108 }
3109
2886
3110 /*
3111 * done, remove stuff we don't want to pass up:
3112 * raw mode (boodi mode): pass everything up for later processing
3113 * aal5: remove RBD
3114 * aal0: remove RBD + cell header
3115 */
2887 return (0);
3116
2888
3117done:
3118 if (m) {
3119 if (!raw) {
3120 cnt = MID_RBD_SIZE;
3121 if (!aal5) cnt += MID_CHDR_SIZE;
3122 m->m_len -= cnt; /* chop! */
3123 m->m_pkthdr.len -= cnt;
3124 m->m_data += cnt;
3125 }
3126 _IF_ENQUEUE(&sc->rxslot[slot].indma, m);
3127 }
3128 sc->rxslot[slot].cur = cur; /* update master copy of 'cur' */
2889 fail:
2890 en_destroy(sc);
2891 return (-1);
2892}
3129
2893
3130#ifdef EN_DEBUG
3131 printf("%s: rx%d: vci%d: DONE! cur now =0x%x\n",
3132 sc->sc_dev.dv_xname, slot, vci, cur);
3133#endif
2894/*
2895 * Free all internal resources. No access to bus resources here.
2896 * No locking required here (interrupt is already disabled).
2897 *
2898 * LOCK: unlocked, not needed (but destroyed)
2899 */
2900void
2901en_destroy(struct en_softc *sc)
2902{
2903 if (sc->padbuf != NULL)
2904 m_free(sc->padbuf);
3134
2905
3135 goto same_vci; /* get next packet in this slot */
2906 /*
2907 * Destroy the map zone before the tag (the fini function will
2908 * destroy the DMA maps using the tag)
2909 */
2910 if (sc->map_zone != NULL)
2911 uma_zdestroy(sc->map_zone);
2912
2913 if (sc->txtag != NULL)
2914 bus_dma_tag_destroy(sc->txtag);
2915
2916 (void)sysctl_ctx_free(&sc->sysctl_ctx);
2917
2918 mtx_destroy(&sc->en_mtx);
3136}
3137
2919}
2920
2921/*********************************************************************/
2922/*
2923 * Debugging support
2924 */
3138
3139#ifdef EN_DDBHOOK
3140/*
3141 * functions we can call from ddb
3142 */
3143
3144/*
3145 * en_dump: dump the state
3146 */
2925
2926#ifdef EN_DDBHOOK
2927/*
2928 * functions we can call from ddb
2929 */
2930
2931/*
2932 * en_dump: dump the state
2933 */
3147
3148#define END_SWSL 0x00000040 /* swsl state */
3149#define END_DRQ 0x00000020 /* drq state */
3150#define END_DTQ 0x00000010 /* dtq state */
3151#define END_RX 0x00000008 /* rx state */
3152#define END_TX 0x00000004 /* tx state */
3153#define END_MREGS 0x00000002 /* registers */
3154#define END_STATS 0x00000001 /* dump stats */
3155
3156#define END_BITS "\20\7SWSL\6DRQ\5DTQ\4RX\3TX\2MREGS\1STATS"
3157
2934#define END_SWSL 0x00000040 /* swsl state */
2935#define END_DRQ 0x00000020 /* drq state */
2936#define END_DTQ 0x00000010 /* dtq state */
2937#define END_RX 0x00000008 /* rx state */
2938#define END_TX 0x00000004 /* tx state */
2939#define END_MREGS 0x00000002 /* registers */
2940#define END_STATS 0x00000001 /* dump stats */
2941
2942#define END_BITS "\20\7SWSL\6DRQ\5DTQ\4RX\3TX\2MREGS\1STATS"
2943
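/*
 * A minimal userland sketch (an assumption, not part of the driver) of
 * how a kernel-style "%b" conversion consumes END_BITS: the first
 * character of the string is the numeric base (\20 == 16) and each
 * "\<n>NAME" pair names bit <n>, counted from 1.  The driver's format
 * strings supply the leading "0x" themselves.
 */
#include <stdio.h>

static void
dump_bits(unsigned int val, const char *fmt)
{
	int bit, any = 0;
	int base = *fmt++;		/* \20 -> hexadecimal */

	printf(base == 8 ? "%o" : "%x", val);
	while ((bit = *fmt++) != '\0') {
		if (val & (1U << (bit - 1))) {
			putchar(any ? ',' : '<');
			any = 1;
			while (*fmt > ' ')
				putchar(*fmt++);
		} else {
			while (*fmt > ' ')	/* skip the name */
				fmt++;
		}
	}
	if (any)
		putchar('>');
	putchar('\n');
}

/* dump_bits(0x41, END_BITS) prints "41<SWSL,STATS>". */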
3158/* Do not staticize - meant for calling from DDB! */
3159int en_dump(unit, level)
3160
3161int unit, level;
3162
2944static void
2945en_dump_stats(const struct en_stats *s)
3163{
2946{
3164 struct en_softc *sc;
3165 int lcv, cnt, slot;
3166 u_int32_t ptr, reg;
3167#ifdef __FreeBSD__
3168 devclass_t dc;
3169 int maxunit;
3170
3171 dc = devclass_find("en");
3172 if (dc == NULL) {
3173 printf("en_dump: can't find devclass!\n");
3174 return 0;
3175 }
3176 maxunit = devclass_get_maxunit(dc);
3177 for (lcv = 0 ; lcv < maxunit ; lcv++) {
3178 sc = devclass_get_softc(dc, lcv);
3179#else
3180 for (lcv = 0 ; lcv < en_cd.cd_ndevs ; lcv++) {
3181 sc = (struct en_softc *) en_cd.cd_devs[lcv];
3182#endif
3183 if (sc == NULL) continue;
3184 if (unit != -1 && unit != lcv)
3185 continue;
3186
3187 printf("dumping device %s at level 0x%b\n", sc->sc_dev.dv_xname, level,
3188 END_BITS);
3189
3190 if (sc->dtq_us == 0) {
3191 printf("<hasn't been en_init'd yet>\n");
3192 continue;
3193 }
3194
3195 if (level & END_STATS) {
3196 printf(" en_stats:\n");
3197 printf(" %d mfix (%d failed); %d/%d head/tail byte DMAs, %d flushes\n",
3198 sc->mfix, sc->mfixfail, sc->headbyte, sc->tailbyte, sc->tailflush);
3199 printf(" %d rx dma overflow interrupts\n", sc->dmaovr);
3200 printf(" %d times we ran out of TX space and stalled\n",
3201 sc->txoutspace);
3202 printf(" %d times we ran out of DTQs\n", sc->txdtqout);
3203 printf(" %d times we launched a packet\n", sc->launch);
3204 printf(" %d times we launched without on-board header\n", sc->lheader);
3205 printf(" %d times we launched without on-board tail\n", sc->ltail);
3206 printf(" %d times we pulled the hw service list\n", sc->hwpull);
3207 printf(" %d times we pushed a vci on the sw service list\n",
3208 sc->swadd);
3209 printf(" %d times RX pulled an mbuf from Q that wasn't ours\n",
3210 sc->rxqnotus);
3211 printf(" %d times RX pulled a good mbuf from Q\n", sc->rxqus);
3212 printf(" %d times we ran out of mbufs *and* DRQs\n", sc->rxoutboth);
3213 printf(" %d times we ran out of DRQs\n", sc->rxdrqout);
3214
3215 printf(" %d trasmit packets dropped due to mbsize\n", sc->txmbovr);
3216 printf(" %d cells trashed due to turned off rxvc\n", sc->vtrash);
3217 printf(" %d cells trashed due to totally full buffer\n", sc->otrash);
3218 printf("  %d cells trashed due to almost full buffer\n", sc->ttrash);
3219 printf(" %d rx mbuf allocation failures\n", sc->rxmbufout);
2947 printf("en_stats:\n");
2948 printf("\t%d/%d mfix (%d failed)\n", s->mfixaddr, s->mfixlen,
2949 s->mfixfail);
2950 printf("\t%d rx dma overflow interrupts\n", s->dmaovr);
2951 printf("\t%d times out of TX space and stalled\n", s->txoutspace);
2952 printf("\t%d times out of DTQs\n", s->txdtqout);
2953 printf("\t%d times launched a packet\n", s->launch);
2954 printf("\t%d times pulled the hw service list\n", s->hwpull);
2955 printf("\t%d times pushed a vci on the sw service list\n", s->swadd);
2956 printf("\t%d times RX pulled an mbuf from Q that wasn't ours\n",
2957 s->rxqnotus);
2958 printf("\t%d times RX pulled a good mbuf from Q\n", s->rxqus);
2959 printf("\t%d times ran out of DRQs\n", s->rxdrqout);
2960 printf("\t%d transmit packets dropped due to mbsize\n", s->txmbovr);
2961 printf("\t%d cells trashed due to turned off rxvc\n", s->vtrash);
2962 printf("\t%d cells trashed due to totally full buffer\n", s->otrash);
2963 printf("\t%d cells trashed due to almost full buffer\n", s->ttrash);
2964 printf("\t%d rx mbuf allocation failures\n", s->rxmbufout);
2965 printf("\t%d times out of tx maps\n", s->txnomap);
3220#ifdef NATM
2966#ifdef NATM
3221#if 0
3222 printf(" %d drops at natmintrq\n", natmintrq.ifq_drops);
3223#endif
3224#ifdef NATM_STAT
2967#ifdef NATM_STAT
3225 printf(" natmintr so_rcv: ok/drop cnt: %d/%d, ok/drop bytes: %d/%d\n",
3226 natm_sookcnt, natm_sodropcnt, natm_sookbytes, natm_sodropbytes);
2968 printf("\tnatmintr so_rcv: ok/drop cnt: %d/%d, ok/drop bytes: %d/%d\n",
2969 natm_sookcnt, natm_sodropcnt, natm_sookbytes, natm_sodropbytes);
3227#endif
3228#endif
2970#endif
2971#endif
3229 }
2972}
3230
2973
3231 if (level & END_MREGS) {
3232 printf("mregs:\n");
3233 printf("resid = 0x%x\n", EN_READ(sc, MID_RESID));
3234 printf("interrupt status = 0x%b\n",
3235 (int)EN_READ(sc, MID_INTSTAT), MID_INTBITS);
3236 printf("interrupt enable = 0x%b\n",
3237 (int)EN_READ(sc, MID_INTENA), MID_INTBITS);
3238 printf("mcsr = 0x%b\n", (int)EN_READ(sc, MID_MAST_CSR), MID_MCSRBITS);
3239 printf("serv_write = [chip=%u] [us=%u]\n", EN_READ(sc, MID_SERV_WRITE),
2974static void
2975en_dump_mregs(struct en_softc *sc)
2976{
2977 u_int cnt;
2978
2979 printf("mregs:\n");
2980 printf("resid = 0x%x\n", en_read(sc, MID_RESID));
2981 printf("interrupt status = 0x%b\n",
2982 (int)en_read(sc, MID_INTSTAT), MID_INTBITS);
2983 printf("interrupt enable = 0x%b\n",
2984 (int)en_read(sc, MID_INTENA), MID_INTBITS);
2985 printf("mcsr = 0x%b\n", (int)en_read(sc, MID_MAST_CSR), MID_MCSRBITS);
2986 printf("serv_write = [chip=%u] [us=%u]\n", en_read(sc, MID_SERV_WRITE),
3240 MID_SL_A2REG(sc->hwslistp));
2987 MID_SL_A2REG(sc->hwslistp));
3241 printf("dma addr = 0x%x\n", EN_READ(sc, MID_DMA_ADDR));
3242 printf("DRQ: chip[rd=0x%x,wr=0x%x], sc[chip=0x%x,us=0x%x]\n",
3243 MID_DRQ_REG2A(EN_READ(sc, MID_DMA_RDRX)),
3244 MID_DRQ_REG2A(EN_READ(sc, MID_DMA_WRRX)), sc->drq_chip, sc->drq_us);
3245 printf("DTQ: chip[rd=0x%x,wr=0x%x], sc[chip=0x%x,us=0x%x]\n",
3246 MID_DTQ_REG2A(EN_READ(sc, MID_DMA_RDTX)),
3247 MID_DTQ_REG2A(EN_READ(sc, MID_DMA_WRTX)), sc->dtq_chip, sc->dtq_us);
2988 printf("dma addr = 0x%x\n", en_read(sc, MID_DMA_ADDR));
2989 printf("DRQ: chip[rd=0x%x,wr=0x%x], sc[chip=0x%x,us=0x%x]\n",
2990 MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX)),
2991 MID_DRQ_REG2A(en_read(sc, MID_DMA_WRRX)), sc->drq_chip, sc->drq_us);
2992 printf("DTQ: chip[rd=0x%x,wr=0x%x], sc[chip=0x%x,us=0x%x]\n",
2993 MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX)),
2994 MID_DTQ_REG2A(en_read(sc, MID_DMA_WRTX)), sc->dtq_chip, sc->dtq_us);
3248
2995
3249 printf("  unusual txspeeds: ");
3250 for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
3251 if (sc->txspeed[cnt])
3252 printf(" vci%d=0x%x", cnt, sc->txspeed[cnt]);
3253 printf("\n");
2996 printf("  unusual txspeeds:");
2997 for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
2998 if (sc->txspeed[cnt])
2999 printf(" vci%d=0x%x", cnt, sc->txspeed[cnt]);
3000 printf("\n");
3254
3001
3255 printf(" rxvc slot mappings: ");
3256 for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
3257 if (sc->rxvc2slot[cnt] != RX_NONE)
3258 printf(" %d->%d", cnt, sc->rxvc2slot[cnt]);
3259 printf("\n");
3002 printf(" rxvc slot mappings:");
3003 for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
3004 if (sc->rxvc2slot[cnt] != RX_NONE)
3005 printf(" %d->%d", cnt, sc->rxvc2slot[cnt]);
3006 printf("\n");
3007}
3260
3008
3261 }
3009static void
3010en_dump_tx(struct en_softc *sc)
3011{
3012 u_int slot;
3262
3013
3263 if (level & END_TX) {
3264 printf("tx:\n");
3265 for (slot = 0 ; slot < EN_NTX; slot++) {
3266 printf("tx%d: start/stop/cur=0x%x/0x%x/0x%x [%d] ", slot,
3267 sc->txslot[slot].start, sc->txslot[slot].stop, sc->txslot[slot].cur,
3268 (sc->txslot[slot].cur - sc->txslot[slot].start)/4);
3269 printf("mbsize=%d, bfree=%d\n", sc->txslot[slot].mbsize,
3270 sc->txslot[slot].bfree);
3271 printf("txhw: base_address=0x%x, size=%u, read=%u, descstart=%u\n",
3272 (u_int)MIDX_BASE(EN_READ(sc, MIDX_PLACE(slot))),
3273 MIDX_SZ(EN_READ(sc, MIDX_PLACE(slot))),
3274 EN_READ(sc, MIDX_READPTR(slot)), EN_READ(sc, MIDX_DESCSTART(slot)));
3275 }
3276 }
3014 printf("tx:\n");
3015 for (slot = 0 ; slot < EN_NTX; slot++) {
3016 printf("tx%d: start/stop/cur=0x%x/0x%x/0x%x [%d] ", slot,
3017 sc->txslot[slot].start, sc->txslot[slot].stop,
3018 sc->txslot[slot].cur,
3019 (sc->txslot[slot].cur - sc->txslot[slot].start) / 4);
3020 printf("mbsize=%d, bfree=%d\n", sc->txslot[slot].mbsize,
3021 sc->txslot[slot].bfree);
3022 printf("txhw: base_address=0x%x, size=%u, read=%u, "
3023 "descstart=%u\n",
3024 (u_int)MIDX_BASE(en_read(sc, MIDX_PLACE(slot))),
3025 MIDX_SZ(en_read(sc, MIDX_PLACE(slot))),
3026 en_read(sc, MIDX_READPTR(slot)),
3027 en_read(sc, MIDX_DESCSTART(slot)));
3028 }
3029}
3277
3030
3278 if (level & END_RX) {
3279 printf(" recv slots:\n");
3280 for (slot = 0 ; slot < sc->en_nrx; slot++) {
3281 printf("rx%d: vci=%d: start/stop/cur=0x%x/0x%x/0x%x ", slot,
3282 sc->rxslot[slot].atm_vci, sc->rxslot[slot].start,
3283 sc->rxslot[slot].stop, sc->rxslot[slot].cur);
3284 printf("mode=0x%x, atm_flags=0x%x, oth_flags=0x%x\n",
3285 sc->rxslot[slot].mode, sc->rxslot[slot].atm_flags,
3286 sc->rxslot[slot].oth_flags);
3287 printf("RXHW: mode=0x%x, DST_RP=0x%x, WP_ST_CNT=0x%x\n",
3288 EN_READ(sc, MID_VC(sc->rxslot[slot].atm_vci)),
3289 EN_READ(sc, MID_DST_RP(sc->rxslot[slot].atm_vci)),
3290 EN_READ(sc, MID_WP_ST_CNT(sc->rxslot[slot].atm_vci)));
3291 }
3292 }
3031static void
3032en_dump_rx(struct en_softc *sc)
3033{
3034 u_int slot;
3293
3035
3294 if (level & END_DTQ) {
3295 printf(" dtq [need_dtqs=%d,dtq_free=%d]:\n",
3296 sc->need_dtqs, sc->dtq_free);
3297 ptr = sc->dtq_chip;
3298 while (ptr != sc->dtq_us) {
3299 reg = EN_READ(sc, ptr);
3300 printf("\t0x%x=[cnt=%d, chan=%d, end=%d, type=%d @ 0x%x]\n",
3301 sc->dtq[MID_DTQ_A2REG(ptr)], MID_DMA_CNT(reg), MID_DMA_TXCHAN(reg),
3302 (reg & MID_DMA_END) != 0, MID_DMA_TYPE(reg), EN_READ(sc, ptr+4));
3303 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, ptr, 8);
3304 }
3305 }
3306
3307 if (level & END_DRQ) {
3308 printf(" drq [need_drqs=%d,drq_free=%d]:\n",
3309 sc->need_drqs, sc->drq_free);
3310 ptr = sc->drq_chip;
3311 while (ptr != sc->drq_us) {
3312 reg = EN_READ(sc, ptr);
3313 printf("\t0x%x=[cnt=%d, chan=%d, end=%d, type=%d @ 0x%x]\n",
3314 sc->drq[MID_DRQ_A2REG(ptr)], MID_DMA_CNT(reg), MID_DMA_RXVCI(reg),
3315 (reg & MID_DMA_END) != 0, MID_DMA_TYPE(reg), EN_READ(sc, ptr+4));
3316 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, ptr, 8);
3317 }
3318 }
3319
3320 if (level & END_SWSL) {
3321 printf(" swslist [size=%d]: ", sc->swsl_size);
3322 for (cnt = sc->swsl_head ; cnt != sc->swsl_tail ;
3323 cnt = (cnt + 1) % MID_SL_N)
3324 printf("0x%x ", sc->swslist[cnt]);
3325 printf("\n");
3326 }
3327 }
3328 return(0);
3036 printf(" recv slots:\n");
3037 for (slot = 0 ; slot < sc->en_nrx; slot++) {
3038 printf("rx%d: vci=%d: start/stop/cur=0x%x/0x%x/0x%x ",
3039 slot, sc->rxslot[slot].atm_vci,
3040 sc->rxslot[slot].start, sc->rxslot[slot].stop,
3041 sc->rxslot[slot].cur);
3042 printf("mode=0x%x, atm_flags=0x%x, oth_flags=0x%x\n",
3043 sc->rxslot[slot].mode, sc->rxslot[slot].atm_flags,
3044 sc->rxslot[slot].oth_flags);
3045 printf("RXHW: mode=0x%x, DST_RP=0x%x, WP_ST_CNT=0x%x\n",
3046 en_read(sc, MID_VC(sc->rxslot[slot].atm_vci)),
3047 en_read(sc, MID_DST_RP(sc->rxslot[slot].atm_vci)),
3048 en_read(sc,
3049 MID_WP_ST_CNT(sc->rxslot[slot].atm_vci)));
3050 }
3329}
3330
3331/*
3051}
3052
3053/*
3332 * en_dumpmem: dump the memory
3054 * This is only correct for non-adaptec adapters
3333 */
3055 */
3056static void
3057en_dump_dtqs(struct en_softc *sc)
3058{
3059 uint32_t ptr, reg;
3334
3060
3061 printf(" dtq [need_dtqs=%d,dtq_free=%d]:\n", sc->need_dtqs,
3062 sc->dtq_free);
3063 ptr = sc->dtq_chip;
3064 while (ptr != sc->dtq_us) {
3065 reg = en_read(sc, ptr);
3066 printf("\t0x%x=[%#x cnt=%d, chan=%d, end=%d, type=%d @ 0x%x]\n",
3067 sc->dtq[MID_DTQ_A2REG(ptr)], reg, MID_DMA_CNT(reg),
3068 MID_DMA_TXCHAN(reg), (reg & MID_DMA_END) != 0,
3069 MID_DMA_TYPE(reg), en_read(sc, ptr + 4));
3070 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, ptr, 8);
3071 }
3072}
3073
3074static void
3075en_dump_drqs(struct en_softc *sc)
3076{
3077 uint32_t ptr, reg;
3078
3079 printf(" drq [need_drqs=%d,drq_free=%d]:\n", sc->need_drqs,
3080 sc->drq_free);
3081 ptr = sc->drq_chip;
3082 while (ptr != sc->drq_us) {
3083 reg = en_read(sc, ptr);
3084 printf("\t0x%x=[cnt=%d, chan=%d, end=%d, type=%d @ 0x%x]\n",
3085 sc->drq[MID_DRQ_A2REG(ptr)], MID_DMA_CNT(reg),
3086 MID_DMA_RXVCI(reg), (reg & MID_DMA_END) != 0,
3087 MID_DMA_TYPE(reg), en_read(sc, ptr + 4));
3088 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, ptr, 8);
3089 }
3090}
3091
3335/* Do not staticize - meant for calling from DDB! */
3092/* Do not staticize - meant for calling from DDB! */
3336int en_dumpmem(unit, addr, len)
3093int
3094en_dump(int unit, int level)
3095{
3096 struct en_softc *sc;
3097 int lcv, cnt;
3098 devclass_t dc;
3099 int maxunit;
3337
3100
3338int unit, addr, len;
3101 dc = devclass_find("en");
3102 if (dc == NULL) {
3103 printf("%s: can't find devclass!\n", __func__);
3104 return (0);
3105 }
3106 maxunit = devclass_get_maxunit(dc);
3107 for (lcv = 0 ; lcv < maxunit ; lcv++) {
3108 sc = devclass_get_softc(dc, lcv);
3109 if (sc == NULL)
3110 continue;
3111 if (unit != -1 && unit != lcv)
3112 continue;
3339
3113
3114 if_printf(&sc->enif, "dumping device at level 0x%b\n",
3115 level, END_BITS);
3116
3117 if (sc->dtq_us == 0) {
3118 printf("<hasn't been en_init'd yet>\n");
3119 continue;
3120 }
3121
3122 if (level & END_STATS)
3123 en_dump_stats(&sc->stats);
3124 if (level & END_MREGS)
3125 en_dump_mregs(sc);
3126 if (level & END_TX)
3127 en_dump_tx(sc);
3128 if (level & END_RX)
3129 en_dump_rx(sc);
3130 if (level & END_DTQ)
3131 en_dump_dtqs(sc);
3132 if (level & END_DRQ)
3133 en_dump_drqs(sc);
3134
3135 if (level & END_SWSL) {
3136 printf(" swslist [size=%d]: ", sc->swsl_size);
3137 for (cnt = sc->swsl_head ; cnt != sc->swsl_tail ;
3138 cnt = (cnt + 1) % MID_SL_N)
3139 printf("0x%x ", sc->swslist[cnt]);
3140 printf("\n");
3141 }
3142 }
3143 return (0);
3144}
3145
3146/*
3147 * en_dumpmem: dump the memory
3148 *
3149 * Do not staticize - meant for calling from DDB!
3150 */
3151int
3152en_dumpmem(int unit, int addr, int len)
3340{
3153{
3341 struct en_softc *sc;
3342 u_int32_t reg;
3343#ifdef __FreeBSD__
3344 devclass_t dc;
3154 struct en_softc *sc;
3155 uint32_t reg;
3156 devclass_t dc;
3345
3157
3346 dc = devclass_find("en");
3347 if (dc == NULL) {
3348 printf("en_dumpmem: can't find devclass!\n");
3349 return 0;
3350 }
3351 sc = devclass_get_softc(dc, unit);
3352#else
3353 if (unit < 0 || unit > en_cd.cd_ndevs ||
3354 (sc = (struct en_softc *) en_cd.cd_devs[unit]) == NULL) {
3355 printf("invalid unit number: %d\n", unit);
3356 return(0);
3357 }
3358#endif
3158 dc = devclass_find("en");
3159 if (dc == NULL) {
3160 printf("%s: can't find devclass\n", __func__);
3161 return (0);
3162 }
3163 sc = devclass_get_softc(dc, unit);
3164 if (sc == NULL) {
3165 printf("%s: invalid unit number: %d\n", __func__, unit);
3166 return (0);
3167 }
3359
3168
3360 addr = addr & ~3;
3361 if (addr < MID_RAMOFF || addr + len*4 > MID_MAXOFF || len <= 0) {
3362 printf("invalid addr/len number: %d, %d\n", addr, len);
3363 return(0);
3364 }
3365 printf("dumping %d words starting at offset 0x%x\n", len, addr);
3366 while (len--) {
3367 reg = EN_READ(sc, addr);
3368 printf("mem[0x%x] = 0x%x\n", addr, reg);
3369 addr += 4;
3370 }
3371 return(0);
3169 addr = addr & ~3;
3170 if (addr < MID_RAMOFF || addr + len * 4 > MID_MAXOFF || len <= 0) {
3171 printf("invalid addr/len number: %d, %d\n", addr, len);
3172 return (0);
3173 }
3174 printf("dumping %d words starting at offset 0x%x\n", len, addr);
3175 while (len--) {
3176 reg = en_read(sc, addr);
3177 printf("mem[0x%x] = 0x%x\n", addr, reg);
3178 addr += 4;
3179 }
3180 return (0);
3372}
3373#endif
3181}
3182#endif
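/*
 * Example use from the ddb(4) prompt (a sketch; the exact syntax for
 * passing arguments to "call" may vary between FreeBSD versions):
 *
 *	call en_dump(0, 0x7f)		dump everything (all END_* bits) for unit 0
 *	call en_dump(-1, 0x1)		dump only the statistics of every unit
 *	call en_dumpmem(0, addr, 16)	dump 16 words of card RAM; addr is
 *					rounded down to a word boundary and
 *					must lie between MID_RAMOFF and
 *					MID_MAXOFF
 *
 * A unit of -1 means "all units"; the level argument is a bitmask of the
 * END_* values defined above.
 */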