1/*	$NetBSD: nextdma.c,v 1.46 2010/04/24 19:58:13 dbj Exp $	*/
2/*
3 * Copyright (c) 1998 Darrin B. Jewell
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__KERNEL_RCSID(0, "$NetBSD: nextdma.c,v 1.46 2010/04/24 19:58:13 dbj Exp $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/mbuf.h>
33#include <sys/syslog.h>
34#include <sys/socket.h>
35#include <sys/device.h>
36#include <sys/malloc.h>
37#include <sys/ioctl.h>
38#include <sys/errno.h>
39
40#define _M68K_BUS_DMA_PRIVATE
41#include <machine/autoconf.h>
42#include <machine/cpu.h>
43#include <machine/intr.h>
44
45#include <m68k/cacheops.h>
46
47#include <next68k/next68k/isr.h>
48#include <next68k/next68k/nextrom.h>
49
50#include <next68k/dev/intiovar.h>
51
52#include "nextdmareg.h"
53#include "nextdmavar.h"
54
55#include "esp.h"
56#include "xe.h"
57
#if DEBUG
#define ND_DEBUG
#endif

extern int turbo;

/*
 * Local override of panic(): trap into the debugger (trap #15) so the
 * machine state at the failure point can be inspected, then print the
 * message.  Wrapped in do { } while (0) so the two statements cannot be
 * split apart by an unbraced "if"; callers keep printf-style arguments.
 */
#define panic(...)	do {						\
	__asm volatile("trap  #15");					\
	printf(__VA_ARGS__);						\
} while (0)

#define NEXTDMA_DEBUG nextdma_debug
/* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && nextdma_debug */
#if defined(ND_DEBUG)
int nextdma_debug = 0;
/* do { } while (0) keeps the conditional safe inside unbraced if/else. */
#define DPRINTF(x) do { if (NEXTDMA_DEBUG) printf x; } while (0)
int ndtraceshow = 0;
/* In-memory trace ring written a character (or short string) at a time. */
char ndtrace[8192+100];
char *ndtracep = ndtrace;
#define NDTRACEIF(x) if (10 && /* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && */ ndtracep < (ndtrace + 8192)) do {x;} while (0)
#else
#define DPRINTF(x)
#define NDTRACEIF(x)
#endif
#define PRINTF(x) printf x
80
#if defined(ND_DEBUG)
/*
 * Per-channel rings of recently observed DMA CSR values, captured by
 * nextdma_debug_savestate() from the interrupt path and dumped on demand.
 * Each index wraps modulo the array size (see nextdma_debug_savestate).
 */
int nextdma_debug_enetr_idx = 0;
unsigned int nextdma_debug_enetr_state[100] = { 0 };
int nextdma_debug_scsi_idx = 0;
unsigned int nextdma_debug_scsi_state[100] = { 0 };

void nextdma_debug_initstate(struct nextdma_softc *);
void nextdma_debug_savestate(struct nextdma_softc *, unsigned int);
void nextdma_debug_scsi_dumpstate(void);
void nextdma_debug_enetr_dumpstate(void);
#endif
92
93
/* autoconfiguration entry points */
int	nextdma_match(struct device *, struct cfdata *, void *);
void	nextdma_attach(struct device *, struct device *, void *);

/* internal helpers (see definitions below for contracts) */
void nextdmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t, bus_size_t, int);
int nextdma_continue(struct nextdma_softc *);
void nextdma_rotate(struct nextdma_softc *);

void nextdma_setup_cont_regs(struct nextdma_softc *);
void nextdma_setup_curr_regs(struct nextdma_softc *);

#if NESP > 0
static int nextdma_esp_intr(void *);
#endif
#if NXE > 0
static int nextdma_enet_intr(void *);
#endif

/*
 * Shorthand 32-bit register access.  NOTE: both macros expect a local
 * variable named "nsc" (struct nextdma_softc *) to be in scope.
 */
#define nd_bsr4(reg) \
	bus_space_read_4(nsc->sc_bst, nsc->sc_bsh, (reg))
#define nd_bsw4(reg,val) \
	bus_space_write_4(nsc->sc_bst, nsc->sc_bsh, (reg), (val))
115
CFATTACH_DECL(nextdma, sizeof(struct nextdma_softc),
    nextdma_match, nextdma_attach, NULL, NULL);

/*
 * Table of the DMA channels this driver services; one nextdma device
 * instance attaches per entry, in table order.  Fields: name, CSR base
 * address, register window size, interrupt number, handler.
 */
static struct nextdma_channel nextdma_channel[] = {
#if NESP > 0
	{ "scsi", NEXT_P_SCSI_CSR, DD_SIZE, NEXT_I_SCSI_DMA, &nextdma_esp_intr },
#endif
#if NXE > 0
	{ "enetx", NEXT_P_ENETX_CSR, DD_SIZE, NEXT_I_ENETX_DMA, &nextdma_enet_intr },
	{ "enetr", NEXT_P_ENETR_CSR, DD_SIZE, NEXT_I_ENETR_DMA, &nextdma_enet_intr },
#endif
};
static int nnextdma_channels = (sizeof(nextdma_channel)/sizeof(nextdma_channel[0]));

/* Number of channels attached so far; also the next nextdma_channel[] index. */
static int attached = 0;
131
132struct nextdma_softc *
133nextdma_findchannel(const char *name)
134{
135	device_t dev;
136	deviter_t di;
137
138	for (dev = deviter_first(&di, DEVITER_F_ROOT_FIRST);
139	     dev != NULL;
140	     dev = deviter_next(&di)) {
141		if (strncmp(dev->dv_xname, "nextdma", 7) == 0) {
142			struct nextdma_softc *nsc = device_private(dev);
143			if (strcmp(nsc->sc_chan->nd_name, name) == 0)
144				break;
145		}
146	}
147	deviter_release(&di);
148	if (dev == NULL)
149		return NULL;
150	return device_private(dev);
151}
152
153int
154nextdma_match(struct device *parent, struct cfdata *match, void *aux)
155{
156	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
157
158	if (attached >= nnextdma_channels)
159		return (0);
160
161	ia->ia_addr = (void *)nextdma_channel[attached].nd_base;
162
163	return (1);
164}
165
/*
 * Autoconf attach: bind this instance to the next channel table entry,
 * map its registers, reset the hardware, and hook up its interrupt.
 * Order matters: registers must be mapped before nextdma_init() touches
 * them, and the handler must be linked before the interrupt is enabled.
 */
void
nextdma_attach(struct device *parent, struct device *self, void *aux)
{
	struct nextdma_softc *nsc = (struct nextdma_softc *)self;
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached >= nnextdma_channels)
		return;

	nsc->sc_chan = &nextdma_channel[attached];

	nsc->sc_dmat = ia->ia_dmat;
	nsc->sc_bst = ia->ia_bst;

	if (bus_space_map(nsc->sc_bst, nsc->sc_chan->nd_base,
			  nsc->sc_chan->nd_size, 0, &nsc->sc_bsh)) {
		panic("%s: can't map DMA registers for channel %s",
		      nsc->sc_dev.dv_xname, nsc->sc_chan->nd_name);
	}

	/* Reset the channel and clear its software state. */
	nextdma_init (nsc);

	isrlink_autovec(nsc->sc_chan->nd_intrfunc, nsc,
			NEXT_I_IPL(nsc->sc_chan->nd_intr), 10, NULL);
	INTR_ENABLE(nsc->sc_chan->nd_intr);

	printf (": channel %d (%s)\n", attached,
		nsc->sc_chan->nd_name);
	attached++;

	return;
}
198
/*
 * Reset the channel hardware and clear the software transfer state.
 * Called from attach and whenever the channel must return to idle.
 */
void
nextdma_init(struct nextdma_softc *nsc)
{
#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
		    NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA init ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

	/* Forget any current and continue maps. */
	nsc->sc_stat.nd_map = NULL;
	nsc->sc_stat.nd_idx = 0;
	nsc->sc_stat.nd_map_cont = NULL;
	nsc->sc_stat.nd_idx_cont = 0;
	nsc->sc_stat.nd_exception = 0;

	/* Pulse reset, then leave the CSR in its idle state. */
	nd_bsw4 (DD_CSR, DMACSR_RESET | DMACSR_CLRCOMPLETE);
	nd_bsw4 (DD_CSR, 0);

#if 01
	/* With both maps NULL this loads the registers' idle/poison values. */
	nextdma_setup_curr_regs(nsc);
	nextdma_setup_cont_regs(nsc);
#endif

#if defined(DIAGNOSTIC)
	{
		u_long state;
		state = nd_bsr4 (DD_CSR);

#if 1
		/* mourning (a 25 MHz 68040 mono slab) appears to set BUSEXC
		 * milo (a 25 MHz 68040 mono cube) didn't have this problem
		 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
		 */
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		/* Any of these bits still set means the reset didn't take. */
		if (state) {
			nextdma_print(nsc);
			panic("DMA did not reset");
		}
	}
#endif
}
249
/*
 * Abort any transfer in progress: reset the hardware, hand a pending
 * continue map back to the client through the completed callback, and
 * run the shutdown callback.  Runs at spldma() to exclude the channel's
 * interrupt handler.
 */
void
nextdma_reset(struct nextdma_softc *nsc)
{
	int s;
	struct nextdma_status *stat = &nsc->sc_stat;

	s = spldma();

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 1) nextdma_print(nsc);
#endif

	nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
	if ((stat->nd_map) || (stat->nd_map_cont)) {
		if (stat->nd_map_cont) {
			DPRINTF(("DMA: resetting with non null continue map\n"));
			/* Return the never-started continue map to the client. */
			if (nsc->sc_conf.nd_completed_cb)
				(*nsc->sc_conf.nd_completed_cb)
					(stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);

			stat->nd_map_cont = 0;
			stat->nd_idx_cont = 0;
		}
		if (nsc->sc_conf.nd_shutdown_cb)
			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
		stat->nd_map = 0;
		stat->nd_idx = 0;
	}

	splx(s);
}
283
284/****************************************************************/
285
286
/*
 * Advance the software state one segment: the continue map/segment
 * becomes the current one, and once the continue map's segments are
 * exhausted the client's continue callback is asked for a fresh map to
 * queue next (its dm_xfer_len is zeroed before use).
 */
void
nextdma_rotate(struct nextdma_softc *nsc)
{
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'r');
	DPRINTF(("DMA nextdma_rotate()\n"));

	/* Rotate the continue map into the current map */
	stat->nd_map = stat->nd_map_cont;
	stat->nd_idx = stat->nd_idx_cont;

	/* NB: the ++ below advances the continue index as a side effect;
	 * when it runs off the end of the map, fetch a replacement map. */
	if ((!stat->nd_map_cont) ||
	    ((++stat->nd_idx_cont >= stat->nd_map_cont->dm_nsegs))) {
		if (nsc->sc_conf.nd_continue_cb) {
			stat->nd_map_cont = (*nsc->sc_conf.nd_continue_cb)
				(nsc->sc_conf.nd_cb_arg);
			if (stat->nd_map_cont) {
				stat->nd_map_cont->dm_xfer_len = 0;
			}
		} else {
			stat->nd_map_cont = 0;
		}
		stat->nd_idx_cont = 0;
	}

#if defined(DIAGNOSTIC) && 0
	/* Disabled: verify the new continue segment is DMA-aligned. */
	if (stat->nd_map_cont) {
		if (!DMA_BEGINALIGNED(stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr)) {
			nextdma_print(nsc);
			panic("DMA request unaligned at start");
		}
		if (!DMA_ENDALIGNED(stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
				stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len)) {
			nextdma_print(nsc);
			panic("DMA request unaligned at end");
		}
	}
#endif

}
331
/*
 * Load the hardware "current" register set (NEXT/LIMIT plus their saved
 * shadows on non-turbo machines) from the current map segment, then, if
 * DIAGNOSTIC, read the registers back to verify the writes stuck.
 */
void
nextdma_setup_curr_regs(struct nextdma_softc *nsc)
{
	bus_addr_t dd_next;
	bus_addr_t dd_limit;
	bus_addr_t dd_saved_next;
	bus_addr_t dd_saved_limit;
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'C');
	DPRINTF(("DMA nextdma_setup_curr_regs()\n"));

	if (stat->nd_map) {
		dd_next = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
		dd_limit = (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
			    stat->nd_map->dm_segs[stat->nd_idx].ds_len);

		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
			dd_limit |= 0x80000000; /* Ethernet transmit needs secret magic */
			dd_limit += 15;
		}
	} else {
		/* No current map: poison (non-turbo) or zero (turbo) the regs. */
		dd_next = turbo ? 0 : 0xdeadbeef;
		dd_limit = turbo ? 0 : 0xdeadbeef;
	}

	dd_saved_next = dd_next;
	dd_saved_limit = dd_limit;

	NDTRACEIF (if (stat->nd_map) {
		sprintf (ndtracep, "%ld", stat->nd_map->dm_segs[stat->nd_idx].ds_len);
		ndtracep += strlen (ndtracep);
	});

	/* Non-turbo ethernet transmit takes NEXT through the INITBUF alias. */
	if (!turbo && (nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA)) {
		nd_bsw4 (DD_NEXT_INITBUF, dd_next);
	} else {
		nd_bsw4 (DD_NEXT, dd_next);
	}
	nd_bsw4 (DD_LIMIT, dd_limit);
	if (!turbo) nd_bsw4 (DD_SAVED_NEXT, dd_saved_next);
	if (!turbo) nd_bsw4 (DD_SAVED_LIMIT, dd_saved_limit);

#ifdef DIAGNOSTIC
	/* Read back everything just written and fail loudly on mismatch. */
	if ((nd_bsr4 (DD_NEXT_INITBUF) != dd_next)
	    || (nd_bsr4 (DD_NEXT) != dd_next)
	    || (nd_bsr4 (DD_LIMIT) != dd_limit)
	    || (!turbo && (nd_bsr4 (DD_SAVED_NEXT) != dd_saved_next))
	    || (!turbo && (nd_bsr4 (DD_SAVED_LIMIT) != dd_saved_limit))
		) {
		nextdma_print(nsc);
		panic("DMA failure writing to current regs");
	}
#endif
}
387
/*
 * Load the hardware "continue" register set (START/STOP plus their saved
 * shadows on non-turbo machines) from the continue map segment, then, if
 * DIAGNOSTIC, read the registers back to verify the writes stuck.
 */
void
nextdma_setup_cont_regs(struct nextdma_softc *nsc)
{
	bus_addr_t dd_start;
	bus_addr_t dd_stop;
	bus_addr_t dd_saved_start;
	bus_addr_t dd_saved_stop;
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'c');
	DPRINTF(("DMA nextdma_setup_regs()\n"));

	if (stat->nd_map_cont) {
		dd_start = stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr;
		dd_stop  = (stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
			    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);

		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
			dd_stop |= 0x80000000; /* Ethernet transmit needs secret magic */
			dd_stop += 15;
		}
	} else {
		/* No continue map: poison (non-turbo); on turbo, START mirrors NEXT. */
		dd_start = turbo ? nd_bsr4 (DD_NEXT) : 0xdeadbee0;
		dd_stop = turbo ? 0 : 0xdeadbee0;
	}

	dd_saved_start = dd_start;
	dd_saved_stop  = dd_stop;

	NDTRACEIF (if (stat->nd_map_cont) {
		sprintf (ndtracep, "%ld", stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
		ndtracep += strlen (ndtracep);
	});

	nd_bsw4 (DD_START, dd_start);
	nd_bsw4 (DD_STOP, dd_stop);
	if (!turbo) nd_bsw4 (DD_SAVED_START, dd_saved_start);
	if (!turbo) nd_bsw4 (DD_SAVED_STOP, dd_saved_stop);
	/* Turbo ethernet receive also wants START mirrored at DD_STOP-0x40. */
	if (turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETR_DMA)
		nd_bsw4 (DD_STOP - 0x40, dd_start);

#ifdef DIAGNOSTIC
	/* Read back and verify (STOP only checked when nonzero). */
	if ((nd_bsr4 (DD_START) != dd_start)
	    || (dd_stop && (nd_bsr4 (DD_STOP) != dd_stop))
	    || (!turbo && (nd_bsr4 (DD_SAVED_START) != dd_saved_start))
	    || (!turbo && (nd_bsr4 (DD_SAVED_STOP) != dd_saved_stop))
		) {
		nextdma_print(nsc);
		panic("DMA failure writing to continue regs");
	}
#endif
}
440
441/****************************************************************/
442
443#if NESP > 0
/*
 * SCSI channel interrupt handler: confirm the interrupt is ours, then
 * delegate all work to the esp driver's DMA handler.  Returns nonzero
 * iff the interrupt was claimed.
 */
static int
nextdma_esp_intr(void *arg)
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_softc *nsc = arg;
	int esp_dma_int(void *); /* XXX */

	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
		return 0;
	/* Handle dma interrupts */

	return esp_dma_int (nsc->sc_conf.nd_cb_arg);

}
462#endif
463
464#if NXE > 0
/*
 * Interrupt handler for the ethernet transmit and receive DMA channels.
 * Decodes the CSR into a small condition bitmask, determines how far the
 * hardware actually advanced (slimit), credits the transferred bytes to
 * the current map, completes that map, and then either continues the
 * chain or performs shutdown.  Returns nonzero iff the interrupt was ours.
 */
static int
nextdma_enet_intr(void *arg)
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_softc *nsc = arg;
	unsigned int state;
	bus_addr_t onext;	/* start of current segment */
	bus_addr_t olimit;	/* end of current segment */
	bus_addr_t slimit;	/* where the hardware actually stopped */
	int result;
	struct nextdma_status *stat = &nsc->sc_stat;

	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
		return 0;
	/* Handle dma interrupts */

	NDTRACEIF (*ndtracep++ = 'D');
#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
		    NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA interrupt ipl (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (!stat->nd_map) {
		nextdma_print(nsc);
		panic("DMA missing current map in interrupt!");
	}
#endif

	state = nd_bsr4 (DD_CSR);

#if defined(ND_DEBUG)
	nextdma_debug_savestate(nsc, state);
#endif

#ifdef DIAGNOSTIC
	if (/* (state & DMACSR_READ) || */ !(state & DMACSR_COMPLETE)) {
		char sbuf[256];
		nextdma_print(nsc);
		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("DMA: state 0x%s\n",sbuf);
		panic("DMA complete not set in interrupt");
	}
#endif

	DPRINTF(("DMA: finishing xfer\n"));

	onext = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
	olimit = onext + stat->nd_map->dm_segs[stat->nd_idx].ds_len;

	/* Collapse the relevant conditions into a 4-bit case index:
	 * 0x01 = ENABLE, 0x02 = SUPDATE, 0x04 = expecting shutdown,
	 * 0x08 = BUSEXC.  The switch below keys off this value.
	 */
	result = 0;
	if (state & DMACSR_ENABLE) {
		/* enable bit was set */
		result |= 0x01;
	}
	if (state & DMACSR_SUPDATE) {
		/* supdate bit was set */
		result |= 0x02;
	}
	if (stat->nd_map_cont == NULL) {
		KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
		/* Expecting a shutdown, didn't SETSUPDATE last turn */
		result |= 0x04;
	}
	if (state & DMACSR_BUSEXC) {
		/* bus exception bit was set */
		result |= 0x08;
	}
	/* Pick which register tells us where the hardware stopped. */
	switch (result) {
	case 0x00: /* !BUSEXC && !expecting && !SUPDATE && !ENABLE */
	case 0x08: /* BUSEXC && !expecting && !SUPDATE && !ENABLE */
		if (turbo) {
			volatile u_int *limit = (volatile u_int *)IIOV(0x2000050+0x4000);
			slimit = *limit;
		} else {
			slimit = nd_bsr4 (DD_SAVED_LIMIT);
		}
		break;
	case 0x01: /* !BUSEXC && !expecting && !SUPDATE && ENABLE */
	case 0x09: /* BUSEXC && !expecting && !SUPDATE && ENABLE */
		if (turbo) {
			volatile u_int *limit = (volatile u_int *)IIOV(0x2000050+0x4000);
			slimit = *limit;
		} else {
			slimit = nd_bsr4 (DD_SAVED_LIMIT);
		}
		break;
	case 0x02: /* !BUSEXC && !expecting && SUPDATE && !ENABLE */
	case 0x0a: /* BUSEXC && !expecting && SUPDATE && !ENABLE */
		slimit = nd_bsr4 (DD_NEXT);
		break;
	case 0x04:  /* !BUSEXC && expecting && !SUPDATE && !ENABLE */
	case 0x0c: /* BUSEXC && expecting && !SUPDATE && !ENABLE */
		slimit = nd_bsr4 (DD_LIMIT);
		break;
	default:
#ifdef DIAGNOSTIC
	{
		char sbuf[256];
		printf("DMA: please send this output to port-next68k-maintainer@NetBSD.org:\n");
		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("DMA: state 0x%s\n",sbuf);
		nextdma_print(nsc);
		panic("DMA: condition 0x%02x not yet documented to occur",result);
	}
#endif
	/* Non-DIAGNOSTIC kernels limp along assuming a full transfer. */
	slimit = olimit;
	break;
	}

	/* Undo the "secret magic" applied in nextdma_setup_*_regs(). */
	if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
		slimit &= ~0x80000000;
		slimit -= 15;
	}

#ifdef DIAGNOSTIC
	if ((state & DMACSR_READ))
		DPRINTF (("limits: 0x%08lx <= 0x%08lx <= 0x%08lx %s\n", onext, slimit, olimit,
			  (state & DMACSR_READ) ? "read" : "write"));
	/* The stop point must lie within the current segment. */
	if ((slimit < onext) || (slimit > olimit)) {
		char sbuf[256];
		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("DMA: state 0x%s\n",sbuf);
		nextdma_print(nsc);
		panic("DMA: Unexpected limit register (0x%08lx) in finish_xfer",slimit);
	}
#endif

#ifdef DIAGNOSTIC
	/* Mid-map segments must have completed fully if we're continuing. */
	if ((state & DMACSR_ENABLE) && ((stat->nd_idx+1) != stat->nd_map->dm_nsegs)) {
		if (slimit != olimit) {
			char sbuf[256];
			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
			printf("DMA: state 0x%s\n",sbuf);
			nextdma_print(nsc);
			panic("DMA: short limit register (0x%08lx) w/o finishing map.",slimit);
		}
	}
#endif

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 2) nextdma_print(nsc);
#endif

	/* Credit the bytes actually moved in this segment. */
	stat->nd_map->dm_xfer_len += slimit-onext;

	/* If we've reached the end of the current map, then inform
	 * that we've completed that map.
	 */
	if ((stat->nd_idx+1) == stat->nd_map->dm_nsegs) {
		if (nsc->sc_conf.nd_completed_cb)
			(*nsc->sc_conf.nd_completed_cb)
				(stat->nd_map, nsc->sc_conf.nd_cb_arg);
	} else {
		KASSERT(stat->nd_map == stat->nd_map_cont);
		KASSERT(stat->nd_idx+1 == stat->nd_idx_cont);
	}
	stat->nd_map = 0;
	stat->nd_idx = 0;

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG) {
		char sbuf[256];
		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("CLNDMAP: dd->dd_csr          = 0x%s\n",   sbuf);
	}
#endif
	if (state & DMACSR_ENABLE) {
		u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */

		/* Queue the next segment and keep the channel running,
		 * requesting SUPDATE only if another map is queued after it. */
		nextdma_rotate(nsc);
		nextdma_setup_cont_regs(nsc);

		if (state & DMACSR_READ) {
			dmadir = DMACSR_SETREAD;
		} else {
			dmadir = DMACSR_SETWRITE;
		}

		if (stat->nd_map_cont == NULL) {
			KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | dmadir);
			NDTRACEIF (*ndtracep++ = 'g');
		} else {
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE);
			NDTRACEIF (*ndtracep++ = 'G');
		}
	} else {
		DPRINTF(("DMA: a shutdown occurred\n"));
		nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);

		/* Cleanup more incomplete transfers */
		/* cleanup continue map */
		if (stat->nd_map_cont) {
			DPRINTF(("DMA: shutting down with non null continue map\n"));
			if (nsc->sc_conf.nd_completed_cb)
				(*nsc->sc_conf.nd_completed_cb)
					(stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);

			stat->nd_map_cont = 0;
			stat->nd_idx_cont = 0;
		}
		if (nsc->sc_conf.nd_shutdown_cb)
			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
	}

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA exiting interrupt ipl (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

	return(1);
}
694#endif
695
696/*
697 * Check to see if dma has finished for a channel */
698int
699nextdma_finished(struct nextdma_softc *nsc)
700{
701	int r;
702	int s;
703	struct nextdma_status *stat = &nsc->sc_stat;
704
705	s = spldma();
706	r = (stat->nd_map == NULL) && (stat->nd_map_cont == NULL);
707	splx(s);
708
709	return(r);
710}
711
/*
 * Kick off a new transfer in the given direction (DMACSR_SETREAD or
 * DMACSR_SETWRITE).  The channel must be idle; the client's continue
 * callback supplies the maps.  Preloads both the current and continue
 * register sets, then enables the channel (with SUPDATE when a second
 * map is already queued).
 */
void
nextdma_start(struct nextdma_softc *nsc, u_long dmadir)
{
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'n');
#ifdef DIAGNOSTIC
	if (!nextdma_finished(nsc)) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		panic("DMA trying to start before previous finished on intr(0x%s)", sbuf);
	}
#endif

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA start (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (stat->nd_map) {
		nextdma_print(nsc);
		panic("DMA: nextdma_start() with non null map");
	}
	if (stat->nd_map_cont) {
		nextdma_print(nsc);
		panic("DMA: nextdma_start() with non null continue map");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE");
	}
#endif

#if defined(ND_DEBUG)
	nextdma_debug_initstate(nsc);
#endif

	/* preload both the current and the continue maps */
	nextdma_rotate(nsc);

#ifdef DIAGNOSTIC
	/* After the first rotate the first map sits in the continue slot. */
	if (!stat->nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	/* Second rotate moves it into the current slot and queues another. */
	nextdma_rotate(nsc);

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA initiating DMA %s of %d segments on intr(0x%s)\n",
		       (dmadir == DMACSR_SETREAD ? "read" : "write"), stat->nd_map->dm_nsegs, sbuf);
	}
#endif

	/* Reset + INITBUF with direction, idle the CSR, then load both
	 * register sets before enabling the channel. */
	nd_bsw4 (DD_CSR, (turbo ? DMACSR_INITBUFTURBO : DMACSR_INITBUF) |
		 DMACSR_RESET | dmadir);
	nd_bsw4 (DD_CSR, 0);

	nextdma_setup_curr_regs(nsc);
	nextdma_setup_cont_regs(nsc);

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 2) nextdma_print(nsc);
#endif

	if (stat->nd_map_cont == NULL) {
		nd_bsw4 (DD_CSR, DMACSR_SETENABLE | dmadir);
	} else {
		nd_bsw4 (DD_CSR, DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
	}
}
799
/*
 * Debugging aid: dump the interrupt status/mask, both software maps with
 * all their segments, and every channel register.  Used by the panic
 * paths above.
 */
void
nextdma_print(struct nextdma_softc *nsc)
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;
	char sbuf[256];
	struct nextdma_status *stat = &nsc->sc_stat;

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr          = nd_bsr4 (DD_CSR);
	dd_next         = nd_bsr4 (DD_NEXT);
	dd_next_initbuf = nd_bsr4 (DD_NEXT_INITBUF);
	dd_limit        = nd_bsr4 (DD_LIMIT);
	dd_start        = nd_bsr4 (DD_START);
	dd_stop         = nd_bsr4 (DD_STOP);
	dd_saved_next   = nd_bsr4 (DD_SAVED_NEXT);
	dd_saved_limit  = nd_bsr4 (DD_SAVED_LIMIT);
	dd_saved_start  = nd_bsr4 (DD_SAVED_START);
	dd_saved_stop   = nd_bsr4 (DD_SAVED_STOP);

	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
	    *(volatile u_long *)IIOV(NEXT_P_INTRSTAT));
	printf("NDMAP: *intrstat = 0x%s\n", sbuf);

	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
	    *(volatile u_long *)IIOV(NEXT_P_INTRMASK));
	printf("NDMAP: *intrmask = 0x%s\n", sbuf);

	/* NDMAP is Next DMA Print (really!) */

	/* Current map: summary, active segment, then every segment. */
	if (stat->nd_map) {
		int i;

		printf("NDMAP: nd_map->dm_mapsize = %ld\n",
		       stat->nd_map->dm_mapsize);
		printf("NDMAP: nd_map->dm_nsegs = %d\n",
		       stat->nd_map->dm_nsegs);
		printf("NDMAP: nd_map->dm_xfer_len = %ld\n",
		       stat->nd_map->dm_xfer_len);
		printf("NDMAP: nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
		       stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_addr);
		printf("NDMAP: nd_map->dm_segs[%d].ds_len = %ld\n",
		       stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_len);

		printf("NDMAP: Entire map;\n");
		for(i=0;i<stat->nd_map->dm_nsegs;i++) {
			printf("NDMAP:   nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
			       i,stat->nd_map->dm_segs[i].ds_addr);
			printf("NDMAP:   nd_map->dm_segs[%d].ds_len = %ld\n",
			       i,stat->nd_map->dm_segs[i].ds_len);
		}
	} else {
		printf("NDMAP: nd_map = NULL\n");
	}
	/* Continue map: same treatment (full dump skipped if same map). */
	if (stat->nd_map_cont) {
		printf("NDMAP: nd_map_cont->dm_mapsize = %ld\n",
		       stat->nd_map_cont->dm_mapsize);
		printf("NDMAP: nd_map_cont->dm_nsegs = %d\n",
		       stat->nd_map_cont->dm_nsegs);
		printf("NDMAP: nd_map_cont->dm_xfer_len = %ld\n",
		       stat->nd_map_cont->dm_xfer_len);
		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
		       stat->nd_idx_cont,stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_len = %ld\n",
		       stat->nd_idx_cont,stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
		if (stat->nd_map_cont != stat->nd_map) {
			int i;
			printf("NDMAP: Entire map;\n");
			for(i=0;i<stat->nd_map_cont->dm_nsegs;i++) {
				printf("NDMAP:   nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				       i,stat->nd_map_cont->dm_segs[i].ds_addr);
				printf("NDMAP:   nd_map_cont->dm_segs[%d].ds_len = %ld\n",
				       i,stat->nd_map_cont->dm_segs[i].ds_len);
			}
		}
	} else {
		printf("NDMAP: nd_map_cont = NULL\n");
	}

	snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, dd_csr);
	printf("NDMAP: dd->dd_csr          = 0x%s\n",   sbuf);

	printf("NDMAP: dd->dd_saved_next   = 0x%08lx\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit  = 0x%08lx\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start  = 0x%08lx\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop   = 0x%08lx\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next         = 0x%08lx\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08lx\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit        = 0x%08lx\n", dd_limit);
	printf("NDMAP: dd->dd_start        = 0x%08lx\n", dd_start);
	printf("NDMAP: dd->dd_stop         = 0x%08lx\n", dd_stop);

	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
	    NEXT_I_BIT(nsc->sc_chan->nd_intr));
	printf("NDMAP: interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
}
908
909#if defined(ND_DEBUG)
910void
911nextdma_debug_initstate(struct nextdma_softc *nsc)
912{
913	switch(nsc->sc_chan->nd_intr) {
914	case NEXT_I_ENETR_DMA:
915		memset(nextdma_debug_enetr_state,0,sizeof(nextdma_debug_enetr_state));
916		break;
917	case NEXT_I_SCSI_DMA:
918		memset(nextdma_debug_scsi_state,0,sizeof(nextdma_debug_scsi_state));
919		break;
920	}
921}
922
923void
924nextdma_debug_savestate(struct nextdma_softc *nsc, unsigned int state)
925{
926	switch(nsc->sc_chan->nd_intr) {
927	case NEXT_I_ENETR_DMA:
928		nextdma_debug_enetr_state[nextdma_debug_enetr_idx++] = state;
929		nextdma_debug_enetr_idx %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
930		break;
931	case NEXT_I_SCSI_DMA:
932		nextdma_debug_scsi_state[nextdma_debug_scsi_idx++] = state;
933		nextdma_debug_scsi_idx %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
934		break;
935	}
936}
937
938void
939nextdma_debug_enetr_dumpstate(void)
940{
941	int i;
942	int s;
943	s = spldma();
944	i = nextdma_debug_enetr_idx;
945	do {
946		char sbuf[256];
947		if (nextdma_debug_enetr_state[i]) {
948			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, nextdma_debug_enetr_state[i]);
949			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
950		}
951		i++;
952		i %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
953	} while (i != nextdma_debug_enetr_idx);
954	splx(s);
955}
956
957void
958nextdma_debug_scsi_dumpstate(void)
959{
960	int i;
961	int s;
962	s = spldma();
963	i = nextdma_debug_scsi_idx;
964	do {
965		char sbuf[256];
966		if (nextdma_debug_scsi_state[i]) {
967			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, nextdma_debug_scsi_state[i]);
968			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
969		}
970		i++;
971		i %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
972	} while (i != nextdma_debug_scsi_idx);
973	splx(s);
974}
975#endif
976
977