lsi64854.c: deleted lines from r226381, added lines from r226947
1/*-
2 * Copyright (c) 2004 Scott Long
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28/* $NetBSD: lsi64854.c,v 1.33 2008/04/28 20:23:50 martin Exp $ */
29
30/*-
31 * Copyright (c) 1998 The NetBSD Foundation, Inc.
32 * All rights reserved.
33 *
34 * This code is derived from software contributed to The NetBSD Foundation
35 * by Paul Kranenburg.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
47 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
48 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
49 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
50 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
56 * POSSIBILITY OF SUCH DAMAGE.
57 */
58
59#include <sys/cdefs.h>
60__FBSDID("$FreeBSD: head/sys/sparc64/sbus/lsi64854.c 226381 2011-10-15 09:29:43Z marius $");
60__FBSDID("$FreeBSD: head/sys/sparc64/sbus/lsi64854.c 226947 2011-10-30 21:17:42Z marius $");
61
62#include <sys/param.h>
63#include <sys/systm.h>
64#include <sys/bus.h>
65#include <sys/kernel.h>
66#include <sys/lock.h>
67#include <sys/mutex.h>
68#include <sys/rman.h>
69
70#include <machine/bus.h>
71
72#include <cam/cam.h>
73#include <cam/cam_ccb.h>
74#include <cam/scsi/scsi_all.h>
75
76#include <sparc64/sbus/lsi64854reg.h>
77#include <sparc64/sbus/lsi64854var.h>
78
79#include <dev/esp/ncr53c9xreg.h>
80#include <dev/esp/ncr53c9xvar.h>
81
82#ifdef DEBUG
83#define LDB_SCSI 1
84#define LDB_ENET 2
85#define LDB_PP 4
86#define LDB_ANY 0xff
87int lsi64854debug = 0;
88#define DPRINTF(a,x) \
89 do { \
90 if ((lsi64854debug & (a)) != 0) \
91 printf x; \
92 } while (/* CONSTCOND */0)
93#else
94#define DPRINTF(a,x)
95#endif
96
97#define MAX_DMA_SZ (16*1024*1024)
97/*
98 * The rules say we cannot transfer more than the limit of this DMA chip (64k
99 * for old and 16Mb for new), and we cannot cross a 16Mb boundary.
100 */
101#define MAX_DMA_SZ (64 * 1024)
102#define BOUNDARY (16 * 1024 * 1024)
98
99static void lsi64854_reset(struct lsi64854_softc *);
100static void lsi64854_map_scsi(void *, bus_dma_segment_t *, int, int);
101static int lsi64854_setup(struct lsi64854_softc *, void **, size_t *,
102 int, size_t *);
103static int lsi64854_scsi_intr(void *);
104static int lsi64854_enet_intr(void *);
105static int lsi64854_setup_pp(struct lsi64854_softc *, void **,
106 size_t *, int, size_t *);
107static int lsi64854_pp_intr(void *);
108
109/*
110 * Finish attaching this DMA device.
111 * Front-end must fill in these fields:
112 * sc_res
113 * sc_burst
114 * sc_channel (one of SCSI, ENET, PP)
115 * sc_client (one of SCSI, ENET, PP `soft_c' pointers)
116 */
117int
118lsi64854_attach(struct lsi64854_softc *sc)
119{
120 bus_dma_lock_t *lockfunc;
121 struct ncr53c9x_softc *nsc;
122 void *lockfuncarg;
123 uint32_t csr;
124 int error;
125
126 lockfunc = NULL;
127 lockfuncarg = NULL;
133 sc->sc_maxdmasize = MAX_DMA_SZ;
128
129 switch (sc->sc_channel) {
130 case L64854_CHANNEL_SCSI:
131 nsc = sc->sc_client;
132 if (NCR_LOCK_INITIALIZED(nsc) == 0) {
133 device_printf(sc->sc_dev, "mutex not initialized\n");
134 return (ENXIO);
135 }
136 lockfunc = busdma_lock_mutex;
137 lockfuncarg = &nsc->sc_lock;
144 sc->sc_maxdmasize = nsc->sc_maxxfer;
138 sc->intr = lsi64854_scsi_intr;
139 sc->setup = lsi64854_setup;
140 break;
141 case L64854_CHANNEL_ENET:
142 sc->intr = lsi64854_enet_intr;
143 break;
144 case L64854_CHANNEL_PP:
145 sc->intr = lsi64854_pp_intr;
146 sc->setup = lsi64854_setup_pp;
147 break;
148 default:
149 device_printf(sc->sc_dev, "unknown channel\n");
150 }
151 sc->reset = lsi64854_reset;
152
153 if (sc->setup != NULL) {
154 error = bus_dma_tag_create(
155 sc->sc_parent_dmat, /* parent */
156 1, 0, /* alignment, boundary */
163 1, BOUNDARY, /* alignment, boundary */
157 BUS_SPACE_MAXADDR, /* lowaddr */
158 BUS_SPACE_MAXADDR, /* highaddr */
159 NULL, NULL, /* filter, filterarg */
160 MAX_DMA_SZ, /* maxsize */
167 sc->sc_maxdmasize, /* maxsize */
161 1, /* nsegments */
162 MAX_DMA_SZ, /* maxsegsize */
169 sc->sc_maxdmasize, /* maxsegsize */
163 BUS_DMA_ALLOCNOW, /* flags */
164 lockfunc, lockfuncarg, /* lockfunc, lockfuncarg */
165 &sc->sc_buffer_dmat);
166 if (error != 0) {
167 device_printf(sc->sc_dev,
168 "cannot allocate buffer DMA tag\n");
169 return (error);
170 }
171
172 error = bus_dmamap_create(sc->sc_buffer_dmat, 0,
173 &sc->sc_dmamap);
174 if (error != 0) {
175 device_printf(sc->sc_dev, "DMA map create failed\n");
176 bus_dma_tag_destroy(sc->sc_buffer_dmat);
177 return (error);
178 }
179 }
180
181 csr = L64854_GCSR(sc);
182 sc->sc_rev = csr & L64854_DEVID;
183 if (sc->sc_rev == DMAREV_HME)
184 return (0);
185 device_printf(sc->sc_dev, "DMA rev. ");
186 switch (sc->sc_rev) {
187 case DMAREV_0:
188 printf("0");
189 break;
190 case DMAREV_ESC:
191 printf("ESC");
192 break;
193 case DMAREV_1:
194 printf("1");
195 break;
196 case DMAREV_PLUS:
197 printf("1+");
198 break;
199 case DMAREV_2:
200 printf("2");
201 break;
202 default:
203 printf("unknown (0x%x)", sc->sc_rev);
204 }
205
206 DPRINTF(LDB_ANY, (", burst 0x%x, csr 0x%x", sc->sc_burst, csr));
207 printf("\n");
208
209 return (0);
210}
211
212int
213lsi64854_detach(struct lsi64854_softc *sc)
214{
215
216 if (sc->setup != NULL) {
217 bus_dmamap_sync(sc->sc_buffer_dmat, sc->sc_dmamap,
218 (L64854_GCSR(sc) & L64854_WRITE) != 0 ?
219 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
220 bus_dmamap_unload(sc->sc_buffer_dmat, sc->sc_dmamap);
221 bus_dmamap_destroy(sc->sc_buffer_dmat, sc->sc_dmamap);
222 bus_dma_tag_destroy(sc->sc_buffer_dmat);
223 }
224
225 return (0);
226}
227
228/*
229 * DMAWAIT waits while condition is true.
230 */
231#define DMAWAIT(SC, COND, MSG, DONTPANIC) do if (COND) { \
232 int count = 500000; \
233 while ((COND) && --count > 0) DELAY(1); \
234 if (count == 0) { \
235 printf("%s: line %d: CSR = 0x%lx\n", __FILE__, __LINE__, \
236 (u_long)L64854_GCSR(SC)); \
237 if (DONTPANIC) \
238 printf(MSG); \
239 else \
240 panic(MSG); \
241 } \
242} while (/* CONSTCOND */0)
243
244#define DMA_DRAIN(sc, dontpanic) do { \
245 uint32_t csr; \
246 /* \
247 * DMA rev0 & rev1: we are not allowed to touch the DMA "flush" \
248 * and "drain" bits while it is still thinking about a \
249 * request. \
250 * other revs: D_ESC_R_PEND bit reads as 0 \
251 */ \
252 DMAWAIT(sc, L64854_GCSR(sc) & D_ESC_R_PEND, "R_PEND", dontpanic);\
253 if (sc->sc_rev != DMAREV_HME) { \
254 /* \
255 * Select drain bit based on revision \
256 * also clears errors and D_TC flag \
257 */ \
258 csr = L64854_GCSR(sc); \
259 if (sc->sc_rev == DMAREV_1 || sc->sc_rev == DMAREV_0) \
260 csr |= D_ESC_DRAIN; \
261 else \
262 csr |= L64854_INVALIDATE; \
263 \
264 L64854_SCSR(sc,csr); \
271 L64854_SCSR(sc, csr); \
265 } \
266 /* \
267 * Wait for draining to finish \
268 * rev0 & rev1 call this PACKCNT \
269 */ \
270 DMAWAIT(sc, L64854_GCSR(sc) & L64854_DRAINING, "DRAINING", dontpanic);\
277 DMAWAIT(sc, L64854_GCSR(sc) & L64854_DRAINING, "DRAINING", \
278 dontpanic); \
271} while (/* CONSTCOND */0)
272
273#define DMA_FLUSH(sc, dontpanic) do { \
274 uint32_t csr; \
275 /* \
276 * DMA rev0 & rev1: we are not allowed to touch the DMA "flush" \
277 * and "drain" bits while it is still thinking about a \
278 * request. \
279 * other revs: D_ESC_R_PEND bit reads as 0 \
280 */ \
281 DMAWAIT(sc, L64854_GCSR(sc) & D_ESC_R_PEND, "R_PEND", dontpanic);\
282 csr = L64854_GCSR(sc); \
283 csr &= ~(L64854_WRITE|L64854_EN_DMA); /* no-ops on ENET */ \
284 csr |= L64854_INVALIDATE; /* XXX FAS ? */ \
285 L64854_SCSR(sc,csr); \
293 L64854_SCSR(sc, csr); \
286} while (/* CONSTCOND */0)
287
288static void
289lsi64854_reset(struct lsi64854_softc *sc)
290{
299 bus_dma_tag_t dmat;
300 bus_dmamap_t dmam;
291 uint32_t csr;
292
293 DMA_FLUSH(sc, 1);
294 csr = L64854_GCSR(sc);
295
296 DPRINTF(LDB_ANY, ("%s: csr 0x%x\n", __func__, csr));
297
298 if (sc->sc_dmasize != 0) {
299 bus_dmamap_sync(sc->sc_buffer_dmat, sc->sc_dmamap,
300 (csr & D_WRITE) != 0 ? BUS_DMASYNC_PREREAD :
301 BUS_DMASYNC_PREWRITE);
302 bus_dmamap_unload(sc->sc_buffer_dmat, sc->sc_dmamap);
309 dmat = sc->sc_buffer_dmat;
310 dmam = sc->sc_dmamap;
311 bus_dmamap_sync(dmat, dmam, (csr & D_WRITE) != 0 ?
312 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
313 bus_dmamap_unload(dmat, dmam);
303 }
304
305 if (sc->sc_rev == DMAREV_HME)
306 L64854_SCSR(sc, csr | D_HW_RESET_FAS366);
307
308 csr |= L64854_RESET; /* reset DMA */
309 L64854_SCSR(sc, csr);
310 DELAY(200); /* > 10 Sbus clocks(?) */
311
312 /*DMAWAIT1(sc); why was this here? */
313 csr = L64854_GCSR(sc);
314 csr &= ~L64854_RESET; /* de-assert reset line */
315 L64854_SCSR(sc, csr);
316 DELAY(5); /* allow a few ticks to settle */
317
318 csr = L64854_GCSR(sc);
319 csr |= L64854_INT_EN; /* enable interrupts */
320 if (sc->sc_rev > DMAREV_1 && sc->sc_channel == L64854_CHANNEL_SCSI) {
321 if (sc->sc_rev == DMAREV_HME)
322 csr |= D_TWO_CYCLE;
323 else
324 csr |= D_FASTER;
325 }
326
327 /* Set burst */
328 switch (sc->sc_rev) {
329 case DMAREV_HME:
330 case DMAREV_2:
331 csr &= ~L64854_BURST_SIZE;
332 if (sc->sc_burst == 32)
333 csr |= L64854_BURST_32;
334 else if (sc->sc_burst == 16)
335 csr |= L64854_BURST_16;
336 else
337 csr |= L64854_BURST_0;
338 break;
339 case DMAREV_ESC:
340 csr |= D_ESC_AUTODRAIN; /* Auto-drain */
341 if (sc->sc_burst == 32)
342 csr &= ~D_ESC_BURST;
343 else
344 csr |= D_ESC_BURST;
345 break;
346 default:
347 break;
348 }
349 L64854_SCSR(sc, csr);
350
351 if (sc->sc_rev == DMAREV_HME) {
352 bus_write_4(sc->sc_res, L64854_REG_ADDR, 0);
353 sc->sc_dmactl = csr;
354 }
355 sc->sc_active = 0;
356
357 DPRINTF(LDB_ANY, ("%s: done, csr 0x%x\n", __func__, csr));
358}
359
360static void
361lsi64854_map_scsi(void *arg, bus_dma_segment_t *segs, int nseg, int error)
362{
363 struct lsi64854_softc *sc;
364
365 sc = (struct lsi64854_softc *)arg;
366
378 if (error != 0)
379 return;
367 if (nseg != 1)
368 panic("%s: cannot map %d segments\n", __func__, nseg);
369
370 bus_dmamap_sync(sc->sc_buffer_dmat, sc->sc_dmamap,
371 sc->sc_datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
384 sc->sc_datain != 0 ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
372 bus_write_4(sc->sc_res, L64854_REG_ADDR, segs[0].ds_addr);
373}
374
375#define DMAMAX(a) (MAX_DMA_SZ - ((a) & (MAX_DMA_SZ - 1)))
376/*
377 * setup a DMA transfer
378 */
379static int
380lsi64854_setup(struct lsi64854_softc *sc, void **addr, size_t *len,
381 int datain, size_t *dmasize)
382{
383 long bcnt;
396 int error;
384 uint32_t csr;
385
386 DMA_FLUSH(sc, 0);
387
388#if 0
389 DMACSR(sc) &= ~D_INT_EN;
390#endif
391 sc->sc_dmaaddr = addr;
392 sc->sc_dmalen = len;
393 sc->sc_datain = datain;
394
395 /*
396 * The rules say we cannot transfer more than the limit
397 * of this DMA chip (64k for old and 16Mb for new),
398 * and we cannot cross a 16Mb boundary.
399 */
400 *dmasize = sc->sc_dmasize =
401 ulmin(*dmasize, DMAMAX((size_t)*sc->sc_dmaaddr));
408 KASSERT(*dmasize <= sc->sc_maxdmasize,
409 ("%s: transfer size %ld too large", __func__, (long)*dmasize));
402
403 DPRINTF(LDB_ANY, ("%s: dmasize=%ld\n", __func__, (long)sc->sc_dmasize));
411 sc->sc_dmasize = *dmasize;
404
413 DPRINTF(LDB_ANY, ("%s: dmasize=%ld\n", __func__, (long)*dmasize));
414
405 /*
406 * XXX what length?
407 */
408 if (sc->sc_rev == DMAREV_HME) {
409 L64854_SCSR(sc, sc->sc_dmactl | L64854_RESET);
410 L64854_SCSR(sc, sc->sc_dmactl);
411
412 bus_write_4(sc->sc_res, L64854_REG_CNT, *dmasize);
413 }
414
415 /* Program the DMA address */
416 if (sc->sc_dmasize != 0)
417 if (bus_dmamap_load(sc->sc_buffer_dmat, sc->sc_dmamap,
418 *sc->sc_dmaaddr, sc->sc_dmasize, lsi64854_map_scsi, sc, 0))
419 panic("%s: cannot allocate DVMA address", __func__);
425 /*
426 * Load the transfer buffer and program the DMA address.
427 * Note that the NCR53C9x core can't handle EINPROGRESS so we set
428 * BUS_DMA_NOWAIT.
429 */
430 if (*dmasize != 0) {
431 error = bus_dmamap_load(sc->sc_buffer_dmat, sc->sc_dmamap,
432 *sc->sc_dmaaddr, *dmasize, lsi64854_map_scsi, sc,
433 BUS_DMA_NOWAIT);
434 if (error != 0)
435 return (error);
436 }
420
421 if (sc->sc_rev == DMAREV_ESC) {
422 /* DMA ESC chip bug work-around */
423 bcnt = sc->sc_dmasize;
440 bcnt = *dmasize;
424 if (((bcnt + (long)*sc->sc_dmaaddr) & PAGE_MASK_8K) != 0)
425 bcnt = roundup(bcnt, PAGE_SIZE_8K);
426 bus_write_4(sc->sc_res, L64854_REG_CNT, bcnt);
427 }
428
429 /* Setup DMA control register */
446 /* Setup the DMA control register. */
430 csr = L64854_GCSR(sc);
431
432 if (datain)
449 if (datain != 0)
433 csr |= L64854_WRITE;
434 else
435 csr &= ~L64854_WRITE;
436 csr |= L64854_INT_EN;
437
438 if (sc->sc_rev == DMAREV_HME)
439 csr |= (D_DSBL_SCSI_DRN | D_EN_DMA);
440
441 L64854_SCSR(sc, csr);
442
443 return (0);
444}
445
446/*
447 * Pseudo (chained) interrupt from the esp driver to kick the
448 * current running DMA transfer. Called from ncr53c9x_intr()
449 * for now.
450 *
451 * return 1 if it was a DMA continue.
452 */
453static int
454lsi64854_scsi_intr(void *arg)
455{
456 struct lsi64854_softc *sc = arg;
457 struct ncr53c9x_softc *nsc = sc->sc_client;
458 int trans, resid;
475 bus_dma_tag_t dmat;
476 bus_dmamap_t dmam;
477 size_t dmasize;
478 int lxfer, resid, trans;
459 uint32_t csr;
460
461 csr = L64854_GCSR(sc);
462
463 DPRINTF(LDB_SCSI, ("%s: addr 0x%x, csr %b\n", __func__,
464 bus_read_4(sc->sc_res, L64854_REG_ADDR), csr, DDMACSR_BITS));
465
466 if (csr & (D_ERR_PEND|D_SLAVE_ERR)) {
467 device_printf(sc->sc_dev, "error: csr=%b\n", csr, DDMACSR_BITS);
468 csr &= ~D_EN_DMA; /* Stop DMA */
486 if (csr & (D_ERR_PEND | D_SLAVE_ERR)) {
487 device_printf(sc->sc_dev, "error: csr=%b\n", csr,
488 DDMACSR_BITS);
489 csr &= ~D_EN_DMA; /* Stop DMA. */
469 /* Invalidate the queue; SLAVE_ERR bit is write-to-clear */
470 csr |= D_INVALIDATE|D_SLAVE_ERR;
491 csr |= D_INVALIDATE | D_SLAVE_ERR;
471 L64854_SCSR(sc, csr);
472 return (-1);
473 }
474
475 /* This is an "assertion" :) */
476 if (sc->sc_active == 0)
477 panic("%s: DMA wasn't active", __func__);
478
479 DMA_DRAIN(sc, 0);
480
481 /* DMA has stopped */
482 csr &= ~D_EN_DMA;
483 L64854_SCSR(sc, csr);
484 sc->sc_active = 0;
485
486 if (sc->sc_dmasize == 0) {
487 /* A "Transfer Pad" operation completed */
488 DPRINTF(LDB_SCSI, ("%s: discarded %d bytes (tcl=%d, tcm=%d)\n",
489 __func__, NCR_READ_REG(nsc, NCR_TCL) |
507 dmasize = sc->sc_dmasize;
508 if (dmasize == 0) {
509 /* A "Transfer Pad" operation completed. */
510 DPRINTF(LDB_SCSI, ("%s: discarded %d bytes (tcl=%d, "
511 "tcm=%d)\n", __func__, NCR_READ_REG(nsc, NCR_TCL) |
490 (NCR_READ_REG(nsc, NCR_TCM) << 8),
491 NCR_READ_REG(nsc, NCR_TCL), NCR_READ_REG(nsc, NCR_TCM)));
492 return (0);
493 }
494
495 resid = 0;
496 /*
497 * If a transfer onto the SCSI bus gets interrupted by the device
498 * (e.g. for a SAVEPOINTER message), the data in the FIFO counts
499 * as residual since the NCR53C9X counter registers get decremented
500 * as bytes are clocked into the FIFO.
501 */
502 if (!(csr & D_WRITE) &&
524 if ((csr & D_WRITE) == 0 &&
503 (resid = (NCR_READ_REG(nsc, NCR_FFLAG) & NCRFIFO_FF)) != 0) {
504 DPRINTF(LDB_SCSI, ("%s: empty esp FIFO of %d ", __func__,
505 resid));
506 if (nsc->sc_rev == NCR_VARIANT_FAS366 &&
507 (NCR_READ_REG(nsc, NCR_CFG3) & NCRFASCFG3_EWIDE))
508 resid <<= 1;
509 }
510
511 if ((nsc->sc_espstat & NCRSTAT_TC) == 0) {
534 lxfer = nsc->sc_features & NCR_F_LARGEXFER;
512 /*
513 * `Terminal count' is off, so read the residue
536 * "Terminal count" is off, so read the residue
514 * out of the NCR53C9X counter registers.
515 */
516 resid += (NCR_READ_REG(nsc, NCR_TCL) |
517 (NCR_READ_REG(nsc, NCR_TCM) << 8) |
518 ((nsc->sc_cfg2 & NCRCFG2_FE) ?
519 (NCR_READ_REG(nsc, NCR_TCH) << 16) : 0));
541 (lxfer != 0 ? (NCR_READ_REG(nsc, NCR_TCH) << 16) : 0));
520
521 if (resid == 0 && sc->sc_dmasize == 65536 &&
522 (nsc->sc_cfg2 & NCRCFG2_FE) == 0)
523 /* A transfer of 64K is encoded as `TCL=TCM=0' */
543 if (resid == 0 && dmasize == 65536 && lxfer == 0)
544 /* A transfer of 64k is encoded as TCL=TCM=0. */
524 resid = 65536;
525 }
526
527 trans = sc->sc_dmasize - resid;
548 trans = dmasize - resid;
528 if (trans < 0) { /* transferred < 0? */
529#if 0
530 /*
531 * This situation can happen in perfectly normal operation
532 * if the ESP is reselected while using DMA to select
533 * another target. As such, don't print the warning.
534 */
535 device_printf(sc->sc_dev, "xfer (%d) > req (%d)\n", trans,
536 sc->sc_dmasize);
557 dmasize);
537#endif
538 trans = sc->sc_dmasize;
559 trans = dmasize;
539 }
540
541 DPRINTF(LDB_SCSI, ("%s: tcl=%d, tcm=%d, tch=%d; trans=%d, resid=%d\n",
542 __func__, NCR_READ_REG(nsc, NCR_TCL), NCR_READ_REG(nsc, NCR_TCM),
543 (nsc->sc_cfg2 & NCRCFG2_FE) ? NCR_READ_REG(nsc, NCR_TCH) : 0,
544 trans, resid));
 564	    (nsc->sc_features & NCR_F_LARGEXFER) != 0 ?
565 NCR_READ_REG(nsc, NCR_TCH) : 0, trans, resid));
545
546 if (sc->sc_dmasize != 0) {
547 bus_dmamap_sync(sc->sc_buffer_dmat, sc->sc_dmamap,
548 (csr & D_WRITE) != 0 ? BUS_DMASYNC_POSTREAD :
549 BUS_DMASYNC_POSTWRITE);
550 bus_dmamap_unload(sc->sc_buffer_dmat, sc->sc_dmamap);
567 if (dmasize != 0) {
568 dmat = sc->sc_buffer_dmat;
569 dmam = sc->sc_dmamap;
570 bus_dmamap_sync(dmat, dmam, (csr & D_WRITE) != 0 ?
571 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
572 bus_dmamap_unload(dmat, dmam);
551 }
552
553 *sc->sc_dmalen -= trans;
554 *sc->sc_dmaaddr = (char *)*sc->sc_dmaaddr + trans;
555
556#if 0 /* this is not normal operation just yet */
557 if (*sc->sc_dmalen == 0 || nsc->sc_phase != nsc->sc_prevphase)
558 return (0);
559
560 /* and again */
561 dma_start(sc, sc->sc_dmaaddr, sc->sc_dmalen, DMACSR(sc) & D_WRITE);
562 return (1);
563#endif
564 return (0);
565}
566
567/*
568 * Pseudo (chained) interrupt to le driver to handle DMA errors.
590 * Pseudo (chained) interrupt to le(4) driver to handle DMA errors
569 */
570static int
571lsi64854_enet_intr(void *arg)
572{
573 struct lsi64854_softc *sc = arg;
574 uint32_t csr;
575 int i, rv;
576
577 csr = L64854_GCSR(sc);
578
579 /* If the DMA logic shows an interrupt, claim it */
580 rv = ((csr & E_INT_PEND) != 0) ? 1 : 0;
581
582 if (csr & (E_ERR_PEND|E_SLAVE_ERR)) {
583 device_printf(sc->sc_dev, "error: csr=%b\n", csr, EDMACSR_BITS);
584 csr &= ~L64854_EN_DMA; /* Stop DMA */
604 if (csr & (E_ERR_PEND | E_SLAVE_ERR)) {
605 device_printf(sc->sc_dev, "error: csr=%b\n", csr,
606 EDMACSR_BITS);
607 csr &= ~L64854_EN_DMA; /* Stop DMA. */
585 /* Invalidate the queue; SLAVE_ERR bit is write-to-clear */
586 csr |= E_INVALIDATE|E_SLAVE_ERR;
609 csr |= E_INVALIDATE | E_SLAVE_ERR;
587 L64854_SCSR(sc, csr);
588 /* Will be drained with the LE_C0_IDON interrupt. */
589 sc->sc_dodrain = 1;
590 return (-1);
591 }
592
593 /* XXX - is this necessary with E_DSBL_WR_INVAL on? */
594 if (sc->sc_dodrain) {
595 i = 10;
596 csr |= E_DRAIN;
597 L64854_SCSR(sc, csr);
598 while (i-- > 0 && (L64854_GCSR(sc) & E_DRAINING))
599 DELAY(1);
600 sc->sc_dodrain = 0;
601 }
602
603 return (rv);
604}
605
606static void
607lsi64854_map_pp(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
608{
609 struct lsi64854_softc *sc;
610
611 sc = (struct lsi64854_softc *)arg;
612
636 if (error != 0)
637 return;
613 if (nsegs != 1)
614 panic("%s: cannot map %d segments\n", __func__, nsegs);
615
616 bus_dmamap_sync(sc->sc_buffer_dmat, sc->sc_dmamap, sc->sc_datain ?
617 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
641 bus_dmamap_sync(sc->sc_buffer_dmat, sc->sc_dmamap,
642 sc->sc_datain != 0 ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
618 bus_write_4(sc->sc_res, L64854_REG_ADDR, segs[0].ds_addr);
619
620 bus_write_4(sc->sc_res, L64854_REG_CNT, sc->sc_dmasize);
621}
622
623/*
624 * setup a DMA transfer
649 * Setup a DMA transfer.
625 */
626static int
627lsi64854_setup_pp(struct lsi64854_softc *sc, void **addr, size_t *len,
628 int datain, size_t *dmasize)
629{
655 int error;
630 uint32_t csr;
631
632 DMA_FLUSH(sc, 0);
633
634 sc->sc_dmaaddr = addr;
635 sc->sc_dmalen = len;
636 sc->sc_datain = datain;
637
638 DPRINTF(LDB_PP, ("%s: pp start %ld@%p,%d\n", __func__,
639 (long)*sc->sc_dmalen, *sc->sc_dmaaddr, datain ? 1 : 0));
665 (long)*sc->sc_dmalen, *sc->sc_dmaaddr, datain != 0 ? 1 : 0));
640
641 /*
642 * the rules say we cannot transfer more than the limit
643 * of this DMA chip (64k for old and 16Mb for new),
644 * and we cannot cross a 16Mb boundary.
645 */
646 *dmasize = sc->sc_dmasize =
647 ulmin(*dmasize, DMAMAX((size_t)*sc->sc_dmaaddr));
667 KASSERT(*dmasize <= sc->sc_maxdmasize,
668 ("%s: transfer size %ld too large", __func__, (long)*dmasize));
648
649 DPRINTF(LDB_PP, ("%s: dmasize=%ld\n", __func__, (long)sc->sc_dmasize));
670 sc->sc_dmasize = *dmasize;
650
651 /* Program the DMA address */
652 if (sc->sc_dmasize != 0)
653 if (bus_dmamap_load(sc->sc_buffer_dmat, sc->sc_dmamap,
654 *sc->sc_dmaaddr, sc->sc_dmasize, lsi64854_map_pp, sc, 0))
655 panic("%s: pp cannot allocate DVMA address", __func__);
672 DPRINTF(LDB_PP, ("%s: dmasize=%ld\n", __func__, (long)*dmasize));
656
657 /* Setup DMA control register */
674 /* Load the transfer buffer and program the DMA address. */
675 if (*dmasize != 0) {
676 error = bus_dmamap_load(sc->sc_buffer_dmat, sc->sc_dmamap,
677 *sc->sc_dmaaddr, *dmasize, lsi64854_map_pp, sc,
678 BUS_DMA_NOWAIT);
679 if (error != 0)
680 return (error);
681 }
682
683 /* Setup the DMA control register. */
658 csr = L64854_GCSR(sc);
659 csr &= ~L64854_BURST_SIZE;
660 if (sc->sc_burst == 32)
661 csr |= L64854_BURST_32;
662 else if (sc->sc_burst == 16)
663 csr |= L64854_BURST_16;
664 else
665 csr |= L64854_BURST_0;
666 csr |= P_EN_DMA|P_INT_EN|P_EN_CNT;
692 csr |= P_EN_DMA | P_INT_EN | P_EN_CNT;
667#if 0
668 /* This bit is read-only in PP csr register */
669 if (datain)
694 /* This bit is read-only in PP csr register. */
695 if (datain != 0)
670 csr |= P_WRITE;
671 else
672 csr &= ~P_WRITE;
673#endif
674 L64854_SCSR(sc, csr);
675
676 return (0);
677}
678
679/*
680 * Parallel port DMA interrupt.
706 * Parallel port DMA interrupt
681 */
682static int
683lsi64854_pp_intr(void *arg)
684{
685 struct lsi64854_softc *sc = arg;
712 bus_dma_tag_t dmat;
713 bus_dmamap_t dmam;
714 size_t dmasize;
686 int ret, trans, resid = 0;
687 uint32_t csr;
688
689 csr = L64854_GCSR(sc);
690
691 DPRINTF(LDB_PP, ("%s: addr 0x%x, csr %b\n", __func__,
692 bus_read_4(sc->sc_res, L64854_REG_ADDR), csr, PDMACSR_BITS));
693
694 if (csr & (P_ERR_PEND|P_SLAVE_ERR)) {
723 if ((csr & (P_ERR_PEND | P_SLAVE_ERR)) != 0) {
695 resid = bus_read_4(sc->sc_res, L64854_REG_CNT);
696 device_printf(sc->sc_dev, "error: resid %d csr=%b\n", resid,
697 csr, PDMACSR_BITS);
698 csr &= ~P_EN_DMA; /* Stop DMA */
727 csr &= ~P_EN_DMA; /* Stop DMA. */
699 /* Invalidate the queue; SLAVE_ERR bit is write-to-clear */
700 csr |= P_INVALIDATE|P_SLAVE_ERR;
729 csr |= P_INVALIDATE | P_SLAVE_ERR;
701 L64854_SCSR(sc, csr);
702 return (-1);
703 }
704
705 ret = (csr & P_INT_PEND) != 0;
706
707 if (sc->sc_active != 0) {
708 DMA_DRAIN(sc, 0);
709 resid = bus_read_4(sc->sc_res, L64854_REG_CNT);
710 }
711
712 /* DMA has stopped */
713 csr &= ~D_EN_DMA;
714 L64854_SCSR(sc, csr);
715 sc->sc_active = 0;
716
717 trans = sc->sc_dmasize - resid;
746 dmasize = sc->sc_dmasize;
747 trans = dmasize - resid;
718 if (trans < 0) /* transferred < 0? */
719 trans = sc->sc_dmasize;
749 trans = dmasize;
720 *sc->sc_dmalen -= trans;
721 *sc->sc_dmaaddr = (char *)*sc->sc_dmaaddr + trans;
722
723 if (sc->sc_dmasize != 0) {
724 bus_dmamap_sync(sc->sc_buffer_dmat, sc->sc_dmamap,
725 (csr & D_WRITE) != 0 ? BUS_DMASYNC_POSTREAD :
726 BUS_DMASYNC_POSTWRITE);
727 bus_dmamap_unload(sc->sc_buffer_dmat, sc->sc_dmamap);
753 if (dmasize != 0) {
754 dmat = sc->sc_buffer_dmat;
755 dmam = sc->sc_dmamap;
756 bus_dmamap_sync(dmat, dmam, (csr & D_WRITE) != 0 ?
757 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
758 bus_dmamap_unload(dmat, dmam);
728 }
729
730 return (ret != 0);
731}