/*-
 * Copyright (c) 1997-2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/isp/isp_sbus.c 168240 2007-04-02 01:04:20Z mjacob $");

#include <sys/param.h>
#include <sys/systm.h>
#if __FreeBSD_version >= 700000
#include <sys/linker.h>
#include <sys/firmware.h>
#endif
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/resource.h>

#include <dev/ofw/ofw_bus.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sparc64/sbus/sbusvar.h>

#include <dev/isp/isp_freebsd.h>

static uint32_t
isp_sbus_rd_reg(ispsoftc_t *, int);
static void
isp_sbus_wr_reg(ispsoftc_t *, int, uint32_t);
static int
isp_sbus_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static int isp_sbus_mbxdma(ispsoftc_t *);
static int
isp_sbus_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint32_t *, uint32_t);
static void
isp_sbus_dmateardown(ispsoftc_t *, XS_T *, uint32_t);

static void isp_sbus_reset0(ispsoftc_t *);
static void isp_sbus_reset1(ispsoftc_t *);
static void isp_sbus_dumpregs(ispsoftc_t *, const char *);

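/*
 * Bus-specific operations handed to the common isp core through isp_mdvec:
 * interrupt status reads, register accessors, DMA setup/teardown and the
 * reset/dumpregs hooks below.  The dv_conf1 value in the initializer is
 * only a default; attach recomputes it from the SBus burst size property.
 */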
static struct ispmdvec mdvec = {
	isp_sbus_rd_isr,
	isp_sbus_rd_reg,
	isp_sbus_wr_reg,
	isp_sbus_mbxdma,
	isp_sbus_dmasetup,
	isp_sbus_dmateardown,
	isp_sbus_reset0,
	isp_sbus_reset1,
	isp_sbus_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static int isp_sbus_probe (device_t);
static int isp_sbus_attach (device_t);


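/*
 * Per-instance softc.  The embedded ispsoftc_t must stay the first member:
 * the common code only ever sees an ispsoftc_t pointer, and the bus glue
 * casts it back to struct isp_sbussoftc (see isp_sbus_rd_reg() and friends).
 */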
struct isp_sbussoftc {
	ispsoftc_t			sbus_isp;
	device_t			sbus_dev;
	struct resource *		sbus_reg;
	void *				ih;
	int16_t				sbus_poff[_NREG_BLKS];
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
	sdparam				sbus_param;
	struct ispmdvec			sbus_mdvec;
	struct resource *		sbus_ires;
};


static device_method_t isp_sbus_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_sbus_probe),
	DEVMETHOD(device_attach,	isp_sbus_attach),
	{ 0, 0 }
};
static void isp_sbus_intr(void *);

static driver_t isp_sbus_driver = {
	"isp", isp_sbus_methods, sizeof (struct isp_sbussoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, sbus, isp_sbus_driver, isp_devclass, 0, 0);
#if __FreeBSD_version < 700000
extern ispfwfunc *isp_get_firmware_p;
#endif

static int
isp_sbus_probe(device_t dev)
{
	int found = 0;
	const char *name = ofw_bus_get_name(dev);
	if (strcmp(name, "SUNW,isp") == 0 ||
	    strcmp(name, "QLGC,isp") == 0 ||
	    strcmp(name, "ptisp") == 0 ||
	    strcmp(name, "PTI,ptisp") == 0) {
		found++;
	}
	if (!found)
		return (ENXIO);

	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	return (0);
}

static int
isp_sbus_attach(device_t dev)
{
	struct resource *regs;
	int tval, iqd, isp_debug, role, rid, ispburst;
	struct isp_sbussoftc *sbs;
	ispsoftc_t *isp = NULL;
	int locksetup = 0;
	int ints_setup = 0;

	/*
	 * Figure out if we're supposed to skip this one.
	 * If we are, we actually go to ISP_ROLE_NONE.
	 */

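	/*
	 * The "disable" and "role" knobs (and "fwload_disable", "iid" and
	 * "debug" further down) are fetched with resource_int_value(), so
	 * they come from loader tunables of the usual hint form, e.g.
	 * hint.isp.0.disable="1" (the hint name shown is just an example).
	 */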
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "disable", &tval) == 0 && tval) {
		device_printf(dev, "device is disabled\n");
		/* but return 0 so the !$)$)*!$*) unit isn't reused */
		return (0);
	}

	role = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "role", &role) == 0 &&
	    ((role & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) == 0)) {
		device_printf(dev, "setting role to 0x%x\n", role);
	} else {
#ifdef	ISP_TARGET_MODE
		role = ISP_ROLE_INITIATOR|ISP_ROLE_TARGET;
#else
		role = ISP_DEFAULT_ROLES;
#endif
	}

	sbs = malloc(sizeof (*sbs), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sbs == NULL) {
		device_printf(dev, "cannot allocate softc\n");
		return (ENOMEM);
	}

	regs = NULL;
	iqd = 0;
	rid = 0;
	regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (regs == NULL) {
		device_printf(dev, "unable to map registers\n");
		goto bad;
	}
	sbs->sbus_dev = dev;
	sbs->sbus_reg = regs;
	sbs->sbus_mdvec = mdvec;

	sbs->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	sbs->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF;
	sbs->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF;
	sbs->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF;
	sbs->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	isp = &sbs->sbus_isp;
	isp->isp_bus_tag = rman_get_bustag(regs);
	isp->isp_bus_handle = rman_get_bushandle(regs);
	isp->isp_mdvec = &sbs->sbus_mdvec;
	isp->isp_bustype = ISP_BT_SBUS;
	isp->isp_type = ISP_HA_SCSI_UNKNOWN;
	isp->isp_param = &sbs->sbus_param;
	isp->isp_revision = 0;	/* XXX */
	isp->isp_role = role;
	isp->isp_dev = dev;

	/*
	 * Get the clock frequency and convert it from Hz to MHz,
	 * rounding up. This defaults to 25 MHz if there isn't a
	 * device-specific value in the OFW device tree.
	 */
	sbs->sbus_mdvec.dv_clock = (sbus_get_clockfreq(dev) + 500000)/1000000;

	/*
	 * Now figure out the proper burst sizes, etc., to use.
	 * Unfortunately, there is no ddi_dma_burstsizes here that
	 * walks up the tree to find the limiting burst size node
	 * (if any). We just use what's here for isp.
	 */
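	/*
	 * The burst size mask from the OFW tree presumably follows the
	 * SBUS_BURST_* encoding (bit 3 = 8-byte, bit 4 = 16-byte, bit 5 =
	 * 32-byte bursts), which is what the bit tests below assume;
	 * SBUS_BURST_32 - 1 is used as the default mask when the property
	 * is absent.
	 */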
	ispburst = sbus_get_burstsz(dev);
	if (ispburst == 0) {
		ispburst = SBUS_BURST_32 - 1;
	}
	sbs->sbus_mdvec.dv_conf1 = 0;
	if (ispburst & (1 << 5)) {
		sbs->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_32;
	} else if (ispburst & (1 << 4)) {
		sbs->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_16;
	} else if (ispburst & (1 << 3)) {
		sbs->sbus_mdvec.dv_conf1 =
		    BIU_SBUS_CONF1_BURST8 | BIU_SBUS_CONF1_FIFO_8;
	}
	if (sbs->sbus_mdvec.dv_conf1) {
		sbs->sbus_mdvec.dv_conf1 |= BIU_BURST_ENABLE;
	}

	/*
	 * We don't trust NVRAM on SBus cards
	 */
	isp->isp_confopts |= ISP_CFG_NONVRAM;

	/*
	 * Mark things if we're a PTI SBus adapter.
	 */
	if (strcmp("PTI,ptisp", ofw_bus_get_name(dev)) == 0 ||
	    strcmp("ptisp", ofw_bus_get_name(dev)) == 0) {
		SDPARAM(isp)->isp_ptisp = 1;
	}


#if __FreeBSD_version >= 700000
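	/*
	 * On 7.x and later the ISP1000 firmware comes from the firmware(9)
	 * image named "isp_1000" (if that module is present); the union
	 * below exists only to strip the const from the image data pointer.
	 */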
	isp->isp_osinfo.fw = firmware_get("isp_1000");
	if (isp->isp_osinfo.fw) {
		union {
			const void *cp;
			uint16_t *sp;
		} stupid;
		stupid.cp = isp->isp_osinfo.fw->data;
		isp->isp_mdvec->dv_ispfw = stupid.sp;
	}
#else
	/*
	 * Try and find firmware for this device.
	 */
	if (isp_get_firmware_p) {
		(*isp_get_firmware_p)(0, 0, 0x1000, &sbs->sbus_mdvec.dv_ispfw);
	}
#endif

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}

	isp->isp_osinfo.default_id = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "iid", &tval) == 0) {
		isp->isp_osinfo.default_id = tval;
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}
	if (isp->isp_osinfo.default_id == -1) {
		/*
		 * XXX: should be a way to get properties w/o having
		 * XXX: to call OF_xxx functions
		 */
		isp->isp_osinfo.default_id = 7;
	}

	isp_debug = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "debug", &isp_debug);

	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;

	iqd = 0;
	sbs->sbus_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
	    RF_ACTIVE | RF_SHAREABLE);
	if (sbs->sbus_ires == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	if (isp_setup_intr(dev, sbs->sbus_ires, ISP_IFLAGS,
	    NULL, isp_sbus_intr, isp, &sbs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}
	ints_setup++;

	/*
	 * Set up logging levels.
	 */
	if (isp_debug) {
		isp->isp_dblev = isp_debug;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_attach(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	ISP_UNLOCK(isp);
	return (0);

bad:

	if (sbs && ints_setup) {
		(void) bus_teardown_intr(dev, sbs->sbus_ires, sbs->ih);
	}

	if (sbs && sbs->sbus_ires) {
		bus_release_resource(dev, SYS_RES_IRQ, iqd, sbs->sbus_ires);
	}

	if (locksetup && isp) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}

	if (regs) {
		(void) bus_release_resource(dev, SYS_RES_MEMORY, rid, regs);
	}

	if (sbs) {
		/*
		 * isp_param points at the sdparam embedded in the softc
		 * itself, so there is nothing separate to free here.
		 */
		free(sbs, M_DEVBUF);
	}
	return (ENXIO);
}

static void
isp_sbus_intr(void *arg)
{
	ispsoftc_t *isp = arg;
	uint32_t isr;
	uint16_t sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		isp_intr(isp, isr, sema, mbox);
	}
	ISP_UNLOCK(isp);
}

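/*
 * IspVirt2Off() translates the core's virtual register offsets into real
 * offsets within the SBus register space using the sbus_poff[] table set
 * up at attach time.  Note that BXR2() ignores its first argument and
 * reads through the local 'isp' pointer, so the token passed for it in
 * isp_sbus_rd_isr() below is never expanded.
 */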
#define	IspVirt2Off(a, x)	\
	(((struct isp_sbussoftc *)a)->sbus_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(sbc, off)		\
	bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, off)

static int
isp_sbus_rd_isr(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbp)
{
	uint16_t isr, sema;

	isr = BXR2(sbc, IspVirt2Off(isp, BIU_ISR));
	sema = BXR2(sbc, IspVirt2Off(isp, BIU_SEMA));
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		*mbp = BXR2(sbc, IspVirt2Off(isp, OUTMAILBOX0));
	}
	return (1);
}

static uint32_t
isp_sbus_rd_reg(ispsoftc_t *isp, int regoff)
{
	uint16_t rval;
	struct isp_sbussoftc *sbs = (struct isp_sbussoftc *) isp;
	int offset = sbs->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rval = bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, offset);
	isp_prt(isp, ISP_LOGDEBUG3,
	    "isp_sbus_rd_reg(off %x) = %x", regoff, rval);
	return (rval);
}

static void
isp_sbus_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
{
	struct isp_sbussoftc *sbs = (struct isp_sbussoftc *) isp;
	int offset = sbs->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	isp_prt(isp, ISP_LOGDEBUG3,
	    "isp_sbus_wr_reg(off %x) = %x", regoff, val);
	bus_space_write_2(isp->isp_bus_tag, isp->isp_bus_handle, offset, val);
	MEMORYBARRIER(isp, SYNC_REG, offset, 2);
}

struct imush {
	ispsoftc_t *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

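/*
 * imc() is the bus_dmamap_load() callback for the control area: the
 * request and result queues live back to back in one contiguous
 * allocation, so the result queue's bus address is simply the request
 * queue's address plus the request queue size.
 */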
static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		ispsoftc_t *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
	}
}

static int
isp_sbus_mbxdma(ispsoftc_t *isp)
{
	struct isp_sbussoftc *sbs = (struct isp_sbussoftc *)isp;
	caddr_t base;
	uint32_t len;
	int i, error, ns;
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

	ISP_UNLOCK(isp);

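	/*
	 * Parent DMA tag for the per-command data maps.  Assuming
	 * isp_dma_tag_create() follows the bus_dma_tag_create() argument
	 * order, this asks for 32-bit addressing with segments that are at
	 * most 16MB long and never cross a 16MB boundary, and for at most
	 * ISP_NSEGS segments per mapping.
	 */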
	if (isp_dma_tag_create(BUS_DMA_ROOTARG(sbs->sbus_dev), 1,
	    BUS_SPACE_MAXADDR_24BIT+1, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR_32BIT, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT,
	    ISP_NSEGS, BUS_SPACE_MAXADDR_24BIT, 0, &sbs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		ISP_LOCK(isp);
		return (1);
	}

	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		ISP_LOCK(isp);
		return (1);
	}
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	sbs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (sbs->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
		free(isp->isp_xflist, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	/*
	 * Allocate and map the request and result queues.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));

	ns = (len / PAGE_SIZE) + 1;
	if (bus_dma_tag_create(sbs->dmat, QENTRY_LEN, BUS_SPACE_MAXADDR_24BIT+1,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR_32BIT, NULL, NULL,
	    len, ns, BUS_SPACE_MAXADDR_24BIT, 0, busdma_lock_mutex, &Giant,
	    &isp->isp_cdmat)) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		free(sbs->dmaps, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
	    &isp->isp_cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_cdmat);
		free(isp->isp_xflist, M_DEVBUF);
		free(sbs->dmaps, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(sbs->dmat, 0, &sbs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(sbs->dmat, sbs->dmaps[i]);
			}
			goto bad;
		}
	}

	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	ISP_LOCK(isp);
	isp->isp_result = base;
	return (0);

bad:
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	free(isp->isp_xflist, M_DEVBUF);
	free(sbs->dmaps, M_DEVBUF);
	ISP_LOCK(isp);
	isp->isp_rquest = NULL;
	return (1);
}

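/*
 * mush_t carries the state isp_sbus_dmasetup() needs to hand to the dma2()
 * bus_dmamap_load() callback: the softc, the CCB, the partially built
 * request entry and the request queue indices.  MUSHERR_NOQENTRIES is a
 * private error code meaning the request queue filled up while building
 * continuation entries; dmasetup turns it into CMD_EAGAIN.
 */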
typedef struct {
	ispsoftc_t *isp;
	void *cmd_token;
	void *rq;
	uint32_t *nxtip;
	uint32_t optr;
	int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2


static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	struct isp_sbussoftc *sbs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	uint16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	sbs = (struct isp_sbussoftc *)mp->isp;
	dp = &sbs->dmaps[isp_handle_index(rq->req_handle & ISP_HANDLE_MASK)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(sbs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(sbs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */

	if (csio->cdb_len > 12) {
		seglim = 0;
	} else {
		seglim = ISP_RQDSEG;
	}
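	/*
	 * A CDB longer than 12 bytes is sent as an extended request
	 * (RQSTYPE_CMDONLY), which carries no data segments of its own,
	 * so a seglim of zero forces every segment into the continuation
	 * entries built below.
	 */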
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		rq->req_flags |= REQFLAG_DATA_IN;
	} else {
		rq->req_flags |= REQFLAG_DATA_OUT;
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		rq->req_dataseg[rq->req_seg_count].ds_base = dm_segs->ds_addr;
		rq->req_dataseg[rq->req_seg_count].ds_count = dm_segs->ds_len;
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		uint16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}

static int
isp_sbus_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq,
	uint32_t *nxtip, uint32_t optr)
{
	struct isp_sbussoftc *sbs = (struct isp_sbussoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
	eptr = dma2;


	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &sbs->dmaps[isp_handle_index(
			    rq->req_handle & ISP_HANDLE_MASK)];
			s = splsoftvm();
			error = bus_dmamap_load(sbs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(sbs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	if (isp->isp_dblev & ISP_LOGDEBUG1) {
		isp_print_bytes(isp, "Request Queue Entry", QENTRY_LEN, rq);
	}
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	}
	return (CMD_QUEUED);
}

static void
isp_sbus_dmateardown(ispsoftc_t *isp, XS_T *xs, uint32_t handle)
{
	struct isp_sbussoftc *sbs = (struct isp_sbussoftc *)isp;
	bus_dmamap_t *dp;
	dp = &sbs->dmaps[isp_handle_index(handle & ISP_HANDLE_MASK)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(sbs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(sbs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(sbs->dmat, *dp);
}

static void
isp_sbus_reset0(ispsoftc_t *isp)
{
	ISP_DISABLE_INTS(isp);
}

static void
isp_sbus_reset1(ispsoftc_t *isp)
{
	ISP_ENABLE_INTS(isp);
}

static void
isp_sbus_dumpregs(ispsoftc_t *isp, const char *msg)
{
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));


	ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
	printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		ISP_READ(isp, CDMA_FIFO_STS));
	printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		ISP_READ(isp, DDMA_FIFO_STS));
	printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		ISP_READ(isp, SXP_INTERRUPT),
		ISP_READ(isp, SXP_GROSS_ERR),
		ISP_READ(isp, SXP_PINS_CTRL));
	ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	printf("    mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
}
865