isp_pci.c revision 77365
/* $FreeBSD: head/sys/dev/isp/isp_pci.c 77365 2001-05-28 21:20:43Z mjacob $ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>

#include <pci/pcireg.h>
#include <pci/pcivar.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#include <dev/isp/isp_freebsd.h>

static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, XS_T *,
	ispreq_t *, u_int16_t *, u_int16_t));
static void isp_pci_dmateardown __P((struct ispsoftc *, XS_T *, u_int16_t));

static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *, const char *));

#ifndef	ISP_CODE_ORG
#define	ISP_CODE_ORG		0x1000
#endif

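/*
 * Method vector tables, one per chip family. Each carries the register
 * accessors, DMA setup/teardown, reset and dumpregs hooks, plus default
 * BIU CONF1 flags. The FC variants (2100/2200) leave the trailing
 * members (including the CONF1 flags) zero-initialized.
 */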
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

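/*
 * The composite IDs above are laid out as (device << 16) | vendor,
 * which matches the value pci_get_devid() returns and the composition
 * the probe routine builds from pci_get_device() and pci_get_vendor().
 */
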
/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);

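/*
 * Per-instance softc. pci_poff[] maps each register block (BIU, MBOX,
 * SXP, RISC, DMA) to its offset within the mapped BAR; the attach
 * routine overrides individual entries for chips whose layout differs.
 */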
struct isp_pcisoftc {
	struct ispsoftc			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			parent_dmat;
	bus_dma_tag_t			cntrol_dmat;
	bus_dmamap_t			cntrol_dmap;
	bus_dmamap_t			*dmaps;
};

ispfwfunc *isp_get_firmware_p = NULL;

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr __P((void *));

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_VERSION(isp, 1);

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (device_get_unit(dev) == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (0);
}

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int tval, rtp, rgd, iqd, m1, m2, isp_debug, role;
	u_int32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp = NULL;
	struct ispmdvec *mdvp;
	bus_size_t lim;
	char *sptr;
	int locksetup = 0;

	/*
	 * Figure out if we're supposed to skip this one.
	 * If we are, we actually go to ISP_ROLE_NONE.
	 */

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "disable", &tval) == 0 && tval) {
		device_printf(dev, "device is disabled\n");
		/* but return 0 so the !$)$)*!$*) unit isn't reused */
		return (0);
	}

	role = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "role", &role) == 0 &&
	    ((role & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) == 0)) {
		device_printf(dev, "setting role to 0x%x\n", role);
	} else {
#ifdef	ISP_TARGET_MODE
		role = ISP_ROLE_INITIATOR|ISP_ROLE_TARGET;
#else
		role = ISP_DEFAULT_ROLES;
#endif
	}

	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (pcs == NULL) {
		device_printf(dev, "cannot allocate softc\n");
		return (ENOMEM);
	}

	/*
	 * Figure out which we should try first - memory mapping or i/o mapping?
	 */
#ifdef	__alpha__
	m1 = PCIM_CMD_MEMEN;
	m2 = PCIM_CMD_PORTEN;
#else
	m1 = PCIM_CMD_PORTEN;
	m2 = PCIM_CMD_MEMEN;
#endif

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_iomap", &tval) == 0 && tval != 0) {
		m1 = PCIM_CMD_PORTEN;
		m2 = PCIM_CMD_MEMEN;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_memmap", &tval) == 0 && tval != 0) {
		m1 = PCIM_CMD_MEMEN;
		m2 = PCIM_CMD_PORTEN;
	}

	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 1);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose)
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	lim = BUS_SPACE_MAXSIZE_32BIT;
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
		lim = BUS_SPACE_MAXSIZE_24BIT;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	isp = &pcs->pci_isp;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
	isp->isp_role = role;
	isp->isp_dev = dev;

	/*
	 * Try and find firmware for this device.
	 */

	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef	ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
		PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 1);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, lim + 1,
	    255, lim, 0, &pcs->parent_dmat) != 0) {
		device_printf(dev, "could not create master dma tag\n");
		free(isp->isp_param, M_DEVBUF);
		free(pcs, M_DEVBUF);
		return (ENXIO);
	}

	iqd = 0;
	irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0,
	    1, RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fullduplex", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}

	sptr = 0;
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "topology", &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "portwwn", &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_port_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWN;
		}
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "nodewwn", &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_node_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWN;
		}
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	isp_debug = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "debug", &isp_debug);

	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", MTX_DEF);
	locksetup++;

#ifdef	ISP_SMPLOCK
#define	INTR_FLAGS	INTR_TYPE_CAM | INTR_MPSAFE | INTR_ENTROPY
#else
#define	INTR_FLAGS	INTR_TYPE_CAM | INTR_ENTROPY
#endif
	if (bus_setup_intr(dev, irq, INTR_FLAGS, isp_pci_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Set up logging levels.
	 */
	if (isp_debug) {
		isp->isp_dblev = isp_debug;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_attach(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:

	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

	if (locksetup && isp) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}

	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param)
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		free(pcs, M_DEVBUF);
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

static void
isp_pci_intr(void *arg)
{
	struct ispsoftc *isp = arg;
	ISP_LOCK(isp);
	(void) isp_intr(isp);
	ISP_UNLOCK(isp);
}

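/*
 * Register accessors. A register offset encodes its block in the bits
 * covered by _BLK_REG_MASK; pci_poff[] turns the block into an offset
 * within the mapped BAR. SXP registers overlay other blocks, so they
 * are paged in (and back out) via BIU_CONF1 around the access.
 */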
static u_int16_t
isp_pci_rd_reg(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
	}
}

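/*
 * 1080/1280/12160 variants: these chips have two SXP banks (selected
 * with BIU_PCI1080_CONF1_SXP0/SXP1 according to SXP_BANK1_SELECT) and a
 * separately paged DMA block (BIU_PCI1080_CONF1_DMA), all switched
 * through BIU_CONF1 around the access.
 */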
static u_int16_t
isp_pci_rd_reg_1080(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		isp_pci_wr_reg(isp, BIU_CONF1, tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	if (oc) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		isp_pci_wr_reg(isp, BIU_CONF1, tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	if (oc) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
	}
}

static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int));
static void isp_map_result __P((void *, bus_dma_segment_t *, int, int));
static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int));

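/*
 * Little helper handed to the one-shot queue-mapping callbacks below so
 * they can record the loaded bus address (or an error) back into the
 * softc.
 */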
struct imush {
	struct ispsoftc *isp;
	int error;
};

static void
isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		imushp->isp->isp_rquest_dma = segs->ds_addr;
	}
}

static void
isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		imushp->isp->isp_result_dma = segs->ds_addr;
	}
}

static void
isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		fcparam *fcp = imushp->isp->isp_param;
		fcp->isp_scdma = segs->ds_addr;
	}
}

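/*
 * Allocate the hardware-visible control structures: the request and
 * result queues (and, for FC cards, the scratch area) all come from
 * one contiguous bus_dmamem allocation on cntrol_dmat, while each
 * command slot gets its own data map from parent_dmat.
 */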
static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error;
	bus_size_t lim;
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

	len = sizeof (XS_T *) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pci->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pci->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma maps");
		free(isp->isp_xflist, M_DEVBUF);
		return (1);
	}

	if (IS_FC(isp) || IS_ULTRA2(isp))
		lim = BUS_SPACE_MAXADDR + 1;
	else
		lim = BUS_SPACE_MAXADDR_24BIT + 1;

	/*
	 * Allocate and map the request and result queues, plus the FC
	 * scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}
	if (bus_dma_tag_create(pci->parent_dmat, PAGE_SIZE, lim,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
	    BUS_SPACE_MAXSIZE_32BIT, 0, &pci->cntrol_dmat) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		return (1);
	}
	if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base,
	    BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		return (1);
	}

	isp->isp_rquest = base;
	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest,
	    ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)), isp_map_rquest, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for DMA request queue", im.error);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		isp->isp_rquest = NULL;
		return (1);
	}
	isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	im.error = 0;
	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result,
	    ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)), isp_map_result, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for DMA result queue", im.error);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		isp->isp_rquest = NULL;
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			free(isp->isp_xflist, M_DEVBUF);
			free(pci->dmaps, M_DEVBUF);
			isp->isp_rquest = NULL;
			return (1);
		}
	}

	if (IS_FC(isp)) {
		fcparam *fcp = (fcparam *) isp->isp_param;
		fcp->isp_scratch = base +
			ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)) +
			ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		im.error = 0;
		bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap,
		    fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, &im, 0);
		if (im.error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d loading FC scratch area", im.error);
			free(isp->isp_xflist, M_DEVBUF);
			free(pci->dmaps, M_DEVBUF);
			isp->isp_rquest = NULL;
			return (1);
		}
	}
	return (0);
}

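/*
 * Bundle of state handed through the bus_dmamap_load() callbacks (dma2
 * and, for target mode, tdma_mk/tdma_mkfc). iptrp/optr are the request
 * queue in/out pointers; error is set by the callback, and
 * MUSHERR_NOQENTRIES is mapped to CMD_EAGAIN by isp_pci_dmasetup.
 */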
typedef struct {
	struct ispsoftc *isp;
	void *cmd_token;
	void *rq;
	u_int16_t *iptrp;
	u_int16_t optr;
	u_int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

#ifdef	ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define	STATUS_WITH_DATA	1

static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pci;
	bus_dmamap_t *dp;
	u_int8_t scsi_status;
	ct_entry_t *cto;
	u_int16_t handle;
	u_int32_t totxfr, sflags;
	int nctios, send_status;
	int32_t resid;
	int i, j;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	cto = mp->rq;

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		cto->ct_header.rqs_seqno = 1;
		isp_prt(mp->isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(mp->isp, "tdma_mk[no data]", *mp->iptrp, cto);
		ISP_SWIZ_CTIO(mp->isp, cto, cto);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Check to see that we don't overflow.
	 */
	for (i = 0, j = *mp->iptrp; i < nctios; i++) {
		j = ISP_NXT_QENTRY(j, RQUEST_QUEUE_LEN(mp->isp));
		if (j == mp->optr) {
			isp_prt(mp->isp, ISP_LOGWARN,
			    "Request Queue Overflow [tdma_mk]");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */
	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

#ifndef	STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	totxfr = cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	pci = (struct isp_pcisoftc *)mp->isp;
	dp = &pci->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	while (nctios--) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				totxfr += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(mp->isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nctios == 0) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on; the caller will do that.
			 */
			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
			}
			if (send_status) {
				isp_prt(mp->isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(mp->isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			ISP_TDQE(mp->isp, "last tdma_mk", *mp->iptrp, cto);
			ISP_SWIZ_CTIO(mp->isp, cto, cto);
		} else {
			ct_entry_t *octo = cto;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(mp->isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);
			ISP_TDQE(mp->isp, "tdma_mk", *mp->iptrp, cto);

			/*
			 * Get a new CTIO
			 */
			cto = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
			j = *mp->iptrp;
			*mp->iptrp =
			    ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(mp->isp));
			if (*mp->iptrp == mp->optr) {
				isp_prt(mp->isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}
			/*
			 * Fill in the new CTIO with info from the old one.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_fwhandle = octo->ct_fwhandle;
			cto->ct_header.rqs_flags = 0;
			cto->ct_lun = octo->ct_lun;
			cto->ct_iid = octo->ct_iid;
			cto->ct_reserved2 = octo->ct_reserved2;
			cto->ct_tgt = octo->ct_tgt;
			cto->ct_flags = octo->ct_flags;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_tag_val = octo->ct_tag_val;
			cto->ct_tag_type = octo->ct_tag_type;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_timeout = octo->ct_timeout;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
			/*
			 * Now swizzle the old one for the consumption
			 * of the chip and give it to the firmware to
			 * work on while we do the next.
			 */
			ISP_SWIZ_CTIO(mp->isp, octo, octo);
			ISP_ADD_REQUEST(mp->isp, j);
		}
	}
}

static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pci;
	bus_dmamap_t *dp;
	ct2_entry_t *cto;
	u_int16_t scsi_status, send_status, send_sense, handle;
	u_int32_t totxfr, datalen;
	u_int8_t sense[QLTM_SENSELEN];
	int nctios, j;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	csio = mp->cmd_token;
	cto = mp->rq;

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(mp->isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_header.rqs_seqno = 1;
		/* ct_syshandle contains the handle set by caller */
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_flags |= CT2_NO_DATA;
		if (cto->ct_resid > 0)
			cto->rsp.m1.ct_scsi_status |= CT2_DATA_UNDER;
		else if (cto->ct_resid < 0)
			cto->rsp.m1.ct_scsi_status |= CT2_DATA_OVER;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		ISP_TDQE(mp->isp, "dma2_tgt_fc[no data]", *mp->iptrp, cto);
		isp_prt(mp->isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		ISP_SWIZ_CTIO2(mp->isp, cto, cto);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(mp->isp, ISP_LOGWARN,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}

	nctios = nseg / ISP_RQDSEG_T2;
	if (nseg % ISP_RQDSEG_T2) {
		nctios++;
	}

	/*
	 * Save the handle, status, reloff, and residual. We'll reinsert the
	 * handle into the last CTIO2 we're going to send, and reinsert status
	 * and residual (and possibly sense data) if that's to be sent as well.
	 *
	 * We preserve ct_reloff and adjust it for each data CTIO2 we send past
	 * the first one. This is needed so that the FCP DATA IUs being sent
	 * out have the correct offset (they can arrive at the other end out
	 * of order).
	 */

	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	send_status = (cto->ct_flags & CT2_SENDSTATUS) != 0;

	if (send_status) {
		cto->ct_flags &= ~(CT2_SENDSTATUS|CT2_CCINCR);

		/*
		 * Preserve residual, which is actually the total count.
		 */
		datalen = cto->ct_resid;

		/*
		 * Save actual SCSI status. We'll reinsert the
		 * CT2_SNSLEN_VALID later if appropriate.
		 */
		scsi_status = cto->rsp.m0.ct_scsi_status & 0xff;
		send_sense = cto->rsp.m0.ct_scsi_status & CT2_SNSLEN_VALID;

		/*
		 * If we're sending status and have a CHECK CONDITION and
		 * have sense data, we send one more CTIO2 with just the
		 * status and sense data. The upper layers have stashed
		 * the sense data in the dataseg structure for us.
		 */

		if ((scsi_status & 0xf) == SCSI_STATUS_CHECK_COND &&
		    send_sense) {
			bcopy(cto->rsp.m0.ct_dataseg, sense, QLTM_SENSELEN);
			nctios++;
		}
	} else {
		scsi_status = send_sense = datalen = 0;
	}

	totxfr = cto->ct_resid = 0;
	cto->rsp.m0.ct_scsi_status = 0;
	MEMZERO(&cto->rsp, sizeof (cto->rsp));

	pci = (struct isp_pcisoftc *)mp->isp;
	dp = &pci->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	while (nctios--) {
		int seg, seglim;

		seglim = nseg;
		if (seglim) {
			if (seglim > ISP_RQDSEG_T2)
				seglim = ISP_RQDSEG_T2;

			for (seg = 0; seg < seglim; seg++) {
				cto->rsp.m0.ct_dataseg[seg].ds_base =
				    dm_segs->ds_addr;
				cto->rsp.m0.ct_dataseg[seg].ds_count =
				    dm_segs->ds_len;
				cto->rsp.m0.ct_xfrlen += dm_segs->ds_len;
				totxfr += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending a
			 * synthesized MODE1 final status with sense data.
			 */
			if (send_sense == 0) {
				isp_prt(mp->isp, ISP_LOGWARN,
				    "dma2_tgt_fc ran out of segments, "
				    "no SENSE DATA");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_rxid,
		 * ct_timeout have been carried over unchanged from what
		 * our caller had set.
		 *
		 * The field ct_reloff is either what the caller set, or
		 * what we've added to below.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're sending a MODE1 status
		 * as the last CTIO.
		 *
		 */

		if (nctios == 0) {

			/*
			 * We're the last in a sequence of CTIO2s, so mark this
			 * CTIO2 and save the handle to the CCB such that when
			 * this CTIO2 completes we can free dma resources and
			 * do whatever else we need to do to finish the rest
			 * of the command.
			 */

			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				/*
				 * Get 'real' residual and set flags based
				 * on it.
				 */
				cto->ct_resid = datalen - totxfr;
				if (send_sense) {
					MEMCPY(cto->rsp.m1.ct_resp, sense,
					    QLTM_SENSELEN);
					cto->rsp.m1.ct_senselen =
					    QLTM_SENSELEN;
					scsi_status |= CT2_SNSLEN_VALID;
					cto->rsp.m1.ct_scsi_status =
					    scsi_status;
					cto->ct_flags &= CT2_FLAG_MMASK;
					cto->ct_flags |= CT2_FLAG_MODE1 |
					    CT2_NO_DATA | CT2_SENDSTATUS |
					    CT2_CCINCR;
					if (cto->ct_resid > 0)
						cto->rsp.m1.ct_scsi_status |=
						    CT2_DATA_UNDER;
					else if (cto->ct_resid < 0)
						cto->rsp.m1.ct_scsi_status |=
						    CT2_DATA_OVER;
				} else {
					cto->rsp.m0.ct_scsi_status =
					    scsi_status;
					cto->ct_flags |=
					    CT2_SENDSTATUS | CT2_CCINCR;
					if (cto->ct_resid > 0)
						cto->rsp.m0.ct_scsi_status |=
						    CT2_DATA_UNDER;
					else if (cto->ct_resid < 0)
						cto->rsp.m0.ct_scsi_status |=
						    CT2_DATA_OVER;
				}
			}
			ISP_TDQE(mp->isp, "last dma2_tgt_fc", *mp->iptrp, cto);
			isp_prt(mp->isp, ISP_LOGTDEBUG1,
			    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x"
			    " ssts 0x%x res %d", cto->ct_rxid,
			    csio->ccb_h.target_lun, (int) cto->ct_iid,
			    cto->ct_flags, cto->ct_status,
			    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
			ISP_SWIZ_CTIO2(mp->isp, cto, cto);
		} else {
			ct2_entry_t *octo = cto;

			/*
			 * Make sure handle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			ISP_TDQE(mp->isp, "dma2_tgt_fc", *mp->iptrp, cto);
			isp_prt(mp->isp, ISP_LOGTDEBUG1,
			    "CTIO2[%x] lun %d->iid%d flgs 0x%x",
			    cto->ct_rxid, csio->ccb_h.target_lun,
			    (int) cto->ct_iid, cto->ct_flags);
			/*
			 * Get a new CTIO2
			 */
			cto = (ct2_entry_t *)
			    ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
			j = *mp->iptrp;
			*mp->iptrp =
			    ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(mp->isp));
			if (*mp->iptrp == mp->optr) {
				isp_prt(mp->isp, ISP_LOGWARN,
				    "Queue Overflow in dma2_tgt_fc");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Fill in the new CTIO2 with info from the old one.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			/* ct_header.rqs_seqno && ct_syshandle done later */
			cto->ct_fwhandle = octo->ct_fwhandle;
			cto->ct_lun = octo->ct_lun;
			cto->ct_iid = octo->ct_iid;
			cto->ct_rxid = octo->ct_rxid;
			cto->ct_flags = octo->ct_flags;
			cto->ct_status = 0;
			cto->ct_resid = 0;
			cto->ct_timeout = octo->ct_timeout;
			cto->ct_seg_count = 0;
			/*
			 * Adjust the new relative offset by the amount which
			 * is recorded in the data segment of the old CTIO2 we
			 * just finished filling out.
			 */
			cto->ct_reloff += octo->rsp.m0.ct_xfrlen;
			MEMZERO(&cto->rsp, sizeof (cto->rsp));
			ISP_SWIZ_CTIO2(mp->isp, octo, octo);
			ISP_ADD_REQUEST(mp->isp, j);
		}
	}
}
#endif

static void dma2 __P((void *, bus_dma_segment_t *, int, int));

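/*
 * Initiator-mode DMA callback: fill the dataseg array of the initial
 * request entry (up to ISP_RQDSEG, or ISP_RQDSEG_T2 for FC, and none at
 * all when a long CDB leaves no room), then chain RQSTYPE_DATASEG
 * continuation entries until the transfer is covered.
 */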
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pci;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	ispcontreq_t *crq;
	int seglim, datalen;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	rq = mp->rq;
	pci = (struct isp_pcisoftc *)mp->isp;
	dp = &pci->dmaps[isp_handle_index(rq->req_handle)];

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */

	if (IS_FC(mp->isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(mp->isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
				dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
				dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
#if	0
		if (IS_FC(mp->isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			device_printf(mp->isp->isp_dev,
			    "seg0[%d] cnt 0x%x paddr 0x%08x\n",
			    rq->req_seg_count,
			    rq2->req_dataseg[rq2->req_seg_count].ds_count,
			    rq2->req_dataseg[rq2->req_seg_count].ds_base);
		} else {
			device_printf(mp->isp->isp_dev,
			    "seg0[%d] cnt 0x%x paddr 0x%08x\n",
			    rq->req_seg_count,
			    rq->req_dataseg[rq->req_seg_count].ds_count,
			    rq->req_dataseg[rq->req_seg_count].ds_base);
		}
#endif
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		crq = (ispcontreq_t *)
		    ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
		*mp->iptrp =
		    ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(mp->isp));
		if (*mp->iptrp == mp->optr) {
			isp_prt(mp->isp,
			    ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
#if	0
			device_printf(mp->isp->isp_dev,
			    "seg%d[%d] cnt 0x%x paddr 0x%08x\n",
			    rq->req_header.rqs_entry_count-1,
			    seglim, crq->req_dataseg[seglim].ds_count,
			    crq->req_dataseg[seglim].ds_base);
#endif
			/*
			 * Account for this segment before advancing the
			 * segment pointer.
			 */
			datalen -= dm_segs->ds_len;
			rq->req_seg_count++;
			dm_segs++;
			seglim++;
		}
	}
}

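/*
 * Set up DMA for a command. Returns CMD_QUEUED on success, CMD_EAGAIN
 * if we ran out of request queue entries, or CMD_COMPLETE with a CAM
 * error already set in the CCB on hard failure.
 */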
static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
	u_int16_t *iptrp, u_int16_t optr)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr) __P((void *, bus_dma_segment_t *, int, int));

#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->iptrp = iptrp;
			mp->optr = optr;
			mp->error = 0;
			(*eptr)(mp, NULL, 0, 0);
			goto exit;
		}
	} else
#endif
	eptr = dma2;

	/*
	 * NB: if we need to do request queue entry swizzling,
	 * NB: this is where it would need to be done for cmds
	 * NB: that move no data. For commands that move data,
	 * NB: swizzling would take place in those functions.
	 */
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		return (CMD_QUEUED);
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->iptrp = iptrp;
	mp->optr = optr;
	mp->error = 0;

	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &pci->dmaps[isp_handle_index(rq->req_handle)];
			s = splsoftvm();
			error = bus_dmamap_load(pci->parent_dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pci->parent_dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
#ifdef	ISP_TARGET_MODE
exit:
#endif
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	} else {
		/*
		 * Check to see if we weren't cancelled while sleeping on
		 * getting DMA resources...
		 */
		if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			if (dp) {
				bus_dmamap_unload(pci->parent_dmat, *dp);
			}
			return (CMD_COMPLETE);
		}
		return (CMD_QUEUED);
	}
}

static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pci->dmaps[isp_handle_index(handle)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pci->parent_dmat, *dp);
}

static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
			ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
			ISP_READ(isp, CDMA_FIFO_STS));
		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
			ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
			ISP_READ(isp, DDMA_FIFO_STS));
		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
			ISP_READ(isp, SXP_INTERRUPT),
			ISP_READ(isp, SXP_GROSS_ERR),
			ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf("    mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf("    PCI Status Command/Status=%x\n",
	    pci_read_config(pci->pci_dev, PCIR_COMMAND, 1));
}