isp_pci.c revision 111119
/* $FreeBSD: head/sys/dev/isp/isp_pci.c 111119 2003-02-19 05:47:46Z imp $ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>

#include <pci/pcireg.h>
#include <pci/pcivar.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#include <dev/isp/isp_freebsd.h>

static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
static int
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int isp_pci_mbxdma(struct ispsoftc *);
static int
isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t);
static void
isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);

static void isp_pci_reset1(struct ispsoftc *);
static void isp_pci_dumpregs(struct ispsoftc *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);

struct isp_pcisoftc {
	struct ispsoftc			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
};
extern ispfwfunc *isp_get_firmware_p;

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (0);
}

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int tval, rtp, rgd, iqd, m1, m2, isp_debug, role;
	u_int32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp = NULL;
	struct ispmdvec *mdvp;
	const char *sptr;
	int locksetup = 0;

	/*
	 * Figure out if we're supposed to skip this one.
	 * If we are, we actually go to ISP_ROLE_NONE.
	 */

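	/*
	 * The per-unit options read below come from the kernel environment
	 * via resource_int_value()/resource_string_value(). A sketch of how
	 * they would typically be supplied (standard FreeBSD
	 * /boot/device.hints syntax; hint names as used in this function):
	 *
	 *	hint.isp.0.disable="1"		# skip attaching this unit
	 *	hint.isp.0.prefer_iomap="1"	# try port-space mapping first
	 *	hint.isp.0.fwload_disable="1"	# don't download firmware
	 *	hint.isp.0.iid="7"		# initiator/loop ID
	 */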
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "disable", &tval) == 0 && tval) {
		device_printf(dev, "device is disabled\n");
		/* but return 0 so the !$)$)*!$*) unit isn't reused */
		return (0);
	}

	role = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "role", &role) == 0 &&
	    ((role & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) == 0)) {
		device_printf(dev, "setting role to 0x%x\n", role);
	} else {
#ifdef	ISP_TARGET_MODE
		role = ISP_ROLE_INITIATOR|ISP_ROLE_TARGET;
#else
		role = ISP_DEFAULT_ROLES;
#endif
	}

	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (pcs == NULL) {
		device_printf(dev, "cannot allocate softc\n");
		return (ENOMEM);
	}

	/*
	 * Figure out which we should try first - memory mapping or i/o mapping?
	 */
#ifdef	__alpha__
	m1 = PCIM_CMD_MEMEN;
	m2 = PCIM_CMD_PORTEN;
#else
	m1 = PCIM_CMD_PORTEN;
	m2 = PCIM_CMD_MEMEN;
#endif

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_iomap", &tval) == 0 && tval != 0) {
		m1 = PCIM_CMD_PORTEN;
		m2 = PCIM_CMD_MEMEN;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_memmap", &tval) == 0 && tval != 0) {
		m1 = PCIM_CMD_MEMEN;
		m2 = PCIM_CMD_PORTEN;
	}

	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 1);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose)
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_10160;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2300;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2312;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	isp = &pcs->pci_isp;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
	isp->isp_role = role;
	isp->isp_dev = dev;

	/*
	 * Try and find firmware for this device.
	 */

	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef	ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
		PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_23XX(isp)) {
		/*
		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
		 */
		isp->isp_touched = 1;
	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 1);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	iqd = 0;
	irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0,
	    1, RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fullduplex", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
#ifdef	ISP_FW_CRASH_DUMP
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fw_dump_enable", &tval) == 0 && tval != 0) {
		size_t amt = 0;
		if (IS_2200(isp)) {
			amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
		} else if (IS_23XX(isp)) {
			amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
		}
		if (amt) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
		} else {
			device_printf(dev,
			    "f/w crash dumps not supported for this model\n");
		}
	}
#endif

	sptr = 0;
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "topology", (const char **) &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "portwwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_port_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWPN;
		}
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "nodewwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_node_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWNN;
		}
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	isp->isp_osinfo.default_id = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "iid", &tval) == 0) {
		isp->isp_osinfo.default_id = tval;
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}
	if (isp->isp_osinfo.default_id == -1) {
		if (IS_FC(isp)) {
			isp->isp_osinfo.default_id = 109;
		} else {
			isp->isp_osinfo.default_id = 7;
		}
	}

	isp_debug = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "debug", &isp_debug);

	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;

	if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Set up logging levels.
	 */
	if (isp_debug) {
		isp->isp_dblev = isp_debug;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;

	/*
	 * Last minute checks...
	 */
	if (IS_2312(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_attach(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:

	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

	if (locksetup && isp) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}

	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param)
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		free(pcs, M_DEVBUF);
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

static void
isp_pci_intr(void *arg)
{
	struct ispsoftc *isp = arg;
	u_int16_t isr, sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		int iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_intr(isp, isr, sema, mbox);
		isp->isp_osinfo.intsok = iok;
	}
	ISP_UNLOCK(isp);
}

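/*
 * Register access helpers. A (block | offset) "virtual" register token is
 * turned into an actual PCI bus-space offset by looking the block up in the
 * per-chip pci_poff[] table (filled in at attach time) and adding the low
 * byte of the token; BXR2/BXW2 then perform 16-bit bus-space accesses.
 */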
#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)

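/*
 * Read a register repeatedly until two successive reads agree, giving up
 * after 1000 attempts. Used on the ISP2100 (see isp_pci_rd_isr below),
 * where, evidently, a single read of these registers may not return a
 * settled value.
 */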
static INLINE int
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

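/*
 * 23XX variant: these chips export a combined 32-bit RISC-to-host status
 * register (BIU_R2HSTSLO) whose low 16 bits identify the pending event and
 * whose high 16 bits carry outgoing mailbox 0, so we decode that single
 * register into the (isr, sema, mbox0) triple the core code expects.
 */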
static int
isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int32_t r2hisr;

	if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
		*isrp = 0;
		return (0);
	}
	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
	    IspVirt2Off(isp, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		return (0);
	}
}

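/*
 * Basic register read/write. SXP registers are only reachable while the
 * SXP select bit is set in BIU_CONF1, so flip that bit around the access;
 * callers are assumed to have paused the RISC processor first.
 */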
static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}

static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}


struct imush {
	struct ispsoftc *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

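/*
 * Callback for the bus_dmamap_load() of the control space below: carve the
 * single contiguous allocation into the request queue, the result queue
 * and (for Fibre Channel) the scratch area, recording the bus address of
 * each piece.
 */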
static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		struct ispsoftc *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}

/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)

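/*
 * One-time DMA setup: create the parent tag, the xflist command lookup
 * array and the per-command maps, then allocate and load one contiguous
 * chunk of control space holding the request and result queues (plus the
 * FC scratch area).
 */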
static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error, ns;
	bus_size_t alim, slim;
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

#ifdef	ISP_DAC_SUPPORTED
	alim = BUS_SPACE_UNRESTRICTED;
#else
	alim = BUS_SPACE_MAXADDR_32BIT;
#endif
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = BUS_SPACE_MAXADDR_32BIT;
	} else {
		slim = BUS_SPACE_MAXADDR_24BIT;
	}

	ISP_UNLOCK(isp);
	if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim,
	    NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		ISP_LOCK(isp);
		return (1);
	}

	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		ISP_LOCK(isp);
		return (1);
	}
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pcs->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
		free(isp->isp_xflist, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}

	ns = (len / PAGE_SIZE) + 1;
	if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, alim, alim,
	    NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		free(pcs->dmaps, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
	    &isp->isp_cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_cdmat);
		free(isp->isp_xflist, M_DEVBUF);
		free(pcs->dmaps, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
			}
			goto bad;
		}
	}

	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	isp->isp_result = base;
	if (IS_FC(isp)) {
		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		FCPARAM(isp)->isp_scratch = base;
	}
	ISP_LOCK(isp);
	return (0);

bad:
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	free(isp->isp_xflist, M_DEVBUF);
	free(pcs->dmaps, M_DEVBUF);
	ISP_LOCK(isp);
	isp->isp_rquest = NULL;
	return (1);
}

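/*
 * Context passed from isp_pci_dmasetup() to the busdma callbacks below:
 * the softc, the CCB being mapped, the (partially built) request queue
 * entry, where to store the updated next-in index, and the consumer index
 * (optr) used to detect request queue overflow.
 */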
typedef struct {
	struct ispsoftc *isp;
	void *cmd_token;
	void *rq;
	u_int16_t *nxtip;
	u_int16_t optr;
	u_int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

#ifdef	ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in, already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define	STATUS_WITH_DATA	1

static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	ct_entry_t *cto, *qe;
	u_int8_t scsi_status;
	u_int16_t curi, nxti, handle;
	u_int32_t sflags;
	int32_t resid;
	int nth_ctio, nctios, send_status;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		cto->ct_header.rqs_seqno = 1;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
		isp_put_ctio(isp, cto, qe);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */

	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

#ifndef	STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	pcs = (struct isp_pcisoftc *)isp;
	dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;

	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on- the caller will do that.
			 */

			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
			}
			if (send_status) {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}

/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code
 * and improves performance.
 */

static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	ct2_entry_t *cto, *qe;
	u_int16_t curi, nxti;
	int segcnt;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;

	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_header.rqs_seqno = 1;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		isp_put_ctio2(isp, cto, qe);
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGERR,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}

	nxti = *mp->nxtip;

	/*
	 * Set up the CTIO2 data segments.
	 */
	for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg;
	    cto->ct_seg_count++, segcnt++) {
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base =
		    dm_segs[segcnt].ds_addr;
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count =
		    dm_segs[segcnt].ds_len;
		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "isp_send_ctio2: ent0[%d]0x%llx:%lld",
		    cto->ct_seg_count, (long long)dm_segs[segcnt].ds_addr,
		    (long long)dm_segs[segcnt].ds_len);
	}

	while (segcnt < nseg) {
		u_int16_t curip;
		int seg;
		ispcontreq_t local, *crq = &local, *qep;

		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		curip = nxti;
		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			ISP_UNLOCK(isp);
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "tdma_mkfc: request queue overflow");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		cto->ct_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		for (seg = 0; segcnt < nseg && seg < ISP_CDSEG;
		    segcnt++, seg++) {
			crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr;
			crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len;
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]0x%llx:%lld",
			    cto->ct_header.rqs_entry_count-1, seg,
			    (long long) dm_segs[segcnt].ds_addr,
			    (long long) dm_segs[segcnt].ds_len);
			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
			cto->ct_seg_count++;
		}
		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
		isp_put_cont_req(isp, crq, qep);
		ISP_TDQE(isp, "cont entry", curi, qep);
	}

	/*
	 * Now do final twiddling for the CTIO itself.
	 */
	cto->ct_header.rqs_seqno = 1;
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
	    cto->ct_resid);
	isp_put_ctio2(isp, cto, qe);
	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
	*mp->nxtip = nxti;
}
#endif

static void dma2(void *, bus_dma_segment_t *, int, int);

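/*
 * Initiator-mode busdma callback: fill the data segments of the initial
 * request queue entry and then chain however many continuation entries
 * (RQSTYPE_DATASEG) are needed to cover the rest of the transfer.
 */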
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
				dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
				dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			seglim++;
			/*
			 * Account for the segment just consumed *before*
			 * advancing dm_segs, lest we subtract the length
			 * of the next (possibly nonexistent) segment.
			 */
			datalen -= dm_segs->ds_len;
			dm_segs++;
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}

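/*
 * Map a command's data and fill in its queue entry (or entries). Returns
 * CMD_QUEUED on success, CMD_EAGAIN if we ran out of request queue
 * entries, or CMD_COMPLETE with an error code set in the CCB otherwise.
 */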
static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
	u_int16_t *nxtip, u_int16_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			(*eptr)(mp, NULL, 0, 0);
			goto mbxsync;
		}
	} else
#endif
	eptr = dma2;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			s = splsoftvm();
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}

static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}

static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
			ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
			ISP_READ(isp, CDMA_FIFO_STS));
		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
			ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
			ISP_READ(isp, DDMA_FIFO_STS));
		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
			ISP_READ(isp, SXP_INTERRUPT),
			ISP_READ(isp, SXP_GROSS_ERR),
			ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf("    mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf("    PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}
1911