/*-
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997-2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/isp/isp_pci.c 158817 2006-05-22 06:51:48Z mjacob $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#if __FreeBSD_version < 500000
#include <pci/pcireg.h>
#include <pci/pcivar.h>
#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#else
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#endif
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#include <dev/isp/isp_freebsd.h>

#if __FreeBSD_version < 500000
#define	BUS_PROBE_DEFAULT	0
#endif

static uint16_t isp_pci_rd_reg(ispsoftc_t *, int);
static void isp_pci_wr_reg(ispsoftc_t *, int, uint16_t);
static uint16_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint16_t);
static int
isp_pci_rd_isr(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int
isp_pci_rd_isr_2300(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static int
isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint16_t *, uint16_t);
static void
isp_pci_dmateardown(ispsoftc_t *, XS_T *, uint16_t);

static void isp_pci_reset1(ispsoftc_t *);
static void isp_pci_dumpregs(ispsoftc_t *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};
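
/*
 * Note (added commentary): each ispmdvec above is the per-chip dispatch
 * vector the core driver calls through: ISR read, register read/write,
 * mailbox/queue DMA setup, per-command DMA map/teardown, pre- and
 * post-reset hooks, and a register dump routine, followed (where present)
 * by the firmware image pointer (dv_ispfw, possibly filled in at attach
 * time via isp_get_firmware_p) and, for the SCSI parts only, default BIU
 * CONF1 burst/FIFO flags. See struct ispmdvec in ispvar.h for the
 * authoritative member names and order.
 */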

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2322
#define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2422
#define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6312
#define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
#endif

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2322	\
	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2422	\
	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6312	\
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)
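
/*
 * Note (added commentary): these composite IDs mirror what isp_pci_probe()
 * computes below, (pci_get_device(dev) << 16) | pci_get_vendor(dev), and
 * what pci_get_devid() returns in the attach path. For example, an ISP1020
 * (device 0x1020, vendor 0x1077) yields
 * PCI_QLOGIC_ISP1020 == (0x1020 << 16) | 0x1077 == 0x10201077.
 */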

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);

struct isp_pcisoftc {
	ispsoftc_t			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
};
extern ispfwfunc *isp_get_firmware_p;

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2322:
		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2422:
		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}

#if __FreeBSD_version < 500000
static void
isp_get_options(device_t dev, ispsoftc_t *isp)
{
	uint64_t wwn;
	int bitmap, unit;

	unit = device_get_unit(dev);
	if (getenv_int("isp_disable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			isp->isp_osinfo.disabled = 1;
			return;
		}
	}

	if (getenv_int("isp_no_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_no_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_no_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_nport", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NPORT;
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	if (getenv_quad("isp_portwwn", &wwn)) {
		isp->isp_osinfo.default_port_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWPN;
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	if (getenv_quad("isp_nodewwn", &wwn)) {
		isp->isp_osinfo.default_node_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWNN;
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	bitmap = 0;
	(void) getenv_int("isp_debug", &bitmap);
	if (bitmap) {
		isp->isp_dblev = bitmap;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}

#ifdef	ISP_FW_CRASH_DUMP
	bitmap = 0;
	if (getenv_int("isp_fw_dump_enable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			size_t amt = 0;
			if (IS_2200(isp)) {
				amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
			} else if (IS_23XX(isp)) {
				amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
			}
			if (amt) {
				FCPARAM(isp)->isp_dump_data =
				    malloc(amt, M_DEVBUF, M_WAITOK);
				memset(FCPARAM(isp)->isp_dump_data, 0, amt);
			} else {
				device_printf(dev,
				    "f/w crash dumps not supported for card\n");
			}
		}
	}
#endif
}

static void
isp_get_pci_options(device_t dev, int *m1, int *m2)
{
	int bitmap;
	int unit = device_get_unit(dev);

	*m1 = PCIM_CMD_MEMEN;
	*m2 = PCIM_CMD_PORTEN;
	if (getenv_int("isp_mem_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			*m1 = PCIM_CMD_MEMEN;
			*m2 = PCIM_CMD_PORTEN;
		}
	}
	bitmap = 0;
	if (getenv_int("isp_io_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			*m1 = PCIM_CMD_PORTEN;
			*m2 = PCIM_CMD_MEMEN;
		}
	}
}
#else
static void
isp_get_options(device_t dev, ispsoftc_t *isp)
{
	int tval;
	const char *sptr;
	/*
	 * Figure out if we're supposed to skip this one.
	 */

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "disable", &tval) == 0 && tval) {
		device_printf(dev, "disabled at user request\n");
		isp->isp_osinfo.disabled = 1;
		return;
	}

	tval = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "role", &tval) == 0 && tval != -1) {
		tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET);
		isp->isp_role = tval;
		device_printf(dev, "setting role to 0x%x\n", isp->isp_role);
	} else {
#ifdef	ISP_TARGET_MODE
		isp->isp_role = ISP_ROLE_TARGET;
#else
		isp->isp_role = ISP_DEFAULT_ROLES;
#endif
	}

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fullduplex", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
#ifdef	ISP_FW_CRASH_DUMP
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fw_dump_enable", &tval) == 0 && tval != 0) {
		size_t amt = 0;
		if (IS_2200(isp)) {
			amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
		} else if (IS_23XX(isp)) {
			amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
		}
		if (amt) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
		} else {
			device_printf(dev,
			    "f/w crash dumps not supported for this model\n");
		}
	}
#endif

	sptr = 0;
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "topology", (const char **) &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "portwwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_port_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWPN;
		}
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "nodewwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_node_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWNN;
		}
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	isp->isp_osinfo.default_id = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "iid", &tval) == 0) {
		isp->isp_osinfo.default_id = tval;
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}
	if (isp->isp_osinfo.default_id == -1) {
		if (IS_FC(isp)) {
			isp->isp_osinfo.default_id = 109;
		} else {
			isp->isp_osinfo.default_id = 7;
		}
	}

	/*
	 * Set up logging levels.
	 */
	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "debug", &tval);
	if (tval) {
		isp->isp_dblev = tval;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}
}
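
/*
 * Note (added commentary): on FreeBSD 5.x and later the resource_*_value()
 * lookups above are normally satisfied from /boot/device.hints or
 * loader.conf. A hypothetical example for unit 0:
 *
 *	hint.isp.0.fwload_disable="1"
 *	hint.isp.0.topology="nport-only"
 *	hint.isp.0.portwwn="w50000000aaaa0001"
 *
 * WWN strings need the leading 'w', per the comment above.
 */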

static void
isp_get_pci_options(device_t dev, int *m1, int *m2)
{
	int tval;
	/*
	 * Which should we try first - memory mapping or i/o mapping?
	 *
	 * We used to try memory first followed by i/o on alpha, otherwise
	 * the reverse, but we should just try memory first all the time now.
	 */
	*m1 = PCIM_CMD_MEMEN;
	*m2 = PCIM_CMD_PORTEN;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_iomap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_PORTEN;
		*m2 = PCIM_CMD_MEMEN;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_memmap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_MEMEN;
		*m2 = PCIM_CMD_PORTEN;
	}
}
#endif

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int rtp, rgd, iqd, m1, m2;
	uint32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp = NULL;
	struct ispmdvec *mdvp;
#if __FreeBSD_version >= 500000
	int locksetup = 0;
#endif

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}
	memset(pcs, 0, sizeof (*pcs));
	pcs->pci_dev = dev;
	isp = &pcs->pci_isp;

	/*
	 * Get Generic Options
	 */
	isp_get_options(dev, isp);

	/*
	 * Check to see if options have us disabled
	 */
	if (isp->isp_osinfo.disabled) {
		/*
		 * But return zero to preserve unit numbering
		 */
		return (0);
	}

	/*
	 * Get PCI options- which in this case are just mapping preferences.
	 */
	isp_get_pci_options(dev, &m1, &m2);

	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	}
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_10160;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2300;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 ||
	    pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2312;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2322;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2422;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	isp = &pcs->pci_isp;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
	isp->isp_dev = dev;

	/*
	 * Try and find firmware for this device.
	 */

	/*
	 * Don't even attempt to get firmware for the 2322/2422 (yet)
	 */
	if (IS_2322(isp) == 0 && IS_24XX(isp) == 0 && isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef	ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
		PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_23XX(isp)) {
		/*
		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
		 */
		isp->isp_touched = 1;
	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	iqd = 0;
	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
	    RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

#if __FreeBSD_version >= 500000
	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;
#endif

	if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Last minute checks...
	 */
	if (IS_23XX(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_attach(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:

	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

#if __FreeBSD_version >= 500000
	if (locksetup && isp) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}
#endif

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}

	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param) {
#ifdef	ISP_FW_CRASH_DUMP
			if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) {
				free(FCPARAM(isp)->isp_dump_data, M_DEVBUF);
			}
#endif
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		}
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

static void
isp_pci_intr(void *arg)
{
	ispsoftc_t *isp = arg;
	uint16_t isr, sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		int iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_intr(isp, isr, sema, mbox);
		isp->isp_osinfo.intsok = iok;
	}
	ISP_UNLOCK(isp);
}

#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
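
/*
 * Note (added commentary): register offsets handed down by the core code
 * are "virtual" offsets that encode a register block in their upper bits.
 * IspVirt2Off() uses those bits to index pci_poff[] (set up in
 * isp_pci_attach above) and adds back the low byte, so e.g. a mailbox
 * register resolves to pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] +
 * (regoff & 0xff); this is what lets the same core code drive the 1020-,
 * 1080- and 2X00-style register layouts.
 */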

static __inline int
isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint16_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}
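
/*
 * Note (added commentary): the "debounced" read above simply re-reads the
 * register until two back-to-back reads agree, giving up with a nonzero
 * return after 1000 tries. It is only used for the ISP2100 (see
 * isp_pci_rd_isr below), where a single read of the ISR, semaphore, or
 * outgoing mailbox registers evidently cannot be trusted while the chip
 * is updating them.
 */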

static int
isp_pci_rd_isr(ispsoftc_t *isp, uint16_t *isrp,
    uint16_t *semap, uint16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

static int
isp_pci_rd_isr_2300(ispsoftc_t *isp, uint16_t *isrp,
    uint16_t *semap, uint16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint32_t r2hisr;

	if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
		*isrp = 0;
		return (0);
	}
	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
	    IspVirt2Off(pcs, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		return (0);
	}
}
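
/*
 * Note (added commentary): on 2300-class chips the RISC-to-host status
 * read above packs everything into one 32-bit word: BIU_R2HST_INTR says an
 * interrupt is pending, the low 16 bits (under BIU_R2HST_ISTAT_MASK)
 * classify it, and the high 16 bits carry outgoing mailbox 0, which is why
 * the switch cases set *mbox0p from (r2hisr >> 16) or from a synthesized
 * ASYNC_* code for the RIO and fast-posting cases.
 */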

static uint16_t
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
{
	uint16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}

static uint16_t
isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
{
	uint16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		uint16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		uint16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}

struct imush {
	ispsoftc_t *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		ispsoftc_t *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}
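
/*
 * Note (added commentary): imc() records where the control space ended up
 * in bus address terms. The single contiguous allocation is carved up in
 * the same order here and in isp_pci_mbxdma() below (request queue, then
 * result queue, then, for FC cards only, the ISP2100_SCRLEN scratch area),
 * so the two functions must agree on that layout.
 */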

/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)

#if __FreeBSD_version < 500000
#define	isp_dma_tag_create	bus_dma_tag_create
#else
#define	isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z)	\
	bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, \
	    busdma_lock_mutex, &Giant, z)
#endif
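
/*
 * Note (added commentary): bus_dma_tag_create() grew lockfunc/lockarg
 * parameters in FreeBSD 5.x; the wrapper above pins them to
 * busdma_lock_mutex and &Giant so the rest of this file can use a single
 * call signature on both old and new trees.
 */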

static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	uint32_t len;
	int i, error, ns;
	bus_size_t slim;	/* segment size */
	bus_addr_t llim;	/* low limit of unavailable dma */
	bus_addr_t hlim;	/* high limit of unavailable dma */
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

	hlim = BUS_SPACE_MAXADDR;
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = (bus_size_t) (1ULL << 32);
		llim = BUS_SPACE_MAXADDR;
	} else {
		llim = BUS_SPACE_MAXADDR_32BIT;
		slim = (1 << 24);
	}

	/*
	 * XXX: We don't really support 64 bit target mode for parallel scsi yet
	 */
#ifdef	ISP_TARGET_MODE
	if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) {
		isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet");
		return (1);
	}
#endif

	ISP_UNLOCK(isp);
	if (isp_dma_tag_create(NULL, 1, slim, llim, hlim,
	    NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		ISP_LOCK(isp);
		return (1);
	}

	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		ISP_LOCK(isp);
		return (1);
	}
#ifdef	ISP_TARGET_MODE
	len = sizeof (void **) * isp->isp_maxcmds;
	isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_tgtlist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
		ISP_LOCK(isp);
		return (1);
	}
#endif
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pcs->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
		free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}

	ns = (len / PAGE_SIZE) + 1;
	/*
	 * Create a tag for the control spaces- force it to within 32 bits.
	 */
	if (isp_dma_tag_create(pcs->dmat, QENTRY_LEN, slim,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		free(pcs->dmaps, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
	    &isp->isp_cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_cdmat);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		free(pcs->dmaps, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
			}
			goto bad;
		}
	}

	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	isp->isp_result = base;
	if (IS_FC(isp)) {
		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		FCPARAM(isp)->isp_scratch = base;
	}
	ISP_LOCK(isp);
	return (0);

bad:
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
	free(isp->isp_tgtlist, M_DEVBUF);
#endif
	free(pcs->dmaps, M_DEVBUF);
	ISP_LOCK(isp);
	isp->isp_rquest = NULL;
	return (1);
}

typedef struct {
	ispsoftc_t *isp;
	void *cmd_token;
	void *rq;
	uint16_t *nxtip;
	uint16_t optr;
	int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2
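
/*
 * Note (added commentary): a mush_t is the bundle of state passed as the
 * single opaque argument to the bus_dmamap_load() callbacks below (dma2(),
 * dma2_a64(), and the target mode variants): softc, CCB, the partially
 * built request queue entry, the in/out queue indices, and an error slot
 * for reporting failures back to isp_pci_dmasetup().
 */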

#ifdef	ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define	STATUS_WITH_DATA	1
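
/*
 * Note (added commentary): with STATUS_WITH_DATA defined, tdma_mk() lets
 * the final data CTIO carry SCSI status as well. Were it undefined, the
 * #ifndef branch below would add CT_NO_DATA to the saved flags and budget
 * one extra CTIO so that status goes out in a separate, data-less entry.
 */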

static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	ispsoftc_t *isp;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	ct_entry_t *cto, *qe;
	uint8_t scsi_status;
	uint16_t curi, nxti, handle;
	uint32_t sflags;
	int32_t resid;
	int nth_ctio, nctios, send_status;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		cto->ct_header.rqs_seqno = 1;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
		isp_put_ctio(isp, cto, qe);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */

	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

#ifndef	STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	pcs = (struct isp_pcisoftc *)isp;
	dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;

	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on- the caller will do that.
			 */

			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
			}
			if (send_status) {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}

/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code and
 * improves performance.
 */

static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	ispsoftc_t *isp;
	ct2_entry_t *cto, *qe;
	uint16_t curi, nxti;
	ispds_t *ds;
	ispds64_t *ds64;
	int segcnt, seglim;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;

	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_header.rqs_seqno = 1;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		if (IS_2KLOGIN(isp)) {
			isp_put_ctio2e(isp,
			    (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
		} else {
			isp_put_ctio2(isp, cto, qe);
		}
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGERR,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}

	nxti = *mp->nxtip;

	/*
	 * Check to see if we need to use DAC addressing or not.
	 *
	 * Any address that's over the 4GB boundary causes this
	 * to happen.
	 */
1818	segcnt = nseg;
1819	if (sizeof (bus_addr_t) > 4) {
1820		for (segcnt = 0; segcnt < nseg; segcnt++) {
1821			uint64_t addr = dm_segs[segcnt].ds_addr;
1822			if (addr >= 0x100000000LL) {
1823				break;
1824			}
1825		}
1826	}
1827	if (segcnt != nseg) {
1828		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3;
1829		seglim = ISP_RQDSEG_T3;
1830		ds64 = &cto->rsp.m0.ct_dataseg64[0];
1831		ds = NULL;
1832	} else {
1833		seglim = ISP_RQDSEG_T2;
1834		ds64 = NULL;
1835		ds = &cto->rsp.m0.ct_dataseg[0];
1836	}
1837	cto->ct_seg_count = 0;
1838
1839	/*
1840	 * Set up the CTIO2 data segments.
1841	 */
1842	for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg;
1843	    cto->ct_seg_count++, segcnt++) {
1844		if (ds64) {
1845			ds64->ds_basehi =
1846			    ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
1847			ds64->ds_base = dm_segs[segcnt].ds_addr;
1848			ds64->ds_count = dm_segs[segcnt].ds_len;
1849			ds64++;
1850		} else {
1851			ds->ds_base = dm_segs[segcnt].ds_addr;
1852			ds->ds_count = dm_segs[segcnt].ds_len;
1853			ds++;
1854		}
1855		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
1856#if __FreeBSD_version < 500000
1857		isp_prt(isp, ISP_LOGTDEBUG1,
1858		    "isp_send_ctio2: ent0[%d]0x%llx:%llu",
1859		    cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr,
1860		    (uint64_t)dm_segs[segcnt].ds_len);
1861#else
1862		isp_prt(isp, ISP_LOGTDEBUG1,
1863		    "isp_send_ctio2: ent0[%d]0x%jx:%ju",
1864		    cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr,
1865		    (uintmax_t)dm_segs[segcnt].ds_len);
1866#endif
1867	}
1868
1869	while (segcnt < nseg) {
1870		uint16_t curip;
1871		int seg;
1872		ispcontreq_t local, *crq = &local, *qep;
1873
1874		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1875		curip = nxti;
1876		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
1877		if (nxti == mp->optr) {
1878			ISP_UNLOCK(isp);
1879			isp_prt(isp, ISP_LOGTDEBUG0,
1880			    "tdma_mkfc: request queue overflow");
1881			mp->error = MUSHERR_NOQENTRIES;
1882			return;
1883		}
1884		cto->ct_header.rqs_entry_count++;
1885		MEMZERO((void *)crq, sizeof (*crq));
1886		crq->req_header.rqs_entry_count = 1;
1887		if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) {
1888			seglim = ISP_CDSEG64;
1889			ds = NULL;
1890			ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0];
1891			crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
1892		} else {
1893			seglim = ISP_CDSEG;
1894			ds = &crq->req_dataseg[0];
1895			ds64 = NULL;
1896			crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1897		}
1898		for (seg = 0; segcnt < nseg && seg < seglim;
1899		    segcnt++, seg++) {
1900			if (ds64) {
1901				ds64->ds_basehi =
1902				  ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
1903				ds64->ds_base = dm_segs[segcnt].ds_addr;
1904				ds64->ds_count = dm_segs[segcnt].ds_len;
1905				ds64++;
1906			} else {
1907				ds->ds_base = dm_segs[segcnt].ds_addr;
1908				ds->ds_count = dm_segs[segcnt].ds_len;
1909				ds++;
1910			}
1911#if __FreeBSD_version < 500000
1912			isp_prt(isp, ISP_LOGTDEBUG1,
1913			    "isp_send_ctio2: ent%d[%d]%llx:%llu",
1914			    cto->ct_header.rqs_entry_count-1, seg,
1915			    (uint64_t)dm_segs[segcnt].ds_addr,
1916			    (uint64_t)dm_segs[segcnt].ds_len);
1917#else
1918			isp_prt(isp, ISP_LOGTDEBUG1,
1919			    "isp_send_ctio2: ent%d[%d]%jx:%ju",
1920			    cto->ct_header.rqs_entry_count-1, seg,
1921			    (uintmax_t)dm_segs[segcnt].ds_addr,
1922			    (uintmax_t)dm_segs[segcnt].ds_len);
1923#endif
1924			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
1925			cto->ct_seg_count++;
1926		}
1927		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
1928		isp_put_cont_req(isp, crq, qep);
1929		ISP_TDQE(isp, "cont entry", curi, qep);
1930	}
1931
1932	/*
1933	 * No do final twiddling for the CTIO itself.
1934	 */
1935	cto->ct_header.rqs_seqno = 1;
1936	isp_prt(isp, ISP_LOGTDEBUG1,
1937	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
1938	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
1939	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
1940	    cto->ct_resid);
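	/*
	 * 2K-login capable firmware takes the expanded CTIO2 layout
	 * (ct2e_entry_t), which carries the wider initiator handle.
	 */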
1941	if (IS_2KLOGIN(isp))
1942		isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
1943	else
1944		isp_put_ctio2(isp, cto, qe);
1945	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
1946	*mp->nxtip = nxti;
1947}
1948#endif
1949
1950static void dma2_a64(void *, bus_dma_segment_t *, int, int);
1951static void dma2(void *, bus_dma_segment_t *, int, int);
1952
1953static void
1954dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1955{
1956	mush_t *mp;
1957	ispsoftc_t *isp;
1958	struct ccb_scsiio *csio;
1959	struct isp_pcisoftc *pcs;
1960	bus_dmamap_t *dp;
1961	bus_dma_segment_t *eseg;
1962	ispreq64_t *rq;
1963	int seglim, datalen;
1964	uint16_t nxti;
1965
1966	mp = (mush_t *) arg;
1967	if (error) {
1968		mp->error = error;
1969		return;
1970	}
1971
1972	if (nseg < 1) {
1973		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
1974		mp->error = EFAULT;
1975		return;
1976	}
1977	csio = mp->cmd_token;
1978	isp = mp->isp;
1979	rq = mp->rq;
1980	pcs = (struct isp_pcisoftc *)mp->isp;
1981	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
1982	nxti = *mp->nxtip;
1983
1984	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1985		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
1986	} else {
1987		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
1988	}
1989	datalen = XS_XFRLEN(csio);
1990
1991	/*
1992	 * We're passed an initial, partially filled-in entry that
1993	 * has most fields set except for the data-transfer
1994	 * related values.
1995	 *
1996	 * Our job is to fill in the initial request queue entry and
1997	 * then to start allocating and filling in continuation entries
1998	 * until we've covered the entire transfer.
1999	 */
2000
2001	if (IS_FC(isp)) {
2002		rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
2003		seglim = ISP_RQDSEG_T3;
2004		((ispreqt3_t *)rq)->req_totalcnt = datalen;
2005		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2006			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
2007		} else {
2008			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
2009		}
2010	} else {
2011		rq->req_header.rqs_entry_type = RQSTYPE_A64;
2012		if (csio->cdb_len > 12) {
2013			seglim = 0;
2014		} else {
2015			seglim = ISP_RQDSEG_A64;
2016		}
2017		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2018			rq->req_flags |= REQFLAG_DATA_IN;
2019		} else {
2020			rq->req_flags |= REQFLAG_DATA_OUT;
2021		}
2022	}
2023
2024	eseg = dm_segs + nseg;
2025
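	/*
	 * Fill as many data segments as fit into the request entry
	 * itself, splitting each bus address into low and high 32-bit
	 * halves with DMA_LO32/DMA_HI32.
	 */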
2026	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
2027		if (IS_FC(isp)) {
2028			ispreqt3_t *rq3 = (ispreqt3_t *)rq;
2029			rq3->req_dataseg[rq3->req_seg_count].ds_base =
2030			    DMA_LO32(dm_segs->ds_addr);
2031			rq3->req_dataseg[rq3->req_seg_count].ds_basehi =
2032			    DMA_HI32(dm_segs->ds_addr);
2033			rq3->req_dataseg[rq3->req_seg_count].ds_count =
2034			    dm_segs->ds_len;
2035		} else {
2036			rq->req_dataseg[rq->req_seg_count].ds_base =
2037			    DMA_LO32(dm_segs->ds_addr);
2038			rq->req_dataseg[rq->req_seg_count].ds_basehi =
2039			    DMA_HI32(dm_segs->ds_addr);
2040			rq->req_dataseg[rq->req_seg_count].ds_count =
2041			    dm_segs->ds_len;
2042		}
2043		datalen -= dm_segs->ds_len;
2044		rq->req_seg_count++;
2045		dm_segs++;
2046	}
2047
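	/*
	 * Spill whatever remains into A64 continuation entries, each
	 * taking another request queue slot; give up with
	 * MUSHERR_NOQENTRIES if the queue would wrap into the
	 * consumer pointer.
	 */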
2048	while (datalen > 0 && dm_segs != eseg) {
2049		uint16_t onxti;
2050		ispcontreq64_t local, *crq = &local, *cqe;
2051
2052		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
2053		onxti = nxti;
2054		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
2055		if (nxti == mp->optr) {
2056			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
2057			mp->error = MUSHERR_NOQENTRIES;
2058			return;
2059		}
2060		rq->req_header.rqs_entry_count++;
2061		MEMZERO((void *)crq, sizeof (*crq));
2062		crq->req_header.rqs_entry_count = 1;
2063		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
2064
2065		seglim = 0;
2066		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
2067			crq->req_dataseg[seglim].ds_base =
2068			    DMA_LO32(dm_segs->ds_addr);
2069			crq->req_dataseg[seglim].ds_basehi =
2070			    DMA_HI32(dm_segs->ds_addr);
2071			crq->req_dataseg[seglim].ds_count =
2072			    dm_segs->ds_len;
2073			datalen -= dm_segs->ds_len;
2074			rq->req_seg_count++;
2075			dm_segs++;
2076			seglim++;
2077		}
2078		isp_put_cont64_req(isp, crq, cqe);
2079		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
2080	}
2081	*mp->nxtip = nxti;
2082}
2083
2084static void
2085dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2086{
2087	mush_t *mp;
2088	ispsoftc_t *isp;
2089	struct ccb_scsiio *csio;
2090	struct isp_pcisoftc *pcs;
2091	bus_dmamap_t *dp;
2092	bus_dma_segment_t *eseg;
2093	ispreq_t *rq;
2094	int seglim, datalen;
2095	uint16_t nxti;
2096
2097	mp = (mush_t *) arg;
2098	if (error) {
2099		mp->error = error;
2100		return;
2101	}
2102
2103	if (nseg < 1) {
2104		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
2105		mp->error = EFAULT;
2106		return;
2107	}
2108	csio = mp->cmd_token;
2109	isp = mp->isp;
2110	rq = mp->rq;
2111	pcs = (struct isp_pcisoftc *)mp->isp;
2112	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
2113	nxti = *mp->nxtip;
2114
2115	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2116		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
2117	} else {
2118		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
2119	}
2120
2121	datalen = XS_XFRLEN(csio);
2122
2123	/*
2124	 * We're passed an initial, partially filled-in entry that
2125	 * has most fields set except for the data-transfer
2126	 * related values.
2127	 *
2128	 * Our job is to fill in the initial request queue entry and
2129	 * then to start allocating and filling in continuation entries
2130	 * until we've covered the entire transfer.
2131	 */
2132
2133	if (IS_FC(isp)) {
2134		seglim = ISP_RQDSEG_T2;
2135		((ispreqt2_t *)rq)->req_totalcnt = datalen;
2136		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2137			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
2138		} else {
2139			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
2140		}
2141	} else {
2142		if (csio->cdb_len > 12) {
2143			seglim = 0;
2144		} else {
2145			seglim = ISP_RQDSEG;
2146		}
2147		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2148			rq->req_flags |= REQFLAG_DATA_IN;
2149		} else {
2150			rq->req_flags |= REQFLAG_DATA_OUT;
2151		}
2152	}
2153
2154	eseg = dm_segs + nseg;
2155
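	/*
	 * Fill the data segments that fit in the request entry itself.
	 * These are 32-bit descriptors, so only the low half of each
	 * bus address is carried.
	 */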
2156	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
2157		if (IS_FC(isp)) {
2158			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
2159			rq2->req_dataseg[rq2->req_seg_count].ds_base =
2160			    DMA_LO32(dm_segs->ds_addr);
2161			rq2->req_dataseg[rq2->req_seg_count].ds_count =
2162			    dm_segs->ds_len;
2163		} else {
2164			rq->req_dataseg[rq->req_seg_count].ds_base =
2165				DMA_LO32(dm_segs->ds_addr);
2166			rq->req_dataseg[rq->req_seg_count].ds_count =
2167				dm_segs->ds_len;
2168		}
2169		datalen -= dm_segs->ds_len;
2170		rq->req_seg_count++;
2171		dm_segs++;
2172	}
2173
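	/*
	 * Spill the remaining segments into RQSTYPE_DATASEG
	 * continuation entries, one request queue slot apiece.
	 */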
2174	while (datalen > 0 && dm_segs != eseg) {
2175		uint16_t onxti;
2176		ispcontreq_t local, *crq = &local, *cqe;
2177
2178		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
2179		onxti = nxti;
2180		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
2181		if (nxti == mp->optr) {
2182			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
2183			mp->error = MUSHERR_NOQENTRIES;
2184			return;
2185		}
2186		rq->req_header.rqs_entry_count++;
2187		MEMZERO((void *)crq, sizeof (*crq));
2188		crq->req_header.rqs_entry_count = 1;
2189		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
2190
2191		seglim = 0;
2192		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
2193			crq->req_dataseg[seglim].ds_base =
2194			    DMA_LO32(dm_segs->ds_addr);
2195			crq->req_dataseg[seglim].ds_count =
2196			    dm_segs->ds_len;
2197			datalen -= dm_segs->ds_len;
2198			rq->req_seg_count++;
2199			dm_segs++;
2200			seglim++;
2201		}
2202		isp_put_cont_req(isp, crq, cqe);
2203		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
2204	}
2205	*mp->nxtip = nxti;
2206}
2207
2208/*
2209 * We enter with ISP_LOCK held
2210 */
2211static int
2212isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq,
2213	uint16_t *nxtip, uint16_t optr)
2214{
2215	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2216	ispreq_t *qep;
2217	bus_dmamap_t *dp = NULL;
2218	mush_t mush, *mp;
2219	void (*eptr)(void *, bus_dma_segment_t *, int, int);
2220
2221	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
2222#ifdef	ISP_TARGET_MODE
2223	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
2224		if (IS_FC(isp)) {
2225			eptr = tdma_mkfc;
2226		} else {
2227			eptr = tdma_mk;
2228		}
2229		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
2230		    (csio->dxfer_len == 0)) {
2231			mp = &mush;
2232			mp->isp = isp;
2233			mp->cmd_token = csio;
2234			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
2235			mp->nxtip = nxtip;
2236			mp->optr = optr;
2237			mp->error = 0;
2238			ISPLOCK_2_CAMLOCK(isp);
2239			(*eptr)(mp, NULL, 0, 0);
2240			CAMLOCK_2_ISPLOCK(isp);
2241			goto mbxsync;
2242		}
2243	} else
2244#endif
2245	if (sizeof (bus_addr_t) > 4) {
2246		eptr = dma2_a64;
2247	} else {
2248		eptr = dma2;
2249	}
2250
2251
2252	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
2253	    (csio->dxfer_len == 0)) {
2254		rq->req_seg_count = 1;
2255		goto mbxsync;
2256	}
2257
2258	/*
2259	 * Do a virtual grapevine step: bundle up the state that the
2260	 * deferred dma callback will need into the 'mush' structure.
2261	 */
2262	mp = &mush;
2263	mp->isp = isp;
2264	mp->cmd_token = csio;
2265	mp->rq = rq;
2266	mp->nxtip = nxtip;
2267	mp->optr = optr;
2268	mp->error = 0;
2269
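	/*
	 * Note that bus_dmamap_load() is expected to call the callback
	 * synchronously here; a deferred (EINPROGRESS) load is
	 * explicitly rejected below.
	 */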
2270	ISPLOCK_2_CAMLOCK(isp);
2271	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
2272		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
2273			int error, s;
2274			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
2275			s = splsoftvm();
2276			error = bus_dmamap_load(pcs->dmat, *dp,
2277			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
2278			if (error == EINPROGRESS) {
2279				bus_dmamap_unload(pcs->dmat, *dp);
2280				mp->error = EINVAL;
2281				isp_prt(isp, ISP_LOGERR,
2282				    "deferred dma allocation not supported");
2283			} else if (error && mp->error == 0) {
2284#ifdef	DIAGNOSTIC
2285				isp_prt(isp, ISP_LOGERR,
2286				    "error %d in dma mapping code", error);
2287#endif
2288				mp->error = error;
2289			}
2290			splx(s);
2291		} else {
2292			/* Pointer to physical buffer */
2293			struct bus_dma_segment seg;
2294			seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
2295			seg.ds_len = csio->dxfer_len;
2296			(*eptr)(mp, &seg, 1, 0);
2297		}
2298	} else {
2299		struct bus_dma_segment *segs;
2300
2301		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2302			isp_prt(isp, ISP_LOGERR,
2303			    "Physical segment pointers unsupported");
2304			mp->error = EINVAL;
2305		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
2306			isp_prt(isp, ISP_LOGERR,
2307			    "Virtual segment addresses unsupported");
2308			mp->error = EINVAL;
2309		} else {
2310			/* Just use the segments provided */
2311			segs = (struct bus_dma_segment *) csio->data_ptr;
2312			(*eptr)(mp, segs, csio->sglist_cnt, 0);
2313		}
2314	}
2315	CAMLOCK_2_ISPLOCK(isp);
2316	if (mp->error) {
2317		int retval = CMD_COMPLETE;
2318		if (mp->error == MUSHERR_NOQENTRIES) {
2319			retval = CMD_EAGAIN;
2320		} else if (mp->error == EFBIG) {
2321			XS_SETERR(csio, CAM_REQ_TOO_BIG);
2322		} else if (mp->error == EINVAL) {
2323			XS_SETERR(csio, CAM_REQ_INVALID);
2324		} else {
2325			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
2326		}
2327		return (retval);
2328	}
2329mbxsync:
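	/*
	 * Copy the completed request into the actual queue entry,
	 * converting it to the chip's layout according to its entry
	 * type (the isp_put_* routines do any needed swizzling).
	 */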
2330	switch (rq->req_header.rqs_entry_type) {
2331	case RQSTYPE_REQUEST:
2332		isp_put_request(isp, rq, qep);
2333		break;
2334	case RQSTYPE_CMDONLY:
2335		isp_put_extended_request(isp, (ispextreq_t *)rq,
2336		    (ispextreq_t *)qep);
2337		break;
2338	case RQSTYPE_T2RQS:
2339		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
2340		break;
2341	case RQSTYPE_A64:
2342	case RQSTYPE_T3RQS:
2343		isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
2344		break;
2345	}
2346	return (CMD_QUEUED);
2347}
2348
2349static void
2350isp_pci_dmateardown(ispsoftc_t *isp, XS_T *xs, uint16_t handle)
2351{
2352	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2353	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
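	/*
	 * The transfer is complete: sync the map for the CPU in the
	 * direction the data moved, then unload it for reuse.
	 */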
2354	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2355		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
2356	} else {
2357		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
2358	}
2359	bus_dmamap_unload(pcs->dmat, *dp);
2360}
2361
2362
2363static void
2364isp_pci_reset1(ispsoftc_t *isp)
2365{
2366	/* Make sure the BIOS is disabled */
2367	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
2368	/* and enable interrupts */
2369	ENABLE_INTS(isp);
2370}
2371
2372static void
2373isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
2374{
2375	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2376	if (msg)
2377		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
2378	else
2379		printf("%s:\n", device_get_nameunit(isp->isp_dev));
2380	if (IS_SCSI(isp))
2381		printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
2382	else
2383		printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
2384	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
2385	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
2386	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
2387
2388
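	/*
	 * For parallel SCSI chips, pause the RISC processor while
	 * sampling the DMA and SXP registers, then release it.
	 */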
2389	if (IS_SCSI(isp)) {
2390		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
2391		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
2392			ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
2393			ISP_READ(isp, CDMA_FIFO_STS));
2394		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
2395			ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
2396			ISP_READ(isp, DDMA_FIFO_STS));
2397		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
2398			ISP_READ(isp, SXP_INTERRUPT),
2399			ISP_READ(isp, SXP_GROSS_ERR),
2400			ISP_READ(isp, SXP_PINS_CTRL));
2401		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
2402	}
2403	printf("    mbox regs: %x %x %x %x %x\n",
2404	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
2405	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
2406	    ISP_READ(isp, OUTMAILBOX4));
2407	printf("    PCI Status Command/Status=%x\n",
2408	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 4));
2409}
2410