/* $FreeBSD: head/sys/dev/isp/isp_pci.c 93818 2002-04-04 21:03:38Z jhb $ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>

#include <pci/pcireg.h>
#include <pci/pcivar.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#include <dev/isp/isp_freebsd.h>

static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
static int
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int isp_pci_mbxdma(struct ispsoftc *);
static int
isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t);
static void
isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);

static void isp_pci_reset1(struct ispsoftc *);
static void isp_pci_dumpregs(struct ispsoftc *, const char *);

#ifndef	ISP_CODE_ORG
#define	ISP_CODE_ORG		0x1000
#endif

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
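/*
 * Each of the keys above matches the switch expression in isp_pci_probe()
 * below, which is formed as (device id << 16) | (vendor id). For example:
 *
 *	PCI_QLOGIC_ISP1020 == (0x1020 << 16) | 0x1077 == 0x10201077
 */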

/*
 * I/O Mapping Stuff
 */
#ifdef	__sparc64__
/*
 * An IOMMU means that we should only ever need one descriptor,
 * but allow one extra for slack.
 */
#define	ISP_NSEG	2
#else
#define	ISP_NSEG	((MAXPHYS/PAGE_SIZE) + 1)
#endif
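
/*
 * For illustration: without an IOMMU, ISP_NSEG covers the worst case of
 * a maximally-sized, maximally-misaligned transfer. With the common
 * MAXPHYS of 128KB and 4KB pages, that is (131072 / 4096) + 1 == 33
 * segments.
 */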

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10
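
/*
 * Units for the defaults above: the PCI latency timer counts in PCI
 * clocks, so 0x40 is 64 clocks; the cache line size register counts in
 * 32-bit words, so 0x10 is 16 words (64 bytes).
 */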

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);


struct isp_pcisoftc {
	struct ispsoftc			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
};
ispfwfunc *isp_get_firmware_p = NULL;

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_VERSION(isp, 1);

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (device_get_unit(dev) == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (0);
}

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int tval, rtp, rgd, iqd, m1, m2, isp_debug, role;
	u_int32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp = NULL;
	struct ispmdvec *mdvp;
	const char *sptr;
	int locksetup = 0;

	/*
	 * Figure out if we're supposed to skip this one.
	 * If we are, we actually go to ISP_ROLE_NONE.
	 */

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "disable", &tval) == 0 && tval) {
		device_printf(dev, "device is disabled\n");
		/* but return 0 so the !$)$)*!$*) unit isn't reused */
		return (0);
	}

	role = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "role", &role) == 0 &&
	    ((role & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) == 0)) {
		device_printf(dev, "setting role to 0x%x\n", role);
	} else {
#ifdef	ISP_TARGET_MODE
		role = ISP_ROLE_INITIATOR|ISP_ROLE_TARGET;
#else
		role = ISP_DEFAULT_ROLES;
#endif
	}

	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (pcs == NULL) {
		device_printf(dev, "cannot allocate softc\n");
		return (ENOMEM);
	}

	/*
	 * Figure out which we should try first - memory mapping or i/o mapping?
	 */
#ifdef	__alpha__
	m1 = PCIM_CMD_MEMEN;
	m2 = PCIM_CMD_PORTEN;
#else
	m1 = PCIM_CMD_PORTEN;
	m2 = PCIM_CMD_MEMEN;
#endif

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_iomap", &tval) == 0 && tval != 0) {
		m1 = PCIM_CMD_PORTEN;
		m2 = PCIM_CMD_MEMEN;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_memmap", &tval) == 0 && tval != 0) {
		m1 = PCIM_CMD_MEMEN;
		m2 = PCIM_CMD_PORTEN;
	}

	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 1);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose)
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2300;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2312;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	isp = &pcs->pci_isp;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
	isp->isp_role = role;
	isp->isp_dev = dev;

	/*
	 * Try and find firmware for this device.
	 */

	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef	ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
		PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 1);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	iqd = 0;
	irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0,
	    1, RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fullduplex", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
#ifdef	ISP_FW_CRASH_DUMP
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fw_dump_enable", &tval) == 0 && tval != 0) {
		size_t amt = 0;
		if (IS_2200(isp)) {
			amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
		} else if (IS_23XX(isp)) {
			amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
		}
		if (amt) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
		} else {
			device_printf(dev,
			    "f/w crash dumps not supported for this model\n");
		}
	}
#endif

	sptr = 0;
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "topology", (const char **) &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
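	/*
	 * For example, hypothetical WWN hints for unit 0, set in
	 * /boot/device.hints, would look like this (note the leading 'w'):
	 *
	 *	hint.isp.0.portwwn="w50000000aaaa0001"
	 *	hint.isp.0.nodewwn="w50000000aaaa0000"
	 */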
	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "portwwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_port_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWPN;
		}
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "nodewwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_node_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWNN;
		}
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	isp_debug = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "debug", &isp_debug);

	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;

#ifdef	ISP_SMPLOCK
#define	INTR_FLAGS	INTR_TYPE_CAM | INTR_MPSAFE | INTR_ENTROPY
#else
#define	INTR_FLAGS	INTR_TYPE_CAM | INTR_ENTROPY
#endif
	if (bus_setup_intr(dev, irq, INTR_FLAGS, isp_pci_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Set up logging levels.
	 */
	if (isp_debug) {
		isp->isp_dblev = isp_debug;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;

	/*
	 * Last minute checks...
	 */
	if (IS_2312(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_attach(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:

	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

	if (locksetup && isp) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}


	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param)
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		free(pcs, M_DEVBUF);
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

static void
isp_pci_intr(void *arg)
{
	struct ispsoftc *isp = arg;
	u_int16_t isr, sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		int iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_intr(isp, isr, sema, mbox);
		isp->isp_osinfo.intsok = iok;
	}
	ISP_UNLOCK(isp);
}


#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))
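
/*
 * A worked example of what IspVirt2Off() does: a virtual register offset
 * encodes its register block in the bits selected by _BLK_REG_MASK and
 * its byte offset within that block in the low byte. The block bits
 * index pci_poff[] (filled in at attach time for the specific chip), and
 * the byte offset is added to that base; a mailbox register on a 2300,
 * for instance, resolves to PCI_MBOX_REGS2300_OFF plus the register's
 * offset within the mailbox block.
 */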

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)


static INLINE int
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}
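
/*
 * Usage note: the debounced read above is only employed for the 2100
 * (see isp_pci_rd_isr() below). It rereads a register until two
 * back-to-back reads agree, giving up after 1000 tries, and returns
 * nonzero if no stable value was observed.
 */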

static int
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

static int
isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int32_t r2hisr;
868
869	if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) {
870		*isrp = 0;
871		return (0);
872	}
873	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
874	    IspVirt2Off(pcs, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
	case ISPR2HST_RIO_16:
	case ISPR2HST_FPOST:
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		return (0);
	}
}

static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}

static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}


struct imush {
	struct ispsoftc *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		struct ispsoftc *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}
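
/*
 * For reference, the single contiguous control-space allocation that the
 * imc() callback above parcels out is laid out in this order:
 *
 *	+----------------------+ isp_rquest_dma
 *	|     request queue    |
 *	+----------------------+ isp_result_dma
 *	|     result queue     |
 *	+----------------------+ isp_scdma (FC cards only)
 *	|    FC scratch area   |
 *	+----------------------+
 */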

#define ISP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)
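
/*
 * ISP_NSEGS bounds the parent DMA tag created in isp_pci_mbxdma() below
 * (and thus the per-command maps created from it). It is deliberately
 * generous; the practical per-request limits come from the ISP_RQDSEG
 * and ISP_CDSEG chaining done in dma2().
 */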

static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error, ns;
	bus_size_t bl;
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		bl = BUS_SPACE_UNRESTRICTED;
	} else {
		bl = BUS_SPACE_MAXADDR_24BIT;
	}

	if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
	    ISP_NSEGS, bl, 0, &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		return (1);
	}


	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pcs->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
		free(isp->isp_xflist, M_DEVBUF);
		return (1);
	}

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}

	ns = (len / PAGE_SIZE) + 1;
	if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, len, ns, bl, 0, &isp->isp_cdmat)) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		free(pcs->dmaps, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
	    &isp->isp_cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_cdmat);
		free(isp->isp_xflist, M_DEVBUF);
		free(pcs->dmaps, M_DEVBUF);
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
			}
			goto bad;
		}
	}

	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	isp->isp_result = base;
	if (IS_FC(isp)) {
		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		FCPARAM(isp)->isp_scratch = base;
	}
	return (0);

bad:
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	free(isp->isp_xflist, M_DEVBUF);
	free(pcs->dmaps, M_DEVBUF);
	isp->isp_rquest = NULL;
	return (1);
}

typedef struct {
	struct ispsoftc *isp;
	void *cmd_token;
	void *rq;
	u_int16_t *nxtip;
	u_int16_t optr;
	u_int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

#ifdef	ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we
 * start out by knowing (per platform) how much data we have to move, but
 * we don't know, up front, how many DMA mapping segments will have to be
 * used to cover that data, so we don't know how many CTIO Request
 * Entries we will end up using. Further, for performance reasons we may
 * want to (on the last CTIO for Fibre Channel), send status too (if all
 * went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
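
/*
 * To illustrate the arithmetic used below: each SCSI CTIO can carry at
 * most ISP_RQDSEG data segments, so a transfer that maps to nseg
 * segments needs nseg / ISP_RQDSEG CTIOs, plus one for any remainder,
 * plus possibly one more again if status has to go out in a CTIO of its
 * own.
 */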
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define	STATUS_WITH_DATA	1

static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	ct_entry_t *cto, *qe;
	u_int8_t scsi_status;
	u_int16_t curi, nxti, handle;
	u_int32_t sflags;
	int32_t resid;
	int nth_ctio, nctios, send_status;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		cto->ct_header.rqs_seqno = 1;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
		isp_put_ctio(isp, cto, qe);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */

	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

#ifndef	STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	pcs = (struct isp_pcisoftc *)isp;
	dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;

	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on- the caller will do that.
			 */

			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
			}
			if (send_status) {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}

static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	u_int8_t sense[QLTM_SENSELEN];
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	ct2_entry_t *cto, *qe;
	u_int16_t scsi_status, send_status, send_sense, handle;
	u_int16_t curi, nxti;
	int32_t resid;
	int nth_ctio, nctios;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_header.rqs_seqno = 1;
		/* ct_syshandle contains the handle set by caller */
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_flags |= CT2_NO_DATA;
		if (cto->ct_resid > 0)
			cto->rsp.m1.ct_scsi_status |= CT2_DATA_UNDER;
		else if (cto->ct_resid < 0)
			cto->rsp.m1.ct_scsi_status |= CT2_DATA_OVER;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		isp_put_ctio2(isp, cto, qe);
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGWARN,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}


	nctios = nseg / ISP_RQDSEG_T2;
	if (nseg % ISP_RQDSEG_T2) {
		nctios++;
	}

	/*
	 * Save the handle, status, reloff, and residual. We'll reinsert the
	 * handle into the last CTIO2 we're going to send, and reinsert status
	 * and residual (and possibly sense data) if that's to be sent as well.
	 *
	 * We preserve ct_reloff and adjust it for each data CTIO2 we send past
	 * the first one. This is needed so that the FCP DATA IUs being sent
	 * out have the correct offset (they can arrive at the other end out
	 * of order).
	 */

	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	send_status = (cto->ct_flags & CT2_SENDSTATUS) != 0;

	if (send_status) {
		cto->ct_flags &= ~(CT2_SENDSTATUS|CT2_CCINCR);

		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status. We'll reinsert the
		 * CT2_SNSLEN_VALID later if appropriate.
		 */
		scsi_status = cto->rsp.m0.ct_scsi_status & 0xff;
		send_sense = cto->rsp.m0.ct_scsi_status & CT2_SNSLEN_VALID;

		/*
		 * If we're sending status and have a CHECK CONDITION and
		 * have sense data, we send one more CTIO2 with just the
		 * status and sense data. The upper layers have stashed
		 * the sense data in the dataseg structure for us.
		 */

		if ((scsi_status & 0xf) == SCSI_STATUS_CHECK_COND &&
		    send_sense) {
			bcopy(cto->rsp.m0.ct_dataseg, sense, QLTM_SENSELEN);
			nctios++;
		}
	} else {
		scsi_status = send_sense = resid = 0;
	}

	cto->ct_resid = 0;
	cto->rsp.m0.ct_scsi_status = 0;
	MEMZERO(&cto->rsp, sizeof (cto->rsp));

	pcs = (struct isp_pcisoftc *)isp;
	dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;

	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		u_int32_t oxfrlen;
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;
			if (seglim > ISP_RQDSEG_T2)
				seglim = ISP_RQDSEG_T2;
			for (seg = 0; seg < seglim; seg++, nseg--) {
				cto->rsp.m0.ct_dataseg[seg].ds_base =
				    dm_segs->ds_addr;
				cto->rsp.m0.ct_dataseg[seg].ds_count =
				    dm_segs->ds_len;
				cto->rsp.m0.ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
			oxfrlen = cto->rsp.m0.ct_xfrlen;
		} else {
			/*
			 * This case should only happen when we're sending a
			 * synthesized MODE1 final status with sense data.
			 */
			if (send_sense == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "dma2_tgt_fc ran out of segments, "
				    "no SENSE DATA");
				mp->error = EINVAL;
				return;
			}
			oxfrlen = 0;
		}


		/*
		 * At this point, the fields ct_lun, ct_iid, ct_rxid,
		 * ct_timeout have been carried over unchanged from what
		 * our caller had set.
		 *
		 * The field ct_reloff is either what the caller set, or
		 * what we've added to it below.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're sending a MODE1 status
		 * as the last CTIO.
		 *
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIO2s, so mark this
			 * CTIO2 and save the handle to the CCB such that when
			 * this CTIO2 completes we can free dma resources and
			 * do whatever else we need to do to finish the rest
			 * of the command.
			 */

			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				/*
				 * Get 'real' residual and set flags based
				 * on it.
				 */
				cto->ct_resid = resid;
				if (send_sense) {
					MEMCPY(cto->rsp.m1.ct_resp, sense,
					    QLTM_SENSELEN);
					cto->rsp.m1.ct_senselen =
					    QLTM_SENSELEN;
					scsi_status |= CT2_SNSLEN_VALID;
					cto->rsp.m1.ct_scsi_status =
					    scsi_status;
					cto->ct_flags &= CT2_FLAG_MMASK;
					cto->ct_flags |= CT2_FLAG_MODE1 |
					    CT2_NO_DATA | CT2_SENDSTATUS |
					    CT2_CCINCR;
					if (cto->ct_resid > 0)
						cto->rsp.m1.ct_scsi_status |=
						    CT2_DATA_UNDER;
					else if (cto->ct_resid < 0)
						cto->rsp.m1.ct_scsi_status |=
						    CT2_DATA_OVER;
				} else {
					cto->rsp.m0.ct_scsi_status =
					    scsi_status;
					cto->ct_flags |=
					    CT2_SENDSTATUS | CT2_CCINCR;
					if (cto->ct_resid > 0)
						cto->rsp.m0.ct_scsi_status |=
						    CT2_DATA_UNDER;
					else if (cto->ct_resid < 0)
						cto->rsp.m0.ct_scsi_status |=
						    CT2_DATA_OVER;
				}
			}
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x"
			    " ssts 0x%x res %d", cto->ct_rxid,
			    csio->ccb_h.target_lun, (int) cto->ct_iid,
			    cto->ct_flags, cto->ct_status,
			    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
			isp_put_ctio2(isp, cto, qe);
			ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct2_entry_t *oqe = qe;

			/*
			 * Make sure handle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO2[%x] lun %d->iid%d flgs 0x%x",
			    cto->ct_rxid, csio->ccb_h.target_lun,
			    (int) cto->ct_iid, cto->ct_flags);
			/*
			 * Get a new CTIO2 entry from the request queue.
			 */
			qe = (ct2_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGWARN,
				    "Queue Overflow in dma2_tgt_fc");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO2,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "tdma_mkfc", curi, cto);
			isp_put_ctio2(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO2 so we can reuse
			 * for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			/*
			 * Adjust the new relative offset by the amount which
			 * is recorded in the data segment of the old CTIO2 we
			 * just finished filling out.
			 */
			cto->ct_reloff += oxfrlen;
			MEMZERO(&cto->rsp, sizeof (cto->rsp));
		}
	}
	*mp->nxtip = nxti;
}
#endif

static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
				dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
				dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			/* consume this segment's length before advancing */
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}

static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
	u_int16_t *nxtip, u_int16_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			(*eptr)(mp, NULL, 0, 0);
			goto mbxsync;
		}
	} else
#endif
	eptr = dma2;


	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			s = splsoftvm();
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}

static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}


static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));


	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
			ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
			ISP_READ(isp, CDMA_FIFO_STS));
		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
			ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
			ISP_READ(isp, DDMA_FIFO_STS));
		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
			ISP_READ(isp, SXP_INTERRUPT),
			ISP_READ(isp, SXP_GROSS_ERR),
			ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf("    mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf("    PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}
2038