isp_pci.c revision 44820
1/* $Id: isp_pci.c,v 1.15 1999/02/09 01:12:52 mjacob Exp $ */
2/* release_03_16_99 */
3/*
4 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
5 * FreeBSD Version.
6 *
7 *---------------------------------------
8 * Copyright (c) 1997, 1998 by Matthew Jacob
9 * NASA/Ames Research Center
10 * All rights reserved.
11 *---------------------------------------
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice immediately at the beginning of the file, without modification,
18 *    this list of conditions, and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 *    notice, this list of conditions and the following disclaimer in the
21 *    documentation and/or other materials provided with the distribution.
22 * 3. The name of the author may not be used to endorse or promote products
23 *    derived from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
29 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 */
37#include <dev/isp/isp_freebsd.h>
38#include <dev/isp/asm_pci.h>
39#include <sys/malloc.h>
40#include <vm/vm.h>
41#include <vm/pmap.h>
42#include <vm/vm_extern.h>
43
44
45#include <pci/pcireg.h>
46#include <pci/pcivar.h>
47
48#if	__FreeBSD_version >= 300004
49#include <machine/bus_memio.h>
50#include <machine/bus_pio.h>
51#include <machine/bus.h>
52#endif
53
54#include "opt_isp.h"
55
56static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
57static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
58#ifndef ISP_DISABLE_1080_SUPPORT
59static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
60static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
61#endif
62static int isp_pci_mbxdma __P((struct ispsoftc *));
63static int isp_pci_dmasetup __P((struct ispsoftc *, ISP_SCSI_XFER_T *,
64	ispreq_t *, u_int8_t *, u_int8_t));
65#if	__FreeBSD_version >= 300004
66static void
67isp_pci_dmateardown __P((struct ispsoftc *, ISP_SCSI_XFER_T *, u_int32_t));
68#else
69#define	isp_pci_dmateardown	NULL
70#endif
71
72static void isp_pci_reset1 __P((struct ispsoftc *));
73static void isp_pci_dumpregs __P((struct ispsoftc *));
74
#ifndef ISP_DISABLE_1020_SUPPORT
/*
 * Method vector for ISP1020/1040 (parallel SCSI) boards.
 * Slots in order: register read/write, mailbox-queue DMA setup,
 * per-command DMA setup/teardown, an unused hook, post-reset fixup,
 * register dump, firmware image with its length/origin/version, and
 * BIU CONF1 configuration bits.  (Slot names per struct ispmdvec --
 * confirm against the isp core headers.)
 */
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_RISC_CODE,
	ISP_CODE_LENGTH,
	ISP_CODE_ORG,
	ISP_CODE_VERSION,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif
93
#ifndef ISP_DISABLE_1080_SUPPORT
/*
 * Method vector for ISP1080/1240 boards.  Identical to mdvec except for
 * the 1080-specific register accessors, which also route the DMA block
 * through BIU_CONF1.
 */
static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_RISC_CODE,
	ISP_CODE_LENGTH,
	ISP_CODE_ORG,
	ISP_CODE_VERSION,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif
112
#ifndef ISP_DISABLE_2100_SUPPORT
/*
 * Method vector for ISP2100 Fibre Channel boards.  Uses the 2100
 * firmware image; the BIU CONF1 word does not apply to this chip.
 */
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP2100_RISC_CODE,
	ISP2100_CODE_LENGTH,
	ISP2100_CODE_ORG,
	ISP2100_CODE_VERSION,
	0,			/* Irrelevant to the 2100 */
	0
};
#endif
131
132#ifndef	SCSI_ISP_PREFER_MEM_MAP
133#ifdef	__alpha__
134#define	SCSI_ISP_PREFER_MEM_MAP	0
135#else
136#define	SCSI_ISP_PREFER_MEM_MAP	1
137#endif
138#endif
139
140#ifndef	PCIM_CMD_INVEN
141#define	PCIM_CMD_INVEN			0x10
142#endif
143#ifndef	PCIM_CMD_BUSMASTEREN
144#define	PCIM_CMD_BUSMASTEREN		0x0004
145#endif
146#ifndef	PCIM_CMD_PERRESPEN
147#define	PCIM_CMD_PERRESPEN		0x0040
148#endif
149#ifndef	PCIM_CMD_SEREN
150#define	PCIM_CMD_SEREN			0x0100
151#endif
152
153#ifndef	PCIR_COMMAND
154#define	PCIR_COMMAND			0x04
155#endif
156
157#ifndef	PCIR_CACHELNSZ
158#define	PCIR_CACHELNSZ			0x0c
159#endif
160
161#ifndef	PCIR_LATTIMER
162#define	PCIR_LATTIMER			0x0d
163#endif
164
165#ifndef	PCI_VENDOR_QLOGIC
166#define	PCI_VENDOR_QLOGIC	0x1077
167#endif
168
169#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
170#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
171#endif
172
173#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
174#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
175#endif
176
177#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
178#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
179#endif
180
181#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
182#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
183#endif
184
185#define	PCI_QLOGIC_ISP	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
186
187#define	PCI_QLOGIC_ISP1080	\
188	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
189
190#define	PCI_QLOGIC_ISP1240	\
191	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
192
193#define	PCI_QLOGIC_ISP2100	\
194	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
195
196#define	IO_MAP_REG	0x10
197#define	MEM_MAP_REG	0x14
198
199#define	PCI_DFLT_LTNCY	0x40
200#define	PCI_DFLT_LNSZ	0x10
201
202static const char *isp_pci_probe __P((pcici_t tag, pcidi_t type));
203static void isp_pci_attach __P((pcici_t config_d, int unit));
204
205/* This distinguishing define is not right, but it does work */
206
207#if	__FreeBSD_version < 300004
208#define	IO_SPACE_MAPPING	0
209#define	MEM_SPACE_MAPPING	1
210typedef int bus_space_tag_t;
211typedef u_long bus_space_handle_t;
212#ifdef __alpha__
213#define	bus_space_read_2(st, sh, offset)	\
214	alpha_mb(),
215	(st == IO_SPACE_MAPPING)? \
216		inw((pci_port_t)sh + offset) : readw((pci_port_t)sh + offset)
217#define	bus_space_write_2(st, sh, offset, val)	\
218	((st == IO_SPACE_MAPPING)? outw((pci_port_t)sh + offset, val) : \
219                writew((pci_port_t)sh + offset, val)), alpha_mb()
220#else
221#define	bus_space_read_2(st, sh, offset)	\
222	(st == IO_SPACE_MAPPING)? \
223		inw((pci_port_t)sh + offset) : *((u_int16_t *)(uintptr_t)sh)
224#define	bus_space_write_2(st, sh, offset, val)	\
225	if (st == IO_SPACE_MAPPING) outw((pci_port_t)sh + offset, val); else \
226		*((u_int16_t *)(uintptr_t)sh) = val
227#endif
228#else
229#ifdef __alpha__
230#define IO_SPACE_MAPPING	ALPHA_BUS_SPACE_IO
231#define MEM_SPACE_MAPPING	ALPHA_BUS_SPACE_MEM
232#else
233#define IO_SPACE_MAPPING	I386_BUS_SPACE_IO
234#define MEM_SPACE_MAPPING	I386_BUS_SPACE_MEM
235#endif
236#endif
237
/*
 * Per-instance PCI softc.  struct ispsoftc must be the first member so
 * that the common core's (struct ispsoftc *) can be cast back to
 * (struct isp_pcisoftc *) -- the register accessors below rely on this.
 */
struct isp_pcisoftc {
	struct ispsoftc			pci_isp;	/* common softc; must be first */
        pcici_t				pci_id;		/* PCI configuration tag */
	bus_space_tag_t			pci_st;		/* I/O vs. memory space tag */
	bus_space_handle_t		pci_sh;		/* mapped register base */
	int16_t				pci_poff[_NREG_BLKS];	/* per-block register offsets */
#if	__FreeBSD_version >= 300004
	bus_dma_tag_t			parent_dmat;	/* parent tag for all DMA */
	bus_dma_tag_t			cntrol_dmat;	/* tag for control queues */
	bus_dmamap_t			cntrol_dmap;	/* map for control queues */
	bus_dmamap_t			dmaps[MAXISPREQUEST];	/* per-command data maps */
#endif
	/* Chip parameter storage: SCSI params, or FC params + scratch. */
	union {
		sdparam	_x;
		struct {
			fcparam _a;
			char _b[ISP2100_SCRLEN];
		} _y;
	} _z;
};
258
/* Unit number counter handed to the old-style PCI probe framework. */
static u_long ispunit;

/* Old-style (pre-newbus) PCI driver registration. */
struct pci_device isp_pci_driver = {
	"isp",
	isp_pci_probe,
	isp_pci_attach,
	&ispunit,
	NULL
};
DATA_SET (pcidevice_set, isp_pci_driver);
269
270
271static const char *
272isp_pci_probe(pcici_t tag, pcidi_t type)
273{
274	static int oneshot = 1;
275	char *x;
276
277        switch (type) {
278#ifndef	ISP_DISABLE_1020_SUPPORT
279	case PCI_QLOGIC_ISP:
280		x = "Qlogic ISP 1020/1040 PCI SCSI Adapter";
281		break;
282#endif
283#ifndef	ISP_DISABLE_1080_SUPPORT
284	case PCI_QLOGIC_ISP1080:
285#if	0
286	case PCI_QLOGIC_ISP1240:	/* 1240 not ready yet */
287		x = "Qlogic ISP 1080/1240 PCI SCSI Adapter";
288		break;
289#endif
290#endif
291#ifndef	ISP_DISABLE_2100_SUPPORT
292	case PCI_QLOGIC_ISP2100:
293		x = "Qlogic ISP 2100 PCI FC-AL Adapter";
294		break;
295#endif
296	default:
297		return (NULL);
298	}
299	if (oneshot) {
300		oneshot = 0;
301		printf("%s Version %d.%d, Core Version %d.%d\n", PVS,
302		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
303		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
304	}
305	return (x);
306}
307
308
309static void
310isp_pci_attach(pcici_t config_id, int unit)
311{
312	int mapped;
313	pci_port_t io_port;
314	u_int32_t data;
315	struct isp_pcisoftc *pcs;
316	struct ispsoftc *isp;
317	vm_offset_t vaddr, paddr;
318	ISP_LOCKVAL_DECL;
319
320
321	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT);
322	if (pcs == NULL) {
323		printf("isp%d: cannot allocate softc\n", unit);
324		return;
325	}
326	bzero(pcs, sizeof (struct isp_pcisoftc));
327
328	vaddr = paddr = NULL;
329	mapped = 0;
330	/*
331	 * Note that pci_conf_read is a 32 bit word aligned function.
332	 */
333	data = pci_conf_read(config_id, PCIR_COMMAND);
334#if	SCSI_ISP_PREFER_MEM_MAP == 1
335	if (mapped == 0 && (data & PCI_COMMAND_MEM_ENABLE)) {
336		if (pci_map_mem(config_id, MEM_MAP_REG, &vaddr, &paddr)) {
337			pcs->pci_st = MEM_SPACE_MAPPING;
338			pcs->pci_sh = vaddr;
339			mapped++;
340		}
341	}
342	if (mapped == 0 && (data & PCI_COMMAND_IO_ENABLE)) {
343		if (pci_map_port(config_id, PCI_MAP_REG_START, &io_port)) {
344			pcs->pci_st = IO_SPACE_MAPPING;
345			pcs->pci_sh = io_port;
346			mapped++;
347		}
348	}
349#else
350	if (mapped == 0 && (data & PCI_COMMAND_IO_ENABLE)) {
351		if (pci_map_port(config_id, PCI_MAP_REG_START, &io_port)) {
352			pcs->pci_st = IO_SPACE_MAPPING;
353			pcs->pci_sh = io_port;
354			mapped++;
355		}
356	}
357	if (mapped == 0 && (data & PCI_COMMAND_MEM_ENABLE)) {
358		if (pci_map_mem(config_id, MEM_MAP_REG, &vaddr, &paddr)) {
359			pcs->pci_st = MEM_SPACE_MAPPING;
360			pcs->pci_sh = vaddr;
361			mapped++;
362		}
363	}
364#endif
365	if (mapped == 0) {
366		printf("isp%d: unable to map any ports!\n", unit);
367		free(pcs, M_DEVBUF);
368		return;
369	}
370	printf("isp%d: using %s space register mapping\n", unit,
371	    pcs->pci_st == IO_SPACE_MAPPING? "I/O" : "Memory");
372
373	isp = &pcs->pci_isp;
374#if	__FreeBSD_version >= 300006
375	(void) snprintf(isp->isp_name, sizeof (isp->isp_name), "isp%d", unit);
376#else
377	(void) sprintf(isp->isp_name, "isp%d", unit);
378#endif
379	isp->isp_osinfo.unit = unit;
380
381	data = pci_conf_read(config_id, PCI_ID_REG);
382	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
383	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
384	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
385	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
386	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
387#ifndef	ISP_DISABLE_1020_SUPPORT
388	if (data == PCI_QLOGIC_ISP) {
389		isp->isp_mdvec = &mdvec;
390		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
391		isp->isp_param = &pcs->_z._x;
392	}
393#endif
394#ifndef	ISP_DISABLE_1080_SUPPORT
395	if (data == PCI_QLOGIC_ISP1080 || data == PCI_QLOGIC_ISP1240) {
396		isp->isp_mdvec = &mdvec_1080;
397		isp->isp_type = ISP_HA_SCSI_1080;
398		isp->isp_param = &pcs->_z._x;
399		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
400		    ISP1080_DMA_REGS_OFF;
401	}
402#endif
403#ifndef	ISP_DISABLE_2100_SUPPORT
404	if (data == PCI_QLOGIC_ISP2100) {
405		isp->isp_mdvec = &mdvec_2100;
406		isp->isp_type = ISP_HA_FC_2100;
407		isp->isp_param = &pcs->_z._y._a;
408		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
409		    PCI_MBOX_REGS2100_OFF;
410	}
411#endif
412
413#if	__FreeBSD_version >= 300004
414	ISP_LOCK(isp);
415	/*
416	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
417	 * are set.
418	 */
419	data = pci_cfgread(config_id, PCIR_COMMAND, 2);
420	data |=	PCIM_CMD_SEREN		|
421		PCIM_CMD_PERRESPEN	|
422		PCIM_CMD_BUSMASTEREN	|
423		PCIM_CMD_INVEN;
424	pci_cfgwrite(config_id, PCIR_COMMAND, 2, data);
425	/*
426	 * Make sure the CACHE Line Size register is set sensibly.
427	 */
428	data = pci_cfgread(config_id, PCIR_CACHELNSZ, 1);
429	if (data != PCI_DFLT_LNSZ) {
430		data = PCI_DFLT_LNSZ;
431		printf("%s: set PCI line size to %d\n", isp->isp_name, data);
432		pci_cfgwrite(config_id, PCIR_CACHELNSZ, data, 1);
433	}
434	/*
435	 * Make sure the Latency Timer is sane.
436	 */
437	data = pci_cfgread(config_id, PCIR_LATTIMER, 1);
438	if (data < PCI_DFLT_LTNCY) {
439		data = PCI_DFLT_LTNCY;
440		printf("%s: set PCI latency to %d\n", isp->isp_name, data);
441		pci_cfgwrite(config_id, PCIR_LATTIMER, data, 1);
442	}
443	ISP_UNLOCK(isp);
444
445	if (bus_dma_tag_create(NULL, 0, 0, BUS_SPACE_MAXADDR_32BIT,
446	    BUS_SPACE_MAXADDR, NULL, NULL, 1<<24,
447	    255, 1<<24, 0, &pcs->parent_dmat) != 0) {
448		printf("%s: could not create master dma tag\n", isp->isp_name);
449		free(pcs, M_DEVBUF);
450		return;
451	}
452
453#endif
454	if (pci_map_int(config_id, (void (*)(void *))isp_intr,
455	    (void *)isp, &IMASK) == 0) {
456		printf("%s: could not map interrupt\n", isp->isp_name);
457		free(pcs, M_DEVBUF);
458		return;
459	}
460
461	pcs->pci_id = config_id;
462#ifdef	SCSI_ISP_NO_FWLOAD_MASK
463	if (SCSI_ISP_NO_FWLOAD_MASK && (SCSI_ISP_NO_FWLOAD_MASK & (1 << unit)))
464		isp->isp_confopts |= ISP_CFG_NORELOAD;
465#endif
466#ifdef	SCSI_ISP_NO_NVRAM_MASK
467	if (SCSI_ISP_NO_NVRAM_MASK && (SCSI_ISP_NO_NVRAM_MASK & (1 << unit)))
468		isp->isp_confopts |= ISP_CFG_NONVRAM;
469#endif
470	ISP_LOCK(isp);
471	isp_reset(isp);
472	if (isp->isp_state != ISP_RESETSTATE) {
473		ISP_UNLOCK(isp);
474		free(pcs, M_DEVBUF);
475		return;
476	}
477	isp_init(isp);
478	if (isp->isp_state != ISP_INITSTATE) {
479		/* If we're a Fibre Channel Card, we allow deferred attach */
480		if (isp->isp_type & ISP_HA_SCSI) {
481			isp_uninit(isp);
482			free(pcs, M_DEVBUF);
483		}
484	}
485	isp_attach(isp);
486	if (isp->isp_state != ISP_RUNSTATE) {
487		/* If we're a Fibre Channel Card, we allow deferred attach */
488		if (isp->isp_type & ISP_HA_SCSI) {
489			isp_uninit(isp);
490			free(pcs, M_DEVBUF);
491		}
492	}
493	ISP_UNLOCK(isp);
494#ifdef __alpha__
495	/*
496	 * THIS SHOULD NOT HAVE TO BE HERE
497	 */
498	alpha_register_pci_scsi(config_id->bus, config_id->slot, isp->isp_sim);
499#endif
500}
501
502static u_int16_t
503isp_pci_rd_reg(isp, regoff)
504	struct ispsoftc *isp;
505	int regoff;
506{
507	u_int16_t rv;
508	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
509	int offset, oldconf = 0;
510
511	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
512		/*
513		 * We will assume that someone has paused the RISC processor.
514		 */
515		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
516		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
517	}
518	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
519	offset += (regoff & 0xff);
520	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
521	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
522		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
523	}
524	return (rv);
525}
526
527static void
528isp_pci_wr_reg(isp, regoff, val)
529	struct ispsoftc *isp;
530	int regoff;
531	u_int16_t val;
532{
533	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
534	int offset, oldconf = 0;
535
536	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
537		/*
538		 * We will assume that someone has paused the RISC processor.
539		 */
540		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
541		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
542	}
543	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
544	offset += (regoff & 0xff);
545	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
546	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
547		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
548	}
549}
550
551#ifndef	ISP_DISABLE_1080_SUPPORT
/*
 * ISP1080/1240 variant of the register read: on these chips both the
 * SXP block and the DMA block are multiplexed behind BIU_CONF1 routing
 * bits, so either one is switched in around the access and the old
 * BIU_CONF1 contents restored afterwards.
 */
static u_int16_t
isp_pci_rd_reg_1080(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		/* Likewise, route the DMA registers in. */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
		/* Restore the previous BIU_CONF1 routing. */
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
	}
	return (rv);
}
580
/*
 * ISP1080/1240 variant of the register write; see isp_pci_rd_reg_1080
 * for the BIU_CONF1 routing dance around SXP/DMA block accesses.
 */
static void
isp_pci_wr_reg_1080(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		/* Likewise, route the DMA registers in. */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
		/* Restore the previous BIU_CONF1 routing. */
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
	}
}
608#endif
609
610
611#if	__FreeBSD_version >= 300004
612static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int));
613static void isp_map_result __P((void *, bus_dma_segment_t *, int, int));
614static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int));
615
/*
 * bus_dmamap_load callback: record the bus address of the request queue.
 * The arg is really a (struct isp_pcisoftc *), valid here because
 * pci_isp is its first member.
 * NOTE(review): nseg and error are ignored -- the load in
 * isp_pci_mbxdma is expected to yield exactly one segment and not fail.
 */
static void
isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct ispsoftc *isp = (struct ispsoftc *) arg;
	isp->isp_rquest_dma = segs->ds_addr;
}
622
/*
 * bus_dmamap_load callback: record the bus address of the result queue.
 * NOTE(review): nseg and error are ignored, as in isp_map_rquest.
 */
static void
isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct ispsoftc *isp = (struct ispsoftc *) arg;
	isp->isp_result_dma = segs->ds_addr;
}
629
/*
 * bus_dmamap_load callback: record the bus address of the Fibre Channel
 * scratch area.  Only invoked for FC chips (see isp_pci_mbxdma).
 * NOTE(review): nseg and error are ignored, as in isp_map_rquest.
 */
static void
isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct ispsoftc *isp = (struct ispsoftc *) arg;
	fcparam *fcp = isp->isp_param;
	fcp->isp_scdma = segs->ds_addr;
}
637
638static int
639isp_pci_mbxdma(struct ispsoftc *isp)
640{
641	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
642	caddr_t base;
643	u_int32_t len;
644	int i, error;
645
646	/*
647	 * Allocate and map the request, result queues, plus FC scratch area.
648	 */
649	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
650	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
651	if (isp->isp_type & ISP_HA_FC) {
652		len += ISP2100_SCRLEN;
653	}
654	if (bus_dma_tag_create(pci->parent_dmat, 0, 0, BUS_SPACE_MAXADDR,
655	    BUS_SPACE_MAXADDR, NULL, NULL, len, 1, BUS_SPACE_MAXSIZE_32BIT,
656	    0, &pci->cntrol_dmat) != 0) {
657		printf("%s: cannot create a dma tag for control spaces\n",
658		    isp->isp_name);
659		return (1);
660	}
661	if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base,
662	    BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) {
663		printf("%s: cannot allocate %d bytes of CCB memory\n",
664		    isp->isp_name, len);
665		return (1);
666	}
667
668	isp->isp_rquest = base;
669	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest,
670	    ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN), isp_map_rquest, pci, 0);
671
672	isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
673	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result,
674	    ISP_QUEUE_SIZE(RESULT_QUEUE_LEN), isp_map_result, pci, 0);
675
676	if (isp->isp_type & ISP_HA_FC) {
677		fcparam *fcp = (fcparam *) isp->isp_param;
678		fcp->isp_scratch = isp->isp_result +
679		    ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
680		bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap,
681		    fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, pci, 0);
682	}
683
684	/*
685	 * Use this opportunity to initialize/create data DMA maps.
686	 */
687	for (i = 0; i < MAXISPREQUEST; i++) {
688		error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]);
689		if (error) {
690			printf("%s: error %d creating mailbox DMA maps\n",
691			    isp->isp_name, error);
692			return (1);
693		}
694	}
695	return (0);
696}
697
static void dma2 __P((void *, bus_dma_segment_t *, int, int));

/*
 * State bundle passed from isp_pci_dmasetup to the dma2 load callback,
 * since bus_dmamap_load callbacks only get one opaque argument.
 */
typedef struct {
	struct ispsoftc *isp;	/* owning softc */
	ISP_SCSI_XFER_T *ccb;	/* command being mapped */
	ispreq_t *rq;		/* request queue entry to fill */
	u_int8_t *iptrp;	/* in/out: request queue input index */
	u_int8_t optr;		/* request queue output index (for full test) */
	u_int error;		/* out: errno or MUSHERR_NOQENTRIES */
} mush_t;

/* dma2 error code: the request queue had no free continuation slots. */
#define	MUSHERR_NOQENTRIES	-2
709
710static void
711dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
712{
713	mush_t *mp;
714	ISP_SCSI_XFER_T *ccb;
715	struct ispsoftc *isp;
716	struct isp_pcisoftc *pci;
717	bus_dmamap_t *dp;
718	bus_dma_segment_t *eseg;
719	ispreq_t *rq;
720	u_int8_t *iptrp;
721	u_int8_t optr;
722	ispcontreq_t *crq;
723	int drq, seglim, datalen;
724
725	mp = (mush_t *) arg;
726	if (error) {
727		mp->error = error;
728		return;
729	}
730
731	isp = mp->isp;
732	if (nseg < 1) {
733		printf("%s: zero or negative segment count\n", isp->isp_name);
734		mp->error = EFAULT;
735		return;
736	}
737	ccb = mp->ccb;
738	rq = mp->rq;
739	iptrp = mp->iptrp;
740	optr = mp->optr;
741
742	pci = (struct isp_pcisoftc *)isp;
743	dp = &pci->dmaps[rq->req_handle - 1];
744	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
745		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
746		drq = REQFLAG_DATA_IN;
747	} else {
748		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
749		drq = REQFLAG_DATA_OUT;
750	}
751
752	datalen = XS_XFRLEN(ccb);
753	if (isp->isp_type & ISP_HA_FC) {
754		seglim = ISP_RQDSEG_T2;
755		((ispreqt2_t *)rq)->req_totalcnt = datalen;
756		((ispreqt2_t *)rq)->req_flags |= drq;
757	} else {
758		seglim = ISP_RQDSEG;
759		rq->req_flags |= drq;
760	}
761
762	eseg = dm_segs + nseg;
763
764	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
765		if (isp->isp_type & ISP_HA_FC) {
766			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
767			rq2->req_dataseg[rq2->req_seg_count].ds_base =
768			    dm_segs->ds_addr;
769			rq2->req_dataseg[rq2->req_seg_count].ds_count =
770			    dm_segs->ds_len;
771		} else {
772			rq->req_dataseg[rq->req_seg_count].ds_base =
773				dm_segs->ds_addr;
774			rq->req_dataseg[rq->req_seg_count].ds_count =
775				dm_segs->ds_len;
776		}
777		datalen -= dm_segs->ds_len;
778#if	0
779		if (isp->isp_type & ISP_HA_FC) {
780			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
781			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
782			    isp->isp_name, rq->req_seg_count,
783			    rq2->req_dataseg[rq2->req_seg_count].ds_count,
784			    rq2->req_dataseg[rq2->req_seg_count].ds_base);
785		} else {
786			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
787			    isp->isp_name, rq->req_seg_count,
788			    rq->req_dataseg[rq->req_seg_count].ds_count,
789			    rq->req_dataseg[rq->req_seg_count].ds_base);
790		}
791#endif
792		rq->req_seg_count++;
793		dm_segs++;
794	}
795
796	while (datalen > 0 && dm_segs != eseg) {
797		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
798		*iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN);
799		if (*iptrp == optr) {
800#if	0
801			printf("%s: Request Queue Overflow++\n", isp->isp_name);
802#endif
803			mp->error = MUSHERR_NOQENTRIES;
804			return;
805		}
806		rq->req_header.rqs_entry_count++;
807		bzero((void *)crq, sizeof (*crq));
808		crq->req_header.rqs_entry_count = 1;
809		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
810
811		seglim = 0;
812		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
813			crq->req_dataseg[seglim].ds_base =
814			    dm_segs->ds_addr;
815			crq->req_dataseg[seglim].ds_count =
816			    dm_segs->ds_len;
817#if	0
818			printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n",
819			    isp->isp_name, rq->req_header.rqs_entry_count-1,
820			    seglim, crq->req_dataseg[seglim].ds_count,
821			    crq->req_dataseg[seglim].ds_base);
822#endif
823			rq->req_seg_count++;
824			dm_segs++;
825			seglim++;
826			datalen -= dm_segs->ds_len;
827		}
828	}
829}
830
/*
 * CAM-era DMA setup for one command: load the command's data into its
 * DMA map and fill the request queue entry's segment descriptors via
 * the dma2 callback.  Returns CMD_QUEUED on success, CMD_EAGAIN if the
 * request queue was full, or CMD_COMPLETE with ccb status set on error.
 */
static int
isp_pci_dmasetup(struct ispsoftc *isp, ISP_SCSI_XFER_T *ccb, ispreq_t *rq,
	u_int8_t *iptrp, u_int8_t optr)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	struct ccb_hdr *ccb_h;
	struct ccb_scsiio *csio;
	bus_dmamap_t *dp;
	mush_t mush, *mp;

	csio = (struct ccb_scsiio *) ccb;
	ccb_h = &csio->ccb_h;

	/* No data movement: a single (unused) segment count suffices. */
	if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
		rq->req_seg_count = 1;
		return (CMD_QUEUED);
	}
	dp = &pci->dmaps[rq->req_handle - 1];

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->ccb = ccb;
	mp->rq = rq;
	mp->iptrp = iptrp;
	mp->optr = optr;
	mp->error = 0;

	if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
		if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
			int error, s;

			/* Block softvm interrupts while the map loads. */
			s = splsoftvm();
			error = bus_dmamap_load(pci->parent_dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, dma2, mp, 0);
			if (error == EINPROGRESS) {
				/* Deferred callbacks are not handled here. */
				bus_dmamap_unload(pci->parent_dmat, *dp);
				mp->error = EINVAL;
				printf("%s: deferred dma allocation not "
				    "supported\n", isp->isp_name);
			} else if (error && mp->error == 0) {
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			dma2(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((ccb_h->flags & CAM_DATA_PHYS) != 0) {
			printf("%s: Physical segment pointers unsupported",
				isp->isp_name);
			mp->error = EINVAL;
		} else if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) {
			printf("%s: Virtual segment addresses unsupported",
				isp->isp_name);
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			dma2(mp, segs, csio->sglist_cnt, 0);
		}
	}
	/* Map dma2's outcome onto the driver's command return codes. */
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
			ccb_h->status = CAM_UNREC_HBA_ERROR;
		} else if (mp->error == EFBIG) {
			ccb_h->status = CAM_REQ_TOO_BIG;
		} else if (mp->error == EINVAL) {
			ccb_h->status = CAM_REQ_INVALID;
		} else {
			ccb_h->status = CAM_UNREC_HBA_ERROR;
		}
		return (retval);
	} else {
		return (CMD_QUEUED);
	}
}
919
/*
 * Tear down the data DMA mapping for a completed command: sync the map
 * for the transfer direction, then unload it.
 * NOTE(review): this indexes dmaps[handle], while isp_pci_dmasetup and
 * dma2 index dmaps[req_handle - 1].  Whether the caller passes the raw
 * handle or handle-1 should be confirmed against the core code -- a
 * possible off-by-one.
 */
static void
isp_pci_dmateardown(struct ispsoftc *isp, ISP_SCSI_XFER_T *ccb,
	u_int32_t handle)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pci->dmaps[handle];

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pci->parent_dmat, *dp);
}
934
935#else	/* __FreeBSD_version >= 300004 */
936
937
938static int
939isp_pci_mbxdma(struct ispsoftc *isp)
940{
941	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
942	u_int32_t len;
943	int rseg;
944
945	/* XXXX CHECK FOR ALIGNMENT */
946	/*
947	 * Allocate and map the request queue.
948	 */
949	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
950	isp->isp_rquest = malloc(len, M_DEVBUF, M_NOWAIT);
951	if (isp->isp_rquest == NULL) {
952		printf("%s: cannot malloc request queue\n", isp->isp_name);
953		return (1);
954	}
955	isp->isp_rquest_dma = vtophys(isp->isp_rquest);
956
957#if	0
958	printf("RQUEST=0x%x (0x%x)...", isp->isp_rquest, isp->isp_rquest_dma);
959#endif
960
961	/*
962	 * Allocate and map the result queue.
963	 */
964	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
965	isp->isp_result = malloc(len, M_DEVBUF, M_NOWAIT);
966	if (isp->isp_result == NULL) {
967		free(isp->isp_rquest, M_DEVBUF);
968		printf("%s: cannot malloc result queue\n", isp->isp_name);
969		return (1);
970	}
971	isp->isp_result_dma = vtophys(isp->isp_result);
972#if	0
973	printf("RESULT=0x%x (0x%x)\n", isp->isp_result, isp->isp_result_dma);
974#endif
975	if (isp->isp_type & ISP_HA_FC) {
976		fcparam *fcp = isp->isp_param;
977		len = ISP2100_SCRLEN;
978		fcp->isp_scratch = (volatile caddr_t) &pci->_z._y._b;
979		fcp->isp_scdma = vtophys(fcp->isp_scratch);
980	}
981	return (0);
982}
983
/*
 * Pre-CAM DMA setup: walk the virtually contiguous data buffer with
 * vtophys(), coalescing physically adjacent pages into segments, first
 * into the request entry itself and then into continuation entries
 * allocated from the request queue.  Returns CMD_QUEUED on success or
 * CMD_EAGAIN if the request queue overflows.
 */
static int
isp_pci_dmasetup(struct ispsoftc *isp, ISP_SCSI_XFER_T *xs,
	ispreq_t *rq, u_int8_t *iptrp, u_int8_t optr)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	ispcontreq_t *crq;
	vm_offset_t vaddr;
	int drq, seglim;
	u_int32_t paddr, nextpaddr, datalen, size, *ctrp;

	/* No data to move: a single (unused) segment count suffices. */
	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		return (CMD_QUEUED);
	}

	if (xs->flags & SCSI_DATA_IN) {
		drq = REQFLAG_DATA_IN;
	} else {
		drq = REQFLAG_DATA_OUT;
	}

	/* FC entries use the type-2 layout with a different segment limit. */
	if (isp->isp_type & ISP_HA_FC) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = XS_XFRLEN(xs);
		((ispreqt2_t *)rq)->req_flags |= drq;
	} else {
		seglim = ISP_RQDSEG;
		rq->req_flags |= drq;
	}

	datalen = XS_XFRLEN(xs);
	vaddr = (vm_offset_t) xs->data;
	paddr = vtophys(vaddr);

	/* Fill the request entry's own segment descriptors. */
	while (datalen != 0 && rq->req_seg_count < seglim) {
		if (isp->isp_type & ISP_HA_FC) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base = paddr;
			ctrp = &rq2->req_dataseg[rq2->req_seg_count].ds_count;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base = paddr;
			ctrp = &rq->req_dataseg[rq->req_seg_count].ds_count;
		}
		nextpaddr = paddr;
		*(ctrp) = 0;

		/* Extend the segment across physically contiguous pages. */
		while (datalen != 0 && paddr == nextpaddr) {
			nextpaddr = (paddr & (~PAGE_MASK)) + PAGE_SIZE;
			size = nextpaddr - paddr;
			if (size > datalen)
				size = datalen;
			*(ctrp) += size;
			vaddr += size;
			datalen -= size;
			if (datalen != 0)
				paddr = vtophys(vaddr);

		}
#if	0
		if (isp->isp_type & ISP_HA_FC) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
			    isp->isp_name, rq->req_seg_count,
			    rq2->req_dataseg[rq2->req_seg_count].ds_count,
			    rq2->req_dataseg[rq2->req_seg_count].ds_base);
		} else {
			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
			    isp->isp_name, rq->req_seg_count,
			    rq->req_dataseg[rq->req_seg_count].ds_count,
			    rq->req_dataseg[rq->req_seg_count].ds_base);
		}
#endif
		rq->req_seg_count++;
	}



	if (datalen == 0)
		return (CMD_QUEUED);

	/* Leftover data spills into continuation entries. */
	paddr = vtophys(vaddr);
	while (datalen > 0) {
		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN);
		if (*iptrp == optr) {
			printf("%s: Request Queue Overflow\n", isp->isp_name);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_EAGAIN);
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		for (seglim = 0; datalen != 0 && seglim < ISP_CDSEG; seglim++) {
			crq->req_dataseg[seglim].ds_base = paddr;
			ctrp = &crq->req_dataseg[seglim].ds_count;
			*(ctrp) = 0;
			nextpaddr = paddr;
			/* Same page-coalescing walk as above. */
			while (datalen != 0 && paddr == nextpaddr) {
				nextpaddr = (paddr & (~PAGE_MASK)) + PAGE_SIZE;
				size = nextpaddr - paddr;
				if (size > datalen)
					size = datalen;
				*(ctrp) += size;
				vaddr += size;
				datalen -= size;
				if (datalen != 0)
					paddr = vtophys(vaddr);
			}
#if	0
			printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n",
			    isp->isp_name, rq->req_header.rqs_entry_count-1,
			    seglim, crq->req_dataseg[seglim].ds_count,
			    crq->req_dataseg[seglim].ds_base);
#endif
			rq->req_seg_count++;
		}
	}

	return (CMD_QUEUED);
}
1106#endif
1107
/*
 * Post-reset fixup hook (mdvec slot): disable the chip's BIOS so it
 * does not interfere after a RISC reset.
 */
static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}
1114
1115static void
1116isp_pci_dumpregs(struct ispsoftc *isp)
1117{
1118	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1119	printf("%s: PCI Status Command/Status=%lx\n", pci->pci_isp.isp_name,
1120	    pci_conf_read(pci->pci_id, PCIR_COMMAND));
1121}
1122