Deleted Added
full compact
isp_pci.c (41515) isp_pci.c (41771)
1/* $FreeBSD: head/sys/dev/isp/isp_pci.c 41515 1998-12-05 00:07:04Z mjacob $ */
1/* $FreeBSD: head/sys/dev/isp/isp_pci.c 41771 1998-12-14 06:37:37Z dillon $ */
2/* isp_pci.c 1.18 */
3/*
4 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
5 * FreeBSD Version.
6 *
7 *---------------------------------------
8 * Copyright (c) 1997, 1998 by Matthew Jacob
9 * NASA/Ames Research Center
10 * All rights reserved.
11 *---------------------------------------
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice immediately at the beginning of the file, without modification,
18 * this list of conditions, and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 3. The name of the author may not be used to endorse or promote products
23 * derived from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
29 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 */
37#include <dev/isp/isp_freebsd.h>
38#include <dev/isp/asm_pci.h>
39#include <sys/malloc.h>
40#include <vm/vm.h>
41#include <vm/pmap.h>
42#include <vm/vm_extern.h>
43
44
45#include <pci/pcireg.h>
46#include <pci/pcivar.h>
47
48#if __FreeBSD_version >= 300004
49#include <machine/bus_memio.h>
50#include <machine/bus_pio.h>
51#include <machine/bus.h>
52#endif
53
54static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
55static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
56static int isp_pci_mbxdma __P((struct ispsoftc *));
57static int isp_pci_dmasetup __P((struct ispsoftc *, ISP_SCSI_XFER_T *,
58 ispreq_t *, u_int8_t *, u_int8_t));
59#if __FreeBSD_version >= 300004
60static void
61isp_pci_dmateardown __P((struct ispsoftc *, ISP_SCSI_XFER_T *, u_int32_t));
62#else
63#define isp_pci_dmateardown NULL
64#endif
65
66static void isp_pci_reset1 __P((struct ispsoftc *));
67static void isp_pci_dumpregs __P((struct ispsoftc *));
68
69static struct ispmdvec mdvec = {
70 isp_pci_rd_reg,
71 isp_pci_wr_reg,
72 isp_pci_mbxdma,
73 isp_pci_dmasetup,
74 isp_pci_dmateardown,
75 NULL,
76 isp_pci_reset1,
77 isp_pci_dumpregs,
78 ISP_RISC_CODE,
79 ISP_CODE_LENGTH,
80 ISP_CODE_ORG,
81 ISP_CODE_VERSION,
82 BIU_BURST_ENABLE,
83 0
84};
85
86static struct ispmdvec mdvec_2100 = {
87 isp_pci_rd_reg,
88 isp_pci_wr_reg,
89 isp_pci_mbxdma,
90 isp_pci_dmasetup,
91 isp_pci_dmateardown,
92 NULL,
93 isp_pci_reset1,
94 isp_pci_dumpregs,
95 ISP2100_RISC_CODE,
96 ISP2100_CODE_LENGTH,
97 ISP2100_CODE_ORG,
98 ISP2100_CODE_VERSION,
99 BIU_BURST_ENABLE,
100 0
101};
102
103#ifndef PCIM_CMD_INVEN
104#define PCIM_CMD_INVEN 0x10
105#endif
106#ifndef PCIM_CMD_BUSMASTEREN
107#define PCIM_CMD_BUSMASTEREN 0x0004
108#endif
109
110#ifndef PCI_VENDOR_QLOGIC
111#define PCI_VENDOR_QLOGIC 0x1077
112#endif
113
114#ifndef PCI_PRODUCT_QLOGIC_ISP1020
115#define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
116#endif
117
118#define PCI_QLOGIC_ISP \
119 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
120
121#ifndef PCI_PRODUCT_QLOGIC_ISP2100
122#define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
123#endif
124
125#define PCI_QLOGIC_ISP2100 \
126 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
127
128#define IO_MAP_REG 0x10
129#define MEM_MAP_REG 0x14
130
131
2/* isp_pci.c 1.18 */
3/*
4 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
5 * FreeBSD Version.
6 *
7 *---------------------------------------
8 * Copyright (c) 1997, 1998 by Matthew Jacob
9 * NASA/Ames Research Center
10 * All rights reserved.
11 *---------------------------------------
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice immediately at the beginning of the file, without modification,
18 * this list of conditions, and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 3. The name of the author may not be used to endorse or promote products
23 * derived from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
29 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 */
37#include <dev/isp/isp_freebsd.h>
38#include <dev/isp/asm_pci.h>
39#include <sys/malloc.h>
40#include <vm/vm.h>
41#include <vm/pmap.h>
42#include <vm/vm_extern.h>
43
44
45#include <pci/pcireg.h>
46#include <pci/pcivar.h>
47
48#if __FreeBSD_version >= 300004
49#include <machine/bus_memio.h>
50#include <machine/bus_pio.h>
51#include <machine/bus.h>
52#endif
53
54static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
55static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
56static int isp_pci_mbxdma __P((struct ispsoftc *));
57static int isp_pci_dmasetup __P((struct ispsoftc *, ISP_SCSI_XFER_T *,
58 ispreq_t *, u_int8_t *, u_int8_t));
59#if __FreeBSD_version >= 300004
60static void
61isp_pci_dmateardown __P((struct ispsoftc *, ISP_SCSI_XFER_T *, u_int32_t));
62#else
63#define isp_pci_dmateardown NULL
64#endif
65
66static void isp_pci_reset1 __P((struct ispsoftc *));
67static void isp_pci_dumpregs __P((struct ispsoftc *));
68
69static struct ispmdvec mdvec = {
70 isp_pci_rd_reg,
71 isp_pci_wr_reg,
72 isp_pci_mbxdma,
73 isp_pci_dmasetup,
74 isp_pci_dmateardown,
75 NULL,
76 isp_pci_reset1,
77 isp_pci_dumpregs,
78 ISP_RISC_CODE,
79 ISP_CODE_LENGTH,
80 ISP_CODE_ORG,
81 ISP_CODE_VERSION,
82 BIU_BURST_ENABLE,
83 0
84};
85
86static struct ispmdvec mdvec_2100 = {
87 isp_pci_rd_reg,
88 isp_pci_wr_reg,
89 isp_pci_mbxdma,
90 isp_pci_dmasetup,
91 isp_pci_dmateardown,
92 NULL,
93 isp_pci_reset1,
94 isp_pci_dumpregs,
95 ISP2100_RISC_CODE,
96 ISP2100_CODE_LENGTH,
97 ISP2100_CODE_ORG,
98 ISP2100_CODE_VERSION,
99 BIU_BURST_ENABLE,
100 0
101};
102
103#ifndef PCIM_CMD_INVEN
104#define PCIM_CMD_INVEN 0x10
105#endif
106#ifndef PCIM_CMD_BUSMASTEREN
107#define PCIM_CMD_BUSMASTEREN 0x0004
108#endif
109
110#ifndef PCI_VENDOR_QLOGIC
111#define PCI_VENDOR_QLOGIC 0x1077
112#endif
113
114#ifndef PCI_PRODUCT_QLOGIC_ISP1020
115#define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
116#endif
117
118#define PCI_QLOGIC_ISP \
119 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
120
121#ifndef PCI_PRODUCT_QLOGIC_ISP2100
122#define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
123#endif
124
125#define PCI_QLOGIC_ISP2100 \
126 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
127
128#define IO_MAP_REG 0x10
129#define MEM_MAP_REG 0x14
130
131
132static char *isp_pci_probe __P((pcici_t tag, pcidi_t type));
132static const char *isp_pci_probe __P((pcici_t tag, pcidi_t type));
133static void isp_pci_attach __P((pcici_t config_d, int unit));
134
135/* This distinguishing define is not right, but it does work */
136
137#if __FreeBSD_version < 300004
138#define IO_SPACE_MAPPING 0
139#define MEM_SPACE_MAPPING 1
140typedef int bus_space_tag_t;
141typedef u_long bus_space_handle_t;
142#ifdef __alpha__
143#define bus_space_read_2(st, sh, offset) \
144 (st == IO_SPACE_MAPPING)? \
145 inw((pci_port_t)sh + offset) : readw((pci_port_t)sh + offset)
146#define bus_space_write_2(st, sh, offset, val) \
147 if (st == IO_SPACE_MAPPING) outw((pci_port_t)sh + offset, val); else \
148 writew((pci_port_t)sh + offset, val)
149#else
150#define bus_space_read_2(st, sh, offset) \
151 (st == IO_SPACE_MAPPING)? \
152 inw((pci_port_t)sh + offset) : *((u_int16_t *)(uintptr_t)sh)
153#define bus_space_write_2(st, sh, offset, val) \
154 if (st == IO_SPACE_MAPPING) outw((pci_port_t)sh + offset, val); else \
155 *((u_int16_t *)(uintptr_t)sh) = val
156#endif
157#else
158#ifdef __alpha__
159#define IO_SPACE_MAPPING ALPHA_BUS_SPACE_IO
160#define MEM_SPACE_MAPPING ALPHA_BUS_SPACE_MEM
161#else
162#define IO_SPACE_MAPPING I386_BUS_SPACE_IO
163#define MEM_SPACE_MAPPING I386_BUS_SPACE_MEM
164#endif
165#endif
166
167struct isp_pcisoftc {
168 struct ispsoftc pci_isp;
169 pcici_t pci_id;
170 bus_space_tag_t pci_st;
171 bus_space_handle_t pci_sh;
172#if __FreeBSD_version >= 300004
173 bus_dma_tag_t parent_dmat;
174 bus_dma_tag_t cntrol_dmat;
175 bus_dmamap_t cntrol_dmap;
176 bus_dmamap_t dmaps[MAXISPREQUEST];
177#endif
178 union {
179 sdparam _x;
180 struct {
181 fcparam _a;
182 char _b[ISP2100_SCRLEN];
183 } _y;
184 } _z;
185};
186
187static u_long ispunit;
188
189struct pci_device isp_pci_driver = {
190 "isp",
191 isp_pci_probe,
192 isp_pci_attach,
193 &ispunit,
194 NULL
195};
196DATA_SET (pcidevice_set, isp_pci_driver);
197
198
133static void isp_pci_attach __P((pcici_t config_d, int unit));
134
135/* This distinguishing define is not right, but it does work */
136
137#if __FreeBSD_version < 300004
138#define IO_SPACE_MAPPING 0
139#define MEM_SPACE_MAPPING 1
140typedef int bus_space_tag_t;
141typedef u_long bus_space_handle_t;
142#ifdef __alpha__
143#define bus_space_read_2(st, sh, offset) \
144 (st == IO_SPACE_MAPPING)? \
145 inw((pci_port_t)sh + offset) : readw((pci_port_t)sh + offset)
146#define bus_space_write_2(st, sh, offset, val) \
147 if (st == IO_SPACE_MAPPING) outw((pci_port_t)sh + offset, val); else \
148 writew((pci_port_t)sh + offset, val)
149#else
150#define bus_space_read_2(st, sh, offset) \
151 (st == IO_SPACE_MAPPING)? \
152 inw((pci_port_t)sh + offset) : *((u_int16_t *)(uintptr_t)sh)
153#define bus_space_write_2(st, sh, offset, val) \
154 if (st == IO_SPACE_MAPPING) outw((pci_port_t)sh + offset, val); else \
155 *((u_int16_t *)(uintptr_t)sh) = val
156#endif
157#else
158#ifdef __alpha__
159#define IO_SPACE_MAPPING ALPHA_BUS_SPACE_IO
160#define MEM_SPACE_MAPPING ALPHA_BUS_SPACE_MEM
161#else
162#define IO_SPACE_MAPPING I386_BUS_SPACE_IO
163#define MEM_SPACE_MAPPING I386_BUS_SPACE_MEM
164#endif
165#endif
166
167struct isp_pcisoftc {
168 struct ispsoftc pci_isp;
169 pcici_t pci_id;
170 bus_space_tag_t pci_st;
171 bus_space_handle_t pci_sh;
172#if __FreeBSD_version >= 300004
173 bus_dma_tag_t parent_dmat;
174 bus_dma_tag_t cntrol_dmat;
175 bus_dmamap_t cntrol_dmap;
176 bus_dmamap_t dmaps[MAXISPREQUEST];
177#endif
178 union {
179 sdparam _x;
180 struct {
181 fcparam _a;
182 char _b[ISP2100_SCRLEN];
183 } _y;
184 } _z;
185};
186
187static u_long ispunit;
188
189struct pci_device isp_pci_driver = {
190 "isp",
191 isp_pci_probe,
192 isp_pci_attach,
193 &ispunit,
194 NULL
195};
196DATA_SET (pcidevice_set, isp_pci_driver);
197
198
199static char *
199static const char *
200isp_pci_probe(tag, type)
201 pcici_t tag;
202 pcidi_t type;
203{
204 static int oneshot = 1;
205 char *x;
206
207 switch (type) {
208 case PCI_QLOGIC_ISP:
209 x = "Qlogic ISP 10X0 PCI SCSI Adapter";
210 break;
211 case PCI_QLOGIC_ISP2100:
212 x = "Qlogic ISP 2100 PCI FC-AL Adapter";
213 break;
214 default:
215 return (NULL);
216 }
217 if (oneshot) {
218 oneshot = 0;
219 printf("%s Version %d.%d, Core Version %d.%d\n", PVS,
220 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
221 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
222 }
223 return (x);
224}
225
226
227static void
228isp_pci_attach(config_id, unit)
229 pcici_t config_id;
230 int unit;
231{
232 int mapped;
233 pci_port_t io_port;
234 u_int32_t data;
235 struct isp_pcisoftc *pcs;
236 struct ispsoftc *isp;
237 vm_offset_t vaddr, paddr;
238 ISP_LOCKVAL_DECL;
239
240
241 pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT);
242 if (pcs == NULL) {
243 printf("isp%d: cannot allocate softc\n", unit);
244 return;
245 }
246 bzero(pcs, sizeof (struct isp_pcisoftc));
247
248 vaddr = paddr = NULL;
249 mapped = 0;
250 data = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
251 if (mapped == 0 && (data & PCI_COMMAND_MEM_ENABLE)) {
252 if (pci_map_mem(config_id, MEM_MAP_REG, &vaddr, &paddr)) {
253 pcs->pci_st = MEM_SPACE_MAPPING;
254 pcs->pci_sh = vaddr;
255 mapped++;
256 }
257 }
258 if (mapped == 0 && (data & PCI_COMMAND_IO_ENABLE)) {
259 if (pci_map_port(config_id, PCI_MAP_REG_START, &io_port)) {
260 pcs->pci_st = IO_SPACE_MAPPING;
261 pcs->pci_sh = io_port;
262 mapped++;
263 }
264 }
265 if (mapped == 0) {
266 printf("isp%d: unable to map any ports!\n", unit);
267 free(pcs, M_DEVBUF);
268 return;
269 }
270 printf("isp%d: using %s space register mapping\n", unit,
271 pcs->pci_st == IO_SPACE_MAPPING? "I/O" : "Memory");
272
273 isp = &pcs->pci_isp;
274#if __FreeBSD_version >= 300006
275 (void) snprintf(isp->isp_name, sizeof(isp->isp_name), "isp%d", unit);
276#else
277 (void) sprintf(isp->isp_name, "isp%d", unit);
278#endif
279 isp->isp_osinfo.unit = unit;
280
281 data = pci_conf_read(config_id, PCI_ID_REG);
282 if (data == PCI_QLOGIC_ISP) {
283 isp->isp_mdvec = &mdvec;
284 isp->isp_type = ISP_HA_SCSI_UNKNOWN;
285 isp->isp_param = &pcs->_z._x;
286 } else if (data == PCI_QLOGIC_ISP2100) {
287 isp->isp_mdvec = &mdvec_2100;
288 isp->isp_type = ISP_HA_FC_2100;
289 isp->isp_param = &pcs->_z._y._a;
290
291 ISP_LOCK(isp);
292 data = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
293 data |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
294 pci_conf_write(config_id, PCI_COMMAND_STATUS_REG, data);
295
296 /*
297 * Wierd- we need to clear the lsb in offset 0x30 to take the
298 * chip out of reset state.
299 */
300 data = pci_conf_read(config_id, 0x30);
301 data &= ~1;
302 pci_conf_write(config_id, 0x30, data);
303 ISP_UNLOCK(isp);
304 } else {
305 printf("%s: unknown dev (%x)- punting\n", isp->isp_name, data);
306 free(pcs, M_DEVBUF);
307 return;
308 }
309
310#if __FreeBSD_version >= 300004
311 if (bus_dma_tag_create(NULL, 0, 0, BUS_SPACE_MAXADDR_32BIT,
312 BUS_SPACE_MAXADDR, NULL, NULL, 1<<24,
313 255, 1<<24, 0, &pcs->parent_dmat) != 0) {
314 printf("%s: could not create master dma tag\n", isp->isp_name);
315 free(pcs, M_DEVBUF);
316 return;
317 }
318#endif
319 if (pci_map_int(config_id, (void (*)(void *))isp_intr,
320 (void *)isp, &IMASK) == 0) {
321 printf("%s: could not map interrupt\n", isp->isp_name);
322 free(pcs, M_DEVBUF);
323 return;
324 }
325
326 pcs->pci_id = config_id;
327 ISP_LOCK(isp);
328 isp_reset(isp);
329 if (isp->isp_state != ISP_RESETSTATE) {
330 ISP_UNLOCK(isp);
331 free(pcs, M_DEVBUF);
332 return;
333 }
334 isp_init(isp);
335 if (isp->isp_state != ISP_INITSTATE) {
336 isp_uninit(isp);
337 ISP_UNLOCK(isp);
338 free(pcs, M_DEVBUF);
339 return;
340 }
341 isp_attach(isp);
342 if (isp->isp_state != ISP_RUNSTATE) {
343 isp_uninit(isp);
344 free(pcs, M_DEVBUF);
345 }
346 ISP_UNLOCK(isp);
347#ifdef __alpha__
348 alpha_register_pci_scsi(config_id->bus, config_id->slot, isp->isp_sim);
349#endif
350}
351
352#define PCI_BIU_REGS_OFF BIU_REGS_OFF
353
354static u_int16_t
355isp_pci_rd_reg(isp, regoff)
356 struct ispsoftc *isp;
357 int regoff;
358{
359 u_int16_t rv;
360 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
361 int offset, oldsxp = 0;
362
363 if ((regoff & BIU_BLOCK) != 0) {
364 offset = PCI_BIU_REGS_OFF;
365 } else if ((regoff & MBOX_BLOCK) != 0) {
366 if (isp->isp_type & ISP_HA_SCSI)
367 offset = PCI_MBOX_REGS_OFF;
368 else
369 offset = PCI_MBOX_REGS2100_OFF;
370 } else if ((regoff & SXP_BLOCK) != 0) {
371 offset = PCI_SXP_REGS_OFF;
372 /*
373 * We will assume that someone has paused the RISC processor.
374 */
375 oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
376 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
377 } else {
378 offset = PCI_RISC_REGS_OFF;
379 }
380 regoff &= 0xff;
381 offset += regoff;
382 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
383 if ((regoff & SXP_BLOCK) != 0) {
384 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
385 }
386 return (rv);
387}
388
389static void
390isp_pci_wr_reg(isp, regoff, val)
391 struct ispsoftc *isp;
392 int regoff;
393 u_int16_t val;
394{
395 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
396 int offset, oldsxp = 0;
397 if ((regoff & BIU_BLOCK) != 0) {
398 offset = PCI_BIU_REGS_OFF;
399 } else if ((regoff & MBOX_BLOCK) != 0) {
400 if (isp->isp_type & ISP_HA_SCSI)
401 offset = PCI_MBOX_REGS_OFF;
402 else
403 offset = PCI_MBOX_REGS2100_OFF;
404 } else if ((regoff & SXP_BLOCK) != 0) {
405 offset = PCI_SXP_REGS_OFF;
406 /*
407 * We will assume that someone has paused the RISC processor.
408 */
409 oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
410 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
411 } else {
412 offset = PCI_RISC_REGS_OFF;
413 }
414 regoff &= 0xff;
415 offset += regoff;
416 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
417 if ((regoff & SXP_BLOCK) != 0) {
418 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
419 }
420}
421
422#if __FreeBSD_version >= 300004
423static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int));
424static void isp_map_result __P((void *, bus_dma_segment_t *, int, int));
425static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int));
426
427static void
428isp_map_rquest(arg, segs, nseg, error)
429 void *arg;
430 bus_dma_segment_t *segs;
431 int nseg;
432 int error;
433{
434 struct ispsoftc *isp = (struct ispsoftc *) arg;
435 isp->isp_rquest_dma = segs->ds_addr;
436}
437
438static void
439isp_map_result(arg, segs, nseg, error)
440 void *arg;
441 bus_dma_segment_t *segs;
442 int nseg;
443 int error;
444{
445 struct ispsoftc *isp = (struct ispsoftc *) arg;
446 isp->isp_result_dma = segs->ds_addr;
447}
448
449static void
450isp_map_fcscrt(arg, segs, nseg, error)
451 void *arg;
452 bus_dma_segment_t *segs;
453 int nseg;
454 int error;
455{
456 struct ispsoftc *isp = (struct ispsoftc *) arg;
457 fcparam *fcp = isp->isp_param;
458 fcp->isp_scdma = segs->ds_addr;
459}
460
461static int
462isp_pci_mbxdma(isp)
463 struct ispsoftc *isp;
464{
465 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
466 caddr_t base;
467 u_int32_t len;
468 int i, error;
469
470 /*
471 * Allocate and map the request, result queues, plus FC scratch area.
472 */
473 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
474 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
475 if (isp->isp_type & ISP_HA_FC) {
476 len += ISP2100_SCRLEN;
477 }
478 if (bus_dma_tag_create(pci->parent_dmat, 0, 0, BUS_SPACE_MAXADDR,
479 BUS_SPACE_MAXADDR, NULL, NULL, len, 1, BUS_SPACE_MAXSIZE_32BIT,
480 0, &pci->cntrol_dmat) != 0) {
481 printf("%s: cannot create a dma tag for control spaces\n",
482 isp->isp_name);
483 return (1);
484 }
485 if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base,
486 BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) {
487 printf("%s: cannot allocate %d bytes of CCB memory\n",
488 isp->isp_name, len);
489 return (1);
490 }
491
492 isp->isp_rquest = base;
493 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest,
494 ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN), isp_map_rquest, pci, 0);
495
496 isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
497 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result,
498 ISP_QUEUE_SIZE(RESULT_QUEUE_LEN), isp_map_result, pci, 0);
499
500 if (isp->isp_type & ISP_HA_FC) {
501 fcparam *fcp = (fcparam *) isp->isp_param;
502 fcp->isp_scratch = isp->isp_result +
503 ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
504 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap,
505 fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, pci, 0);
506 }
507
508 /*
509 * Use this opportunity to initialize/create data DMA maps.
510 */
511 for (i = 0; i < MAXISPREQUEST; i++) {
512 error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]);
513 if (error) {
514 printf("%s: error %d creating mailbox DMA maps\n",
515 isp->isp_name, error);
516 return (1);
517 }
518 }
519 return (0);
520}
521
522static void dma2 __P((void *, bus_dma_segment_t *, int, int));
523typedef struct {
524 struct ispsoftc *isp;
525 ISP_SCSI_XFER_T *ccb;
526 ispreq_t *rq;
527 u_int8_t *iptrp;
528 u_int8_t optr;
529 u_int error;
530} mush_t;
531
532#define MUSHERR_NOQENTRIES -2
533
534static void
535dma2(arg, dm_segs, nseg, error)
536 void *arg;
537 bus_dma_segment_t *dm_segs;
538 int nseg;
539 int error;
540{
541 mush_t *mp;
542 ISP_SCSI_XFER_T *ccb;
543 struct ispsoftc *isp;
544 struct isp_pcisoftc *pci;
545 bus_dmamap_t *dp;
546 bus_dma_segment_t *eseg;
547 ispreq_t *rq;
548 u_int8_t *iptrp;
549 u_int8_t optr;
550 ispcontreq_t *crq;
551 int drq, seglim, datalen;
552
553 mp = (mush_t *) arg;
554 if (error) {
555 mp->error = error;
556 return;
557 }
558
559 isp = mp->isp;
560 if (nseg < 1) {
561 printf("%s: zero or negative segment count\n", isp->isp_name);
562 mp->error = EFAULT;
563 return;
564 }
565 ccb = mp->ccb;
566 rq = mp->rq;
567 iptrp = mp->iptrp;
568 optr = mp->optr;
569
570 pci = (struct isp_pcisoftc *)isp;
571 dp = &pci->dmaps[rq->req_handle - 1];
572 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
573 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
574 drq = REQFLAG_DATA_IN;
575 } else {
576 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
577 drq = REQFLAG_DATA_OUT;
578 }
579
580 datalen = XS_XFRLEN(ccb);
581 if (isp->isp_type & ISP_HA_FC) {
582 seglim = ISP_RQDSEG_T2;
583 ((ispreqt2_t *)rq)->req_totalcnt = datalen;
584 ((ispreqt2_t *)rq)->req_flags |= drq;
585 } else {
586 seglim = ISP_RQDSEG;
587 rq->req_flags |= drq;
588 }
589
590 eseg = dm_segs + nseg;
591
592 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
593 if (isp->isp_type & ISP_HA_FC) {
594 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
595 rq2->req_dataseg[rq2->req_seg_count].ds_base =
596 dm_segs->ds_addr;
597 rq2->req_dataseg[rq2->req_seg_count].ds_count =
598 dm_segs->ds_len;
599 } else {
600 rq->req_dataseg[rq->req_seg_count].ds_base =
601 dm_segs->ds_addr;
602 rq->req_dataseg[rq->req_seg_count].ds_count =
603 dm_segs->ds_len;
604 }
605 datalen -= dm_segs->ds_len;
606#if 0
607 if (isp->isp_type & ISP_HA_FC) {
608 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
609 printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
610 isp->isp_name, rq->req_seg_count,
611 rq2->req_dataseg[rq2->req_seg_count].ds_count,
612 rq2->req_dataseg[rq2->req_seg_count].ds_base);
613 } else {
614 printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
615 isp->isp_name, rq->req_seg_count,
616 rq->req_dataseg[rq->req_seg_count].ds_count,
617 rq->req_dataseg[rq->req_seg_count].ds_base);
618 }
619#endif
620 rq->req_seg_count++;
621 dm_segs++;
622 }
623
624 while (datalen > 0 && dm_segs != eseg) {
625 crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
626 *iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN);
627 if (*iptrp == optr) {
628#if 0
629 printf("%s: Request Queue Overflow++\n", isp->isp_name);
630#endif
631 mp->error = MUSHERR_NOQENTRIES;
632 return;
633 }
634 rq->req_header.rqs_entry_count++;
635 bzero((void *)crq, sizeof (*crq));
636 crq->req_header.rqs_entry_count = 1;
637 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
638
639 seglim = 0;
640 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
641 crq->req_dataseg[seglim].ds_base =
642 dm_segs->ds_addr;
643 crq->req_dataseg[seglim].ds_count =
644 dm_segs->ds_len;
645#if 0
646 printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n",
647 isp->isp_name, rq->req_header.rqs_entry_count-1,
648 seglim, crq->req_dataseg[seglim].ds_count,
649 crq->req_dataseg[seglim].ds_base);
650#endif
651 rq->req_seg_count++;
652 dm_segs++;
653 seglim++;
654 datalen -= dm_segs->ds_len;
655 }
656 }
657}
658
659static int
660isp_pci_dmasetup(isp, ccb, rq, iptrp, optr)
661 struct ispsoftc *isp;
662 ISP_SCSI_XFER_T *ccb;
663 ispreq_t *rq;
664 u_int8_t *iptrp;
665 u_int8_t optr;
666{
667 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
668 struct ccb_hdr *ccb_h;
669 struct ccb_scsiio *csio;
670 bus_dmamap_t *dp;
671 mush_t mush, *mp;
672
673 csio = (struct ccb_scsiio *) ccb;
674 ccb_h = &csio->ccb_h;
675
676 if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
677 rq->req_seg_count = 1;
678 return (CMD_QUEUED);
679 }
680 dp = &pci->dmaps[rq->req_handle - 1];
681
682 /*
683 * Do a virtual grapevine step to collect info for
684 * the callback dma allocation that we have to use...
685 */
686 mp = &mush;
687 mp->isp = isp;
688 mp->ccb = ccb;
689 mp->rq = rq;
690 mp->iptrp = iptrp;
691 mp->optr = optr;
692 mp->error = 0;
693
694 if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
695 if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
696 int error, s;
697
698 s = splsoftvm();
699 error = bus_dmamap_load(pci->parent_dmat, *dp,
700 csio->data_ptr, csio->dxfer_len, dma2, mp, 0);
701 if (error == EINPROGRESS) {
702 bus_dmamap_unload(pci->parent_dmat, *dp);
703 mp->error = EINVAL;
704 printf("%s: deferred dma allocation not "
705 "supported\n", isp->isp_name);
706 } else if (error && mp->error == 0) {
707 mp->error = error;
708 }
709 splx(s);
710 } else {
711 /* Pointer to physical buffer */
712 struct bus_dma_segment seg;
713 seg.ds_addr = (bus_addr_t)csio->data_ptr;
714 seg.ds_len = csio->dxfer_len;
715 dma2(mp, &seg, 1, 0);
716 }
717 } else {
718 struct bus_dma_segment *segs;
719
720 if ((ccb_h->flags & CAM_DATA_PHYS) != 0) {
721 printf("%s: Physical segment pointers unsupported",
722 isp->isp_name);
723 mp->error = EINVAL;
724 } else if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) {
725 printf("%s: Virtual segment addresses unsupported",
726 isp->isp_name);
727 mp->error = EINVAL;
728 } else {
729 /* Just use the segments provided */
730 segs = (struct bus_dma_segment *) csio->data_ptr;
731 dma2(mp, segs, csio->sglist_cnt, 0);
732 }
733 }
734 if (mp->error) {
735 int retval = CMD_COMPLETE;
736 if (mp->error == MUSHERR_NOQENTRIES) {
737 retval = CMD_EAGAIN;
738 ccb_h->status = CAM_UNREC_HBA_ERROR;
739 } else if (mp->error == EFBIG) {
740 ccb_h->status = CAM_REQ_TOO_BIG;
741 } else if (mp->error == EINVAL) {
742 ccb_h->status = CAM_REQ_INVALID;
743 } else {
744 ccb_h->status = CAM_UNREC_HBA_ERROR;
745 }
746 return (retval);
747 } else {
748 return (CMD_QUEUED);
749 }
750}
751
752static void
753isp_pci_dmateardown(isp, ccb, handle)
754 struct ispsoftc *isp;
755 ISP_SCSI_XFER_T *ccb;
756 u_int32_t handle;
757{
758 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
759 bus_dmamap_t *dp = &pci->dmaps[handle];
760
761 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
762 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD);
763 } else {
764 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE);
765 }
766 bus_dmamap_unload(pci->parent_dmat, *dp);
767}
768
769#else /* __FreeBSD_version >= 300004 */
770
771
772static int
773isp_pci_mbxdma(isp)
774 struct ispsoftc *isp;
775{
776 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
777 u_int32_t len;
778 int rseg;
779
780 /* XXXX CHECK FOR ALIGNMENT */
781 /*
782 * Allocate and map the request queue.
783 */
784 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
785 isp->isp_rquest = malloc(len, M_DEVBUF, M_NOWAIT);
786 if (isp->isp_rquest == NULL) {
787 printf("%s: cannot malloc request queue\n", isp->isp_name);
788 return (1);
789 }
790 isp->isp_rquest_dma = vtophys(isp->isp_rquest);
791
792#if 0
793 printf("RQUEST=0x%x (0x%x)...", isp->isp_rquest, isp->isp_rquest_dma);
794#endif
795
796 /*
797 * Allocate and map the result queue.
798 */
799 len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
800 isp->isp_result = malloc(len, M_DEVBUF, M_NOWAIT);
801 if (isp->isp_result == NULL) {
802 free(isp->isp_rquest, M_DEVBUF);
803 printf("%s: cannot malloc result queue\n", isp->isp_name);
804 return (1);
805 }
806 isp->isp_result_dma = vtophys(isp->isp_result);
807#if 0
808 printf("RESULT=0x%x (0x%x)\n", isp->isp_result, isp->isp_result_dma);
809#endif
810 if (isp->isp_type & ISP_HA_FC) {
811 fcparam *fcp = isp->isp_param;
812 len = ISP2100_SCRLEN;
813 fcp->isp_scratch = (volatile caddr_t) &pci->_z._y._b;
814 fcp->isp_scdma = vtophys(fcp->isp_scratch);
815 }
816 return (0);
817}
818
819static int
820isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
821 struct ispsoftc *isp;
822 ISP_SCSI_XFER_T *xs;
823 ispreq_t *rq;
824 u_int8_t *iptrp;
825 u_int8_t optr;
826{
827 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
828 ispcontreq_t *crq;
829 vm_offset_t vaddr;
830 int drq, seglim;
831 u_int32_t paddr, nextpaddr, datalen, size, *ctrp;
832
833 if (xs->datalen == 0) {
834 rq->req_seg_count = 1;
835 return (CMD_QUEUED);
836 }
837
838 if (xs->flags & SCSI_DATA_IN) {
839 drq = REQFLAG_DATA_IN;
840 } else {
841 drq = REQFLAG_DATA_OUT;
842 }
843
844 if (isp->isp_type & ISP_HA_FC) {
845 seglim = ISP_RQDSEG_T2;
846 ((ispreqt2_t *)rq)->req_totalcnt = XS_XFRLEN(xs);
847 ((ispreqt2_t *)rq)->req_flags |= drq;
848 } else {
849 seglim = ISP_RQDSEG;
850 rq->req_flags |= drq;
851 }
852
853 datalen = XS_XFRLEN(xs);
854 vaddr = (vm_offset_t) xs->data;
855 paddr = vtophys(vaddr);
856
857 while (datalen != 0 && rq->req_seg_count < seglim) {
858 if (isp->isp_type & ISP_HA_FC) {
859 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
860 rq2->req_dataseg[rq2->req_seg_count].ds_base = paddr;
861 ctrp = &rq2->req_dataseg[rq2->req_seg_count].ds_count;
862 } else {
863 rq->req_dataseg[rq->req_seg_count].ds_base = paddr;
864 ctrp = &rq->req_dataseg[rq->req_seg_count].ds_count;
865 }
866 nextpaddr = paddr;
867 *(ctrp) = 0;
868
869 while (datalen != 0 && paddr == nextpaddr) {
870 nextpaddr = (paddr & (~PAGE_MASK)) + PAGE_SIZE;
871 size = nextpaddr - paddr;
872 if (size > datalen)
873 size = datalen;
874
875 *(ctrp) += size;
876 vaddr += size;
877 datalen -= size;
878 if (datalen != 0)
879 paddr = vtophys(vaddr);
880
881 }
882#if 0
883 if (isp->isp_type & ISP_HA_FC) {
884 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
885 printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
886 isp->isp_name, rq->req_seg_count,
887 rq2->req_dataseg[rq2->req_seg_count].ds_count,
888 rq2->req_dataseg[rq2->req_seg_count].ds_base);
889 } else {
890 printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
891 isp->isp_name, rq->req_seg_count,
892 rq->req_dataseg[rq->req_seg_count].ds_count,
893 rq->req_dataseg[rq->req_seg_count].ds_base);
894 }
895#endif
896 rq->req_seg_count++;
897 }
898
899
900
901 if (datalen == 0)
902 return (CMD_QUEUED);
903
904 paddr = vtophys(vaddr);
905 while (datalen > 0) {
906 crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
907 *iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN);
908 if (*iptrp == optr) {
909 printf("%s: Request Queue Overflow\n", isp->isp_name);
910 XS_SETERR(xs, HBA_BOTCH);
911 return (CMD_EAGAIN);
912 }
913 rq->req_header.rqs_entry_count++;
914 bzero((void *)crq, sizeof (*crq));
915 crq->req_header.rqs_entry_count = 1;
916 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
917
918 for (seglim = 0; datalen != 0 && seglim < ISP_CDSEG; seglim++) {
919 crq->req_dataseg[seglim].ds_base = paddr;
920 ctrp = &crq->req_dataseg[seglim].ds_count;
921 *(ctrp) = 0;
922 nextpaddr = paddr;
923 while (datalen != 0 && paddr == nextpaddr) {
924 nextpaddr = (paddr & (~PAGE_MASK)) + PAGE_SIZE;
925 size = nextpaddr - paddr;
926 if (size > datalen)
927 size = datalen;
928
929 *(ctrp) += size;
930 vaddr += size;
931 datalen -= size;
932 if (datalen != 0)
933 paddr = vtophys(vaddr);
934 }
935#if 0
936 printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n",
937 isp->isp_name, rq->req_header.rqs_entry_count-1,
938 seglim, crq->req_dataseg[seglim].ds_count,
939 crq->req_dataseg[seglim].ds_base);
940#endif
941 rq->req_seg_count++;
942 }
943 }
944
945 return (CMD_QUEUED);
946}
947#endif
948
/*
 * Platform-specific post-reset fixup hook, called by the core driver
 * after a chip reset.  Only action needed on PCI: keep the adapter
 * BIOS out of the way.
 */
static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}
956
/*
 * Debug helper: print the PCI command/status configuration register
 * for this adapter.  Reads config space, not the chip's BIU registers.
 */
static void
isp_pci_dumpregs(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	printf("%s: PCI Status Command/Status=%lx\n", pci->pci_isp.isp_name,
	 pci_conf_read(pci->pci_id, PCI_COMMAND_STATUS_REG));
}
/*
 * PCI probe entry point: match on the Qlogic 1020/1040 or 2100 PCI
 * device ID and return a device description string, or NULL for no
 * match.  Also announces the driver/core version exactly once.
 * (The return-type line of this definition sits above this view.)
 */
isp_pci_probe(tag, type)
	pcici_t tag;
	pcidi_t type;
{
	static int oneshot = 1;	/* print the version banner only once */
	char *x;

	switch (type) {
	case PCI_QLOGIC_ISP:
		x = "Qlogic ISP 10X0 PCI SCSI Adapter";
		break;
	case PCI_QLOGIC_ISP2100:
		x = "Qlogic ISP 2100 PCI FC-AL Adapter";
		break;
	default:
		return (NULL);
	}
	if (oneshot) {
		oneshot = 0;
		printf("%s Version %d.%d, Core Version %d.%d\n", PVS,
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
	return (x);
}
225
226
227static void
228isp_pci_attach(config_id, unit)
229 pcici_t config_id;
230 int unit;
231{
232 int mapped;
233 pci_port_t io_port;
234 u_int32_t data;
235 struct isp_pcisoftc *pcs;
236 struct ispsoftc *isp;
237 vm_offset_t vaddr, paddr;
238 ISP_LOCKVAL_DECL;
239
240
241 pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT);
242 if (pcs == NULL) {
243 printf("isp%d: cannot allocate softc\n", unit);
244 return;
245 }
246 bzero(pcs, sizeof (struct isp_pcisoftc));
247
248 vaddr = paddr = NULL;
249 mapped = 0;
250 data = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
251 if (mapped == 0 && (data & PCI_COMMAND_MEM_ENABLE)) {
252 if (pci_map_mem(config_id, MEM_MAP_REG, &vaddr, &paddr)) {
253 pcs->pci_st = MEM_SPACE_MAPPING;
254 pcs->pci_sh = vaddr;
255 mapped++;
256 }
257 }
258 if (mapped == 0 && (data & PCI_COMMAND_IO_ENABLE)) {
259 if (pci_map_port(config_id, PCI_MAP_REG_START, &io_port)) {
260 pcs->pci_st = IO_SPACE_MAPPING;
261 pcs->pci_sh = io_port;
262 mapped++;
263 }
264 }
265 if (mapped == 0) {
266 printf("isp%d: unable to map any ports!\n", unit);
267 free(pcs, M_DEVBUF);
268 return;
269 }
270 printf("isp%d: using %s space register mapping\n", unit,
271 pcs->pci_st == IO_SPACE_MAPPING? "I/O" : "Memory");
272
273 isp = &pcs->pci_isp;
274#if __FreeBSD_version >= 300006
275 (void) snprintf(isp->isp_name, sizeof(isp->isp_name), "isp%d", unit);
276#else
277 (void) sprintf(isp->isp_name, "isp%d", unit);
278#endif
279 isp->isp_osinfo.unit = unit;
280
281 data = pci_conf_read(config_id, PCI_ID_REG);
282 if (data == PCI_QLOGIC_ISP) {
283 isp->isp_mdvec = &mdvec;
284 isp->isp_type = ISP_HA_SCSI_UNKNOWN;
285 isp->isp_param = &pcs->_z._x;
286 } else if (data == PCI_QLOGIC_ISP2100) {
287 isp->isp_mdvec = &mdvec_2100;
288 isp->isp_type = ISP_HA_FC_2100;
289 isp->isp_param = &pcs->_z._y._a;
290
291 ISP_LOCK(isp);
292 data = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
293 data |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
294 pci_conf_write(config_id, PCI_COMMAND_STATUS_REG, data);
295
296 /*
297 * Wierd- we need to clear the lsb in offset 0x30 to take the
298 * chip out of reset state.
299 */
300 data = pci_conf_read(config_id, 0x30);
301 data &= ~1;
302 pci_conf_write(config_id, 0x30, data);
303 ISP_UNLOCK(isp);
304 } else {
305 printf("%s: unknown dev (%x)- punting\n", isp->isp_name, data);
306 free(pcs, M_DEVBUF);
307 return;
308 }
309
310#if __FreeBSD_version >= 300004
311 if (bus_dma_tag_create(NULL, 0, 0, BUS_SPACE_MAXADDR_32BIT,
312 BUS_SPACE_MAXADDR, NULL, NULL, 1<<24,
313 255, 1<<24, 0, &pcs->parent_dmat) != 0) {
314 printf("%s: could not create master dma tag\n", isp->isp_name);
315 free(pcs, M_DEVBUF);
316 return;
317 }
318#endif
319 if (pci_map_int(config_id, (void (*)(void *))isp_intr,
320 (void *)isp, &IMASK) == 0) {
321 printf("%s: could not map interrupt\n", isp->isp_name);
322 free(pcs, M_DEVBUF);
323 return;
324 }
325
326 pcs->pci_id = config_id;
327 ISP_LOCK(isp);
328 isp_reset(isp);
329 if (isp->isp_state != ISP_RESETSTATE) {
330 ISP_UNLOCK(isp);
331 free(pcs, M_DEVBUF);
332 return;
333 }
334 isp_init(isp);
335 if (isp->isp_state != ISP_INITSTATE) {
336 isp_uninit(isp);
337 ISP_UNLOCK(isp);
338 free(pcs, M_DEVBUF);
339 return;
340 }
341 isp_attach(isp);
342 if (isp->isp_state != ISP_RUNSTATE) {
343 isp_uninit(isp);
344 free(pcs, M_DEVBUF);
345 }
346 ISP_UNLOCK(isp);
347#ifdef __alpha__
348 alpha_register_pci_scsi(config_id->bus, config_id->slot, isp->isp_sim);
349#endif
350}
351
352#define PCI_BIU_REGS_OFF BIU_REGS_OFF
353
354static u_int16_t
355isp_pci_rd_reg(isp, regoff)
356 struct ispsoftc *isp;
357 int regoff;
358{
359 u_int16_t rv;
360 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
361 int offset, oldsxp = 0;
362
363 if ((regoff & BIU_BLOCK) != 0) {
364 offset = PCI_BIU_REGS_OFF;
365 } else if ((regoff & MBOX_BLOCK) != 0) {
366 if (isp->isp_type & ISP_HA_SCSI)
367 offset = PCI_MBOX_REGS_OFF;
368 else
369 offset = PCI_MBOX_REGS2100_OFF;
370 } else if ((regoff & SXP_BLOCK) != 0) {
371 offset = PCI_SXP_REGS_OFF;
372 /*
373 * We will assume that someone has paused the RISC processor.
374 */
375 oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
376 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
377 } else {
378 offset = PCI_RISC_REGS_OFF;
379 }
380 regoff &= 0xff;
381 offset += regoff;
382 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
383 if ((regoff & SXP_BLOCK) != 0) {
384 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
385 }
386 return (rv);
387}
388
389static void
390isp_pci_wr_reg(isp, regoff, val)
391 struct ispsoftc *isp;
392 int regoff;
393 u_int16_t val;
394{
395 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
396 int offset, oldsxp = 0;
397 if ((regoff & BIU_BLOCK) != 0) {
398 offset = PCI_BIU_REGS_OFF;
399 } else if ((regoff & MBOX_BLOCK) != 0) {
400 if (isp->isp_type & ISP_HA_SCSI)
401 offset = PCI_MBOX_REGS_OFF;
402 else
403 offset = PCI_MBOX_REGS2100_OFF;
404 } else if ((regoff & SXP_BLOCK) != 0) {
405 offset = PCI_SXP_REGS_OFF;
406 /*
407 * We will assume that someone has paused the RISC processor.
408 */
409 oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
410 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
411 } else {
412 offset = PCI_RISC_REGS_OFF;
413 }
414 regoff &= 0xff;
415 offset += regoff;
416 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
417 if ((regoff & SXP_BLOCK) != 0) {
418 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
419 }
420}
421
422#if __FreeBSD_version >= 300004
423static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int));
424static void isp_map_result __P((void *, bus_dma_segment_t *, int, int));
425static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int));
426
/*
 * bus_dmamap_load callback: record the bus address of the request
 * queue.  arg is the isp_pcisoftc passed by isp_pci_mbxdma(); the cast
 * works because pci_isp is presumably its first member -- TODO confirm
 * against the softc declaration.
 * NOTE(review): nseg and error are ignored; a failed load would leave
 * isp_rquest_dma holding whatever segs points at.
 */
static void
isp_map_rquest(arg, segs, nseg, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	int error;
{
	struct ispsoftc *isp = (struct ispsoftc *) arg;
	isp->isp_rquest_dma = segs->ds_addr;
}
437
/*
 * bus_dmamap_load callback: record the bus address of the result
 * queue.  Same calling convention and caveats as isp_map_rquest().
 * NOTE(review): nseg and error are ignored.
 */
static void
isp_map_result(arg, segs, nseg, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	int error;
{
	struct ispsoftc *isp = (struct ispsoftc *) arg;
	isp->isp_result_dma = segs->ds_addr;
}
448
/*
 * bus_dmamap_load callback: record the bus address of the Fibre
 * Channel scratch area in the FC parameter block.  Only invoked for
 * FC adapters (see isp_pci_mbxdma).
 * NOTE(review): nseg and error are ignored.
 */
static void
isp_map_fcscrt(arg, segs, nseg, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	int error;
{
	struct ispsoftc *isp = (struct ispsoftc *) arg;
	fcparam *fcp = isp->isp_param;
	fcp->isp_scdma = segs->ds_addr;
}
460
461static int
462isp_pci_mbxdma(isp)
463 struct ispsoftc *isp;
464{
465 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
466 caddr_t base;
467 u_int32_t len;
468 int i, error;
469
470 /*
471 * Allocate and map the request, result queues, plus FC scratch area.
472 */
473 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
474 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
475 if (isp->isp_type & ISP_HA_FC) {
476 len += ISP2100_SCRLEN;
477 }
478 if (bus_dma_tag_create(pci->parent_dmat, 0, 0, BUS_SPACE_MAXADDR,
479 BUS_SPACE_MAXADDR, NULL, NULL, len, 1, BUS_SPACE_MAXSIZE_32BIT,
480 0, &pci->cntrol_dmat) != 0) {
481 printf("%s: cannot create a dma tag for control spaces\n",
482 isp->isp_name);
483 return (1);
484 }
485 if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base,
486 BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) {
487 printf("%s: cannot allocate %d bytes of CCB memory\n",
488 isp->isp_name, len);
489 return (1);
490 }
491
492 isp->isp_rquest = base;
493 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest,
494 ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN), isp_map_rquest, pci, 0);
495
496 isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
497 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result,
498 ISP_QUEUE_SIZE(RESULT_QUEUE_LEN), isp_map_result, pci, 0);
499
500 if (isp->isp_type & ISP_HA_FC) {
501 fcparam *fcp = (fcparam *) isp->isp_param;
502 fcp->isp_scratch = isp->isp_result +
503 ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
504 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap,
505 fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, pci, 0);
506 }
507
508 /*
509 * Use this opportunity to initialize/create data DMA maps.
510 */
511 for (i = 0; i < MAXISPREQUEST; i++) {
512 error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]);
513 if (error) {
514 printf("%s: error %d creating mailbox DMA maps\n",
515 isp->isp_name, error);
516 return (1);
517 }
518 }
519 return (0);
520}
521
static void dma2 __P((void *, bus_dma_segment_t *, int, int));

/*
 * Argument bundle smuggled through bus_dmamap_load()'s opaque callback
 * pointer into dma2().  error doubles as the out-parameter for the
 * callback's result (0, an errno, or MUSHERR_NOQENTRIES).
 */
typedef struct {
	struct ispsoftc *isp;	/* HBA instance */
	ISP_SCSI_XFER_T *ccb;	/* command being mapped */
	ispreq_t *rq;		/* request queue entry being built */
	u_int8_t *iptrp;	/* request queue in-pointer (updated) */
	u_int8_t optr;		/* request queue out-pointer (limit) */
	u_int error;		/* out: 0, errno, or MUSHERR_NOQENTRIES */
} mush_t;

/* dma2() ran out of request-queue entries for continuation segments */
#define MUSHERR_NOQENTRIES -2
533
534static void
535dma2(arg, dm_segs, nseg, error)
536 void *arg;
537 bus_dma_segment_t *dm_segs;
538 int nseg;
539 int error;
540{
541 mush_t *mp;
542 ISP_SCSI_XFER_T *ccb;
543 struct ispsoftc *isp;
544 struct isp_pcisoftc *pci;
545 bus_dmamap_t *dp;
546 bus_dma_segment_t *eseg;
547 ispreq_t *rq;
548 u_int8_t *iptrp;
549 u_int8_t optr;
550 ispcontreq_t *crq;
551 int drq, seglim, datalen;
552
553 mp = (mush_t *) arg;
554 if (error) {
555 mp->error = error;
556 return;
557 }
558
559 isp = mp->isp;
560 if (nseg < 1) {
561 printf("%s: zero or negative segment count\n", isp->isp_name);
562 mp->error = EFAULT;
563 return;
564 }
565 ccb = mp->ccb;
566 rq = mp->rq;
567 iptrp = mp->iptrp;
568 optr = mp->optr;
569
570 pci = (struct isp_pcisoftc *)isp;
571 dp = &pci->dmaps[rq->req_handle - 1];
572 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
573 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
574 drq = REQFLAG_DATA_IN;
575 } else {
576 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
577 drq = REQFLAG_DATA_OUT;
578 }
579
580 datalen = XS_XFRLEN(ccb);
581 if (isp->isp_type & ISP_HA_FC) {
582 seglim = ISP_RQDSEG_T2;
583 ((ispreqt2_t *)rq)->req_totalcnt = datalen;
584 ((ispreqt2_t *)rq)->req_flags |= drq;
585 } else {
586 seglim = ISP_RQDSEG;
587 rq->req_flags |= drq;
588 }
589
590 eseg = dm_segs + nseg;
591
592 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
593 if (isp->isp_type & ISP_HA_FC) {
594 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
595 rq2->req_dataseg[rq2->req_seg_count].ds_base =
596 dm_segs->ds_addr;
597 rq2->req_dataseg[rq2->req_seg_count].ds_count =
598 dm_segs->ds_len;
599 } else {
600 rq->req_dataseg[rq->req_seg_count].ds_base =
601 dm_segs->ds_addr;
602 rq->req_dataseg[rq->req_seg_count].ds_count =
603 dm_segs->ds_len;
604 }
605 datalen -= dm_segs->ds_len;
606#if 0
607 if (isp->isp_type & ISP_HA_FC) {
608 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
609 printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
610 isp->isp_name, rq->req_seg_count,
611 rq2->req_dataseg[rq2->req_seg_count].ds_count,
612 rq2->req_dataseg[rq2->req_seg_count].ds_base);
613 } else {
614 printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
615 isp->isp_name, rq->req_seg_count,
616 rq->req_dataseg[rq->req_seg_count].ds_count,
617 rq->req_dataseg[rq->req_seg_count].ds_base);
618 }
619#endif
620 rq->req_seg_count++;
621 dm_segs++;
622 }
623
624 while (datalen > 0 && dm_segs != eseg) {
625 crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
626 *iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN);
627 if (*iptrp == optr) {
628#if 0
629 printf("%s: Request Queue Overflow++\n", isp->isp_name);
630#endif
631 mp->error = MUSHERR_NOQENTRIES;
632 return;
633 }
634 rq->req_header.rqs_entry_count++;
635 bzero((void *)crq, sizeof (*crq));
636 crq->req_header.rqs_entry_count = 1;
637 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
638
639 seglim = 0;
640 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
641 crq->req_dataseg[seglim].ds_base =
642 dm_segs->ds_addr;
643 crq->req_dataseg[seglim].ds_count =
644 dm_segs->ds_len;
645#if 0
646 printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n",
647 isp->isp_name, rq->req_header.rqs_entry_count-1,
648 seglim, crq->req_dataseg[seglim].ds_count,
649 crq->req_dataseg[seglim].ds_base);
650#endif
651 rq->req_seg_count++;
652 dm_segs++;
653 seglim++;
654 datalen -= dm_segs->ds_len;
655 }
656 }
657}
658
/*
 * Map a CAM SCSI I/O's data buffer for DMA and fill the request entry's
 * scatter/gather list (via the dma2() callback).  Returns CMD_QUEUED on
 * success, CMD_EAGAIN when the request queue is full, or CMD_COMPLETE
 * with ccb_h->status set on a mapping error.
 */
static int
isp_pci_dmasetup(isp, ccb, rq, iptrp, optr)
	struct ispsoftc *isp;
	ISP_SCSI_XFER_T *ccb;
	ispreq_t *rq;
	u_int8_t *iptrp;
	u_int8_t optr;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	struct ccb_hdr *ccb_h;
	struct ccb_scsiio *csio;
	bus_dmamap_t *dp;
	mush_t mush, *mp;

	csio = (struct ccb_scsiio *) ccb;
	ccb_h = &csio->ccb_h;

	/* no data phase: a single (empty) segment satisfies the chip */
	if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
		rq->req_seg_count = 1;
		return (CMD_QUEUED);
	}
	dp = &pci->dmaps[rq->req_handle - 1];

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->ccb = ccb;
	mp->rq = rq;
	mp->iptrp = iptrp;
	mp->optr = optr;
	mp->error = 0;

	if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
		if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
			int error, s;

			/* block softvm so the callback runs synchronously */
			s = splsoftvm();
			error = bus_dmamap_load(pci->parent_dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, dma2, mp, 0);
			if (error == EINPROGRESS) {
				/* deferred callbacks can't be handled here */
				bus_dmamap_unload(pci->parent_dmat, *dp);
				mp->error = EINVAL;
				printf("%s: deferred dma allocation not "
				    "supported\n", isp->isp_name);
			} else if (error && mp->error == 0) {
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			dma2(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((ccb_h->flags & CAM_DATA_PHYS) != 0) {
			printf("%s: Physical segment pointers unsupported",
			    isp->isp_name);
			mp->error = EINVAL;
		} else if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) {
			printf("%s: Virtual segment addresses unsupported",
			    isp->isp_name);
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			dma2(mp, segs, csio->sglist_cnt, 0);
		}
	}
	/* translate the callback's errno into a CAM status + return code */
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
			/*
			 * NOTE(review): status is set to CAM_UNREC_HBA_ERROR
			 * even though CMD_EAGAIN presumably means the caller
			 * retries -- confirm against the core dispatch code.
			 */
			ccb_h->status = CAM_UNREC_HBA_ERROR;
		} else if (mp->error == EFBIG) {
			ccb_h->status = CAM_REQ_TOO_BIG;
		} else if (mp->error == EINVAL) {
			ccb_h->status = CAM_REQ_INVALID;
		} else {
			ccb_h->status = CAM_UNREC_HBA_ERROR;
		}
		return (retval);
	} else {
		return (CMD_QUEUED);
	}
}
751
/*
 * Undo the DMA mapping established by isp_pci_dmasetup() when a
 * command completes: sync the map for the CPU, then unload it.
 * NOTE(review): this indexes dmaps[handle], while isp_pci_dmasetup()
 * and dma2() index dmaps[rq->req_handle - 1].  If the caller passes
 * the raw request handle, this is off by one -- verify against the
 * ISP_DMAFREE call site in the core driver.
 */
static void
isp_pci_dmateardown(isp, ccb, handle)
	struct ispsoftc *isp;
	ISP_SCSI_XFER_T *ccb;
	u_int32_t handle;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pci->dmaps[handle];

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pci->parent_dmat, *dp);
}
768
769#else /* __FreeBSD_version >= 300004 */
770
771
772static int
773isp_pci_mbxdma(isp)
774 struct ispsoftc *isp;
775{
776 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
777 u_int32_t len;
778 int rseg;
779
780 /* XXXX CHECK FOR ALIGNMENT */
781 /*
782 * Allocate and map the request queue.
783 */
784 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
785 isp->isp_rquest = malloc(len, M_DEVBUF, M_NOWAIT);
786 if (isp->isp_rquest == NULL) {
787 printf("%s: cannot malloc request queue\n", isp->isp_name);
788 return (1);
789 }
790 isp->isp_rquest_dma = vtophys(isp->isp_rquest);
791
792#if 0
793 printf("RQUEST=0x%x (0x%x)...", isp->isp_rquest, isp->isp_rquest_dma);
794#endif
795
796 /*
797 * Allocate and map the result queue.
798 */
799 len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
800 isp->isp_result = malloc(len, M_DEVBUF, M_NOWAIT);
801 if (isp->isp_result == NULL) {
802 free(isp->isp_rquest, M_DEVBUF);
803 printf("%s: cannot malloc result queue\n", isp->isp_name);
804 return (1);
805 }
806 isp->isp_result_dma = vtophys(isp->isp_result);
807#if 0
808 printf("RESULT=0x%x (0x%x)\n", isp->isp_result, isp->isp_result_dma);
809#endif
810 if (isp->isp_type & ISP_HA_FC) {
811 fcparam *fcp = isp->isp_param;
812 len = ISP2100_SCRLEN;
813 fcp->isp_scratch = (volatile caddr_t) &pci->_z._y._b;
814 fcp->isp_scdma = vtophys(fcp->isp_scratch);
815 }
816 return (0);
817}
818
/*
 * Pre-CAM version: build the request entry's scatter/gather list by
 * walking the buffer virtually and translating page by page with
 * vtophys(), coalescing physically contiguous pages into one segment.
 * Overflow segments go into RQSTYPE_DATASEG continuation entries.
 * Returns CMD_QUEUED on success or CMD_EAGAIN (with HBA_BOTCH set) if
 * the request queue fills up.
 */
static int
isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	ISP_SCSI_XFER_T *xs;
	ispreq_t *rq;
	u_int8_t *iptrp;
	u_int8_t optr;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	ispcontreq_t *crq;
	vm_offset_t vaddr;
	int drq, seglim;
	u_int32_t paddr, nextpaddr, datalen, size, *ctrp;

	/* no data phase: one (empty) segment satisfies the chip */
	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		return (CMD_QUEUED);
	}

	if (xs->flags & SCSI_DATA_IN) {
		drq = REQFLAG_DATA_IN;
	} else {
		drq = REQFLAG_DATA_OUT;
	}

	/* FC request entries have a different layout and segment limit */
	if (isp->isp_type & ISP_HA_FC) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = XS_XFRLEN(xs);
		((ispreqt2_t *)rq)->req_flags |= drq;
	} else {
		seglim = ISP_RQDSEG;
		rq->req_flags |= drq;
	}

	datalen = XS_XFRLEN(xs);
	vaddr = (vm_offset_t) xs->data;
	paddr = vtophys(vaddr);

	/* fill the inline segments in the request entry itself */
	while (datalen != 0 && rq->req_seg_count < seglim) {
		if (isp->isp_type & ISP_HA_FC) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base = paddr;
			ctrp = &rq2->req_dataseg[rq2->req_seg_count].ds_count;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base = paddr;
			ctrp = &rq->req_dataseg[rq->req_seg_count].ds_count;
		}
		nextpaddr = paddr;
		*(ctrp) = 0;

		/* grow this segment while pages stay physically contiguous */
		while (datalen != 0 && paddr == nextpaddr) {
			nextpaddr = (paddr & (~PAGE_MASK)) + PAGE_SIZE;
			size = nextpaddr - paddr;
			if (size > datalen)
				size = datalen;

			*(ctrp) += size;
			vaddr += size;
			datalen -= size;
			if (datalen != 0)
				paddr = vtophys(vaddr);

		}
#if 0
		if (isp->isp_type & ISP_HA_FC) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
			    isp->isp_name, rq->req_seg_count,
			    rq2->req_dataseg[rq2->req_seg_count].ds_count,
			    rq2->req_dataseg[rq2->req_seg_count].ds_base);
		} else {
			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
			    isp->isp_name, rq->req_seg_count,
			    rq->req_dataseg[rq->req_seg_count].ds_count,
			    rq->req_dataseg[rq->req_seg_count].ds_base);
		}
#endif
		rq->req_seg_count++;
	}



	if (datalen == 0)
		return (CMD_QUEUED);

	/* leftover data spills into continuation entries */
	paddr = vtophys(vaddr);
	while (datalen > 0) {
		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN);
		if (*iptrp == optr) {
			printf("%s: Request Queue Overflow\n", isp->isp_name);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_EAGAIN);
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		for (seglim = 0; datalen != 0 && seglim < ISP_CDSEG; seglim++) {
			crq->req_dataseg[seglim].ds_base = paddr;
			ctrp = &crq->req_dataseg[seglim].ds_count;
			*(ctrp) = 0;
			nextpaddr = paddr;
			/* same contiguity-coalescing walk as above */
			while (datalen != 0 && paddr == nextpaddr) {
				nextpaddr = (paddr & (~PAGE_MASK)) + PAGE_SIZE;
				size = nextpaddr - paddr;
				if (size > datalen)
					size = datalen;

				*(ctrp) += size;
				vaddr += size;
				datalen -= size;
				if (datalen != 0)
					paddr = vtophys(vaddr);
			}
#if 0
			printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n",
			    isp->isp_name, rq->req_header.rqs_entry_count-1,
			    seglim, crq->req_dataseg[seglim].ds_count,
			    crq->req_dataseg[seglim].ds_base);
#endif
			rq->req_seg_count++;
		}
	}

	return (CMD_QUEUED);
}
947#endif
948
/*
 * Platform-specific post-reset fixup hook, called by the core driver
 * after a chip reset.  Only action needed on PCI: keep the adapter
 * BIOS out of the way.
 */
static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}
956
/*
 * Debug helper: print the PCI command/status configuration register
 * for this adapter.  Reads config space, not the chip's BIU registers.
 */
static void
isp_pci_dumpregs(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	printf("%s: PCI Status Command/Status=%lx\n", pci->pci_isp.isp_name,
	 pci_conf_read(pci->pci_id, PCI_COMMAND_STATUS_REG));
}