isp_pci.c (48611) vs isp_pci.c (49860)
1/* $Id: isp_pci.c,v 1.27 1999/07/05 22:04:08 mjacob Exp $ */
1/* $Id: isp_pci.c,v 1.28 1999/07/06 01:24:20 mjacob Exp $ */
2/* release_6_5_99 */
3/*
4 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
5 * FreeBSD Version.
6 *
7 *---------------------------------------
8 * Copyright (c) 1997, 1998, 1999 by Matthew Jacob
9 * NASA/Ames Research Center
10 * All rights reserved.
11 *---------------------------------------
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice immediately at the beginning of the file, without modification,
18 * this list of conditions, and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 3. The name of the author may not be used to endorse or promote products
23 * derived from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
29 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 */
37#include <dev/isp/isp_freebsd.h>
38#include <dev/isp/asm_pci.h>
39#include <sys/malloc.h>
40#include <vm/vm.h>
41#include <vm/pmap.h>
42#include <vm/vm_extern.h>
43
44
45#include <pci/pcireg.h>
46#include <pci/pcivar.h>
47
48#include <machine/bus_memio.h>
49#include <machine/bus_pio.h>
50#include <machine/bus.h>
51#include <machine/md_var.h>
52
53
54static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
55static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
56#ifndef ISP_DISABLE_1080_SUPPORT
57static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
58static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
59#endif
60static int isp_pci_mbxdma __P((struct ispsoftc *));
61static int isp_pci_dmasetup __P((struct ispsoftc *, ISP_SCSI_XFER_T *,
62 ispreq_t *, u_int8_t *, u_int8_t));
63static void
64isp_pci_dmateardown __P((struct ispsoftc *, ISP_SCSI_XFER_T *, u_int32_t));
65
66static void isp_pci_reset1 __P((struct ispsoftc *));
67static void isp_pci_dumpregs __P((struct ispsoftc *));
68
69#ifndef ISP_DISABLE_1020_SUPPORT
70static struct ispmdvec mdvec = {
71 isp_pci_rd_reg,
72 isp_pci_wr_reg,
73 isp_pci_mbxdma,
74 isp_pci_dmasetup,
75 isp_pci_dmateardown,
76 NULL,
77 isp_pci_reset1,
78 isp_pci_dumpregs,
79 ISP_RISC_CODE,
80 ISP_CODE_LENGTH,
81 ISP_CODE_ORG,
82 ISP_CODE_VERSION,
83 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
84 0
85};
86#endif
87
88#ifndef ISP_DISABLE_1080_SUPPORT
89static struct ispmdvec mdvec_1080 = {
90 isp_pci_rd_reg_1080,
91 isp_pci_wr_reg_1080,
92 isp_pci_mbxdma,
93 isp_pci_dmasetup,
94 isp_pci_dmateardown,
95 NULL,
96 isp_pci_reset1,
97 isp_pci_dumpregs,
98 ISP1080_RISC_CODE,
99 ISP1080_CODE_LENGTH,
100 ISP1080_CODE_ORG,
101 ISP1080_CODE_VERSION,
102 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
103 0
104};
105#endif
106
107#ifndef ISP_DISABLE_2100_SUPPORT
108static struct ispmdvec mdvec_2100 = {
109 isp_pci_rd_reg,
110 isp_pci_wr_reg,
111 isp_pci_mbxdma,
112 isp_pci_dmasetup,
113 isp_pci_dmateardown,
114 NULL,
115 isp_pci_reset1,
116 isp_pci_dumpregs,
117 ISP2100_RISC_CODE,
118 ISP2100_CODE_LENGTH,
119 ISP2100_CODE_ORG,
120 ISP2100_CODE_VERSION,
121 0, /* Irrelevant to the 2100 */
122 0
123};
124#endif
125
126#ifndef ISP_DISABLE_2200_SUPPORT
127static struct ispmdvec mdvec_2200 = {
128 isp_pci_rd_reg,
129 isp_pci_wr_reg,
130 isp_pci_mbxdma,
131 isp_pci_dmasetup,
132 isp_pci_dmateardown,
133 NULL,
134 isp_pci_reset1,
135 isp_pci_dumpregs,
136 ISP2200_RISC_CODE,
137 ISP2200_CODE_LENGTH,
138 ISP2100_CODE_ORG,
139 ISP2200_CODE_VERSION,
140 0,
141 0
142};
143#endif
144
145#ifndef SCSI_ISP_PREFER_MEM_MAP
146#ifdef __alpha__
147#define SCSI_ISP_PREFER_MEM_MAP 0
148#else
149#define SCSI_ISP_PREFER_MEM_MAP 1
150#endif
151#endif
152
153#ifndef PCIM_CMD_INVEN
154#define PCIM_CMD_INVEN 0x10
155#endif
156#ifndef PCIM_CMD_BUSMASTEREN
157#define PCIM_CMD_BUSMASTEREN 0x0004
158#endif
159#ifndef PCIM_CMD_PERRESPEN
160#define PCIM_CMD_PERRESPEN 0x0040
161#endif
162#ifndef PCIM_CMD_SEREN
163#define PCIM_CMD_SEREN 0x0100
164#endif
165
166#ifndef PCIR_COMMAND
167#define PCIR_COMMAND 0x04
168#endif
169
170#ifndef PCIR_CACHELNSZ
171#define PCIR_CACHELNSZ 0x0c
172#endif
173
174#ifndef PCIR_LATTIMER
175#define PCIR_LATTIMER 0x0d
176#endif
177
178#ifndef PCIR_ROMADDR
179#define PCIR_ROMADDR 0x30
180#endif
181
182#ifndef PCI_VENDOR_QLOGIC
183#define PCI_VENDOR_QLOGIC 0x1077
184#endif
185
186#ifndef PCI_PRODUCT_QLOGIC_ISP1020
187#define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
188#endif
189
190#ifndef PCI_PRODUCT_QLOGIC_ISP1080
191#define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
192#endif
193
194#ifndef PCI_PRODUCT_QLOGIC_ISP1240
195#define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
196#endif
197
198#ifndef PCI_PRODUCT_QLOGIC_ISP2100
199#define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
200#endif
201
202#ifndef PCI_PRODUCT_QLOGIC_ISP2200
203#define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
204#endif
205
206#define PCI_QLOGIC_ISP ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
207
208#define PCI_QLOGIC_ISP1080 \
209 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
210
211#define PCI_QLOGIC_ISP1240 \
212 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
213
214#define PCI_QLOGIC_ISP2100 \
215 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
216
217#define PCI_QLOGIC_ISP2200 \
218 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
219
220#define IO_MAP_REG 0x10
221#define MEM_MAP_REG 0x14
222
223#define PCI_DFLT_LTNCY 0x40
224#define PCI_DFLT_LNSZ 0x10
225
226static const char *isp_pci_probe __P((pcici_t tag, pcidi_t type));
227static void isp_pci_attach __P((pcici_t config_d, int unit));
228
229/* This distinguishing define is not right, but it does work */
230#ifdef __alpha__
231#define IO_SPACE_MAPPING ALPHA_BUS_SPACE_IO
232#define MEM_SPACE_MAPPING ALPHA_BUS_SPACE_MEM
233#else
234#define IO_SPACE_MAPPING I386_BUS_SPACE_IO
235#define MEM_SPACE_MAPPING I386_BUS_SPACE_MEM
236#endif
237
238struct isp_pcisoftc {
239 struct ispsoftc pci_isp;
240 pcici_t pci_id;
241 bus_space_tag_t pci_st;
242 bus_space_handle_t pci_sh;
243 int16_t pci_poff[_NREG_BLKS];
244 bus_dma_tag_t parent_dmat;
245 bus_dma_tag_t cntrol_dmat;
246 bus_dmamap_t cntrol_dmap;
247 bus_dmamap_t dmaps[MAXISPREQUEST];
248};
249
250static u_long ispunit;
251
252static struct pci_device isp_pci_driver = {
253 "isp",
254 isp_pci_probe,
255 isp_pci_attach,
256 &ispunit,
257 NULL
258};
259COMPAT_PCI_DRIVER (isp_pci, isp_pci_driver);
260
261
262static const char *
263isp_pci_probe(pcici_t tag, pcidi_t type)
264{
265 static int oneshot = 1;
266 char *x;
267
268 switch (type) {
269#ifndef ISP_DISABLE_1020_SUPPORT
270 case PCI_QLOGIC_ISP:
271 x = "Qlogic ISP 1020/1040 PCI SCSI Adapter";
272 break;
273#endif
274#ifndef ISP_DISABLE_1080_SUPPORT
275 case PCI_QLOGIC_ISP1080:
276 x = "Qlogic ISP 1080 PCI SCSI Adapter";
277 break;
278 case PCI_QLOGIC_ISP1240:
279 x = "Qlogic ISP 1240 PCI SCSI Adapter";
280 break;
281#endif
282#ifndef ISP_DISABLE_2100_SUPPORT
283 case PCI_QLOGIC_ISP2100:
284 x = "Qlogic ISP 2100 PCI FC-AL Adapter";
285 break;
286#endif
287#ifndef ISP_DISABLE_2200_SUPPORT
288 case PCI_QLOGIC_ISP2200:
289 x = "Qlogic ISP 2200 PCI FC-AL Adapter";
290 break;
291#endif
292 default:
293 return (NULL);
294 }
295 if (oneshot) {
296 oneshot = 0;
297 printf("%s Version %d.%d, Core Version %d.%d\n", PVS,
298 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
299 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
300 }
301 return (x);
302}
303
304
305static void
306isp_pci_attach(pcici_t cfid, int unit)
307{
308 int mapped, prefer_mem_map, bitmap;
309 pci_port_t io_port;
310 u_int32_t data, linesz, psize, basetype;
311 struct isp_pcisoftc *pcs;
312 struct ispsoftc *isp;
313 vm_offset_t vaddr, paddr;
314 struct ispmdvec *mdvp;
315 bus_size_t lim;
316 ISP_LOCKVAL_DECL;
317
318 pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT);
319 if (pcs == NULL) {
320 printf("isp%d: cannot allocate softc\n", unit);
321 return;
322 }
323 bzero(pcs, sizeof (struct isp_pcisoftc));
324
325 /*
326 * Figure out if we're supposed to skip this one.
327 */
328 if (getenv_int("isp_disable", &bitmap)) {
329 if (bitmap & (1 << unit)) {
330 printf("isp%d: not configuring\n", unit);
331 return;
332 }
333 }
334
335 /*
336 * Figure out which we should try first - memory mapping or i/o mapping?
337 */
338#if SCSI_ISP_PREFER_MEM_MAP == 1
339 prefer_mem_map = 1;
340#else
341 prefer_mem_map = 0;
342#endif
343 bitmap = 0;
344 if (getenv_int("isp_mem_map", &bitmap)) {
345 if (bitmap & (1 << unit))
346 prefer_mem_map = 1;
347 }
348 bitmap = 0;
349 if (getenv_int("isp_io_map", &bitmap)) {
350 if (bitmap & (1 << unit))
351 prefer_mem_map = 0;
352 }
353
354 vaddr = paddr = NULL;
355 mapped = 0;
356 linesz = PCI_DFLT_LNSZ;
357 /*
358 * Note that pci_conf_read is a 32 bit word aligned function.
359 */
360 data = pci_conf_read(cfid, PCIR_COMMAND);
361 if (prefer_mem_map) {
362 if (data & PCI_COMMAND_MEM_ENABLE) {
363 if (pci_map_mem(cfid, MEM_MAP_REG, &vaddr, &paddr)) {
364 pcs->pci_st = MEM_SPACE_MAPPING;
365 pcs->pci_sh = vaddr;
366 mapped++;
367 }
368 }
369 if (mapped == 0 && (data & PCI_COMMAND_IO_ENABLE)) {
370 if (pci_map_port(cfid, PCI_MAP_REG_START, &io_port)) {
371 pcs->pci_st = IO_SPACE_MAPPING;
372 pcs->pci_sh = io_port;
373 mapped++;
374 }
375 }
376 } else {
377 if (data & PCI_COMMAND_IO_ENABLE) {
378 if (pci_map_port(cfid, PCI_MAP_REG_START, &io_port)) {
379 pcs->pci_st = IO_SPACE_MAPPING;
380 pcs->pci_sh = io_port;
381 mapped++;
382 }
383 }
384 if (mapped == 0 && (data & PCI_COMMAND_MEM_ENABLE)) {
385 if (pci_map_mem(cfid, MEM_MAP_REG, &vaddr, &paddr)) {
386 pcs->pci_st = MEM_SPACE_MAPPING;
387 pcs->pci_sh = vaddr;
388 mapped++;
389 }
390 }
391 }
392 if (mapped == 0) {
393 printf("isp%d: unable to map any ports!\n", unit);
394 free(pcs, M_DEVBUF);
395 return;
396 }
397 if (bootverbose)
398 printf("isp%d: using %s space register mapping\n", unit,
399 pcs->pci_st == IO_SPACE_MAPPING? "I/O" : "Memory");
400
401 data = pci_conf_read(cfid, PCI_ID_REG);
402 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
403 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
404 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
405 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
406 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
407 /*
408 * GCC!
409 */
410 mdvp = &mdvec;
411 basetype = ISP_HA_SCSI_UNKNOWN;
412 psize = sizeof (sdparam);
413 lim = BUS_SPACE_MAXSIZE_32BIT;
414#ifndef ISP_DISABLE_1020_SUPPORT
415 if (data == PCI_QLOGIC_ISP) {
416 mdvp = &mdvec;
417 basetype = ISP_HA_SCSI_UNKNOWN;
418 psize = sizeof (sdparam);
419 lim = BUS_SPACE_MAXSIZE_24BIT;
420 }
421#endif
422#ifndef ISP_DISABLE_1080_SUPPORT
423 if (data == PCI_QLOGIC_ISP1080) {
424 mdvp = &mdvec_1080;
425 basetype = ISP_HA_SCSI_1080;
426 psize = sizeof (sdparam);
427 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
428 ISP1080_DMA_REGS_OFF;
429 }
430 if (data == PCI_QLOGIC_ISP1240) {
431 mdvp = &mdvec_1080;
432 basetype = ISP_HA_SCSI_12X0;
433 psize = 2 * sizeof (sdparam);
434 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
435 ISP1080_DMA_REGS_OFF;
436 }
437#endif
438#ifndef ISP_DISABLE_2100_SUPPORT
439 if (data == PCI_QLOGIC_ISP2100) {
440 mdvp = &mdvec_2100;
441 basetype = ISP_HA_FC_2100;
442 psize = sizeof (fcparam);
443 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
444 PCI_MBOX_REGS2100_OFF;
445 data = pci_conf_read(cfid, PCI_CLASS_REG);
446 if ((data & 0xff) < 3) {
447 /*
448 * XXX: Need to get the actual revision
449 * XXX: number of the 2100 FB. At any rate,
450 * XXX: lower cache line size for early revision
451 * XXX; boards.
452 */
453 linesz = 1;
454 }
455 }
456#endif
457#ifndef ISP_DISABLE_2200_SUPPORT
458 if (data == PCI_QLOGIC_ISP2200) {
459 mdvp = &mdvec_2200;
460 basetype = ISP_HA_FC_2200;
461 psize = sizeof (fcparam);
462 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
463 PCI_MBOX_REGS2100_OFF;
464 }
465#endif
466 isp = &pcs->pci_isp;
467 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT);
468 if (isp->isp_param == NULL) {
469 printf("isp%d: cannot allocate parameter data\n", unit);
470 return;
471 }
472 bzero(isp->isp_param, psize);
473 isp->isp_mdvec = mdvp;
474 isp->isp_type = basetype;
475 (void) snprintf(isp->isp_name, sizeof (isp->isp_name), "isp%d", unit);
476 isp->isp_osinfo.unit = unit;
477
478 ISP_LOCK(isp);
479
480 /*
481 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
482 * are set.
483 */
484 data = pci_cfgread(cfid, PCIR_COMMAND, 2);
485 data |= PCIM_CMD_SEREN |
486 PCIM_CMD_PERRESPEN |
487 PCIM_CMD_BUSMASTEREN |
488 PCIM_CMD_INVEN;
489 pci_cfgwrite(cfid, PCIR_COMMAND, 2, data);
490
491 /*
492 * Make sure the Cache Line Size register is set sensibly.
493 */
494 data = pci_cfgread(cfid, PCIR_CACHELNSZ, 1);
495 if (data != linesz) {
496 data = PCI_DFLT_LNSZ;
497 printf("%s: set PCI line size to %d\n", isp->isp_name, data);
498 pci_cfgwrite(cfid, PCIR_CACHELNSZ, data, 1);
499 }
500
501 /*
502 * Make sure the Latency Timer is sane.
503 */
504 data = pci_cfgread(cfid, PCIR_LATTIMER, 1);
505 if (data < PCI_DFLT_LTNCY) {
506 data = PCI_DFLT_LTNCY;
507 printf("%s: set PCI latency to %d\n", isp->isp_name, data);
508 pci_cfgwrite(cfid, PCIR_LATTIMER, data, 1);
509 }
510
511 /*
512 * Make sure we've disabled the ROM.
513 */
514 data = pci_cfgread(cfid, PCIR_ROMADDR, 4);
515 data &= ~1;
516 pci_cfgwrite(cfid, PCIR_ROMADDR, data, 4);
517 ISP_UNLOCK(isp);
518
519 if (bus_dma_tag_create(NULL, 0, 0, BUS_SPACE_MAXADDR_32BIT,
519 if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
520 BUS_SPACE_MAXADDR, NULL, NULL, lim + 1,
521 255, lim, 0, &pcs->parent_dmat) != 0) {
522 printf("%s: could not create master dma tag\n", isp->isp_name);
523 free(pcs, M_DEVBUF);
524 return;
525 }
526 if (pci_map_int(cfid, (void (*)(void *))isp_intr,
527 (void *)isp, &IMASK) == 0) {
528 printf("%s: could not map interrupt\n", isp->isp_name);
529 free(pcs, M_DEVBUF);
530 return;
531 }
532
533 pcs->pci_id = cfid;
534#ifdef SCSI_ISP_NO_FWLOAD_MASK
535 if (SCSI_ISP_NO_FWLOAD_MASK && (SCSI_ISP_NO_FWLOAD_MASK & (1 << unit)))
536 isp->isp_confopts |= ISP_CFG_NORELOAD;
537#endif
538 if (getenv_int("isp_no_fwload", &bitmap)) {
539 if (bitmap & (1 << unit))
540 isp->isp_confopts |= ISP_CFG_NORELOAD;
541 }
542 if (getenv_int("isp_fwload", &bitmap)) {
543 if (bitmap & (1 << unit))
544 isp->isp_confopts &= ~ISP_CFG_NORELOAD;
545 }
546
547#ifdef SCSI_ISP_NO_NVRAM_MASK
548 if (SCSI_ISP_NO_NVRAM_MASK && (SCSI_ISP_NO_NVRAM_MASK & (1 << unit))) {
549 printf("%s: ignoring NVRAM\n", isp->isp_name);
550 isp->isp_confopts |= ISP_CFG_NONVRAM;
551 }
552#endif
553 if (getenv_int("isp_no_nvram", &bitmap)) {
554 if (bitmap & (1 << unit))
555 isp->isp_confopts |= ISP_CFG_NONVRAM;
556 }
557 if (getenv_int("isp_nvram", &bitmap)) {
558 if (bitmap & (1 << unit))
559 isp->isp_confopts &= ~ISP_CFG_NONVRAM;
560 }
561
562#ifdef SCSI_ISP_FCDUPLEX
563 if (IS_FC(isp)) {
564 if (SCSI_ISP_FCDUPLEX && (SCSI_ISP_FCDUPLEX & (1 << unit))) {
565 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
566 }
567 }
568#endif
569 if (getenv_int("isp_fcduplex", &bitmap)) {
570 if (bitmap & (1 << unit))
571 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
572 }
573 if (getenv_int("isp_no_fcduplex", &bitmap)) {
574 if (bitmap & (1 << unit))
575 isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
576 }
577
578 if (getenv_int("isp_seed", &isp->isp_osinfo.seed)) {
579 isp->isp_osinfo.seed <<= 8;
580 isp->isp_osinfo.seed += (unit + 1);
581 } else {
582 /*
583 * poor man's attempt at pseudo randomness.
584 */
585 long i = (long) isp;
586
587 i >>= 5;
588 i &= 0x7;
589
590 /*
591 * This isn't very random, but it's the best we can do for
592 * the real edge case of cards that don't have WWNs.
593 */
594 isp->isp_osinfo.seed += ((int) cfid->bus) << 16;
595 isp->isp_osinfo.seed += ((int) cfid->slot) << 8;
596 isp->isp_osinfo.seed += ((int) cfid->func);
597 while (version[i])
598 isp->isp_osinfo.seed += (int) version[i++];
599 isp->isp_osinfo.seed <<= 8;
600 isp->isp_osinfo.seed += (unit + 1);
601 }
602
603 ISP_LOCK(isp);
604 isp_reset(isp);
605 if (isp->isp_state != ISP_RESETSTATE) {
606 (void) pci_unmap_int(cfid);
607 ISP_UNLOCK(isp);
608 free(pcs, M_DEVBUF);
609 return;
610 }
611 isp_init(isp);
612 if (isp->isp_state != ISP_INITSTATE) {
613 /* If we're a Fibre Channel Card, we allow deferred attach */
614 if (IS_SCSI(isp)) {
615 isp_uninit(isp);
616 (void) pci_unmap_int(cfid); /* Does nothing */
617 ISP_UNLOCK(isp);
618 free(pcs, M_DEVBUF);
619 return;
620 }
621 }
622 isp_attach(isp);
623 if (isp->isp_state != ISP_RUNSTATE) {
624 /* If we're a Fibre Channel Card, we allow deferred attach */
625 if (IS_SCSI(isp)) {
626 isp_uninit(isp);
627 (void) pci_unmap_int(cfid); /* Does nothing */
628 ISP_UNLOCK(isp);
629 free(pcs, M_DEVBUF);
630 return;
631 }
632 }
633 ISP_UNLOCK(isp);
634#ifdef __alpha__
635 /*
636 * THIS SHOULD NOT HAVE TO BE HERE
637 */
638 alpha_register_pci_scsi(cfid->bus, cfid->slot, isp->isp_sim);
639#endif
640}
641
642static u_int16_t
643isp_pci_rd_reg(isp, regoff)
644 struct ispsoftc *isp;
645 int regoff;
646{
647 u_int16_t rv;
648 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
649 int offset, oldconf = 0;
650
651 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
652 /*
653 * We will assume that someone has paused the RISC processor.
654 */
655 oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
656 isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
657 }
658 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
659 offset += (regoff & 0xff);
660 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
661 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
662 isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
663 }
664 return (rv);
665}
666
667static void
668isp_pci_wr_reg(isp, regoff, val)
669 struct ispsoftc *isp;
670 int regoff;
671 u_int16_t val;
672{
673 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
674 int offset, oldconf = 0;
675
676 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
677 /*
678 * We will assume that someone has paused the RISC processor.
679 */
680 oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
681 isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
682 }
683 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
684 offset += (regoff & 0xff);
685 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
686 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
687 isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
688 }
689}
690
691#ifndef ISP_DISABLE_1080_SUPPORT
692static u_int16_t
693isp_pci_rd_reg_1080(isp, regoff)
694 struct ispsoftc *isp;
695 int regoff;
696{
697 u_int16_t rv;
698 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
699 int offset, oc = 0;
700
701 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
702 /*
703 * We will assume that someone has paused the RISC processor.
704 */
705 oc = isp_pci_rd_reg(isp, BIU_CONF1);
706 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
707 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
708 oc = isp_pci_rd_reg(isp, BIU_CONF1);
709 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
710 }
711 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
712 offset += (regoff & 0xff);
713 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
714 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
715 ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
716 isp_pci_wr_reg(isp, BIU_CONF1, oc);
717 }
718 return (rv);
719}
720
721static void
722isp_pci_wr_reg_1080(isp, regoff, val)
723 struct ispsoftc *isp;
724 int regoff;
725 u_int16_t val;
726{
727 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
728 int offset, oc = 0;
729
730 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
731 /*
732 * We will assume that someone has paused the RISC processor.
733 */
734 oc = isp_pci_rd_reg(isp, BIU_CONF1);
735 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
736 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
737 oc = isp_pci_rd_reg(isp, BIU_CONF1);
738 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
739 }
740 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
741 offset += (regoff & 0xff);
742 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
743 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
744 ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
745 isp_pci_wr_reg(isp, BIU_CONF1, oc);
746 }
747}
748#endif
749
750
751static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int));
752static void isp_map_result __P((void *, bus_dma_segment_t *, int, int));
753static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int));
754
755struct imush {
756 struct ispsoftc *isp;
757 int error;
758};
759
760static void
761isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
762{
763 struct imush *imushp = (struct imush *) arg;
764 if (error) {
765 imushp->error = error;
766 } else {
767 imushp->isp->isp_rquest_dma = segs->ds_addr;
768 }
769}
770
771static void
772isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error)
773{
774 struct imush *imushp = (struct imush *) arg;
775 if (error) {
776 imushp->error = error;
777 } else {
778 imushp->isp->isp_result_dma = segs->ds_addr;
779 }
780}
781
782static void
783isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error)
784{
785 struct imush *imushp = (struct imush *) arg;
786 if (error) {
787 imushp->error = error;
788 } else {
789 fcparam *fcp = imushp->isp->isp_param;
790 fcp->isp_scdma = segs->ds_addr;
791 }
792}
793
794static int
795isp_pci_mbxdma(struct ispsoftc *isp)
796{
797 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
798 caddr_t base;
799 u_int32_t len;
800 int i, error;
801 bus_size_t lim;
802 struct imush im;
803
804
805 if (IS_FC(isp) || IS_1080(isp) || IS_12X0(isp))
806 lim = BUS_SPACE_MAXADDR + 1;
807 else
808 lim = BUS_SPACE_MAXADDR_24BIT + 1;
809
810 /*
811 * Allocate and map the request, result queues, plus FC scratch area.
812 */
813 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
814 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
815 if (IS_FC(isp)) {
816 len += ISP2100_SCRLEN;
817 }
818 if (bus_dma_tag_create(pci->parent_dmat, PAGE_SIZE, lim,
819 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
820 BUS_SPACE_MAXSIZE_32BIT, 0, &pci->cntrol_dmat) != 0) {
821 printf("%s: cannot create a dma tag for control spaces\n",
822 isp->isp_name);
823 return (1);
824 }
825 if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base,
826 BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) {
827 printf("%s: cannot allocate %d bytes of CCB memory\n",
828 isp->isp_name, len);
829 return (1);
830 }
831
832 isp->isp_rquest = base;
833 im.isp = isp;
834 im.error = 0;
835 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest,
836 ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN), isp_map_rquest, &im, 0);
837 if (im.error) {
838 printf("%s: error %d loading dma map for DMA request queue\n",
839 isp->isp_name, im.error);
840 return (1);
841 }
842 isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
843 im.error = 0;
844 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result,
845 ISP_QUEUE_SIZE(RESULT_QUEUE_LEN), isp_map_result, &im, 0);
846 if (im.error) {
847 printf("%s: error %d loading dma map for DMA result queue\n",
848 isp->isp_name, im.error);
849 return (1);
850 }
851
852 /*
853 * Use this opportunity to initialize/create data DMA maps.
854 */
855 for (i = 0; i < MAXISPREQUEST; i++) {
856 error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]);
857 if (error) {
858 printf("%s: error %d creating mailbox DMA maps\n",
859 isp->isp_name, error);
860 return (1);
861 }
862 }
863 if (IS_FC(isp)) {
864 fcparam *fcp = (fcparam *) isp->isp_param;
865 fcp->isp_scratch = base +
866 ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN) +
867 ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
868 im.error = 0;
869 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap,
870 fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, &im, 0);
871 if (im.error) {
872 printf("%s: error %d loading FC scratch area\n",
873 isp->isp_name, im.error);
874 return (1);
875 }
876 }
877 return (0);
878}
879
880static void dma2 __P((void *, bus_dma_segment_t *, int, int));
881typedef struct {
882 struct ispsoftc *isp;
883 ISP_SCSI_XFER_T *ccb;
884 ispreq_t *rq;
885 u_int8_t *iptrp;
886 u_int8_t optr;
887 u_int error;
888} mush_t;
889
890#define MUSHERR_NOQENTRIES -2
891
892static void
893dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
894{
895 mush_t *mp;
896 ISP_SCSI_XFER_T *ccb;
897 struct ispsoftc *isp;
898 struct isp_pcisoftc *pci;
899 bus_dmamap_t *dp;
900 bus_dma_segment_t *eseg;
901 ispreq_t *rq;
902 u_int8_t *iptrp;
903 u_int8_t optr;
904 ispcontreq_t *crq;
905 int drq, seglim, datalen;
906
907 mp = (mush_t *) arg;
908 if (error) {
909 mp->error = error;
910 return;
911 }
912
913 isp = mp->isp;
914 if (nseg < 1) {
915 printf("%s: zero or negative segment count\n", isp->isp_name);
916 mp->error = EFAULT;
917 return;
918 }
919 ccb = mp->ccb;
920 rq = mp->rq;
921 iptrp = mp->iptrp;
922 optr = mp->optr;
923
924 pci = (struct isp_pcisoftc *)isp;
925 dp = &pci->dmaps[rq->req_handle - 1];
926 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
927 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
928 drq = REQFLAG_DATA_IN;
929 } else {
930 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
931 drq = REQFLAG_DATA_OUT;
932 }
933
934 datalen = XS_XFRLEN(ccb);
935 if (IS_FC(isp)) {
936 seglim = ISP_RQDSEG_T2;
937 ((ispreqt2_t *)rq)->req_totalcnt = datalen;
938 ((ispreqt2_t *)rq)->req_flags |= drq;
939 } else {
940 seglim = ISP_RQDSEG;
941 rq->req_flags |= drq;
942 }
943
944 eseg = dm_segs + nseg;
945
946 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
947 if (IS_FC(isp)) {
948 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
949 rq2->req_dataseg[rq2->req_seg_count].ds_base =
950 dm_segs->ds_addr;
951 rq2->req_dataseg[rq2->req_seg_count].ds_count =
952 dm_segs->ds_len;
953 } else {
954 rq->req_dataseg[rq->req_seg_count].ds_base =
955 dm_segs->ds_addr;
956 rq->req_dataseg[rq->req_seg_count].ds_count =
957 dm_segs->ds_len;
958 }
959 datalen -= dm_segs->ds_len;
960#if 0
961 if (IS_FC(isp)) {
962 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
963 printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
964 isp->isp_name, rq->req_seg_count,
965 rq2->req_dataseg[rq2->req_seg_count].ds_count,
966 rq2->req_dataseg[rq2->req_seg_count].ds_base);
967 } else {
968 printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
969 isp->isp_name, rq->req_seg_count,
970 rq->req_dataseg[rq->req_seg_count].ds_count,
971 rq->req_dataseg[rq->req_seg_count].ds_base);
972 }
973#endif
974 rq->req_seg_count++;
975 dm_segs++;
976 }
977
978 while (datalen > 0 && dm_segs != eseg) {
979 crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
980 *iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN);
981 if (*iptrp == optr) {
982#if 0
983 printf("%s: Request Queue Overflow++\n", isp->isp_name);
984#endif
985 mp->error = MUSHERR_NOQENTRIES;
986 return;
987 }
988 rq->req_header.rqs_entry_count++;
989 bzero((void *)crq, sizeof (*crq));
990 crq->req_header.rqs_entry_count = 1;
991 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
992
993 seglim = 0;
994 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
995 crq->req_dataseg[seglim].ds_base =
996 dm_segs->ds_addr;
997 crq->req_dataseg[seglim].ds_count =
998 dm_segs->ds_len;
999#if 0
1000 printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n",
1001 isp->isp_name, rq->req_header.rqs_entry_count-1,
1002 seglim, crq->req_dataseg[seglim].ds_count,
1003 crq->req_dataseg[seglim].ds_base);
1004#endif
1005 rq->req_seg_count++;
1006 dm_segs++;
1007 seglim++;
1008 datalen -= dm_segs->ds_len;
1009 }
1010 }
1011}
1012
1013static int
1014isp_pci_dmasetup(struct ispsoftc *isp, ISP_SCSI_XFER_T *ccb, ispreq_t *rq,
1015 u_int8_t *iptrp, u_int8_t optr)
1016{
1017 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1018 struct ccb_hdr *ccb_h;
1019 struct ccb_scsiio *csio;
1020 bus_dmamap_t *dp;
1021 mush_t mush, *mp;
1022
1023 csio = (struct ccb_scsiio *) ccb;
1024 ccb_h = &csio->ccb_h;
1025
1026 if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
1027 rq->req_seg_count = 1;
1028 return (CMD_QUEUED);
1029 }
1030 dp = &pci->dmaps[rq->req_handle - 1];
1031
1032 /*
1033 * Do a virtual grapevine step to collect info for
1034 * the callback dma allocation that we have to use...
1035 */
1036 mp = &mush;
1037 mp->isp = isp;
1038 mp->ccb = ccb;
1039 mp->rq = rq;
1040 mp->iptrp = iptrp;
1041 mp->optr = optr;
1042 mp->error = 0;
1043
1044 if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
1045 if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
1046 int error, s;
1047
1048 s = splsoftvm();
1049 error = bus_dmamap_load(pci->parent_dmat, *dp,
1050 csio->data_ptr, csio->dxfer_len, dma2, mp, 0);
1051 if (error == EINPROGRESS) {
1052 bus_dmamap_unload(pci->parent_dmat, *dp);
1053 mp->error = EINVAL;
1054 printf("%s: deferred dma allocation not "
1055 "supported\n", isp->isp_name);
1056 } else if (error && mp->error == 0) {
1057 mp->error = error;
1058 }
1059 splx(s);
1060 } else {
1061 /* Pointer to physical buffer */
1062 struct bus_dma_segment seg;
1063 seg.ds_addr = (bus_addr_t)csio->data_ptr;
1064 seg.ds_len = csio->dxfer_len;
1065 dma2(mp, &seg, 1, 0);
1066 }
1067 } else {
1068 struct bus_dma_segment *segs;
1069
1070 if ((ccb_h->flags & CAM_DATA_PHYS) != 0) {
1071 printf("%s: Physical segment pointers unsupported",
1072 isp->isp_name);
1073 mp->error = EINVAL;
1074 } else if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) {
1075 printf("%s: Virtual segment addresses unsupported",
1076 isp->isp_name);
1077 mp->error = EINVAL;
1078 } else {
1079 /* Just use the segments provided */
1080 segs = (struct bus_dma_segment *) csio->data_ptr;
1081 dma2(mp, segs, csio->sglist_cnt, 0);
1082 }
1083 }
1084 if (mp->error) {
1085 int retval = CMD_COMPLETE;
1086 if (mp->error == MUSHERR_NOQENTRIES) {
1087 retval = CMD_EAGAIN;
1088 ccb_h->status = CAM_UNREC_HBA_ERROR;
1089 } else if (mp->error == EFBIG) {
1090 ccb_h->status = CAM_REQ_TOO_BIG;
1091 } else if (mp->error == EINVAL) {
1092 ccb_h->status = CAM_REQ_INVALID;
1093 } else {
1094 ccb_h->status = CAM_UNREC_HBA_ERROR;
1095 }
1096 return (retval);
1097 } else {
1098 return (CMD_QUEUED);
1099 }
1100}
1101
1102static void
1103isp_pci_dmateardown(struct ispsoftc *isp, ISP_SCSI_XFER_T *ccb,
1104 u_int32_t handle)
1105{
1106 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1107 bus_dmamap_t *dp = &pci->dmaps[handle];
1108
1109 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1110 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD);
1111 } else {
1112 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE);
1113 }
1114 bus_dmamap_unload(pci->parent_dmat, *dp);
1115}
1116
1117
1118static void
1119isp_pci_reset1(struct ispsoftc *isp)
1120{
1121 /* Make sure the BIOS is disabled */
1122 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
1123}
1124
1125static void
1126isp_pci_dumpregs(struct ispsoftc *isp)
1127{
1128 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1129 printf("%s: PCI Status Command/Status=%lx\n", pci->pci_isp.isp_name,
1130 pci_conf_read(pci->pci_id, PCIR_COMMAND));
1131}