/*-
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997-2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/isp/isp_pci.c 160212 2006-07-09 17:50:20Z mjacob $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#if __FreeBSD_version >= 700000
#include <sys/linker.h>
#include <sys/firmware.h>
#endif
#include <sys/bus.h>
#if __FreeBSD_version < 500000
#include <pci/pcireg.h>
#include <pci/pcivar.h>
#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#else
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#endif
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#include <dev/isp/isp_freebsd.h>

#if __FreeBSD_version < 500000
#define	BUS_PROBE_DEFAULT	0
#endif

static uint16_t isp_pci_rd_reg(ispsoftc_t *, int);
static void isp_pci_wr_reg(ispsoftc_t *, int, uint16_t);
static uint16_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint16_t);
static int
isp_pci_rd_isr(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int
isp_pci_rd_isr_2300(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static int
isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint16_t *, uint16_t);
static void
isp_pci_dmateardown(ispsoftc_t *, XS_T *, uint16_t);


static void isp_pci_reset1(ispsoftc_t *);
static void isp_pci_dumpregs(ispsoftc_t *, const char *);

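/*
 * Machine dependent vector tables: one per chip family, supplying the
 * register access, interrupt service, DMA setup/teardown and reset
 * entry points that the platform independent core calls through.
 */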
static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif
#ifndef	PCIM_CMD_INTX_DISABLE
#define	PCIM_CMD_INTX_DISABLE		0x0400
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2322
#define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2422
#define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6312
#define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6322
#define	PCI_PRODUCT_QLOGIC_ISP6322	0x6322
#endif


#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2322	\
	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2422	\
	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6312	\
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6322	\
	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);


struct isp_pcisoftc {
	ispsoftc_t			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
};

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
#if __FreeBSD_version >= 700000
MODULE_DEPEND(isp, ispfw, 1, 1, 1);
MODULE_DEPEND(isp, firmware, 1, 1, 1);
#else
typedef void ispfwfunc(int, int, int, uint16_t **);
extern ispfwfunc *isp_get_firmware_p;
#endif

static int
isp_pci_probe(device_t dev)
{
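	/*
	 * The device id lives in the upper 16 bits of the match key;
	 * e.g. an ISP1020 yields (0x1020 << 16) | 0x1077 == 0x10201077,
	 * which is what PCI_QLOGIC_ISP1020 expands to.
	 */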
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2322:
		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2422:
		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6322:
		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}

#if __FreeBSD_version < 500000
static void
isp_get_options(device_t dev, ispsoftc_t *isp)
{
	uint64_t wwn;
	int bitmap, unit;

	unit = device_get_unit(dev);
	if (getenv_int("isp_disable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			isp->isp_osinfo.disabled = 1;
			return;
		}
	}

	if (getenv_int("isp_no_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_no_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_no_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_nport", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NPORT;
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	if (getenv_quad("isp_portwwn", &wwn)) {
		isp->isp_osinfo.default_port_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWPN;
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	if (getenv_quad("isp_nodewwn", &wwn)) {
		isp->isp_osinfo.default_node_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWNN;
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	bitmap = 0;
	(void) getenv_int("isp_debug", &bitmap);
	if (bitmap) {
		isp->isp_dblev = bitmap;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}

#ifdef ISP_FW_CRASH_DUMP
	bitmap = 0;
	if (getenv_int("isp_fw_dump_enable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			size_t amt = 0;
			if (IS_2200(isp)) {
				amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
			} else if (IS_23XX(isp)) {
				amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
			}
			if (amt) {
				FCPARAM(isp)->isp_dump_data =
				    malloc(amt, M_DEVBUF, M_WAITOK);
				memset(FCPARAM(isp)->isp_dump_data, 0, amt);
			} else {
				device_printf(dev,
				    "f/w crash dumps not supported for card\n");
			}
		}
	}
#endif
}
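/*
 * The isp_* environment variables above are per-unit bitmaps; for
 * example (a hypothetical loader environment setting), isp_no_fwload="3"
 * would skip firmware download for units 0 and 1 (bits 0 and 1 set).
 */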

static void
isp_get_pci_options(device_t dev, int *m1, int *m2)
{
	int bitmap;
	int unit = device_get_unit(dev);

	*m1 = PCIM_CMD_MEMEN;
	*m2 = PCIM_CMD_PORTEN;
	if (getenv_int("isp_mem_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			*m1 = PCIM_CMD_MEMEN;
			*m2 = PCIM_CMD_PORTEN;
		}
	}
	bitmap = 0;
	if (getenv_int("isp_io_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			*m1 = PCIM_CMD_PORTEN;
			*m2 = PCIM_CMD_MEMEN;
		}
	}
}
#else
static void
isp_get_options(device_t dev, ispsoftc_t *isp)
{
	int tval;
	const char *sptr;
	/*
	 * Figure out if we're supposed to skip this one.
	 */

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "disable", &tval) == 0 && tval) {
		device_printf(dev, "disabled at user request\n");
		isp->isp_osinfo.disabled = 1;
		return;
	}

	tval = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "role", &tval) == 0 && tval != -1) {
		tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET);
		isp->isp_role = tval;
		device_printf(dev, "setting role to 0x%x\n", isp->isp_role);
	} else {
#ifdef ISP_TARGET_MODE
		isp->isp_role = ISP_ROLE_TARGET;
#else
		isp->isp_role = ISP_DEFAULT_ROLES;
#endif
	}

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fullduplex", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
#ifdef ISP_FW_CRASH_DUMP
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fw_dump_enable", &tval) == 0 && tval != 0) {
		size_t amt = 0;
		if (IS_2200(isp)) {
			amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
		} else if (IS_23XX(isp)) {
			amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
		}
		if (amt) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
		} else {
			device_printf(dev,
			    "f/w crash dumps not supported for this model\n");
		}
	}
#endif

	sptr = 0;
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "topology", (const char **) &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
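	/*
	 * For example (hypothetical /boot/device.hints entries for unit 0):
	 *
	 *	hint.isp.0.portwwn="w50000000aaaa0001"
	 *	hint.isp.0.nodewwn="w50000000aaaa0002"
	 */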
	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "portwwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_port_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWPN;
		}
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "nodewwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_node_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWNN;
		}
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	isp->isp_osinfo.default_id = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "iid", &tval) == 0) {
		isp->isp_osinfo.default_id = tval;
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}
	if (isp->isp_osinfo.default_id == -1) {
		if (IS_FC(isp)) {
			isp->isp_osinfo.default_id = 109;
		} else {
			isp->isp_osinfo.default_id = 7;
		}
	}

	/*
	 * Set up logging levels.
	 */
	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "debug", &tval);
	if (tval) {
		isp->isp_dblev = tval;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}

}

static void
isp_get_pci_options(device_t dev, int *m1, int *m2)
{
	int tval;
	/*
	 * Which we should try first - memory mapping or i/o mapping?
	 *
	 * We used to try memory first followed by i/o on alpha, otherwise
	 * the reverse, but we should just try memory first all the time now.
	 */
	*m1 = PCIM_CMD_MEMEN;
	*m2 = PCIM_CMD_PORTEN;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_iomap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_PORTEN;
		*m2 = PCIM_CMD_MEMEN;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_memmap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_MEMEN;
		*m2 = PCIM_CMD_PORTEN;
	}
}
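/*
 * A minimal example of the mapping preference hints (hypothetical unit 0
 * entry in /boot/device.hints):
 *
 *	hint.isp.0.prefer_iomap="1"
 */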
#endif

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int rtp, rgd, iqd, m1, m2;
	uint32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp = NULL;
	struct ispmdvec *mdvp;
#if __FreeBSD_version >= 500000
	int locksetup = 0;
#endif

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}
	memset(pcs, 0, sizeof (*pcs));
	pcs->pci_dev = dev;
	isp = &pcs->pci_isp;

	/*
	 * Get Generic Options
	 */
	isp_get_options(dev, isp);

	/*
	 * Check to see if options have us disabled
	 */
	if (isp->isp_osinfo.disabled) {
		/*
		 * But return zero to preserve unit numbering
		 */
		return (0);
	}

	/*
	 * Get PCI options- which in this case are just mapping preferences.
	 */
	isp_get_pci_options(dev, &m1, &m2);


	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	}
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_10160;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2300;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 ||
	    pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2312;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 ||
	    pci_get_devid(dev) == PCI_QLOGIC_ISP6322) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2322;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2422;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	isp = &pcs->pci_isp;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
	isp->isp_dev = dev;

#if __FreeBSD_version >= 700000
	/*
	 * Try and find firmware for this device.
	 */
	{
		char fwname[32];
		unsigned int did = pci_get_device(dev);

		/*
		 * Map a few pci ids to fw names
		 */
		switch (did) {
		case PCI_PRODUCT_QLOGIC_ISP1020:
			did = 0x1040;
			break;
		case PCI_PRODUCT_QLOGIC_ISP1240:
			did = 0x1080;
			break;
		case PCI_PRODUCT_QLOGIC_ISP10160:
		case PCI_PRODUCT_QLOGIC_ISP12160:
			did = 0x12160;
			break;
		case PCI_PRODUCT_QLOGIC_ISP6312:
		case PCI_PRODUCT_QLOGIC_ISP2312:
			did = 0x2300;
			break;
		case PCI_PRODUCT_QLOGIC_ISP6322:
			did = 0x2322;
			break;
		default:
			break;
		}

		isp->isp_osinfo.fw = NULL;
		if (isp->isp_role & ISP_ROLE_TARGET) {
			snprintf(fwname, sizeof (fwname), "isp_%04x_it", did);
			isp->isp_osinfo.fw = firmware_get(fwname);
		}
		if (isp->isp_osinfo.fw == NULL) {
			snprintf(fwname, sizeof (fwname), "isp_%04x", did);
			isp->isp_osinfo.fw = firmware_get(fwname);
		}
		if (isp->isp_osinfo.fw != NULL) {
			union {
				const void *fred;
				uint16_t *bob;
			} u;
			u.fred = isp->isp_osinfo.fw->data;
			isp->isp_mdvec->dv_ispfw = u.bob;
		}
	}
#else
	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}
#endif

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;

	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}

	if (IS_23XX(isp)) {
		/*
		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
		 */
		isp->isp_touched = 1;

	}

	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}

	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	iqd = 0;
	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
	    RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

#if __FreeBSD_version >= 500000
	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;
#endif

	if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Last minute checks...
	 */
	if (IS_23XX(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_attach(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:

	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

#if __FreeBSD_version >= 500000
	if (locksetup && isp) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}
#endif

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}


	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param) {
#ifdef ISP_FW_CRASH_DUMP
			if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) {
				free(FCPARAM(isp)->isp_dump_data, M_DEVBUF);
			}
#endif
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		}
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

static void
isp_pci_intr(void *arg)
{
	ispsoftc_t *isp = arg;
	uint16_t isr, sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		int iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_intr(isp, isr, sema, mbox);
		isp->isp_osinfo.intsok = iok;
	}
	ISP_UNLOCK(isp);
}


#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)

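/*
 * IspVirt2Off translates a chip-relative register token into an offset
 * within the mapped PCI register window: the block bits index pci_poff[]
 * (whose per-block bases were set up in isp_pci_attach) and the low byte
 * supplies the offset within that block. For example, on a 23XX card a
 * MBOX_BLOCK register resolves against PCI_MBOX_REGS2300_OFF.
 */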
static __inline int
isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint16_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(ispsoftc_t *isp, uint16_t *isrp,
    uint16_t *semap, uint16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

static int
isp_pci_rd_isr_2300(ispsoftc_t *isp, uint16_t *isrp,
    uint16_t *semap, uint16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint16_t hccr;
	uint32_t r2hisr;

	if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
		*isrp = 0;
		return (0);
	}
	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
	    IspVirt2Off(pcs, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		hccr = ISP_READ(isp, HCCR);
		if (hccr & HCCR_PAUSE) {
			ISP_WRITE(isp, HCCR, HCCR_RESET);
			isp_prt(isp, ISP_LOGERR,
			    "RISC paused at interrupt (%x->%x)\n", hccr,
			    ISP_READ(isp, HCCR));
		} else {
			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n",
			    r2hisr);
		}
		return (0);
	}
}

static uint16_t
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
{
	uint16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}

static uint16_t
isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
{
	uint16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		uint16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		uint16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}


struct imush {
	ispsoftc_t *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

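/*
 * Callback for the control-space bus_dmamap_load: carve the single
 * contiguous allocation up into the request queue, the result queue
 * and (for FC cards) the scratch area, recording each bus address.
 */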
static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		ispsoftc_t *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}

/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)
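/*
 * With a typical 128KB MAXPHYS and 4KB pages this works out to 33
 * segments: enough to cover a maximal transfer even when it is not
 * page aligned.
 */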

#if __FreeBSD_version < 500000
#define	isp_dma_tag_create	bus_dma_tag_create
#else
#define	isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z)	\
	bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k,	\
	    busdma_lock_mutex, &Giant, z)
#endif
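/*
 * On 5.x and later bus_dma_tag_create() grew lockfunc/lockfuncarg
 * arguments; the wrapper above serializes deferred busdma callbacks
 * with Giant.
 */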

static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	uint32_t len;
	int i, error, ns;
	bus_size_t slim;	/* segment size */
	bus_addr_t llim;	/* low limit of unavailable dma */
	bus_addr_t hlim;	/* high limit of unavailable dma */
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

	hlim = BUS_SPACE_MAXADDR;
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = (bus_size_t) (1ULL << 32);
		llim = BUS_SPACE_MAXADDR;
	} else {
		llim = BUS_SPACE_MAXADDR_32BIT;
		slim = (1 << 24);
	}

	/*
	 * XXX: We don't really support 64 bit target mode for parallel scsi yet
	 */
#ifdef ISP_TARGET_MODE
	if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) {
		isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet");
		return (1);
	}
#endif

	ISP_UNLOCK(isp);
	if (isp_dma_tag_create(NULL, 1, slim, llim, hlim,
	    NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		ISP_LOCK(isp);
		return (1);
	}


	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		ISP_LOCK(isp);
		return (1);
	}
#ifdef ISP_TARGET_MODE
	len = sizeof (void **) * isp->isp_maxcmds;
	isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_tgtlist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
		ISP_LOCK(isp);
		return (1);
	}
#endif
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pcs->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}

	ns = (len / PAGE_SIZE) + 1;
	/*
	 * Create a tag for the control spaces- force it to within 32 bits.
	 */
	if (isp_dma_tag_create(pcs->dmat, QENTRY_LEN, slim,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		free(pcs->dmaps, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
	    &isp->isp_cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_cdmat);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		free(pcs->dmaps, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
			}
			goto bad;
		}
	}

	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	isp->isp_result = base;
	if (IS_FC(isp)) {
		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		FCPARAM(isp)->isp_scratch = base;
	}
	ISP_LOCK(isp);
	return (0);

bad:
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
	free(isp->isp_tgtlist, M_DEVBUF);
#endif
	free(pcs->dmaps, M_DEVBUF);
	ISP_LOCK(isp);
	isp->isp_rquest = NULL;
	return (1);
}

typedef struct {
	ispsoftc_t *isp;
	void *cmd_token;
	void *rq;
	uint16_t *nxtip;
	uint16_t optr;
	int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2
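/*
 * mush_t carries the state shared between isp_pci_dmasetup and the
 * busdma load callbacks below: the command token, the partially built
 * request queue entry, the in/out queue pointers, and an error slot
 * the callback uses to report failure (or MUSHERR_NOQENTRIES when the
 * request queue fills).
 */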

#ifdef ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define	STATUS_WITH_DATA	1

static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	ispsoftc_t *isp;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	ct_entry_t *cto, *qe;
	uint8_t scsi_status;
	uint16_t curi, nxti, handle;
	uint32_t sflags;
	int32_t resid;
	int nth_ctio, nctios, send_status;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		cto->ct_header.rqs_seqno = 1;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
		isp_put_ctio(isp, cto, qe);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */

	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

#ifndef	STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	pcs = (struct isp_pcisoftc *)isp;
	dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;

	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on- the caller will do that.
			 */

			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
			}
			if (send_status) {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}

/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code and
 * improves performance.
 */

static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	ispsoftc_t *isp;
	ct2_entry_t *cto, *qe;
	uint16_t curi, nxti;
	ispds_t *ds;
	ispds64_t *ds64;
	int segcnt, seglim;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;

	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_header.rqs_seqno = 1;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		if (IS_2KLOGIN(isp)) {
			isp_put_ctio2e(isp,
			    (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
		} else {
			isp_put_ctio2(isp, cto, qe);
		}
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGERR,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}


	nxti = *mp->nxtip;

	/*
	 * Check to see if we need to use DAC addressing or not.
	 *
	 * Any address that's over the 4GB boundary causes this
	 * to happen.
	 */
	segcnt = nseg;
	if (sizeof (bus_addr_t) > 4) {
		for (segcnt = 0; segcnt < nseg; segcnt++) {
			uint64_t addr = dm_segs[segcnt].ds_addr;
			if (addr >= 0x100000000LL) {
				break;
			}
		}
	}
	if (segcnt != nseg) {
		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3;
		seglim = ISP_RQDSEG_T3;
		ds64 = &cto->rsp.m0.ct_dataseg64[0];
		ds = NULL;
	} else {
		seglim = ISP_RQDSEG_T2;
		ds64 = NULL;
		ds = &cto->rsp.m0.ct_dataseg[0];
	}
	cto->ct_seg_count = 0;

	/*
	 * Set up the CTIO2 data segments.
	 */
	for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg;
	    cto->ct_seg_count++, segcnt++) {
		if (ds64) {
			ds64->ds_basehi =
			    ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
			ds64->ds_base = dm_segs[segcnt].ds_addr;
			ds64->ds_count = dm_segs[segcnt].ds_len;
			ds64++;
		} else {
			ds->ds_base = dm_segs[segcnt].ds_addr;
			ds->ds_count = dm_segs[segcnt].ds_len;
			ds++;
		}
		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
#if __FreeBSD_version < 500000
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "isp_send_ctio2: ent0[%d]0x%llx:%llu",
		    cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr,
		    (uint64_t)dm_segs[segcnt].ds_len);
#else
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "isp_send_ctio2: ent0[%d]0x%jx:%ju",
		    cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr,
		    (uintmax_t)dm_segs[segcnt].ds_len);
#endif
	}

	while (segcnt < nseg) {
		uint16_t curip;
		int seg;
		ispcontreq_t local, *crq = &local, *qep;

		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		curip = nxti;
		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			ISP_UNLOCK(isp);
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "tdma_mkfc: request queue overflow");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		cto->ct_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) {
			seglim = ISP_CDSEG64;
			ds = NULL;
			ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0];
			crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
		} else {
			seglim = ISP_CDSEG;
			ds = &crq->req_dataseg[0];
			ds64 = NULL;
			crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		}
		for (seg = 0; segcnt < nseg && seg < seglim;
		    segcnt++, seg++) {
			if (ds64) {
				ds64->ds_basehi =
				    ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
				ds64->ds_base = dm_segs[segcnt].ds_addr;
				ds64->ds_count = dm_segs[segcnt].ds_len;
				ds64++;
			} else {
				ds->ds_base = dm_segs[segcnt].ds_addr;
				ds->ds_count = dm_segs[segcnt].ds_len;
				ds++;
			}
#if __FreeBSD_version < 500000
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]%llx:%llu",
			    cto->ct_header.rqs_entry_count-1, seg,
			    (uint64_t)dm_segs[segcnt].ds_addr,
			    (uint64_t)dm_segs[segcnt].ds_len);
#else
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]%jx:%ju",
			    cto->ct_header.rqs_entry_count-1, seg,
			    (uintmax_t)dm_segs[segcnt].ds_addr,
			    (uintmax_t)dm_segs[segcnt].ds_len);
#endif
			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
			cto->ct_seg_count++;
		}
		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
		isp_put_cont_req(isp, crq, qep);
		ISP_TDQE(isp, "cont entry", curi, qep);
	}

2021 /*
2022 * No do final twiddling for the CTIO itself.
2023 */
2024 cto->ct_header.rqs_seqno = 1;
2025 isp_prt(isp, ISP_LOGTDEBUG1,
2026 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
2027 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
2028 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
2029 cto->ct_resid);
2030 if (IS_2KLOGIN(isp))
2031 isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
2032 else
2033 isp_put_ctio2(isp, cto, qe);
2034 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
2035 *mp->nxtip = nxti;
2036}
2037#endif
2038
2039static void dma2_a64(void *, bus_dma_segment_t *, int, int);
2040static void dma2(void *, bus_dma_segment_t *, int, int);
2041
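/*
 * bus_dmamap_load() callback for initiator mode commands on platforms
 * with bus addresses wider than 32 bits.  Builds a type 3 request (for
 * Fibre Channel) or an A64 request (for parallel SCSI) with 64-bit
 * data segment descriptors; DMA_LO32()/DMA_HI32() split each segment
 * address into its low and high 32-bit halves.
 */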
static void
dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq64_t *rq;
	int seglim, datalen;
	uint16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}
	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial, partially filled-in request queue
	 * entry that has most fields set except for the data transfer
	 * related values.
	 *
	 * Our job is to complete the initial entry and then allocate
	 * and fill in continuation entries until we've covered the
	 * entire transfer.
	 */

	if (IS_FC(isp)) {
		rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
		seglim = ISP_RQDSEG_T3;
		((ispreqt3_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		rq->req_header.rqs_entry_type = RQSTYPE_A64;
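		/*
		 * An extended (> 12 byte) CDB leaves no room for data
		 * segments in the initial entry, so in that case the
		 * whole transfer is described by continuation entries.
		 */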
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG_A64;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt3_t *rq3 = (ispreqt3_t *)rq;
			rq3->req_dataseg[rq3->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq3->req_dataseg[rq3->req_seg_count].ds_basehi =
			    DMA_HI32(dm_segs->ds_addr);
			rq3->req_dataseg[rq3->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_basehi =
			    DMA_HI32(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

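	/*
	 * Whatever didn't fit into the initial entry goes into 64-bit
	 * continuation entries, up to ISP_CDSEG64 segments at a time.
	 */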
	while (datalen > 0 && dm_segs != eseg) {
		uint16_t onxti;
		ispcontreq64_t local, *crq = &local, *cqe;

		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_basehi =
			    DMA_HI32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			seglim++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
		}
		isp_put_cont64_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}

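/*
 * bus_dmamap_load() callback for initiator mode commands when bus
 * addresses fit in 32 bits.  Structurally identical to dma2_a64()
 * but builds entries with 32-bit data segment descriptors.
 */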
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	uint16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial, partially filled-in request queue
	 * entry that has most fields set except for the data transfer
	 * related values.
	 *
	 * Our job is to complete the initial entry and then allocate
	 * and fill in continuation entries until we've covered the
	 * entire transfer.
	 */

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
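		/*
		 * As in dma2_a64(): an extended CDB leaves no room
		 * for data segments in the initial entry.
		 */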
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

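	/*
	 * Spill any remaining segments into standard continuation
	 * entries, up to ISP_CDSEG segments at a time.
	 */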
	while (datalen > 0 && dm_segs != eseg) {
		uint16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			seglim++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}

/*
 * We enter with ISP_LOCK held.
 */
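/*
 * Returns CMD_QUEUED once the request (and any continuation entries)
 * have been swizzled into the request queue, CMD_EAGAIN if we ran out
 * of queue entries, or CMD_COMPLETE with a CAM error already set in
 * the CCB.  A minimal sketch of the expected calling pattern (the
 * actual start routine lives in isp_freebsd.c and isn't shown here,
 * so treat this as illustrative rather than definitive):
 *
 *	switch (isp_pci_dmasetup(isp, csio, rq, &nxti, optr)) {
 *	case CMD_QUEUED:
 *		update the request queue in-pointer to nxti
 *		break;
 *	case CMD_EAGAIN:
 *		requeue the command and try again later
 *		break;
 *	default:
 *		complete the CCB with the error already set
 *	}
 */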
static int
isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    uint16_t *nxtip, uint16_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			ISPLOCK_2_CAMLOCK(isp);
			(*eptr)(mp, NULL, 0, 0);
			CAMLOCK_2_ISPLOCK(isp);
			goto mbxsync;
		}
	} else
#endif
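	/*
	 * Non target-mode case: pick the DAC capable callback when
	 * bus addresses are wider than 32 bits, else the plain one.
	 */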
	if (sizeof (bus_addr_t) > 4) {
		eptr = dma2_a64;
	} else {
		eptr = dma2;
	}

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Do a virtual grapevine step to collect the info the DMA
	 * load callback will need: bus_dmamap_load() hands its
	 * callback only one opaque argument, so everything goes
	 * through the mush_t.
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

	ISPLOCK_2_CAMLOCK(isp);
	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			s = splsoftvm();
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	CAMLOCK_2_ISPLOCK(isp);
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	case RQSTYPE_A64:
	case RQSTYPE_T3RQS:
		isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}

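/*
 * Post-I/O DMA teardown: sync the map back to the CPU (POSTREAD for
 * inbound data, POSTWRITE for outbound) and unload it so the map can
 * be reused by the next command with this handle.
 */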
static void
isp_pci_dmateardown(ispsoftc_t *isp, XS_T *xs, uint16_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}

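/*
 * Post-reset fixup, reached through the mdvec reset1 hook after the
 * chip has been reset.
 */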
static void
isp_pci_reset1(ispsoftc_t *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}

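/*
 * Debug register dump, also reached through the mdvec.  For parallel
 * SCSI chips the RISC processor is paused around the DMA and SXP
 * register reads so that the values are read out coherently.
 */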
static void
isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf(" mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf(" PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 4));
}