isp_pci.c (153462) → isp_pci.c (154704)
/*-
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997-2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/isp/isp_pci.c 154704 2006-01-23 06:23:37Z mjacob $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/stdint.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#ifdef ISP_TARGET_MODE
#ifdef	PAE
#error	"PAE and ISP_TARGET_MODE not supported yet"
#endif
#endif

#include <dev/isp/isp_freebsd.h>

static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
static int
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int isp_pci_mbxdma(struct ispsoftc *);
static int
isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t);
static void
isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);

static void isp_pci_reset1(struct ispsoftc *);
static void isp_pci_dumpregs(struct ispsoftc *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

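/*
 * Each ispmdvec above is the machine-dependent operations vector that the
 * common isp(4) core calls through: ISR poll, register read/write,
 * queue/control-space DMA setup, per-command DMA setup/teardown, and
 * reset/register-dump hooks; on the SCSI variants the trailing constant
 * supplies default BIU configuration bits.
 */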
#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2322
#define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6312
#define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
#endif

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2322	\
	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6312	\
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)

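/*
 * Each PCI_QLOGIC_* identifier packs (product id << 16) | vendor id,
 * which is the same layout pci_get_devid(9) returns and the value that
 * isp_pci_probe() switches on below.
 */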
/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);


struct isp_pcisoftc {
	struct ispsoftc			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
};
extern ispfwfunc *isp_get_firmware_p;

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,	isp_pci_probe),
	DEVMETHOD(device_attach, isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2322:
		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int tval, rtp, rgd, iqd, m1, m2, isp_debug, role;
	u_int32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp = NULL;
	struct ispmdvec *mdvp;
	const char *sptr;
	int locksetup = 0;

	/*
	 * Figure out if we're supposed to skip this one.
	 */

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "disable", &tval) == 0 && tval) {
		device_printf(dev, "device is disabled\n");
		/* but return 0 so the !$)$)*!$*) unit isn't reused */
		return (0);
	}

	role = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "role", &role) == 0 && role != -1) {
		role &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET);
		device_printf(dev, "setting role to 0x%x\n", role);
	} else {
#ifdef	ISP_TARGET_MODE
		role = ISP_ROLE_TARGET;
#else
		role = ISP_DEFAULT_ROLES;
#endif
	}

	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (pcs == NULL) {
		device_printf(dev, "cannot allocate softc\n");
		return (ENOMEM);
	}

	/*
	 * Which should we try first - memory mapping or i/o mapping?
	 *
	 * We used to try memory first followed by i/o on alpha, otherwise
	 * the reverse, but we should just try memory first all the time now.
	 */
	m1 = PCIM_CMD_MEMEN;
	m2 = PCIM_CMD_PORTEN;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_iomap", &tval) == 0 && tval != 0) {
		m1 = PCIM_CMD_PORTEN;
		m2 = PCIM_CMD_MEMEN;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_memmap", &tval) == 0 && tval != 0) {
		m1 = PCIM_CMD_MEMEN;
		m2 = PCIM_CMD_PORTEN;
	}

	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose)
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
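	/*
	 * The if-chain below overrides the defaults set above on a
	 * per-device basis: operations vector, hardware base type,
	 * parameter-area size (doubled for the dual-bus SCSI cards),
	 * and any register-block offsets that differ from the 1020
	 * layout.
	 */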
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_10160;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower the cache line size for early
			 * XXX: revision boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2300;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 ||
	    pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2312;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2322;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	isp = &pcs->pci_isp;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
	isp->isp_role = role;
	isp->isp_dev = dev;

	/*
	 * Try and find firmware for this device.
	 */

	/*
	 * Don't even attempt to get firmware for the 2322/2422 (yet)
	 */
	if (IS_2322(isp) == 0 && IS_24XX(isp) == 0 && isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef	ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_23XX(isp)) {
		/*
		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
		 */
		isp->isp_touched = 1;
	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	iqd = 0;
	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
	    RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fullduplex", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
#ifdef	ISP_FW_CRASH_DUMP
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fw_dump_enable", &tval) == 0 && tval != 0) {
		size_t amt = 0;
		if (IS_2200(isp)) {
			amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
		} else if (IS_23XX(isp)) {
			amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
		}
		if (amt) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
		} else {
			device_printf(dev,
			    "f/w crash dumps not supported for this model\n");
		}
	}
#endif

	sptr = 0;
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "topology", (const char **) &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values nor be coerced to interpret the right
	 * hand side of the assignment the way you want them to, we have
	 * to force WWN hint replacement to specify WWN strings with a
	 * leading 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
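	/*
	 * For example, in /boot/device.hints (assuming unit 0):
	 *
	 *	hint.isp.0.portwwn="w50000000aaaa0001"
	 *	hint.isp.0.nodewwn="w50000000aaaa0001"
	 */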
	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "portwwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_port_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWPN;
		}
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "nodewwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_node_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWNN;
		}
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	isp->isp_osinfo.default_id = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "iid", &tval) == 0) {
		isp->isp_osinfo.default_id = tval;
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}
	if (isp->isp_osinfo.default_id == -1) {
		if (IS_FC(isp)) {
			isp->isp_osinfo.default_id = 109;
		} else {
			isp->isp_osinfo.default_id = 7;
		}
	}

	isp_debug = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "debug", &isp_debug);

	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;

	if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Set up logging levels.
	 */
	if (isp_debug) {
		isp->isp_dblev = isp_debug;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;

	/*
	 * Last minute checks...
	 */
	if (IS_23XX(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_attach(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:

	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

	if (locksetup && isp) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}

	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param)
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		free(pcs, M_DEVBUF);
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

static void
isp_pci_intr(void *arg)
{
	struct ispsoftc *isp = arg;
	u_int16_t isr, sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		int iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_intr(isp, isr, sema, mbox);
		isp->isp_osinfo.intsok = iok;
	}
	ISP_UNLOCK(isp);
}


#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))
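
/*
 * IspVirt2Off translates a driver-virtual register offset (a block id in
 * the high bits plus a register offset in the low byte) into the actual
 * PCI bus space offset recorded in pci_poff[] at attach time.
 */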

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)

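/*
 * Read a register repeatedly (bounded at 1000 tries) until two successive
 * reads agree. Evidently only the 2100 needs this; it is the only chip
 * for which the ISR/semaphore/mailbox read paths below bother to debounce.
 */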
static INLINE int
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

static int
isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int32_t r2hisr;

	if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) &
	    BIU2100_ISR_RISC_INT) == 0) {
		*isrp = 0;
		return (0);
	}
	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
	    IspVirt2Off(pcs, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
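	/*
	 * The low half of the RISC-to-host status register is the
	 * interrupt status proper; the high half carries the contents
	 * of outgoing mailbox register 0.
	 */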
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		return (0);
	}
}

static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}

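/*
 * The 1080-family parts multiplex the SXP (bank 0/1) and DMA register
 * banks behind select bits in BIU_CONF1, so accesses to those blocks
 * have to flip the select bits around the access and then restore them.
 */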
static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}


struct imush {
	struct ispsoftc *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

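/*
 * imc() is the bus_dmamap_load() callback for the single contiguous
 * control-space allocation: it carves the returned bus address into the
 * request queue, the result queue and (for FC cards) the scratch area,
 * in that order, mirroring the KVA carving done in isp_pci_mbxdma().
 */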
static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		struct ispsoftc *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}

/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define	ISP_NSEGS	((MAXPHYS / PAGE_SIZE) + 1)

static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error, ns;
	bus_size_t alim, slim, xlim;
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

#ifdef	ISP_DAC_SUPPORTED
	alim = BUS_SPACE_UNRESTRICTED;
	xlim = BUS_SPACE_MAXADDR_32BIT;
#else
	xlim = alim = BUS_SPACE_MAXADDR_32BIT;
#endif
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = BUS_SPACE_MAXADDR_32BIT;
	} else {
		slim = BUS_SPACE_MAXADDR_24BIT;
	}
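	/*
	 * slim is used both as the DMA boundary (slim + 1) and as the
	 * maximum segment size in the tags created below: SCSI parts
	 * other than the Ultra2 and 1240 class chips are held to 24
	 * bits, everything else gets 32.
	 */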
1113
1114 ISP_UNLOCK(isp);
1115 if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim,
1116 NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0,
1117 busdma_lock_mutex, &Giant, &pcs->dmat)) {
1118 isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
1119 ISP_LOCK(isp);
1120 return(1);
1121 }
1122
1123
1124 len = sizeof (XS_T **) * isp->isp_maxcmds;
1125 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1126 if (isp->isp_xflist == NULL) {
1127 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
1128 ISP_LOCK(isp);
1129 return (1);
1130 }
1131#ifdef ISP_TARGET_MODE
1132 len = sizeof (void **) * isp->isp_maxcmds;
1133 isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1134 if (isp->isp_tgtlist == NULL) {
1135 isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
1136 ISP_LOCK(isp);
1137 return (1);
1138 }
1139#endif
1140 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
1141 pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
1142 if (pcs->dmaps == NULL) {
1143 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
1144 free(isp->isp_xflist, M_DEVBUF);
1145#ifdef ISP_TARGET_MODE
1146 free(isp->isp_tgtlist, M_DEVBUF);
1147#endif
1148 ISP_LOCK(isp);
1149 return (1);
1150 }
1151
1152 /*
1153 * Allocate and map the request, result queues, plus FC scratch area.
1154 */
1155 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1156 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1157 if (IS_FC(isp)) {
1158 len += ISP2100_SCRLEN;
1159 }
1160
1161 ns = (len / PAGE_SIZE) + 1;
1162 if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, xlim, xlim,
1163 NULL, NULL, len, ns, slim, 0, busdma_lock_mutex, &Giant,
1164 &isp->isp_cdmat)) {
1165 isp_prt(isp, ISP_LOGERR,
1166 "cannot create a dma tag for control spaces");
1167 free(pcs->dmaps, M_DEVBUF);
1168 free(isp->isp_xflist, M_DEVBUF);
1169#ifdef ISP_TARGET_MODE
1170 free(isp->isp_tgtlist, M_DEVBUF);
1171#endif
1172 ISP_LOCK(isp);
1173 return (1);
1174 }
1175
1176 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
1177 &isp->isp_cdmap) != 0) {
1178 isp_prt(isp, ISP_LOGERR,
1179 "cannot allocate %d bytes of CCB memory", len);
1180 bus_dma_tag_destroy(isp->isp_cdmat);
1181 free(isp->isp_xflist, M_DEVBUF);
1182#ifdef ISP_TARGET_MODE
1183 free(isp->isp_tgtlist, M_DEVBUF);
1184#endif
1185 free(pcs->dmaps, M_DEVBUF);
1186 ISP_LOCK(isp);
1187 return (1);
1188 }
1189
1190 for (i = 0; i < isp->isp_maxcmds; i++) {
1191 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
1192 if (error) {
1193 isp_prt(isp, ISP_LOGERR,
1194 "error %d creating per-cmd DMA maps", error);
1195 while (--i >= 0) {
1196 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
1197 }
1198 goto bad;
1199 }
1200 }
1201
1202 im.isp = isp;
1203 im.error = 0;
1204 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
1205 if (im.error) {
1206 isp_prt(isp, ISP_LOGERR,
1207 "error %d loading dma map for control areas", im.error);
1208 goto bad;
1209 }
1210
1211 isp->isp_rquest = base;
1212 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1213 isp->isp_result = base;
1214 if (IS_FC(isp)) {
1215 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1216 FCPARAM(isp)->isp_scratch = base;
1217 }
1218 ISP_LOCK(isp);
1219 return (0);
1220
1221bad:
1222 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
1223 bus_dma_tag_destroy(isp->isp_cdmat);
1224 free(isp->isp_xflist, M_DEVBUF);
1225#ifdef ISP_TARGET_MODE
1226 free(isp->isp_tgtlist, M_DEVBUF);
1227#endif
1228 free(pcs->dmaps, M_DEVBUF);
1229 ISP_LOCK(isp);
1230 isp->isp_rquest = NULL;
1231 return (1);
1232}
1233
1234typedef struct {
1235 struct ispsoftc *isp;
1236 void *cmd_token;
1237 void *rq;
1238 u_int16_t *nxtip;
1239 u_int16_t optr;
1240 u_int error;
1241} mush_t;
1242
1243#define MUSHERR_NOQENTRIES -2
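/*
 * A mush_t carries the state isp_pci_dmasetup hands through the busdma
 * callback: the softc, the CCB, the partially built request queue entry,
 * and the queue in/out pointers. MUSHERR_NOQENTRIES is kept distinct from
 * any errno value so that a request queue overflow can be turned into a
 * retry (CMD_EAGAIN) rather than a command failure.
 */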
1244
1245#ifdef ISP_TARGET_MODE
1246/*
1247 * We need to handle DMA for target mode differently from initiator mode.
1248 *
1249 * DMA mapping and construction and submission of CTIO Request Entries
1250  * and rendezvous for completion are very tightly coupled because we start
1251  * out by knowing (per platform) how much data we have to move, but we
1252  * don't know, up front, how many DMA mapping segments will have to be used
1253  * to cover that data, so we don't know how many CTIO Request Entries we
1254 * will end up using. Further, for performance reasons we may want to
1255 * (on the last CTIO for Fibre Channel), send status too (if all went well).
1256 *
1257 * The standard vector still goes through isp_pci_dmasetup, but the callback
1258 * for the DMA mapping routines comes here instead with the whole transfer
1259 * mapped and a pointer to a partially filled in already allocated request
1260 * queue entry. We finish the job.
1261 */
1262static void tdma_mk(void *, bus_dma_segment_t *, int, int);
1263static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);
1264
1265#define STATUS_WITH_DATA 1
1266
1267static void
1268tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1269{
1270 mush_t *mp;
1271 struct ccb_scsiio *csio;
1272 struct ispsoftc *isp;
1273 struct isp_pcisoftc *pcs;
1274 bus_dmamap_t *dp;
1275 ct_entry_t *cto, *qe;
1276 u_int8_t scsi_status;
1277 u_int16_t curi, nxti, handle;
1278 u_int32_t sflags;
1279 int32_t resid;
1280 int nth_ctio, nctios, send_status;
1281
1282 mp = (mush_t *) arg;
1283 if (error) {
1284 mp->error = error;
1285 return;
1286 }
1287
1288 isp = mp->isp;
1289 csio = mp->cmd_token;
1290 cto = mp->rq;
1291 curi = isp->isp_reqidx;
1292 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
1293
1294 cto->ct_xfrlen = 0;
1295 cto->ct_seg_count = 0;
1296 cto->ct_header.rqs_entry_count = 1;
1297 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1298
1299 if (nseg == 0) {
1300 cto->ct_header.rqs_seqno = 1;
1301 isp_prt(isp, ISP_LOGTDEBUG1,
1302 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
1303 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
1304 cto->ct_tag_val, cto->ct_flags, cto->ct_status,
1305 cto->ct_scsi_status, cto->ct_resid);
1306 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
1307 isp_put_ctio(isp, cto, qe);
1308 return;
1309 }
1310
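	/*
	 * Each CTIO can carry at most ISP_RQDSEG segments, so this is a
	 * ceiling divide of nseg by ISP_RQDSEG.
	 */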
1311 nctios = nseg / ISP_RQDSEG;
1312 if (nseg % ISP_RQDSEG) {
1313 nctios++;
1314 }
1315
1316 /*
1317 * Save syshandle, and potentially any SCSI status, which we'll
1318 * reinsert on the last CTIO we're going to send.
1319 */
1320
1321 handle = cto->ct_syshandle;
1322 cto->ct_syshandle = 0;
1323 cto->ct_header.rqs_seqno = 0;
1324 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;
1325
1326 if (send_status) {
1327 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
1328 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
1329 /*
1330 * Preserve residual.
1331 */
1332 resid = cto->ct_resid;
1333
1334 /*
1335 * Save actual SCSI status.
1336 */
1337 scsi_status = cto->ct_scsi_status;
1338
1339#ifndef STATUS_WITH_DATA
1340 sflags |= CT_NO_DATA;
1341 /*
1342 * We can't do a status at the same time as a data CTIO, so
1343 * we need to synthesize an extra CTIO at this level.
1344 */
1345 nctios++;
1346#endif
1347 } else {
1348 sflags = scsi_status = resid = 0;
1349 }
1350
1351 cto->ct_resid = 0;
1352 cto->ct_scsi_status = 0;
1353
1354 pcs = (struct isp_pcisoftc *)isp;
1355 dp = &pcs->dmaps[isp_handle_index(handle)];
1356 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1357 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
1358 } else {
1359 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
1360 }
1361
1362 nxti = *mp->nxtip;
1363
1364 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
1365 int seglim;
1366
1367 seglim = nseg;
1368 if (seglim) {
1369 int seg;
1370
1371 if (seglim > ISP_RQDSEG)
1372 seglim = ISP_RQDSEG;
1373
1374 for (seg = 0; seg < seglim; seg++, nseg--) {
1375 /*
1376 * Unlike normal initiator commands, we don't
1377 * do any swizzling here.
1378 */
1379 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
1380 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
1381 cto->ct_xfrlen += dm_segs->ds_len;
1382 dm_segs++;
1383 }
1384 cto->ct_seg_count = seg;
1385 } else {
1386 /*
1387 * This case should only happen when we're sending an
1388 * extra CTIO with final status.
1389 */
1390 if (send_status == 0) {
1391 isp_prt(isp, ISP_LOGWARN,
1392 "tdma_mk ran out of segments");
1393 mp->error = EINVAL;
1394 return;
1395 }
1396 }
1397
1398 /*
1399 * At this point, the fields ct_lun, ct_iid, ct_tagval,
1400 * ct_tagtype, and ct_timeout have been carried over
1401 * unchanged from what our caller had set.
1402 *
1403 * The dataseg fields and the seg_count fields we just got
1404 * through setting. The data direction we've preserved all
1405 * along and only clear it if we're now sending status.
1406 */
1407
1408 if (nth_ctio == nctios - 1) {
1409 /*
1410 * We're the last in a sequence of CTIOs, so mark
1411 * this CTIO and save the handle to the CCB such that
1412 * when this CTIO completes we can free dma resources
1413 * and do whatever else we need to do to finish the
1414 * rest of the command. We *don't* give this to the
1415 * firmware to work on- the caller will do that.
1416 */
1417
1418 cto->ct_syshandle = handle;
1419 cto->ct_header.rqs_seqno = 1;
1420
1421 if (send_status) {
1422 cto->ct_scsi_status = scsi_status;
1423 cto->ct_flags |= sflags;
1424 cto->ct_resid = resid;
1425 }
1426 if (send_status) {
1427 isp_prt(isp, ISP_LOGTDEBUG1,
1428 "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
1429 "scsi status %x resid %d",
1430 cto->ct_fwhandle, csio->ccb_h.target_lun,
1431 cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
1432 cto->ct_scsi_status, cto->ct_resid);
1433 } else {
1434 isp_prt(isp, ISP_LOGTDEBUG1,
1435 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
1436 cto->ct_fwhandle, csio->ccb_h.target_lun,
1437 cto->ct_iid, cto->ct_tag_val,
1438 cto->ct_flags);
1439 }
1440 isp_put_ctio(isp, cto, qe);
1441 ISP_TDQE(isp, "last tdma_mk", curi, cto);
1442 if (nctios > 1) {
1443 MEMORYBARRIER(isp, SYNC_REQUEST,
1444 curi, QENTRY_LEN);
1445 }
1446 } else {
1447 ct_entry_t *oqe = qe;
1448
1449 /*
1450 * Make sure syshandle fields are clean
1451 */
1452 cto->ct_syshandle = 0;
1453 cto->ct_header.rqs_seqno = 0;
1454
1455 isp_prt(isp, ISP_LOGTDEBUG1,
1456 "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
1457 cto->ct_fwhandle, csio->ccb_h.target_lun,
1458 cto->ct_iid, cto->ct_flags);
1459
1460 /*
1461 * Get a new CTIO
1462 */
1463 qe = (ct_entry_t *)
1464 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1465 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
1466 if (nxti == mp->optr) {
1467 isp_prt(isp, ISP_LOGTDEBUG0,
1468 "Queue Overflow in tdma_mk");
1469 mp->error = MUSHERR_NOQENTRIES;
1470 return;
1471 }
1472
1473 /*
1474 * Now that we're done with the old CTIO,
1475 * flush it out to the request queue.
1476 */
1477 ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
1478 isp_put_ctio(isp, cto, oqe);
1479 if (nth_ctio != 0) {
1480 MEMORYBARRIER(isp, SYNC_REQUEST, curi,
1481 QENTRY_LEN);
1482 }
1483 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));
1484
1485 /*
1486 * Reset some fields in the CTIO so we can reuse
1487 * for the next one we'll flush to the request
1488 * queue.
1489 */
1490 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1491 cto->ct_header.rqs_entry_count = 1;
1492 cto->ct_header.rqs_flags = 0;
1493 cto->ct_status = 0;
1494 cto->ct_scsi_status = 0;
1495 cto->ct_xfrlen = 0;
1496 cto->ct_resid = 0;
1497 cto->ct_seg_count = 0;
1498 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1499 }
1500 }
1501 *mp->nxtip = nxti;
1502}
1503
1504/*
1505 * We don't have to do multiple CTIOs here. Instead, we can just do
1506  * continuation segments as needed. This greatly simplifies the code and
1507  * improves performance.
1508 */
1509
1510static void
1511tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1512{
1513 mush_t *mp;
1514 struct ccb_scsiio *csio;
1515 struct ispsoftc *isp;
1516 ct2_entry_t *cto, *qe;
1517 u_int16_t curi, nxti;
1518 int segcnt;
1519
1520 mp = (mush_t *) arg;
1521 if (error) {
1522 mp->error = error;
1523 return;
1524 }
1525
1526 isp = mp->isp;
1527 csio = mp->cmd_token;
1528 cto = mp->rq;
1529
1530 curi = isp->isp_reqidx;
1531 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
1532
1533 if (nseg == 0) {
1534 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
1535 isp_prt(isp, ISP_LOGWARN,
1536 "dma2_tgt_fc, a status CTIO2 without MODE1 "
1537 "set (0x%x)", cto->ct_flags);
1538 mp->error = EINVAL;
1539 return;
1540 }
1541 /*
1542 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
1543 * flags to NO DATA and clear relative offset flags.
1544 * We preserve the ct_resid and the response area.
1545 */
1546 cto->ct_header.rqs_seqno = 1;
1547 cto->ct_seg_count = 0;
1548 cto->ct_reloff = 0;
1549 isp_prt(isp, ISP_LOGTDEBUG1,
1550 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
1551 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
1552 cto->ct_iid, cto->ct_flags, cto->ct_status,
1553 cto->rsp.m1.ct_scsi_status, cto->ct_resid);
1554 isp_put_ctio2(isp, cto, qe);
1555 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
1556 return;
1557 }
1558
1559 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
1560 isp_prt(isp, ISP_LOGERR,
1561 "dma2_tgt_fc, a data CTIO2 without MODE0 set "
1562 "(0x%x)", cto->ct_flags);
1563 mp->error = EINVAL;
1564 return;
1565 }
1566
1568 nxti = *mp->nxtip;
1569
1570 /*
1571 * Set up the CTIO2 data segments.
1572 */
1573 for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg;
1574 cto->ct_seg_count++, segcnt++) {
1575 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base =
1576 dm_segs[segcnt].ds_addr;
1577 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count =
1578 dm_segs[segcnt].ds_len;
1579 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
1580 isp_prt(isp, ISP_LOGTDEBUG1,
1581 "isp_send_ctio2: ent0[%d]0x%jx:%ju",
1582 cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr,
1583 (uintmax_t)dm_segs[segcnt].ds_len);
1584 }
1585
1586 while (segcnt < nseg) {
1587 u_int16_t curip;
1588 int seg;
1589 ispcontreq_t local, *crq = &local, *qep;
1590
1591 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1592 curip = nxti;
1593 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
1594 if (nxti == mp->optr) {
1596 isp_prt(isp, ISP_LOGTDEBUG0,
1597 "tdma_mkfc: request queue overflow");
1598 mp->error = MUSHERR_NOQENTRIES;
1599 return;
1600 }
1601 cto->ct_header.rqs_entry_count++;
1602 MEMZERO((void *)crq, sizeof (*crq));
1603 crq->req_header.rqs_entry_count = 1;
1604 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1605 for (seg = 0; segcnt < nseg && seg < ISP_CDSEG;
1606 segcnt++, seg++) {
1607 crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr;
1608 crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len;
1609 isp_prt(isp, ISP_LOGTDEBUG1,
1610 "isp_send_ctio2: ent%d[%d]%jx:%ju",
1611 cto->ct_header.rqs_entry_count-1, seg,
1612 (uintmax_t)dm_segs[segcnt].ds_addr,
1613 (uintmax_t)dm_segs[segcnt].ds_len);
1614 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
1615 cto->ct_seg_count++;
1616 }
1617 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
1618 isp_put_cont_req(isp, crq, qep);
1619 ISP_TDQE(isp, "cont entry", curi, qep);
1620 }
1621
1622 /*
1623  * Now do final twiddling for the CTIO itself.
1624 */
1625 cto->ct_header.rqs_seqno = 1;
1626 isp_prt(isp, ISP_LOGTDEBUG1,
1627 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
1628 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
1629 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
1630 cto->ct_resid);
1631 isp_put_ctio2(isp, cto, qe);
1632 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
1633 *mp->nxtip = nxti;
1634}
1635#endif
1636
1637static void dma2(void *, bus_dma_segment_t *, int, int);
1638
1639#ifdef PAE
1640 #define LOWD(x) ((uint32_t) (x))
1641 #define HIWD(x) ((uint32_t) ((x) >> 32))
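/*
 * With PAE, bus addresses may exceed 32 bits, so we build 64-bit capable
 * (A64 and Type 3) request entries and split each segment address into
 * low and high 32-bit words.
 */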
1642
1643static void
1644dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1645{
1646 mush_t *mp;
1647 struct ispsoftc *isp;
1648 struct ccb_scsiio *csio;
1649 struct isp_pcisoftc *pcs;
1650 bus_dmamap_t *dp;
1651 bus_dma_segment_t *eseg;
1652 ispreq64_t *rq;
1653 int seglim, datalen;
1654 u_int16_t nxti;
1655
1656 mp = (mush_t *) arg;
1657 if (error) {
1658 mp->error = error;
1659 return;
1660 }
1661
1662 if (nseg < 1) {
1663 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
1664 mp->error = EFAULT;
1665 return;
1666 }
1667 csio = mp->cmd_token;
1668 isp = mp->isp;
1669 rq = mp->rq;
1670 pcs = (struct isp_pcisoftc *)mp->isp;
1671 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
1672 nxti = *mp->nxtip;
1673
1674 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1675 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
1676 } else {
1677 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
1678 }
1679 datalen = XS_XFRLEN(csio);
1680
1681 /*
1682 * We're passed an initial partially filled in entry that
1683 * has most fields filled in except for data transfer
1684 * related values.
1685 *
1686 * Our job is to fill in the initial request queue entry and
1687 * then to start allocating and filling in continuation entries
1688 * until we've covered the entire transfer.
1689 */
1690
1691 if (IS_FC(isp)) {
1692 rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
1693 seglim = ISP_RQDSEG_T3;
1694 ((ispreqt3_t *)rq)->req_totalcnt = datalen;
1695 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1696 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
1697 } else {
1698 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
1699 }
1700 } else {
1701 rq->req_header.rqs_entry_type = RQSTYPE_A64;
1702 if (csio->cdb_len > 12) {
1703 seglim = 0;
1704 } else {
1705 seglim = ISP_RQDSEG_A64;
1706 }
1707 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1708 rq->req_flags |= REQFLAG_DATA_IN;
1709 } else {
1710 rq->req_flags |= REQFLAG_DATA_OUT;
1711 }
1712 }
1713
1714 eseg = dm_segs + nseg;
1715
1716 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
1717 if (IS_FC(isp)) {
1718 ispreqt3_t *rq3 = (ispreqt3_t *)rq;
1719 rq3->req_dataseg[rq3->req_seg_count].ds_base =
1720 LOWD(dm_segs->ds_addr);
1721 rq3->req_dataseg[rq3->req_seg_count].ds_basehi =
1722 HIWD(dm_segs->ds_addr);
1723 rq3->req_dataseg[rq3->req_seg_count].ds_count =
1724 dm_segs->ds_len;
1725 } else {
1726 rq->req_dataseg[rq->req_seg_count].ds_base =
1727 LOWD(dm_segs->ds_addr);
1728 rq->req_dataseg[rq->req_seg_count].ds_basehi =
1729 HIWD(dm_segs->ds_addr);
1730 rq->req_dataseg[rq->req_seg_count].ds_count =
1731 dm_segs->ds_len;
1732 }
1733 datalen -= dm_segs->ds_len;
1734 rq->req_seg_count++;
1735 dm_segs++;
1736 }
1737
1738 while (datalen > 0 && dm_segs != eseg) {
1739 u_int16_t onxti;
1740 ispcontreq64_t local, *crq = &local, *cqe;
1741
1742 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1743 onxti = nxti;
1744 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
1745 if (nxti == mp->optr) {
1746 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
1747 mp->error = MUSHERR_NOQENTRIES;
1748 return;
1749 }
1750 rq->req_header.rqs_entry_count++;
1751 MEMZERO((void *)crq, sizeof (*crq));
1752 crq->req_header.rqs_entry_count = 1;
1753 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
1754
1755 seglim = 0;
1756 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
1757 crq->req_dataseg[seglim].ds_base =
1758 LOWD(dm_segs->ds_addr);
1759 crq->req_dataseg[seglim].ds_basehi =
1760 HIWD(dm_segs->ds_addr);
1761 crq->req_dataseg[seglim].ds_count =
1762 dm_segs->ds_len;
1763 			datalen -= dm_segs->ds_len; /* before dm_segs advances */
1764 			rq->req_seg_count++;
1765 			dm_segs++;
1766 			seglim++;
1767 }
1768 isp_put_cont64_req(isp, crq, cqe);
1769 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
1770 }
1771 *mp->nxtip = nxti;
1772}
1773#else
1774static void
1775dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1776{
1777 mush_t *mp;
1778 struct ispsoftc *isp;
1779 struct ccb_scsiio *csio;
1780 struct isp_pcisoftc *pcs;
1781 bus_dmamap_t *dp;
1782 bus_dma_segment_t *eseg;
1783 ispreq_t *rq;
1784 int seglim, datalen;
1785 u_int16_t nxti;
1786
1787 mp = (mush_t *) arg;
1788 if (error) {
1789 mp->error = error;
1790 return;
1791 }
1792
1793 if (nseg < 1) {
1794 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
1795 mp->error = EFAULT;
1796 return;
1797 }
1798 csio = mp->cmd_token;
1799 isp = mp->isp;
1800 rq = mp->rq;
1801 pcs = (struct isp_pcisoftc *)mp->isp;
1802 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
1803 nxti = *mp->nxtip;
1804
1805 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1806 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
1807 } else {
1808 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
1809 }
1810
1811 datalen = XS_XFRLEN(csio);
1812
1813 /*
1814 * We're passed an initial partially filled in entry that
1815 * has most fields filled in except for data transfer
1816 * related values.
1817 *
1818 * Our job is to fill in the initial request queue entry and
1819 * then to start allocating and filling in continuation entries
1820 * until we've covered the entire transfer.
1821 */
1822
1823 if (IS_FC(isp)) {
1824 seglim = ISP_RQDSEG_T2;
1825 ((ispreqt2_t *)rq)->req_totalcnt = datalen;
1826 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1827 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
1828 } else {
1829 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
1830 }
1831 } else {
1832 if (csio->cdb_len > 12) {
1833 seglim = 0;
1834 } else {
1835 seglim = ISP_RQDSEG;
1836 }
1837 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1838 rq->req_flags |= REQFLAG_DATA_IN;
1839 } else {
1840 rq->req_flags |= REQFLAG_DATA_OUT;
1841 }
1842 }
1843
1844 eseg = dm_segs + nseg;
1845
1846 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
1847 if (IS_FC(isp)) {
1848 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
1849 rq2->req_dataseg[rq2->req_seg_count].ds_base =
1850 dm_segs->ds_addr;
1851 rq2->req_dataseg[rq2->req_seg_count].ds_count =
1852 dm_segs->ds_len;
1853 } else {
1854 rq->req_dataseg[rq->req_seg_count].ds_base =
1855 dm_segs->ds_addr;
1856 rq->req_dataseg[rq->req_seg_count].ds_count =
1857 dm_segs->ds_len;
1858 }
1859 datalen -= dm_segs->ds_len;
1860 rq->req_seg_count++;
1861 dm_segs++;
1862 }
1863
1864 while (datalen > 0 && dm_segs != eseg) {
1865 u_int16_t onxti;
1866 ispcontreq_t local, *crq = &local, *cqe;
1867
1868 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1869 onxti = nxti;
1870 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
1871 if (nxti == mp->optr) {
1872 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
1873 mp->error = MUSHERR_NOQENTRIES;
1874 return;
1875 }
1876 rq->req_header.rqs_entry_count++;
1877 MEMZERO((void *)crq, sizeof (*crq));
1878 crq->req_header.rqs_entry_count = 1;
1879 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1880
1881 seglim = 0;
1882 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
1883 crq->req_dataseg[seglim].ds_base =
1884 dm_segs->ds_addr;
1885 crq->req_dataseg[seglim].ds_count =
1886 dm_segs->ds_len;
1887 			datalen -= dm_segs->ds_len; /* before dm_segs advances */
1888 			rq->req_seg_count++;
1889 			dm_segs++;
1890 			seglim++;
1891 }
1892 isp_put_cont_req(isp, crq, cqe);
1893 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
1894 }
1895 *mp->nxtip = nxti;
1896}
1897#endif
1898
1899static int
1900isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
1901 u_int16_t *nxtip, u_int16_t optr)
1902{
1903 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1904 ispreq_t *qep;
1905 bus_dmamap_t *dp = NULL;
1906 mush_t mush, *mp;
1907 void (*eptr)(void *, bus_dma_segment_t *, int, int);
1908
1909 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
1910#ifdef ISP_TARGET_MODE
1911 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
1912 if (IS_FC(isp)) {
1913 eptr = tdma_mkfc;
1914 } else {
1915 eptr = tdma_mk;
1916 }
1917 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
1918 (csio->dxfer_len == 0)) {
1919 mp = &mush;
1920 mp->isp = isp;
1921 mp->cmd_token = csio;
1922 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */
1923 mp->nxtip = nxtip;
1924 mp->optr = optr;
1925 mp->error = 0;
1926 (*eptr)(mp, NULL, 0, 0);
1927 goto mbxsync;
1928 }
1929 } else
1930#endif
1931 eptr = dma2;
1932
1934 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
1935 (csio->dxfer_len == 0)) {
1936 rq->req_seg_count = 1;
1937 goto mbxsync;
1938 }
1939
1940 	/*
1941 	 * Do a virtual grapevine step to collect the info that the
1942 	 * callback-based dma mapping we have to use will need.
1943 	 */
1944 mp = &mush;
1945 mp->isp = isp;
1946 mp->cmd_token = csio;
1947 mp->rq = rq;
1948 mp->nxtip = nxtip;
1949 mp->optr = optr;
1950 mp->error = 0;
1951
1952 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
1953 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
1954 int error, s;
1955 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
1956 s = splsoftvm();
1957 error = bus_dmamap_load(pcs->dmat, *dp,
1958 csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
1959 if (error == EINPROGRESS) {
1960 bus_dmamap_unload(pcs->dmat, *dp);
1961 mp->error = EINVAL;
1962 isp_prt(isp, ISP_LOGERR,
1963 "deferred dma allocation not supported");
1964 } else if (error && mp->error == 0) {
1965#ifdef DIAGNOSTIC
1966 isp_prt(isp, ISP_LOGERR,
1967 "error %d in dma mapping code", error);
1968#endif
1969 mp->error = error;
1970 }
1971 splx(s);
1972 } else {
1973 /* Pointer to physical buffer */
1974 struct bus_dma_segment seg;
1975 seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
1976 seg.ds_len = csio->dxfer_len;
1977 (*eptr)(mp, &seg, 1, 0);
1978 }
1979 } else {
1980 struct bus_dma_segment *segs;
1981
1982 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
1983 isp_prt(isp, ISP_LOGERR,
1984 "Physical segment pointers unsupported");
1985 mp->error = EINVAL;
1986 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1987 isp_prt(isp, ISP_LOGERR,
1988 "Virtual segment addresses unsupported");
1989 mp->error = EINVAL;
1990 } else {
1991 /* Just use the segments provided */
1992 segs = (struct bus_dma_segment *) csio->data_ptr;
1993 (*eptr)(mp, segs, csio->sglist_cnt, 0);
1994 }
1995 }
1996 if (mp->error) {
1997 int retval = CMD_COMPLETE;
1998 if (mp->error == MUSHERR_NOQENTRIES) {
1999 retval = CMD_EAGAIN;
2000 } else if (mp->error == EFBIG) {
2001 XS_SETERR(csio, CAM_REQ_TOO_BIG);
2002 } else if (mp->error == EINVAL) {
2003 XS_SETERR(csio, CAM_REQ_INVALID);
2004 } else {
2005 XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
2006 }
2007 return (retval);
2008 }
2009mbxsync:
2010 switch (rq->req_header.rqs_entry_type) {
2011 case RQSTYPE_REQUEST:
2012 isp_put_request(isp, rq, qep);
2013 break;
2014 case RQSTYPE_CMDONLY:
2015 isp_put_extended_request(isp, (ispextreq_t *)rq,
2016 (ispextreq_t *)qep);
2017 break;
2018 case RQSTYPE_T2RQS:
2019 isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
2020 break;
2021 case RQSTYPE_A64:
2022 case RQSTYPE_T3RQS:
2023 isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
2024 break;
2025 }
2026 return (CMD_QUEUED);
2027}
2028
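/*
 * Called at command completion: complete the DMA sync in the direction
 * the data moved and unload the per-command map so it can be reused.
 */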
2029static void
2030isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
2031{
2032 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2033 bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
2034 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2035 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
2036 } else {
2037 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
2038 }
2039 bus_dmamap_unload(pcs->dmat, *dp);
2040}
2041
2043static void
2044isp_pci_reset1(struct ispsoftc *isp)
2045{
2046 /* Make sure the BIOS is disabled */
2047 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
2048 /* and enable interrupts */
2049 ENABLE_INTS(isp);
2050}
2051
2052static void
2053isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
2054{
2055 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2056 if (msg)
2057 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
2058 else
2059 printf("%s:\n", device_get_nameunit(isp->isp_dev));
2060 if (IS_SCSI(isp))
2061 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
2062 else
2063 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
2064 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
2065 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
2066 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
2067
2069 if (IS_SCSI(isp)) {
2070 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
2071 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
2072 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
2073 ISP_READ(isp, CDMA_FIFO_STS));
2074 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
2075 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
2076 ISP_READ(isp, DDMA_FIFO_STS));
2077 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
2078 ISP_READ(isp, SXP_INTERRUPT),
2079 ISP_READ(isp, SXP_GROSS_ERR),
2080 ISP_READ(isp, SXP_PINS_CTRL));
2081 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
2082 }
2083 printf(" mbox regs: %x %x %x %x %x\n",
2084 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
2085 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
2086 ISP_READ(isp, OUTMAILBOX4));
2087 printf(" PCI Status Command/Status=%x\n",
2088 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
2089}
770 isp->isp_port = pci_get_function(dev);
771 }
772
773 /*
774 * Make sure we're in reset state.
775 */
776 ISP_LOCK(isp);
777 isp_reset(isp);
778 if (isp->isp_state != ISP_RESETSTATE) {
779 ISP_UNLOCK(isp);
780 goto bad;
781 }
782 isp_init(isp);
783 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
784 isp_uninit(isp);
785 ISP_UNLOCK(isp);
786 goto bad;
787 }
788 isp_attach(isp);
789 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
790 isp_uninit(isp);
791 ISP_UNLOCK(isp);
792 goto bad;
793 }
794 /*
795 * XXXX: Here is where we might unload the f/w module
796 * XXXX: (or decrease the reference count to it).
797 */
798 ISP_UNLOCK(isp);
799 return (0);
800
801bad:
802
803 if (pcs && pcs->ih) {
804 (void) bus_teardown_intr(dev, irq, pcs->ih);
805 }
806
807 if (locksetup && isp) {
808 mtx_destroy(&isp->isp_osinfo.lock);
809 }
810
811 if (irq) {
812 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
813 }
814
816 if (regs) {
817 (void) bus_release_resource(dev, rtp, rgd, regs);
818 }
819
820 if (pcs) {
821 if (pcs->pci_isp.isp_param)
822 free(pcs->pci_isp.isp_param, M_DEVBUF);
823 free(pcs, M_DEVBUF);
824 }
825
826 /*
827 * XXXX: Here is where we might unload the f/w module
828 * XXXX: (or decrease the reference count to it).
829 */
830 return (ENXIO);
831}
832
833static void
834isp_pci_intr(void *arg)
835{
836 struct ispsoftc *isp = arg;
837 u_int16_t isr, sema, mbox;
838
839 ISP_LOCK(isp);
840 isp->isp_intcnt++;
841 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
842 isp->isp_intbogus++;
843 } else {
844 int iok = isp->isp_osinfo.intsok;
845 isp->isp_osinfo.intsok = 0;
846 isp_intr(isp, isr, sema, mbox);
847 isp->isp_osinfo.intsok = iok;
848 }
849 ISP_UNLOCK(isp);
850}
851
852
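/*
 * IspVirt2Off translates the core code's virtual register offsets into
 * PCI bus space offsets through the per-block pci_poff[] table; BXR2 and
 * BXW2 are the 16-bit bus space accessors built on top of it.
 */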
853#define IspVirt2Off(a, x) \
854 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
855 _BLK_REG_SHFT] + ((x) & 0xff))
856
857#define BXR2(pcs, off) \
858 bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
859#define BXW2(pcs, off, v) \
860 bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
861
862
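/*
 * On the 2100, back-to-back reads of a register can disagree, so read
 * until two consecutive values match, giving up after 1000 attempts.
 */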
863static INLINE int
864isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
865{
866 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
867 u_int16_t val0, val1;
868 int i = 0;
869
870 do {
871 val0 = BXR2(pcs, IspVirt2Off(isp, off));
872 val1 = BXR2(pcs, IspVirt2Off(isp, off));
873 } while (val0 != val1 && ++i < 1000);
874 if (val0 != val1) {
875 return (1);
876 }
877 *rp = val0;
878 return (0);
879}
880
881static int
882isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
883 u_int16_t *semap, u_int16_t *mbp)
884{
885 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
886 u_int16_t isr, sema;
887
888 if (IS_2100(isp)) {
889 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
890 return (0);
891 }
892 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
893 return (0);
894 }
895 } else {
896 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
897 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
898 }
899 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
900 isr &= INT_PENDING_MASK(isp);
901 sema &= BIU_SEMA_LOCK;
902 if (isr == 0 && sema == 0) {
903 return (0);
904 }
905 *isrp = isr;
906 if ((*semap = sema) != 0) {
907 if (IS_2100(isp)) {
908 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
909 return (0);
910 }
911 } else {
912 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
913 }
914 }
915 return (1);
916}
917
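/*
 * On 2300 and later chips a single 32-bit read of BIU_R2HSTSLO returns
 * the interrupt status in the low 16 bits and mailbox 0 in the high 16
 * bits; decode that into the isr/sema/mbox triple the core code expects.
 */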
918static int
919isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
920 u_int16_t *semap, u_int16_t *mbox0p)
921{
922 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
923 u_int32_t r2hisr;
924
925 	if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
926 *isrp = 0;
927 return (0);
928 }
929 r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
930 	    IspVirt2Off(isp, BIU_R2HSTSLO));
931 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
932 if ((r2hisr & BIU_R2HST_INTR) == 0) {
933 *isrp = 0;
934 return (0);
935 }
936 switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
937 case ISPR2HST_ROM_MBX_OK:
938 case ISPR2HST_ROM_MBX_FAIL:
939 case ISPR2HST_MBX_OK:
940 case ISPR2HST_MBX_FAIL:
941 case ISPR2HST_ASYNC_EVENT:
942 *isrp = r2hisr & 0xffff;
943 *mbox0p = (r2hisr >> 16);
944 *semap = 1;
945 return (1);
946 case ISPR2HST_RIO_16:
947 *isrp = r2hisr & 0xffff;
948 *mbox0p = ASYNC_RIO1;
949 *semap = 1;
950 return (1);
951 case ISPR2HST_FPOST:
952 *isrp = r2hisr & 0xffff;
953 *mbox0p = ASYNC_CMD_CMPLT;
954 *semap = 1;
955 return (1);
956 case ISPR2HST_FPOST_CTIO:
957 *isrp = r2hisr & 0xffff;
958 *mbox0p = ASYNC_CTIO_DONE;
959 *semap = 1;
960 return (1);
961 case ISPR2HST_RSPQ_UPDATE:
962 *isrp = r2hisr & 0xffff;
963 *mbox0p = 0;
964 *semap = 0;
965 return (1);
966 default:
967 return (0);
968 }
969}
970
971static u_int16_t
972isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
973{
974 u_int16_t rv;
975 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
976 int oldconf = 0;
977
978 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
979 /*
980 * We will assume that someone has paused the RISC processor.
981 */
982 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
983 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
984 oldconf | BIU_PCI_CONF1_SXP);
985 }
986 rv = BXR2(pcs, IspVirt2Off(isp, regoff));
987 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
988 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
989 }
990 return (rv);
991}
992
993static void
994isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
995{
996 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
997 int oldconf = 0;
998
999 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1000 /*
1001 * We will assume that someone has paused the RISC processor.
1002 */
1003 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1004 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1005 oldconf | BIU_PCI_CONF1_SXP);
1006 }
1007 BXW2(pcs, IspVirt2Off(isp, regoff), val);
1008 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1009 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
1010 }
1011}
1012
1013static u_int16_t
1014isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
1015{
1016 u_int16_t rv, oc = 0;
1017 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1018
1019 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
1020 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
1021 u_int16_t tc;
1022 /*
1023 * We will assume that someone has paused the RISC processor.
1024 */
1025 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1026 tc = oc & ~BIU_PCI1080_CONF1_DMA;
1027 if (regoff & SXP_BANK1_SELECT)
1028 tc |= BIU_PCI1080_CONF1_SXP1;
1029 else
1030 tc |= BIU_PCI1080_CONF1_SXP0;
1031 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
1032 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1033 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1034 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1035 oc | BIU_PCI1080_CONF1_DMA);
1036 }
1037 rv = BXR2(pcs, IspVirt2Off(isp, regoff));
1038 if (oc) {
1039 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
1040 }
1041 return (rv);
1042}
1043
1044static void
1045isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
1046{
1047 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1048 int oc = 0;
1049
1050 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
1051 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
1052 u_int16_t tc;
1053 /*
1054 * We will assume that someone has paused the RISC processor.
1055 */
1056 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1057 tc = oc & ~BIU_PCI1080_CONF1_DMA;
1058 if (regoff & SXP_BANK1_SELECT)
1059 tc |= BIU_PCI1080_CONF1_SXP1;
1060 else
1061 tc |= BIU_PCI1080_CONF1_SXP0;
1062 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
1063 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1064 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1065 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1066 oc | BIU_PCI1080_CONF1_DMA);
1067 }
1068 BXW2(pcs, IspVirt2Off(isp, regoff), val);
1069 if (oc) {
1070 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
1071 }
1072}
1073
1074
1075struct imush {
1076 struct ispsoftc *isp;
1077 int error;
1078};
1079
1080static void imc(void *, bus_dma_segment_t *, int, int);
1081
1082static void
1083imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1084{
1085 struct imush *imushp = (struct imush *) arg;
1086 if (error) {
1087 imushp->error = error;
1088 } else {
1089 struct ispsoftc *isp =imushp->isp;
1090 bus_addr_t addr = segs->ds_addr;
1091
1092 isp->isp_rquest_dma = addr;
1093 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1094 isp->isp_result_dma = addr;
1095 if (IS_FC(isp)) {
1096 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1097 FCPARAM(isp)->isp_scdma = addr;
1098 }
1099 }
1100}
1101
1102/*
1103 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
1104 */
1105#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)
1106
1107static int
1108isp_pci_mbxdma(struct ispsoftc *isp)
1109{
1110 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1111 caddr_t base;
1112 u_int32_t len;
1113 int i, error, ns;
1114 bus_size_t alim, slim, xlim;
1115 struct imush im;
1116
1117 /*
1118 * Already been here? If so, leave...
1119 */
1120 if (isp->isp_rquest) {
1121 return (0);
1122 }
1123
1124#ifdef ISP_DAC_SUPPORTED
1125 alim = BUS_SPACE_UNRESTRICTED;
1126 xlim = BUS_SPACE_MAXADDR_32BIT;
1127#else
1128 xlim = alim = BUS_SPACE_MAXADDR_32BIT;
1129#endif
1130 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
1131 slim = BUS_SPACE_MAXADDR_32BIT;
1132 } else {
1133 slim = BUS_SPACE_MAXADDR_24BIT;
1134 }
1135
1136 ISP_UNLOCK(isp);
1137 if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim,
1138 NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0,
1139 busdma_lock_mutex, &Giant, &pcs->dmat)) {
1140 isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
1141 ISP_LOCK(isp);
1142 return(1);
1143 }
1144
1145
1146 len = sizeof (XS_T **) * isp->isp_maxcmds;
1147 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1148 if (isp->isp_xflist == NULL) {
1149 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
1150 ISP_LOCK(isp);
1151 return (1);
1152 }
1153#ifdef ISP_TARGET_MODE
1154 len = sizeof (void **) * isp->isp_maxcmds;
1155 isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1156 if (isp->isp_tgtlist == NULL) {
1157 isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
1158 ISP_LOCK(isp);
1159 return (1);
1160 }
1161#endif
1162 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
1163 pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
1164 if (pcs->dmaps == NULL) {
1165 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
1166 free(isp->isp_xflist, M_DEVBUF);
1167#ifdef ISP_TARGET_MODE
1168 free(isp->isp_tgtlist, M_DEVBUF);
1169#endif
1170 ISP_LOCK(isp);
1171 return (1);
1172 }
1173
1174 /*
1175 * Allocate and map the request, result queues, plus FC scratch area.
1176 */
1177 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1178 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1179 if (IS_FC(isp)) {
1180 len += ISP2100_SCRLEN;
1181 }
1182
1183 ns = (len / PAGE_SIZE) + 1;
1184 if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, xlim, xlim,
1185 NULL, NULL, len, ns, slim, 0, busdma_lock_mutex, &Giant,
1186 &isp->isp_cdmat)) {
1187 isp_prt(isp, ISP_LOGERR,
1188 "cannot create a dma tag for control spaces");
1189 free(pcs->dmaps, M_DEVBUF);
1190 free(isp->isp_xflist, M_DEVBUF);
1191#ifdef ISP_TARGET_MODE
1192 free(isp->isp_tgtlist, M_DEVBUF);
1193#endif
1194 ISP_LOCK(isp);
1195 return (1);
1196 }
1197
1198 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
1199 &isp->isp_cdmap) != 0) {
1200 isp_prt(isp, ISP_LOGERR,
1201 "cannot allocate %d bytes of CCB memory", len);
1202 bus_dma_tag_destroy(isp->isp_cdmat);
1203 free(isp->isp_xflist, M_DEVBUF);
1204#ifdef ISP_TARGET_MODE
1205 free(isp->isp_tgtlist, M_DEVBUF);
1206#endif
1207 free(pcs->dmaps, M_DEVBUF);
1208 ISP_LOCK(isp);
1209 return (1);
1210 }
1211
1212 for (i = 0; i < isp->isp_maxcmds; i++) {
1213 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
1214 if (error) {
1215 isp_prt(isp, ISP_LOGERR,
1216 "error %d creating per-cmd DMA maps", error);
1217 while (--i >= 0) {
1218 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
1219 }
1220 goto bad;
1221 }
1222 }
1223
1224 im.isp = isp;
1225 im.error = 0;
1226 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
1227 if (im.error) {
1228 isp_prt(isp, ISP_LOGERR,
1229 "error %d loading dma map for control areas", im.error);
1230 goto bad;
1231 }
1232
1233 isp->isp_rquest = base;
1234 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1235 isp->isp_result = base;
1236 if (IS_FC(isp)) {
1237 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1238 FCPARAM(isp)->isp_scratch = base;
1239 }
1240 ISP_LOCK(isp);
1241 return (0);
1242
1243bad:
1244 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
1245 bus_dma_tag_destroy(isp->isp_cdmat);
1246 free(isp->isp_xflist, M_DEVBUF);
1247#ifdef ISP_TARGET_MODE
1248 free(isp->isp_tgtlist, M_DEVBUF);
1249#endif
1250 free(pcs->dmaps, M_DEVBUF);
1251 ISP_LOCK(isp);
1252 isp->isp_rquest = NULL;
1253 return (1);
1254}
1255
1256typedef struct {
1257 struct ispsoftc *isp;
1258 void *cmd_token;
1259 void *rq;
1260 u_int16_t *nxtip;
1261 u_int16_t optr;
1262 u_int error;
1263} mush_t;
1264
1265#define MUSHERR_NOQENTRIES -2
1266
1267#ifdef ISP_TARGET_MODE
1268/*
1269 * We need to handle DMA for target mode differently from initiator mode.
1270 *
1271 * DMA mapping and construction and submission of CTIO Request Entries
1272 * and rendevous for completion are very tightly coupled because we start
1273 * out by knowing (per platform) how much data we have to move, but we
1274 * don't know, up front, how many DMA mapping segments will have to be used
1275 * cover that data, so we don't know how many CTIO Request Entries we
1276 * will end up using. Further, for performance reasons we may want to
1277 * (on the last CTIO for Fibre Channel), send status too (if all went well).
1278 *
1279 * The standard vector still goes through isp_pci_dmasetup, but the callback
1280 * for the DMA mapping routines comes here instead with the whole transfer
1281 * mapped and a pointer to a partially filled in already allocated request
1282 * queue entry. We finish the job.
1283 */
1284static void tdma_mk(void *, bus_dma_segment_t *, int, int);
1285static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);
1286
1287#define STATUS_WITH_DATA 1
1288
1289static void
1290tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1291{
1292 mush_t *mp;
1293 struct ccb_scsiio *csio;
1294 struct ispsoftc *isp;
1295 struct isp_pcisoftc *pcs;
1296 bus_dmamap_t *dp;
1297 ct_entry_t *cto, *qe;
1298 u_int8_t scsi_status;
1299 u_int16_t curi, nxti, handle;
1300 u_int32_t sflags;
1301 int32_t resid;
1302 int nth_ctio, nctios, send_status;
1303
1304 mp = (mush_t *) arg;
1305 if (error) {
1306 mp->error = error;
1307 return;
1308 }
1309
1310 isp = mp->isp;
1311 csio = mp->cmd_token;
1312 cto = mp->rq;
1313 curi = isp->isp_reqidx;
1314 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
1315
1316 cto->ct_xfrlen = 0;
1317 cto->ct_seg_count = 0;
1318 cto->ct_header.rqs_entry_count = 1;
1319 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1320
1321 if (nseg == 0) {
1322 cto->ct_header.rqs_seqno = 1;
1323 isp_prt(isp, ISP_LOGTDEBUG1,
1324 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
1325 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
1326 cto->ct_tag_val, cto->ct_flags, cto->ct_status,
1327 cto->ct_scsi_status, cto->ct_resid);
1328 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
1329 isp_put_ctio(isp, cto, qe);
1330 return;
1331 }
1332
1333 nctios = nseg / ISP_RQDSEG;
1334 if (nseg % ISP_RQDSEG) {
1335 nctios++;
1336 }
1337
1338 /*
1339 * Save syshandle, and potentially any SCSI status, which we'll
1340 * reinsert on the last CTIO we're going to send.
1341 */
1342
1343 handle = cto->ct_syshandle;
1344 cto->ct_syshandle = 0;
1345 cto->ct_header.rqs_seqno = 0;
1346 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;
1347
1348 if (send_status) {
1349 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
1350 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
1351 /*
1352 * Preserve residual.
1353 */
1354 resid = cto->ct_resid;
1355
1356 /*
1357 * Save actual SCSI status.
1358 */
1359 scsi_status = cto->ct_scsi_status;
1360
1361#ifndef STATUS_WITH_DATA
1362 sflags |= CT_NO_DATA;
1363 /*
1364 * We can't do a status at the same time as a data CTIO, so
1365 * we need to synthesize an extra CTIO at this level.
1366 */
1367 nctios++;
1368#endif
1369 } else {
1370 sflags = scsi_status = resid = 0;
1371 }
1372
1373 cto->ct_resid = 0;
1374 cto->ct_scsi_status = 0;
1375
1376 pcs = (struct isp_pcisoftc *)isp;
1377 dp = &pcs->dmaps[isp_handle_index(handle)];
1378 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1379 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
1380 } else {
1381 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
1382 }
1383
1384 nxti = *mp->nxtip;
1385
1386 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
1387 int seglim;
1388
1389 seglim = nseg;
1390 if (seglim) {
1391 int seg;
1392
1393 if (seglim > ISP_RQDSEG)
1394 seglim = ISP_RQDSEG;
1395
1396 for (seg = 0; seg < seglim; seg++, nseg--) {
1397 /*
1398 * Unlike normal initiator commands, we don't
1399 * do any swizzling here.
1400 */
1401 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
1402 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
1403 cto->ct_xfrlen += dm_segs->ds_len;
1404 dm_segs++;
1405 }
1406 cto->ct_seg_count = seg;
1407 } else {
1408 /*
1409 * This case should only happen when we're sending an
1410 * extra CTIO with final status.
1411 */
1412 if (send_status == 0) {
1413 isp_prt(isp, ISP_LOGWARN,
1414 "tdma_mk ran out of segments");
1415 mp->error = EINVAL;
1416 return;
1417 }
1418 }
1419
1420 /*
1421 * At this point, the fields ct_lun, ct_iid, ct_tagval,
1422 * ct_tagtype, and ct_timeout have been carried over
1423 * unchanged from what our caller had set.
1424 *
1425 * The dataseg fields and the seg_count fields we just got
1426 * through setting. The data direction we've preserved all
1427 * along and only clear it if we're now sending status.
1428 */
1429
1430 if (nth_ctio == nctios - 1) {
1431 /*
1432 * We're the last in a sequence of CTIOs, so mark
1433 * this CTIO and save the handle to the CCB such that
1434 * when this CTIO completes we can free dma resources
1435 * and do whatever else we need to do to finish the
1436 * rest of the command. We *don't* give this to the
1437 * firmware to work on- the caller will do that.
1438 */
1439
1440 cto->ct_syshandle = handle;
1441 cto->ct_header.rqs_seqno = 1;
1442
1443 if (send_status) {
1444 cto->ct_scsi_status = scsi_status;
1445 cto->ct_flags |= sflags;
1446 cto->ct_resid = resid;
1447 }
1448 if (send_status) {
1449 isp_prt(isp, ISP_LOGTDEBUG1,
1450 "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
1451 "scsi status %x resid %d",
1452 cto->ct_fwhandle, csio->ccb_h.target_lun,
1453 cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
1454 cto->ct_scsi_status, cto->ct_resid);
1455 } else {
1456 isp_prt(isp, ISP_LOGTDEBUG1,
1457 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
1458 cto->ct_fwhandle, csio->ccb_h.target_lun,
1459 cto->ct_iid, cto->ct_tag_val,
1460 cto->ct_flags);
1461 }
1462 isp_put_ctio(isp, cto, qe);
1463 ISP_TDQE(isp, "last tdma_mk", curi, cto);
1464 if (nctios > 1) {
1465 MEMORYBARRIER(isp, SYNC_REQUEST,
1466 curi, QENTRY_LEN);
1467 }
1468 } else {
1469 ct_entry_t *oqe = qe;
1470
1471 /*
1472 * Make sure syshandle fields are clean
1473 */
1474 cto->ct_syshandle = 0;
1475 cto->ct_header.rqs_seqno = 0;
1476
1477 isp_prt(isp, ISP_LOGTDEBUG1,
1478 "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
1479 cto->ct_fwhandle, csio->ccb_h.target_lun,
1480 cto->ct_iid, cto->ct_flags);
1481
1482 /*
1483 * Get a new CTIO
1484 */
1485 qe = (ct_entry_t *)
1486 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1487 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
1488 if (nxti == mp->optr) {
1489 isp_prt(isp, ISP_LOGTDEBUG0,
1490 "Queue Overflow in tdma_mk");
1491 mp->error = MUSHERR_NOQENTRIES;
1492 return;
1493 }
1494
1495 /*
1496 * Now that we're done with the old CTIO,
1497 * flush it out to the request queue.
1498 */
1499 ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
1500 isp_put_ctio(isp, cto, oqe);
1501 if (nth_ctio != 0) {
1502 MEMORYBARRIER(isp, SYNC_REQUEST, curi,
1503 QENTRY_LEN);
1504 }
1505 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));
1506
1507 /*
1508 * Reset some fields in the CTIO so we can reuse
1509 * for the next one we'll flush to the request
1510 * queue.
1511 */
1512 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1513 cto->ct_header.rqs_entry_count = 1;
1514 cto->ct_header.rqs_flags = 0;
1515 cto->ct_status = 0;
1516 cto->ct_scsi_status = 0;
1517 cto->ct_xfrlen = 0;
1518 cto->ct_resid = 0;
1519 cto->ct_seg_count = 0;
1520 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1521 }
1522 }
1523 *mp->nxtip = nxti;
1524}
1525
1526/*
1527 * We don't have to do multiple CTIOs here. Instead, we can just do
1528 * continuation segments as needed. This greatly simplifies the code
1529 * improves performance.
1530 */
1531
1532static void
1533tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1534{
1535 mush_t *mp;
1536 struct ccb_scsiio *csio;
1537 struct ispsoftc *isp;
1538 ct2_entry_t *cto, *qe;
1539 u_int16_t curi, nxti;
1540 int segcnt;
1541
1542 mp = (mush_t *) arg;
1543 if (error) {
1544 mp->error = error;
1545 return;
1546 }
1547
1548 isp = mp->isp;
1549 csio = mp->cmd_token;
1550 cto = mp->rq;
1551
1552 curi = isp->isp_reqidx;
1553 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
1554
1555 if (nseg == 0) {
1556 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
1557 isp_prt(isp, ISP_LOGWARN,
1558 "dma2_tgt_fc, a status CTIO2 without MODE1 "
1559 "set (0x%x)", cto->ct_flags);
1560 mp->error = EINVAL;
1561 return;
1562 }
1563 /*
1564 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
1565 * flags to NO DATA and clear relative offset flags.
1566 * We preserve the ct_resid and the response area.
1567 */
1568 cto->ct_header.rqs_seqno = 1;
1569 cto->ct_seg_count = 0;
1570 cto->ct_reloff = 0;
1571 isp_prt(isp, ISP_LOGTDEBUG1,
1572 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
1573 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
1574 cto->ct_iid, cto->ct_flags, cto->ct_status,
1575 cto->rsp.m1.ct_scsi_status, cto->ct_resid);
1576 isp_put_ctio2(isp, cto, qe);
1577 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
1578 return;
1579 }
1580
1581 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
1582 isp_prt(isp, ISP_LOGERR,
1583 "dma2_tgt_fc, a data CTIO2 without MODE0 set "
1584 "(0x%x)", cto->ct_flags);
1585 mp->error = EINVAL;
1586 return;
1587 }
1588
1589
1590 nxti = *mp->nxtip;
1591
1592 /*
1593 * Set up the CTIO2 data segments.
1594 */
1595 for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg;
1596 cto->ct_seg_count++, segcnt++) {
1597 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base =
1598 dm_segs[segcnt].ds_addr;
1599 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count =
1600 dm_segs[segcnt].ds_len;
1601 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
1602 isp_prt(isp, ISP_LOGTDEBUG1,
1603 "isp_send_ctio2: ent0[%d]0x%jx:%ju",
1604 cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr,
1605 (uintmax_t)dm_segs[segcnt].ds_len);
1606 }
1607
1608 while (segcnt < nseg) {
1609 u_int16_t curip;
1610 int seg;
1611 ispcontreq_t local, *crq = &local, *qep;
1612
1613 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1614 curip = nxti;
1615 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
1616 if (nxti == mp->optr) {
1617 ISP_UNLOCK(isp);
1618 isp_prt(isp, ISP_LOGTDEBUG0,
1619 "tdma_mkfc: request queue overflow");
1620 mp->error = MUSHERR_NOQENTRIES;
1621 return;
1622 }
1623 cto->ct_header.rqs_entry_count++;
1624 MEMZERO((void *)crq, sizeof (*crq));
1625 crq->req_header.rqs_entry_count = 1;
1626 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1627 for (seg = 0; segcnt < nseg && seg < ISP_CDSEG;
1628 segcnt++, seg++) {
1629 crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr;
1630 crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len;
1631 isp_prt(isp, ISP_LOGTDEBUG1,
1632 "isp_send_ctio2: ent%d[%d]%jx:%ju",
1633 cto->ct_header.rqs_entry_count-1, seg,
1634 (uintmax_t)dm_segs[segcnt].ds_addr,
1635 (uintmax_t)dm_segs[segcnt].ds_len);
1636 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
1637 cto->ct_seg_count++;
1638 }
1639 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
1640 isp_put_cont_req(isp, crq, qep);
1641 ISP_TDQE(isp, "cont entry", curi, qep);
1642 }
1643
1644 /*
1645 * No do final twiddling for the CTIO itself.
1646 */
1647 cto->ct_header.rqs_seqno = 1;
1648 isp_prt(isp, ISP_LOGTDEBUG1,
1649 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
1650 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
1651 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
1652 cto->ct_resid);
1653 isp_put_ctio2(isp, cto, qe);
1654 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
1655 *mp->nxtip = nxti;
1656}
1657#endif
1658
1659static void dma2(void *, bus_dma_segment_t *, int, int);
1660
1661#ifdef PAE
1662#define LOWD(x) ((uint32_t) x)
1663#define HIWD(x) ((uint32_t) (x >> 32))
1664
1665static void
1666dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1667{
1668 mush_t *mp;
1669 struct ispsoftc *isp;
1670 struct ccb_scsiio *csio;
1671 struct isp_pcisoftc *pcs;
1672 bus_dmamap_t *dp;
1673 bus_dma_segment_t *eseg;
1674 ispreq64_t *rq;
1675 int seglim, datalen;
1676 u_int16_t nxti;
1677
1678 mp = (mush_t *) arg;
1679 if (error) {
1680 mp->error = error;
1681 return;
1682 }
1683
1684 if (nseg < 1) {
1685 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
1686 mp->error = EFAULT;
1687 return;
1688 }
1689 csio = mp->cmd_token;
1690 isp = mp->isp;
1691 rq = mp->rq;
1692 pcs = (struct isp_pcisoftc *)mp->isp;
1693 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
1694 nxti = *mp->nxtip;
1695
1696 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1697 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
1698 } else {
1699 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
1700 }
1701 datalen = XS_XFRLEN(csio);
1702
1703 /*
1704 * We're passed an initial partially filled in entry that
1705 * has most fields filled in except for data transfer
1706 * related values.
1707 *
1708 * Our job is to fill in the initial request queue entry and
1709 * then to start allocating and filling in continuation entries
1710 * until we've covered the entire transfer.
1711 */
1712
1713 if (IS_FC(isp)) {
1714 rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
1715 seglim = ISP_RQDSEG_T3;
1716 ((ispreqt3_t *)rq)->req_totalcnt = datalen;
1717 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1718 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
1719 } else {
1720 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
1721 }
1722 } else {
1723 rq->req_header.rqs_entry_type = RQSTYPE_A64;
1724 if (csio->cdb_len > 12) {
1725 seglim = 0;
1726 } else {
1727 seglim = ISP_RQDSEG_A64;
1728 }
1729 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1730 rq->req_flags |= REQFLAG_DATA_IN;
1731 } else {
1732 rq->req_flags |= REQFLAG_DATA_OUT;
1733 }
1734 }
1735
1736 eseg = dm_segs + nseg;
1737
1738 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
1739 if (IS_FC(isp)) {
1740 ispreqt3_t *rq3 = (ispreqt3_t *)rq;
1741 rq3->req_dataseg[rq3->req_seg_count].ds_base =
1742 LOWD(dm_segs->ds_addr);
1743 rq3->req_dataseg[rq3->req_seg_count].ds_basehi =
1744 HIWD(dm_segs->ds_addr);
1745 rq3->req_dataseg[rq3->req_seg_count].ds_count =
1746 dm_segs->ds_len;
1747 } else {
1748 rq->req_dataseg[rq->req_seg_count].ds_base =
1749 LOWD(dm_segs->ds_addr);
1750 rq->req_dataseg[rq->req_seg_count].ds_basehi =
1751 HIWD(dm_segs->ds_addr);
1752 rq->req_dataseg[rq->req_seg_count].ds_count =
1753 dm_segs->ds_len;
1754 }
1755 datalen -= dm_segs->ds_len;
1756 rq->req_seg_count++;
1757 dm_segs++;
1758 }
1759
1760 while (datalen > 0 && dm_segs != eseg) {
1761 u_int16_t onxti;
1762 ispcontreq64_t local, *crq = &local, *cqe;
1763
1764 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1765 onxti = nxti;
1766 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
1767 if (nxti == mp->optr) {
1768 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
1769 mp->error = MUSHERR_NOQENTRIES;
1770 return;
1771 }
1772 rq->req_header.rqs_entry_count++;
1773 MEMZERO((void *)crq, sizeof (*crq));
1774 crq->req_header.rqs_entry_count = 1;
1775 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
1776
1777 seglim = 0;
1778 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
1779 crq->req_dataseg[seglim].ds_base =
1780 LOWD(dm_segs->ds_addr);
1781 crq->req_dataseg[seglim].ds_basehi =
1782 HIWD(dm_segs->ds_addr);
1783 crq->req_dataseg[seglim].ds_count =
1784 dm_segs->ds_len;
1785 rq->req_seg_count++;
1786 dm_segs++;
1787 seglim++;
1788 datalen -= dm_segs->ds_len;
1789 }
1790 isp_put_cont64_req(isp, crq, cqe);
1791 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
1792 }
1793 *mp->nxtip = nxti;
1794}
1795#else
1796static void
1797dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1798{
1799 mush_t *mp;
1800 struct ispsoftc *isp;
1801 struct ccb_scsiio *csio;
1802 struct isp_pcisoftc *pcs;
1803 bus_dmamap_t *dp;
1804 bus_dma_segment_t *eseg;
1805 ispreq_t *rq;
1806 int seglim, datalen;
1807 u_int16_t nxti;
1808
1809 mp = (mush_t *) arg;
1810 if (error) {
1811 mp->error = error;
1812 return;
1813 }
1814
1815 if (nseg < 1) {
1816 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
1817 mp->error = EFAULT;
1818 return;
1819 }
1820 csio = mp->cmd_token;
1821 isp = mp->isp;
1822 rq = mp->rq;
1823 pcs = (struct isp_pcisoftc *)mp->isp;
1824 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
1825 nxti = *mp->nxtip;
1826
1827 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1828 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
1829 } else {
1830 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
1831 }
1832
1833 datalen = XS_XFRLEN(csio);
1834
1835 	/*
1836 	 * We're passed an initial request queue entry with most
1837 	 * fields already filled in, except for the data transfer
1838 	 * related values.
1839 	 *
1840 	 * Our job is to complete that entry and then to allocate
1841 	 * and fill in continuation entries until the entire
1842 	 * transfer has been covered.
1843 	 */
1844
1845 if (IS_FC(isp)) {
1846 seglim = ISP_RQDSEG_T2;
1847 ((ispreqt2_t *)rq)->req_totalcnt = datalen;
1848 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1849 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
1850 } else {
1851 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
1852 }
1853 } else {
1854 if (csio->cdb_len > 12) {
1855 seglim = 0;
1856 } else {
1857 seglim = ISP_RQDSEG;
1858 }
1859 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1860 rq->req_flags |= REQFLAG_DATA_IN;
1861 } else {
1862 rq->req_flags |= REQFLAG_DATA_OUT;
1863 }
1864 }
1865
1866 eseg = dm_segs + nseg;
1867
1868 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
1869 if (IS_FC(isp)) {
1870 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
1871 rq2->req_dataseg[rq2->req_seg_count].ds_base =
1872 dm_segs->ds_addr;
1873 rq2->req_dataseg[rq2->req_seg_count].ds_count =
1874 dm_segs->ds_len;
1875 } else {
1876 rq->req_dataseg[rq->req_seg_count].ds_base =
1877 dm_segs->ds_addr;
1878 rq->req_dataseg[rq->req_seg_count].ds_count =
1879 dm_segs->ds_len;
1880 }
1881 datalen -= dm_segs->ds_len;
1882 rq->req_seg_count++;
1883 dm_segs++;
1884 }
1885
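	/*
	 * Spill any remaining segments into standard continuation
	 * entries, ISP_CDSEG segments apiece.
	 */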
1886 while (datalen > 0 && dm_segs != eseg) {
1887 u_int16_t onxti;
1888 ispcontreq_t local, *crq = &local, *cqe;
1889
1890 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1891 onxti = nxti;
1892 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
1893 if (nxti == mp->optr) {
1894 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
1895 mp->error = MUSHERR_NOQENTRIES;
1896 return;
1897 }
1898 rq->req_header.rqs_entry_count++;
1899 MEMZERO((void *)crq, sizeof (*crq));
1900 crq->req_header.rqs_entry_count = 1;
1901 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1902
1903 seglim = 0;
1904 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
1905 crq->req_dataseg[seglim].ds_base =
1906 dm_segs->ds_addr;
1907 crq->req_dataseg[seglim].ds_count =
1908 dm_segs->ds_len;
1909 			datalen -= dm_segs->ds_len;
1910 			rq->req_seg_count++;
1911 			dm_segs++;
1912 			seglim++;
1913 }
1914 isp_put_cont_req(isp, crq, cqe);
1915 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
1916 }
1917 *mp->nxtip = nxti;
1918}
1919#endif
1920
1921static int
1922isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
1923 u_int16_t *nxtip, u_int16_t optr)
1924{
1925 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1926 ispreq_t *qep;
1927 bus_dmamap_t *dp = NULL;
1928 mush_t mush, *mp;
1929 void (*eptr)(void *, bus_dma_segment_t *, int, int);
1930
1931 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
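	/*
	 * qep is the live slot in the request queue; rq is a host-order
	 * staging copy that is converted into that slot at mbxsync.
	 */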
1932#ifdef ISP_TARGET_MODE
1933 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
1934 if (IS_FC(isp)) {
1935 eptr = tdma_mkfc;
1936 } else {
1937 eptr = tdma_mk;
1938 }
1939 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
1940 (csio->dxfer_len == 0)) {
1941 mp = &mush;
1942 mp->isp = isp;
1943 mp->cmd_token = csio;
1944 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */
1945 mp->nxtip = nxtip;
1946 mp->optr = optr;
1947 mp->error = 0;
1948 (*eptr)(mp, NULL, 0, 0);
1949 goto mbxsync;
1950 }
1951 } else
1952#endif
1953 eptr = dma2;
1954
1955
1956 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
1957 (csio->dxfer_len == 0)) {
1958 rq->req_seg_count = 1;
1959 goto mbxsync;
1960 }
1961
1962 	/*
1963 	 * Gather the state the DMA callback needs, since
1964 	 * bus_dmamap_load() may invoke it before returning.
1965 	 */
1966 mp = &mush;
1967 mp->isp = isp;
1968 mp->cmd_token = csio;
1969 mp->rq = rq;
1970 mp->nxtip = nxtip;
1971 mp->optr = optr;
1972 mp->error = 0;
1973
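	/*
	 * Three mapping cases follow: a virtual buffer that must be
	 * loaded through busdma (with dma2 or a target-mode variant as
	 * the callback), a single pre-translated physical buffer, or a
	 * caller-supplied list of physical segments.
	 */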
1974 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
1975 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
1976 int error, s;
1977 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
1978 s = splsoftvm();
1979 error = bus_dmamap_load(pcs->dmat, *dp,
1980 csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
1981 if (error == EINPROGRESS) {
1982 bus_dmamap_unload(pcs->dmat, *dp);
1983 mp->error = EINVAL;
1984 isp_prt(isp, ISP_LOGERR,
1985 "deferred dma allocation not supported");
1986 } else if (error && mp->error == 0) {
1987#ifdef DIAGNOSTIC
1988 isp_prt(isp, ISP_LOGERR,
1989 "error %d in dma mapping code", error);
1990#endif
1991 mp->error = error;
1992 }
1993 splx(s);
1994 } else {
1995 /* Pointer to physical buffer */
1996 struct bus_dma_segment seg;
1997 seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
1998 seg.ds_len = csio->dxfer_len;
1999 (*eptr)(mp, &seg, 1, 0);
2000 }
2001 } else {
2002 struct bus_dma_segment *segs;
2003
2004 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2005 isp_prt(isp, ISP_LOGERR,
2006 "Physical segment pointers unsupported");
2007 mp->error = EINVAL;
2008 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
2009 isp_prt(isp, ISP_LOGERR,
2010 "Virtual segment addresses unsupported");
2011 mp->error = EINVAL;
2012 } else {
2013 /* Just use the segments provided */
2014 segs = (struct bus_dma_segment *) csio->data_ptr;
2015 (*eptr)(mp, segs, csio->sglist_cnt, 0);
2016 }
2017 }
2018 if (mp->error) {
2019 int retval = CMD_COMPLETE;
2020 if (mp->error == MUSHERR_NOQENTRIES) {
2021 retval = CMD_EAGAIN;
2022 } else if (mp->error == EFBIG) {
2023 XS_SETERR(csio, CAM_REQ_TOO_BIG);
2024 } else if (mp->error == EINVAL) {
2025 XS_SETERR(csio, CAM_REQ_INVALID);
2026 } else {
2027 XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
2028 }
2029 return (retval);
2030 }
2031mbxsync:
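	/*
	 * Copy the completed entry into the request queue slot,
	 * converting it to the byte order the card expects.
	 */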
2032 switch (rq->req_header.rqs_entry_type) {
2033 case RQSTYPE_REQUEST:
2034 isp_put_request(isp, rq, qep);
2035 break;
2036 case RQSTYPE_CMDONLY:
2037 isp_put_extended_request(isp, (ispextreq_t *)rq,
2038 (ispextreq_t *)qep);
2039 break;
2040 case RQSTYPE_T2RQS:
2041 isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
2042 break;
2043 case RQSTYPE_A64:
2044 case RQSTYPE_T3RQS:
2045 isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
2046 break;
2047 }
2048 return (CMD_QUEUED);
2049}
2050
2051static void
2052isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
2053{
2054 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2055 bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
2056 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2057 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
2058 } else {
2059 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
2060 }
2061 bus_dmamap_unload(pcs->dmat, *dp);
2062}
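/*
 * Rough life cycle (a sketch, not normative): the core driver stages a
 * request entry and calls isp_pci_dmasetup() via ISP_DMASETUP(); when
 * the command completes, the completion path calls ISP_DMAFREE(), i.e.
 * isp_pci_dmateardown(), with the same handle to post-sync and unload
 * the map.
 */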
2063
2064
2065static void
2066isp_pci_reset1(struct ispsoftc *isp)
2067{
2068 /* Make sure the BIOS is disabled */
2069 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
2070 /* and enable interrupts */
2071 ENABLE_INTS(isp);
2072}
2073
2074static void
2075isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
2076{
2077 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2078 if (msg)
2079 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
2080 else
2081 printf("%s:\n", device_get_nameunit(isp->isp_dev));
2082 if (IS_SCSI(isp))
2083 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
2084 else
2085 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
2086 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
2087 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
2088 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
2089
2090
2091 if (IS_SCSI(isp)) {
2092 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
2093 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
2094 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
2095 ISP_READ(isp, CDMA_FIFO_STS));
2096 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
2097 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
2098 ISP_READ(isp, DDMA_FIFO_STS));
2099 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
2100 ISP_READ(isp, SXP_INTERRUPT),
2101 ISP_READ(isp, SXP_GROSS_ERR),
2102 ISP_READ(isp, SXP_PINS_CTRL));
2103 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
2104 }
2105 printf(" mbox regs: %x %x %x %x %x\n",
2106 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
2107 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
2108 ISP_READ(isp, OUTMAILBOX4));
2109 printf(" PCI Status Command/Status=%x\n",
2110 	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 4));
2111}