/*
 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
 * Copyright (C) 2007-2008 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/hptiop/hptiop.c 236379 2012-06-01 04:34:49Z eadler $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/cons.h>
#if (__FreeBSD_version >= 500000)
#include <sys/time.h>
#include <sys/systm.h>
#else
#include <machine/clock.h>
#endif

#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/libkern.h>
#include <sys/kernel.h>

#if (__FreeBSD_version >= 500000)
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/module.h>
#endif

#include <sys/eventhandler.h>
#include <sys/bus.h>
#include <sys/taskqueue.h>
#include <sys/ioccom.h>

#include <machine/resource.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#if (__FreeBSD_version >= 500000)
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#else
#include <pci/pcivar.h>
#include <pci/pcireg.h>
#endif

#if (__FreeBSD_version <= 500043)
#include <sys/devicestat.h>
#endif

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#if (__FreeBSD_version < 500043)
#include <sys/bus_private.h>
#endif

#include <dev/hptiop/hptiop.h>

static char driver_name[] = "hptiop";
static char driver_version[] = "v1.3 (010208)";

static devclass_t hptiop_devclass;

static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
				u_int32_t msg, u_int32_t millisec);
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
				u_int32_t req);
static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams);
static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams);
static int hptiop_rescan_bus(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config);
static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config);
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
				u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams);
static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
static int hptiop_probe(device_t dev);
static int hptiop_attach(device_t dev);
static int hptiop_detach(device_t dev);
static int hptiop_shutdown(device_t dev);
static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
static void hptiop_poll(struct cam_sim *sim);
static void hptiop_async(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static void hptiop_pci_intr(void *arg);
static void hptiop_release_resource(struct hpt_iop_hba *hba);
static int hptiop_reset_adapter(struct hpt_iop_hba *hba);

static d_open_t hptiop_open;
static d_close_t hptiop_close;
static d_ioctl_t hptiop_ioctl;

static struct cdevsw hptiop_cdevsw = {
        .d_open = hptiop_open,
        .d_close = hptiop_close,
        .d_ioctl = hptiop_ioctl,
        .d_name = driver_name,
#if __FreeBSD_version >= 503000
        .d_version = D_VERSION,
#endif
#if (__FreeBSD_version >= 503000 && __FreeBSD_version < 600034)
        .d_flags = D_NEEDGIANT,
#endif
#if __FreeBSD_version < 600034
#if __FreeBSD_version >= 501000
        .d_maj = MAJOR_AUTO,
#else
        .d_maj = HPT_DEV_MAJOR,
#endif
#endif
};

#if __FreeBSD_version < 503000
#define hba_from_dev(dev) ((struct hpt_iop_hba *)(dev)->si_drv1)
#else
#define hba_from_dev(dev) \
        ((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev)))
#endif

#define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
#define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))

#define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
#define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
#define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
#define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))

static int hptiop_open(ioctl_dev_t dev, int flags,
                                int devtype, ioctl_thread_t proc)
{
        struct hpt_iop_hba *hba = hba_from_dev(dev);

        if (hba == NULL)
                return ENXIO;
        if (hba->flag & HPT_IOCTL_FLAG_OPEN)
                return EBUSY;
        hba->flag |= HPT_IOCTL_FLAG_OPEN;
        return 0;
}

static int hptiop_close(ioctl_dev_t dev, int flags,
                                int devtype, ioctl_thread_t proc)
{
        struct hpt_iop_hba *hba = hba_from_dev(dev);
        hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
        return 0;
}

static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
                                int flags, ioctl_thread_t proc)
{
        int ret = EFAULT;
        struct hpt_iop_hba *hba = hba_from_dev(dev);

#if (__FreeBSD_version >= 500000)
        mtx_lock(&Giant);
#endif

        switch (cmd) {
        case HPT_DO_IOCONTROL:
                ret = hba->ops->do_ioctl(hba,
                                (struct hpt_iop_ioctl_param *)data);
                break;
        case HPT_SCAN_BUS:
                ret = hptiop_rescan_bus(hba);
                break;
        }

#if (__FreeBSD_version >= 500000)
        mtx_unlock(&Giant);
#endif

        return ret;
}

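/*
 * Editor's note (added commentary, not in the original source): the MV
 * (Marvell) message unit exposes its outbound queue as a ring of 64-bit
 * entries in BAR2.  The routine below pops one entry: when the tail index
 * lags the head, the 64-bit entry is read as two 32-bit words, the tail
 * advances with wrap-around at MVIOP_QUEUE_LEN, and the new tail is
 * written back so the firmware can reuse the slot.
 */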
static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
{
        u_int64_t p;
        u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
        u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);

        if (outbound_tail != outbound_head) {
                bus_space_read_region_4(hba->bar2t, hba->bar2h,
                        offsetof(struct hpt_iopmu_mv,
                                outbound_q[outbound_tail]),
                        (u_int32_t *)&p, 2);

                outbound_tail++;

                if (outbound_tail == MVIOP_QUEUE_LEN)
                        outbound_tail = 0;

                BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
                return p;
        } else
                return 0;
}

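/*
 * Editor's note (added commentary): the inbound side mirrors the outbound
 * ring.  A request's bus address is written into the slot at the current
 * head, the head index advances (with wrap), and the inbound doorbell is
 * rung so the IOP knows a new post-queue entry is pending.
 */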
static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
{
        u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
        u_int32_t head = inbound_head + 1;

        if (head == MVIOP_QUEUE_LEN)
                head = 0;

        bus_space_write_region_4(hba->bar2t, hba->bar2h,
                offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
                (u_int32_t *)&p, 2);
        BUS_SPACE_WRT4_MV2(inbound_head, head);
        BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
}

static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
{
        BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
        BUS_SPACE_RD4_ITL(outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
{
        BUS_SPACE_WRT4_MV2(inbound_msg, msg);
        BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);

        BUS_SPACE_RD4_MV0(outbound_intmask);
}

static int hptiop_wait_ready_itl(struct hpt_iop_hba *hba, u_int32_t millisec)
{
        u_int32_t req = 0;
        int i;

        for (i = 0; i < millisec; i++) {
                req = BUS_SPACE_RD4_ITL(inbound_queue);
                if (req != IOPMU_QUEUE_EMPTY)
                        break;
                DELAY(1000);
        }

        if (req != IOPMU_QUEUE_EMPTY) {
                BUS_SPACE_WRT4_ITL(outbound_queue, req);
                BUS_SPACE_RD4_ITL(outbound_intstatus);
                return 0;
        }

        return -1;
}

static int hptiop_wait_ready_mv(struct hpt_iop_hba *hba, u_int32_t millisec)
{
        if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
                return -1;

        return 0;
}

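/*
 * Editor's note (added commentary): ITL completions arrive as 32-bit tags.
 * Tags with IOPMU_QUEUE_MASK_HOST_BITS set identify host-allocated
 * requests (an index into hba->srb[], possibly carrying an inline result
 * bit on newer firmware); all other tags are offsets into the IOP's own
 * BAR0 request area and must be read back through bus_space accessors.
 */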
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
                                                u_int32_t index)
{
        struct hpt_iop_srb *srb;
        struct hpt_iop_request_scsi_command *req = 0;
        union ccb *ccb;
        u_int8_t *cdb;
        u_int32_t result, temp, dxfer;
        u_int64_t temp64;

        if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /* host req */
                if (hba->firmware_version > 0x01020000 ||
                        hba->interface_version > 0x01020000) {
                        srb = hba->srb[index & ~(u_int32_t)
                                (IOPMU_QUEUE_ADDR_HOST_BIT
                                | IOPMU_QUEUE_REQUEST_RESULT_BIT)];
                        req = (struct hpt_iop_request_scsi_command *)srb;
                        if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
                                result = IOP_RESULT_SUCCESS;
                        else
                                result = req->header.result;
                } else {
                        srb = hba->srb[index &
                                ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
                        req = (struct hpt_iop_request_scsi_command *)srb;
                        result = req->header.result;
                }
                dxfer = req->dataxfer_length;
                goto srb_complete;
        }

        /* iop req */
        temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
                offsetof(struct hpt_iop_request_header, type));
        result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
                offsetof(struct hpt_iop_request_header, result));
        switch (temp) {
        case IOP_REQUEST_TYPE_IOCTL_COMMAND:
        {
                temp64 = 0;
                bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                wakeup((void *)((unsigned long)hba->u.itl.mu + index));
                break;
        }

        case IOP_REQUEST_TYPE_SCSI_COMMAND:
                bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                srb = (struct hpt_iop_srb *)(unsigned long)temp64;
                dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
                        index + offsetof(struct hpt_iop_request_scsi_command,
                        dataxfer_length));
srb_complete:
                ccb = (union ccb *)srb->ccb;
                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }

                switch (result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }

                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;

                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memset(&ccb->csio.sense_data, 0,
                            sizeof(ccb->csio.sense_data));
                        if (dxfer < ccb->csio.sense_len)
                                ccb->csio.sense_resid = ccb->csio.sense_len -
                                        dxfer;
                        else
                                ccb->csio.sense_resid = 0;
                        if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) { /* iop */
                                bus_space_read_region_1(hba->bar0t, hba->bar0h,
                                        index + offsetof(struct hpt_iop_request_scsi_command,
                                        sg_list), (u_int8_t *)&ccb->csio.sense_data,
                                        MIN(dxfer, sizeof(ccb->csio.sense_data)));
                        } else {
                                memcpy(&ccb->csio.sense_data, &req->sg_list,
                                        MIN(dxfer, sizeof(ccb->csio.sense_data)));
                        }
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
                        BUS_SPACE_WRT4_ITL(outbound_queue, index);

                ccb->csio.resid = ccb->csio.dxfer_len - dxfer;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
                break;
        }
}

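/*
 * Editor's note (added commentary): the drain loop below pops completion
 * tags until the outbound queue reads back IOPMU_QUEUE_EMPTY.  For
 * IOP-resident synchronous requests it appears to use the 64-bit context
 * field as a handshake with the poller in hptiop_send_sync_request_itl():
 * a non-zero context means the request can be completed here, otherwise
 * the context is set to 1 so the polling thread observes completion.
 */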
static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
{
        u_int32_t req, temp;

        while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) != IOPMU_QUEUE_EMPTY) {
                if (req & IOPMU_QUEUE_MASK_HOST_BITS)
                        hptiop_request_callback_itl(hba, req);
                else {
                        struct hpt_iop_request_header *p;

                        p = (struct hpt_iop_request_header *)
                                ((char *)hba->u.itl.mu + req);
                        temp = bus_space_read_4(hba->bar0t,
                                hba->bar0h, req +
                                offsetof(struct hpt_iop_request_header,
                                        flags));
                        if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
                                u_int64_t temp64;
                                bus_space_read_region_4(hba->bar0t,
                                        hba->bar0h, req +
                                        offsetof(struct hpt_iop_request_header,
                                                context),
                                        (u_int32_t *)&temp64, 2);
                                if (temp64) {
                                        hptiop_request_callback_itl(hba, req);
                                } else {
                                        temp64 = 1;
                                        bus_space_write_region_4(hba->bar0t,
                                                hba->bar0h, req +
                                                offsetof(struct hpt_iop_request_header,
                                                        context),
                                                (u_int32_t *)&temp64, 2);
                                }
                        } else
                                hptiop_request_callback_itl(hba, req);
                }
        }
}

static int hptiop_intr_itl(struct hpt_iop_hba *hba)
{
        u_int32_t status;
        int ret = 0;

        status = BUS_SPACE_RD4_ITL(outbound_intstatus);

        if (status & IOPMU_OUTBOUND_INT_MSG0) {
                u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
                KdPrint(("hptiop: received outbound msg %x\n", msg));
                BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
                hptiop_os_message_callback(hba, msg);
                ret = 1;
        }

        if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_itl(hba);
                ret = 1;
        }

        return ret;
}

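/*
 * Editor's note (added commentary): MV completions carry the request type
 * in the low bits of the 64-bit tag.  SCSI tags encode the srb index above
 * MVIOP_REQUEST_NUMBER_START_BIT; ioctl tags wake the sleeper on the
 * control request; get/set-config tags simply flag config_done for the
 * synchronous pollers.
 */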
static void hptiop_request_callback_mv(struct hpt_iop_hba *hba,
                                                u_int64_t _tag)
{
        u_int32_t context = (u_int32_t)_tag;

        if (context & MVIOP_CMD_TYPE_SCSI) {
                struct hpt_iop_srb *srb;
                struct hpt_iop_request_scsi_command *req;
                union ccb *ccb;
                u_int8_t *cdb;

                srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
                req = (struct hpt_iop_request_scsi_command *)srb;
                ccb = (union ccb *)srb->ccb;
                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }
                if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
                        req->header.result = IOP_RESULT_SUCCESS;

                switch (req->header.result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;
                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memset(&ccb->csio.sense_data, 0,
                            sizeof(ccb->csio.sense_data));
                        if (req->dataxfer_length < ccb->csio.sense_len)
                                ccb->csio.sense_resid = ccb->csio.sense_len -
                                        req->dataxfer_length;
                        else
                                ccb->csio.sense_resid = 0;
                        memcpy(&ccb->csio.sense_data, &req->sg_list,
                                MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
        } else if (context & MVIOP_CMD_TYPE_IOCTL) {
                struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
                if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
                        hba->config_done = 1;
                else
                        hba->config_done = -1;
                wakeup(req);
        } else if (context &
                        (MVIOP_CMD_TYPE_SET_CONFIG |
                                MVIOP_CMD_TYPE_GET_CONFIG))
                hba->config_done = 1;
        else {
                device_printf(hba->pcidev, "wrong callback type\n");
        }
}

static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba *hba)
{
        u_int64_t req;

        while ((req = hptiop_mv_outbound_read(hba))) {
                if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
                        if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
                                hptiop_request_callback_mv(hba, req);
                        }
                }
        }
}

static int hptiop_intr_mv(struct hpt_iop_hba *hba)
{
        u_int32_t status;
        int ret = 0;

        status = BUS_SPACE_RD4_MV0(outbound_doorbell);

        if (status)
                BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);

        if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
                u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
                KdPrint(("hptiop: received outbound msg %x\n", msg));
                hptiop_os_message_callback(hba, msg);
                ret = 1;
        }

        if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_mv(hba);
                ret = 1;
        }

        return ret;
}

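/*
 * Editor's note (added commentary): the two helpers below implement the
 * polled (interrupt-free) request path used during configuration.  Each
 * posts a request, then spins calling the interrupt handler once per
 * millisecond until the completion handshake (ITL: non-zero context;
 * MV: hba->config_done) fires or the timeout expires.
 */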
static int hptiop_send_sync_request_itl(struct hpt_iop_hba *hba,
                                u_int32_t req32, u_int32_t millisec)
{
        u_int32_t i;
        u_int64_t temp64;

        BUS_SPACE_WRT4_ITL(inbound_queue, req32);
        BUS_SPACE_RD4_ITL(outbound_intstatus);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_itl(hba);
                bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                if (temp64)
                        return 0;
                DELAY(1000);
        }

        return -1;
}

static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
                                void *req, u_int32_t millisec)
{
        u_int32_t i;
        u_int64_t phy_addr;
        hba->config_done = 0;

        phy_addr = hba->ctlcfgcmd_phy |
                        (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
        ((struct hpt_iop_request_get_config *)req)->header.flags |=
                IOP_REQUEST_FLAG_SYNC_REQUEST |
                IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
        hptiop_mv_inbound_write(phy_addr, hba);
        BUS_SPACE_RD4_MV0(outbound_intmask);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_mv(hba);
                if (hba->config_done)
                        return 0;
                DELAY(1000);
        }
        return -1;
}

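/*
 * Editor's note (added commentary): firmware messages (NOP, reset,
 * shutdown, ...) use the same poll-until-done pattern; msg_done is set
 * from hptiop_os_message_callback() when the IOP acknowledges the
 * message through its outbound message register.
 */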
static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
                                u_int32_t msg, u_int32_t millisec)
{
        u_int32_t i;

        hba->msg_done = 0;
        hba->ops->post_msg(hba, msg);

        for (i = 0; i < millisec; i++) {
                hba->ops->iop_intr(hba);
                if (hba->msg_done)
                        break;
                DELAY(1000);
        }

        return hba->msg_done ? 0 : -1;
}

static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        u_int32_t req32;

        config->header.size = sizeof(struct hpt_iop_request_get_config);
        config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
        config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        config->header.result = IOP_RESULT_PENDING;
        config->header.context = 0;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        bus_space_write_region_4(hba->bar0t, hba->bar0h,
                        req32, (u_int32_t *)config,
                        sizeof(struct hpt_iop_request_header) >> 2);

        if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
                KdPrint(("hptiop: get config send cmd failed"));
                return -1;
        }

        bus_space_read_region_4(hba->bar0t, hba->bar0h,
                        req32, (u_int32_t *)config,
                        sizeof(struct hpt_iop_request_get_config) >> 2);

        BUS_SPACE_WRT4_ITL(outbound_queue, req32);

        return 0;
}

static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        struct hpt_iop_request_get_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        req->header.flags = 0;
        req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_get_config);
        req->header.result = IOP_RESULT_PENDING;
        req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;

        if (hptiop_send_sync_request_mv(hba, req, 20000)) {
                KdPrint(("hptiop: get config send cmd failed"));
                return -1;
        }

        *config = *req;
        return 0;
}

static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        u_int32_t req32;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);

        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        config->header.size = sizeof(struct hpt_iop_request_set_config);
        config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        config->header.result = IOP_RESULT_PENDING;
        config->header.context = 0;

        bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
                        (u_int32_t *)config,
                        sizeof(struct hpt_iop_request_set_config) >> 2);

        if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        BUS_SPACE_WRT4_ITL(outbound_queue, req32);

        return 0;
}

static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        struct hpt_iop_request_set_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
                (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
                sizeof(struct hpt_iop_request_set_config) -
                        sizeof(struct hpt_iop_request_header));

        req->header.flags = 0;
        req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_set_config);
        req->header.result = IOP_RESULT_PENDING;
        req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;

        if (hptiop_send_sync_request_mv(hba, req, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        return 0;
}

static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
                                u_int32_t req32,
                                struct hpt_iop_ioctl_param *pParams)
{
        u_int64_t temp64;
        struct hpt_iop_request_ioctl_command req;

        if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
                        (hba->max_request_size -
                        offsetof(struct hpt_iop_request_ioctl_command, buf))) {
                device_printf(hba->pcidev, "request size beyond max value\n");
                return -1;
        }

        req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
                + pParams->nInBufferSize;
        req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
        req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        req.header.result = IOP_RESULT_PENDING;
        req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
        req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
        req.inbuf_size = pParams->nInBufferSize;
        req.outbuf_size = pParams->nOutBufferSize;
        req.bytes_returned = 0;

        bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
                offsetof(struct hpt_iop_request_ioctl_command, buf) >> 2);

        hptiop_lock_adapter(hba);

        BUS_SPACE_WRT4_ITL(inbound_queue, req32);
        BUS_SPACE_RD4_ITL(outbound_intstatus);

        bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                offsetof(struct hpt_iop_request_ioctl_command, header.context),
                (u_int32_t *)&temp64, 2);
        while (temp64) {
                if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
                                PPAUSE, "hptctl", HPT_OSM_TIMEOUT) == 0)
                        break;
                hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
                bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command,
                                header.context),
                        (u_int32_t *)&temp64, 2);
        }

        hptiop_unlock_adapter(hba);
        return 0;
}

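/*
 * Editor's note (added commentary): user buffers cannot be handed to
 * bus_space routines directly, so these helpers shuttle ioctl payloads
 * between userland and the IOP's BAR0 window one byte at a time via
 * copyin()/copyout() and bus_space_{read,write}_1().
 */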
static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
                                void *user, int size)
{
        unsigned char byte;
        int i;

        for (i = 0; i < size; i++) {
                if (copyin((u_int8_t *)user + i, &byte, 1))
                        return -1;
                bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
        }

        return 0;
}

static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
                                void *user, int size)
{
        unsigned char byte;
        int i;

        for (i = 0; i < size; i++) {
                byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
                if (copyout(&byte, (u_int8_t *)user + i, 1))
                        return -1;
        }

        return 0;
}

static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams)
{
        u_int32_t req32;
        u_int32_t result;

        if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
                (pParams->Magic != HPT_IOCTL_MAGIC32))
                return EFAULT;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return EFAULT;

        if (pParams->nInBufferSize)
                if (hptiop_bus_space_copyin(hba, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command, buf),
                        (void *)pParams->lpInBuffer, pParams->nInBufferSize))
                        goto invalid;

        if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
                goto invalid;

        result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command,
                                header.result));

        if (result == IOP_RESULT_SUCCESS) {
                if (pParams->nOutBufferSize)
                        if (hptiop_bus_space_copyout(hba, req32 +
                                offsetof(struct hpt_iop_request_ioctl_command, buf) +
                                        ((pParams->nInBufferSize + 3) & ~3),
                                (void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
                                goto invalid;

                if (pParams->lpBytesReturned) {
                        if (hptiop_bus_space_copyout(hba, req32 +
                                offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
                                (void *)pParams->lpBytesReturned, sizeof(unsigned long)))
                                goto invalid;
                }

                BUS_SPACE_WRT4_ITL(outbound_queue, req32);

                return 0;
        } else {
invalid:
                BUS_SPACE_WRT4_ITL(outbound_queue, req32);

                return EFAULT;
        }
}

static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_ioctl_command *req,
                                struct hpt_iop_ioctl_param *pParams)
{
        u_int64_t req_phy;
        int size = 0;

        if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
                        (hba->max_request_size -
                        offsetof(struct hpt_iop_request_ioctl_command, buf))) {
                device_printf(hba->pcidev, "request size beyond max value\n");
                return -1;
        }

        req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
        req->inbuf_size = pParams->nInBufferSize;
        req->outbuf_size = pParams->nOutBufferSize;
        req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
                + pParams->nInBufferSize;
        req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
        req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
        req->header.result = IOP_RESULT_PENDING;
        req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
        size = req->header.size >> 8;
        size = size > 3 ? 3 : size;
        req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
        hptiop_mv_inbound_write(req_phy, hba);

        BUS_SPACE_RD4_MV0(outbound_intmask);

        while (hba->config_done == 0) {
                if (hptiop_sleep(hba, req, PPAUSE,
                        "hptctl", HPT_OSM_TIMEOUT) == 0)
                        continue;
                hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
        }
        return 0;
}

static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams)
{
        struct hpt_iop_request_ioctl_command *req;

        if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
                (pParams->Magic != HPT_IOCTL_MAGIC32))
                return EFAULT;

        req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
        hba->config_done = 0;
        hptiop_lock_adapter(hba);
        if (pParams->nInBufferSize)
                if (copyin((void *)pParams->lpInBuffer,
                                req->buf, pParams->nInBufferSize))
                        goto invalid;
        if (hptiop_post_ioctl_command_mv(hba, req, pParams))
                goto invalid;

        if (hba->config_done == 1) {
                if (pParams->nOutBufferSize)
                        if (copyout(req->buf +
                                ((pParams->nInBufferSize + 3) & ~3),
                                (void *)pParams->lpOutBuffer,
                                pParams->nOutBufferSize))
                                goto invalid;

                if (pParams->lpBytesReturned)
                        if (copyout(&req->bytes_returned,
                                (void *)pParams->lpBytesReturned,
                                sizeof(u_int32_t)))
                                goto invalid;
                hptiop_unlock_adapter(hba);
                return 0;
        } else {
invalid:
                hptiop_unlock_adapter(hba);
                return EFAULT;
        }
}

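/*
 * Editor's note (added commentary): HPT_SCAN_BUS is serviced by asking
 * CAM to rescan the whole SIM: a wildcard path (all targets/LUNs) is
 * built on this adapter's bus and handed to xpt_rescan().
 */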
static int hptiop_rescan_bus(struct hpt_iop_hba *hba)
{
        union ccb *ccb;

        if ((ccb = xpt_alloc_ccb()) == NULL)
                return (ENOMEM);
        if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(hba->sim),
                CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
                xpt_free_ccb(ccb);
                return (EIO);
        }
        xpt_rescan(ccb);
        return (0);
}

static bus_dmamap_callback_t hptiop_map_srb;
static bus_dmamap_callback_t hptiop_post_scsi_command;
static bus_dmamap_callback_t hptiop_mv_map_ctlcfg;

static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
{
        hba->bar0_rid = 0x10;
        hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
                        SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

        if (hba->bar0_res == NULL) {
                device_printf(hba->pcidev,
                        "failed to get iop base address.\n");
                return -1;
        }
        hba->bar0t = rman_get_bustag(hba->bar0_res);
        hba->bar0h = rman_get_bushandle(hba->bar0_res);
        hba->u.itl.mu = (struct hpt_iopmu_itl *)
                                rman_get_virtual(hba->bar0_res);

        if (!hba->u.itl.mu) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                device_printf(hba->pcidev, "alloc mem res failed\n");
                return -1;
        }

        return 0;
}

static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
{
        hba->bar0_rid = 0x10;
        hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
                        SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

        if (hba->bar0_res == NULL) {
                device_printf(hba->pcidev, "failed to get iop bar0.\n");
                return -1;
        }
        hba->bar0t = rman_get_bustag(hba->bar0_res);
        hba->bar0h = rman_get_bushandle(hba->bar0_res);
        hba->u.mv.regs = (struct hpt_iopmv_regs *)
                                rman_get_virtual(hba->bar0_res);

        if (!hba->u.mv.regs) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
                return -1;
        }

        hba->bar2_rid = 0x18;
        hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
                        SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);

        if (hba->bar2_res == NULL) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                device_printf(hba->pcidev, "failed to get iop bar2.\n");
                return -1;
        }

        hba->bar2t = rman_get_bustag(hba->bar2_res);
        hba->bar2h = rman_get_bushandle(hba->bar2_res);
        hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);

        if (!hba->u.mv.mu) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar2_rid, hba->bar2_res);
                device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
                return -1;
        }

        return 0;
}

static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
{
        if (hba->bar0_res)
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                        hba->bar0_rid, hba->bar0_res);
}

static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
{
        if (hba->bar0_res)
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                        hba->bar0_rid, hba->bar0_res);
        if (hba->bar2_res)
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                        hba->bar2_rid, hba->bar2_res);
}

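/*
 * Editor's note (added commentary): MV adapters need one small
 * host-resident control/config request buffer.  It is carved out of a
 * dedicated busdma tag (32-bit addressable, coherent where supported);
 * hptiop_mv_map_ctlcfg() records the resulting bus address in
 * hba->ctlcfgcmd_phy.
 */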
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
{
        if (bus_dma_tag_create(hba->parent_dmat,
                        1,
                        0,
                        BUS_SPACE_MAXADDR_32BIT,
                        BUS_SPACE_MAXADDR,
                        NULL, NULL,
                        0x800 - 0x8,
                        1,
                        BUS_SPACE_MAXSIZE_32BIT,
                        BUS_DMA_ALLOCNOW,
#if __FreeBSD_version > 502000
                        NULL,
                        NULL,
#endif
                        &hba->ctlcfg_dmat)) {
                device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
                return -1;
        }

        if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
#if __FreeBSD_version > 501000
                BUS_DMA_WAITOK | BUS_DMA_COHERENT,
#else
                BUS_DMA_WAITOK,
#endif
                &hba->ctlcfg_dmamap) != 0) {
                device_printf(hba->pcidev,
                                "bus_dmamem_alloc failed!\n");
                bus_dma_tag_destroy(hba->ctlcfg_dmat);
                return -1;
        }

        if (bus_dmamap_load(hba->ctlcfg_dmat,
                        hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
                        MVIOP_IOCTLCFG_SIZE,
                        hptiop_mv_map_ctlcfg, hba, 0)) {
                device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
                if (hba->ctlcfg_dmat)
                        bus_dmamem_free(hba->ctlcfg_dmat,
                                hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
                bus_dma_tag_destroy(hba->ctlcfg_dmat);
                return -1;
        }

        return 0;
}

static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
{
        if (hba->ctlcfg_dmat) {
                bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
                bus_dmamem_free(hba->ctlcfg_dmat,
                        hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
                bus_dma_tag_destroy(hba->ctlcfg_dmat);
        }

        return 0;
}

/*
 * CAM driver interface
 */
static device_method_t driver_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, hptiop_probe),
        DEVMETHOD(device_attach, hptiop_attach),
        DEVMETHOD(device_detach, hptiop_detach),
        DEVMETHOD(device_shutdown, hptiop_shutdown),
        { 0, 0 }
};

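/*
 * Editor's note (added commentary): the ITL (Intel IOP-based RR3xxx) and
 * MV (Marvell-based RR4xxx) variants differ only behind this ops vtable;
 * the shared attach/ioctl/CAM code calls through hba->ops, and ITL leaves
 * the internal_memalloc/memfree hooks unset because it needs no
 * host-resident control buffer.
 */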
static struct hptiop_adapter_ops hptiop_itl_ops = {
        .iop_wait_ready = hptiop_wait_ready_itl,
        .internal_memalloc = 0,
        .internal_memfree = 0,
        .alloc_pci_res = hptiop_alloc_pci_res_itl,
        .release_pci_res = hptiop_release_pci_res_itl,
        .enable_intr = hptiop_enable_intr_itl,
        .disable_intr = hptiop_disable_intr_itl,
        .get_config = hptiop_get_config_itl,
        .set_config = hptiop_set_config_itl,
        .iop_intr = hptiop_intr_itl,
        .post_msg = hptiop_post_msg_itl,
        .post_req = hptiop_post_req_itl,
        .do_ioctl = hptiop_do_ioctl_itl,
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
        .iop_wait_ready = hptiop_wait_ready_mv,
        .internal_memalloc = hptiop_internal_memalloc_mv,
        .internal_memfree = hptiop_internal_memfree_mv,
        .alloc_pci_res = hptiop_alloc_pci_res_mv,
        .release_pci_res = hptiop_release_pci_res_mv,
        .enable_intr = hptiop_enable_intr_mv,
        .disable_intr = hptiop_disable_intr_mv,
        .get_config = hptiop_get_config_mv,
        .set_config = hptiop_set_config_mv,
        .iop_intr = hptiop_intr_mv,
        .post_msg = hptiop_post_msg_mv,
        .post_req = hptiop_post_req_mv,
        .do_ioctl = hptiop_do_ioctl_mv,
};

static driver_t hptiop_pci_driver = {
        driver_name,
        driver_methods,
        sizeof(struct hpt_iop_hba)
};

DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0);
29
30#include <sys/param.h>
31#include <sys/types.h>
32#include <sys/cons.h>
33#if (__FreeBSD_version >= 500000)
34#include <sys/time.h>
35#include <sys/systm.h>
36#else
37#include <machine/clock.h>
38#endif
39
40#include <sys/stat.h>
41#include <sys/malloc.h>
42#include <sys/conf.h>
43#include <sys/libkern.h>
44#include <sys/kernel.h>
45
46#if (__FreeBSD_version >= 500000)
47#include <sys/kthread.h>
48#include <sys/mutex.h>
49#include <sys/module.h>
50#endif
51
52#include <sys/eventhandler.h>
53#include <sys/bus.h>
54#include <sys/taskqueue.h>
55#include <sys/ioccom.h>
56
57#include <machine/resource.h>
58#include <machine/bus.h>
59#include <machine/stdarg.h>
60#include <sys/rman.h>
61
62#include <vm/vm.h>
63#include <vm/pmap.h>
64
65#if (__FreeBSD_version >= 500000)
66#include <dev/pci/pcireg.h>
67#include <dev/pci/pcivar.h>
68#else
69#include <pci/pcivar.h>
70#include <pci/pcireg.h>
71#endif
72
73#if (__FreeBSD_version <= 500043)
74#include <sys/devicestat.h>
75#endif
76
77#include <cam/cam.h>
78#include <cam/cam_ccb.h>
79#include <cam/cam_sim.h>
80#include <cam/cam_xpt_sim.h>
81#include <cam/cam_debug.h>
82#include <cam/cam_periph.h>
83#include <cam/scsi/scsi_all.h>
84#include <cam/scsi/scsi_message.h>
85
86#if (__FreeBSD_version < 500043)
87#include <sys/bus_private.h>
88#endif
89
90#include <dev/hptiop/hptiop.h>
91
92static char driver_name[] = "hptiop";
93static char driver_version[] = "v1.3 (010208)";
94
95static devclass_t hptiop_devclass;
96
97static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
98 u_int32_t msg, u_int32_t millisec);
99static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
100 u_int32_t req);
101static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
102static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
103static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
104 struct hpt_iop_ioctl_param *pParams);
105static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
106 struct hpt_iop_ioctl_param *pParams);
107static int hptiop_rescan_bus(struct hpt_iop_hba *hba);
108static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
109static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
110static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
111 struct hpt_iop_request_get_config *config);
112static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
113 struct hpt_iop_request_get_config *config);
114static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
115 struct hpt_iop_request_set_config *config);
116static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
117 struct hpt_iop_request_set_config *config);
118static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
119static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
120static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
121 u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
122static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
123 struct hpt_iop_request_ioctl_command *req,
124 struct hpt_iop_ioctl_param *pParams);
125static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
126 struct hpt_iop_srb *srb,
127 bus_dma_segment_t *segs, int nsegs);
128static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
129 struct hpt_iop_srb *srb,
130 bus_dma_segment_t *segs, int nsegs);
131static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
132static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
133static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
134static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
135static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
136static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
137static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
138static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
139static int hptiop_probe(device_t dev);
140static int hptiop_attach(device_t dev);
141static int hptiop_detach(device_t dev);
142static int hptiop_shutdown(device_t dev);
143static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
144static void hptiop_poll(struct cam_sim *sim);
145static void hptiop_async(void *callback_arg, u_int32_t code,
146 struct cam_path *path, void *arg);
147static void hptiop_pci_intr(void *arg);
148static void hptiop_release_resource(struct hpt_iop_hba *hba);
149static int hptiop_reset_adapter(struct hpt_iop_hba *hba);
150
151static d_open_t hptiop_open;
152static d_close_t hptiop_close;
153static d_ioctl_t hptiop_ioctl;
154
155static struct cdevsw hptiop_cdevsw = {
156 .d_open = hptiop_open,
157 .d_close = hptiop_close,
158 .d_ioctl = hptiop_ioctl,
159 .d_name = driver_name,
160#if __FreeBSD_version>=503000
161 .d_version = D_VERSION,
162#endif
163#if (__FreeBSD_version>=503000 && __FreeBSD_version<600034)
164 .d_flags = D_NEEDGIANT,
165#endif
166#if __FreeBSD_version<600034
167#if __FreeBSD_version>=501000
168 .d_maj = MAJOR_AUTO,
169#else
170 .d_maj = HPT_DEV_MAJOR,
171#endif
172#endif
173};
174
175#if __FreeBSD_version < 503000
176#define hba_from_dev(dev) ((struct hpt_iop_hba *)(dev)->si_drv1)
177#else
178#define hba_from_dev(dev) \
179 ((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev)))
180#endif
181
182#define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
183 hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
184#define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
185 hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))
186
187#define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
188 hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
189#define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
190 hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
191#define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
192 hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
193#define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
194 hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))
195
196static int hptiop_open(ioctl_dev_t dev, int flags,
197 int devtype, ioctl_thread_t proc)
198{
199 struct hpt_iop_hba *hba = hba_from_dev(dev);
200
201 if (hba==NULL)
202 return ENXIO;
203 if (hba->flag & HPT_IOCTL_FLAG_OPEN)
204 return EBUSY;
205 hba->flag |= HPT_IOCTL_FLAG_OPEN;
206 return 0;
207}
208
209static int hptiop_close(ioctl_dev_t dev, int flags,
210 int devtype, ioctl_thread_t proc)
211{
212 struct hpt_iop_hba *hba = hba_from_dev(dev);
213 hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
214 return 0;
215}
216
217static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
218 int flags, ioctl_thread_t proc)
219{
220 int ret = EFAULT;
221 struct hpt_iop_hba *hba = hba_from_dev(dev);
222
223#if (__FreeBSD_version >= 500000)
224 mtx_lock(&Giant);
225#endif
226
227 switch (cmd) {
228 case HPT_DO_IOCONTROL:
229 ret = hba->ops->do_ioctl(hba,
230 (struct hpt_iop_ioctl_param *)data);
231 break;
232 case HPT_SCAN_BUS:
233 ret = hptiop_rescan_bus(hba);
234 break;
235 }
236
237#if (__FreeBSD_version >= 500000)
238 mtx_unlock(&Giant);
239#endif
240
241 return ret;
242}
243
244static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
245{
246 u_int64_t p;
247 u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
248 u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);
249
250 if (outbound_tail != outbound_head) {
251 bus_space_read_region_4(hba->bar2t, hba->bar2h,
252 offsetof(struct hpt_iopmu_mv,
253 outbound_q[outbound_tail]),
254 (u_int32_t *)&p, 2);
255
256 outbound_tail++;
257
258 if (outbound_tail == MVIOP_QUEUE_LEN)
259 outbound_tail = 0;
260
261 BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
262 return p;
263 } else
264 return 0;
265}
266
267static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
268{
269 u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
270 u_int32_t head = inbound_head + 1;
271
272 if (head == MVIOP_QUEUE_LEN)
273 head = 0;
274
275 bus_space_write_region_4(hba->bar2t, hba->bar2h,
276 offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
277 (u_int32_t *)&p, 2);
278 BUS_SPACE_WRT4_MV2(inbound_head, head);
279 BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
280}
281
282static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
283{
284 BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
285 BUS_SPACE_RD4_ITL(outbound_intstatus);
286}
287
288static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
289{
290
291 BUS_SPACE_WRT4_MV2(inbound_msg, msg);
292 BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);
293
294 BUS_SPACE_RD4_MV0(outbound_intmask);
295}
296
297static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec)
298{
299 u_int32_t req=0;
300 int i;
301
302 for (i = 0; i < millisec; i++) {
303 req = BUS_SPACE_RD4_ITL(inbound_queue);
304 if (req != IOPMU_QUEUE_EMPTY)
305 break;
306 DELAY(1000);
307 }
308
309 if (req!=IOPMU_QUEUE_EMPTY) {
310 BUS_SPACE_WRT4_ITL(outbound_queue, req);
311 BUS_SPACE_RD4_ITL(outbound_intstatus);
312 return 0;
313 }
314
315 return -1;
316}
317
318static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec)
319{
320 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
321 return -1;
322
323 return 0;
324}
325
326static void hptiop_request_callback_itl(struct hpt_iop_hba * hba,
327 u_int32_t index)
328{
329 struct hpt_iop_srb *srb;
330 struct hpt_iop_request_scsi_command *req=0;
331 union ccb *ccb;
332 u_int8_t *cdb;
333 u_int32_t result, temp, dxfer;
334 u_int64_t temp64;
335
336 if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/
337 if (hba->firmware_version > 0x01020000 ||
338 hba->interface_version > 0x01020000) {
339 srb = hba->srb[index & ~(u_int32_t)
340 (IOPMU_QUEUE_ADDR_HOST_BIT
341 | IOPMU_QUEUE_REQUEST_RESULT_BIT)];
342 req = (struct hpt_iop_request_scsi_command *)srb;
343 if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
344 result = IOP_RESULT_SUCCESS;
345 else
346 result = req->header.result;
347 } else {
348 srb = hba->srb[index &
349 ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
350 req = (struct hpt_iop_request_scsi_command *)srb;
351 result = req->header.result;
352 }
353 dxfer = req->dataxfer_length;
354 goto srb_complete;
355 }
356
357 /*iop req*/
358 temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
359 offsetof(struct hpt_iop_request_header, type));
360 result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
361 offsetof(struct hpt_iop_request_header, result));
362 switch(temp) {
363 case IOP_REQUEST_TYPE_IOCTL_COMMAND:
364 {
365 temp64 = 0;
366 bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
367 offsetof(struct hpt_iop_request_header, context),
368 (u_int32_t *)&temp64, 2);
369 wakeup((void *)((unsigned long)hba->u.itl.mu + index));
370 break;
371 }
372
373 case IOP_REQUEST_TYPE_SCSI_COMMAND:
374 bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
375 offsetof(struct hpt_iop_request_header, context),
376 (u_int32_t *)&temp64, 2);
377 srb = (struct hpt_iop_srb *)(unsigned long)temp64;
378 dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
379 index + offsetof(struct hpt_iop_request_scsi_command,
380 dataxfer_length));
381srb_complete:
382 ccb = (union ccb *)srb->ccb;
383 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
384 cdb = ccb->csio.cdb_io.cdb_ptr;
385 else
386 cdb = ccb->csio.cdb_io.cdb_bytes;
387
388 if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
389 ccb->ccb_h.status = CAM_REQ_CMP;
390 goto scsi_done;
391 }
392
393 switch (result) {
394 case IOP_RESULT_SUCCESS:
395 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
396 case CAM_DIR_IN:
397 bus_dmamap_sync(hba->io_dmat,
398 srb->dma_map, BUS_DMASYNC_POSTREAD);
399 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
400 break;
401 case CAM_DIR_OUT:
402 bus_dmamap_sync(hba->io_dmat,
403 srb->dma_map, BUS_DMASYNC_POSTWRITE);
404 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
405 break;
406 }
407
408 ccb->ccb_h.status = CAM_REQ_CMP;
409 break;
410
411 case IOP_RESULT_BAD_TARGET:
412 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
413 break;
414 case IOP_RESULT_BUSY:
415 ccb->ccb_h.status = CAM_BUSY;
416 break;
417 case IOP_RESULT_INVALID_REQUEST:
418 ccb->ccb_h.status = CAM_REQ_INVALID;
419 break;
420 case IOP_RESULT_FAIL:
421 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
422 break;
423 case IOP_RESULT_RESET:
424 ccb->ccb_h.status = CAM_BUSY;
425 break;
426 case IOP_RESULT_CHECK_CONDITION:
427 memset(&ccb->csio.sense_data, 0,
428 sizeof(ccb->csio.sense_data));
429 if (dxfer < ccb->csio.sense_len)
430 ccb->csio.sense_resid = ccb->csio.sense_len -
431 dxfer;
432 else
433 ccb->csio.sense_resid = 0;
434 if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/
435 bus_space_read_region_1(hba->bar0t, hba->bar0h,
436 index + offsetof(struct hpt_iop_request_scsi_command,
437 sg_list), (u_int8_t *)&ccb->csio.sense_data,
438 MIN(dxfer, sizeof(ccb->csio.sense_data)));
439 } else {
440 memcpy(&ccb->csio.sense_data, &req->sg_list,
441 MIN(dxfer, sizeof(ccb->csio.sense_data)));
442 }
443 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
444 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
445 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
446 break;
447 default:
448 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
449 break;
450 }
451scsi_done:
452 if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
453 BUS_SPACE_WRT4_ITL(outbound_queue, index);
454
455 ccb->csio.resid = ccb->csio.dxfer_len - dxfer;
456
457 hptiop_free_srb(hba, srb);
458 xpt_done(ccb);
459 break;
460 }
461}
462
463static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
464{
465 u_int32_t req, temp;
466
467 while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) {
468 if (req & IOPMU_QUEUE_MASK_HOST_BITS)
469 hptiop_request_callback_itl(hba, req);
470 else {
471 struct hpt_iop_request_header *p;
472
473 p = (struct hpt_iop_request_header *)
474 ((char *)hba->u.itl.mu + req);
475 temp = bus_space_read_4(hba->bar0t,
476 hba->bar0h,req +
477 offsetof(struct hpt_iop_request_header,
478 flags));
479 if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
480 u_int64_t temp64;
481 bus_space_read_region_4(hba->bar0t,
482 hba->bar0h,req +
483 offsetof(struct hpt_iop_request_header,
484 context),
485 (u_int32_t *)&temp64, 2);
486 if (temp64) {
487 hptiop_request_callback_itl(hba, req);
488 } else {
489 temp64 = 1;
490 bus_space_write_region_4(hba->bar0t,
491 hba->bar0h,req +
492 offsetof(struct hpt_iop_request_header,
493 context),
494 (u_int32_t *)&temp64, 2);
495 }
496 } else
497 hptiop_request_callback_itl(hba, req);
498 }
499 }
500}
501
502static int hptiop_intr_itl(struct hpt_iop_hba * hba)
503{
504 u_int32_t status;
505 int ret = 0;
506
507 status = BUS_SPACE_RD4_ITL(outbound_intstatus);
508
509 if (status & IOPMU_OUTBOUND_INT_MSG0) {
510 u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
511 KdPrint(("hptiop: received outbound msg %x\n", msg));
512 BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
513 hptiop_os_message_callback(hba, msg);
514 ret = 1;
515 }
516
517 if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
518 hptiop_drain_outbound_queue_itl(hba);
519 ret = 1;
520 }
521
522 return ret;
523}
524
525static void hptiop_request_callback_mv(struct hpt_iop_hba * hba,
526 u_int64_t _tag)
527{
528 u_int32_t context = (u_int32_t)_tag;
529
530 if (context & MVIOP_CMD_TYPE_SCSI) {
531 struct hpt_iop_srb *srb;
532 struct hpt_iop_request_scsi_command *req;
533 union ccb *ccb;
534 u_int8_t *cdb;
535
536 srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
537 req = (struct hpt_iop_request_scsi_command *)srb;
538 ccb = (union ccb *)srb->ccb;
539 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
540 cdb = ccb->csio.cdb_io.cdb_ptr;
541 else
542 cdb = ccb->csio.cdb_io.cdb_bytes;
543
544 if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
545 ccb->ccb_h.status = CAM_REQ_CMP;
546 goto scsi_done;
547 }
548 if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
549 req->header.result = IOP_RESULT_SUCCESS;
550
551 switch (req->header.result) {
552 case IOP_RESULT_SUCCESS:
553 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
554 case CAM_DIR_IN:
555 bus_dmamap_sync(hba->io_dmat,
556 srb->dma_map, BUS_DMASYNC_POSTREAD);
557 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
558 break;
559 case CAM_DIR_OUT:
560 bus_dmamap_sync(hba->io_dmat,
561 srb->dma_map, BUS_DMASYNC_POSTWRITE);
562 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
563 break;
564 }
565 ccb->ccb_h.status = CAM_REQ_CMP;
566 break;
567 case IOP_RESULT_BAD_TARGET:
568 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
569 break;
570 case IOP_RESULT_BUSY:
571 ccb->ccb_h.status = CAM_BUSY;
572 break;
573 case IOP_RESULT_INVALID_REQUEST:
574 ccb->ccb_h.status = CAM_REQ_INVALID;
575 break;
576 case IOP_RESULT_FAIL:
577 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
578 break;
579 case IOP_RESULT_RESET:
580 ccb->ccb_h.status = CAM_BUSY;
581 break;
582 case IOP_RESULT_CHECK_CONDITION:
583 memset(&ccb->csio.sense_data, 0,
584 sizeof(ccb->csio.sense_data));
585 if (req->dataxfer_length < ccb->csio.sense_len)
586 ccb->csio.sense_resid = ccb->csio.sense_len -
587 req->dataxfer_length;
588 else
589 ccb->csio.sense_resid = 0;
590 memcpy(&ccb->csio.sense_data, &req->sg_list,
591 MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
592 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
593 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
594 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
595 break;
596 default:
597 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
598 break;
599 }
600scsi_done:
601 ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
602
603 hptiop_free_srb(hba, srb);
604 xpt_done(ccb);
605 } else if (context & MVIOP_CMD_TYPE_IOCTL) {
606 struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
607 if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
608 hba->config_done = 1;
609 else
610 hba->config_done = -1;
611 wakeup(req);
612 } else if (context &
613 (MVIOP_CMD_TYPE_SET_CONFIG |
614 MVIOP_CMD_TYPE_GET_CONFIG))
615 hba->config_done = 1;
616 else {
617 device_printf(hba->pcidev, "wrong callback type\n");
618 }
619}
620
621static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)
622{
623 u_int64_t req;
624
625 while ((req = hptiop_mv_outbound_read(hba))) {
626 if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
627 if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
628 hptiop_request_callback_mv(hba, req);
629 }
630 }
631 }
632}
633
634static int hptiop_intr_mv(struct hpt_iop_hba * hba)
635{
636 u_int32_t status;
637 int ret = 0;
638
639 status = BUS_SPACE_RD4_MV0(outbound_doorbell);
640
641 if (status)
642 BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);
643
644 if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
645 u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
646 KdPrint(("hptiop: received outbound msg %x\n", msg));
647 hptiop_os_message_callback(hba, msg);
648 ret = 1;
649 }
650
651 if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
652 hptiop_drain_outbound_queue_mv(hba);
653 ret = 1;
654 }
655
656 return ret;
657}
658
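/*
 * Post a prepared request to the ITL inbound queue, then poll the
 * interrupt handler once per millisecond until the request's context
 * field becomes nonzero (set by hptiop_drain_outbound_queue_itl()).
 */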
659static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,
660 u_int32_t req32, u_int32_t millisec)
661{
662 u_int32_t i;
663 u_int64_t temp64;
664
665 BUS_SPACE_WRT4_ITL(inbound_queue, req32);
666 BUS_SPACE_RD4_ITL(outbound_intstatus);
667
668 for (i = 0; i < millisec; i++) {
669 hptiop_intr_itl(hba);
670 bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
671 offsetof(struct hpt_iop_request_header, context),
672 (u_int32_t *)&temp64, 2);
673 if (temp64)
674 return 0;
675 DELAY(1000);
676 }
677
678 return -1;
679}
680
681static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
682 void *req, u_int32_t millisec)
683{
684 u_int32_t i;
685 u_int64_t phy_addr;
686 hba->config_done = 0;
687
688 phy_addr = hba->ctlcfgcmd_phy |
689 (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
690 ((struct hpt_iop_request_get_config *)req)->header.flags |=
691 IOP_REQUEST_FLAG_SYNC_REQUEST |
692 IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
693 hptiop_mv_inbound_write(phy_addr, hba);
694 BUS_SPACE_RD4_MV0(outbound_intmask);
695
696 for (i = 0; i < millisec; i++) {
697 hptiop_intr_mv(hba);
698 if (hba->config_done)
699 return 0;
700 DELAY(1000);
701 }
702 return -1;
703}
704
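/*
 * Synchronous message exchange: post the message and poll the interrupt
 * handler until hptiop_os_message_callback() marks hba->msg_done.
 */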
705static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
706 u_int32_t msg, u_int32_t millisec)
707{
708 u_int32_t i;
709
710 hba->msg_done = 0;
711 hba->ops->post_msg(hba, msg);
712
713 for (i=0; i<millisec; i++) {
714 hba->ops->iop_intr(hba);
715 if (hba->msg_done)
716 break;
717 DELAY(1000);
718 }
719
720	return hba->msg_done ? 0 : -1;
721}
722
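/*
 * GET_CONFIG on ITL: borrow a request slot from the inbound queue,
 * write the header through BAR0, wait synchronously, then read the
 * whole reply back from the same slot before returning it to the IOP.
 */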
723static int hptiop_get_config_itl(struct hpt_iop_hba * hba,
724 struct hpt_iop_request_get_config * config)
725{
726 u_int32_t req32;
727
728 config->header.size = sizeof(struct hpt_iop_request_get_config);
729 config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
730 config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
731 config->header.result = IOP_RESULT_PENDING;
732 config->header.context = 0;
733
734 req32 = BUS_SPACE_RD4_ITL(inbound_queue);
735 if (req32 == IOPMU_QUEUE_EMPTY)
736 return -1;
737
738 bus_space_write_region_4(hba->bar0t, hba->bar0h,
739 req32, (u_int32_t *)config,
740 sizeof(struct hpt_iop_request_header) >> 2);
741
742 if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
743 KdPrint(("hptiop: get config send cmd failed"));
744 return -1;
745 }
746
747 bus_space_read_region_4(hba->bar0t, hba->bar0h,
748 req32, (u_int32_t *)config,
749 sizeof(struct hpt_iop_request_get_config) >> 2);
750
751 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
752
753 return 0;
754}
755
756static int hptiop_get_config_mv(struct hpt_iop_hba * hba,
757 struct hpt_iop_request_get_config * config)
758{
759 struct hpt_iop_request_get_config *req;
760
761 if (!(req = hba->ctlcfg_ptr))
762 return -1;
763
764 req->header.flags = 0;
765 req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
766 req->header.size = sizeof(struct hpt_iop_request_get_config);
767 req->header.result = IOP_RESULT_PENDING;
768 req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;
769
770 if (hptiop_send_sync_request_mv(hba, req, 20000)) {
771 KdPrint(("hptiop: get config send cmd failed"));
772 return -1;
773 }
774
775 *config = *req;
776 return 0;
777}
778
779static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
780 struct hpt_iop_request_set_config *config)
781{
782 u_int32_t req32;
783
784 req32 = BUS_SPACE_RD4_ITL(inbound_queue);
785
786 if (req32 == IOPMU_QUEUE_EMPTY)
787 return -1;
788
789 config->header.size = sizeof(struct hpt_iop_request_set_config);
790 config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
791 config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
792 config->header.result = IOP_RESULT_PENDING;
793 config->header.context = 0;
794
795 bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
796 (u_int32_t *)config,
797 sizeof(struct hpt_iop_request_set_config) >> 2);
798
799 if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
800 KdPrint(("hptiop: set config send cmd failed"));
801 return -1;
802 }
803
804 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
805
806 return 0;
807}
808
809static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
810 struct hpt_iop_request_set_config *config)
811{
812 struct hpt_iop_request_set_config *req;
813
814 if (!(req = hba->ctlcfg_ptr))
815 return -1;
816
817 memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
818 (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
819 sizeof(struct hpt_iop_request_set_config) -
820 sizeof(struct hpt_iop_request_header));
821
822 req->header.flags = 0;
823 req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
824 req->header.size = sizeof(struct hpt_iop_request_set_config);
825 req->header.result = IOP_RESULT_PENDING;
826 req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;
827
828 if (hptiop_send_sync_request_mv(hba, req, 20000)) {
829 KdPrint(("hptiop: set config send cmd failed"));
830 return -1;
831 }
832
833 return 0;
834}
835
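/*
 * Ioctl requests on ITL share one frame: the input buffer, rounded up
 * to a 4-byte boundary, is followed by the output buffer, and both must
 * fit within max_request_size minus the fixed ioctl header.
 */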
836static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
837 u_int32_t req32,
838 struct hpt_iop_ioctl_param *pParams)
839{
840 u_int64_t temp64;
841 struct hpt_iop_request_ioctl_command req;
842
843 if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
844 (hba->max_request_size -
845 offsetof(struct hpt_iop_request_ioctl_command, buf))) {
846 device_printf(hba->pcidev, "request size beyond max value");
847 return -1;
848 }
849
850 req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
851 + pParams->nInBufferSize;
852 req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
853 req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
854 req.header.result = IOP_RESULT_PENDING;
855 req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
856 req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
857 req.inbuf_size = pParams->nInBufferSize;
858 req.outbuf_size = pParams->nOutBufferSize;
859 req.bytes_returned = 0;
860
861 bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
862 offsetof(struct hpt_iop_request_ioctl_command, buf)>>2);
863
864 hptiop_lock_adapter(hba);
865
866 BUS_SPACE_WRT4_ITL(inbound_queue, req32);
867 BUS_SPACE_RD4_ITL(outbound_intstatus);
868
869 bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
870 offsetof(struct hpt_iop_request_ioctl_command, header.context),
871 (u_int32_t *)&temp64, 2);
872 while (temp64) {
873 if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
874 PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
875 break;
876 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
877 bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 +
878 offsetof(struct hpt_iop_request_ioctl_command,
879 header.context),
880 (u_int32_t *)&temp64, 2);
881 }
882
883 hptiop_unlock_adapter(hba);
884 return 0;
885}
886
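/* Byte-wise copies between user memory and the BAR0 request window. */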
887static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size)
888{
889 unsigned char byte;
890 int i;
891
892 for (i=0; i<size; i++) {
893 if (copyin((u_int8_t *)user + i, &byte, 1))
894 return -1;
895 bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
896 }
897
898 return 0;
899}
900
901static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size)
902{
903 unsigned char byte;
904 int i;
905
906 for (i=0; i<size; i++) {
907 byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
908 if (copyout(&byte, (u_int8_t *)user + i, 1))
909 return -1;
910 }
911
912 return 0;
913}
914
915static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
916 struct hpt_iop_ioctl_param * pParams)
917{
918 u_int32_t req32;
919 u_int32_t result;
920
921 if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
922 (pParams->Magic != HPT_IOCTL_MAGIC32))
923 return EFAULT;
924
925 req32 = BUS_SPACE_RD4_ITL(inbound_queue);
926 if (req32 == IOPMU_QUEUE_EMPTY)
927 return EFAULT;
928
929 if (pParams->nInBufferSize)
930 if (hptiop_bus_space_copyin(hba, req32 +
931 offsetof(struct hpt_iop_request_ioctl_command, buf),
932 (void *)pParams->lpInBuffer, pParams->nInBufferSize))
933 goto invalid;
934
935 if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
936 goto invalid;
937
938 result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
939 offsetof(struct hpt_iop_request_ioctl_command,
940 header.result));
941
942 if (result == IOP_RESULT_SUCCESS) {
943 if (pParams->nOutBufferSize)
944 if (hptiop_bus_space_copyout(hba, req32 +
945 offsetof(struct hpt_iop_request_ioctl_command, buf) +
946 ((pParams->nInBufferSize + 3) & ~3),
947 (void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
948 goto invalid;
949
950 if (pParams->lpBytesReturned) {
951 if (hptiop_bus_space_copyout(hba, req32 +
952 offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
953 (void *)pParams->lpBytesReturned, sizeof(unsigned long)))
954 goto invalid;
955 }
956
957 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
958
959 return 0;
960	} else {
961invalid:
962 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
963
964 return EFAULT;
965 }
966}
967
968static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
969 struct hpt_iop_request_ioctl_command *req,
970 struct hpt_iop_ioctl_param *pParams)
971{
972 u_int64_t req_phy;
973 int size = 0;
974
975 if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
976 (hba->max_request_size -
977 offsetof(struct hpt_iop_request_ioctl_command, buf))) {
978 device_printf(hba->pcidev, "request size beyond max value");
979 return -1;
980 }
981
982 req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
983 req->inbuf_size = pParams->nInBufferSize;
984 req->outbuf_size = pParams->nOutBufferSize;
985 req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
986 + pParams->nInBufferSize;
987 req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
988 req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
989 req->header.result = IOP_RESULT_PENDING;
990 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
991 size = req->header.size >> 8;
992 size = size > 3 ? 3 : size;
993 req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
994 hptiop_mv_inbound_write(req_phy, hba);
995
996 BUS_SPACE_RD4_MV0(outbound_intmask);
997
998 while (hba->config_done == 0) {
999 if (hptiop_sleep(hba, req, PPAUSE,
1000 "hptctl", HPT_OSM_TIMEOUT)==0)
1001 continue;
1002 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1003 }
1004 return 0;
1005}
1006
1007static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
1008 struct hpt_iop_ioctl_param *pParams)
1009{
1010 struct hpt_iop_request_ioctl_command *req;
1011
1012 if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1013 (pParams->Magic != HPT_IOCTL_MAGIC32))
1014 return EFAULT;
1015
1016 req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1017 hba->config_done = 0;
1018 hptiop_lock_adapter(hba);
1019 if (pParams->nInBufferSize)
1020 if (copyin((void *)pParams->lpInBuffer,
1021 req->buf, pParams->nInBufferSize))
1022 goto invalid;
1023 if (hptiop_post_ioctl_command_mv(hba, req, pParams))
1024 goto invalid;
1025
1026 if (hba->config_done == 1) {
1027 if (pParams->nOutBufferSize)
1028 if (copyout(req->buf +
1029 ((pParams->nInBufferSize + 3) & ~3),
1030 (void *)pParams->lpOutBuffer,
1031 pParams->nOutBufferSize))
1032 goto invalid;
1033
1034 if (pParams->lpBytesReturned)
1035 if (copyout(&req->bytes_returned,
1036 (void*)pParams->lpBytesReturned,
1037 sizeof(u_int32_t)))
1038 goto invalid;
1039 hptiop_unlock_adapter(hba);
1040 return 0;
1041	} else {
1042invalid:
1043 hptiop_unlock_adapter(hba);
1044 return EFAULT;
1045 }
1046}
1047
1048static int hptiop_rescan_bus(struct hpt_iop_hba * hba)
1049{
1050 union ccb *ccb;
1051
1052 if ((ccb = xpt_alloc_ccb()) == NULL)
1053 return(ENOMEM);
1054 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(hba->sim),
1055 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1056 xpt_free_ccb(ccb);
1057 return(EIO);
1058 }
1059 xpt_rescan(ccb);
1060 return(0);
1061}
1062
1063static bus_dmamap_callback_t hptiop_map_srb;
1064static bus_dmamap_callback_t hptiop_post_scsi_command;
1065static bus_dmamap_callback_t hptiop_mv_map_ctlcfg;
1066
1067static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
1068{
1069 hba->bar0_rid = 0x10;
1070 hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1071 SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1072
1073 if (hba->bar0_res == NULL) {
1074 device_printf(hba->pcidev,
1075			"failed to get iop base address.\n");
1076 return -1;
1077 }
1078 hba->bar0t = rman_get_bustag(hba->bar0_res);
1079 hba->bar0h = rman_get_bushandle(hba->bar0_res);
1080 hba->u.itl.mu = (struct hpt_iopmu_itl *)
1081 rman_get_virtual(hba->bar0_res);
1082
1083 if (!hba->u.itl.mu) {
1084 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1085 hba->bar0_rid, hba->bar0_res);
1086 device_printf(hba->pcidev, "alloc mem res failed\n");
1087 return -1;
1088 }
1089
1090 return 0;
1091}
1092
1093static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
1094{
1095 hba->bar0_rid = 0x10;
1096 hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1097 SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1098
1099 if (hba->bar0_res == NULL) {
1100 device_printf(hba->pcidev, "failed to get iop bar0.\n");
1101 return -1;
1102 }
1103 hba->bar0t = rman_get_bustag(hba->bar0_res);
1104 hba->bar0h = rman_get_bushandle(hba->bar0_res);
1105 hba->u.mv.regs = (struct hpt_iopmv_regs *)
1106 rman_get_virtual(hba->bar0_res);
1107
1108 if (!hba->u.mv.regs) {
1109 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1110 hba->bar0_rid, hba->bar0_res);
1111 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1112 return -1;
1113 }
1114
1115 hba->bar2_rid = 0x18;
1116 hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1117 SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1118
1119 if (hba->bar2_res == NULL) {
1120 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1121 hba->bar0_rid, hba->bar0_res);
1122 device_printf(hba->pcidev, "failed to get iop bar2.\n");
1123 return -1;
1124 }
1125
1126 hba->bar2t = rman_get_bustag(hba->bar2_res);
1127 hba->bar2h = rman_get_bushandle(hba->bar2_res);
1128 hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);
1129
1130 if (!hba->u.mv.mu) {
1131 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1132 hba->bar0_rid, hba->bar0_res);
1133 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1134 hba->bar2_rid, hba->bar2_res);
1135 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1136 return -1;
1137 }
1138
1139 return 0;
1140}
1141
1142static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
1143{
1144 if (hba->bar0_res)
1145 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1146 hba->bar0_rid, hba->bar0_res);
1147}
1148
1149static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
1150{
1151 if (hba->bar0_res)
1152 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1153 hba->bar0_rid, hba->bar0_res);
1154 if (hba->bar2_res)
1155 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1156 hba->bar2_rid, hba->bar2_res);
1157}
1158
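/*
 * Allocate the small DMA area the MV interface uses for control and
 * config requests; hptiop_mv_map_ctlcfg() later rounds both its bus and
 * kernel addresses up to a 32-byte boundary.
 */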
1159static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
1160{
1161 if (bus_dma_tag_create(hba->parent_dmat,
1162 1,
1163 0,
1164 BUS_SPACE_MAXADDR_32BIT,
1165 BUS_SPACE_MAXADDR,
1166 NULL, NULL,
1167 0x800 - 0x8,
1168 1,
1169 BUS_SPACE_MAXSIZE_32BIT,
1170 BUS_DMA_ALLOCNOW,
1171#if __FreeBSD_version > 502000
1172 NULL,
1173 NULL,
1174#endif
1175 &hba->ctlcfg_dmat)) {
1176 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1177 return -1;
1178 }
1179
1180 if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1181#if __FreeBSD_version>501000
1182 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1183#else
1184 BUS_DMA_WAITOK,
1185#endif
1186 &hba->ctlcfg_dmamap) != 0) {
1187 device_printf(hba->pcidev,
1188 "bus_dmamem_alloc failed!\n");
1189 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1190 return -1;
1191 }
1192
1193 if (bus_dmamap_load(hba->ctlcfg_dmat,
1194 hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1195 MVIOP_IOCTLCFG_SIZE,
1196 hptiop_mv_map_ctlcfg, hba, 0)) {
1197 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1198 if (hba->ctlcfg_dmat)
1199 bus_dmamem_free(hba->ctlcfg_dmat,
1200 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1201 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1202 return -1;
1203 }
1204
1205 return 0;
1206}
1207
1208static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
1209{
1210 if (hba->ctlcfg_dmat) {
1211 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1212 bus_dmamem_free(hba->ctlcfg_dmat,
1213 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1214 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1215 }
1216
1217 return 0;
1218}
1219
1220/*
1221 * CAM driver interface
1222 */
1223static device_method_t driver_methods[] = {
1224 /* Device interface */
1225 DEVMETHOD(device_probe, hptiop_probe),
1226 DEVMETHOD(device_attach, hptiop_attach),
1227 DEVMETHOD(device_detach, hptiop_detach),
1228 DEVMETHOD(device_shutdown, hptiop_shutdown),
1229 { 0, 0 }
1230};
1231
1232static struct hptiop_adapter_ops hptiop_itl_ops = {
1233 .iop_wait_ready = hptiop_wait_ready_itl,
1234 .internal_memalloc = 0,
1235 .internal_memfree = 0,
1236 .alloc_pci_res = hptiop_alloc_pci_res_itl,
1237 .release_pci_res = hptiop_release_pci_res_itl,
1238 .enable_intr = hptiop_enable_intr_itl,
1239 .disable_intr = hptiop_disable_intr_itl,
1240 .get_config = hptiop_get_config_itl,
1241 .set_config = hptiop_set_config_itl,
1242 .iop_intr = hptiop_intr_itl,
1243 .post_msg = hptiop_post_msg_itl,
1244 .post_req = hptiop_post_req_itl,
1245 .do_ioctl = hptiop_do_ioctl_itl,
1246};
1247
1248static struct hptiop_adapter_ops hptiop_mv_ops = {
1249 .iop_wait_ready = hptiop_wait_ready_mv,
1250 .internal_memalloc = hptiop_internal_memalloc_mv,
1251 .internal_memfree = hptiop_internal_memfree_mv,
1252 .alloc_pci_res = hptiop_alloc_pci_res_mv,
1253 .release_pci_res = hptiop_release_pci_res_mv,
1254 .enable_intr = hptiop_enable_intr_mv,
1255 .disable_intr = hptiop_disable_intr_mv,
1256 .get_config = hptiop_get_config_mv,
1257 .set_config = hptiop_set_config_mv,
1258 .iop_intr = hptiop_intr_mv,
1259 .post_msg = hptiop_post_msg_mv,
1260 .post_req = hptiop_post_req_mv,
1261 .do_ioctl = hptiop_do_ioctl_mv,
1262};
1263
1264static driver_t hptiop_pci_driver = {
1265 driver_name,
1266 driver_methods,
1267 sizeof(struct hpt_iop_hba)
1268};
1269
1270DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0);
1271MODULE_DEPEND(hptiop, cam, 1, 1, 1);
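/*
 * PCI probe: HighPoint is vendor 0x1103.  The 0x43xx device IDs are SAS
 * parts; they and the listed 0x3xxx SATA IDs use the ITL interface,
 * while 0x3120/0x3122/0x3020 use the Marvell-based MV interface.
 */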
1272static int hptiop_probe(device_t dev)
1273{
1274 struct hpt_iop_hba *hba;
1275 u_int32_t id;
1276 static char buf[256];
1277 int sas = 0;
1278 struct hptiop_adapter_ops *ops;
1279
1280 if (pci_get_vendor(dev) != 0x1103)
1281 return (ENXIO);
1282
1283 id = pci_get_device(dev);
1284
1285 switch (id) {
1286 case 0x4322:
1287 case 0x4321:
1288 case 0x4320:
1289 sas = 1;
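		/* FALLTHROUGH - SAS IDs share the ITL interface */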
1290 case 0x3220:
1291 case 0x3320:
1292 case 0x3410:
1293 case 0x3520:
1294 case 0x3510:
1295 case 0x3511:
1296 case 0x3521:
1297 case 0x3522:
1298 case 0x3540:
1299 ops = &hptiop_itl_ops;
1300 break;
1301 case 0x3120:
1302 case 0x3122:
1303 case 0x3020:
1304 ops = &hptiop_mv_ops;
1305 break;
1306 default:
1307 return (ENXIO);
1308 }
1309
1310 device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
1311 pci_get_bus(dev), pci_get_slot(dev),
1312 pci_get_function(dev), pci_get_irq(dev));
1313
1314	sprintf(buf, "RocketRAID %x %s Controller",
1315 id, sas ? "SAS" : "SATA");
1316 device_set_desc_copy(dev, buf);
1317
1318 hba = (struct hpt_iop_hba *)device_get_softc(dev);
1319 bzero(hba, sizeof(struct hpt_iop_hba));
1320 hba->ops = ops;
1321
1322 KdPrint(("hba->ops=%p\n", hba->ops));
1323 return 0;
1324}
1325
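/*
 * Attach: map the BARs, wait for the IOP, size the DMA tags from the
 * firmware's GET_CONFIG reply, carve out the SRB pool, register with
 * CAM, hook the interrupt, and create the ioctl device node.
 */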
1326static int hptiop_attach(device_t dev)
1327{
1328 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1329 struct hpt_iop_request_get_config iop_config;
1330 struct hpt_iop_request_set_config set_config;
1331 int rid = 0;
1332 struct cam_devq *devq;
1333 struct ccb_setasync ccb;
1334 u_int32_t unit = device_get_unit(dev);
1335
1336 device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
1337 unit, driver_version);
1338
1339 KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1340 pci_get_bus(dev), pci_get_slot(dev),
1341 pci_get_function(dev), hba->ops));
1342
1343#if __FreeBSD_version >=440000
1344 pci_enable_busmaster(dev);
1345#endif
1346 hba->pcidev = dev;
1347 hba->pciunit = unit;
1348
1349 if (hba->ops->alloc_pci_res(hba))
1350 return ENXIO;
1351
1352 if (hba->ops->iop_wait_ready(hba, 2000)) {
1353 device_printf(dev, "adapter is not ready\n");
1354 goto release_pci_res;
1355 }
1356
1357#if (__FreeBSD_version >= 500000)
1358 mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
1359#endif
1360
1361 if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */
1362 1, /* alignment */
1363 0, /* boundary */
1364 BUS_SPACE_MAXADDR, /* lowaddr */
1365 BUS_SPACE_MAXADDR, /* highaddr */
1366 NULL, NULL, /* filter, filterarg */
1367 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1368 BUS_SPACE_UNRESTRICTED, /* nsegments */
1369 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1370 0, /* flags */
1371#if __FreeBSD_version>502000
1372 NULL, /* lockfunc */
1373 NULL, /* lockfuncarg */
1374#endif
1375 &hba->parent_dmat /* tag */))
1376 {
1377 device_printf(dev, "alloc parent_dmat failed\n");
1378 goto release_pci_res;
1379 }
1380
1381 if (hba->ops->internal_memalloc) {
1382 if (hba->ops->internal_memalloc(hba)) {
1383 device_printf(dev, "alloc srb_dmat failed\n");
1384 goto destroy_parent_tag;
1385 }
1386 }
1387
1388 if (hba->ops->get_config(hba, &iop_config)) {
1389 device_printf(dev, "get iop config failed.\n");
1390 goto get_config_failed;
1391 }
1392
1393 hba->firmware_version = iop_config.firmware_version;
1394 hba->interface_version = iop_config.interface_version;
1395 hba->max_requests = iop_config.max_requests;
1396 hba->max_devices = iop_config.max_devices;
1397 hba->max_request_size = iop_config.request_size;
1398 hba->max_sg_count = iop_config.max_sg_count;
1399
1400 if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1401 4, /* alignment */
1402 BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1403 BUS_SPACE_MAXADDR, /* lowaddr */
1404 BUS_SPACE_MAXADDR, /* highaddr */
1405 NULL, NULL, /* filter, filterarg */
1406 PAGE_SIZE * (hba->max_sg_count-1), /* maxsize */
1407 hba->max_sg_count, /* nsegments */
1408 0x20000, /* maxsegsize */
1409 BUS_DMA_ALLOCNOW, /* flags */
1410#if __FreeBSD_version>502000
1411 busdma_lock_mutex, /* lockfunc */
1412 &hba->lock, /* lockfuncarg */
1413#endif
1414 &hba->io_dmat /* tag */))
1415 {
1416 device_printf(dev, "alloc io_dmat failed\n");
1417 goto get_config_failed;
1418 }
1419
1420 if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1421 1, /* alignment */
1422 0, /* boundary */
1423 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1424 BUS_SPACE_MAXADDR, /* highaddr */
1425 NULL, NULL, /* filter, filterarg */
1426 HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
1427 1, /* nsegments */
1428 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1429 0, /* flags */
1430#if __FreeBSD_version>502000
1431 NULL, /* lockfunc */
1432 NULL, /* lockfuncarg */
1433#endif
1434 &hba->srb_dmat /* tag */))
1435 {
1436 device_printf(dev, "alloc srb_dmat failed\n");
1437 goto destroy_io_dmat;
1438 }
1439
1440 if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1441#if __FreeBSD_version>501000
1442 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1443#else
1444 BUS_DMA_WAITOK,
1445#endif
1446 &hba->srb_dmamap) != 0)
1447 {
1448 device_printf(dev, "srb bus_dmamem_alloc failed!\n");
1449 goto destroy_srb_dmat;
1450 }
1451
1452 if (bus_dmamap_load(hba->srb_dmat,
1453 hba->srb_dmamap, hba->uncached_ptr,
1454 (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
1455 hptiop_map_srb, hba, 0))
1456 {
1457 device_printf(dev, "bus_dmamap_load failed!\n");
1458 goto srb_dmamem_free;
1459 }
1460
1461 if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) {
1462 device_printf(dev, "cam_simq_alloc failed\n");
1463 goto srb_dmamap_unload;
1464 }
1465
1466#if __FreeBSD_version <700000
1467 hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
1468 hba, unit, hba->max_requests - 1, 1, devq);
1469#else
1470 hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
1471 hba, unit, &Giant, hba->max_requests - 1, 1, devq);
1472#endif
1473 if (!hba->sim) {
1474 device_printf(dev, "cam_sim_alloc failed\n");
1475 cam_simq_free(devq);
1476 goto srb_dmamap_unload;
1477 }
1478#if __FreeBSD_version <700000
1479 if (xpt_bus_register(hba->sim, 0) != CAM_SUCCESS)
1480#else
1481 if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
1482#endif
1483 {
1484 device_printf(dev, "xpt_bus_register failed\n");
1485 goto free_cam_sim;
1486 }
1487
1488 if (xpt_create_path(&hba->path, /*periph */ NULL,
1489 cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
1490 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1491 device_printf(dev, "xpt_create_path failed\n");
1492 goto deregister_xpt_bus;
1493 }
1494
1495 bzero(&set_config, sizeof(set_config));
1496 set_config.iop_id = unit;
1497 set_config.vbus_id = cam_sim_path(hba->sim);
1498 set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
1499
1500 if (hba->ops->set_config(hba, &set_config)) {
1501 device_printf(dev, "set iop config failed.\n");
1502 goto free_hba_path;
1503 }
1504
1505 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
1506 ccb.ccb_h.func_code = XPT_SASYNC_CB;
1507 ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
1508 ccb.callback = hptiop_async;
1509 ccb.callback_arg = hba->sim;
1510 xpt_action((union ccb *)&ccb);
1511
1512 rid = 0;
1513 if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ,
1514 &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1515 device_printf(dev, "allocate irq failed!\n");
1516 goto free_hba_path;
1517 }
1518
1519#if __FreeBSD_version <700000
1520 if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM,
1521 hptiop_pci_intr, hba, &hba->irq_handle))
1522#else
1523 if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM,
1524 NULL, hptiop_pci_intr, hba, &hba->irq_handle))
1525#endif
1526 {
1527 device_printf(dev, "allocate intr function failed!\n");
1528 goto free_irq_resource;
1529 }
1530
1531 if (hptiop_send_sync_msg(hba,
1532 IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
1533		device_printf(dev, "failed to start background task\n");
1534		goto teardown_irq_resource;
1535 }
1536
1537 hba->ops->enable_intr(hba);
1538
1539 hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
1540 UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
1541 S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);
1542
1543#if __FreeBSD_version < 503000
1544 hba->ioctl_dev->si_drv1 = hba;
1545#endif
1546
1547 return 0;
1548
1549
1550teardown_irq_resource:
1551 bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
1552
1553free_irq_resource:
1554 bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
1555
1556free_hba_path:
1557 xpt_free_path(hba->path);
1558
1559deregister_xpt_bus:
1560 xpt_bus_deregister(cam_sim_path(hba->sim));
1561
1562free_cam_sim:
1563 cam_sim_free(hba->sim, /*free devq*/ TRUE);
1564
1565srb_dmamap_unload:
1566 if (hba->uncached_ptr)
1567 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
1568
1569srb_dmamem_free:
1570 if (hba->uncached_ptr)
1571 bus_dmamem_free(hba->srb_dmat,
1572 hba->uncached_ptr, hba->srb_dmamap);
1573
1574destroy_srb_dmat:
1575 if (hba->srb_dmat)
1576 bus_dma_tag_destroy(hba->srb_dmat);
1577
1578destroy_io_dmat:
1579 if (hba->io_dmat)
1580 bus_dma_tag_destroy(hba->io_dmat);
1581
1582get_config_failed:
1583 if (hba->ops->internal_memfree)
1584 hba->ops->internal_memfree(hba);
1585
1586destroy_parent_tag:
1587 if (hba->parent_dmat)
1588 bus_dma_tag_destroy(hba->parent_dmat);
1589
1590release_pci_res:
1591 if (hba->ops->release_pci_res)
1592 hba->ops->release_pci_res(hba);
1593
1594 return ENXIO;
1595}
1596
1597static int hptiop_detach(device_t dev)
1598{
1599 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
1600 int i;
1601 int error = EBUSY;
1602
1603 hptiop_lock_adapter(hba);
1604 for (i = 0; i < hba->max_devices; i++)
1605 if (hptiop_os_query_remove_device(hba, i)) {
1606 device_printf(dev, "%d file system is busy. id=%d",
1607 hba->pciunit, i);
1608 goto out;
1609 }
1610
1611 if ((error = hptiop_shutdown(dev)) != 0)
1612 goto out;
1613 if (hptiop_send_sync_msg(hba,
1614 IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
1615 goto out;
1616
1617 hptiop_release_resource(hba);
1618 error = 0;
1619out:
1620 hptiop_unlock_adapter(hba);
1621 return error;
1622}
1623
1624static int hptiop_shutdown(device_t dev)
1625{
1626 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
1627
1628 int error = 0;
1629
1630 if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
1631 device_printf(dev, "%d device is busy", hba->pciunit);
1632 return EBUSY;
1633 }
1634
1635 hba->ops->disable_intr(hba);
1636
1637 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
1638 error = EBUSY;
1639
1640 return error;
1641}
1642
1643static void hptiop_pci_intr(void *arg)
1644{
1645 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
1646 hptiop_lock_adapter(hba);
1647 hba->ops->iop_intr(hba);
1648 hptiop_unlock_adapter(hba);
1649}
1650
1651static void hptiop_poll(struct cam_sim *sim)
1652{
1653 hptiop_pci_intr(cam_sim_softc(sim));
1654}
1655
1656static void hptiop_async(void * callback_arg, u_int32_t code,
1657 struct cam_path * path, void * arg)
1658{
1659}
1660
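/*
 * The outbound_intmask registers gate interrupt delivery.  On ITL a set
 * bit masks (disables) a source, so enabling writes the complement of
 * the post-queue and MSG0 bits; the MV register works the other way,
 * with set bits enabling delivery, hence the read-modify-write pairs.
 */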
1661static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
1662{
1663 BUS_SPACE_WRT4_ITL(outbound_intmask,
1664 ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
1665}
1666
1667static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
1668{
1669 u_int32_t int_mask;
1670
1671 int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
1672
1673 int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
1674 | MVIOP_MU_OUTBOUND_INT_MSG;
1675 BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask);
1676}
1677
1678static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
1679{
1680 u_int32_t int_mask;
1681
1682 int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
1683
1684 int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
1685 BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
1686 BUS_SPACE_RD4_ITL(outbound_intstatus);
1687}
1688
1689static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
1690{
1691 u_int32_t int_mask;
1692 int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
1693
1694 int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
1695 | MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
1696 BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask);
1697 BUS_SPACE_RD4_MV0(outbound_intmask);
1698}
1699
1700static int hptiop_reset_adapter(struct hpt_iop_hba * hba)
1701{
1702 return hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1703}
1704
1705static void *hptiop_get_srb(struct hpt_iop_hba * hba)
1706{
1707 struct hpt_iop_srb * srb;
1708
1709 if (hba->srb_list) {
1710 srb = hba->srb_list;
1711 hba->srb_list = srb->next;
1712 return srb;
1713 }
1714
1715 return NULL;
1716}
1717
1718static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
1719{
1720 srb->next = hba->srb_list;
1721 hba->srb_list = srb;
1722}
1723
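/*
 * CAM action entry point.  XPT_SCSI_IO runs under the adapter lock and
 * maps data through io_dmat; physical addresses (CAM_DATA_PHYS) are not
 * supported.  The remaining function codes are answered inline.
 */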
1724static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
1725{
1726 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
1727 struct hpt_iop_srb * srb;
1728
1729 switch (ccb->ccb_h.func_code) {
1730
1731 case XPT_SCSI_IO:
1732 hptiop_lock_adapter(hba);
1733 if (ccb->ccb_h.target_lun != 0 ||
1734 ccb->ccb_h.target_id >= hba->max_devices ||
1735 (ccb->ccb_h.flags & CAM_CDB_PHYS))
1736 {
1737 ccb->ccb_h.status = CAM_TID_INVALID;
1738 xpt_done(ccb);
1739 goto scsi_done;
1740 }
1741
1742 if ((srb = hptiop_get_srb(hba)) == NULL) {
1743			device_printf(hba->pcidev, "srb allocation failed");
1744 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1745 xpt_done(ccb);
1746 goto scsi_done;
1747 }
1748
1749 srb->ccb = ccb;
1750
1751 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
1752 hptiop_post_scsi_command(srb, NULL, 0, 0);
1753 else if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
1754 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
1755 int error;
1756
1757 error = bus_dmamap_load(hba->io_dmat,
1758 srb->dma_map,
1759 ccb->csio.data_ptr,
1760 ccb->csio.dxfer_len,
1761 hptiop_post_scsi_command,
1762 srb, 0);
1763
1764 if (error && error != EINPROGRESS) {
1765 device_printf(hba->pcidev,
1766 "%d bus_dmamap_load error %d",
1767 hba->pciunit, error);
1768 xpt_freeze_simq(hba->sim, 1);
1769 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1770invalid:
1771 hptiop_free_srb(hba, srb);
1772 xpt_done(ccb);
1773 goto scsi_done;
1774 }
1775 }
1776 else {
1777 device_printf(hba->pcidev,
1778 "CAM_DATA_PHYS not supported");
1779 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1780 goto invalid;
1781 }
1782 }
1783 else {
1784 struct bus_dma_segment *segs;
1785
1786 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 ||
1787 (ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
1788 device_printf(hba->pcidev, "SCSI cmd failed");
1789 ccb->ccb_h.status=CAM_PROVIDE_FAIL;
1790 goto invalid;
1791 }
1792
1793 segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
1794 hptiop_post_scsi_command(srb, segs,
1795 ccb->csio.sglist_cnt, 0);
1796 }
1797
1798scsi_done:
1799 hptiop_unlock_adapter(hba);
1800 return;
1801
1802 case XPT_RESET_BUS:
1803 device_printf(hba->pcidev, "reset adapter");
1804 hptiop_lock_adapter(hba);
1805 hba->msg_done = 0;
1806 hptiop_reset_adapter(hba);
1807 hptiop_unlock_adapter(hba);
1808 break;
1809
1810 case XPT_GET_TRAN_SETTINGS:
1811 case XPT_SET_TRAN_SETTINGS:
1812 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1813 break;
1814
1815 case XPT_CALC_GEOMETRY:
1816#if __FreeBSD_version >= 500000
1817 cam_calc_geometry(&ccb->ccg, 1);
1818#else
1819 ccb->ccg.heads = 255;
1820 ccb->ccg.secs_per_track = 63;
1821 ccb->ccg.cylinders = ccb->ccg.volume_size /
1822 (ccb->ccg.heads * ccb->ccg.secs_per_track);
1823 ccb->ccb_h.status = CAM_REQ_CMP;
1824#endif
1825 break;
1826
1827 case XPT_PATH_INQ:
1828 {
1829 struct ccb_pathinq *cpi = &ccb->cpi;
1830
1831 cpi->version_num = 1;
1832 cpi->hba_inquiry = PI_SDTR_ABLE;
1833 cpi->target_sprt = 0;
1834 cpi->hba_misc = PIM_NOBUSRESET;
1835 cpi->hba_eng_cnt = 0;
1836 cpi->max_target = hba->max_devices;
1837 cpi->max_lun = 0;
1838 cpi->unit_number = cam_sim_unit(sim);
1839 cpi->bus_id = cam_sim_bus(sim);
1840 cpi->initiator_id = hba->max_devices;
1841 cpi->base_transfer_speed = 3300;
1842
1843 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1844 strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN);
1845 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1846 cpi->transport = XPORT_SPI;
1847 cpi->transport_version = 2;
1848 cpi->protocol = PROTO_SCSI;
1849 cpi->protocol_version = SCSI_REV_2;
1850 cpi->ccb_h.status = CAM_REQ_CMP;
1851 break;
1852 }
1853
1854 default:
1855 ccb->ccb_h.status = CAM_REQ_INVALID;
1856 break;
1857 }
1858
1859 xpt_done(ccb);
1860 return;
1861}
1862
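/*
 * Post a SCSI command on ITL.  SRBs flagged HPT_SRB_FLAG_HIGH_MEM_ACESS
 * lie outside the IOP's directly addressable range, so the request is
 * built on the stack and copied through BAR0 into a slot borrowed from
 * the inbound queue; otherwise the SRB's bus address is posted directly,
 * with size bits added for firmware/interface versions above 0x01020000.
 */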
1863static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
1864 struct hpt_iop_srb *srb,
1865 bus_dma_segment_t *segs, int nsegs)
1866{
1867 int idx;
1868 union ccb *ccb = srb->ccb;
1869 u_int8_t *cdb;
1870
1871 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
1872 cdb = ccb->csio.cdb_io.cdb_ptr;
1873 else
1874 cdb = ccb->csio.cdb_io.cdb_bytes;
1875
1876 KdPrint(("ccb=%p %x-%x-%x\n",
1877 ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
1878
1879 if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
1880 u_int32_t iop_req32;
1881 struct hpt_iop_request_scsi_command req;
1882
1883 iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1884
1885 if (iop_req32 == IOPMU_QUEUE_EMPTY) {
1886			device_printf(hba->pcidev, "invalid req offset\n");
1887 ccb->ccb_h.status = CAM_BUSY;
1888 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
1889 hptiop_free_srb(hba, srb);
1890 xpt_done(ccb);
1891 return;
1892 }
1893
1894 if (ccb->csio.dxfer_len && nsegs > 0) {
1895 struct hpt_iopsg *psg = req.sg_list;
1896 for (idx = 0; idx < nsegs; idx++, psg++) {
1897 psg->pci_address = (u_int64_t)segs[idx].ds_addr;
1898 psg->size = segs[idx].ds_len;
1899 psg->eot = 0;
1900 }
1901 psg[-1].eot = 1;
1902 }
1903
1904 bcopy(cdb, req.cdb, ccb->csio.cdb_len);
1905
1906 req.header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list)
1907 + nsegs*sizeof(struct hpt_iopsg);
1908 req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
1909 req.header.flags = 0;
1910 req.header.result = IOP_RESULT_PENDING;
1911 req.header.context = (u_int64_t)(unsigned long)srb;
1912 req.dataxfer_length = ccb->csio.dxfer_len;
1913 req.channel = 0;
1914 req.target = ccb->ccb_h.target_id;
1915 req.lun = ccb->ccb_h.target_lun;
1916
1917 bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
1918 (u_int8_t *)&req, req.header.size);
1919
1920 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1921 bus_dmamap_sync(hba->io_dmat,
1922 srb->dma_map, BUS_DMASYNC_PREREAD);
1923 }
1924 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1925 bus_dmamap_sync(hba->io_dmat,
1926 srb->dma_map, BUS_DMASYNC_PREWRITE);
1927
1928 BUS_SPACE_WRT4_ITL(inbound_queue,iop_req32);
1929 } else {
1930 struct hpt_iop_request_scsi_command *req;
1931
1932 req = (struct hpt_iop_request_scsi_command *)srb;
1933 if (ccb->csio.dxfer_len && nsegs > 0) {
1934 struct hpt_iopsg *psg = req->sg_list;
1935 for (idx = 0; idx < nsegs; idx++, psg++) {
1936 psg->pci_address =
1937 (u_int64_t)segs[idx].ds_addr;
1938 psg->size = segs[idx].ds_len;
1939 psg->eot = 0;
1940 }
1941 psg[-1].eot = 1;
1942 }
1943
1944 bcopy(cdb, req->cdb, ccb->csio.cdb_len);
1945
1946 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
1947 req->header.result = IOP_RESULT_PENDING;
1948 req->dataxfer_length = ccb->csio.dxfer_len;
1949 req->channel = 0;
1950 req->target = ccb->ccb_h.target_id;
1951 req->lun = ccb->ccb_h.target_lun;
1952 req->header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list)
1953 + nsegs*sizeof(struct hpt_iopsg);
1954 req->header.context = (u_int64_t)srb->index |
1955 IOPMU_QUEUE_ADDR_HOST_BIT;
1956 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
1957
1958 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1959 bus_dmamap_sync(hba->io_dmat,
1960 srb->dma_map, BUS_DMASYNC_PREREAD);
1961		} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1962 bus_dmamap_sync(hba->io_dmat,
1963 srb->dma_map, BUS_DMASYNC_PREWRITE);
1964 }
1965
1966 if (hba->firmware_version > 0x01020000
1967 || hba->interface_version > 0x01020000) {
1968 u_int32_t size_bits;
1969
1970 if (req->header.size < 256)
1971 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
1972 else if (req->header.size < 512)
1973 size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
1974 else
1975 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
1976 | IOPMU_QUEUE_ADDR_HOST_BIT;
1977
1978 BUS_SPACE_WRT4_ITL(inbound_queue,
1979 (u_int32_t)srb->phy_addr | size_bits);
1980 } else
1981 BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
1982 |IOPMU_QUEUE_ADDR_HOST_BIT);
1983 }
1984}
1985
1986static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
1987 struct hpt_iop_srb *srb,
1988 bus_dma_segment_t *segs, int nsegs)
1989{
1990 int idx, size;
1991 union ccb *ccb = srb->ccb;
1992 u_int8_t *cdb;
1993 struct hpt_iop_request_scsi_command *req;
1994 u_int64_t req_phy;
1995
1996 req = (struct hpt_iop_request_scsi_command *)srb;
1997 req_phy = srb->phy_addr;
1998
1999 if (ccb->csio.dxfer_len && nsegs > 0) {
2000 struct hpt_iopsg *psg = req->sg_list;
2001 for (idx = 0; idx < nsegs; idx++, psg++) {
2002 psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2003 psg->size = segs[idx].ds_len;
2004 psg->eot = 0;
2005 }
2006 psg[-1].eot = 1;
2007 }
2008 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2009 cdb = ccb->csio.cdb_io.cdb_ptr;
2010 else
2011 cdb = ccb->csio.cdb_io.cdb_bytes;
2012
2013 bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2014 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2015 req->header.result = IOP_RESULT_PENDING;
2016 req->dataxfer_length = ccb->csio.dxfer_len;
2017 req->channel = 0;
2018 req->target = ccb->ccb_h.target_id;
2019 req->lun = ccb->ccb_h.target_lun;
2020 req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2021 - sizeof(struct hpt_iopsg)
2022 + nsegs * sizeof(struct hpt_iopsg);
2023 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2024 bus_dmamap_sync(hba->io_dmat,
2025 srb->dma_map, BUS_DMASYNC_PREREAD);
2026 }
2027 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2028 bus_dmamap_sync(hba->io_dmat,
2029 srb->dma_map, BUS_DMASYNC_PREWRITE);
2030 req->header.context = (u_int64_t)srb->index
2031 << MVIOP_REQUEST_NUMBER_START_BIT
2032 | MVIOP_CMD_TYPE_SCSI;
2033 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2034 size = req->header.size >> 8;
2035 hptiop_mv_inbound_write(req_phy
2036 | MVIOP_MU_QUEUE_ADDR_HOST_BIT
2037 | (size > 3 ? 3 : size), hba);
2038}
2039
2040static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
2041 int nsegs, int error)
2042{
2043 struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
2044 union ccb *ccb = srb->ccb;
2045 struct hpt_iop_hba *hba = srb->hba;
2046
2047 if (error || nsegs > hba->max_sg_count) {
2048 KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n",
2049 ccb->ccb_h.func_code,
2050 ccb->ccb_h.target_id,
2051 ccb->ccb_h.target_lun, nsegs));
2052 ccb->ccb_h.status = CAM_BUSY;
2053 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2054 hptiop_free_srb(hba, srb);
2055 xpt_done(ccb);
2056 return;
2057 }
2058
2059 hba->ops->post_req(hba, srb, segs, nsegs);
2060}
2061
2062static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2063 int nsegs, int error)
2064{
2065 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2066 hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
2067 & ~(u_int64_t)0x1F;
2068 hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2069 & ~0x1F);
2070}
2071
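/*
 * Load callback for the SRB pool: the uncached area is carved into
 * HPT_SRB_MAX_SIZE chunks on 32-byte boundaries.  For ITL the bus
 * address is stored shifted right by 5 to fit the queue entry format,
 * and chunks above the 32G support mask take the high-memory post path.
 */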
2072static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
2073 int nsegs, int error)
2074{
2075 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2076 bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
2077 struct hpt_iop_srb *srb, *tmp_srb;
2078 int i;
2079
2080 if (error || nsegs == 0) {
2081 device_printf(hba->pcidev, "hptiop_map_srb error");
2082 return;
2083 }
2084
2085 /* map srb */
2086 srb = (struct hpt_iop_srb *)
2087 (((unsigned long)hba->uncached_ptr + 0x1F)
2088 & ~(unsigned long)0x1F);
2089
2090 for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2091 tmp_srb = (struct hpt_iop_srb *)
2092 ((char *)srb + i * HPT_SRB_MAX_SIZE);
2093 if (((unsigned long)tmp_srb & 0x1F) == 0) {
2094 if (bus_dmamap_create(hba->io_dmat,
2095 0, &tmp_srb->dma_map)) {
2096 device_printf(hba->pcidev, "dmamap create failed");
2097 return;
2098 }
2099
2100 bzero(tmp_srb, sizeof(struct hpt_iop_srb));
2101 tmp_srb->hba = hba;
2102 tmp_srb->index = i;
2103 if (hba->ctlcfg_ptr == 0) {/*itl iop*/
2104 tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
2105 (phy_addr >> 5);
2106 if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
2107 tmp_srb->srb_flag =
2108 HPT_SRB_FLAG_HIGH_MEM_ACESS;
2109 } else {
2110 tmp_srb->phy_addr = phy_addr;
2111 }
2112
2113 hptiop_free_srb(hba, tmp_srb);
2114 hba->srb[i] = tmp_srb;
2115 phy_addr += HPT_SRB_MAX_SIZE;
2116 }
2117 else {
2118 device_printf(hba->pcidev, "invalid alignment");
2119 return;
2120 }
2121 }
2122}
2123
2124static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
2125{
2126 hba->msg_done = 1;
2127}
2128
2129static int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2130 int target_id)
2131{
2132 struct cam_periph *periph = NULL;
2133 struct cam_path *path;
2134 int status, retval = 0;
2135
2136 status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2137
2138 if (status == CAM_REQ_CMP) {
2139 if ((periph = cam_periph_find(path, "da")) != NULL) {
2140 if (periph->refcount >= 1) {
2141 device_printf(hba->pcidev, "%d ,"
2142 "target_id=0x%x,"
2143 "refcount=%d",
2144 hba->pciunit, target_id, periph->refcount);
2145 retval = -1;
2146 }
2147 }
2148 xpt_free_path(path);
2149 }
2150 return retval;
2151}
2152
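/*
 * Tear attach down in reverse: CAM async callback and paths, the
 * control/config DMA area, per-SRB maps, the SRB pool, DMA tags, the
 * interrupt, the BARs, and the ioctl device node.
 */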
2153static void hptiop_release_resource(struct hpt_iop_hba *hba)
2154{
2155 int i;
2156 if (hba->path) {
2157 struct ccb_setasync ccb;
2158
2159 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2160 ccb.ccb_h.func_code = XPT_SASYNC_CB;
2161 ccb.event_enable = 0;
2162 ccb.callback = hptiop_async;
2163 ccb.callback_arg = hba->sim;
2164 xpt_action((union ccb *)&ccb);
2165 xpt_free_path(hba->path);
2166 }
2167
2168 if (hba->sim) {
2169 xpt_bus_deregister(cam_sim_path(hba->sim));
2170 cam_sim_free(hba->sim, TRUE);
2171 }
2172
2173 if (hba->ctlcfg_dmat) {
2174 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2175 bus_dmamem_free(hba->ctlcfg_dmat,
2176 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2177 bus_dma_tag_destroy(hba->ctlcfg_dmat);
2178 }
2179
2180 for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2181 struct hpt_iop_srb *srb = hba->srb[i];
2182		if (srb && srb->dma_map)
2183 bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2184 }
2185
2186 if (hba->srb_dmat) {
2187 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2188 bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2189 bus_dma_tag_destroy(hba->srb_dmat);
2190 }
2191
2192 if (hba->io_dmat)
2193 bus_dma_tag_destroy(hba->io_dmat);
2194
2195 if (hba->parent_dmat)
2196 bus_dma_tag_destroy(hba->parent_dmat);
2197
2198 if (hba->irq_handle)
2199 bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2200
2201 if (hba->irq_res)
2202 bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2203 0, hba->irq_res);
2204
2205 if (hba->bar0_res)
2206 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2207 hba->bar0_rid, hba->bar0_res);
2208 if (hba->bar2_res)
2209 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2210 hba->bar2_rid, hba->bar2_res);
2211 if (hba->ioctl_dev)
2212 destroy_dev(hba->ioctl_dev);
2213}
1272
1273static int hptiop_probe(device_t dev)
1274{
1275 struct hpt_iop_hba *hba;
1276 u_int32_t id;
1277 static char buf[256];
1278 int sas = 0;
1279 struct hptiop_adapter_ops *ops;
1280
1281 if (pci_get_vendor(dev) != 0x1103)
1282 return (ENXIO);
1283
1284 id = pci_get_device(dev);
1285
1286 switch (id) {
1287 case 0x4322:
1288 case 0x4321:
1289 case 0x4320:
1290 sas = 1;
1291 case 0x3220:
1292 case 0x3320:
1293 case 0x3410:
1294 case 0x3520:
1295 case 0x3510:
1296 case 0x3511:
1297 case 0x3521:
1298 case 0x3522:
1299 case 0x3540:
1300 ops = &hptiop_itl_ops;
1301 break;
1302 case 0x3120:
1303 case 0x3122:
1304 case 0x3020:
1305 ops = &hptiop_mv_ops;
1306 break;
1307 default:
1308 return (ENXIO);
1309 }
1310
1311 device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
1312 pci_get_bus(dev), pci_get_slot(dev),
1313 pci_get_function(dev), pci_get_irq(dev));
1314
1315 sprintf(buf, "RocketRAID %x %s Controller\n",
1316 id, sas ? "SAS" : "SATA");
1317 device_set_desc_copy(dev, buf);
1318
1319 hba = (struct hpt_iop_hba *)device_get_softc(dev);
1320 bzero(hba, sizeof(struct hpt_iop_hba));
1321 hba->ops = ops;
1322
1323 KdPrint(("hba->ops=%p\n", hba->ops));
1324 return 0;
1325}
1326
1327static int hptiop_attach(device_t dev)
1328{
1329 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1330 struct hpt_iop_request_get_config iop_config;
1331 struct hpt_iop_request_set_config set_config;
1332 int rid = 0;
1333 struct cam_devq *devq;
1334 struct ccb_setasync ccb;
1335 u_int32_t unit = device_get_unit(dev);
1336
1337 device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
1338 unit, driver_version);
1339
1340 KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1341 pci_get_bus(dev), pci_get_slot(dev),
1342 pci_get_function(dev), hba->ops));
1343
1344#if __FreeBSD_version >=440000
1345 pci_enable_busmaster(dev);
1346#endif
1347 hba->pcidev = dev;
1348 hba->pciunit = unit;
1349
1350 if (hba->ops->alloc_pci_res(hba))
1351 return ENXIO;
1352
1353 if (hba->ops->iop_wait_ready(hba, 2000)) {
1354 device_printf(dev, "adapter is not ready\n");
1355 goto release_pci_res;
1356 }
1357
1358#if (__FreeBSD_version >= 500000)
1359 mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
1360#endif
1361
1362 if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */
1363 1, /* alignment */
1364 0, /* boundary */
1365 BUS_SPACE_MAXADDR, /* lowaddr */
1366 BUS_SPACE_MAXADDR, /* highaddr */
1367 NULL, NULL, /* filter, filterarg */
1368 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1369 BUS_SPACE_UNRESTRICTED, /* nsegments */
1370 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1371 0, /* flags */
1372#if __FreeBSD_version>502000
1373 NULL, /* lockfunc */
1374 NULL, /* lockfuncarg */
1375#endif
1376 &hba->parent_dmat /* tag */))
1377 {
1378 device_printf(dev, "alloc parent_dmat failed\n");
1379 goto release_pci_res;
1380 }
1381
1382 if (hba->ops->internal_memalloc) {
1383 if (hba->ops->internal_memalloc(hba)) {
1384 device_printf(dev, "alloc srb_dmat failed\n");
1385 goto destroy_parent_tag;
1386 }
1387 }
1388
1389 if (hba->ops->get_config(hba, &iop_config)) {
1390 device_printf(dev, "get iop config failed.\n");
1391 goto get_config_failed;
1392 }
1393
1394 hba->firmware_version = iop_config.firmware_version;
1395 hba->interface_version = iop_config.interface_version;
1396 hba->max_requests = iop_config.max_requests;
1397 hba->max_devices = iop_config.max_devices;
1398 hba->max_request_size = iop_config.request_size;
1399 hba->max_sg_count = iop_config.max_sg_count;
1400
1401 if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1402 4, /* alignment */
1403 BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1404 BUS_SPACE_MAXADDR, /* lowaddr */
1405 BUS_SPACE_MAXADDR, /* highaddr */
1406 NULL, NULL, /* filter, filterarg */
1407 PAGE_SIZE * (hba->max_sg_count-1), /* maxsize */
1408 hba->max_sg_count, /* nsegments */
1409 0x20000, /* maxsegsize */
1410 BUS_DMA_ALLOCNOW, /* flags */
1411#if __FreeBSD_version>502000
1412 busdma_lock_mutex, /* lockfunc */
1413 &hba->lock, /* lockfuncarg */
1414#endif
1415 &hba->io_dmat /* tag */))
1416 {
1417 device_printf(dev, "alloc io_dmat failed\n");
1418 goto get_config_failed;
1419 }
1420
1421 if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1422 1, /* alignment */
1423 0, /* boundary */
1424 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1425 BUS_SPACE_MAXADDR, /* highaddr */
1426 NULL, NULL, /* filter, filterarg */
1427 HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
1428 1, /* nsegments */
1429 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1430 0, /* flags */
1431#if __FreeBSD_version>502000
1432 NULL, /* lockfunc */
1433 NULL, /* lockfuncarg */
1434#endif
1435 &hba->srb_dmat /* tag */))
1436 {
1437 device_printf(dev, "alloc srb_dmat failed\n");
1438 goto destroy_io_dmat;
1439 }
1440
1441 if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1442#if __FreeBSD_version>501000
1443 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1444#else
1445 BUS_DMA_WAITOK,
1446#endif
1447 &hba->srb_dmamap) != 0)
1448 {
1449 device_printf(dev, "srb bus_dmamem_alloc failed!\n");
1450 goto destroy_srb_dmat;
1451 }
1452
1453 if (bus_dmamap_load(hba->srb_dmat,
1454 hba->srb_dmamap, hba->uncached_ptr,
1455 (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
1456 hptiop_map_srb, hba, 0))
1457 {
1458 device_printf(dev, "bus_dmamap_load failed!\n");
1459 goto srb_dmamem_free;
1460 }
1461
1462 if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) {
1463 device_printf(dev, "cam_simq_alloc failed\n");
1464 goto srb_dmamap_unload;
1465 }
1466
1467#if __FreeBSD_version <700000
1468 hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
1469 hba, unit, hba->max_requests - 1, 1, devq);
1470#else
1471 hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
1472 hba, unit, &Giant, hba->max_requests - 1, 1, devq);
1473#endif
1474 if (!hba->sim) {
1475 device_printf(dev, "cam_sim_alloc failed\n");
1476 cam_simq_free(devq);
1477 goto srb_dmamap_unload;
1478 }
1479#if __FreeBSD_version <700000
1480 if (xpt_bus_register(hba->sim, 0) != CAM_SUCCESS)
1481#else
1482 if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
1483#endif
1484 {
1485 device_printf(dev, "xpt_bus_register failed\n");
1486 goto free_cam_sim;
1487 }
1488
1489 if (xpt_create_path(&hba->path, /*periph */ NULL,
1490 cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
1491 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1492 device_printf(dev, "xpt_create_path failed\n");
1493 goto deregister_xpt_bus;
1494 }
1495
1496 bzero(&set_config, sizeof(set_config));
1497 set_config.iop_id = unit;
1498 set_config.vbus_id = cam_sim_path(hba->sim);
1499 set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
1500
1501 if (hba->ops->set_config(hba, &set_config)) {
1502 device_printf(dev, "set iop config failed.\n");
1503 goto free_hba_path;
1504 }
1505
1506 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
1507 ccb.ccb_h.func_code = XPT_SASYNC_CB;
1508 ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
1509 ccb.callback = hptiop_async;
1510 ccb.callback_arg = hba->sim;
1511 xpt_action((union ccb *)&ccb);
1512
1513 rid = 0;
1514 if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ,
1515 &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1516 device_printf(dev, "allocate irq failed!\n");
1517 goto free_hba_path;
1518 }
1519
1520#if __FreeBSD_version <700000
1521 if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM,
1522 hptiop_pci_intr, hba, &hba->irq_handle))
1523#else
1524 if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM,
1525 NULL, hptiop_pci_intr, hba, &hba->irq_handle))
1526#endif
1527 {
1528 device_printf(dev, "allocate intr function failed!\n");
1529 goto free_irq_resource;
1530 }
1531
1532 if (hptiop_send_sync_msg(hba,
1533 IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
1534 device_printf(dev, "fail to start background task\n");
1535 goto teartown_irq_resource;
1536 }
1537
1538 hba->ops->enable_intr(hba);
1539
1540 hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
1541 UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
1542 S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);
1543
1544#if __FreeBSD_version < 503000
1545 hba->ioctl_dev->si_drv1 = hba;
1546#endif
1547
1548 return 0;
1549
1550
1551teartown_irq_resource:
1552 bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
1553
1554free_irq_resource:
1555 bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
1556
1557free_hba_path:
1558 xpt_free_path(hba->path);
1559
1560deregister_xpt_bus:
1561 xpt_bus_deregister(cam_sim_path(hba->sim));
1562
1563free_cam_sim:
1564 cam_sim_free(hba->sim, /*free devq*/ TRUE);
1565
1566srb_dmamap_unload:
1567 if (hba->uncached_ptr)
1568 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
1569
1570srb_dmamem_free:
1571 if (hba->uncached_ptr)
1572 bus_dmamem_free(hba->srb_dmat,
1573 hba->uncached_ptr, hba->srb_dmamap);
1574
1575destroy_srb_dmat:
1576 if (hba->srb_dmat)
1577 bus_dma_tag_destroy(hba->srb_dmat);
1578
1579destroy_io_dmat:
1580 if (hba->io_dmat)
1581 bus_dma_tag_destroy(hba->io_dmat);
1582
1583get_config_failed:
1584 if (hba->ops->internal_memfree)
1585 hba->ops->internal_memfree(hba);
1586
1587destroy_parent_tag:
1588 if (hba->parent_dmat)
1589 bus_dma_tag_destroy(hba->parent_dmat);
1590
1591release_pci_res:
1592 if (hba->ops->release_pci_res)
1593 hba->ops->release_pci_res(hba);
1594
1595 return ENXIO;
1596}
1597
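/*
 * Detach: refuse while any "da" peripheral still references one of
 * our targets, then shut the controller down, stop its background
 * task and release all host resources.
 */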
1598static int hptiop_detach(device_t dev)
1599{
1600 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
1601 int i;
1602 int error = EBUSY;
1603
1604 hptiop_lock_adapter(hba);
1605 for (i = 0; i < hba->max_devices; i++)
1606 if (hptiop_os_query_remove_device(hba, i)) {
1607			device_printf(dev, "%d file system is busy. id=%d\n",
1608 hba->pciunit, i);
1609 goto out;
1610 }
1611
1612 if ((error = hptiop_shutdown(dev)) != 0)
1613 goto out;
1614 if (hptiop_send_sync_msg(hba,
1615 IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
1616 goto out;
1617
1618 hptiop_release_resource(hba);
1619 error = 0;
1620out:
1621 hptiop_unlock_adapter(hba);
1622 return error;
1623}
1624
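/*
 * Shutdown: fails with EBUSY while the ioctl device is open;
 * otherwise masks controller interrupts and sends a synchronous
 * SHUTDOWN message to the firmware (60 second timeout).
 */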
1625static int hptiop_shutdown(device_t dev)
1626{
1627 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
1628
1629 int error = 0;
1630
1631 if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
1632		device_printf(dev, "%d device is busy\n", hba->pciunit);
1633 return EBUSY;
1634 }
1635
1636 hba->ops->disable_intr(hba);
1637
1638 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
1639 error = EBUSY;
1640
1641 return error;
1642}
1643
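/* Interrupt handler: run the chip-specific ISR under the adapter lock. */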
1644static void hptiop_pci_intr(void *arg)
1645{
1646 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
1647 hptiop_lock_adapter(hba);
1648 hba->ops->iop_intr(hba);
1649 hptiop_unlock_adapter(hba);
1650}
1651
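/* CAM polling entry point; simply reuses the interrupt handler. */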
1652static void hptiop_poll(struct cam_sim *sim)
1653{
1654 hptiop_pci_intr(cam_sim_softc(sim));
1655}
1656
1657static void hptiop_async(void * callback_arg, u_int32_t code,
1658 struct cam_path * path, void * arg)
1659{
1660}
1661
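/*
 * Chip-specific interrupt mask helpers.  Note the opposite polarity:
 * a set bit in the ITL outbound mask disables that source, while a
 * set bit in the MV mask enables it.  The trailing register reads in
 * the disable paths flush the posted write.
 */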
1662static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
1663{
1664 BUS_SPACE_WRT4_ITL(outbound_intmask,
1665 ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
1666}
1667
1668static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
1669{
1670 u_int32_t int_mask;
1671
1672 int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
1673
1674 int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
1675 | MVIOP_MU_OUTBOUND_INT_MSG;
1676	BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
1677}
1678
1679static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
1680{
1681 u_int32_t int_mask;
1682
1683 int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
1684
1685 int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
1686 BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
1687 BUS_SPACE_RD4_ITL(outbound_intstatus);
1688}
1689
1690static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
1691{
1692 u_int32_t int_mask;
1693 int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
1694
1695 int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
1696 | MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
1697	BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
1698 BUS_SPACE_RD4_MV0(outbound_intmask);
1699}
1700
1701static int hptiop_reset_adapter(struct hpt_iop_hba * hba)
1702{
1703 return hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1704}
1705
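/*
 * SRBs are kept on a singly linked LIFO free list; callers are
 * expected to hold the adapter lock.
 */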
1706static void *hptiop_get_srb(struct hpt_iop_hba * hba)
1707{
1708 struct hpt_iop_srb * srb;
1709
1710 if (hba->srb_list) {
1711 srb = hba->srb_list;
1712 hba->srb_list = srb->next;
1713 return srb;
1714 }
1715
1716 return NULL;
1717}
1718
1719static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
1720{
1721 srb->next = hba->srb_list;
1722 hba->srb_list = srb;
1723}
1724
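/*
 * CAM action entry point.  XPT_SCSI_IO requests are DMA-mapped
 * (virtual buffers only; CAM_DATA_PHYS is rejected) and posted to
 * the IOP; the remaining function codes are completed inline.
 */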
1725static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
1726{
1727 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
1728 struct hpt_iop_srb * srb;
1729
1730 switch (ccb->ccb_h.func_code) {
1731
1732 case XPT_SCSI_IO:
1733 hptiop_lock_adapter(hba);
1734 if (ccb->ccb_h.target_lun != 0 ||
1735 ccb->ccb_h.target_id >= hba->max_devices ||
1736 (ccb->ccb_h.flags & CAM_CDB_PHYS))
1737 {
1738 ccb->ccb_h.status = CAM_TID_INVALID;
1739 xpt_done(ccb);
1740 goto scsi_done;
1741 }
1742
1743 if ((srb = hptiop_get_srb(hba)) == NULL) {
1744			device_printf(hba->pcidev, "srb allocation failed");
1745 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1746 xpt_done(ccb);
1747 goto scsi_done;
1748 }
1749
1750 srb->ccb = ccb;
1751
1752 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
1753 hptiop_post_scsi_command(srb, NULL, 0, 0);
1754 else if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
1755 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
1756 int error;
1757
1758 error = bus_dmamap_load(hba->io_dmat,
1759 srb->dma_map,
1760 ccb->csio.data_ptr,
1761 ccb->csio.dxfer_len,
1762 hptiop_post_scsi_command,
1763 srb, 0);
1764
1765 if (error && error != EINPROGRESS) {
1766 device_printf(hba->pcidev,
1767 "%d bus_dmamap_load error %d",
1768 hba->pciunit, error);
1769 xpt_freeze_simq(hba->sim, 1);
1770 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1771invalid:
1772 hptiop_free_srb(hba, srb);
1773 xpt_done(ccb);
1774 goto scsi_done;
1775 }
1776 }
1777 else {
1778 device_printf(hba->pcidev,
1779 "CAM_DATA_PHYS not supported");
1780 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1781 goto invalid;
1782 }
1783 }
1784 else {
1785 struct bus_dma_segment *segs;
1786
1787 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 ||
1788 (ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
1789				device_printf(hba->pcidev, "unsupported S/G flags");
1790				ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1791 goto invalid;
1792 }
1793
1794 segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
1795 hptiop_post_scsi_command(srb, segs,
1796 ccb->csio.sglist_cnt, 0);
1797 }
1798
1799scsi_done:
1800 hptiop_unlock_adapter(hba);
1801 return;
1802
1803 case XPT_RESET_BUS:
1804 device_printf(hba->pcidev, "reset adapter");
1805 hptiop_lock_adapter(hba);
1806 hba->msg_done = 0;
1807 hptiop_reset_adapter(hba);
1808 hptiop_unlock_adapter(hba);
1809 break;
1810
1811 case XPT_GET_TRAN_SETTINGS:
1812 case XPT_SET_TRAN_SETTINGS:
1813 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1814 break;
1815
1816 case XPT_CALC_GEOMETRY:
1817#if __FreeBSD_version >= 500000
1818 cam_calc_geometry(&ccb->ccg, 1);
1819#else
1820 ccb->ccg.heads = 255;
1821 ccb->ccg.secs_per_track = 63;
1822 ccb->ccg.cylinders = ccb->ccg.volume_size /
1823 (ccb->ccg.heads * ccb->ccg.secs_per_track);
1824 ccb->ccb_h.status = CAM_REQ_CMP;
1825#endif
1826 break;
1827
1828 case XPT_PATH_INQ:
1829 {
1830 struct ccb_pathinq *cpi = &ccb->cpi;
1831
1832 cpi->version_num = 1;
1833 cpi->hba_inquiry = PI_SDTR_ABLE;
1834 cpi->target_sprt = 0;
1835 cpi->hba_misc = PIM_NOBUSRESET;
1836 cpi->hba_eng_cnt = 0;
1837 cpi->max_target = hba->max_devices;
1838 cpi->max_lun = 0;
1839 cpi->unit_number = cam_sim_unit(sim);
1840 cpi->bus_id = cam_sim_bus(sim);
1841 cpi->initiator_id = hba->max_devices;
1842 cpi->base_transfer_speed = 3300;
1843
1844 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1845 strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN);
1846 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1847 cpi->transport = XPORT_SPI;
1848 cpi->transport_version = 2;
1849 cpi->protocol = PROTO_SCSI;
1850 cpi->protocol_version = SCSI_REV_2;
1851 cpi->ccb_h.status = CAM_REQ_CMP;
1852 break;
1853 }
1854
1855 default:
1856 ccb->ccb_h.status = CAM_REQ_INVALID;
1857 break;
1858 }
1859
1860 xpt_done(ccb);
1861 return;
1862}
1863
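/*
 * Post a SCSI command to an ITL-style IOP.  SRBs the IOP cannot
 * address directly (HPT_SRB_FLAG_HIGH_MEM_ACESS) are copied into a
 * request slot taken from the inbound queue through BAR0; otherwise
 * the request is built in the host-resident SRB and only its bus
 * address is posted.
 */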
1864static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
1865 struct hpt_iop_srb *srb,
1866 bus_dma_segment_t *segs, int nsegs)
1867{
1868 int idx;
1869 union ccb *ccb = srb->ccb;
1870 u_int8_t *cdb;
1871
1872 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
1873 cdb = ccb->csio.cdb_io.cdb_ptr;
1874 else
1875 cdb = ccb->csio.cdb_io.cdb_bytes;
1876
1877 KdPrint(("ccb=%p %x-%x-%x\n",
1878 ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
1879
1880 if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
1881 u_int32_t iop_req32;
1882 struct hpt_iop_request_scsi_command req;
1883
1884 iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1885
1886 if (iop_req32 == IOPMU_QUEUE_EMPTY) {
1887			device_printf(hba->pcidev, "invalid req offset\n");
1888 ccb->ccb_h.status = CAM_BUSY;
1889 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
1890 hptiop_free_srb(hba, srb);
1891 xpt_done(ccb);
1892 return;
1893 }
1894
1895 if (ccb->csio.dxfer_len && nsegs > 0) {
1896 struct hpt_iopsg *psg = req.sg_list;
1897 for (idx = 0; idx < nsegs; idx++, psg++) {
1898 psg->pci_address = (u_int64_t)segs[idx].ds_addr;
1899 psg->size = segs[idx].ds_len;
1900 psg->eot = 0;
1901 }
1902 psg[-1].eot = 1;
1903 }
1904
1905 bcopy(cdb, req.cdb, ccb->csio.cdb_len);
1906
1907 req.header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list)
1908			+ nsegs * sizeof(struct hpt_iopsg);
1909 req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
1910 req.header.flags = 0;
1911 req.header.result = IOP_RESULT_PENDING;
1912 req.header.context = (u_int64_t)(unsigned long)srb;
1913 req.dataxfer_length = ccb->csio.dxfer_len;
1914 req.channel = 0;
1915 req.target = ccb->ccb_h.target_id;
1916 req.lun = ccb->ccb_h.target_lun;
1917
1918 bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
1919 (u_int8_t *)&req, req.header.size);
1920
1921 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1922 bus_dmamap_sync(hba->io_dmat,
1923 srb->dma_map, BUS_DMASYNC_PREREAD);
1924 }
1925 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1926 bus_dmamap_sync(hba->io_dmat,
1927 srb->dma_map, BUS_DMASYNC_PREWRITE);
1928
1929		BUS_SPACE_WRT4_ITL(inbound_queue, iop_req32);
1930 } else {
1931 struct hpt_iop_request_scsi_command *req;
1932
1933 req = (struct hpt_iop_request_scsi_command *)srb;
1934 if (ccb->csio.dxfer_len && nsegs > 0) {
1935 struct hpt_iopsg *psg = req->sg_list;
1936 for (idx = 0; idx < nsegs; idx++, psg++) {
1937 psg->pci_address =
1938 (u_int64_t)segs[idx].ds_addr;
1939 psg->size = segs[idx].ds_len;
1940 psg->eot = 0;
1941 }
1942 psg[-1].eot = 1;
1943 }
1944
1945 bcopy(cdb, req->cdb, ccb->csio.cdb_len);
1946
1947 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
1948 req->header.result = IOP_RESULT_PENDING;
1949 req->dataxfer_length = ccb->csio.dxfer_len;
1950 req->channel = 0;
1951 req->target = ccb->ccb_h.target_id;
1952 req->lun = ccb->ccb_h.target_lun;
1953 req->header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list)
1954			+ nsegs * sizeof(struct hpt_iopsg);
1955 req->header.context = (u_int64_t)srb->index |
1956 IOPMU_QUEUE_ADDR_HOST_BIT;
1957 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
1958
1959 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1960 bus_dmamap_sync(hba->io_dmat,
1961 srb->dma_map, BUS_DMASYNC_PREREAD);
1962		} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1963 bus_dmamap_sync(hba->io_dmat,
1964 srb->dma_map, BUS_DMASYNC_PREWRITE);
1965 }
1966
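		/*
		 * Firmware/interface versions above 0x01020000 expect a
		 * request-size class encoded in the low bits of the
		 * posted address.
		 */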
1967 if (hba->firmware_version > 0x01020000
1968 || hba->interface_version > 0x01020000) {
1969 u_int32_t size_bits;
1970
1971 if (req->header.size < 256)
1972 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
1973 else if (req->header.size < 512)
1974 size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
1975 else
1976 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
1977 | IOPMU_QUEUE_ADDR_HOST_BIT;
1978
1979 BUS_SPACE_WRT4_ITL(inbound_queue,
1980 (u_int32_t)srb->phy_addr | size_bits);
1981 } else
1982 BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
1983			| IOPMU_QUEUE_ADDR_HOST_BIT);
1984 }
1985}
1986
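/*
 * Post a SCSI command to an MV-style IOP: the request is built in
 * the host-resident SRB and its bus address, tagged with a size
 * hint in the low bits, is written to the inbound queue.
 */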
1987static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
1988 struct hpt_iop_srb *srb,
1989 bus_dma_segment_t *segs, int nsegs)
1990{
1991 int idx, size;
1992 union ccb *ccb = srb->ccb;
1993 u_int8_t *cdb;
1994 struct hpt_iop_request_scsi_command *req;
1995 u_int64_t req_phy;
1996
1997 req = (struct hpt_iop_request_scsi_command *)srb;
1998 req_phy = srb->phy_addr;
1999
2000 if (ccb->csio.dxfer_len && nsegs > 0) {
2001 struct hpt_iopsg *psg = req->sg_list;
2002 for (idx = 0; idx < nsegs; idx++, psg++) {
2003 psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2004 psg->size = segs[idx].ds_len;
2005 psg->eot = 0;
2006 }
2007 psg[-1].eot = 1;
2008 }
2009 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2010 cdb = ccb->csio.cdb_io.cdb_ptr;
2011 else
2012 cdb = ccb->csio.cdb_io.cdb_bytes;
2013
2014 bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2015 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2016 req->header.result = IOP_RESULT_PENDING;
2017 req->dataxfer_length = ccb->csio.dxfer_len;
2018 req->channel = 0;
2019 req->target = ccb->ccb_h.target_id;
2020 req->lun = ccb->ccb_h.target_lun;
2021 req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2022 - sizeof(struct hpt_iopsg)
2023 + nsegs * sizeof(struct hpt_iopsg);
2024 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2025 bus_dmamap_sync(hba->io_dmat,
2026 srb->dma_map, BUS_DMASYNC_PREREAD);
2027 }
2028 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2029 bus_dmamap_sync(hba->io_dmat,
2030 srb->dma_map, BUS_DMASYNC_PREWRITE);
2031 req->header.context = (u_int64_t)srb->index
2032 << MVIOP_REQUEST_NUMBER_START_BIT
2033 | MVIOP_CMD_TYPE_SCSI;
2034 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2035 size = req->header.size >> 8;
2036 hptiop_mv_inbound_write(req_phy
2037 | MVIOP_MU_QUEUE_ADDR_HOST_BIT
2038 | (size > 3 ? 3 : size), hba);
2039}
2040
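/*
 * bus_dmamap_load() callback: fail the CCB if the mapping failed or
 * needs more S/G segments than the IOP supports, otherwise hand the
 * mapped request to the chip-specific post routine.
 */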
2041static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
2042 int nsegs, int error)
2043{
2044 struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
2045 union ccb *ccb = srb->ccb;
2046 struct hpt_iop_hba *hba = srb->hba;
2047
2048 if (error || nsegs > hba->max_sg_count) {
2049 KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n",
2050 ccb->ccb_h.func_code,
2051 ccb->ccb_h.target_id,
2052 ccb->ccb_h.target_lun, nsegs));
2053 ccb->ccb_h.status = CAM_BUSY;
2054 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2055 hptiop_free_srb(hba, srb);
2056 xpt_done(ccb);
2057 return;
2058 }
2059
2060 hba->ops->post_req(hba, srb, segs, nsegs);
2061}
2062
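/*
 * bus_dmamap_load() callback for the MV control/config buffer:
 * round both the bus and kernel addresses up to a 32-byte boundary.
 */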
2063static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2064 int nsegs, int error)
2065{
2066 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2067 hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
2068 & ~(u_int64_t)0x1F;
2069 hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2070 & ~0x1F);
2071}
2072
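/*
 * bus_dmamap_load() callback for the SRB pool: carve the uncached
 * allocation into 32-byte aligned SRBs, create a per-SRB DMA map
 * and push each SRB onto the free list.
 */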
2073static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
2074 int nsegs, int error)
2075{
2076 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2077 bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
2078 struct hpt_iop_srb *srb, *tmp_srb;
2079 int i;
2080
2081 if (error || nsegs == 0) {
2082 device_printf(hba->pcidev, "hptiop_map_srb error");
2083 return;
2084 }
2085
2086 /* map srb */
2087 srb = (struct hpt_iop_srb *)
2088 (((unsigned long)hba->uncached_ptr + 0x1F)
2089 & ~(unsigned long)0x1F);
2090
2091 for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2092 tmp_srb = (struct hpt_iop_srb *)
2093 ((char *)srb + i * HPT_SRB_MAX_SIZE);
2094 if (((unsigned long)tmp_srb & 0x1F) == 0) {
2095 if (bus_dmamap_create(hba->io_dmat,
2096 0, &tmp_srb->dma_map)) {
2097 device_printf(hba->pcidev, "dmamap create failed");
2098 return;
2099 }
2100
2101 bzero(tmp_srb, sizeof(struct hpt_iop_srb));
2102 tmp_srb->hba = hba;
2103 tmp_srb->index = i;
2104 if (hba->ctlcfg_ptr == 0) {/*itl iop*/
2105 tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
2106 (phy_addr >> 5);
2107 if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
2108 tmp_srb->srb_flag =
2109 HPT_SRB_FLAG_HIGH_MEM_ACESS;
2110 } else {
2111 tmp_srb->phy_addr = phy_addr;
2112 }
2113
2114 hptiop_free_srb(hba, tmp_srb);
2115 hba->srb[i] = tmp_srb;
2116 phy_addr += HPT_SRB_MAX_SIZE;
2117 }
2118 else {
2119 device_printf(hba->pcidev, "invalid alignment");
2120 return;
2121 }
2122 }
2123}
2124
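/* Marks a synchronous IOP message as completed. */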
2125static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
2126{
2127 hba->msg_done = 1;
2128}
2129
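/*
 * Returns nonzero if a "da" peripheral still holds a reference on
 * the given target, i.e. the unit cannot be detached safely.
 */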
2130static int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2131 int target_id)
2132{
2133 struct cam_periph *periph = NULL;
2134 struct cam_path *path;
2135 int status, retval = 0;
2136
2137 status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2138
2139 if (status == CAM_REQ_CMP) {
2140 if ((periph = cam_periph_find(path, "da")) != NULL) {
2141 if (periph->refcount >= 1) {
2142				device_printf(hba->pcidev, "%d, "
2143				    "target_id=0x%x, "
2144				    "refcount=%d\n",
2145				    hba->pciunit, target_id, periph->refcount);
2146 retval = -1;
2147 }
2148 }
2149 xpt_free_path(path);
2150 }
2151 return retval;
2152}
2153
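/*
 * Undo everything attach set up, in reverse order: async callback
 * and path, SIM, DMA maps/memory/tags, interrupt, PCI resources and
 * the ioctl device node.
 */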
2154static void hptiop_release_resource(struct hpt_iop_hba *hba)
2155{
2156 int i;
2157 if (hba->path) {
2158 struct ccb_setasync ccb;
2159
2160 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2161 ccb.ccb_h.func_code = XPT_SASYNC_CB;
2162 ccb.event_enable = 0;
2163 ccb.callback = hptiop_async;
2164 ccb.callback_arg = hba->sim;
2165 xpt_action((union ccb *)&ccb);
2166 xpt_free_path(hba->path);
2167 }
2168
2169 if (hba->sim) {
2170 xpt_bus_deregister(cam_sim_path(hba->sim));
2171 cam_sim_free(hba->sim, TRUE);
2172 }
2173
2174 if (hba->ctlcfg_dmat) {
2175 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2176 bus_dmamem_free(hba->ctlcfg_dmat,
2177 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2178 bus_dma_tag_destroy(hba->ctlcfg_dmat);
2179 }
2180
2181 for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2182 struct hpt_iop_srb *srb = hba->srb[i];
2183 if (srb->dma_map)
2184 bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2185 }
2186
2187 if (hba->srb_dmat) {
2188 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2189 bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2190 bus_dma_tag_destroy(hba->srb_dmat);
2191 }
2192
2193 if (hba->io_dmat)
2194 bus_dma_tag_destroy(hba->io_dmat);
2195
2196 if (hba->parent_dmat)
2197 bus_dma_tag_destroy(hba->parent_dmat);
2198
2199 if (hba->irq_handle)
2200 bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2201
2202 if (hba->irq_res)
2203 bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2204 0, hba->irq_res);
2205
2206 if (hba->bar0_res)
2207 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2208 hba->bar0_rid, hba->bar0_res);
2209 if (hba->bar2_res)
2210 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2211 hba->bar2_rid, hba->bar2_res);
2212 if (hba->ioctl_dev)
2213 destroy_dev(hba->ioctl_dev);
2214}