/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
 * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/cons.h>
#include <sys/time.h>
#include <sys/systm.h>

#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/libkern.h>
#include <sys/kernel.h>

#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/module.h>

#include <sys/eventhandler.h>
#include <sys/bus.h>
#include <sys/taskqueue.h>
#include <sys/ioccom.h>

#include <machine/resource.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/hptiop/hptiop.h>

static const char driver_name[] = "hptiop";
static const char driver_version[] = "v1.9";

static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
				u_int32_t msg, u_int32_t millisec);
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
							u_int32_t req);
static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
							u_int32_t req);
static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams);
static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config);
static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config);
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
			u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams);
static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
static int  hptiop_probe(device_t dev);
static int  hptiop_attach(device_t dev);
static int  hptiop_detach(device_t dev);
static int  hptiop_shutdown(device_t dev);
static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
static void hptiop_poll(struct cam_sim *sim);
static void hptiop_async(void *callback_arg, u_int32_t code,
					struct cam_path *path, void *arg);
static void hptiop_pci_intr(void *arg);
static void hptiop_release_resource(struct hpt_iop_hba *hba);
static void hptiop_reset_adapter(void *argv);
static d_open_t hptiop_open;
static d_close_t hptiop_close;
static d_ioctl_t hptiop_ioctl;

static struct cdevsw hptiop_cdevsw = {
	.d_open = hptiop_open,
	.d_close = hptiop_close,
	.d_ioctl = hptiop_ioctl,
	.d_name = driver_name,
	.d_version = D_VERSION,
};

#define hba_from_dev(dev) \
	((struct hpt_iop_hba *)((dev)->si_drv1))

#define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
#define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))

#define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
#define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
#define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
#define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))

#define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
#define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))

static int hptiop_open(ioctl_dev_t dev, int flags,
					int devtype, ioctl_thread_t proc)
{
	struct hpt_iop_hba *hba = hba_from_dev(dev);

	if (hba==NULL)
		return ENXIO;
	if (hba->flag & HPT_IOCTL_FLAG_OPEN)
		return EBUSY;
	hba->flag |= HPT_IOCTL_FLAG_OPEN;
	return 0;
}

static int hptiop_close(ioctl_dev_t dev, int flags,
					int devtype, ioctl_thread_t proc)
{
	struct hpt_iop_hba *hba = hba_from_dev(dev);
	hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
	return 0;
}

static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
					int flags, ioctl_thread_t proc)
{
	int ret = EFAULT;
	struct hpt_iop_hba *hba = hba_from_dev(dev);

	switch (cmd) {
	case HPT_DO_IOCONTROL:
		ret = hba->ops->do_ioctl(hba,
				(struct hpt_iop_ioctl_param *)data);
		break;
	case HPT_SCAN_BUS:
		ret = hptiop_rescan_bus(hba);
		break;
	}
	return ret;
}

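/*
 * Pop one 64-bit entry from the MV outbound queue in BAR2 MMIO space.
 * The queue is a ring indexed by the outbound_head/outbound_tail
 * registers; head == tail means the queue is empty and 0 is returned.
 * Consuming an entry advances the tail, wrapping at MVIOP_QUEUE_LEN.
 */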
static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
{
	u_int64_t p;
	u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
	u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);

	if (outbound_tail != outbound_head) {
		bus_space_read_region_4(hba->bar2t, hba->bar2h,
			offsetof(struct hpt_iopmu_mv,
				outbound_q[outbound_tail]),
			(u_int32_t *)&p, 2);

		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;

		BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
		return p;
	} else
		return 0;
}

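/*
 * Push a 64-bit request descriptor onto the MV inbound queue, then ring
 * the inbound doorbell so the IOP notices the new entry.  The head
 * pointer wraps at MVIOP_QUEUE_LEN, mirroring the outbound side above.
 */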
static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
{
	u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
	u_int32_t head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	bus_space_write_region_4(hba->bar2t, hba->bar2h,
			offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
			(u_int32_t *)&p, 2);
	BUS_SPACE_WRT4_MV2(inbound_head, head);
	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
}

static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
{
	BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
	BUS_SPACE_RD4_ITL(outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
{

	BUS_SPACE_WRT4_MV2(inbound_msg, msg);
	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);

	BUS_SPACE_RD4_MV0(outbound_intmask);
}

static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
{
	BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
	BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
}

static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec)
{
	u_int32_t req=0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = BUS_SPACE_RD4_ITL(inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		DELAY(1000);
	}

	if (req!=IOPMU_QUEUE_EMPTY) {
		BUS_SPACE_WRT4_ITL(outbound_queue, req);
		BUS_SPACE_RD4_ITL(outbound_intstatus);
		return 0;
	}

	return -1;
}

static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec)
{
	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
		return -1;

	return 0;
}

static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba,
							u_int32_t millisec)
{
	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
		return -1;

	return 0;
}

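/*
 * Completion handler for ITL (Intel-based) adapters.  The low bits of
 * the outbound queue entry distinguish host-allocated requests (SRBs
 * looked up through hba->srb[]) from IOP-allocated requests, whose
 * headers must be read back from BAR0 MMIO space.  SCSI completions
 * from both paths converge on the shared srb_complete/scsi_done code
 * that translates IOP result codes into CAM status values.
 */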
static void hptiop_request_callback_itl(struct hpt_iop_hba * hba,
							u_int32_t index)
{
	struct hpt_iop_srb *srb;
	struct hpt_iop_request_scsi_command *req=NULL;
	union ccb *ccb;
	u_int8_t *cdb;
	u_int32_t result, temp, dxfer;
	u_int64_t temp64;

	if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/
		if (hba->firmware_version > 0x01020000 ||
			hba->interface_version > 0x01020000) {
			srb = hba->srb[index & ~(u_int32_t)
				(IOPMU_QUEUE_ADDR_HOST_BIT
				| IOPMU_QUEUE_REQUEST_RESULT_BIT)];
			req = (struct hpt_iop_request_scsi_command *)srb;
			if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
				result = IOP_RESULT_SUCCESS;
			else
				result = req->header.result;
		} else {
			srb = hba->srb[index &
				~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
			req = (struct hpt_iop_request_scsi_command *)srb;
			result = req->header.result;
		}
		dxfer = req->dataxfer_length;
		goto srb_complete;
	}

	/*iop req*/
	temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
		offsetof(struct hpt_iop_request_header, type));
	result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
		offsetof(struct hpt_iop_request_header, result));
	switch(temp) {
	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
	{
		temp64 = 0;
		bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
			offsetof(struct hpt_iop_request_header, context),
			(u_int32_t *)&temp64, 2);
		wakeup((void *)((unsigned long)hba->u.itl.mu + index));
		break;
	}

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
			offsetof(struct hpt_iop_request_header, context),
			(u_int32_t *)&temp64, 2);
		srb = (struct hpt_iop_srb *)(unsigned long)temp64;
		dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
				index + offsetof(struct hpt_iop_request_scsi_command,
				dataxfer_length));
srb_complete:
		ccb = (union ccb *)srb->ccb;
		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
			cdb = ccb->csio.cdb_io.cdb_ptr;
		else
			cdb = ccb->csio.cdb_io.cdb_bytes;

		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
			ccb->ccb_h.status = CAM_REQ_CMP;
			goto scsi_done;
		}

		switch (result) {
		case IOP_RESULT_SUCCESS:
			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
			case CAM_DIR_IN:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			case CAM_DIR_OUT:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			}

			ccb->ccb_h.status = CAM_REQ_CMP;
			break;

		case IOP_RESULT_BAD_TARGET:
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		case IOP_RESULT_BUSY:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_INVALID_REQUEST:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		case IOP_RESULT_FAIL:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		case IOP_RESULT_RESET:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_CHECK_CONDITION:
			memset(&ccb->csio.sense_data, 0,
			    sizeof(ccb->csio.sense_data));
			if (dxfer < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
				    dxfer;
			else
				ccb->csio.sense_resid = 0;
			if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/
				bus_space_read_region_1(hba->bar0t, hba->bar0h,
					index + offsetof(struct hpt_iop_request_scsi_command,
					sg_list), (u_int8_t *)&ccb->csio.sense_data,
					MIN(dxfer, sizeof(ccb->csio.sense_data)));
			} else {
				memcpy(&ccb->csio.sense_data, &req->sg_list,
					MIN(dxfer, sizeof(ccb->csio.sense_data)));
			}
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			break;
		default:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		}
scsi_done:
		if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
			BUS_SPACE_WRT4_ITL(outbound_queue, index);

		ccb->csio.resid = ccb->csio.dxfer_len - dxfer;

		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
		break;
	}
}

static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
{
	u_int32_t req, temp;

	while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) {
		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			temp = bus_space_read_4(hba->bar0t,
					hba->bar0h,req +
					offsetof(struct hpt_iop_request_header,
						flags));
			if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				u_int64_t temp64;
				bus_space_read_region_4(hba->bar0t,
					hba->bar0h,req +
					offsetof(struct hpt_iop_request_header,
						context),
					(u_int32_t *)&temp64, 2);
				if (temp64) {
					hptiop_request_callback_itl(hba, req);
				} else {
					temp64 = 1;
					bus_space_write_region_4(hba->bar0t,
						hba->bar0h,req +
						offsetof(struct hpt_iop_request_header,
							context),
						(u_int32_t *)&temp64, 2);
				}
			} else
				hptiop_request_callback_itl(hba, req);
		}
	}
}

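/*
 * ITL interrupt handler: acknowledge and dispatch any outbound message,
 * then drain completed requests from the outbound queue.  Returns
 * nonzero if the interrupt was ours.
 */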
static int hptiop_intr_itl(struct hpt_iop_hba * hba)
{
	u_int32_t status;
	int ret = 0;

	status = BUS_SPACE_RD4_ITL(outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
		KdPrint(("hptiop: received outbound msg %x\n", msg));
		BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
		hptiop_os_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}

static void hptiop_request_callback_mv(struct hpt_iop_hba * hba,
							u_int64_t _tag)
{
	u_int32_t context = (u_int32_t)_tag;

	if (context & MVIOP_CMD_TYPE_SCSI) {
		struct hpt_iop_srb *srb;
		struct hpt_iop_request_scsi_command *req;
		union ccb *ccb;
		u_int8_t *cdb;

		srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
		req = (struct hpt_iop_request_scsi_command *)srb;
		ccb = (union ccb *)srb->ccb;
		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
			cdb = ccb->csio.cdb_io.cdb_ptr;
		else
			cdb = ccb->csio.cdb_io.cdb_bytes;

		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
			ccb->ccb_h.status = CAM_REQ_CMP;
			goto scsi_done;
		}
		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
			req->header.result = IOP_RESULT_SUCCESS;

		switch (req->header.result) {
		case IOP_RESULT_SUCCESS:
			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
			case CAM_DIR_IN:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			case CAM_DIR_OUT:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case IOP_RESULT_BAD_TARGET:
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		case IOP_RESULT_BUSY:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_INVALID_REQUEST:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		case IOP_RESULT_FAIL:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		case IOP_RESULT_RESET:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_CHECK_CONDITION:
			memset(&ccb->csio.sense_data, 0,
			    sizeof(ccb->csio.sense_data));
			if (req->dataxfer_length < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
				    req->dataxfer_length;
			else
				ccb->csio.sense_resid = 0;
			memcpy(&ccb->csio.sense_data, &req->sg_list,
				MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			break;
		default:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		}
scsi_done:
		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
	} else if (context & MVIOP_CMD_TYPE_IOCTL) {
		struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
			hba->config_done = 1;
		else
			hba->config_done = -1;
		wakeup(req);
	} else if (context &
			(MVIOP_CMD_TYPE_SET_CONFIG |
				MVIOP_CMD_TYPE_GET_CONFIG))
		hba->config_done = 1;
	else {
		device_printf(hba->pcidev, "wrong callback type\n");
	}
}

static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba,
				u_int32_t _tag)
{
	u_int32_t req_type = _tag & 0xf;

	struct hpt_iop_srb *srb;
	struct hpt_iop_request_scsi_command *req;
	union ccb *ccb;
	u_int8_t *cdb;

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->config_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		srb = hba->srb[(_tag >> 4) & 0xff];
		req = (struct hpt_iop_request_scsi_command *)srb;

		ccb = (union ccb *)srb->ccb;

		callout_stop(&srb->timeout);

		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
			cdb = ccb->csio.cdb_io.cdb_ptr;
		else
			cdb = ccb->csio.cdb_io.cdb_bytes;

		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
			ccb->ccb_h.status = CAM_REQ_CMP;
			goto scsi_done;
		}

		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
			req->header.result = IOP_RESULT_SUCCESS;

		switch (req->header.result) {
		case IOP_RESULT_SUCCESS:
			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
			case CAM_DIR_IN:
				bus_dmamap_sync(hba->io_dmat,
						srb->dma_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			case CAM_DIR_OUT:
				bus_dmamap_sync(hba->io_dmat,
						srb->dma_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case IOP_RESULT_BAD_TARGET:
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		case IOP_RESULT_BUSY:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_INVALID_REQUEST:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		case IOP_RESULT_FAIL:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		case IOP_RESULT_RESET:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_CHECK_CONDITION:
			memset(&ccb->csio.sense_data, 0,
			       sizeof(ccb->csio.sense_data));
			if (req->dataxfer_length < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
				req->dataxfer_length;
			else
				ccb->csio.sense_resid = 0;
			memcpy(&ccb->csio.sense_data, &req->sg_list,
			       MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			break;
		default:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		}
scsi_done:
		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
		break;
	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
			hba->config_done = 1;
		else
			hba->config_done = -1;
		wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
		break;
	default:
		device_printf(hba->pcidev, "wrong callback type\n");
		break;
	}
}

static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)
{
	u_int64_t req;

	while ((req = hptiop_mv_outbound_read(hba))) {
		if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
			if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
				hptiop_request_callback_mv(hba, req);
			}
		}
	}
}

static int hptiop_intr_mv(struct hpt_iop_hba * hba)
{
	u_int32_t status;
	int ret = 0;

	status = BUS_SPACE_RD4_MV0(outbound_doorbell);

	if (status)
		BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
		KdPrint(("hptiop: received outbound msg %x\n", msg));
		hptiop_os_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_mv(hba);
		ret = 1;
	}

	return ret;
}

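/*
 * MVFrey interrupt handler.  Interrupts are masked while the handler
 * runs (once the adapter is initialized), the doorbell and isr_cause
 * registers are acknowledged by writing back what was read, and the
 * outbound completion list is walked from outlist_rptr up to the
 * shadow copy-pointer the firmware maintains in host memory.
 */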
static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba)
{
	u_int32_t status, _tag, cptr;
	int ret = 0;

	if (hba->initialized) {
		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
	}

	status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
	if (status) {
		BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
		if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
			u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);
			hptiop_os_message_callback(hba, msg);
		}
		ret = 1;
	}

	status = BUS_SPACE_RD4_MVFREY2(isr_cause);
	if (status) {
		BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
		do {
			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
			while (hba->u.mvfrey.outlist_rptr != cptr) {
				hba->u.mvfrey.outlist_rptr++;
				if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
					hba->u.mvfrey.outlist_rptr = 0;
				}

				_tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
				hptiop_request_callback_mvfrey(hba, _tag);
				ret = 2;
			}
		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
	}

	if (hba->initialized) {
		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
	}

	return ret;
}

static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,
					u_int32_t req32, u_int32_t millisec)
{
	u_int32_t i;
	u_int64_t temp64;

	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
	BUS_SPACE_RD4_ITL(outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		hptiop_intr_itl(hba);
		bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
			offsetof(struct hpt_iop_request_header, context),
			(u_int32_t *)&temp64, 2);
		if (temp64)
			return 0;
		DELAY(1000);
	}

	return -1;
}

static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
					void *req, u_int32_t millisec)
{
	u_int32_t i;
	u_int64_t phy_addr;
	hba->config_done = 0;

	phy_addr = hba->ctlcfgcmd_phy |
			(u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
	((struct hpt_iop_request_get_config *)req)->header.flags |=
		IOP_REQUEST_FLAG_SYNC_REQUEST |
		IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
	hptiop_mv_inbound_write(phy_addr, hba);
	BUS_SPACE_RD4_MV0(outbound_intmask);

	for (i = 0; i < millisec; i++) {
		hptiop_intr_mv(hba);
		if (hba->config_done)
			return 0;
		DELAY(1000);
	}
	return -1;
}

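/*
 * Synchronously post a control request on the MVFrey inbound list and
 * poll for completion.  The write pointer carries a toggle bit
 * (CL_POINTER_TOGGLE) that flips on every wrap, presumably so the
 * firmware can tell a full list from an empty one.
 */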
static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
					void *req, u_int32_t millisec)
{
	u_int32_t i, index;
	u_int64_t phy_addr;
	struct hpt_iop_request_header *reqhdr =
		(struct hpt_iop_request_header *)req;

	hba->config_done = 0;

	phy_addr = hba->ctlcfgcmd_phy;
	reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
					| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
					| IOP_REQUEST_FLAG_ADDR_BITS
					| ((phy_addr >> 16) & 0xffff0000);
	reqhdr->context = ((phy_addr & 0xffffffff) << 32 )
					| IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;

	hba->u.mvfrey.inlist_wptr++;
	index = hba->u.mvfrey.inlist_wptr & 0x3fff;

	if (index == hba->u.mvfrey.list_count) {
		index = 0;
		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
	}

	hba->u.mvfrey.inlist[index].addr = phy_addr;
	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;

	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);

	for (i = 0; i < millisec; i++) {
		hptiop_intr_mvfrey(hba);
		if (hba->config_done)
			return 0;
		DELAY(1000);
	}
	return -1;
}

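/*
 * Post an inbound message and busy-wait (up to `millisec' ms) for the
 * interrupt path to set hba->msg_done.  Used for NOP/reset style
 * housekeeping messages where sleeping is not always possible.
 */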
static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
					u_int32_t msg, u_int32_t millisec)
{
	u_int32_t i;

	hba->msg_done = 0;
	hba->ops->post_msg(hba, msg);

	for (i=0; i<millisec; i++) {
		hba->ops->iop_intr(hba);
		if (hba->msg_done)
			break;
		DELAY(1000);
	}

	return hba->msg_done? 0 : -1;
}

static int hptiop_get_config_itl(struct hpt_iop_hba * hba,
				struct hpt_iop_request_get_config * config)
{
	u_int32_t req32;

	config->header.size = sizeof(struct hpt_iop_request_get_config);
	config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
	config->header.result = IOP_RESULT_PENDING;
	config->header.context = 0;

	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	bus_space_write_region_4(hba->bar0t, hba->bar0h,
			req32, (u_int32_t *)config,
			sizeof(struct hpt_iop_request_header) >> 2);

	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
		KdPrint(("hptiop: get config send cmd failed"));
		return -1;
	}

	bus_space_read_region_4(hba->bar0t, hba->bar0h,
			req32, (u_int32_t *)config,
			sizeof(struct hpt_iop_request_get_config) >> 2);

	BUS_SPACE_WRT4_ITL(outbound_queue, req32);

	return 0;
}

static int hptiop_get_config_mv(struct hpt_iop_hba * hba,
				struct hpt_iop_request_get_config * config)
{
	struct hpt_iop_request_get_config *req;

	if (!(req = hba->ctlcfg_ptr))
		return -1;

	req->header.flags = 0;
	req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_get_config);
	req->header.result = IOP_RESULT_PENDING;
	req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;

	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
		KdPrint(("hptiop: get config send cmd failed"));
		return -1;
	}

	*config = *req;
	return 0;
}

static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba,
				struct hpt_iop_request_get_config * config)
{
	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
	    info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
		KdPrint(("hptiop: header size %x/%x type %x/%x",
			 info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
			 info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
		return -1;
	}

	config->interface_version = info->interface_version;
	config->firmware_version = info->firmware_version;
	config->max_requests = info->max_requests;
	config->request_size = info->request_size;
	config->max_sg_count = info->max_sg_count;
	config->data_transfer_length = info->data_transfer_length;
	config->alignment_mask = info->alignment_mask;
	config->max_devices = info->max_devices;
	config->sdram_size = info->sdram_size;

	KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
		 config->max_requests, config->request_size,
		 config->data_transfer_length, config->max_devices,
		 config->sdram_size));

	return 0;
}

static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u_int32_t req32;

	req32 = BUS_SPACE_RD4_ITL(inbound_queue);

	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	config->header.size = sizeof(struct hpt_iop_request_set_config);
	config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
	config->header.result = IOP_RESULT_PENDING;
	config->header.context = 0;

	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
		(u_int32_t *)config,
		sizeof(struct hpt_iop_request_set_config) >> 2);

	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
		KdPrint(("hptiop: set config send cmd failed"));
		return -1;
	}

	BUS_SPACE_WRT4_ITL(outbound_queue, req32);

	return 0;
}

static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req;

	if (!(req = hba->ctlcfg_ptr))
		return -1;

	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	req->header.flags = 0;
	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_set_config);
	req->header.result = IOP_RESULT_PENDING;
	req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;

	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
		KdPrint(("hptiop: set config send cmd failed"));
		return -1;
	}

	return 0;
}

static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req;

	if (!(req = hba->ctlcfg_ptr))
		return -1;

	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_set_config);
	req->header.result = IOP_RESULT_PENDING;

	if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
		KdPrint(("hptiop: set config send cmd failed"));
		return -1;
	}

	return 0;
}

static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
				u_int32_t req32,
				struct hpt_iop_ioctl_param *pParams)
{
	u_int64_t temp64;
	struct hpt_iop_request_ioctl_command req;

	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
			(hba->max_request_size -
			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
		device_printf(hba->pcidev, "request size beyond max value");
		return -1;
	}

	req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
		+ pParams->nInBufferSize;
	req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
	req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
	req.header.result = IOP_RESULT_PENDING;
	req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
	req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
	req.inbuf_size = pParams->nInBufferSize;
	req.outbuf_size = pParams->nOutBufferSize;
	req.bytes_returned = 0;

	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
		offsetof(struct hpt_iop_request_ioctl_command, buf)>>2);

	hptiop_lock_adapter(hba);

	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
	BUS_SPACE_RD4_ITL(outbound_intstatus);

	bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
		offsetof(struct hpt_iop_request_ioctl_command, header.context),
		(u_int32_t *)&temp64, 2);
	while (temp64) {
		if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
				PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
			break;
		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
		bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 +
			offsetof(struct hpt_iop_request_ioctl_command,
				header.context),
			(u_int32_t *)&temp64, 2);
	}

	hptiop_unlock_adapter(hba);
	return 0;
}

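/*
 * Byte-wise copies between a user buffer and IOP memory behind BAR0.
 * bus_space has no copyin/copyout primitive, so these helpers bounce
 * each byte through a local variable; ioctl buffers are small enough
 * that the per-byte cost should be tolerable.
 */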
static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
									void *user, int size)
{
	unsigned char byte;
	int i;

	for (i=0; i<size; i++) {
		if (copyin((u_int8_t *)user + i, &byte, 1))
			return -1;
		bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
	}

	return 0;
}

static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
									void *user, int size)
{
	unsigned char byte;
	int i;

	for (i=0; i<size; i++) {
		byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
		if (copyout(&byte, (u_int8_t *)user + i, 1))
			return -1;
	}

	return 0;
}

static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param * pParams)
{
	u_int32_t req32;
	u_int32_t result;

	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
		(pParams->Magic != HPT_IOCTL_MAGIC32))
		return EFAULT;

	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return EFAULT;

	if (pParams->nInBufferSize)
		if (hptiop_bus_space_copyin(hba, req32 +
			offsetof(struct hpt_iop_request_ioctl_command, buf),
			(void *)pParams->lpInBuffer, pParams->nInBufferSize))
			goto invalid;

	if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
		goto invalid;

	result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
			offsetof(struct hpt_iop_request_ioctl_command,
				header.result));

	if (result == IOP_RESULT_SUCCESS) {
		if (pParams->nOutBufferSize)
			if (hptiop_bus_space_copyout(hba, req32 +
				offsetof(struct hpt_iop_request_ioctl_command, buf) +
					((pParams->nInBufferSize + 3) & ~3),
				(void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
				goto invalid;

		if (pParams->lpBytesReturned) {
			if (hptiop_bus_space_copyout(hba, req32 +
				offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
				(void *)pParams->lpBytesReturned, sizeof(unsigned long)))
				goto invalid;
		}

		BUS_SPACE_WRT4_ITL(outbound_queue, req32);

		return 0;
	} else {
invalid:
		BUS_SPACE_WRT4_ITL(outbound_queue, req32);

		return EFAULT;
	}
}

static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams)
{
	u_int64_t req_phy;
	int size = 0;

	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
			(hba->max_request_size -
			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
		device_printf(hba->pcidev, "request size beyond max value");
		return -1;
	}

	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
	req->inbuf_size = pParams->nInBufferSize;
	req->outbuf_size = pParams->nOutBufferSize;
	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
					+ pParams->nInBufferSize;
	req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
	req->header.result = IOP_RESULT_PENDING;
	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
	size = req->header.size >> 8;
	size = imin(3, size);
	req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
	hptiop_mv_inbound_write(req_phy, hba);

	BUS_SPACE_RD4_MV0(outbound_intmask);

	while (hba->config_done == 0) {
		if (hptiop_sleep(hba, req, PPAUSE,
			"hptctl", HPT_OSM_TIMEOUT)==0)
			continue;
		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
	}
	return 0;
}

static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams)
{
	struct hpt_iop_request_ioctl_command *req;

	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
		(pParams->Magic != HPT_IOCTL_MAGIC32))
		return EFAULT;

	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
	hba->config_done = 0;
	hptiop_lock_adapter(hba);
	if (pParams->nInBufferSize)
		if (copyin((void *)pParams->lpInBuffer,
				req->buf, pParams->nInBufferSize))
			goto invalid;
	if (hptiop_post_ioctl_command_mv(hba, req, pParams))
		goto invalid;

	if (hba->config_done == 1) {
		if (pParams->nOutBufferSize)
			if (copyout(req->buf +
				((pParams->nInBufferSize + 3) & ~3),
				(void *)pParams->lpOutBuffer,
				pParams->nOutBufferSize))
				goto invalid;

		if (pParams->lpBytesReturned)
			if (copyout(&req->bytes_returned,
				(void*)pParams->lpBytesReturned,
				sizeof(u_int32_t)))
				goto invalid;
		hptiop_unlock_adapter(hba);
		return 0;
	} else {
invalid:
		hptiop_unlock_adapter(hba);
		return EFAULT;
	}
}

static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams)
{
	u_int64_t phy_addr;
	u_int32_t index;

	phy_addr = hba->ctlcfgcmd_phy;

	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
			(hba->max_request_size -
			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
		device_printf(hba->pcidev, "request size beyond max value");
		return -1;
	}

	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
	req->inbuf_size = pParams->nInBufferSize;
	req->outbuf_size = pParams->nOutBufferSize;
	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
					+ pParams->nInBufferSize;

	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
	req->header.result = IOP_RESULT_PENDING;

	req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
						| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
						| IOP_REQUEST_FLAG_ADDR_BITS
						| ((phy_addr >> 16) & 0xffff0000);
	req->header.context = ((phy_addr & 0xffffffff) << 32 )
						| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;

	hba->u.mvfrey.inlist_wptr++;
	index = hba->u.mvfrey.inlist_wptr & 0x3fff;

	if (index == hba->u.mvfrey.list_count) {
		index = 0;
		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
	}

	hba->u.mvfrey.inlist[index].addr = phy_addr;
	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;

	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);

	while (hba->config_done == 0) {
		if (hptiop_sleep(hba, req, PPAUSE,
			"hptctl", HPT_OSM_TIMEOUT)==0)
			continue;
		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
	}
	return 0;
}

static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams)
{
	struct hpt_iop_request_ioctl_command *req;

	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
		(pParams->Magic != HPT_IOCTL_MAGIC32))
		return EFAULT;

	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
	hba->config_done = 0;
	hptiop_lock_adapter(hba);
	if (pParams->nInBufferSize)
		if (copyin((void *)pParams->lpInBuffer,
				req->buf, pParams->nInBufferSize))
			goto invalid;
	if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
		goto invalid;

	if (hba->config_done == 1) {
		if (pParams->nOutBufferSize)
			if (copyout(req->buf +
				((pParams->nInBufferSize + 3) & ~3),
				(void *)pParams->lpOutBuffer,
				pParams->nOutBufferSize))
				goto invalid;

		if (pParams->lpBytesReturned)
			if (copyout(&req->bytes_returned,
				(void*)pParams->lpBytesReturned,
				sizeof(u_int32_t)))
				goto invalid;
		hptiop_unlock_adapter(hba);
		return 0;
	} else {
invalid:
		hptiop_unlock_adapter(hba);
		return EFAULT;
	}
}

static int  hptiop_rescan_bus(struct hpt_iop_hba * hba)
{
	union ccb           *ccb;

	if ((ccb = xpt_alloc_ccb()) == NULL)
		return(ENOMEM);
	if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim),
		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		return(EIO);
	}
	xpt_rescan(ccb);
	return(0);
}

static  bus_dmamap_callback_t   hptiop_map_srb;
static  bus_dmamap_callback_t   hptiop_post_scsi_command;
static  bus_dmamap_callback_t   hptiop_mv_map_ctlcfg;
static	bus_dmamap_callback_t	hptiop_mvfrey_map_ctlcfg;

static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
{
	hba->bar0_rid = 0x10;
	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

	if (hba->bar0_res == NULL) {
		device_printf(hba->pcidev,
			"failed to get iop base address.\n");
		return -1;
	}
	hba->bar0t = rman_get_bustag(hba->bar0_res);
	hba->bar0h = rman_get_bushandle(hba->bar0_res);
	hba->u.itl.mu = (struct hpt_iopmu_itl *)
				rman_get_virtual(hba->bar0_res);

	if (!hba->u.itl.mu) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "alloc mem res failed\n");
		return -1;
	}

	return 0;
}

static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
{
	hba->bar0_rid = 0x10;
	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

	if (hba->bar0_res == NULL) {
		device_printf(hba->pcidev, "failed to get iop bar0.\n");
		return -1;
	}
	hba->bar0t = rman_get_bustag(hba->bar0_res);
	hba->bar0h = rman_get_bushandle(hba->bar0_res);
	hba->u.mv.regs = (struct hpt_iopmv_regs *)
				rman_get_virtual(hba->bar0_res);

	if (!hba->u.mv.regs) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
		return -1;
	}

	hba->bar2_rid = 0x18;
	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);

	if (hba->bar2_res == NULL) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "failed to get iop bar2.\n");
		return -1;
	}

	hba->bar2t = rman_get_bustag(hba->bar2_res);
	hba->bar2h = rman_get_bushandle(hba->bar2_res);
	hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);

	if (!hba->u.mv.mu) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar2_rid, hba->bar2_res);
		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
		return -1;
	}

	return 0;
}

static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
{
	hba->bar0_rid = 0x10;
	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

	if (hba->bar0_res == NULL) {
		device_printf(hba->pcidev, "failed to get iop bar0.\n");
		return -1;
	}
	hba->bar0t = rman_get_bustag(hba->bar0_res);
	hba->bar0h = rman_get_bushandle(hba->bar0_res);
	hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
				rman_get_virtual(hba->bar0_res);

	if (!hba->u.mvfrey.config) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
		return -1;
	}

	hba->bar2_rid = 0x18;
	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);

	if (hba->bar2_res == NULL) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "failed to get iop bar2.\n");
		return -1;
	}

	hba->bar2t = rman_get_bustag(hba->bar2_res);
	hba->bar2h = rman_get_bushandle(hba->bar2_res);
	hba->u.mvfrey.mu =
		(struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res);

	if (!hba->u.mvfrey.mu) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar2_rid, hba->bar2_res);
		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
		return -1;
	}

	return 0;
}

static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
{
	if (hba->bar0_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
}

static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
{
	if (hba->bar0_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
	if (hba->bar2_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar2_rid, hba->bar2_res);
}

static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
{
	if (hba->bar0_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
	if (hba->bar2_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar2_rid, hba->bar2_res);
}

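/*
 * Allocate the DMA-able scratch buffer used for MV control/config
 * requests (hba->ctlcfg_ptr); the hptiop_mv_map_ctlcfg callback records
 * the resulting bus address when the map is loaded.
 */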
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
{
	if (bus_dma_tag_create(hba->parent_dmat,
				1,
				0,
				BUS_SPACE_MAXADDR_32BIT,
				BUS_SPACE_MAXADDR,
				NULL, NULL,
				0x800 - 0x8,
				1,
				BUS_SPACE_MAXSIZE_32BIT,
				BUS_DMA_ALLOCNOW,
				NULL,
				NULL,
				&hba->ctlcfg_dmat)) {
		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
		return -1;
	}

	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
		&hba->ctlcfg_dmamap) != 0) {
			device_printf(hba->pcidev,
					"bus_dmamem_alloc failed!\n");
			bus_dma_tag_destroy(hba->ctlcfg_dmat);
			return -1;
	}

	if (bus_dmamap_load(hba->ctlcfg_dmat,
			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
			MVIOP_IOCTLCFG_SIZE,
			hptiop_mv_map_ctlcfg, hba, 0)) {
		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
		if (hba->ctlcfg_dmat) {
			bus_dmamem_free(hba->ctlcfg_dmat,
				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
			bus_dma_tag_destroy(hba->ctlcfg_dmat);
		}
		return -1;
	}

	return 0;
}

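/*
 * MVFrey keeps its request machinery in one contiguous DMA allocation:
 * a 0x800-byte control/config area followed by the inbound and outbound
 * descriptor lists and the outbound shadow copy-pointer.  The list
 * depth is read from the upper 16 bits of the inbound_conf_ctl
 * register.
 */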
static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
{
	u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);

	list_count >>= 16;

	if (list_count == 0) {
		return -1;
	}

	hba->u.mvfrey.list_count = list_count;
	hba->u.mvfrey.internal_mem_size = 0x800
							+ list_count * sizeof(struct mvfrey_inlist_entry)
							+ list_count * sizeof(struct mvfrey_outlist_entry)
							+ sizeof(int);
	if (bus_dma_tag_create(hba->parent_dmat,
				1,
				0,
				BUS_SPACE_MAXADDR_32BIT,
				BUS_SPACE_MAXADDR,
				NULL, NULL,
				hba->u.mvfrey.internal_mem_size,
				1,
				BUS_SPACE_MAXSIZE_32BIT,
				BUS_DMA_ALLOCNOW,
				NULL,
				NULL,
				&hba->ctlcfg_dmat)) {
		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
		return -1;
	}

	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
		&hba->ctlcfg_dmamap) != 0) {
			device_printf(hba->pcidev,
					"bus_dmamem_alloc failed!\n");
			bus_dma_tag_destroy(hba->ctlcfg_dmat);
			return -1;
	}

	if (bus_dmamap_load(hba->ctlcfg_dmat,
			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
			hba->u.mvfrey.internal_mem_size,
			hptiop_mvfrey_map_ctlcfg, hba, 0)) {
		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
		if (hba->ctlcfg_dmat) {
			bus_dmamem_free(hba->ctlcfg_dmat,
				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
			bus_dma_tag_destroy(hba->ctlcfg_dmat);
		}
		return -1;
	}

	return 0;
}

static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
	return 0;
}

static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
{
	if (hba->ctlcfg_dmat) {
		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
		bus_dmamem_free(hba->ctlcfg_dmat,
					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
		bus_dma_tag_destroy(hba->ctlcfg_dmat);
	}

	return 0;
}

static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
{
	if (hba->ctlcfg_dmat) {
		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
		bus_dmamem_free(hba->ctlcfg_dmat,
					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
		bus_dma_tag_destroy(hba->ctlcfg_dmat);
	}

	return 0;
}

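/*
 * Re-establish the MVFrey communication area after a RESET_COMM
 * message: give the MCU time to come back, program the bus addresses of
 * the inbound/outbound lists and the outbound shadow pointer, and reset
 * the ring pointers to their initial (toggled) positions.
 */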
static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
{
	u_int32_t i = 100;

	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
		return -1;

	/* wait 100ms for MCU ready */
	while(i--) {
		DELAY(1000);
	}

	BUS_SPACE_WRT4_MVFREY2(inbound_base,
							hba->u.mvfrey.inlist_phy & 0xffffffff);
	BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
							(hba->u.mvfrey.inlist_phy >> 16) >> 16);

	BUS_SPACE_WRT4_MVFREY2(outbound_base,
							hba->u.mvfrey.outlist_phy & 0xffffffff);
	BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
							(hba->u.mvfrey.outlist_phy >> 16) >> 16);

	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
							hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
							(hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);

	hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
								| CL_POINTER_TOGGLE;
	*hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
								| CL_POINTER_TOGGLE;
	hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;

	return 0;
}

/*
 * CAM driver interface
 */
static device_method_t driver_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     hptiop_probe),
	DEVMETHOD(device_attach,    hptiop_attach),
	DEVMETHOD(device_detach,    hptiop_detach),
	DEVMETHOD(device_shutdown,  hptiop_shutdown),
	{ 0, 0 }
};

static struct hptiop_adapter_ops hptiop_itl_ops = {
	.family	           = INTEL_BASED_IOP,
	.iop_wait_ready    = hptiop_wait_ready_itl,
	.internal_memalloc = 0,
	.internal_memfree  = hptiop_internal_memfree_itl,
	.alloc_pci_res     = hptiop_alloc_pci_res_itl,
	.release_pci_res   = hptiop_release_pci_res_itl,
	.enable_intr       = hptiop_enable_intr_itl,
	.disable_intr      = hptiop_disable_intr_itl,
	.get_config        = hptiop_get_config_itl,
	.set_config        = hptiop_set_config_itl,
	.iop_intr          = hptiop_intr_itl,
	.post_msg          = hptiop_post_msg_itl,
	.post_req          = hptiop_post_req_itl,
	.do_ioctl          = hptiop_do_ioctl_itl,
	.reset_comm        = 0,
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
	.family	           = MV_BASED_IOP,
	.iop_wait_ready    = hptiop_wait_ready_mv,
	.internal_memalloc = hptiop_internal_memalloc_mv,
	.internal_memfree  = hptiop_internal_memfree_mv,
	.alloc_pci_res     = hptiop_alloc_pci_res_mv,
	.release_pci_res   = hptiop_release_pci_res_mv,
	.enable_intr       = hptiop_enable_intr_mv,
	.disable_intr      = hptiop_disable_intr_mv,
	.get_config        = hptiop_get_config_mv,
	.set_config        = hptiop_set_config_mv,
	.iop_intr          = hptiop_intr_mv,
	.post_msg          = hptiop_post_msg_mv,
	.post_req          = hptiop_post_req_mv,
	.do_ioctl          = hptiop_do_ioctl_mv,
	.reset_comm        = 0,
};

static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
	.family	           = MVFREY_BASED_IOP,
	.iop_wait_ready    = hptiop_wait_ready_mvfrey,
	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
	.internal_memfree  = hptiop_internal_memfree_mvfrey,
	.alloc_pci_res     = hptiop_alloc_pci_res_mvfrey,
	.release_pci_res   = hptiop_release_pci_res_mvfrey,
	.enable_intr       = hptiop_enable_intr_mvfrey,
	.disable_intr      = hptiop_disable_intr_mvfrey,
	.get_config        = hptiop_get_config_mvfrey,
	.set_config        = hptiop_set_config_mvfrey,
	.iop_intr          = hptiop_intr_mvfrey,
	.post_msg          = hptiop_post_msg_mvfrey,
	.post_req          = hptiop_post_req_mvfrey,
	.do_ioctl          = hptiop_do_ioctl_mvfrey,
	.reset_comm        = hptiop_reset_comm_mvfrey,
};

static driver_t hptiop_pci_driver = {
	driver_name,
	driver_methods,
	sizeof(struct hpt_iop_hba)
};

DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, 0, 0);
MODULE_DEPEND(hptiop, cam, 1, 1, 1);

1797static int hptiop_probe(device_t dev)
1798{
1799	struct hpt_iop_hba *hba;
1800	u_int32_t id;
1801	static char buf[256];
1802	int sas = 0;
1803	struct hptiop_adapter_ops *ops;
1804
1805	if (pci_get_vendor(dev) != 0x1103)
1806		return (ENXIO);
1807
1808	id = pci_get_device(dev);
1809
1810	switch (id) {
1811		case 0x4520:
1812		case 0x4521:
1813		case 0x4522:
1814			sas = 1;
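			/* FALLTHROUGH */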
1815		case 0x3620:
1816		case 0x3622:
1817		case 0x3640:
1818			ops = &hptiop_mvfrey_ops;
1819			break;
1820		case 0x4210:
1821		case 0x4211:
1822		case 0x4310:
1823		case 0x4311:
1824		case 0x4320:
1825		case 0x4321:
		case 0x4322:
1827			sas = 1;
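			/* FALLTHROUGH */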
1828		case 0x3220:
1829		case 0x3320:
1830		case 0x3410:
1831		case 0x3520:
1832		case 0x3510:
1833		case 0x3511:
1834		case 0x3521:
1835		case 0x3522:
1836		case 0x3530:
1837		case 0x3540:
1838		case 0x3560:
1839			ops = &hptiop_itl_ops;
1840			break;
1841		case 0x3020:
1842		case 0x3120:
1843		case 0x3122:
1844			ops = &hptiop_mv_ops;
1845			break;
1846		default:
1847			return (ENXIO);
1848	}
1849
1850	device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
1851		pci_get_bus(dev), pci_get_slot(dev),
1852		pci_get_function(dev), pci_get_irq(dev));
1853
1854	sprintf(buf, "RocketRAID %x %s Controller\n",
1855				id, sas ? "SAS" : "SATA");
1856	device_set_desc_copy(dev, buf);
1857
1858	hba = (struct hpt_iop_hba *)device_get_softc(dev);
1859	bzero(hba, sizeof(struct hpt_iop_hba));
1860	hba->ops = ops;
1861
1862	KdPrint(("hba->ops=%p\n", hba->ops));
1863	return 0;
1864}
1865
1866static int hptiop_attach(device_t dev)
1867{
1868	struct make_dev_args args;
1869	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1870	struct hpt_iop_request_get_config  iop_config;
1871	struct hpt_iop_request_set_config  set_config;
1872	int rid = 0;
1873	struct cam_devq *devq;
1874	struct ccb_setasync ccb;
1875	u_int32_t unit = device_get_unit(dev);
1876
1877	device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
1878			unit, driver_version);
1879
1880	KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1881		pci_get_bus(dev), pci_get_slot(dev),
1882		pci_get_function(dev), hba->ops));
1883
1884	pci_enable_busmaster(dev);
1885	hba->pcidev = dev;
1886	hba->pciunit = unit;
1887
1888	if (hba->ops->alloc_pci_res(hba))
1889		return ENXIO;
1890
1891	if (hba->ops->iop_wait_ready(hba, 2000)) {
1892		device_printf(dev, "adapter is not ready\n");
1893		goto release_pci_res;
1894	}
1895
1896	mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
1897
1898	if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */
1899			1,  /* alignment */
1900			0, /* boundary */
1901			BUS_SPACE_MAXADDR,  /* lowaddr */
1902			BUS_SPACE_MAXADDR,  /* highaddr */
1903			NULL, NULL,         /* filter, filterarg */
1904			BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
1905			BUS_SPACE_UNRESTRICTED, /* nsegments */
1906			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1907			0,      /* flags */
1908			NULL,   /* lockfunc */
1909			NULL,       /* lockfuncarg */
1910			&hba->parent_dmat   /* tag */))
1911	{
1912		device_printf(dev, "alloc parent_dmat failed\n");
1913		goto release_pci_res;
1914	}
1915
1916	if (hba->ops->family == MV_BASED_IOP) {
1917		if (hba->ops->internal_memalloc(hba)) {
1918			device_printf(dev, "alloc srb_dmat failed\n");
1919			goto destroy_parent_tag;
1920		}
1921	}
1922
1923	if (hba->ops->get_config(hba, &iop_config)) {
1924		device_printf(dev, "get iop config failed.\n");
1925		goto get_config_failed;
1926	}
1927
1928	hba->firmware_version = iop_config.firmware_version;
1929	hba->interface_version = iop_config.interface_version;
1930	hba->max_requests = iop_config.max_requests;
1931	hba->max_devices = iop_config.max_devices;
1932	hba->max_request_size = iop_config.request_size;
1933	hba->max_sg_count = iop_config.max_sg_count;
1934
1935	if (hba->ops->family == MVFREY_BASED_IOP) {
1936		if (hba->ops->internal_memalloc(hba)) {
1937			device_printf(dev, "alloc srb_dmat failed\n");
1938			goto destroy_parent_tag;
1939		}
1940		if (hba->ops->reset_comm(hba)) {
1941			device_printf(dev, "reset comm failed\n");
1942			goto get_config_failed;
1943		}
1944	}
1945
1946	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1947			4,  /* alignment */
1948			BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1949			BUS_SPACE_MAXADDR,  /* lowaddr */
1950			BUS_SPACE_MAXADDR,  /* highaddr */
1951			NULL, NULL,         /* filter, filterarg */
1952			PAGE_SIZE * (hba->max_sg_count-1),  /* maxsize */
1953			hba->max_sg_count,  /* nsegments */
1954			0x20000,    /* maxsegsize */
1955			BUS_DMA_ALLOCNOW,       /* flags */
1956			busdma_lock_mutex,  /* lockfunc */
1957			&hba->lock,     /* lockfuncarg */
1958			&hba->io_dmat   /* tag */))
1959	{
1960		device_printf(dev, "alloc io_dmat failed\n");
1961		goto get_config_failed;
1962	}
1963
1964	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1965			1,  /* alignment */
1966			0, /* boundary */
1967			BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1968			BUS_SPACE_MAXADDR,  /* highaddr */
1969			NULL, NULL,         /* filter, filterarg */
1970			HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
1971			1,  /* nsegments */
1972			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1973			0,      /* flags */
1974			NULL,   /* lockfunc */
1975			NULL,       /* lockfuncarg */
1976			&hba->srb_dmat  /* tag */))
1977	{
1978		device_printf(dev, "alloc srb_dmat failed\n");
1979		goto destroy_io_dmat;
1980	}
1981
1982	if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1983			BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1984			&hba->srb_dmamap) != 0)
1985	{
1986		device_printf(dev, "srb bus_dmamem_alloc failed!\n");
1987		goto destroy_srb_dmat;
1988	}
1989
1990	if (bus_dmamap_load(hba->srb_dmat,
1991			hba->srb_dmamap, hba->uncached_ptr,
1992			(HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
1993			hptiop_map_srb, hba, 0))
1994	{
1995		device_printf(dev, "bus_dmamap_load failed!\n");
1996		goto srb_dmamem_free;
1997	}
1998
	if ((devq = cam_simq_alloc(hba->max_requests - 1)) == NULL) {
2000		device_printf(dev, "cam_simq_alloc failed\n");
2001		goto srb_dmamap_unload;
2002	}
2003
2004	hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
2005			hba, unit, &hba->lock, hba->max_requests - 1, 1, devq);
2006	if (!hba->sim) {
2007		device_printf(dev, "cam_sim_alloc failed\n");
2008		cam_simq_free(devq);
2009		goto srb_dmamap_unload;
2010	}
2011	hptiop_lock_adapter(hba);
2012	if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
2013	{
2014		device_printf(dev, "xpt_bus_register failed\n");
2015		goto free_cam_sim;
2016	}
2017
2018	if (xpt_create_path(&hba->path, /*periph */ NULL,
2019			cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
2020			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2021		device_printf(dev, "xpt_create_path failed\n");
2022		goto deregister_xpt_bus;
2023	}
2024	hptiop_unlock_adapter(hba);
2025
2026	bzero(&set_config, sizeof(set_config));
2027	set_config.iop_id = unit;
2028	set_config.vbus_id = cam_sim_path(hba->sim);
2029	set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
2030
2031	if (hba->ops->set_config(hba, &set_config)) {
2032		device_printf(dev, "set iop config failed.\n");
2033		goto free_hba_path;
2034	}
2035
2036	memset(&ccb, 0, sizeof(ccb));
2037	xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2038	ccb.ccb_h.func_code = XPT_SASYNC_CB;
2039	ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
2040	ccb.callback = hptiop_async;
2041	ccb.callback_arg = hba->sim;
2042	xpt_action((union ccb *)&ccb);
2043
2044	rid = 0;
2045	if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ,
2046			&rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
2047		device_printf(dev, "allocate irq failed!\n");
2048		goto free_hba_path;
2049	}
2050
2051	if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
2052				NULL, hptiop_pci_intr, hba, &hba->irq_handle))
2053	{
2054		device_printf(dev, "allocate intr function failed!\n");
2055		goto free_irq_resource;
2056	}
2057
2058	if (hptiop_send_sync_msg(hba,
2059			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
2060		device_printf(dev, "fail to start background task\n");
		goto teardown_irq_resource;
2062	}
2063
2064	hba->ops->enable_intr(hba);
2065	hba->initialized = 1;
2066
2067	make_dev_args_init(&args);
2068	args.mda_devsw = &hptiop_cdevsw;
2069	args.mda_uid = UID_ROOT;
2070	args.mda_gid = GID_WHEEL /*GID_OPERATOR*/;
2071	args.mda_mode = S_IRUSR | S_IWUSR;
2072	args.mda_si_drv1 = hba;
2073
2074	make_dev_s(&args, &hba->ioctl_dev, "%s%d", driver_name, unit);
2075
	return 0;

teardown_irq_resource:
2080	bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
2081
2082free_irq_resource:
2083	bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
2084
2085	hptiop_lock_adapter(hba);
2086free_hba_path:
2087	xpt_free_path(hba->path);
2088
2089deregister_xpt_bus:
2090	xpt_bus_deregister(cam_sim_path(hba->sim));
2091
2092free_cam_sim:
2093	cam_sim_free(hba->sim, /*free devq*/ TRUE);
2094	hptiop_unlock_adapter(hba);
2095
2096srb_dmamap_unload:
2097	if (hba->uncached_ptr)
2098		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2099
2100srb_dmamem_free:
2101	if (hba->uncached_ptr)
2102		bus_dmamem_free(hba->srb_dmat,
2103			hba->uncached_ptr, hba->srb_dmamap);
2104
2105destroy_srb_dmat:
2106	if (hba->srb_dmat)
2107		bus_dma_tag_destroy(hba->srb_dmat);
2108
2109destroy_io_dmat:
2110	if (hba->io_dmat)
2111		bus_dma_tag_destroy(hba->io_dmat);
2112
2113get_config_failed:
2114	hba->ops->internal_memfree(hba);
2115
2116destroy_parent_tag:
2117	if (hba->parent_dmat)
2118		bus_dma_tag_destroy(hba->parent_dmat);
2119
2120release_pci_res:
2121	if (hba->ops->release_pci_res)
2122		hba->ops->release_pci_res(hba);
2123
2124	return ENXIO;
2125}
2126
2127static int hptiop_detach(device_t dev)
2128{
2129	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2130	int i;
2131	int error = EBUSY;
2132
2133	hptiop_lock_adapter(hba);
2134	for (i = 0; i < hba->max_devices; i++)
2135		if (hptiop_os_query_remove_device(hba, i)) {
2136			device_printf(dev, "%d file system is busy. id=%d",
2137						hba->pciunit, i);
2138			goto out;
2139		}
2140
2141	if ((error = hptiop_shutdown(dev)) != 0)
2142		goto out;
2143	if (hptiop_send_sync_msg(hba,
2144		IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
2145		goto out;
2146	hptiop_unlock_adapter(hba);
2147
2148	hptiop_release_resource(hba);
2149	return (0);
2150out:
2151	hptiop_unlock_adapter(hba);
2152	return error;
2153}
2154
2155static int hptiop_shutdown(device_t dev)
2156{
2157	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2158
2159	int error = 0;
2160
2161	if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
2162		device_printf(dev, "%d device is busy", hba->pciunit);
2163		return EBUSY;
2164	}
2165
2166	hba->ops->disable_intr(hba);
2167
2168	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
2169		error = EBUSY;
2170
2171	return error;
2172}
2173
2174static void hptiop_pci_intr(void *arg)
2175{
2176	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2177	hptiop_lock_adapter(hba);
2178	hba->ops->iop_intr(hba);
2179	hptiop_unlock_adapter(hba);
2180}
2181
2182static void hptiop_poll(struct cam_sim *sim)
2183{
2184	struct hpt_iop_hba *hba;
2185
2186	hba = cam_sim_softc(sim);
2187	hba->ops->iop_intr(hba);
2188}
2189
2190static void hptiop_async(void * callback_arg, u_int32_t code,
2191					struct cam_path * path, void * arg)
2192{
2193}
2194
2195static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
2196{
2197	BUS_SPACE_WRT4_ITL(outbound_intmask,
2198		~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
2199}
2200
2201static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
2202{
2203	u_int32_t int_mask;
2204
2205	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2206
2207	int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
2208			| MVIOP_MU_OUTBOUND_INT_MSG;
	BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
2210}
2211
2212static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
2213{
2214	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
2215	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2216
2217	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
2218	BUS_SPACE_RD4_MVFREY2(isr_enable);
2219
2220	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
2221	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2222}
2223
2224static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
2225{
2226	u_int32_t int_mask;
2227
2228	int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
2229
2230	int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
2231	BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
2232	BUS_SPACE_RD4_ITL(outbound_intstatus);
2233}
2234
2235static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
2236{
2237	u_int32_t int_mask;
2238	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2239
2240	int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
2241			| MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
	BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
2243	BUS_SPACE_RD4_MV0(outbound_intmask);
2244}
2245
2246static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
2247{
2248	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
2249	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2250
2251	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
2252	BUS_SPACE_RD4_MVFREY2(isr_enable);
2253
2254	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
2255	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2256}
2257
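/*
 * Reset the adapter and restart its background task.  Also serves as the
 * per-command timeout handler armed in hptiop_post_req_mvfrey().
 */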
2258static void hptiop_reset_adapter(void *argv)
2259{
2260	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
2261	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
2262		return;
2263	hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
2264}
2265
2266static void *hptiop_get_srb(struct hpt_iop_hba * hba)
2267{
2268	struct hpt_iop_srb * srb;
2269
2270	if (hba->srb_list) {
2271		srb = hba->srb_list;
2272		hba->srb_list = srb->next;
2273		return srb;
2274	}
2275
2276	return NULL;
2277}
2278
2279static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
2280{
2281	srb->next = hba->srb_list;
2282	hba->srb_list = srb;
2283}
2284
2285static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
2286{
2287	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
2288	struct hpt_iop_srb * srb;
2289	int error;
2290
2291	switch (ccb->ccb_h.func_code) {
2292
2293	case XPT_SCSI_IO:
2294		if (ccb->ccb_h.target_lun != 0 ||
2295			ccb->ccb_h.target_id >= hba->max_devices ||
2296			(ccb->ccb_h.flags & CAM_CDB_PHYS))
2297		{
2298			ccb->ccb_h.status = CAM_TID_INVALID;
2299			xpt_done(ccb);
2300			return;
2301		}
2302
2303		if ((srb = hptiop_get_srb(hba)) == NULL) {
2304			device_printf(hba->pcidev, "srb allocated failed");
2305			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2306			xpt_done(ccb);
2307			return;
2308		}
2309
2310		srb->ccb = ccb;
2311		error = bus_dmamap_load_ccb(hba->io_dmat,
2312					    srb->dma_map,
2313					    ccb,
2314					    hptiop_post_scsi_command,
2315					    srb,
2316					    0);
2317
2318		if (error && error != EINPROGRESS) {
2319			device_printf(hba->pcidev,
2320				"%d bus_dmamap_load error %d",
2321				hba->pciunit, error);
2322			xpt_freeze_simq(hba->sim, 1);
2323			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2324			hptiop_free_srb(hba, srb);
2325			xpt_done(ccb);
2326			return;
2327		}
2328
2329		return;
2330
2331	case XPT_RESET_BUS:
2332		device_printf(hba->pcidev, "reset adapter");
2333		hba->msg_done = 0;
2334		hptiop_reset_adapter(hba);
2335		break;
2336
2337	case XPT_GET_TRAN_SETTINGS:
2338	case XPT_SET_TRAN_SETTINGS:
2339		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2340		break;
2341
2342	case XPT_CALC_GEOMETRY:
2343		cam_calc_geometry(&ccb->ccg, 1);
2344		break;
2345
2346	case XPT_PATH_INQ:
2347	{
2348		struct ccb_pathinq *cpi = &ccb->cpi;
2349
2350		cpi->version_num = 1;
2351		cpi->hba_inquiry = PI_SDTR_ABLE;
2352		cpi->target_sprt = 0;
2353		cpi->hba_misc = PIM_NOBUSRESET;
2354		cpi->hba_eng_cnt = 0;
2355		cpi->max_target = hba->max_devices;
2356		cpi->max_lun = 0;
2357		cpi->unit_number = cam_sim_unit(sim);
2358		cpi->bus_id = cam_sim_bus(sim);
2359		cpi->initiator_id = hba->max_devices;
2360		cpi->base_transfer_speed = 3300;
2361
2362		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2363		strlcpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
2364		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2365		cpi->transport = XPORT_SPI;
2366		cpi->transport_version = 2;
2367		cpi->protocol = PROTO_SCSI;
2368		cpi->protocol_version = SCSI_REV_2;
2369		cpi->ccb_h.status = CAM_REQ_CMP;
2370		break;
2371	}
2372
2373	default:
2374		ccb->ccb_h.status = CAM_REQ_INVALID;
2375		break;
2376	}
2377
2378	xpt_done(ccb);
2379	return;
2380}
2381
2382static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
2383				struct hpt_iop_srb *srb,
2384				bus_dma_segment_t *segs, int nsegs)
2385{
2386	int idx;
2387	union ccb *ccb = srb->ccb;
2388	u_int8_t *cdb;
2389
2390	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2391		cdb = ccb->csio.cdb_io.cdb_ptr;
2392	else
2393		cdb = ccb->csio.cdb_io.cdb_bytes;
2394
2395	KdPrint(("ccb=%p %x-%x-%x\n",
2396		ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
2397
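	/*
	 * SRBs beyond the controller's 32 G addressing limit cannot be
	 * posted by physical address; build the request on the stack and
	 * copy it into the IOP's BAR0 request slot instead.
	 */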
2398	if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
2399		u_int32_t iop_req32;
2400		struct hpt_iop_request_scsi_command req;
2401
2402		iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
2403
2404		if (iop_req32 == IOPMU_QUEUE_EMPTY) {
2405			device_printf(hba->pcidev, "invalid req offset\n");
2406			ccb->ccb_h.status = CAM_BUSY;
2407			bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2408			hptiop_free_srb(hba, srb);
2409			xpt_done(ccb);
2410			return;
2411		}
2412
2413		if (ccb->csio.dxfer_len && nsegs > 0) {
2414			struct hpt_iopsg *psg = req.sg_list;
2415			for (idx = 0; idx < nsegs; idx++, psg++) {
2416				psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2417				psg->size = segs[idx].ds_len;
2418				psg->eot = 0;
2419			}
2420			psg[-1].eot = 1;
2421		}
2422
2423		bcopy(cdb, req.cdb, ccb->csio.cdb_len);
2424
2425		req.header.size =
2426				offsetof(struct hpt_iop_request_scsi_command, sg_list)
2427				+ nsegs*sizeof(struct hpt_iopsg);
2428		req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2429		req.header.flags = 0;
2430		req.header.result = IOP_RESULT_PENDING;
2431		req.header.context = (u_int64_t)(unsigned long)srb;
2432		req.dataxfer_length = ccb->csio.dxfer_len;
		req.channel = 0;
		req.target = ccb->ccb_h.target_id;
		req.lun = ccb->ccb_h.target_lun;
2436
2437		bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
2438			(u_int8_t *)&req, req.header.size);
2439
2440		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2441			bus_dmamap_sync(hba->io_dmat,
2442				srb->dma_map, BUS_DMASYNC_PREREAD);
2443		}
2444		else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2445			bus_dmamap_sync(hba->io_dmat,
2446				srb->dma_map, BUS_DMASYNC_PREWRITE);
2447
		BUS_SPACE_WRT4_ITL(inbound_queue, iop_req32);
2449	} else {
2450		struct hpt_iop_request_scsi_command *req;
2451
2452		req = (struct hpt_iop_request_scsi_command *)srb;
2453		if (ccb->csio.dxfer_len && nsegs > 0) {
2454			struct hpt_iopsg *psg = req->sg_list;
2455			for (idx = 0; idx < nsegs; idx++, psg++) {
2456				psg->pci_address =
2457					(u_int64_t)segs[idx].ds_addr;
2458				psg->size = segs[idx].ds_len;
2459				psg->eot = 0;
2460			}
2461			psg[-1].eot = 1;
2462		}
2463
2464		bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2465
2466		req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2467		req->header.result = IOP_RESULT_PENDING;
2468		req->dataxfer_length = ccb->csio.dxfer_len;
		req->channel = 0;
		req->target = ccb->ccb_h.target_id;
		req->lun = ccb->ccb_h.target_lun;
2472		req->header.size =
2473			offsetof(struct hpt_iop_request_scsi_command, sg_list)
2474			+ nsegs*sizeof(struct hpt_iopsg);
2475		req->header.context = (u_int64_t)srb->index |
2476						IOPMU_QUEUE_ADDR_HOST_BIT;
2477		req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2478
2479		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2480			bus_dmamap_sync(hba->io_dmat,
2481				srb->dma_map, BUS_DMASYNC_PREREAD);
		} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2483			bus_dmamap_sync(hba->io_dmat,
2484				srb->dma_map, BUS_DMASYNC_PREWRITE);
2485		}
2486
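		/*
		 * Firmware/interface versions above 1.2 accept a request
		 * size hint in the low bits of the posted address,
		 * presumably sparing the IOP an extra read to learn the
		 * request length.
		 */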
2487		if (hba->firmware_version > 0x01020000
2488			|| hba->interface_version > 0x01020000) {
2489			u_int32_t size_bits;
2490
2491			if (req->header.size < 256)
2492				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
2493			else if (req->header.size < 512)
2494				size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
2495			else
2496				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
2497						| IOPMU_QUEUE_ADDR_HOST_BIT;
2498
2499			BUS_SPACE_WRT4_ITL(inbound_queue,
2500				(u_int32_t)srb->phy_addr | size_bits);
2501		} else
2502			BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
2503				|IOPMU_QUEUE_ADDR_HOST_BIT);
2504	}
2505}
2506
2507static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
2508				struct hpt_iop_srb *srb,
2509				bus_dma_segment_t *segs, int nsegs)
2510{
2511	int idx, size;
2512	union ccb *ccb = srb->ccb;
2513	u_int8_t *cdb;
2514	struct hpt_iop_request_scsi_command *req;
2515	u_int64_t req_phy;
2516
	req = (struct hpt_iop_request_scsi_command *)srb;
2518	req_phy = srb->phy_addr;
2519
2520	if (ccb->csio.dxfer_len && nsegs > 0) {
2521		struct hpt_iopsg *psg = req->sg_list;
2522		for (idx = 0; idx < nsegs; idx++, psg++) {
2523			psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2524			psg->size = segs[idx].ds_len;
2525			psg->eot = 0;
2526		}
2527		psg[-1].eot = 1;
2528	}
2529	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2530		cdb = ccb->csio.cdb_io.cdb_ptr;
2531	else
2532		cdb = ccb->csio.cdb_io.cdb_bytes;
2533
2534	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2535	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2536	req->header.result = IOP_RESULT_PENDING;
2537	req->dataxfer_length = ccb->csio.dxfer_len;
2538	req->channel = 0;
	req->target = ccb->ccb_h.target_id;
	req->lun = ccb->ccb_h.target_lun;
2541	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2542				- sizeof(struct hpt_iopsg)
2543				+ nsegs * sizeof(struct hpt_iopsg);
2544	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2545		bus_dmamap_sync(hba->io_dmat,
2546			srb->dma_map, BUS_DMASYNC_PREREAD);
2547	}
2548	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2549		bus_dmamap_sync(hba->io_dmat,
2550			srb->dma_map, BUS_DMASYNC_PREWRITE);
2551	req->header.context = (u_int64_t)srb->index
2552					<< MVIOP_REQUEST_NUMBER_START_BIT
2553					| MVIOP_CMD_TYPE_SCSI;
2554	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
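	/*
	 * The low bits of the inbound queue entry carry a size class
	 * (capped at 3) alongside the host-address flag.
	 */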
2555	size = req->header.size >> 8;
2556	hptiop_mv_inbound_write(req_phy
2557			| MVIOP_MU_QUEUE_ADDR_HOST_BIT
2558			| imin(3, size), hba);
2559}
2560
2561static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
2562				struct hpt_iop_srb *srb,
2563				bus_dma_segment_t *segs, int nsegs)
2564{
2565	int idx, index;
2566	union ccb *ccb = srb->ccb;
2567	u_int8_t *cdb;
2568	struct hpt_iop_request_scsi_command *req;
2569	u_int64_t req_phy;
2570
2571	req = (struct hpt_iop_request_scsi_command *)srb;
2572	req_phy = srb->phy_addr;
2573
2574	if (ccb->csio.dxfer_len && nsegs > 0) {
2575		struct hpt_iopsg *psg = req->sg_list;
2576		for (idx = 0; idx < nsegs; idx++, psg++) {
2577			psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
2578			psg->size = segs[idx].ds_len;
2579			psg->eot = 0;
2580		}
2581		psg[-1].eot = 1;
2582	}
2583	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2584		cdb = ccb->csio.cdb_io.cdb_ptr;
2585	else
2586		cdb = ccb->csio.cdb_io.cdb_bytes;
2587
2588	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2589	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2590	req->header.result = IOP_RESULT_PENDING;
2591	req->dataxfer_length = ccb->csio.dxfer_len;
2592	req->channel = 0;
2593	req->target = ccb->ccb_h.target_id;
2594	req->lun = ccb->ccb_h.target_lun;
2595	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2596				- sizeof(struct hpt_iopsg)
2597				+ nsegs * sizeof(struct hpt_iopsg);
2598	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2599		bus_dmamap_sync(hba->io_dmat,
2600			srb->dma_map, BUS_DMASYNC_PREREAD);
2601	}
2602	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2603		bus_dmamap_sync(hba->io_dmat,
2604			srb->dma_map, BUS_DMASYNC_PREWRITE);
2605
2606	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT
2607						| IOP_REQUEST_FLAG_ADDR_BITS
2608						| ((req_phy >> 16) & 0xffff0000);
	req->header.context = ((req_phy & 0xffffffff) << 32)
2610						| srb->index << 4
2611						| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
2612
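	/*
	 * Advance the producer index; on wrap, return to slot 0 and flip
	 * the toggle bit so the IOP can detect the wrap-around.
	 */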
2613	hba->u.mvfrey.inlist_wptr++;
2614	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
2615
2616	if (index == hba->u.mvfrey.list_count) {
2617		index = 0;
2618		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
2619		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
2620	}
2621
2622	hba->u.mvfrey.inlist[index].addr = req_phy;
2623	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
2624
2625	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
2626	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
2627
2628	if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
2629		callout_reset(&srb->timeout, 20 * hz, hptiop_reset_adapter, hba);
2630	}
2631}
2632
2633static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
2634					int nsegs, int error)
2635{
2636	struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
2637	union ccb *ccb = srb->ccb;
2638	struct hpt_iop_hba *hba = srb->hba;
2639
2640	if (error || nsegs > hba->max_sg_count) {
2641		KdPrint(("hptiop: func_code=%x tid=%x lun=%jx nsegs=%d\n",
2642			ccb->ccb_h.func_code,
2643			ccb->ccb_h.target_id,
2644			(uintmax_t)ccb->ccb_h.target_lun, nsegs));
2645		ccb->ccb_h.status = CAM_BUSY;
2646		bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2647		hptiop_free_srb(hba, srb);
2648		xpt_done(ccb);
2649		return;
2650	}
2651
2652	hba->ops->post_req(hba, srb, segs, nsegs);
2653}
2654
2655static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2656				int nsegs, int error)
2657{
2658	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2659	hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
2660				& ~(u_int64_t)0x1F;
2661	hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2662				& ~0x1F);
2663}
2664
2665static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2666				int nsegs, int error)
2667{
2668	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2669	char *p;
2670	u_int64_t phy;
2671	u_int32_t list_count = hba->u.mvfrey.list_count;
2672
2673	phy = ((u_int64_t)segs->ds_addr + 0x1F)
2674				& ~(u_int64_t)0x1F;
2675	p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2676				& ~0x1F);
2677
2678	hba->ctlcfgcmd_phy = phy;
2679	hba->ctlcfg_ptr = p;
2680
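	/*
	 * Carve the single DMA allocation into regions: 0x800 bytes for
	 * the control/config request, then the inbound list, the outbound
	 * list, and finally the outbound copy pointer.
	 */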
2681	p += 0x800;
2682	phy += 0x800;
2683
2684	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
2685	hba->u.mvfrey.inlist_phy = phy;
2686
2687	p += list_count * sizeof(struct mvfrey_inlist_entry);
2688	phy += list_count * sizeof(struct mvfrey_inlist_entry);
2689
2690	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
2691	hba->u.mvfrey.outlist_phy = phy;
2692
2693	p += list_count * sizeof(struct mvfrey_outlist_entry);
2694	phy += list_count * sizeof(struct mvfrey_outlist_entry);
2695
2696	hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
2697	hba->u.mvfrey.outlist_cptr_phy = phy;
2698}
2699
2700static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
2701				int nsegs, int error)
2702{
2703	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2704	bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
2705	struct hpt_iop_srb *srb, *tmp_srb;
2706	int i;
2707
2708	if (error || nsegs == 0) {
2709		device_printf(hba->pcidev, "hptiop_map_srb error");
2710		return;
2711	}
2712
2713	/* map srb */
2714	srb = (struct hpt_iop_srb *)
2715		(((unsigned long)hba->uncached_ptr + 0x1F)
2716		& ~(unsigned long)0x1F);
2717
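	/*
	 * SRBs are carved from one uncached allocation, each 32-byte
	 * aligned; ITL controllers address SRBs in 32-byte units, hence
	 * the "phy_addr >> 5" stored below.
	 */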
2718	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2719		tmp_srb = (struct hpt_iop_srb *)
2720					((char *)srb + i * HPT_SRB_MAX_SIZE);
2721		if (((unsigned long)tmp_srb & 0x1F) == 0) {
2722			if (bus_dmamap_create(hba->io_dmat,
2723						0, &tmp_srb->dma_map)) {
2724				device_printf(hba->pcidev, "dmamap create failed");
2725				return;
2726			}
2727
2728			bzero(tmp_srb, sizeof(struct hpt_iop_srb));
2729			tmp_srb->hba = hba;
2730			tmp_srb->index = i;
			if (hba->ctlcfg_ptr == NULL) {	/* ITL IOP */
2732				tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
2733							(phy_addr >> 5);
2734				if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
2735					tmp_srb->srb_flag =
2736						HPT_SRB_FLAG_HIGH_MEM_ACESS;
2737			} else {
2738				tmp_srb->phy_addr = phy_addr;
2739			}
2740
2741			callout_init_mtx(&tmp_srb->timeout, &hba->lock, 0);
2742			hptiop_free_srb(hba, tmp_srb);
2743			hba->srb[i] = tmp_srb;
2744			phy_addr += HPT_SRB_MAX_SIZE;
2745		}
2746		else {
2747			device_printf(hba->pcidev, "invalid alignment");
2748			return;
2749		}
2750	}
2751}
2752
2753static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
2754{
2755	hba->msg_done = 1;
2756}
2757
2758static  int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2759						int target_id)
2760{
2761	struct cam_periph       *periph = NULL;
2762	struct cam_path         *path;
2763	int                     status, retval = 0;
2764
2765	status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2766
2767	if (status == CAM_REQ_CMP) {
2768		if ((periph = cam_periph_find(path, "da")) != NULL) {
2769			if (periph->refcount >= 1) {
2770				device_printf(hba->pcidev, "%d ,"
2771					"target_id=0x%x,"
2772					"refcount=%d",
2773				    hba->pciunit, target_id, periph->refcount);
2774				retval = -1;
2775			}
2776		}
2777		xpt_free_path(path);
2778	}
2779	return retval;
2780}
2781
2782static void hptiop_release_resource(struct hpt_iop_hba *hba)
2783{
2784	int i;
2785
2786	if (hba->ioctl_dev)
2787		destroy_dev(hba->ioctl_dev);
2788
2789	if (hba->path) {
2790		struct ccb_setasync ccb;
2791
2792		memset(&ccb, 0, sizeof(ccb));
2793		xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2794		ccb.ccb_h.func_code = XPT_SASYNC_CB;
2795		ccb.event_enable = 0;
2796		ccb.callback = hptiop_async;
2797		ccb.callback_arg = hba->sim;
2798		xpt_action((union ccb *)&ccb);
2799		xpt_free_path(hba->path);
2800	}
2801
2802	if (hba->irq_handle)
2803		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2804
2805	if (hba->sim) {
2806		hptiop_lock_adapter(hba);
2807		xpt_bus_deregister(cam_sim_path(hba->sim));
2808		cam_sim_free(hba->sim, TRUE);
2809		hptiop_unlock_adapter(hba);
2810	}
2811
2812	if (hba->ctlcfg_dmat) {
2813		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2814		bus_dmamem_free(hba->ctlcfg_dmat,
2815					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2816		bus_dma_tag_destroy(hba->ctlcfg_dmat);
2817	}
2818
	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
		struct hpt_iop_srb *srb = hba->srb[i];

		/* Entries may be NULL if hptiop_map_srb() failed part way. */
		if (srb == NULL)
			continue;
		if (srb->dma_map)
			bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
		callout_drain(&srb->timeout);
	}
2825
2826	if (hba->srb_dmat) {
2827		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2828		bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2829		bus_dma_tag_destroy(hba->srb_dmat);
2830	}
2831
2832	if (hba->io_dmat)
2833		bus_dma_tag_destroy(hba->io_dmat);
2834
2835	if (hba->parent_dmat)
2836		bus_dma_tag_destroy(hba->parent_dmat);
2837
2838	if (hba->irq_res)
2839		bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2840					0, hba->irq_res);
2841
2842	if (hba->bar0_res)
2843		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2844					hba->bar0_rid, hba->bar0_res);
2845	if (hba->bar2_res)
2846		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2847					hba->bar2_rid, hba->bar2_res);
2848	mtx_destroy(&hba->lock);
2849}
2850