/*
 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
 * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/hptiop/hptiop.c 315813 2017-03-23 06:41:13Z mav $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/cons.h>
#include <sys/time.h>
#include <sys/systm.h>

#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/libkern.h>
#include <sys/kernel.h>

#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/module.h>

#include <sys/eventhandler.h>
#include <sys/bus.h>
#include <sys/taskqueue.h>
#include <sys/ioccom.h>

#include <machine/resource.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>


#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>


#include <dev/hptiop/hptiop.h>

static const char driver_name[] = "hptiop";
static const char driver_version[] = "v1.9";

static devclass_t hptiop_devclass;

static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
				u_int32_t msg, u_int32_t millisec);
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
							u_int32_t req);
static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
							u_int32_t req);
static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams);
static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config);
static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config);
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
			u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams);
static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
static int  hptiop_probe(device_t dev);
static int  hptiop_attach(device_t dev);
static int  hptiop_detach(device_t dev);
static int  hptiop_shutdown(device_t dev);
static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
static void hptiop_poll(struct cam_sim *sim);
static void hptiop_async(void *callback_arg, u_int32_t code,
					struct cam_path *path, void *arg);
static void hptiop_pci_intr(void *arg);
static void hptiop_release_resource(struct hpt_iop_hba *hba);
static void hptiop_reset_adapter(void *argv);
static d_open_t hptiop_open;
static d_close_t hptiop_close;
static d_ioctl_t hptiop_ioctl;

static struct cdevsw hptiop_cdevsw = {
	.d_open = hptiop_open,
	.d_close = hptiop_close,
	.d_ioctl = hptiop_ioctl,
	.d_name = driver_name,
	.d_version = D_VERSION,
};

#define hba_from_dev(dev) \
	((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev)))

#define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
#define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))

#define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
#define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
#define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
#define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))

#define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
#define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))

static int hptiop_open(ioctl_dev_t dev, int flags,
					int devtype, ioctl_thread_t proc)
{
	struct hpt_iop_hba *hba = hba_from_dev(dev);

	if (hba==NULL)
		return ENXIO;
	if (hba->flag & HPT_IOCTL_FLAG_OPEN)
		return EBUSY;
	hba->flag |= HPT_IOCTL_FLAG_OPEN;
	return 0;
}

static int hptiop_close(ioctl_dev_t dev, int flags,
					int devtype, ioctl_thread_t proc)
{
	struct hpt_iop_hba *hba = hba_from_dev(dev);
	hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
	return 0;
}

static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
					int flags, ioctl_thread_t proc)
{
	int ret = EFAULT;
	struct hpt_iop_hba *hba = hba_from_dev(dev);

	mtx_lock(&Giant);

	switch (cmd) {
	case HPT_DO_IOCONTROL:
		ret = hba->ops->do_ioctl(hba,
				(struct hpt_iop_ioctl_param *)data);
		break;
	case HPT_SCAN_BUS:
		ret = hptiop_rescan_bus(hba);
		break;
	}

	mtx_unlock(&Giant);

	return ret;
}

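/*
 * The MV-family adapters exchange requests through circular queues in the
 * BAR2 register window.  hptiop_mv_outbound_read() pops one 64-bit tag from
 * the outbound queue (returning 0 when it is empty) and advances the tail
 * pointer; hptiop_mv_inbound_write() pushes one entry onto the inbound queue
 * and rings the inbound doorbell so the IOP notices the new request.
 */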
static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
{
	u_int64_t p;
	u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
	u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);

	if (outbound_tail != outbound_head) {
		bus_space_read_region_4(hba->bar2t, hba->bar2h,
			offsetof(struct hpt_iopmu_mv,
				outbound_q[outbound_tail]),
			(u_int32_t *)&p, 2);

		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;

		BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
		return p;
	} else
		return 0;
}

static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
{
	u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
	u_int32_t head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	bus_space_write_region_4(hba->bar2t, hba->bar2h,
			offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
			(u_int32_t *)&p, 2);
	BUS_SPACE_WRT4_MV2(inbound_head, head);
	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
}

static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
{
	BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
	BUS_SPACE_RD4_ITL(outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
{

	BUS_SPACE_WRT4_MV2(inbound_msg, msg);
	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);

	BUS_SPACE_RD4_MV0(outbound_intmask);
}

static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
{
	BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
	BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
}

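/*
 * Readiness checks used during initialization: the ITL variant polls the
 * inbound queue until the firmware hands back a request slot (and returns
 * it immediately), while the MV/MVFrey variants simply send a NOP message
 * and wait for its completion.
 */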
static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec)
{
	u_int32_t req=0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = BUS_SPACE_RD4_ITL(inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		DELAY(1000);
	}

	if (req!=IOPMU_QUEUE_EMPTY) {
		BUS_SPACE_WRT4_ITL(outbound_queue, req);
		BUS_SPACE_RD4_ITL(outbound_intstatus);
		return 0;
	}

	return -1;
}

static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec)
{
	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
		return -1;

	return 0;
}

static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba,
							u_int32_t millisec)
{
	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
		return -1;

	return 0;
}

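/*
 * Completion handler for ITL adapters.  'index' either identifies a
 * host-allocated SRB (IOPMU_QUEUE_MASK_HOST_BITS set) or is an offset into
 * the IOP's own request area, in which case the request header is read
 * through bus_space accessors before the CCB is finished off.
 */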
static void hptiop_request_callback_itl(struct hpt_iop_hba * hba,
							u_int32_t index)
{
	struct hpt_iop_srb *srb;
	struct hpt_iop_request_scsi_command *req=0;
	union ccb *ccb;
	u_int8_t *cdb;
	u_int32_t result, temp, dxfer;
	u_int64_t temp64;

	if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/
		if (hba->firmware_version > 0x01020000 ||
			hba->interface_version > 0x01020000) {
			srb = hba->srb[index & ~(u_int32_t)
				(IOPMU_QUEUE_ADDR_HOST_BIT
				| IOPMU_QUEUE_REQUEST_RESULT_BIT)];
			req = (struct hpt_iop_request_scsi_command *)srb;
			if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
				result = IOP_RESULT_SUCCESS;
			else
				result = req->header.result;
		} else {
			srb = hba->srb[index &
				~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
			req = (struct hpt_iop_request_scsi_command *)srb;
			result = req->header.result;
		}
		dxfer = req->dataxfer_length;
		goto srb_complete;
	}

	/*iop req*/
	temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
		offsetof(struct hpt_iop_request_header, type));
	result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
		offsetof(struct hpt_iop_request_header, result));
	switch(temp) {
	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
	{
		temp64 = 0;
		bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
			offsetof(struct hpt_iop_request_header, context),
			(u_int32_t *)&temp64, 2);
		wakeup((void *)((unsigned long)hba->u.itl.mu + index));
		break;
	}

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
			offsetof(struct hpt_iop_request_header, context),
			(u_int32_t *)&temp64, 2);
		srb = (struct hpt_iop_srb *)(unsigned long)temp64;
		dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
				index + offsetof(struct hpt_iop_request_scsi_command,
				dataxfer_length));
srb_complete:
		ccb = (union ccb *)srb->ccb;
		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
			cdb = ccb->csio.cdb_io.cdb_ptr;
		else
			cdb = ccb->csio.cdb_io.cdb_bytes;

		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
			ccb->ccb_h.status = CAM_REQ_CMP;
			goto scsi_done;
		}

		switch (result) {
		case IOP_RESULT_SUCCESS:
			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
			case CAM_DIR_IN:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			case CAM_DIR_OUT:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			}

			ccb->ccb_h.status = CAM_REQ_CMP;
			break;

		case IOP_RESULT_BAD_TARGET:
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		case IOP_RESULT_BUSY:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_INVALID_REQUEST:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		case IOP_RESULT_FAIL:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		case IOP_RESULT_RESET:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_CHECK_CONDITION:
			memset(&ccb->csio.sense_data, 0,
			    sizeof(ccb->csio.sense_data));
			if (dxfer < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
				    dxfer;
			else
				ccb->csio.sense_resid = 0;
			if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/
				bus_space_read_region_1(hba->bar0t, hba->bar0h,
					index + offsetof(struct hpt_iop_request_scsi_command,
					sg_list), (u_int8_t *)&ccb->csio.sense_data,
					MIN(dxfer, sizeof(ccb->csio.sense_data)));
			} else {
				memcpy(&ccb->csio.sense_data, &req->sg_list,
					MIN(dxfer, sizeof(ccb->csio.sense_data)));
			}
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			break;
		default:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		}
scsi_done:
		if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
			BUS_SPACE_WRT4_ITL(outbound_queue, index);

		ccb->csio.resid = ccb->csio.dxfer_len - dxfer;

		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
		break;
	}
}

static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
{
	u_int32_t req, temp;

	while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) {
		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			struct hpt_iop_request_header *p;

			p = (struct hpt_iop_request_header *)
				((char *)hba->u.itl.mu + req);
			temp = bus_space_read_4(hba->bar0t,
					hba->bar0h,req +
					offsetof(struct hpt_iop_request_header,
						flags));
			if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				u_int64_t temp64;
				bus_space_read_region_4(hba->bar0t,
					hba->bar0h,req +
					offsetof(struct hpt_iop_request_header,
						context),
					(u_int32_t *)&temp64, 2);
				if (temp64) {
					hptiop_request_callback_itl(hba, req);
				} else {
					temp64 = 1;
					bus_space_write_region_4(hba->bar0t,
						hba->bar0h,req +
						offsetof(struct hpt_iop_request_header,
							context),
						(u_int32_t *)&temp64, 2);
				}
			} else
				hptiop_request_callback_itl(hba, req);
		}
	}
}

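/*
 * ITL interrupt service: acknowledge and dispatch outbound messages, then
 * drain the outbound completion queue.  Returns non-zero if any work was
 * found.
 */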
static int hptiop_intr_itl(struct hpt_iop_hba * hba)
{
	u_int32_t status;
	int ret = 0;

	status = BUS_SPACE_RD4_ITL(outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
		KdPrint(("hptiop: received outbound msg %x\n", msg));
		BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
		hptiop_os_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}

static void hptiop_request_callback_mv(struct hpt_iop_hba * hba,
							u_int64_t _tag)
{
	u_int32_t context = (u_int32_t)_tag;

	if (context & MVIOP_CMD_TYPE_SCSI) {
		struct hpt_iop_srb *srb;
		struct hpt_iop_request_scsi_command *req;
		union ccb *ccb;
		u_int8_t *cdb;

		srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
		req = (struct hpt_iop_request_scsi_command *)srb;
		ccb = (union ccb *)srb->ccb;
		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
			cdb = ccb->csio.cdb_io.cdb_ptr;
		else
			cdb = ccb->csio.cdb_io.cdb_bytes;

		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
			ccb->ccb_h.status = CAM_REQ_CMP;
			goto scsi_done;
		}
		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
			req->header.result = IOP_RESULT_SUCCESS;

		switch (req->header.result) {
		case IOP_RESULT_SUCCESS:
			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
			case CAM_DIR_IN:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			case CAM_DIR_OUT:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case IOP_RESULT_BAD_TARGET:
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		case IOP_RESULT_BUSY:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_INVALID_REQUEST:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		case IOP_RESULT_FAIL:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		case IOP_RESULT_RESET:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_CHECK_CONDITION:
			memset(&ccb->csio.sense_data, 0,
			    sizeof(ccb->csio.sense_data));
			if (req->dataxfer_length < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
				    req->dataxfer_length;
			else
				ccb->csio.sense_resid = 0;
			memcpy(&ccb->csio.sense_data, &req->sg_list,
				MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			break;
		default:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		}
scsi_done:
		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
	} else if (context & MVIOP_CMD_TYPE_IOCTL) {
		struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
			hba->config_done = 1;
		else
			hba->config_done = -1;
		wakeup(req);
	} else if (context &
			(MVIOP_CMD_TYPE_SET_CONFIG |
				MVIOP_CMD_TYPE_GET_CONFIG))
		hba->config_done = 1;
	else {
		device_printf(hba->pcidev, "wrong callback type\n");
	}
}

static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba,
				u_int32_t _tag)
{
	u_int32_t req_type = _tag & 0xf;

	struct hpt_iop_srb *srb;
	struct hpt_iop_request_scsi_command *req;
	union ccb *ccb;
	u_int8_t *cdb;

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->config_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		srb = hba->srb[(_tag >> 4) & 0xff];
		req = (struct hpt_iop_request_scsi_command *)srb;

		ccb = (union ccb *)srb->ccb;

		untimeout(hptiop_reset_adapter, hba, srb->timeout_ch);

		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
			cdb = ccb->csio.cdb_io.cdb_ptr;
		else
			cdb = ccb->csio.cdb_io.cdb_bytes;

		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
			ccb->ccb_h.status = CAM_REQ_CMP;
			goto scsi_done;
		}

		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
			req->header.result = IOP_RESULT_SUCCESS;

		switch (req->header.result) {
		case IOP_RESULT_SUCCESS:
			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
			case CAM_DIR_IN:
				bus_dmamap_sync(hba->io_dmat,
						srb->dma_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			case CAM_DIR_OUT:
				bus_dmamap_sync(hba->io_dmat,
						srb->dma_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case IOP_RESULT_BAD_TARGET:
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		case IOP_RESULT_BUSY:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_INVALID_REQUEST:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		case IOP_RESULT_FAIL:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		case IOP_RESULT_RESET:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_CHECK_CONDITION:
			memset(&ccb->csio.sense_data, 0,
			       sizeof(ccb->csio.sense_data));
			if (req->dataxfer_length < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
				req->dataxfer_length;
			else
				ccb->csio.sense_resid = 0;
			memcpy(&ccb->csio.sense_data, &req->sg_list,
			       MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			break;
		default:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		}
scsi_done:
		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
		break;
	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
			hba->config_done = 1;
		else
			hba->config_done = -1;
		wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
		break;
	default:
		device_printf(hba->pcidev, "wrong callback type\n");
		break;
	}
}

static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)
{
	u_int64_t req;

	while ((req = hptiop_mv_outbound_read(hba))) {
		if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
			if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
				hptiop_request_callback_mv(hba, req);
			}
		}
	}
}

static int hptiop_intr_mv(struct hpt_iop_hba * hba)
{
	u_int32_t status;
	int ret = 0;

	status = BUS_SPACE_RD4_MV0(outbound_doorbell);

	if (status)
		BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
		KdPrint(("hptiop: received outbound msg %x\n", msg));
		hptiop_os_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_mv(hba);
		ret = 1;
	}

	return ret;
}

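/*
 * MVFrey interrupt service: the function interrupt is masked while the
 * handler runs, the doorbell and isr_cause registers are acknowledged, and
 * completions are consumed from the outbound list by chasing the consumer
 * pointer (*outlist_cptr) that the firmware advances in host memory.
 */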
7671558Srgrimesstatic int hptiop_intr_mvfrey(struct hpt_iop_hba * hba)
7681558Srgrimes{
7691558Srgrimes	u_int32_t status, _tag, cptr;
7701558Srgrimes	int ret = 0;
771114571Sphk
7721558Srgrimes	if (hba->initialized) {
7731558Srgrimes		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
774114571Sphk	}
7751558Srgrimes
776114571Sphk	status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
7771558Srgrimes	if (status) {
778114571Sphk		BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
7791558Srgrimes		if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
7801558Srgrimes			u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);
7811558Srgrimes			hptiop_os_message_callback(hba, msg);
7821558Srgrimes		}
7831558Srgrimes		ret = 1;
7841558Srgrimes	}
7851558Srgrimes
7861558Srgrimes	status = BUS_SPACE_RD4_MVFREY2(isr_cause);
7871558Srgrimes	if (status) {
7881558Srgrimes		BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
7891558Srgrimes		do {
790114571Sphk			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
7911558Srgrimes			while (hba->u.mvfrey.outlist_rptr != cptr) {
792107041Sjulian				hba->u.mvfrey.outlist_rptr++;
7931558Srgrimes				if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
7941558Srgrimes					hba->u.mvfrey.outlist_rptr = 0;
7951558Srgrimes				}
7961558Srgrimes
797107041Sjulian				_tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
798107041Sjulian				hptiop_request_callback_mvfrey(hba, _tag);
7991558Srgrimes				ret = 2;
8001558Srgrimes			}
8011558Srgrimes		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
8021558Srgrimes	}
8031558Srgrimes
8041558Srgrimes	if (hba->initialized) {
8051558Srgrimes		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
8061558Srgrimes	}
8071558Srgrimes
80899365Smarkm	return ret;
809114571Sphk}
8101558Srgrimes
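/*
 * Synchronous (polled) request submission, one variant per interface.
 * Each posts a single request and spins, invoking the interrupt handler by
 * hand, until the firmware signals completion or 'millisec' expires.  Used
 * by the get/set-config paths below.
 */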
static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,
					u_int32_t req32, u_int32_t millisec)
{
	u_int32_t i;
	u_int64_t temp64;

	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
	BUS_SPACE_RD4_ITL(outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		hptiop_intr_itl(hba);
		bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
			offsetof(struct hpt_iop_request_header, context),
			(u_int32_t *)&temp64, 2);
		if (temp64)
			return 0;
		DELAY(1000);
	}

	return -1;
}

static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
					void *req, u_int32_t millisec)
{
	u_int32_t i;
	u_int64_t phy_addr;
	hba->config_done = 0;

	phy_addr = hba->ctlcfgcmd_phy |
			(u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
	((struct hpt_iop_request_get_config *)req)->header.flags |=
		IOP_REQUEST_FLAG_SYNC_REQUEST |
		IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
	hptiop_mv_inbound_write(phy_addr, hba);
	BUS_SPACE_RD4_MV0(outbound_intmask);

	for (i = 0; i < millisec; i++) {
		hptiop_intr_mv(hba);
		if (hba->config_done)
			return 0;
		DELAY(1000);
	}
	return -1;
}

static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
					void *req, u_int32_t millisec)
{
	u_int32_t i, index;
	u_int64_t phy_addr;
	struct hpt_iop_request_header *reqhdr =
					(struct hpt_iop_request_header *)req;

	hba->config_done = 0;

	phy_addr = hba->ctlcfgcmd_phy;
	reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
					| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
					| IOP_REQUEST_FLAG_ADDR_BITS
					| ((phy_addr >> 16) & 0xffff0000);
	reqhdr->context = ((phy_addr & 0xffffffff) << 32 )
					| IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;

	hba->u.mvfrey.inlist_wptr++;
	index = hba->u.mvfrey.inlist_wptr & 0x3fff;

	if (index == hba->u.mvfrey.list_count) {
		index = 0;
		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
	}

	hba->u.mvfrey.inlist[index].addr = phy_addr;
	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;

	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);

	for (i = 0; i < millisec; i++) {
		hptiop_intr_mvfrey(hba);
		if (hba->config_done)
			return 0;
		DELAY(1000);
	}
	return -1;
}

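/*
 * Post an inbound message and busy-wait up to 'millisec' ms for the message
 * callback to set hba->msg_done.
 */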
static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
					u_int32_t msg, u_int32_t millisec)
{
	u_int32_t i;

	hba->msg_done = 0;
	hba->ops->post_msg(hba, msg);

	for (i=0; i<millisec; i++) {
		hba->ops->iop_intr(hba);
		if (hba->msg_done)
			break;
		DELAY(1000);
	}

	return hba->msg_done? 0 : -1;
}

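/*
 * Retrieve the adapter configuration.  ITL builds the request directly in
 * controller memory through bus_space, MV round-trips it through the
 * host-allocated control buffer (ctlcfg_ptr), and MVFrey only validates and
 * copies the structure the firmware has already placed in u.mvfrey.config.
 */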
static int hptiop_get_config_itl(struct hpt_iop_hba * hba,
				struct hpt_iop_request_get_config * config)
{
	u_int32_t req32;

	config->header.size = sizeof(struct hpt_iop_request_get_config);
	config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
	config->header.result = IOP_RESULT_PENDING;
	config->header.context = 0;

	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	bus_space_write_region_4(hba->bar0t, hba->bar0h,
			req32, (u_int32_t *)config,
			sizeof(struct hpt_iop_request_header) >> 2);

	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
		KdPrint(("hptiop: get config send cmd failed"));
		return -1;
	}

	bus_space_read_region_4(hba->bar0t, hba->bar0h,
			req32, (u_int32_t *)config,
			sizeof(struct hpt_iop_request_get_config) >> 2);

	BUS_SPACE_WRT4_ITL(outbound_queue, req32);

	return 0;
}

static int hptiop_get_config_mv(struct hpt_iop_hba * hba,
				struct hpt_iop_request_get_config * config)
{
	struct hpt_iop_request_get_config *req;

	if (!(req = hba->ctlcfg_ptr))
		return -1;

	req->header.flags = 0;
	req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_get_config);
	req->header.result = IOP_RESULT_PENDING;
	req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;

	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
		KdPrint(("hptiop: get config send cmd failed"));
		return -1;
	}

	*config = *req;
	return 0;
}

static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba,
				struct hpt_iop_request_get_config * config)
{
	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
	    info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
		KdPrint(("hptiop: header size %x/%x type %x/%x",
			 info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
			 info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
		return -1;
	}

	config->interface_version = info->interface_version;
	config->firmware_version = info->firmware_version;
	config->max_requests = info->max_requests;
	config->request_size = info->request_size;
	config->max_sg_count = info->max_sg_count;
	config->data_transfer_length = info->data_transfer_length;
	config->alignment_mask = info->alignment_mask;
	config->max_devices = info->max_devices;
	config->sdram_size = info->sdram_size;

	KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
		 config->max_requests, config->request_size,
		 config->data_transfer_length, config->max_devices,
		 config->sdram_size));

	return 0;
}

static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u_int32_t req32;

	req32 = BUS_SPACE_RD4_ITL(inbound_queue);

	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	config->header.size = sizeof(struct hpt_iop_request_set_config);
	config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
	config->header.result = IOP_RESULT_PENDING;
	config->header.context = 0;

	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
		(u_int32_t *)config,
		sizeof(struct hpt_iop_request_set_config) >> 2);

	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
		KdPrint(("hptiop: set config send cmd failed"));
		return -1;
	}

	BUS_SPACE_WRT4_ITL(outbound_queue, req32);

	return 0;
}

static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req;

	if (!(req = hba->ctlcfg_ptr))
		return -1;

	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	req->header.flags = 0;
	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_set_config);
	req->header.result = IOP_RESULT_PENDING;
	req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;

	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
		KdPrint(("hptiop: set config send cmd failed"));
		return -1;
	}

	return 0;
}

static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req;

	if (!(req = hba->ctlcfg_ptr))
		return -1;

	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_set_config);
	req->header.result = IOP_RESULT_PENDING;

	if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
		KdPrint(("hptiop: set config send cmd failed"));
		return -1;
	}

	return 0;
}

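/*
 * Pass-through ioctl plumbing for ITL: the ioctl header is written into the
 * request frame in controller memory, the frame is posted, and the caller
 * sleeps until the firmware clears the 64-bit context field; if the sleep
 * times out, the adapter is reset with IOPMU_INBOUND_MSG0_RESET and the
 * context is re-checked.
 */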
static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
				u_int32_t req32,
				struct hpt_iop_ioctl_param *pParams)
{
	u_int64_t temp64;
	struct hpt_iop_request_ioctl_command req;

	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
			(hba->max_request_size -
			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
		device_printf(hba->pcidev, "request size beyond max value");
		return -1;
	}

	req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
		+ pParams->nInBufferSize;
	req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
	req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
	req.header.result = IOP_RESULT_PENDING;
	req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
	req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
	req.inbuf_size = pParams->nInBufferSize;
	req.outbuf_size = pParams->nOutBufferSize;
	req.bytes_returned = 0;

	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
		offsetof(struct hpt_iop_request_ioctl_command, buf)>>2);

	hptiop_lock_adapter(hba);

	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
	BUS_SPACE_RD4_ITL(outbound_intstatus);

	bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
		offsetof(struct hpt_iop_request_ioctl_command, header.context),
		(u_int32_t *)&temp64, 2);
	while (temp64) {
		if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
				PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
			break;
		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
		bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 +
			offsetof(struct hpt_iop_request_ioctl_command,
				header.context),
			(u_int32_t *)&temp64, 2);
	}

	hptiop_unlock_adapter(hba);
	return 0;
}

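/*
 * Byte-at-a-time copyin/copyout helpers for ITL ioctls: the request frame
 * lives in the bus_space-mapped BAR rather than in ordinary kernel memory,
 * so user data is staged through bus_space_write_1()/bus_space_read_1()
 * instead of handing the mapped region to copyin()/copyout() directly.
 */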
11371558Srgrimesstatic int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
113873034Sjwd									void *user, int size)
113973034Sjwd{
114073034Sjwd	unsigned char byte;
114194065Sphk	int i;
114273034Sjwd
114373034Sjwd	for (i=0; i<size; i++) {
114473034Sjwd		if (copyin((u_int8_t *)user + i, &byte, 1))
1145114574Sphk			return -1;
114673034Sjwd		bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
114773034Sjwd	}
114873034Sjwd
114973034Sjwd	return 0;
115073034Sjwd}
115173573Simp
115273034Sjwdstatic int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
115373034Sjwd									void *user, int size)
115473034Sjwd{
115573034Sjwd	unsigned char byte;
115673034Sjwd	int i;
115773034Sjwd
1158133348Sdes	for (i=0; i<size; i++) {
1159133348Sdes		byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
1160133348Sdes		if (copyout(&byte, (u_int8_t *)user + i, 1))
1161133348Sdes			return -1;
1162133348Sdes	}
1163133348Sdes
1164133348Sdes	return 0;
1165133348Sdes}
1166133348Sdes
1167133348Sdesstatic int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
1168133348Sdes				struct hpt_iop_ioctl_param * pParams)
1169133348Sdes{
117073034Sjwd	u_int32_t req32;
117173034Sjwd	u_int32_t result;
117273573Simp
117373034Sjwd	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
117473034Sjwd		(pParams->Magic != HPT_IOCTL_MAGIC32))
117573034Sjwd		return EFAULT;
117673034Sjwd
1177133384Sdes	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1178133348Sdes	if (req32 == IOPMU_QUEUE_EMPTY)
117973034Sjwd		return EFAULT;
118073034Sjwd
118173034Sjwd	if (pParams->nInBufferSize)
118273034Sjwd		if (hptiop_bus_space_copyin(hba, req32 +
118373034Sjwd			offsetof(struct hpt_iop_request_ioctl_command, buf),
118473034Sjwd			(void *)pParams->lpInBuffer, pParams->nInBufferSize))
118573034Sjwd			goto invalid;
118673034Sjwd
118773034Sjwd	if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
118873034Sjwd		goto invalid;
1189114574Sphk
119073034Sjwd	result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
119173034Sjwd			offsetof(struct hpt_iop_request_ioctl_command,
119273034Sjwd				header.result));
119373034Sjwd
119473034Sjwd	if (result == IOP_RESULT_SUCCESS) {
119594065Sphk		if (pParams->nOutBufferSize)
119673034Sjwd			if (hptiop_bus_space_copyout(hba, req32 +
119773034Sjwd				offsetof(struct hpt_iop_request_ioctl_command, buf) +
119873034Sjwd					((pParams->nInBufferSize + 3) & ~3),
119973034Sjwd				(void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
120073034Sjwd				goto invalid;
120173034Sjwd
120273034Sjwd		if (pParams->lpBytesReturned) {
120373034Sjwd			if (hptiop_bus_space_copyout(hba, req32 +
120473034Sjwd				offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
120594065Sphk				(void *)pParams->lpBytesReturned, sizeof(unsigned  long)))
120673034Sjwd				goto invalid;
120773034Sjwd		}
120873034Sjwd
120973034Sjwd		BUS_SPACE_WRT4_ITL(outbound_queue, req32);
121073034Sjwd
121173034Sjwd		return 0;
121273034Sjwd	} else{
121373034Sjwdinvalid:
121473034Sjwd		BUS_SPACE_WRT4_ITL(outbound_queue, req32);
121573034Sjwd
121673034Sjwd		return EFAULT;
121773034Sjwd	}
121873034Sjwd}
121973034Sjwd
122073034Sjwdstatic int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
122173034Sjwd				struct hpt_iop_request_ioctl_command *req,
122273034Sjwd				struct hpt_iop_ioctl_param *pParams)
122373034Sjwd{
122494065Sphk	u_int64_t req_phy;
122573034Sjwd	int size = 0;
122673034Sjwd
122773034Sjwd	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
122873034Sjwd			(hba->max_request_size -
122973034Sjwd			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
123073034Sjwd		device_printf(hba->pcidev, "request size beyond max value");
123173034Sjwd		return -1;
123273034Sjwd	}
123373034Sjwd
123473034Sjwd	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
123573034Sjwd	req->inbuf_size = pParams->nInBufferSize;
123673034Sjwd	req->outbuf_size = pParams->nOutBufferSize;
123773034Sjwd	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
123873034Sjwd					+ pParams->nInBufferSize;
123973034Sjwd	req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
12401558Srgrimes	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
12411558Srgrimes	req->header.result = IOP_RESULT_PENDING;
124273034Sjwd	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
124373034Sjwd	size = req->header.size >> 8;
124494065Sphk	size = size > 3 ? 3 : size;
124573034Sjwd	req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
124673034Sjwd	hptiop_mv_inbound_write(req_phy, hba);
124773034Sjwd
124873034Sjwd	BUS_SPACE_RD4_MV0(outbound_intmask);
124973034Sjwd
125073034Sjwd	while (hba->config_done == 0) {
125173034Sjwd		if (hptiop_sleep(hba, req, PPAUSE,
1252133347Sdes			"hptctl", HPT_OSM_TIMEOUT)==0)
1253107534Sgrog			continue;
1254107534Sgrog		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
125573034Sjwd	}
125694065Sphk	return 0;
125794065Sphk}
125873034Sjwd
125973034Sjwdstatic int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
126073034Sjwd				struct hpt_iop_ioctl_param *pParams)
126173034Sjwd{
126294065Sphk	struct hpt_iop_request_ioctl_command *req;
1263133347Sdes
1264133347Sdes	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
126573034Sjwd		(pParams->Magic != HPT_IOCTL_MAGIC32))
126673034Sjwd		return EFAULT;
1267114574Sphk
126873034Sjwd	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
126994065Sphk	hba->config_done = 0;
127073034Sjwd	hptiop_lock_adapter(hba);
127173034Sjwd	if (pParams->nInBufferSize)
127294065Sphk		if (copyin((void *)pParams->lpInBuffer,
1273133347Sdes				req->buf, pParams->nInBufferSize))
127473034Sjwd			goto invalid;
127573034Sjwd	if (hptiop_post_ioctl_command_mv(hba, req, pParams))
127673034Sjwd		goto invalid;
127773034Sjwd
127873034Sjwd	if (hba->config_done == 1) {
127973034Sjwd		if (pParams->nOutBufferSize)
12801558Srgrimes			if (copyout(req->buf +
1281114574Sphk				((pParams->nInBufferSize + 3) & ~3),
128237234Sbde				(void *)pParams->lpOutBuffer,
12831558Srgrimes				pParams->nOutBufferSize))
12841558Srgrimes				goto invalid;
1285114574Sphk
12861558Srgrimes		if (pParams->lpBytesReturned)
12871558Srgrimes			if (copyout(&req->bytes_returned,
1288114574Sphk				(void*)pParams->lpBytesReturned,
12891558Srgrimes				sizeof(u_int32_t)))
12901558Srgrimes				goto invalid;
12911558Srgrimes		hptiop_unlock_adapter(hba);
12921558Srgrimes		return 0;
12931558Srgrimes	} else{
12941558Srgrimesinvalid:
12951558Srgrimes		hptiop_unlock_adapter(hba);
12961558Srgrimes		return EFAULT;
12971558Srgrimes	}
129813544Sjoerg}
12991558Srgrimes
13001558Srgrimesstatic int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
13011558Srgrimes				struct hpt_iop_request_ioctl_command *req,
1302114574Sphk				struct hpt_iop_ioctl_param *pParams)
130373034Sjwd{
1304114574Sphk	u_int64_t phy_addr;
130573034Sjwd	u_int32_t index;
1306114574Sphk
130773034Sjwd	phy_addr = hba->ctlcfgcmd_phy;
1308114574Sphk
130973034Sjwd	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
131073034Sjwd			(hba->max_request_size -
131173034Sjwd			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1312114574Sphk		device_printf(hba->pcidev, "request size beyond max value");
131373034Sjwd		return -1;
131473034Sjwd	}
131573034Sjwd
131673034Sjwd	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
131773034Sjwd	req->inbuf_size = pParams->nInBufferSize;
131873034Sjwd	req->outbuf_size = pParams->nOutBufferSize;
131973034Sjwd	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1320107534Sgrog					+ pParams->nInBufferSize;
1321133347Sdes
1322107534Sgrog	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1323107534Sgrog	req->header.result = IOP_RESULT_PENDING;
132473034Sjwd
132573034Sjwd	req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
132673034Sjwd						| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
132773034Sjwd						| IOP_REQUEST_FLAG_ADDR_BITS
132873034Sjwd						| ((phy_addr >> 16) & 0xffff0000);
132973034Sjwd	req->header.context = ((phy_addr & 0xffffffff) << 32 )
133073034Sjwd						| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
133173034Sjwd
133273034Sjwd	hba->u.mvfrey.inlist_wptr++;
133373034Sjwd	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
13341558Srgrimes
13351558Srgrimes	if (index == hba->u.mvfrey.list_count) {
13361558Srgrimes		index = 0;
13371558Srgrimes		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
13381558Srgrimes		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
1339114574Sphk	}
134037234Sbde
13411558Srgrimes	hba->u.mvfrey.inlist[index].addr = phy_addr;
13421558Srgrimes	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
13431558Srgrimes
13441558Srgrimes	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
13451558Srgrimes	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
134613550Sjoerg
134713550Sjoerg	while (hba->config_done == 0) {
134813550Sjoerg		if (hptiop_sleep(hba, req, PPAUSE,
134913550Sjoerg			"hptctl", HPT_OSM_TIMEOUT)==0)
135013550Sjoerg			continue;
135113550Sjoerg		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
135213550Sjoerg	}
135313550Sjoerg	return 0;
1354114571Sphk}
135513550Sjoerg
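/* MVFrey counterpart of hptiop_do_ioctl_mv(); only the post routine differs. */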
135613550Sjoergstatic int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
135799365Smarkm				struct hpt_iop_ioctl_param *pParams)
1358103669Sphk{
135913550Sjoerg	struct hpt_iop_request_ioctl_command *req;
1360114574Sphk
136113550Sjoerg	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1362114571Sphk		(pParams->Magic != HPT_IOCTL_MAGIC32))
1363114571Sphk		return EFAULT;
136416431Sbde
136513550Sjoerg	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
136668044Sjkh	hba->config_done = 0;
1367127650Sluigi	hptiop_lock_adapter(hba);
1368127650Sluigi	if (pParams->nInBufferSize)
1369127650Sluigi		if (copyin((void *)pParams->lpInBuffer,
1370103669Sphk				req->buf, pParams->nInBufferSize))
1371103669Sphk			goto invalid;
1372103669Sphk	if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
1373103669Sphk		goto invalid;
1374103669Sphk
1375103669Sphk	if (hba->config_done == 1) {
1376103669Sphk		if (pParams->nOutBufferSize)
1377103669Sphk			if (copyout(req->buf +
1378103669Sphk				((pParams->nInBufferSize + 3) & ~3),
1379103669Sphk				(void *)pParams->lpOutBuffer,
138068044Sjkh				pParams->nOutBufferSize))
1381103669Sphk				goto invalid;
1382103669Sphk
1383103669Sphk		if (pParams->lpBytesReturned)
1384103669Sphk			if (copyout(&req->bytes_returned,
138568044Sjkh				(void*)pParams->lpBytesReturned,
1386103669Sphk				sizeof(u_int32_t)))
1387103669Sphk				goto invalid;
1388103669Sphk		hptiop_unlock_adapter(hba);
1389103669Sphk		return 0;
1390103669Sphk	} else {
1391103669Sphkinvalid:
1392103669Sphk		hptiop_unlock_adapter(hba);
1393103669Sphk		return EFAULT;
1394103669Sphk	}
1395103669Sphk}
1396103669Sphk
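/*
 * Ask CAM to rescan the whole bus behind our SIM (wildcard target and
 * LUN); the allocated CCB is consumed by xpt_rescan().
 */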
1397103669Sphkstatic int  hptiop_rescan_bus(struct hpt_iop_hba * hba)
1398103669Sphk{
1399103669Sphk	union ccb           *ccb;
1400103669Sphk
1401103669Sphk	if ((ccb = xpt_alloc_ccb()) == NULL)
1402103669Sphk		return(ENOMEM);
1403103669Sphk	if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim),
1404103669Sphk		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1405114574Sphk		xpt_free_ccb(ccb);
1406103669Sphk		return(EIO);
1407103669Sphk	}
1408103669Sphk	xpt_rescan(ccb);
1409103669Sphk	return(0);
1410103669Sphk}
1411103669Sphk
1412103669Sphkstatic  bus_dmamap_callback_t   hptiop_map_srb;
141399365Smarkmstatic  bus_dmamap_callback_t   hptiop_post_scsi_command;
141413550Sjoergstatic  bus_dmamap_callback_t   hptiop_mv_map_ctlcfg;
141513550Sjoergstatic	bus_dmamap_callback_t	hptiop_mvfrey_map_ctlcfg;
1416114571Sphk
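/*
 * Map PCI BAR0 (rid 0x10) for Intel-based (ITL) adapters; the whole
 * message unit is accessed through this single window.
 */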
141792541Simpstatic int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
14181558Srgrimes{
1419112307Sru	hba->bar0_rid = 0x10;
1420112307Sru	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1421112307Sru			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1422114569Sphk
1423112307Sru	if (hba->bar0_res == NULL) {
1424114571Sphk		device_printf(hba->pcidev,
1425112307Sru			"failed to get iop base address.\n");
1426114569Sphk		return -1;
1427112307Sru	}
1428114569Sphk	hba->bar0t = rman_get_bustag(hba->bar0_res);
1429112307Sru	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1430113680Sphk	hba->u.itl.mu = (struct hpt_iopmu_itl *)
1431112307Sru				rman_get_virtual(hba->bar0_res);
1432114571Sphk
1433112307Sru	if (!hba->u.itl.mu) {
1434113680Sphk		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1435109878Sphk					hba->bar0_rid, hba->bar0_res);
1436109878Sphk		device_printf(hba->pcidev, "alloc mem res failed\n");
14371558Srgrimes		return -1;
14381558Srgrimes	}
1439
1440	return 0;
1441}
1442
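/*
 * MV-based adapters need two BARs: BAR0 (rid 0x10) for the register
 * block and BAR2 (rid 0x18) for the message unit.  Any failure releases
 * whatever was already mapped.
 */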
1443static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
1444{
1445	hba->bar0_rid = 0x10;
1446	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1447			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1448
1449	if (hba->bar0_res == NULL) {
1450		device_printf(hba->pcidev, "failed to get iop bar0.\n");
1451		return -1;
1452	}
1453	hba->bar0t = rman_get_bustag(hba->bar0_res);
1454	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1455	hba->u.mv.regs = (struct hpt_iopmv_regs *)
1456				rman_get_virtual(hba->bar0_res);
1457
1458	if (!hba->u.mv.regs) {
1459		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1460					hba->bar0_rid, hba->bar0_res);
1461		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1462		return -1;
1463	}
1464
1465	hba->bar2_rid = 0x18;
1466	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1467			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1468
1469	if (hba->bar2_res == NULL) {
1470		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1471					hba->bar0_rid, hba->bar0_res);
1472		device_printf(hba->pcidev, "failed to get iop bar2.\n");
1473		return -1;
1474	}
1475
1476	hba->bar2t = rman_get_bustag(hba->bar2_res);
1477	hba->bar2h = rman_get_bushandle(hba->bar2_res);
1478	hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);
1479
1480	if (!hba->u.mv.mu) {
1481		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1482					hba->bar0_rid, hba->bar0_res);
1483		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1484					hba->bar2_rid, hba->bar2_res);
1485		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1486		return -1;
1487	}
1488
1489	return 0;
1490}
1491
1492static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
1493{
1494	hba->bar0_rid = 0x10;
1495	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1496			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1497
1498	if (hba->bar0_res == NULL) {
1499		device_printf(hba->pcidev, "failed to get iop bar0.\n");
1500		return -1;
1501	}
1502	hba->bar0t = rman_get_bustag(hba->bar0_res);
1503	hba->bar0h = rman_get_bushandle(hba->bar0_res);
1504	hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
1505				rman_get_virtual(hba->bar0_res);
1506
1507	if (!hba->u.mvfrey.config) {
1508		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1509					hba->bar0_rid, hba->bar0_res);
1510		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1511		return -1;
1512	}
1513
1514	hba->bar2_rid = 0x18;
1515	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1516			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1517
1518	if (hba->bar2_res == NULL) {
1519		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1520					hba->bar0_rid, hba->bar0_res);
1521		device_printf(hba->pcidev, "failed to get iop bar2.\n");
1522		return -1;
1523	}
1524
1525	hba->bar2t = rman_get_bustag(hba->bar2_res);
1526	hba->bar2h = rman_get_bushandle(hba->bar2_res);
1527	hba->u.mvfrey.mu =
1528					(struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res);
1529
1530	if (!hba->u.mvfrey.mu) {
1531		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1532					hba->bar0_rid, hba->bar0_res);
1533		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1534					hba->bar2_rid, hba->bar2_res);
1535		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1536		return -1;
1537	}
1538
1539	return 0;
1540}
1541
1542static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
1543{
1544	if (hba->bar0_res)
1545		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1546			hba->bar0_rid, hba->bar0_res);
1547}
1548
1549static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
1550{
1551	if (hba->bar0_res)
1552		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1553			hba->bar0_rid, hba->bar0_res);
1554	if (hba->bar2_res)
1555		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1556			hba->bar2_rid, hba->bar2_res);
1557}
1558
1559static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
1560{
1561	if (hba->bar0_res)
1562		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1563			hba->bar0_rid, hba->bar0_res);
1564	if (hba->bar2_res)
1565		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1566			hba->bar2_rid, hba->bar2_res);
1567}
1568
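/*
 * Allocate the coherent DMA buffer used for control/configuration
 * requests on MV adapters: one tag restricted to 32-bit addresses, one
 * allocation, then a load whose callback (hptiop_mv_map_ctlcfg) records
 * the aligned physical and virtual addresses.
 */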
1569static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
1570{
1571	if (bus_dma_tag_create(hba->parent_dmat,
1572				1,
1573				0,
1574				BUS_SPACE_MAXADDR_32BIT,
1575				BUS_SPACE_MAXADDR,
1576				NULL, NULL,
1577				0x800 - 0x8,
1578				1,
1579				BUS_SPACE_MAXSIZE_32BIT,
1580				BUS_DMA_ALLOCNOW,
1581				NULL,
1582				NULL,
1583				&hba->ctlcfg_dmat)) {
1584		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1585		return -1;
1586	}
1587
1588	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1589		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1590		&hba->ctlcfg_dmamap) != 0) {
1591			device_printf(hba->pcidev,
1592					"bus_dmamem_alloc failed!\n");
1593			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1594			return -1;
1595	}
1596
1597	if (bus_dmamap_load(hba->ctlcfg_dmat,
1598			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1599			MVIOP_IOCTLCFG_SIZE,
1600			hptiop_mv_map_ctlcfg, hba, 0)) {
1601		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1602		if (hba->ctlcfg_dmat) {
1603			bus_dmamem_free(hba->ctlcfg_dmat,
1604				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1605			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1606		}
1607		return -1;
1608	}
1609
1610	return 0;
1611}
1612
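/*
 * MVFrey variant: the firmware reports the queue depth in the upper half
 * of inbound_conf_ctl.  A single coherent buffer holds the 0x800-byte
 * ioctl area followed by the inbound list, the outbound list and the
 * outbound-copy pointer; hptiop_mvfrey_map_ctlcfg() carves it up.
 */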
1613static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
1614{
1615	u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);
1616
1617	list_count >>= 16;
1618
1619	if (list_count == 0) {
1620		return -1;
1621	}
1622
1623	hba->u.mvfrey.list_count = list_count;
1624	hba->u.mvfrey.internal_mem_size = 0x800
1625							+ list_count * sizeof(struct mvfrey_inlist_entry)
1626							+ list_count * sizeof(struct mvfrey_outlist_entry)
1627							+ sizeof(int);
1628	if (bus_dma_tag_create(hba->parent_dmat,
1629				1,
1630				0,
1631				BUS_SPACE_MAXADDR_32BIT,
1632				BUS_SPACE_MAXADDR,
1633				NULL, NULL,
1634				hba->u.mvfrey.internal_mem_size,
1635				1,
1636				BUS_SPACE_MAXSIZE_32BIT,
1637				BUS_DMA_ALLOCNOW,
1638				NULL,
1639				NULL,
1640				&hba->ctlcfg_dmat)) {
1641		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1642		return -1;
1643	}
1644
1645	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1646		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1647		&hba->ctlcfg_dmamap) != 0) {
1648			device_printf(hba->pcidev,
1649					"bus_dmamem_alloc failed!\n");
1650			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1651			return -1;
1652	}
1653
1654	if (bus_dmamap_load(hba->ctlcfg_dmat,
1655			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1656			hba->u.mvfrey.internal_mem_size,
1657			hptiop_mvfrey_map_ctlcfg, hba, 0)) {
1658		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1659		if (hba->ctlcfg_dmat) {
1660			bus_dmamem_free(hba->ctlcfg_dmat,
1661				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1662			bus_dma_tag_destroy(hba->ctlcfg_dmat);
1663		}
1664		return -1;
1665	}
1666
1667	return 0;
1668}
1669
1670static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
1671	return 0;
1672}
1673
1674static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
1675{
1676	if (hba->ctlcfg_dmat) {
1677		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1678		bus_dmamem_free(hba->ctlcfg_dmat,
1679					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1680		bus_dma_tag_destroy(hba->ctlcfg_dmat);
1681	}
1682
1683	return 0;
1684}
1685
1686static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
1687{
1688	if (hba->ctlcfg_dmat) {
1689		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1690		bus_dmamem_free(hba->ctlcfg_dmat,
1691					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1692		bus_dma_tag_destroy(hba->ctlcfg_dmat);
1693	}
1694
1695	return 0;
1696}
1697
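/*
 * Re-arm the MVFrey command lists after a RESET_COMM message: give the
 * MCU time to come back, program the physical base addresses of the
 * inbound/outbound lists and the outbound shadow pointer, and reset the
 * ring pointers with the toggle bit set.
 */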
1698static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
1699{
1700	u_int32_t i = 100;
1701
1702	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
1703		return -1;
1704
1705	/* wait 100ms for MCU ready */
1706	while (i--) {
1707		DELAY(1000);
1708	}
1709
1710	BUS_SPACE_WRT4_MVFREY2(inbound_base,
1711							hba->u.mvfrey.inlist_phy & 0xffffffff);
1712	BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
1713							(hba->u.mvfrey.inlist_phy >> 16) >> 16);
1714
1715	BUS_SPACE_WRT4_MVFREY2(outbound_base,
1716							hba->u.mvfrey.outlist_phy & 0xffffffff);
1717	BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
1718							(hba->u.mvfrey.outlist_phy >> 16) >> 16);
1719
1720	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
1721							hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
1722	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
1723							(hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);
1724
1725	hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
1726								| CL_POINTER_TOGGLE;
1727	*hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
1728								| CL_POINTER_TOGGLE;
1729	hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;
1730
1731	return 0;
1732}
1733
1734/*
1735 * CAM driver interface
1736 */
1737static device_method_t driver_methods[] = {
1738	/* Device interface */
1739	DEVMETHOD(device_probe,     hptiop_probe),
1740	DEVMETHOD(device_attach,    hptiop_attach),
1741	DEVMETHOD(device_detach,    hptiop_detach),
1742	DEVMETHOD(device_shutdown,  hptiop_shutdown),
1743	{ 0, 0 }
1744};
1745
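/*
 * Per-family operation tables; hba->ops is selected in hptiop_probe()
 * and used throughout attach, I/O and ioctl handling.
 */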
1746static struct hptiop_adapter_ops hptiop_itl_ops = {
1747	.family	           = INTEL_BASED_IOP,
1748	.iop_wait_ready    = hptiop_wait_ready_itl,
1749	.internal_memalloc = 0,
1750	.internal_memfree  = hptiop_internal_memfree_itl,
1751	.alloc_pci_res     = hptiop_alloc_pci_res_itl,
1752	.release_pci_res   = hptiop_release_pci_res_itl,
1753	.enable_intr       = hptiop_enable_intr_itl,
1754	.disable_intr      = hptiop_disable_intr_itl,
1755	.get_config        = hptiop_get_config_itl,
1756	.set_config        = hptiop_set_config_itl,
1757	.iop_intr          = hptiop_intr_itl,
1758	.post_msg          = hptiop_post_msg_itl,
1759	.post_req          = hptiop_post_req_itl,
1760	.do_ioctl          = hptiop_do_ioctl_itl,
1761	.reset_comm        = 0,
1762};
1763
1764static struct hptiop_adapter_ops hptiop_mv_ops = {
1765	.family	           = MV_BASED_IOP,
1766	.iop_wait_ready    = hptiop_wait_ready_mv,
1767	.internal_memalloc = hptiop_internal_memalloc_mv,
1768	.internal_memfree  = hptiop_internal_memfree_mv,
1769	.alloc_pci_res     = hptiop_alloc_pci_res_mv,
1770	.release_pci_res   = hptiop_release_pci_res_mv,
1771	.enable_intr       = hptiop_enable_intr_mv,
1772	.disable_intr      = hptiop_disable_intr_mv,
1773	.get_config        = hptiop_get_config_mv,
1774	.set_config        = hptiop_set_config_mv,
1775	.iop_intr          = hptiop_intr_mv,
1776	.post_msg          = hptiop_post_msg_mv,
1777	.post_req          = hptiop_post_req_mv,
1778	.do_ioctl          = hptiop_do_ioctl_mv,
1779	.reset_comm        = 0,
1780};
1781
1782static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
1783	.family	           = MVFREY_BASED_IOP,
1784	.iop_wait_ready    = hptiop_wait_ready_mvfrey,
1785	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
1786	.internal_memfree  = hptiop_internal_memfree_mvfrey,
1787	.alloc_pci_res     = hptiop_alloc_pci_res_mvfrey,
1788	.release_pci_res   = hptiop_release_pci_res_mvfrey,
1789	.enable_intr       = hptiop_enable_intr_mvfrey,
1790	.disable_intr      = hptiop_disable_intr_mvfrey,
1791	.get_config        = hptiop_get_config_mvfrey,
1792	.set_config        = hptiop_set_config_mvfrey,
1793	.iop_intr          = hptiop_intr_mvfrey,
1794	.post_msg          = hptiop_post_msg_mvfrey,
1795	.post_req          = hptiop_post_req_mvfrey,
1796	.do_ioctl          = hptiop_do_ioctl_mvfrey,
1797	.reset_comm        = hptiop_reset_comm_mvfrey,
1798};
1799
1800static driver_t hptiop_pci_driver = {
1801	driver_name,
1802	driver_methods,
1803	sizeof(struct hpt_iop_hba)
1804};
1805
1806DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0);
1807MODULE_DEPEND(hptiop, cam, 1, 1, 1);
1808
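/*
 * Match HighPoint (vendor 0x1103) controllers and pick the adapter ops
 * for the device id.  The SAS ids fall through to the corresponding
 * SATA cases after setting the sas flag.
 */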
1809static int hptiop_probe(device_t dev)
1810{
1811	struct hpt_iop_hba *hba;
1812	u_int32_t id;
1813	static char buf[256];
1814	int sas = 0;
1815	struct hptiop_adapter_ops *ops;
1816
1817	if (pci_get_vendor(dev) != 0x1103)
1818		return (ENXIO);
1819
1820	id = pci_get_device(dev);
1821
1822	switch (id) {
1823		case 0x4520:
1824		case 0x4521:
1825		case 0x4522:
1826			sas = 1;
1827		case 0x3620:
1828		case 0x3622:
1829		case 0x3640:
1830			ops = &hptiop_mvfrey_ops;
1831			break;
1832		case 0x4210:
1833		case 0x4211:
1834		case 0x4310:
1835		case 0x4311:
1836		case 0x4320:
1837		case 0x4321:
1838		case 0x4322:
1839			sas = 1;
1840		case 0x3220:
1841		case 0x3320:
1842		case 0x3410:
1843		case 0x3520:
1844		case 0x3510:
1845		case 0x3511:
1846		case 0x3521:
1847		case 0x3522:
1848		case 0x3530:
1849		case 0x3540:
1850		case 0x3560:
1851			ops = &hptiop_itl_ops;
1852			break;
1853		case 0x3020:
1854		case 0x3120:
1855		case 0x3122:
1856			ops = &hptiop_mv_ops;
1857			break;
1858		default:
1859			return (ENXIO);
1860	}
1861
1862	device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
1863		pci_get_bus(dev), pci_get_slot(dev),
1864		pci_get_function(dev), pci_get_irq(dev));
1865
1866	sprintf(buf, "RocketRAID %x %s Controller\n",
1867				id, sas ? "SAS" : "SATA");
1868	device_set_desc_copy(dev, buf);
1869
1870	hba = (struct hpt_iop_hba *)device_get_softc(dev);
1871	bzero(hba, sizeof(struct hpt_iop_hba));
1872	hba->ops = ops;
1873
1874	KdPrint(("hba->ops=%p\n", hba->ops));
1875	return 0;
1876}
1877
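/*
 * Attach sequence: map PCI resources, wait for the IOP, create the DMA
 * tags (parent, per-I/O and SRB pool), fetch and apply the firmware
 * configuration, register the CAM SIM and path, hook up the interrupt,
 * start the background task and create the /dev/hptiopN control node.
 * Each failure unwinds through the labels at the bottom.
 */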
1878static int hptiop_attach(device_t dev)
1879{
1880	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1881	struct hpt_iop_request_get_config  iop_config;
1882	struct hpt_iop_request_set_config  set_config;
1883	int rid = 0;
1884	struct cam_devq *devq;
1885	struct ccb_setasync ccb;
1886	u_int32_t unit = device_get_unit(dev);
1887
1888	device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
1889			unit, driver_version);
1890
1891	KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1892		pci_get_bus(dev), pci_get_slot(dev),
1893		pci_get_function(dev), hba->ops));
1894
1895	pci_enable_busmaster(dev);
1896	hba->pcidev = dev;
1897	hba->pciunit = unit;
1898
1899	if (hba->ops->alloc_pci_res(hba))
1900		return ENXIO;
1901
1902	if (hba->ops->iop_wait_ready(hba, 2000)) {
1903		device_printf(dev, "adapter is not ready\n");
1904		goto release_pci_res;
1905	}
1906
1907	mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
1908
1909	if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */
1910			1,  /* alignment */
1911			0, /* boundary */
1912			BUS_SPACE_MAXADDR,  /* lowaddr */
1913			BUS_SPACE_MAXADDR,  /* highaddr */
1914			NULL, NULL,         /* filter, filterarg */
1915			BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
1916			BUS_SPACE_UNRESTRICTED, /* nsegments */
1917			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1918			0,      /* flags */
1919			NULL,   /* lockfunc */
1920			NULL,       /* lockfuncarg */
1921			&hba->parent_dmat   /* tag */))
1922	{
1923		device_printf(dev, "alloc parent_dmat failed\n");
1924		goto release_pci_res;
1925	}
1926
1927	if (hba->ops->family == MV_BASED_IOP) {
1928		if (hba->ops->internal_memalloc(hba)) {
1929			device_printf(dev, "alloc srb_dmat failed\n");
1930			goto destroy_parent_tag;
1931		}
1932	}
1933
1934	if (hba->ops->get_config(hba, &iop_config)) {
1935		device_printf(dev, "get iop config failed.\n");
1936		goto get_config_failed;
1937	}
1938
1939	hba->firmware_version = iop_config.firmware_version;
1940	hba->interface_version = iop_config.interface_version;
1941	hba->max_requests = iop_config.max_requests;
1942	hba->max_devices = iop_config.max_devices;
1943	hba->max_request_size = iop_config.request_size;
1944	hba->max_sg_count = iop_config.max_sg_count;
1945
1946	if (hba->ops->family == MVFREY_BASED_IOP) {
1947		if (hba->ops->internal_memalloc(hba)) {
1948			device_printf(dev, "alloc srb_dmat failed\n");
1949			goto destroy_parent_tag;
1950		}
1951		if (hba->ops->reset_comm(hba)) {
1952			device_printf(dev, "reset comm failed\n");
1953			goto get_config_failed;
1954		}
1955	}
1956
1957	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1958			4,  /* alignment */
1959			BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1960			BUS_SPACE_MAXADDR,  /* lowaddr */
1961			BUS_SPACE_MAXADDR,  /* highaddr */
1962			NULL, NULL,         /* filter, filterarg */
1963			PAGE_SIZE * (hba->max_sg_count-1),  /* maxsize */
1964			hba->max_sg_count,  /* nsegments */
1965			0x20000,    /* maxsegsize */
1966			BUS_DMA_ALLOCNOW,       /* flags */
1967			busdma_lock_mutex,  /* lockfunc */
1968			&hba->lock,     /* lockfuncarg */
1969			&hba->io_dmat   /* tag */))
1970	{
1971		device_printf(dev, "alloc io_dmat failed\n");
1972		goto get_config_failed;
1973	}
1974
1975	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1976			1,  /* alignment */
1977			0, /* boundary */
1978			BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1979			BUS_SPACE_MAXADDR,  /* highaddr */
1980			NULL, NULL,         /* filter, filterarg */
1981			HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
1982			1,  /* nsegments */
1983			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1984			0,      /* flags */
1985			NULL,   /* lockfunc */
1986			NULL,       /* lockfuncarg */
1987			&hba->srb_dmat  /* tag */))
1988	{
1989		device_printf(dev, "alloc srb_dmat failed\n");
1990		goto destroy_io_dmat;
1991	}
1992
1993	if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1994			BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1995			&hba->srb_dmamap) != 0)
1996	{
1997		device_printf(dev, "srb bus_dmamem_alloc failed!\n");
1998		goto destroy_srb_dmat;
1999	}
2000
2001	if (bus_dmamap_load(hba->srb_dmat,
2002			hba->srb_dmamap, hba->uncached_ptr,
2003			(HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
2004			hptiop_map_srb, hba, 0))
2005	{
2006		device_printf(dev, "bus_dmamap_load failed!\n");
2007		goto srb_dmamem_free;
2008	}
2009
2010	if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) {
2011		device_printf(dev, "cam_simq_alloc failed\n");
2012		goto srb_dmamap_unload;
2013	}
2014
2015	hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
2016			hba, unit, &Giant, hba->max_requests - 1, 1, devq);
2017	if (!hba->sim) {
2018		device_printf(dev, "cam_sim_alloc failed\n");
2019		cam_simq_free(devq);
2020		goto srb_dmamap_unload;
2021	}
2022	if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
2023	{
2024		device_printf(dev, "xpt_bus_register failed\n");
2025		goto free_cam_sim;
2026	}
2027
2028	if (xpt_create_path(&hba->path, /*periph */ NULL,
2029			cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
2030			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2031		device_printf(dev, "xpt_create_path failed\n");
2032		goto deregister_xpt_bus;
2033	}
2034
2035	bzero(&set_config, sizeof(set_config));
2036	set_config.iop_id = unit;
2037	set_config.vbus_id = cam_sim_path(hba->sim);
2038	set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
2039
2040	if (hba->ops->set_config(hba, &set_config)) {
2041		device_printf(dev, "set iop config failed.\n");
2042		goto free_hba_path;
2043	}
2044
2045	xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2046	ccb.ccb_h.func_code = XPT_SASYNC_CB;
2047	ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
2048	ccb.callback = hptiop_async;
2049	ccb.callback_arg = hba->sim;
2050	xpt_action((union ccb *)&ccb);
2051
2052	rid = 0;
2053	if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ,
2054			&rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
2055		device_printf(dev, "allocate irq failed!\n");
2056		goto free_hba_path;
2057	}
2058
2059	if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM,
2060				NULL, hptiop_pci_intr, hba, &hba->irq_handle))
2061	{
2062		device_printf(dev, "allocate intr function failed!\n");
2063		goto free_irq_resource;
2064	}
2065
2066	if (hptiop_send_sync_msg(hba,
2067			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
2068		device_printf(dev, "fail to start background task\n");
2069		goto teardown_irq_resource;
2070	}
2071
2072	hba->ops->enable_intr(hba);
2073	hba->initialized = 1;
2074
2075	hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
2076				UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
2077				S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);
2078
2079
2080	return 0;
2081
2082
2083teardown_irq_resource:
2084	bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
2085
2086free_irq_resource:
2087	bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
2088
2089free_hba_path:
2090	xpt_free_path(hba->path);
2091
2092deregister_xpt_bus:
2093	xpt_bus_deregister(cam_sim_path(hba->sim));
2094
2095free_cam_sim:
2096	cam_sim_free(hba->sim, /*free devq*/ TRUE);
2097
2098srb_dmamap_unload:
2099	if (hba->uncached_ptr)
2100		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2101
2102srb_dmamem_free:
2103	if (hba->uncached_ptr)
2104		bus_dmamem_free(hba->srb_dmat,
2105			hba->uncached_ptr, hba->srb_dmamap);
2106
2107destroy_srb_dmat:
2108	if (hba->srb_dmat)
2109		bus_dma_tag_destroy(hba->srb_dmat);
2110
2111destroy_io_dmat:
2112	if (hba->io_dmat)
2113		bus_dma_tag_destroy(hba->io_dmat);
2114
2115get_config_failed:
2116	hba->ops->internal_memfree(hba);
2117
2118destroy_parent_tag:
2119	if (hba->parent_dmat)
2120		bus_dma_tag_destroy(hba->parent_dmat);
2121
2122release_pci_res:
2123	if (hba->ops->release_pci_res)
2124		hba->ops->release_pci_res(hba);
2125
2126	return ENXIO;
2127}
2128
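/*
 * Detach: refuse if any target still has an open da(4) consumer, shut
 * the adapter down, stop the background task and release all resources.
 */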
2129static int hptiop_detach(device_t dev)
2130{
2131	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2132	int i;
2133	int error = EBUSY;
2134
2135	hptiop_lock_adapter(hba);
2136	for (i = 0; i < hba->max_devices; i++)
2137		if (hptiop_os_query_remove_device(hba, i)) {
2138			device_printf(dev, "%d file system is busy. id=%d",
2139						hba->pciunit, i);
2140			goto out;
2141		}
2142
2143	if ((error = hptiop_shutdown(dev)) != 0)
2144		goto out;
2145	if (hptiop_send_sync_msg(hba,
2146		IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
2147		goto out;
2148
2149	hptiop_release_resource(hba);
2150	error = 0;
2151out:
2152	hptiop_unlock_adapter(hba);
2153	return error;
2154}
2155
2156static int hptiop_shutdown(device_t dev)
2157{
2158	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2159
2160	int error = 0;
2161
2162	if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
2163		device_printf(dev, "%d device is busy", hba->pciunit);
2164		return EBUSY;
2165	}
2166
2167	hba->ops->disable_intr(hba);
2168
2169	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
2170		error = EBUSY;
2171
2172	return error;
2173}
2174
2175static void hptiop_pci_intr(void *arg)
2176{
2177	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2178	hptiop_lock_adapter(hba);
2179	hba->ops->iop_intr(hba);
2180	hptiop_unlock_adapter(hba);
2181}
2182
2183static void hptiop_poll(struct cam_sim *sim)
2184{
2185	hptiop_pci_intr(cam_sim_softc(sim));
2186}
2187
2188static void hptiop_async(void * callback_arg, u_int32_t code,
2189					struct cam_path * path, void * arg)
2190{
2191}
2192
2193static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
2194{
2195	BUS_SPACE_WRT4_ITL(outbound_intmask,
2196		~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
2197}
2198
2199static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
2200{
2201	u_int32_t int_mask;
2202
2203	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2204
2205	int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
2206			| MVIOP_MU_OUTBOUND_INT_MSG;
2207	BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask);
2208}
2209
2210static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
2211{
2212	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
2213	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2214
2215	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
2216	BUS_SPACE_RD4_MVFREY2(isr_enable);
2217
2218	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
2219	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2220}
2221
2222static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
2223{
2224	u_int32_t int_mask;
2225
2226	int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
2227
2228	int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
2229	BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
2230	BUS_SPACE_RD4_ITL(outbound_intstatus);
2231}
2232
2233static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
2234{
2235	u_int32_t int_mask;
2236	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2237
2238	int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
2239			| MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
2240	BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask);
2241	BUS_SPACE_RD4_MV0(outbound_intmask);
2242}
2243
2244static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
2245{
2246	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
2247	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2248
2249	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
2250	BUS_SPACE_RD4_MVFREY2(isr_enable);
2251
2252	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
2253	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2254}
2255
2256static void hptiop_reset_adapter(void *argv)
2257{
2258	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
2259	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
2260		return;
2261	hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
2262}
2263
2264static void *hptiop_get_srb(struct hpt_iop_hba * hba)
2265{
2266	struct hpt_iop_srb * srb;
2267
2268	if (hba->srb_list) {
2269		srb = hba->srb_list;
2270		hba->srb_list = srb->next;
2271		return srb;
2272	}
2273
2274	return NULL;
2275}
2276
2277static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
2278{
2279	srb->next = hba->srb_list;
2280	hba->srb_list = srb;
2281}
2282
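/*
 * CAM action entry point.  XPT_SCSI_IO maps the CCB's data buffer and
 * defers to hptiop_post_scsi_command() from the bus_dma callback; the
 * remaining function codes are serviced inline.
 */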
2283static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
2284{
2285	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
2286	struct hpt_iop_srb * srb;
2287	int error;
2288
2289	switch (ccb->ccb_h.func_code) {
2290
2291	case XPT_SCSI_IO:
2292		hptiop_lock_adapter(hba);
2293		if (ccb->ccb_h.target_lun != 0 ||
2294			ccb->ccb_h.target_id >= hba->max_devices ||
2295			(ccb->ccb_h.flags & CAM_CDB_PHYS))
2296		{
2297			ccb->ccb_h.status = CAM_TID_INVALID;
2298			xpt_done(ccb);
2299			goto scsi_done;
2300		}
2301
2302		if ((srb = hptiop_get_srb(hba)) == NULL) {
2303			device_printf(hba->pcidev, "srb allocation failed");
2304			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2305			xpt_done(ccb);
2306			goto scsi_done;
2307		}
2308
2309		srb->ccb = ccb;
2310		error = bus_dmamap_load_ccb(hba->io_dmat,
2311					    srb->dma_map,
2312					    ccb,
2313					    hptiop_post_scsi_command,
2314					    srb,
2315					    0);
2316
2317		if (error && error != EINPROGRESS) {
2318			device_printf(hba->pcidev,
2319				"%d bus_dmamap_load error %d",
2320				hba->pciunit, error);
2321			xpt_freeze_simq(hba->sim, 1);
2322			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2323			hptiop_free_srb(hba, srb);
2324			xpt_done(ccb);
2325			goto scsi_done;
2326		}
2327
2328scsi_done:
2329		hptiop_unlock_adapter(hba);
2330		return;
2331
2332	case XPT_RESET_BUS:
2333		device_printf(hba->pcidev, "reset adapter");
2334		hptiop_lock_adapter(hba);
2335		hba->msg_done = 0;
2336		hptiop_reset_adapter(hba);
2337		hptiop_unlock_adapter(hba);
2338		break;
2339
2340	case XPT_GET_TRAN_SETTINGS:
2341	case XPT_SET_TRAN_SETTINGS:
2342		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2343		break;
2344
2345	case XPT_CALC_GEOMETRY:
2346		cam_calc_geometry(&ccb->ccg, 1);
2347		break;
2348
2349	case XPT_PATH_INQ:
2350	{
2351		struct ccb_pathinq *cpi = &ccb->cpi;
2352
2353		cpi->version_num = 1;
2354		cpi->hba_inquiry = PI_SDTR_ABLE;
2355		cpi->target_sprt = 0;
2356		cpi->hba_misc = PIM_NOBUSRESET;
2357		cpi->hba_eng_cnt = 0;
2358		cpi->max_target = hba->max_devices;
2359		cpi->max_lun = 0;
2360		cpi->unit_number = cam_sim_unit(sim);
2361		cpi->bus_id = cam_sim_bus(sim);
2362		cpi->initiator_id = hba->max_devices;
2363		cpi->base_transfer_speed = 3300;
2364
2365		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2366		strlcpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
2367		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2368		cpi->transport = XPORT_SPI;
2369		cpi->transport_version = 2;
2370		cpi->protocol = PROTO_SCSI;
2371		cpi->protocol_version = SCSI_REV_2;
2372		cpi->ccb_h.status = CAM_REQ_CMP;
2373		break;
2374	}
2375
2376	default:
2377		ccb->ccb_h.status = CAM_REQ_INVALID;
2378		break;
2379	}
2380
2381	xpt_done(ccb);
2382	return;
2383}
2384
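/*
 * Queue a SCSI command on an ITL adapter.  SRBs marked with
 * HPT_SRB_FLAG_HIGH_MEM_ACESS are copied into a request slot obtained
 * from the inbound queue; otherwise the request is built in place in
 * the SRB and its physical address is posted, with size bits added for
 * newer firmware/interface versions.
 */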
2385static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
2386				struct hpt_iop_srb *srb,
2387				bus_dma_segment_t *segs, int nsegs)
2388{
2389	int idx;
2390	union ccb *ccb = srb->ccb;
2391	u_int8_t *cdb;
2392
2393	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2394		cdb = ccb->csio.cdb_io.cdb_ptr;
2395	else
2396		cdb = ccb->csio.cdb_io.cdb_bytes;
2397
2398	KdPrint(("ccb=%p %x-%x-%x\n",
2399		ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
2400
2401	if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
2402		u_int32_t iop_req32;
2403		struct hpt_iop_request_scsi_command req;
2404
2405		iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
2406
2407		if (iop_req32 == IOPMU_QUEUE_EMPTY) {
2408			device_printf(hba->pcidev, "invalid req offset\n");
2409			ccb->ccb_h.status = CAM_BUSY;
2410			bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2411			hptiop_free_srb(hba, srb);
2412			xpt_done(ccb);
2413			return;
2414		}
2415
2416		if (ccb->csio.dxfer_len && nsegs > 0) {
2417			struct hpt_iopsg *psg = req.sg_list;
2418			for (idx = 0; idx < nsegs; idx++, psg++) {
2419				psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2420				psg->size = segs[idx].ds_len;
2421				psg->eot = 0;
2422			}
2423			psg[-1].eot = 1;
2424		}
2425
2426		bcopy(cdb, req.cdb, ccb->csio.cdb_len);
2427
2428		req.header.size =
2429				offsetof(struct hpt_iop_request_scsi_command, sg_list)
2430				+ nsegs*sizeof(struct hpt_iopsg);
2431		req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2432		req.header.flags = 0;
2433		req.header.result = IOP_RESULT_PENDING;
2434		req.header.context = (u_int64_t)(unsigned long)srb;
2435		req.dataxfer_length = ccb->csio.dxfer_len;
2436		req.channel =  0;
2437		req.target =  ccb->ccb_h.target_id;
2438		req.lun =  ccb->ccb_h.target_lun;
2439
2440		bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
2441			(u_int8_t *)&req, req.header.size);
2442
2443		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2444			bus_dmamap_sync(hba->io_dmat,
2445				srb->dma_map, BUS_DMASYNC_PREREAD);
2446		}
2447		else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2448			bus_dmamap_sync(hba->io_dmat,
2449				srb->dma_map, BUS_DMASYNC_PREWRITE);
2450
2451		BUS_SPACE_WRT4_ITL(inbound_queue,iop_req32);
2452	} else {
2453		struct hpt_iop_request_scsi_command *req;
2454
2455		req = (struct hpt_iop_request_scsi_command *)srb;
2456		if (ccb->csio.dxfer_len && nsegs > 0) {
2457			struct hpt_iopsg *psg = req->sg_list;
2458			for (idx = 0; idx < nsegs; idx++, psg++) {
2459				psg->pci_address =
2460					(u_int64_t)segs[idx].ds_addr;
2461				psg->size = segs[idx].ds_len;
2462				psg->eot = 0;
2463			}
2464			psg[-1].eot = 1;
2465		}
2466
2467		bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2468
2469		req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2470		req->header.result = IOP_RESULT_PENDING;
2471		req->dataxfer_length = ccb->csio.dxfer_len;
2472		req->channel =  0;
2473		req->target =  ccb->ccb_h.target_id;
2474		req->lun =  ccb->ccb_h.target_lun;
2475		req->header.size =
2476			offsetof(struct hpt_iop_request_scsi_command, sg_list)
2477			+ nsegs*sizeof(struct hpt_iopsg);
2478		req->header.context = (u_int64_t)srb->index |
2479						IOPMU_QUEUE_ADDR_HOST_BIT;
2480		req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2481
2482		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2483			bus_dmamap_sync(hba->io_dmat,
2484				srb->dma_map, BUS_DMASYNC_PREREAD);
2485		} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2486			bus_dmamap_sync(hba->io_dmat,
2487				srb->dma_map, BUS_DMASYNC_PREWRITE);
2488		}
2489
2490		if (hba->firmware_version > 0x01020000
2491			|| hba->interface_version > 0x01020000) {
2492			u_int32_t size_bits;
2493
2494			if (req->header.size < 256)
2495				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
2496			else if (req->header.size < 512)
2497				size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
2498			else
2499				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
2500						| IOPMU_QUEUE_ADDR_HOST_BIT;
2501
2502			BUS_SPACE_WRT4_ITL(inbound_queue,
2503				(u_int32_t)srb->phy_addr | size_bits);
2504		} else
2505			BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
2506				|IOPMU_QUEUE_ADDR_HOST_BIT);
2507	}
2508}
2509
2510static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
2511				struct hpt_iop_srb *srb,
2512				bus_dma_segment_t *segs, int nsegs)
2513{
2514	int idx, size;
2515	union ccb *ccb = srb->ccb;
2516	u_int8_t *cdb;
2517	struct hpt_iop_request_scsi_command *req;
2518	u_int64_t req_phy;
2519
2520	req = (struct hpt_iop_request_scsi_command *)srb;
2521	req_phy = srb->phy_addr;
2522
2523	if (ccb->csio.dxfer_len && nsegs > 0) {
2524		struct hpt_iopsg *psg = req->sg_list;
2525		for (idx = 0; idx < nsegs; idx++, psg++) {
2526			psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2527			psg->size = segs[idx].ds_len;
2528			psg->eot = 0;
2529		}
2530		psg[-1].eot = 1;
2531	}
2532	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2533		cdb = ccb->csio.cdb_io.cdb_ptr;
2534	else
2535		cdb = ccb->csio.cdb_io.cdb_bytes;
2536
2537	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2538	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2539	req->header.result = IOP_RESULT_PENDING;
2540	req->dataxfer_length = ccb->csio.dxfer_len;
2541	req->channel = 0;
2542	req->target =  ccb->ccb_h.target_id;
2543	req->lun =  ccb->ccb_h.target_lun;
2544	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2545				- sizeof(struct hpt_iopsg)
2546				+ nsegs * sizeof(struct hpt_iopsg);
2547	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2548		bus_dmamap_sync(hba->io_dmat,
2549			srb->dma_map, BUS_DMASYNC_PREREAD);
2550	}
2551	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2552		bus_dmamap_sync(hba->io_dmat,
2553			srb->dma_map, BUS_DMASYNC_PREWRITE);
2554	req->header.context = (u_int64_t)srb->index
2555					<< MVIOP_REQUEST_NUMBER_START_BIT
2556					| MVIOP_CMD_TYPE_SCSI;
2557	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2558	size = req->header.size >> 8;
2559	hptiop_mv_inbound_write(req_phy
2560			| MVIOP_MU_QUEUE_ADDR_HOST_BIT
2561			| (size > 3 ? 3 : size), hba);
2562}
2563
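/*
 * MVFrey variant of the post routine: the request is built in the SRB,
 * its physical address is placed on the inbound list (toggle bit on
 * wrap) and the inbound write pointer register is rung.  A 20-second
 * reset timeout is armed for every SCSI command.
 */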
2564static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
2565				struct hpt_iop_srb *srb,
2566				bus_dma_segment_t *segs, int nsegs)
2567{
2568	int idx, index;
2569	union ccb *ccb = srb->ccb;
2570	u_int8_t *cdb;
2571	struct hpt_iop_request_scsi_command *req;
2572	u_int64_t req_phy;
2573
2574	req = (struct hpt_iop_request_scsi_command *)srb;
2575	req_phy = srb->phy_addr;
2576
2577	if (ccb->csio.dxfer_len && nsegs > 0) {
2578		struct hpt_iopsg *psg = req->sg_list;
2579		for (idx = 0; idx < nsegs; idx++, psg++) {
2580			psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
2581			psg->size = segs[idx].ds_len;
2582			psg->eot = 0;
2583		}
2584		psg[-1].eot = 1;
2585	}
2586	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2587		cdb = ccb->csio.cdb_io.cdb_ptr;
2588	else
2589		cdb = ccb->csio.cdb_io.cdb_bytes;
2590
2591	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2592	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2593	req->header.result = IOP_RESULT_PENDING;
2594	req->dataxfer_length = ccb->csio.dxfer_len;
2595	req->channel = 0;
2596	req->target = ccb->ccb_h.target_id;
2597	req->lun = ccb->ccb_h.target_lun;
2598	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2599				- sizeof(struct hpt_iopsg)
2600				+ nsegs * sizeof(struct hpt_iopsg);
2601	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2602		bus_dmamap_sync(hba->io_dmat,
2603			srb->dma_map, BUS_DMASYNC_PREREAD);
2604	}
2605	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2606		bus_dmamap_sync(hba->io_dmat,
2607			srb->dma_map, BUS_DMASYNC_PREWRITE);
2608
2609	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT
2610						| IOP_REQUEST_FLAG_ADDR_BITS
2611						| ((req_phy >> 16) & 0xffff0000);
2612	req->header.context = ((req_phy & 0xffffffff) << 32 )
2613						| srb->index << 4
2614						| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
2615
2616	hba->u.mvfrey.inlist_wptr++;
2617	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
2618
2619	if (index == hba->u.mvfrey.list_count) {
2620		index = 0;
2621		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
2622		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
2623	}
2624
2625	hba->u.mvfrey.inlist[index].addr = req_phy;
2626	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
2627
2628	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
2629	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
2630
2631	if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
2632		srb->timeout_ch = timeout(hptiop_reset_adapter, hba, 20*hz);
2633	}
2634}
2635
2636static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
2637					int nsegs, int error)
2638{
2639	struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
2640	union ccb *ccb = srb->ccb;
2641	struct hpt_iop_hba *hba = srb->hba;
2642
2643	if (error || nsegs > hba->max_sg_count) {
2644		KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n",
2645			ccb->ccb_h.func_code,
2646			ccb->ccb_h.target_id,
2647			ccb->ccb_h.target_lun, nsegs));
2648		ccb->ccb_h.status = CAM_BUSY;
2649		bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2650		hptiop_free_srb(hba, srb);
2651		xpt_done(ccb);
2652		return;
2653	}
2654
2655	hba->ops->post_req(hba, srb, segs, nsegs);
2656}
2657
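/*
 * bus_dma callbacks for the control/config buffer: both round the
 * loaded physical address and the KVA up to a 32-byte boundary; the
 * MVFrey version additionally lays out the inbound list, the outbound
 * list and the outbound-copy pointer behind the 0x800-byte ioctl area.
 */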
2658static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2659				int nsegs, int error)
2660{
2661	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2662	hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
2663				& ~(u_int64_t)0x1F;
2664	hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2665				& ~0x1F);
2666}
2667
2668static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2669				int nsegs, int error)
2670{
2671	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2672	char *p;
2673	u_int64_t phy;
2674	u_int32_t list_count = hba->u.mvfrey.list_count;
2675
2676	phy = ((u_int64_t)segs->ds_addr + 0x1F)
2677				& ~(u_int64_t)0x1F;
2678	p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2679				& ~0x1F);
2680
2681	hba->ctlcfgcmd_phy = phy;
2682	hba->ctlcfg_ptr = p;
2683
2684	p += 0x800;
2685	phy += 0x800;
2686
2687	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
2688	hba->u.mvfrey.inlist_phy = phy;
2689
2690	p += list_count * sizeof(struct mvfrey_inlist_entry);
2691	phy += list_count * sizeof(struct mvfrey_inlist_entry);
2692
2693	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
2694	hba->u.mvfrey.outlist_phy = phy;
2695
2696	p += list_count * sizeof(struct mvfrey_outlist_entry);
2697	phy += list_count * sizeof(struct mvfrey_outlist_entry);
2698
2699	hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
2700	hba->u.mvfrey.outlist_cptr_phy = phy;
2701}
2702
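/*
 * bus_dma callback for the SRB pool: carve HPT_SRB_MAX_QUEUE_SIZE
 * 32-byte aligned SRBs out of the uncached buffer and create a per-SRB
 * DMA map.  On ITL adapters the physical address is stored shifted
 * right by 5, and the high-memory flag is set when the address has bits
 * in IOPMU_MAX_MEM_SUPPORT_MASK_32G.
 */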
2703static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
2704				int nsegs, int error)
2705{
2706	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2707	bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
2708	struct hpt_iop_srb *srb, *tmp_srb;
2709	int i;
2710
2711	if (error || nsegs == 0) {
2712		device_printf(hba->pcidev, "hptiop_map_srb error");
2713		return;
2714	}
2715
2716	/* map srb */
2717	srb = (struct hpt_iop_srb *)
2718		(((unsigned long)hba->uncached_ptr + 0x1F)
2719		& ~(unsigned long)0x1F);
2720
2721	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2722		tmp_srb = (struct hpt_iop_srb *)
2723					((char *)srb + i * HPT_SRB_MAX_SIZE);
2724		if (((unsigned long)tmp_srb & 0x1F) == 0) {
2725			if (bus_dmamap_create(hba->io_dmat,
2726						0, &tmp_srb->dma_map)) {
2727				device_printf(hba->pcidev, "dmamap create failed");
2728				return;
2729			}
2730
2731			bzero(tmp_srb, sizeof(struct hpt_iop_srb));
2732			tmp_srb->hba = hba;
2733			tmp_srb->index = i;
2734			if (hba->ctlcfg_ptr == 0) {/*itl iop*/
2735				tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
2736							(phy_addr >> 5);
2737				if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
2738					tmp_srb->srb_flag =
2739						HPT_SRB_FLAG_HIGH_MEM_ACESS;
2740			} else {
2741				tmp_srb->phy_addr = phy_addr;
2742			}
2743
2744			callout_handle_init(&tmp_srb->timeout_ch);
2745			hptiop_free_srb(hba, tmp_srb);
2746			hba->srb[i] = tmp_srb;
2747			phy_addr += HPT_SRB_MAX_SIZE;
2748		}
2749		else {
2750			device_printf(hba->pcidev, "invalid alignment");
2751			return;
2752		}
2753	}
2754}
2755
2756static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
2757{
2758	hba->msg_done = 1;
2759}
2760
2761static  int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2762						int target_id)
2763{
2764	struct cam_periph       *periph = NULL;
2765	struct cam_path         *path;
2766	int                     status, retval = 0;
2767
2768	status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2769
2770	if (status == CAM_REQ_CMP) {
2771		if ((periph = cam_periph_find(path, "da")) != NULL) {
2772			if (periph->refcount >= 1) {
2773			device_printf(hba->pcidev, "%d, "
2774				"target_id=0x%x, "
2775				"refcount=%d",
2776				    hba->pciunit, target_id, periph->refcount);
2777				retval = -1;
2778			}
2779		}
2780		xpt_free_path(path);
2781	}
2782	return retval;
2783}
2784
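/*
 * Tear down everything hptiop_attach() created: the async callback and
 * CAM path/SIM, the control/config DMA buffer, per-SRB DMA maps, the
 * SRB pool, the DMA tags, the interrupt, the BARs and the ioctl device
 * node.
 */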
2785static void hptiop_release_resource(struct hpt_iop_hba *hba)
2786{
2787	int i;
2788	if (hba->path) {
2789		struct ccb_setasync ccb;
2790
2791		xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2792		ccb.ccb_h.func_code = XPT_SASYNC_CB;
2793		ccb.event_enable = 0;
2794		ccb.callback = hptiop_async;
2795		ccb.callback_arg = hba->sim;
2796		xpt_action((union ccb *)&ccb);
2797		xpt_free_path(hba->path);
2798	}
2799
2800	if (hba->sim) {
2801		xpt_bus_deregister(cam_sim_path(hba->sim));
2802		cam_sim_free(hba->sim, TRUE);
2803	}
2804
2805	if (hba->ctlcfg_dmat) {
2806		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2807		bus_dmamem_free(hba->ctlcfg_dmat,
2808					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2809		bus_dma_tag_destroy(hba->ctlcfg_dmat);
2810	}
2811
2812	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2813		struct hpt_iop_srb *srb = hba->srb[i];
2814		if (srb->dma_map)
2815			bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2816	}
2817
2818	if (hba->srb_dmat) {
2819		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2820		bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2821		bus_dma_tag_destroy(hba->srb_dmat);
2822	}
2823
2824	if (hba->io_dmat)
2825		bus_dma_tag_destroy(hba->io_dmat);
2826
2827	if (hba->parent_dmat)
2828		bus_dma_tag_destroy(hba->parent_dmat);
2829
2830	if (hba->irq_handle)
2831		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2832
2833	if (hba->irq_res)
2834		bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2835					0, hba->irq_res);
2836
2837	if (hba->bar0_res)
2838		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2839					hba->bar0_rid, hba->bar0_res);
2840	if (hba->bar2_res)
2841		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2842					hba->bar2_rid, hba->bar2_res);
2843	if (hba->ioctl_dev)
2844		destroy_dev(hba->ioctl_dev);
2845}
2846