/*
 * HighPoint RR3xxx RAID Driver for FreeBSD
 * Copyright (C) 2005-2007 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/hptiop/hptiop.c 170872 2007-06-17 05:55:54Z scottl $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/cons.h>
#if (__FreeBSD_version >= 500000)
#include <sys/time.h>
#include <sys/systm.h>
#else
#include <machine/clock.h>
#endif

#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/libkern.h>
#include <sys/kernel.h>

#if (__FreeBSD_version >= 500000)
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/module.h>
#endif

#include <sys/eventhandler.h>
#include <sys/bus.h>
#include <sys/taskqueue.h>
#include <sys/ioccom.h>

#include <machine/resource.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#if (__FreeBSD_version >= 500000)
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#else
#include <pci/pcivar.h>
#include <pci/pcireg.h>
#endif

#if (__FreeBSD_version <= 500043)
#include <sys/devicestat.h>
#endif

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#if (__FreeBSD_version < 500043)
#include <sys/bus_private.h>
#endif

#include <dev/hptiop/hptiop.h>

static struct hpt_iop_hba * g_hba[4];
static int  iop_count = 0;
static char driver_name[] = "hptiop";
static char driver_version[] = "v1.2 (041307)";
static int  osm_max_targets = 32;

static devclass_t hptiop_devclass;

static void os_request_callback(struct hpt_iop_hba * hba, u_int32_t req);
static void os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg);
static int  hptiop_do_ioctl(struct hpt_iop_hba * hba, struct hpt_iop_ioctl_param * pParams);
static void hptiop_bus_scan_cb(struct cam_periph *periph, union ccb *ccb);
static int  hptiop_rescan_bus(struct hpt_iop_hba * hba);
static int  hptiop_post_ioctl_command(struct hpt_iop_hba * hba,
		struct hpt_iop_request_ioctl_command * req, struct hpt_iop_ioctl_param * pParams);
static int  os_query_remove_device(struct hpt_iop_hba * hba, int target_id);
static int  hptiop_probe(device_t dev);
static int  hptiop_attach(device_t dev);
static int  hptiop_detach(device_t dev);
static int  hptiop_shutdown(device_t dev);
static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
static void hptiop_poll(struct cam_sim *sim);
static void hptiop_async(void * callback_arg, u_int32_t code,
					struct cam_path * path, void * arg);
static void hptiop_pci_intr(void *arg);
static void hptiop_release_resource(struct hpt_iop_hba * hba);
static int  hptiop_reset_adapter(struct hpt_iop_hba * hba);
static void hptiop_enable_interrupts(struct hpt_iop_hba * hba);
static void hptiop_disable_interrupts(struct hpt_iop_hba * hba);

static d_open_t hptiop_open;
static d_close_t hptiop_close;
static d_ioctl_t hptiop_ioctl;

static struct cdevsw hptiop_cdevsw = {
	.d_open = hptiop_open,
	.d_close = hptiop_close,
	.d_ioctl = hptiop_ioctl,
	.d_name = driver_name,
#if __FreeBSD_version >= 503000
	.d_version = D_VERSION,
#endif
#if (__FreeBSD_version >= 503000 && __FreeBSD_version < 600034)
	.d_flags = D_NEEDGIANT,
#endif
#if __FreeBSD_version < 600034
#if __FreeBSD_version >= 501000
	.d_maj = MAJOR_AUTO,
#else
	.d_maj = HPT_DEV_MAJOR,
#endif
#endif
};

#if __FreeBSD_version < 503000
#define hba_from_dev(dev) ((struct hpt_iop_hba *)(dev)->si_drv1)
#else
#define hba_from_dev(dev) \
		((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, minor(dev)))
#endif

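/*
 * Character device entry points.  A /dev/hptiopN node is created per
 * adapter at attach time so management utilities can pass
 * HPT_DO_IOCONTROL and HPT_SCAN_BUS requests down to the firmware.
 */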
static int hptiop_open(ioctl_dev_t dev, int flags,
					int devtype, ioctl_thread_t proc)
{
	struct hpt_iop_hba * hba = hba_from_dev(dev);

	if (hba == NULL)
		return ENXIO;
	if (hba->flag & HPT_IOCTL_FLAG_OPEN)
		return EBUSY;
	hba->flag |= HPT_IOCTL_FLAG_OPEN;
	return 0;
}

static int hptiop_close(ioctl_dev_t dev, int flags,
					int devtype, ioctl_thread_t proc)
{
	struct hpt_iop_hba * hba = hba_from_dev(dev);
	hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
	return 0;
}

static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
					int flags, ioctl_thread_t proc)
{
	int ret = EFAULT;
	struct hpt_iop_hba * hba = hba_from_dev(dev);

#if (__FreeBSD_version >= 500000)
	mtx_lock(&Giant);
#endif

	switch (cmd) {
	case HPT_DO_IOCONTROL:
		ret = hptiop_do_ioctl(hba, (struct hpt_iop_ioctl_param *)data);
		break;
	case HPT_SCAN_BUS:
		ret = hptiop_rescan_bus(hba);
		break;
	}

#if (__FreeBSD_version >= 500000)
	mtx_unlock(&Giant);
#endif

	return ret;
}

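/*
 * Low-level accessors for the IOP's memory-mapped request queues.  An
 * offset read from inbound_queue locates a free request frame inside the
 * BAR0 window; writing an offset to inbound_queue (or outbound_queue)
 * hands the frame back to the controller.  Reading outbound_intstatus
 * after a write forces PCI posted writes out to the adapter.
 */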
static __inline void * iop_get_inbound_request(struct hpt_iopmu * iop)
{
	u_int32_t m = readl(&iop->inbound_queue);
	return (m == 0xFFFFFFFF) ? NULL : ((char *)iop + m);
}

static __inline void iop_post_inbound_request(struct hpt_iopmu * iop, void *req)
{
	writel(&iop->inbound_queue, (char *)req - (char *)iop);
}

static __inline void iop_post_outbound_request(struct hpt_iopmu * iop, void *req)
{
	writel(&iop->outbound_queue, (char *)req - (char *)iop);
}

static __inline void hptiop_pci_posting_flush(struct hpt_iopmu * iop)
{
	readl(&iop->outbound_intstatus);
}

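/*
 * Poll the inbound queue until the firmware exposes a request frame,
 * which indicates the IOP has finished initializing; the frame is
 * returned via the outbound queue untouched.  Waits up to 'millisec' ms.
 */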
static int iop_wait_ready(struct hpt_iopmu * iop, u_int32_t millisec)
{
	u_int32_t req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		DELAY(1000);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		writel(&iop->outbound_queue, req);
		hptiop_pci_posting_flush(iop);
		return 0;
	}

	return -1;
}

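/*
 * Interrupt service core, shared by the PCI interrupt handler and the
 * polled paths.  Handles outbound MSG0 interrupts and drains the
 * outbound post queue, completing host-addressed requests directly;
 * IOP-resident synchronous requests are flagged complete through their
 * context field so the posting thread can notice.
 */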
static int iop_intr(struct hpt_iop_hba * hba)
{
	struct hpt_iopmu * iop = hba->iop;
	u_int32_t status;
	int ret = 0;

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u_int32_t msg = readl(&iop->outbound_msgaddr0);
		KdPrint(("received outbound msg %x", msg));
		writel(&iop->outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
		os_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		u_int32_t req;
		while ((req = readl(&iop->outbound_queue))
							!= IOPMU_QUEUE_EMPTY) {
			if (req & IOPMU_QUEUE_MASK_HOST_BITS)
				os_request_callback(hba, req);
			else {
				struct hpt_iop_request_header * p;
				p = (struct hpt_iop_request_header *)((char *)hba->iop + req);
				if (p->flags & IOP_REQUEST_FLAG_SYNC_REQUEST) {
					if (p->context)
						os_request_callback(hba, req);
					else
						p->context = 1;
				}
				else
					os_request_callback(hba, req);
			}
		}
		ret = 1;
	}
	return ret;
}

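/*
 * Post an IOP-resident request and busy-wait for its completion.  The
 * request's context field is cleared before posting and set non-zero by
 * iop_intr() when the firmware hands the frame back.  Returns 0 on
 * completion, -1 on timeout.
 */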
static int iop_send_sync_request(struct hpt_iop_hba * hba, void *req, u_int32_t millisec)
{
	u_int32_t i;

	((struct hpt_iop_request_header *)req)->flags |= IOP_REQUEST_FLAG_SYNC_REQUEST;
	((struct hpt_iop_request_header *)req)->context = 0;

	writel(&hba->iop->inbound_queue,
			(u_int32_t)((char *)req - (char *)hba->iop));

	hptiop_pci_posting_flush(hba->iop);

	for (i = 0; i < millisec; i++) {
		iop_intr(hba);
		if (((struct hpt_iop_request_header *)req)->context)
			return 0;
		DELAY(1000);
	}

	return -1;
}

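/*
 * Write a message code to inbound_msgaddr0 and poll until the firmware
 * acknowledges it with an outbound MSG0 interrupt, which sets *done via
 * os_message_callback().  Returns 0 on acknowledge, -1 on timeout.
 */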
static int iop_send_sync_msg(struct hpt_iop_hba * hba, u_int32_t msg, int *done, u_int32_t millisec)
{
	u_int32_t i;

	*done = 0;

	writel(&hba->iop->inbound_msgaddr0, msg);

	hptiop_pci_posting_flush(hba->iop);

	for (i = 0; i < millisec; i++) {
		iop_intr(hba);
		if (*done)
			break;
		DELAY(1000);
	}

	return *done ? 0 : -1;
}

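/*
 * Fetch the controller's configuration (firmware version, queue depth,
 * request and S/G limits) through a synchronous GET_CONFIG request.
 */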
static int iop_get_config(struct hpt_iop_hba * hba, struct hpt_iop_request_get_config * config)
{
	u_int32_t req32 = 0;
	struct hpt_iop_request_get_config * req;

	if ((req32 = readl(&hba->iop->inbound_queue)) == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config *)((char *)hba->iop + req32);
	req->header.flags = 0;
	req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_get_config);
	req->header.result = IOP_RESULT_PENDING;

	if (iop_send_sync_request(hba, req, 20000)) {
		KdPrint(("Get config send cmd failed"));
		return -1;
	}

	*config = *req;
	writel(&hba->iop->outbound_queue, req32);
	return 0;
}

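/*
 * Push host-side parameters (vbus id, maximum host request size) to the
 * controller through a synchronous SET_CONFIG request.
 */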
static int iop_set_config(struct hpt_iop_hba * hba, struct hpt_iop_request_set_config *config)
{
	u_int32_t req32;
	struct hpt_iop_request_set_config *req;

	req32 = readl(&hba->iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config *)((char *)hba->iop + req32);
	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) - sizeof(struct hpt_iop_request_header));
	req->header.flags = 0;
	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_set_config);
	req->header.result = IOP_RESULT_PENDING;

	if (iop_send_sync_request(hba, req, 20000)) {
		KdPrint(("Set config send cmd failed"));
		return -1;
	}

	writel(&hba->iop->outbound_queue, req32);
	return 0;
}

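/*
 * Service an HPT_DO_IOCONTROL request from the management interface:
 * validate the magic, copy the caller's input buffer into a firmware
 * request frame, execute it synchronously, then copy the output buffer
 * and returned byte count back out.
 */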
static int hptiop_do_ioctl(struct hpt_iop_hba * hba, struct hpt_iop_ioctl_param * pParams)
{
	struct hpt_iop_request_ioctl_command * req;

	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
		(pParams->Magic != HPT_IOCTL_MAGIC32))
		return EFAULT;

	req = (struct hpt_iop_request_ioctl_command *)iop_get_inbound_request(hba->iop);
	if (!req) {
		printf("hptiop: ioctl command failed\n");
		return EFAULT;
	}

	if (pParams->nInBufferSize)
		if (copyin((void *)pParams->lpInBuffer,
				req->buf, pParams->nInBufferSize))
			goto invalid;

	if (hptiop_post_ioctl_command(hba, req, pParams))
		goto invalid;

	if (req->header.result == IOP_RESULT_SUCCESS) {
		if (pParams->nOutBufferSize)
			if (copyout(req->buf +
					((pParams->nInBufferSize + 3) & ~3),
					(void*)pParams->lpOutBuffer,
					pParams->nOutBufferSize))
				goto invalid;

		if (pParams->lpBytesReturned)
			if (copyout(&req->bytes_returned,
					(void*)pParams->lpBytesReturned,
					sizeof(u_int32_t)))
				goto invalid;
		iop_post_outbound_request(hba->iop, req);
		return 0;
	} else {
invalid:
		iop_post_outbound_request(hba->iop, req);
		return EFAULT;
	}
}

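/*
 * Fill in the ioctl request header and post it to the IOP, then sleep on
 * the request until iop_intr() clears header.context.  If the sleep
 * times out, the adapter is reset and the wait resumes.  The output
 * buffer follows the dword-aligned input buffer inside req->buf, which
 * is what the size check below accounts for.
 */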
static int hptiop_post_ioctl_command(struct hpt_iop_hba *hba,
		struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams)
{
	if (((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize >
			hba->max_request_size - sizeof(struct hpt_iop_request_header) -
					4 * sizeof(u_int32_t)) {
		printf("hptiop: request size beyond max value\n");
		return -1;
	}

	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
	req->inbuf_size = pParams->nInBufferSize;
	req->outbuf_size = pParams->nOutBufferSize;

	req->header.size = sizeof(struct hpt_iop_request_ioctl_command)
					- 4 + pParams->nInBufferSize;
	req->header.context = (u_int64_t)(unsigned long)req;
	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
	req->header.result = IOP_RESULT_PENDING;
	req->header.flags |= IOP_REQUEST_FLAG_SYNC_REQUEST;

	hptiop_lock_adapter(hba);
	iop_post_inbound_request(hba->iop, req);
	hptiop_pci_posting_flush(hba->iop);

	while (req->header.context) {
		if (hptiop_sleep(hba, req,
				PPAUSE, "hptctl", HPT_OSM_TIMEOUT) == 0)
			break;
		iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET,
						&hba->msg_done, 60000);
	}

	hptiop_unlock_adapter(hba);
	return 0;
}

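/*
 * Kick off an asynchronous full-bus rescan through CAM so newly created
 * or deleted logical drives show up; hptiop_bus_scan_cb() releases the
 * path and CCB when the scan completes.
 */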
static int  hptiop_rescan_bus(struct hpt_iop_hba * hba)
{
	struct cam_path     *path;
	union ccb           *ccb;

	if (xpt_create_path(&path, xpt_periph, cam_sim_path(hba->sim),
		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP)
		return(EIO);
	if ((ccb = malloc(sizeof(union ccb), M_TEMP, M_WAITOK)) == NULL)
		return(ENOMEM);
	bzero(ccb, sizeof(union ccb));
	xpt_setup_ccb(&ccb->ccb_h, path, 5);
	ccb->ccb_h.func_code = XPT_SCAN_BUS;
	ccb->ccb_h.cbfcnp = hptiop_bus_scan_cb;
	ccb->crcn.flags = CAM_FLAG_NONE;
	xpt_action(ccb);
	return(0);
}

static void hptiop_bus_scan_cb(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
	free(ccb, M_TEMP);
}

static  bus_dmamap_callback_t   hptiop_map_srb;
static  bus_dmamap_callback_t   hptiop_post_scsi_command;

/*
 * CAM driver interface
 */
static device_method_t driver_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     hptiop_probe),
	DEVMETHOD(device_attach,    hptiop_attach),
	DEVMETHOD(device_detach,    hptiop_detach),
	DEVMETHOD(device_shutdown,  hptiop_shutdown),
	{ 0, 0 }
};

static driver_t hptiop_pci_driver = {
	driver_name,
	driver_methods,
	sizeof(struct hpt_iop_hba)
};

DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0);

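/*
 * Match the HighPoint PCI IDs: vendor 0x1103 with device 0x3220, 0x3320
 * or 0x3520 (RocketRAID 3xxx family).
 */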
static int hptiop_probe(device_t dev)
{
	struct hpt_iop_hba *hba;

	if (pci_get_vendor(dev) == 0x1103 &&
		(pci_get_device(dev) == 0x3220 ||
		 pci_get_device(dev) == 0x3320 ||
		 pci_get_device(dev) == 0x3520)) {
		printf("hptiop: adapter at PCI %d:%d:%d, IRQ %d\n",
			pci_get_bus(dev), pci_get_slot(dev),
			pci_get_function(dev), pci_get_irq(dev));
		device_set_desc(dev, driver_name);
		hba = (struct hpt_iop_hba *)device_get_softc(dev);
		memset(hba, 0, sizeof(struct hpt_iop_hba));
		return 0;
	}
	return ENXIO;
}

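/*
 * Bring an adapter online: map BAR0, wait for the IOP to become ready,
 * pull its configuration, create the DMA tags and SRB pool, register a
 * CAM SIM/bus, push the host configuration, hook up the interrupt,
 * start the firmware's background task and create the ioctl node.
 */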
static int hptiop_attach(device_t dev)
{
	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
	struct hpt_iop_request_get_config  iop_config;
	struct hpt_iop_request_set_config  set_config;
	int rid = 0;
	struct cam_devq *devq;
	struct ccb_setasync ccb;
	u_int32_t unit = device_get_unit(dev);

	printf("%s%d: RocketRAID 3xxx controller driver %s\n",
		driver_name, unit, driver_version);

	KdPrint(("hptiop_attach(%d, %d/%d/%d)", unit,
		pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));

#if __FreeBSD_version >= 440000
	pci_enable_busmaster(dev);
#endif
	hba->pcidev = dev;
	hba->pciunit = unit;

	hba->bar0_rid = 0x10;
	hba->bar0_res = bus_alloc_resource(hba->pcidev,
					SYS_RES_MEMORY,	&hba->bar0_rid,
					0, ~0, 0x100000, RF_ACTIVE);

	if (hba->bar0_res == NULL) {
		printf("hptiop: Failed to get iop base address.\n");
		return ENXIO;
	}

	hba->iop = (struct hpt_iopmu *)rman_get_virtual(hba->bar0_res);

	if (!hba->iop) {
		printf("hptiop: alloc mem res failed\n");
		return ENXIO;
	}

	if (iop_wait_ready(hba->iop, 2000)) {
		printf("hptiop: adapter is not ready\n");
		return ENXIO;
	}

	if (iop_get_config(hba, &iop_config)) {
		printf("hptiop: Get iop config failed.\n");
		return ENXIO;
	}

	hba->firmware_version = iop_config.firmware_version;
	hba->interface_version = iop_config.interface_version;
	hba->max_requests = iop_config.max_requests;
	hba->max_devices = iop_config.max_devices;
	hba->max_request_size = iop_config.request_size;
	hba->max_sg_count = iop_config.max_sg_count;

#if (__FreeBSD_version >= 500000)
	mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
#endif

	if (bus_dma_tag_create(NULL,/* parent */
			1,  /* alignment */
			0, /* boundary */
			BUS_SPACE_MAXADDR,  /* lowaddr */
			BUS_SPACE_MAXADDR,  /* highaddr */
			NULL, NULL,         /* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
			BUS_SPACE_UNRESTRICTED, /* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
			0,      /* flags */
#if __FreeBSD_version > 502000
			NULL,   /* lockfunc */
			NULL,       /* lockfuncarg */
#endif
			&hba->parent_dmat   /* tag */))
	{
		printf("hptiop: alloc parent_dmat failed\n");
		return ENXIO;
	}

	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
			4,  /* alignment */
			BUS_SPACE_MAXADDR_32BIT + 1, /* boundary */
			BUS_SPACE_MAXADDR,  /* lowaddr */
			BUS_SPACE_MAXADDR,  /* highaddr */
			NULL, NULL,         /* filter, filterarg */
			PAGE_SIZE * (hba->max_sg_count - 1),  /* maxsize */
			hba->max_sg_count,  /* nsegments */
			0x20000,    /* maxsegsize */
			BUS_DMA_ALLOCNOW,       /* flags */
#if __FreeBSD_version > 502000
			busdma_lock_mutex,  /* lockfunc */
			&hba->lock,     /* lockfuncarg */
#endif
			&hba->io_dmat   /* tag */))
	{
		printf("hptiop: alloc io_dmat failed\n");
		bus_dma_tag_destroy(hba->parent_dmat);
		return ENXIO;
	}

	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
			1,  /* alignment */
			0, /* boundary */
			BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
			BUS_SPACE_MAXADDR,  /* highaddr */
			NULL, NULL,         /* filter, filterarg */
			HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
			1,  /* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
			0,      /* flags */
#if __FreeBSD_version > 502000
			NULL,   /* lockfunc */
			NULL,       /* lockfuncarg */
#endif
			&hba->srb_dmat  /* tag */))
	{
		printf("hptiop: alloc srb_dmat failed\n");
		bus_dma_tag_destroy(hba->io_dmat);
		bus_dma_tag_destroy(hba->parent_dmat);
		return ENXIO;
	}

	if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
#if __FreeBSD_version > 501000
		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
#else
		BUS_DMA_WAITOK,
#endif
		&hba->srb_dmamap) != 0)
	{
			printf("hptiop: bus_dmamem_alloc failed!\n");
release_tag:
			bus_dma_tag_destroy(hba->srb_dmat);
			bus_dma_tag_destroy(hba->io_dmat);
			bus_dma_tag_destroy(hba->parent_dmat);
			return ENXIO;
	}

	if (bus_dmamap_load(hba->srb_dmat,
			hba->srb_dmamap, hba->uncached_ptr,
			(HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
			hptiop_map_srb, hba, 0))
	{
		printf("hptiop: bus_dmamap_load failed!\n");
		goto release_tag;
	}

	if ((devq = cam_simq_alloc(hba->max_requests - 1)) == NULL) {
		printf("hptiop: cam_simq_alloc failed\n");
attach_failed:
		hptiop_release_resource(hba);
		return ENXIO;
	}

	hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
			hba, unit, &Giant, hba->max_requests - 1, 1, devq);
	if (!hba->sim) {
		printf("hptiop: cam_sim_alloc failed\n");
		cam_simq_free(devq);
		goto attach_failed;
	}

	if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS) {
		printf("hptiop: xpt_bus_register failed\n");
		cam_sim_free(hba->sim, /*free devq*/ TRUE);
		hba->sim = NULL;
		goto attach_failed;
	}

	if (xpt_create_path(&hba->path, /*periph */ NULL,
			cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
			CAM_LUN_WILDCARD) != CAM_REQ_CMP)
	{
		printf("hptiop: xpt_create_path failed\n");
		xpt_bus_deregister(cam_sim_path(hba->sim));
		cam_sim_free(hba->sim, /*free_devq*/TRUE);
		hba->sim = NULL;
		goto attach_failed;
	}

	bzero(&set_config, sizeof(set_config));
	set_config.iop_id = iop_count;
	set_config.vbus_id = cam_sim_path(hba->sim);
	set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;

	if (iop_set_config(hba, &set_config)) {
		printf("hptiop: Set iop config failed.\n");
		goto attach_failed;
	}

	xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
	ccb.ccb_h.func_code = XPT_SASYNC_CB;
	ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
	ccb.callback = hptiop_async;
	ccb.callback_arg = hba->sim;
	xpt_action((union ccb *)&ccb);

	rid = 0;
	if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ,
			&rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		printf("hptiop: allocate irq failed!\n");
		goto attach_failed;
	}

	if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM,
				NULL, hptiop_pci_intr, hba, &hba->irq_handle)) {
		printf("hptiop: allocate intr function failed!\n");
		goto attach_failed;
	}

	hptiop_enable_interrupts(hba);

	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK,
			&hba->msg_done, 5000)) {
		printf("hptiop: Failed to start background task\n");
		goto attach_failed;
	}

	hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
				UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
				S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);

#if __FreeBSD_version < 503000
	hba->ioctl_dev->si_drv1 = hba;
#endif

	hptiop_rescan_bus(hba);

	g_hba[iop_count++] = hba;
	return 0;
}

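/*
 * Detach: refuse if any "da" peripheral on the bus is still referenced,
 * otherwise shut the adapter down, stop the firmware background task
 * and release all resources.
 */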
static int hptiop_detach(device_t dev)
{
	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
	int i;
	int error = EBUSY;

	hptiop_lock_adapter(hba);
	for (i = 0; i < osm_max_targets; i++)
		if (os_query_remove_device(hba, i)) {
			printf("hptiop%d: file system is busy, id=%d\n",
						hba->pciunit, i);
			goto out;
		}

	if ((error = hptiop_shutdown(dev)) != 0)
		goto out;
	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK,
						&hba->msg_done, 60000)) {
		error = EBUSY;
		goto out;
	}

	hptiop_release_resource(hba);
	error = 0;
out:
	hptiop_unlock_adapter(hba);
	return error;
}

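/*
 * Shutdown: fails while the ioctl node is held open; otherwise masks
 * interrupts and asks the firmware to flush and stop via a SHUTDOWN
 * message.
 */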
static int hptiop_shutdown(device_t dev)
{
	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);

	int error = 0;

	if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
		printf("hptiop%d: ioctl device is still open\n", hba->pciunit);
		return EBUSY;
	}

	hptiop_disable_interrupts(hba);
	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN,
						&hba->msg_done, 60000))
		error = EBUSY;

	return error;
}

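/*
 * Interrupt plumbing: the PCI handler runs iop_intr() under the adapter
 * lock, and CAM's poll entry point reuses the same path.  Async events
 * are registered but not acted upon.
 */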
static void hptiop_pci_intr(void *arg)
{
	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;

	hptiop_lock_adapter(hba);
	iop_intr(hba);
	hptiop_unlock_adapter(hba);
}

static void hptiop_poll(struct cam_sim *sim)
{
	hptiop_pci_intr(cam_sim_softc(sim));
}

static void hptiop_async(void * callback_arg, u_int32_t code,
					struct cam_path * path, void * arg)
{
}

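/*
 * Mask handling for the outbound interrupt register: clearing the
 * POSTQUEUE and MSG0 bits enables those interrupt sources, setting them
 * masks the sources off again.
 */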
static void hptiop_enable_interrupts(struct hpt_iop_hba * hba)
{
	writel(&hba->iop->outbound_intmask,
		~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
}

static void hptiop_disable_interrupts(struct hpt_iop_hba * hba)
{
	u_int32_t int_mask;

	int_mask = readl(&hba->iop->outbound_intmask);
	writel(&hba->iop->outbound_intmask, int_mask |
		IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0);
	hptiop_pci_posting_flush(hba->iop);
}

static int hptiop_reset_adapter(struct hpt_iop_hba * hba)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET,
						&hba->msg_done, 60000);
}

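/*
 * SRBs live on a simple singly-linked free list protected by the adapter
 * lock; hptiop_get_srb() pops one and hptiop_free_srb() pushes it back.
 */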
static void *hptiop_get_srb(struct hpt_iop_hba * hba)
{
	struct hpt_iop_srb * srb;

	if (hba->srb_list) {
		srb = hba->srb_list;
		hba->srb_list = srb->next;
	}
	else
		srb = NULL;

	return srb;
}

static void hptiop_free_srb(struct hpt_iop_hba * hba, struct hpt_iop_srb * srb)
{
	srb->next = hba->srb_list;
	hba->srb_list = srb;
}

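/*
 * CAM action entry point.  XPT_SCSI_IO grabs an SRB, maps the data
 * buffer (virtual addresses via bus_dmamap_load, or a caller-supplied
 * physical S/G list) and posts the command; other function codes are
 * handled inline.  Physical data buffers are not supported.
 */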
static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
{
	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
	struct hpt_iop_srb * srb;

	switch (ccb->ccb_h.func_code) {

	case XPT_SCSI_IO:
		hptiop_lock_adapter(hba);
		if (ccb->ccb_h.target_lun != 0 ||
			ccb->ccb_h.target_id >= osm_max_targets ||
			(ccb->ccb_h.flags & CAM_CDB_PHYS))
		{
			ccb->ccb_h.status = CAM_TID_INVALID;
			xpt_done(ccb);
			goto scsi_done;
		}

		if ((srb = hptiop_get_srb(hba)) == NULL) {
			printf("hptiop: srb allocation failed\n");
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			xpt_done(ccb);
			goto scsi_done;
		}

		srb->ccb = ccb;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			hptiop_post_scsi_command(srb, NULL, 0, 0);
		else if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
				int error;

				error = bus_dmamap_load(hba->io_dmat,
					srb->dma_map,
					ccb->csio.data_ptr, ccb->csio.dxfer_len,
					hptiop_post_scsi_command, srb, 0);

				if (error && error != EINPROGRESS) {
					printf("hptiop%d: bus_dmamap_load error %d\n",
							hba->pciunit, error);
					xpt_freeze_simq(hba->sim, 1);
					ccb->ccb_h.status = CAM_REQ_CMP_ERR;
invalid:
					hptiop_free_srb(hba, srb);
					xpt_done(ccb);
					goto scsi_done;
				}
			}
			else {
				printf("hptiop: CAM_DATA_PHYS not supported\n");
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				goto invalid;
			}
		}
		else {
			struct bus_dma_segment *segs;

			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 ||
				(ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
				printf("hptiop: SCSI cmd failed\n");
				ccb->ccb_h.status = CAM_PROVIDE_FAIL;
				goto invalid;
			}

			segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
			hptiop_post_scsi_command(srb, segs,
						ccb->csio.sglist_cnt, 0);
		}

scsi_done:
		hptiop_unlock_adapter(hba);
		return;

	case XPT_RESET_BUS:
		printf("hptiop: reset adapter\n");
		hptiop_lock_adapter(hba);
		hba->msg_done = 0;
		hptiop_reset_adapter(hba);
		hptiop_unlock_adapter(hba);
		break;

	case XPT_GET_TRAN_SETTINGS:
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;

	case XPT_CALC_GEOMETRY:
		ccb->ccg.heads = 255;
		ccb->ccg.secs_per_track = 63;
		ccb->ccg.cylinders = ccb->ccg.volume_size /
				(ccb->ccg.heads * ccb->ccg.secs_per_track);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;

	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = osm_max_targets;
		cpi->max_lun = 0;
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->initiator_id = osm_max_targets;
		cpi->base_transfer_speed = 3300;

		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}

	xpt_done(ccb);
}

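/*
 * busdma callback that builds and posts the firmware SCSI request.  The
 * request is normally constructed inside the host-resident SRB and
 * posted by its 32-byte-shifted physical address; SRBs the adapter
 * cannot address directly (HPT_SRB_FLAG_HIGH_MEM_ACESS) are copied into
 * an IOP-resident frame instead.  Firmware/interface versions newer
 * than 0x01020000 also encode the request size in the posted word.
 */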
static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
							int nsegs, int error)
{
	int idx;
	struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
	union ccb *ccb;
	u_int8_t *cdb;
	struct hpt_iop_hba * hba;
	struct hpt_iop_request_scsi_command * req;

	if (!srb) {
		printf("hptiop: post scsi command: invalid srb\n");
		return;
	}

	ccb = srb->ccb;
	hba = srb->hba;

	if (error) {
scsi_error:
		printf("hptiop: post scsi command: dma error, err = %d, nsegs = %d\n",
					error, nsegs);
		ccb->ccb_h.status = CAM_BUSY;
		bus_dmamap_unload(hba->io_dmat, srb->dma_map);
		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
		return;
	}

	if (nsegs > hba->max_sg_count) {
		printf("hptiop: nsegs is too large: nsegs=%d, allowed count=%d\n",
					nsegs, hba->max_sg_count);
		goto scsi_error;
	}

	if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
		u_int32_t m = readl(&hba->iop->inbound_queue);

		if (m == 0xFFFFFFFF) {
			printf("hptiop: invalid request offset: %d\n", m);
			goto scsi_error;
		}
		req = (struct hpt_iop_request_scsi_command *)((char *)hba->iop + m);
	}
	else
		req = (struct hpt_iop_request_scsi_command *)srb;

	if (ccb->csio.dxfer_len && nsegs > 0) {
		struct hpt_iopsg *psg = req->sg_list;
		for (idx = 0; idx < nsegs; idx++, psg++) {
			psg->pci_address = (u_int64_t)segs[idx].ds_addr;
			psg->size = segs[idx].ds_len;
			psg->eot = 0;
		}
		psg[-1].eot = 1;
	}

	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
		cdb = ccb->csio.cdb_io.cdb_ptr;
	else
		cdb = ccb->csio.cdb_io.cdb_bytes;

	bcopy(cdb, req->cdb, ccb->csio.cdb_len);

	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
	req->header.result = IOP_RESULT_PENDING;
	req->dataxfer_length = ccb->csio.dxfer_len;
	req->channel = 0;
	req->target = ccb->ccb_h.target_id;
	req->lun = ccb->ccb_h.target_lun;
	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
					- sizeof(struct hpt_iopsg) + nsegs * sizeof(struct hpt_iopsg);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_PREREAD);
	}
	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
		bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_PREWRITE);

	if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
		req->header.context = (u_int64_t)(unsigned long)srb;
		req->header.flags = 0;
		writel(&hba->iop->inbound_queue,
					((char *)req - (char *)hba->iop));
	}
	else {
		req->header.context = (u_int64_t)srb->index |
						IOPMU_QUEUE_ADDR_HOST_BIT;
		req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;

		if (hba->firmware_version > 0x01020000 ||
				hba->interface_version > 0x01020000) {
			u_int32_t size_bits;

			if (req->header.size < 256)
				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
			else if (req->header.size < 512)
				size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
			else
				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
						IOPMU_QUEUE_ADDR_HOST_BIT;
			writel(&hba->iop->inbound_queue, srb->phy_addr | size_bits);
		} else
			writel(&hba->iop->inbound_queue,
				srb->phy_addr | IOPMU_QUEUE_ADDR_HOST_BIT);
	}
}

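/*
 * Completion path, called from iop_intr() with either a host SRB index
 * (IOPMU_QUEUE_MASK_HOST_BITS set) or the offset of an IOP-resident
 * request.  Translates the firmware result into a CAM status, performs
 * the post-DMA sync/unload, recycles the SRB and completes the CCB.
 * Ioctl requests are completed by clearing header.context and waking
 * the sleeper in hptiop_post_ioctl_command().
 */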
static void os_request_callback(struct hpt_iop_hba * hba, u_int32_t index)
{
	struct hpt_iop_srb * srb;
	struct hpt_iop_request_scsi_command * req;
	union ccb *ccb;
	u_int8_t *cdb;

	if (index & IOPMU_QUEUE_MASK_HOST_BITS) {
		if (hba->firmware_version > 0x01020000 ||
				hba->interface_version > 0x01020000) {
			srb = hba->srb[index & ~(u_int32_t)
				(IOPMU_QUEUE_ADDR_HOST_BIT | IOPMU_QUEUE_REQUEST_RESULT_BIT)];
			req = (struct hpt_iop_request_scsi_command *)srb;
			if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
				req->header.result = IOP_RESULT_SUCCESS;
		} else {
			srb = hba->srb[index & ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
			req = (struct hpt_iop_request_scsi_command *)srb;
		}
		goto srb_complete;
	}

	req = (struct hpt_iop_request_scsi_command *)((char *)hba->iop + index);

	switch (((struct hpt_iop_request_header *)req)->type) {
	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
		((struct hpt_iop_request_header *)req)->context = 0;
		wakeup(req);
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		srb = (struct hpt_iop_srb *)(unsigned long)req->header.context;
srb_complete:
		ccb = (union ccb *)srb->ccb;
		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
			cdb = ccb->csio.cdb_io.cdb_ptr;
		else
			cdb = ccb->csio.cdb_io.cdb_bytes;

		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
			ccb->ccb_h.status = CAM_REQ_CMP;
			goto scsi_done;
		}

		switch (((struct hpt_iop_request_header *)req)->result) {
		case IOP_RESULT_SUCCESS:
			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
			case CAM_DIR_IN:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			case CAM_DIR_OUT:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			}

			ccb->ccb_h.status = CAM_REQ_CMP;
			break;

		case IOP_RESULT_BAD_TARGET:
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		case IOP_RESULT_BUSY:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_INVALID_REQUEST:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		case IOP_RESULT_FAIL:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		case IOP_RESULT_RESET:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		default:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		}
scsi_done:
		if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
			iop_post_outbound_request(hba->iop, req);

		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
		break;
	}
}

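/*
 * bus_dmamap_load callback for the SRB pool: carve the coherent
 * allocation into HPT_SRB_MAX_SIZE slots aligned to 32 bytes, create a
 * per-SRB DMA map, record each SRB's shifted physical address (flagging
 * those the adapter cannot address directly) and push it onto the free
 * list.
 */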
static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
						int nsegs, int error)
{
	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
	bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
	struct hpt_iop_srb *srb, *tmp_srb;
	int i;

	if (error || nsegs == 0) {
		printf("hptiop_map_srb error\n");
		return;
	}

	/* map srb */
	srb = (struct hpt_iop_srb *)
		(((unsigned long)hba->uncached_ptr + 0x1F) & ~(unsigned long)0x1F);

	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
		tmp_srb = (struct hpt_iop_srb *)
					((char *)srb + i * HPT_SRB_MAX_SIZE);
		if (((unsigned long)tmp_srb & 0x1F) == 0) {
			if (bus_dmamap_create(hba->io_dmat,
						0, &tmp_srb->dma_map)) {
				printf("hptiop: dmamap create failed\n");
				return;
			}

			bzero(tmp_srb, sizeof(struct hpt_iop_srb));
			tmp_srb->hba = hba;
			tmp_srb->index = i;
			if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
				tmp_srb->srb_flag = HPT_SRB_FLAG_HIGH_MEM_ACESS;
			tmp_srb->phy_addr = (u_int32_t)(phy_addr >> 5);
			hptiop_free_srb(hba, tmp_srb);
			hba->srb[i] = tmp_srb;
			phy_addr += HPT_SRB_MAX_SIZE;
		}
		else {
			printf("hptiop: invalid alignment\n");
			return;
		}
	}
}

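/* Outbound MSG0 acknowledgements simply flag the waiting sender. */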
static void os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
{
	hba->msg_done = 1;
}

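/*
 * Return -1 if the "da" peripheral attached to the given target is still
 * referenced (e.g. a mounted file system), 0 if it is safe to remove.
 */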
static  int os_query_remove_device(struct hpt_iop_hba * hba, int target_id)
{
	struct cam_periph       *periph = NULL;
	struct cam_path         *path;
	int                     status, retval = 0;

	status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);

	if (status == CAM_REQ_CMP) {
		if ((periph = cam_periph_find(path, "da")) != NULL) {
			if (periph->refcount >= 1) {
				printf("hptiop%d: target_id=0x%x, refcount=%d\n",
				    hba->pciunit, target_id, periph->refcount);
				retval = -1;
			}
		}
		xpt_free_path(path);
	}
	return retval;
}

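/*
 * Tear down everything hptiop_attach() created, in reverse order.  Each
 * resource is checked individually, so this is safe to call from
 * partial attach failures as well as from detach.
 */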
static void hptiop_release_resource(struct hpt_iop_hba *hba)
{
	struct ccb_setasync ccb;

	if (hba->path) {
		xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
		ccb.ccb_h.func_code = XPT_SASYNC_CB;
		ccb.event_enable = 0;
		ccb.callback = hptiop_async;
		ccb.callback_arg = hba->sim;
		xpt_action((union ccb *)&ccb);
		xpt_free_path(hba->path);
	}

	if (hba->sim) {
		xpt_bus_deregister(cam_sim_path(hba->sim));
		cam_sim_free(hba->sim, TRUE);
	}

	if (hba->srb_dmat) {
		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
		bus_dmamem_free(hba->srb_dmat,
					hba->uncached_ptr, hba->srb_dmamap);
		bus_dma_tag_destroy(hba->srb_dmat);
	}

	if (hba->io_dmat)
		bus_dma_tag_destroy(hba->io_dmat);

	if (hba->parent_dmat)
		bus_dma_tag_destroy(hba->parent_dmat);

	if (hba->irq_handle)
		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);

	if (hba->irq_res)
		bus_release_resource(hba->pcidev, SYS_RES_IRQ, 0, hba->irq_res);

	if (hba->bar0_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);

	if (hba->ioctl_dev)
		destroy_dev(hba->ioctl_dev);
}