1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
5 * Copyright (c) 2004-05 Vinod Kashyap.
6 * Copyright (c) 2000 Michael Smith
7 * Copyright (c) 2000 BSDi
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD$");
34
35/*
36 * AMCC'S 3ware driver for 9000 series storage controllers.
37 *
38 * Author: Vinod Kashyap
39 * Modifications by: Adam Radford
40 * Modifications by: Manjunath Ranganathaiah
41 */
42
43/*
44 * FreeBSD specific functions not related to CAM, and other
45 * miscellaneous functions.
46 */
47
48#include <dev/twa/tw_osl_includes.h>
49#include <dev/twa/tw_cl_fwif.h>
50#include <dev/twa/tw_cl_ioctl.h>
51#include <dev/twa/tw_osl_ioctl.h>
52
53#ifdef TW_OSL_DEBUG
54TW_INT32	TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG;
55TW_INT32	TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG;
56#endif /* TW_OSL_DEBUG */
57
/* Malloc class used for all OS-layer allocations made by this driver. */
static MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");

/* Entry points for the per-controller /dev/twaX management node. */
static	d_open_t		twa_open;
static	d_close_t		twa_close;
static	d_ioctl_t		twa_ioctl;

/* Character device switch for the management node created in twa_attach(). */
static struct cdevsw twa_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	twa_open,
	.d_close =	twa_close,
	.d_ioctl =	twa_ioctl,
	.d_name =	"twa",
};

/* Devclass handle passed to DRIVER_MODULE() below. */
static devclass_t	twa_devclass;
73
74/*
75 * Function name:	twa_open
76 * Description:		Called when the controller is opened.
77 *			Simply marks the controller as open.
78 *
79 * Input:		dev	-- control device corresponding to the ctlr
80 *			flags	-- mode of open
81 *			fmt	-- device type (character/block etc.)
82 *			proc	-- current process
83 * Output:		None
84 * Return value:	0	-- success
85 *			non-zero-- failure
86 */
87static TW_INT32
88twa_open(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc)
89{
90	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);
91
92	tw_osli_dbg_dprintf(5, sc, "entered");
93	sc->open = TW_CL_TRUE;
94	return(0);
95}
96
97/*
98 * Function name:	twa_close
99 * Description:		Called when the controller is closed.
100 *			Simply marks the controller as not open.
101 *
102 * Input:		dev	-- control device corresponding to the ctlr
103 *			flags	-- mode of corresponding open
104 *			fmt	-- device type (character/block etc.)
105 *			proc	-- current process
106 * Output:		None
107 * Return value:	0	-- success
108 *			non-zero-- failure
109 */
110static TW_INT32
111twa_close(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc)
112{
113	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);
114
115	tw_osli_dbg_dprintf(5, sc, "entered");
116	sc->open = TW_CL_FALSE;
117	return(0);
118}
119
120/*
121 * Function name:	twa_ioctl
122 * Description:		Called when an ioctl is posted to the controller.
123 *			Handles any OS Layer specific cmds, passes the rest
124 *			on to the Common Layer.
125 *
126 * Input:		dev	-- control device corresponding to the ctlr
127 *			cmd	-- ioctl cmd
128 *			buf	-- ptr to buffer in kernel memory, which is
129 *				   a copy of the input buffer in user-space
130 *			flags	-- mode of corresponding open
131 *			proc	-- current process
132 * Output:		buf	-- ptr to buffer in kernel memory, which will
133 *				   be copied to the output buffer in user-space
134 * Return value:	0	-- success
135 *			non-zero-- failure
136 */
137static TW_INT32
138twa_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, TW_INT32 flags, struct thread *proc)
139{
140	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);
141	TW_INT32		error;
142
143	tw_osli_dbg_dprintf(5, sc, "entered");
144
145	switch (cmd) {
146	case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
147		tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru");
148		error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf);
149		break;
150
151	case TW_OSL_IOCTL_SCAN_BUS:
152		/* Request CAM for a bus scan. */
153		tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus");
154		error = tw_osli_request_bus_scan(sc);
155		break;
156
157	default:
158		tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd);
159		error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf);
160		break;
161	}
162	return(error);
163}
164
/* newbus device interface entry points. */
static TW_INT32	twa_probe(device_t dev);
static TW_INT32	twa_attach(device_t dev);
static TW_INT32	twa_detach(device_t dev);
static TW_INT32	twa_shutdown(device_t dev);
/* busdma lockfunc used by the data/ioctl DMA tags (see tw_osli_alloc_mem). */
static TW_VOID	twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op);
static TW_VOID	twa_pci_intr(TW_VOID *arg);
static TW_VOID	twa_watchdog(TW_VOID *arg);
/* Shared with other OS-layer files, hence not static. */
int twa_setup_intr(struct twa_softc *sc);
int twa_teardown_intr(struct twa_softc *sc);

static TW_INT32	tw_osli_alloc_mem(struct twa_softc *sc);
static TW_VOID	tw_osli_free_resources(struct twa_softc *sc);

/* bus_dmamap_load callbacks for request data and Common Layer memory. */
static TW_VOID	twa_map_load_data_callback(TW_VOID *arg,
	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
static TW_VOID	twa_map_load_callback(TW_VOID *arg,
	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);

static device_method_t	twa_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		twa_probe),
	DEVMETHOD(device_attach,	twa_attach),
	DEVMETHOD(device_detach,	twa_detach),
	DEVMETHOD(device_shutdown,	twa_shutdown),

	DEVMETHOD_END
};

static driver_t	twa_pci_driver = {
	"twa",
	twa_methods,
	sizeof(struct twa_softc)
};

/* Register the driver on the PCI bus; depends on CAM for SCSI transport. */
DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, 0, 0);
MODULE_DEPEND(twa, cam, 1, 1, 1);
MODULE_DEPEND(twa, pci, 1, 1, 1);
202
203/*
204 * Function name:	twa_probe
205 * Description:		Called at driver load time.  Claims 9000 ctlrs.
206 *
207 * Input:		dev	-- bus device corresponding to the ctlr
208 * Output:		None
209 * Return value:	<= 0	-- success
210 *			> 0	-- failure
211 */
212static TW_INT32
213twa_probe(device_t dev)
214{
215	static TW_UINT8	first_ctlr = 1;
216
217	tw_osli_dbg_printf(3, "entered");
218
219	if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) {
220		device_set_desc(dev, TW_OSLI_DEVICE_NAME);
221		/* Print the driver version only once. */
222		if (first_ctlr) {
223			printf("3ware device driver for 9000 series storage "
224				"controllers, version: %s\n",
225				TW_OSL_DRIVER_VERSION_STRING);
226			first_ctlr = 0;
227		}
228		return(0);
229	}
230	return(ENXIO);
231}
232
233int twa_setup_intr(struct twa_softc *sc)
234{
235	int error = 0;
236
237	if (!(sc->intr_handle) && (sc->irq_res)) {
238		error = bus_setup_intr(sc->bus_dev, sc->irq_res,
239					INTR_TYPE_CAM | INTR_MPSAFE,
240					NULL, twa_pci_intr,
241					sc, &sc->intr_handle);
242	}
243	return( error );
244}
245
246int twa_teardown_intr(struct twa_softc *sc)
247{
248	int error = 0;
249
250	if ((sc->intr_handle) && (sc->irq_res)) {
251		error = bus_teardown_intr(sc->bus_dev,
252						sc->irq_res, sc->intr_handle);
253		sc->intr_handle = NULL;
254	}
255	return( error );
256}
257
258/*
259 * Function name:	twa_attach
260 * Description:		Allocates pci resources; updates sc; adds a node to the
261 *			sysctl tree to expose the driver version; makes calls
262 *			(to the Common Layer) to initialize ctlr, and to
263 *			attach to CAM.
264 *
265 * Input:		dev	-- bus device corresponding to the ctlr
266 * Output:		None
267 * Return value:	0	-- success
268 *			non-zero-- failure
269 */
static TW_INT32
twa_attach(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	TW_INT32		bar_num;
	TW_INT32		bar0_offset;
	TW_INT32		bar_size;
	TW_INT32		error;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Let the Common Layer handle point back at our softc. */
	sc->ctlr_handle.osl_ctlr_ctxt = sc;

	/* Initialize the softc structure. */
	sc->bus_dev = dev;
	sc->device_id = pci_get_device(dev);

	/*
	 * Initialize the mutexes right here, before any resource allocation,
	 * so that tw_osli_free_resources() can be called safely on any of the
	 * error paths below.
	 */
	sc->io_lock = &(sc->io_lock_handle);
	mtx_init(sc->io_lock, "tw_osl_io_lock", NULL, MTX_SPIN);
	sc->q_lock = &(sc->q_lock_handle);
	mtx_init(sc->q_lock, "tw_osl_q_lock", NULL, MTX_SPIN);
	sc->sim_lock = &(sc->sim_lock_handle);
	mtx_init(sc->sim_lock, "tw_osl_sim_lock", NULL, MTX_DEF | MTX_RECURSE);

	/* Expose hw.twaN nodes (currently just the driver version). */
	sysctl_ctx_init(&sc->sysctl_ctxt);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctxt,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_nameunit(dev),
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
	if (sc->sysctl_tree == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2000,
			"Cannot add sysctl tree node",
			ENXIO);
		return(ENXIO);
	}
	SYSCTL_ADD_STRING(&sc->sysctl_ctxt, SYSCTL_CHILDREN(sc->sysctl_tree),
		OID_AUTO, "driver_version", CTLFLAG_RD,
		TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");

	/* Force the busmaster enable bit on, in case the BIOS forgot. */
	pci_enable_busmaster(dev);

	/*
	 * Allocate the PCI register window.  The Common Layer knows which BAR
	 * holds the memory-mapped registers for this device id.
	 */
	if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
		&bar_num, &bar0_offset, &bar_size))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201F,
			"Can't get PCI BAR info",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}
	sc->reg_res_id = PCIR_BARS + bar0_offset;
	if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
				&(sc->reg_res_id), RF_ACTIVE))
				== NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2002,
			"Can't allocate register window",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Allocate and register our interrupt. */
	sc->irq_res_id = 0;
	if ((sc->irq_res = bus_alloc_resource_any(sc->bus_dev, SYS_RES_IRQ,
				&(sc->irq_res_id),
				RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2003,
			"Can't allocate interrupt",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	if ((error = twa_setup_intr(sc))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2004,
			"Can't set up interrupt",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* DMA tags, maps, request packets, Common Layer memory. */
	if ((error = tw_osli_alloc_mem(sc))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2005,
			"Memory allocation failure",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Initialize the Common Layer for this controller. */
	if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
			TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
			sc->non_dma_mem, sc->dma_mem,
			sc->dma_mem_phys
			))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2006,
			"Failed to initialize Common Layer/controller",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Create the control device (/dev/twaN) for ioctl access. */
	sc->ctrl_dev = make_dev(&twa_cdevsw, device_get_unit(sc->bus_dev),
			UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
			"twa%d", device_get_unit(sc->bus_dev));
	sc->ctrl_dev->si_drv1 = sc;

	if ((error = tw_osli_cam_attach(sc))) {
		tw_osli_free_resources(sc);
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2007,
			"Failed to initialize CAM",
			error);
		return(error);
	}

	/*
	 * Start the request-timeout watchdog.  Two callout slots are used
	 * alternately (see twa_watchdog()) so a reschedule can be armed while
	 * the previous instance is still running.
	 */
	sc->watchdog_index = 0;
	callout_init(&(sc->watchdog_callout[0]), 1);
	callout_init(&(sc->watchdog_callout[1]), 1);
	callout_reset(&(sc->watchdog_callout[0]), 5*hz, twa_watchdog, &sc->ctlr_handle);
	/* Deprecation notice: driver scheduled for removal in FreeBSD 13. */
	gone_in_dev(dev, 13, "twa(4) removed");

	return(0);
}
420
/*
 * Periodic watchdog: scans outstanding requests for deadline expiry, and
 * resets the controller when a timeout (or any other reset condition flagged
 * in the Common Layer) is detected.  Reschedules itself while the driver
 * remains active.
 */
static TW_VOID
twa_watchdog(TW_VOID *arg)
{
	struct tw_cl_ctlr_handle *ctlr_handle =
		(struct tw_cl_ctlr_handle *)arg;
	struct twa_softc		*sc = ctlr_handle->osl_ctlr_ctxt;
	int				i;
	int				i_need_a_reset = 0;
	int				driver_is_active = 0;
	/* Sentinel value; only meaningful after a callout_reset() below. */
	int				my_watchdog_was_pending = 1234;
	TW_UINT64			current_time;
	struct tw_osli_req_context	*my_req;

//==============================================================================
	current_time = (TW_UINT64) (tw_osl_get_local_time());

	/* Flag a reset if any busy request has blown past its deadline. */
	for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
		my_req = &(sc->req_ctx_buf[i]);

		if ((my_req->state == TW_OSLI_REQ_STATE_BUSY) &&
			(my_req->deadline) &&
			(my_req->deadline < current_time)) {
			tw_cl_set_reset_needed(ctlr_handle);
#ifdef    TW_OSL_DEBUG
			device_printf((sc)->bus_dev, "Request %d timed out! d = %llu, c = %llu\n", i, my_req->deadline, current_time);
#else  /* TW_OSL_DEBUG */
			device_printf((sc)->bus_dev, "Request %d timed out!\n", i);
#endif /* TW_OSL_DEBUG */
			/* One timed-out request is enough; stop scanning. */
			break;
		}
	}
//==============================================================================

	i_need_a_reset = tw_cl_is_reset_needed(ctlr_handle);

	/*
	 * Alternate between the two callout slots so that the next run can be
	 * armed while this instance is still executing.
	 */
	i = (int) ((sc->watchdog_index++) & 1);

	driver_is_active = tw_cl_is_active(ctlr_handle);

	if (i_need_a_reset) {
#ifdef    TW_OSL_DEBUG
		device_printf((sc)->bus_dev, "Watchdog rescheduled in 70 seconds\n");
#endif /* TW_OSL_DEBUG */
		/*
		 * Reschedule far enough out (70s) to let the reset finish
		 * before the next deadline scan.
		 */
		my_watchdog_was_pending =
			callout_reset(&(sc->watchdog_callout[i]), 70*hz, twa_watchdog, &sc->ctlr_handle);
		tw_cl_reset_ctlr(ctlr_handle);
#ifdef    TW_OSL_DEBUG
		device_printf((sc)->bus_dev, "Watchdog reset completed!\n");
#endif /* TW_OSL_DEBUG */
	} else if (driver_is_active) {
		/* Normal case: re-arm for another scan in 5 seconds. */
		my_watchdog_was_pending =
			callout_reset(&(sc->watchdog_callout[i]),  5*hz, twa_watchdog, &sc->ctlr_handle);
	}
#ifdef    TW_OSL_DEBUG
	if (i_need_a_reset || my_watchdog_was_pending)
		device_printf((sc)->bus_dev, "i_need_a_reset = %d, "
		"driver_is_active = %d, my_watchdog_was_pending = %d\n",
		i_need_a_reset, driver_is_active, my_watchdog_was_pending);
#endif /* TW_OSL_DEBUG */
}
481
482/*
483 * Function name:	tw_osli_alloc_mem
484 * Description:		Allocates memory needed both by CL and OSL.
485 *
486 * Input:		sc	-- OSL internal controller context
487 * Output:		None
488 * Return value:	0	-- success
489 *			non-zero-- failure
490 */
static TW_INT32
tw_osli_alloc_mem(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_UINT32			max_sg_elements;
	TW_UINT32			non_dma_mem_size;
	TW_UINT32			dma_mem_size;
	TW_INT32			error;
	TW_INT32			i;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Advertise 64-bit addressing/SG capability based on bus types. */
	sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0;
	sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0;

	max_sg_elements = (sizeof(bus_addr_t) == 8) ?
		TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS;

	/* Ask the Common Layer how much memory it needs, and its alignment. */
	if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags,
			sc->device_id, TW_OSLI_MAX_NUM_REQUESTS,  TW_OSLI_MAX_NUM_AENS,
			&(sc->alignment), &(sc->sg_size_factor),
			&non_dma_mem_size, &dma_mem_size
			))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2008,
			"Can't get Common Layer's memory requirements",
			error);
		return(error);
	}

	/*
	 * NOTE(review): with M_WAITOK, malloc(9) does not return NULL, so
	 * this check is defensive/dead code.
	 */
	if ((sc->non_dma_mem = malloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS,
				M_WAITOK)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2009,
			"Can't allocate non-dma memory",
			ENOMEM);
		return(ENOMEM);
	}

	/*
	 * Create the parent dma tag.  Error paths below return without local
	 * cleanup; the caller (twa_attach) invokes tw_osli_free_resources(),
	 * which releases whatever was set up so far.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(sc->bus_dev), /* parent */
				sc->alignment,		/* alignment */
				TW_OSLI_DMA_BOUNDARY,	/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				TW_CL_MAX_IO_SIZE,	/* maxsize */
				max_sg_elements,	/* nsegments */
				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockfuncarg */
				&sc->parent_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200A,
			"Can't allocate parent DMA tag",
			ENOMEM);
		return(ENOMEM);
	}

	/* Create a dma tag for Common Layer's DMA'able memory (dma_mem). */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				dma_mem_size,		/* maxsize */
				1,			/* nsegments */
				BUS_SPACE_MAXSIZE,	/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockfuncarg */
				&sc->cmd_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200B,
			"Can't allocate DMA tag for Common Layer's "
			"DMA'able memory",
			ENOMEM);
		return(ENOMEM);
	}

	if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
		BUS_DMA_NOWAIT, &sc->cmd_map)) {
		/* Try a second time. */
		if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
			BUS_DMA_NOWAIT, &sc->cmd_map)) {
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x200C,
				"Can't allocate DMA'able memory for the"
				"Common Layer",
				ENOMEM);
			return(ENOMEM);
		}
	}

	/* Obtain the bus address; twa_map_load_callback stores it. */
	bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
		dma_mem_size, twa_map_load_callback,
		&sc->dma_mem_phys, 0);

	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (128kB).
	 */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				TW_CL_MAX_IO_SIZE,	/* maxsize */
				max_sg_elements,	/* nsegments */
				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				twa_busdma_lock,	/* lockfunc */
				sc->io_lock,		/* lockfuncarg */
				&sc->dma_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200F,
			"Can't allocate DMA tag for data buffers",
			ENOMEM);
		return(ENOMEM);
	}

	/*
	 * Create a dma tag for ioctl data buffers; size will be the maximum
	 * possible I/O size (128kB).
	 */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				TW_CL_MAX_IO_SIZE,	/* maxsize */
				max_sg_elements,	/* nsegments */
				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				twa_busdma_lock,	/* lockfunc */
				sc->io_lock,		/* lockfuncarg */
				&sc->ioctl_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2010,
			"Can't allocate DMA tag for ioctl data buffers",
			ENOMEM);
		return(ENOMEM);
	}

	/* Create just one map for all ioctl request data buffers. */
	if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2011,
			"Can't create ioctl map",
			ENOMEM);
		return(ENOMEM);
	}

	/* Initialize request queues. */
	tw_osli_req_q_init(sc, TW_OSLI_FREE_Q);
	tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q);

	/* See the M_WAITOK note above: NULL check is defensive. */
	if ((sc->req_ctx_buf = (struct tw_osli_req_context *)
			malloc((sizeof(struct tw_osli_req_context) *
				TW_OSLI_MAX_NUM_REQUESTS),
				TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2012,
			"Failed to allocate request packets",
			ENOMEM);
		return(ENOMEM);
	}
	bzero(sc->req_ctx_buf,
		sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_REQUESTS);

	/* Per-request setup: DMA map, ioctl sleep mutex, free-queue entry. */
	for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
		req = &(sc->req_ctx_buf[i]);
		req->ctlr = sc;
		if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) {
			tw_osli_printf(sc, "request # = %d, error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2013,
				"Can't create dma map",
				i, ENOMEM);
			return(ENOMEM);
		}

		/* Initialize the ioctl wakeup/ timeout mutex */
		req->ioctl_wake_timeout_lock = &(req->ioctl_wake_timeout_lock_handle);
		mtx_init(req->ioctl_wake_timeout_lock, "tw_ioctl_wake_timeout_lock", NULL, MTX_DEF);

		/* Insert request into the free queue. */
		tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	}

	return(0);
}
706
707/*
708 * Function name:	tw_osli_free_resources
709 * Description:		Performs clean-up at the time of going down.
710 *
711 * Input:		sc	-- ptr to OSL internal ctlr context
712 * Output:		None
713 * Return value:	None
714 */
/*
 * Tears down, in reverse order of acquisition, everything set up by
 * twa_attach()/tw_osli_alloc_mem().  Safe to call with a partially
 * initialized softc (every step checks whether its resource exists),
 * which is why the attach error paths can all funnel through here.
 */
static TW_VOID
tw_osli_free_resources(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_INT32			error = 0;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Detach from CAM */
	tw_osli_cam_detach(sc);

	/*
	 * Per-request cleanup: destroy each request's ioctl mutex and DMA
	 * map.  Draining the free queue assumes no requests are busy.
	 */
	if (sc->req_ctx_buf)
		while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
			NULL) {
			mtx_destroy(req->ioctl_wake_timeout_lock);

			if ((error = bus_dmamap_destroy(sc->dma_tag,
					req->dma_map)))
				tw_osli_dbg_dprintf(1, sc,
					"dmamap_destroy(dma) returned %d",
					error);
		}

	if ((sc->ioctl_tag) && (sc->ioctl_map))
		if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
			tw_osli_dbg_dprintf(1, sc,
				"dmamap_destroy(ioctl) returned %d", error);

	/* Free all memory allocated so far. */
	if (sc->req_ctx_buf)
		free(sc->req_ctx_buf, TW_OSLI_MALLOC_CLASS);

	if (sc->non_dma_mem)
		free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);

	/* Unload and free the Common Layer's DMA'able memory. */
	if (sc->dma_mem) {
		bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
		bus_dmamem_free(sc->cmd_tag, sc->dma_mem,
			sc->cmd_map);
	}
	/* Child tags must be destroyed before the parent tag. */
	if (sc->cmd_tag)
		if ((error = bus_dma_tag_destroy(sc->cmd_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(cmd) returned %d", error);

	if (sc->dma_tag)
		if ((error = bus_dma_tag_destroy(sc->dma_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(dma) returned %d", error);

	if (sc->ioctl_tag)
		if ((error = bus_dma_tag_destroy(sc->ioctl_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(ioctl) returned %d", error);

	if (sc->parent_tag)
		if ((error = bus_dma_tag_destroy(sc->parent_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(parent) returned %d", error);

	/* Disconnect the interrupt handler. */
	if ((error = twa_teardown_intr(sc)))
			tw_osli_dbg_dprintf(1, sc,
				"teardown_intr returned %d", error);

	if (sc->irq_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_IRQ, sc->irq_res_id, sc->irq_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(irq) returned %d", error);

	/* Release the register window mapping. */
	if (sc->reg_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(io) returned %d", error);

	/* Destroy the control device. */
	if (sc->ctrl_dev != (struct cdev *)NULL)
		destroy_dev(sc->ctrl_dev);

	if ((error = sysctl_ctx_free(&sc->sysctl_ctxt)))
		tw_osli_dbg_dprintf(1, sc,
			"sysctl_ctx_free returned %d", error);

}
802
803/*
804 * Function name:	twa_detach
805 * Description:		Called when the controller is being detached from
806 *			the pci bus.
807 *
808 * Input:		dev	-- bus device corresponding to the ctlr
809 * Output:		None
810 * Return value:	0	-- success
811 *			non-zero-- failure
812 */
813static TW_INT32
814twa_detach(device_t dev)
815{
816	struct twa_softc	*sc = device_get_softc(dev);
817	TW_INT32		error;
818
819	tw_osli_dbg_dprintf(3, sc, "entered");
820
821	error = EBUSY;
822	if (sc->open) {
823		tw_osli_printf(sc, "error = %d",
824			TW_CL_SEVERITY_ERROR_STRING,
825			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
826			0x2014,
827			"Device open",
828			error);
829		goto out;
830	}
831
832	/* Shut the controller down. */
833	if ((error = twa_shutdown(dev)))
834		goto out;
835
836	/* Free all resources associated with this controller. */
837	tw_osli_free_resources(sc);
838	error = 0;
839
840out:
841	return(error);
842}
843
844/*
845 * Function name:	twa_shutdown
846 * Description:		Called at unload/shutdown time.  Lets the controller
847 *			know that we are going down.
848 *
849 * Input:		dev	-- bus device corresponding to the ctlr
850 * Output:		None
851 * Return value:	0	-- success
852 *			non-zero-- failure
853 */
static TW_INT32
twa_shutdown(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	TW_INT32		error = 0;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/*
	 * Disconnect interrupts.  NOTE(review): any error from the teardown
	 * is overwritten by the tw_cl_shutdown_ctlr() result below; only the
	 * Common Layer shutdown status is reported to the caller.
	 */
	error = twa_teardown_intr(sc);

	/* Stop watchdog task. */
	callout_drain(&(sc->watchdog_callout[0]));
	callout_drain(&(sc->watchdog_callout[1]));

	/* Disconnect from the controller. */
	if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2015,
			"Failed to shutdown Common Layer/controller",
			error);
	}
	return(error);
}
880
881/*
882 * Function name:	twa_busdma_lock
883 * Description:		Function to provide synchronization during busdma_swi.
884 *
885 * Input:		lock_arg -- lock mutex sent as argument
886 *			op -- operation (lock/unlock) expected of the function
887 * Output:		None
888 * Return value:	None
889 */
890TW_VOID
891twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op)
892{
893	struct mtx	*lock;
894
895	lock = (struct mtx *)lock_arg;
896	switch (op) {
897	case BUS_DMA_LOCK:
898		mtx_lock_spin(lock);
899		break;
900
901	case BUS_DMA_UNLOCK:
902		mtx_unlock_spin(lock);
903		break;
904
905	default:
906		panic("Unknown operation 0x%x for twa_busdma_lock!", op);
907	}
908}
909
910/*
911 * Function name:	twa_pci_intr
912 * Description:		Interrupt handler.  Wrapper for twa_interrupt.
913 *
914 * Input:		arg	-- ptr to OSL internal ctlr context
915 * Output:		None
916 * Return value:	None
917 */
918static TW_VOID
919twa_pci_intr(TW_VOID *arg)
920{
921	struct twa_softc	*sc = (struct twa_softc *)arg;
922
923	tw_osli_dbg_dprintf(10, sc, "entered");
924	tw_cl_interrupt(&(sc->ctlr_handle));
925}
926
927/*
928 * Function name:	tw_osli_fw_passthru
929 * Description:		Builds a fw passthru cmd pkt, and submits it to CL.
930 *
931 * Input:		sc	-- ptr to OSL internal ctlr context
932 *			buf	-- ptr to ioctl pkt understood by CL
933 * Output:		None
934 * Return value:	0	-- success
935 *			non-zero-- failure
936 */
937TW_INT32
938tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
939{
940	struct tw_osli_req_context		*req;
941	struct tw_osli_ioctl_no_data_buf	*user_buf =
942		(struct tw_osli_ioctl_no_data_buf *)buf;
943	TW_TIME					end_time;
944	TW_UINT32				timeout = 60;
945	TW_UINT32				data_buf_size_adjusted;
946	struct tw_cl_req_packet			*req_pkt;
947	struct tw_cl_passthru_req_packet	*pt_req;
948	TW_INT32				error;
949
950	tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");
951
952	if ((req = tw_osli_get_request(sc)) == NULL)
953		return(EBUSY);
954
955	req->req_handle.osl_req_ctxt = req;
956	req->orig_req = buf;
957	req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;
958
959	req_pkt = &(req->req_pkt);
960	req_pkt->status = 0;
961	req_pkt->tw_osl_callback = tw_osl_complete_passthru;
962	/* Let the Common Layer retry the request on cmd queue full. */
963	req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;
964
965	pt_req = &(req_pkt->gen_req_pkt.pt_req);
966	/*
967	 * Make sure that the data buffer sent to firmware is a
968	 * 512 byte multiple in size.
969	 */
970	data_buf_size_adjusted =
971		(user_buf->driver_pkt.buffer_length +
972		(sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1);
973	if ((req->length = data_buf_size_adjusted)) {
974		if ((req->data = malloc(data_buf_size_adjusted,
975			TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
976			error = ENOMEM;
977			tw_osli_printf(sc, "error = %d",
978				TW_CL_SEVERITY_ERROR_STRING,
979				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
980				0x2016,
981				"Could not alloc mem for "
982				"fw_passthru data_buf",
983				error);
984			goto fw_passthru_err;
985		}
986		/* Copy the payload. */
987		if ((error = copyin((TW_VOID *)(user_buf->pdata),
988			req->data,
989			user_buf->driver_pkt.buffer_length)) != 0) {
990			tw_osli_printf(sc, "error = %d",
991				TW_CL_SEVERITY_ERROR_STRING,
992				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
993				0x2017,
994				"Could not copyin fw_passthru data_buf",
995				error);
996			goto fw_passthru_err;
997		}
998		pt_req->sgl_entries = 1; /* will be updated during mapping */
999		req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
1000			TW_OSLI_REQ_FLAGS_DATA_OUT);
1001	} else
1002		pt_req->sgl_entries = 0; /* no payload */
1003
1004	pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
1005	pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);
1006
1007	if ((error = tw_osli_map_request(req)))
1008		goto fw_passthru_err;
1009
1010	end_time = tw_osl_get_local_time() + timeout;
1011	while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
1012		mtx_lock(req->ioctl_wake_timeout_lock);
1013		req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;
1014
1015		error = mtx_sleep(req, req->ioctl_wake_timeout_lock, 0,
1016			    "twa_passthru", timeout*hz);
1017		mtx_unlock(req->ioctl_wake_timeout_lock);
1018
1019		if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
1020			error = 0;
1021		req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
1022
1023		if (! error) {
1024			if (((error = req->error_code)) ||
1025				((error = (req->state !=
1026				TW_OSLI_REQ_STATE_COMPLETE))) ||
1027				((error = req_pkt->status)))
1028				goto fw_passthru_err;
1029			break;
1030		}
1031
1032		if (req_pkt->status) {
1033			error = req_pkt->status;
1034			goto fw_passthru_err;
1035		}
1036
1037		if (error == EWOULDBLOCK) {
1038			/* Time out! */
1039			if ((!(req->error_code))                       &&
1040			    (req->state == TW_OSLI_REQ_STATE_COMPLETE) &&
1041			    (!(req_pkt->status))			  ) {
1042#ifdef    TW_OSL_DEBUG
1043				tw_osli_printf(sc, "request = %p",
1044					TW_CL_SEVERITY_ERROR_STRING,
1045					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1046					0x7777,
1047					"FALSE Passthru timeout!",
1048					req);
1049#endif /* TW_OSL_DEBUG */
1050				error = 0; /* False error */
1051				break;
1052			}
1053			if (!(tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) {
1054#ifdef    TW_OSL_DEBUG
1055				tw_osli_printf(sc, "request = %p",
1056					TW_CL_SEVERITY_ERROR_STRING,
1057					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1058					0x2018,
1059					"Passthru request timed out!",
1060					req);
1061#else  /* TW_OSL_DEBUG */
1062			device_printf((sc)->bus_dev, "Passthru request timed out!\n");
1063#endif /* TW_OSL_DEBUG */
1064				tw_cl_reset_ctlr(&(req->ctlr->ctlr_handle));
1065			}
1066
1067			error = 0;
1068			end_time = tw_osl_get_local_time() + timeout;
1069			continue;
1070			/*
1071			 * Don't touch req after a reset.  It (and any
1072			 * associated data) will be
1073			 * unmapped by the callback.
1074			 */
1075		}
1076		/*
1077		 * Either the request got completed, or we were woken up by a
1078		 * signal.  Calculate the new timeout, in case it was the latter.
1079		 */
1080		timeout = (end_time - tw_osl_get_local_time());
1081	} /* End of while loop */
1082
1083	/* If there was a payload, copy it back. */
1084	if ((!error) && (req->length))
1085		if ((error = copyout(req->data, user_buf->pdata,
1086			user_buf->driver_pkt.buffer_length)))
1087			tw_osli_printf(sc, "error = %d",
1088				TW_CL_SEVERITY_ERROR_STRING,
1089				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1090				0x2019,
1091				"Could not copyout fw_passthru data_buf",
1092				error);
1093
1094fw_passthru_err:
1095
1096	if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
1097		error = EBUSY;
1098
1099	user_buf->driver_pkt.os_status = error;
1100	/* Free resources. */
1101	if (req->data)
1102		free(req->data, TW_OSLI_MALLOC_CLASS);
1103	tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
1104	return(error);
1105}
1106
1107/*
1108 * Function name:	tw_osl_complete_passthru
1109 * Description:		Called to complete passthru requests.
1110 *
1111 * Input:		req_handle	-- ptr to request handle
1112 * Output:		None
1113 * Return value:	None
1114 */
TW_VOID
tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle)
{
	struct tw_osli_req_context	*req = req_handle->osl_req_ctxt;
	struct tw_cl_req_packet		*req_pkt =
		(struct tw_cl_req_packet *)(&req->req_pkt);
	struct twa_softc		*sc = req->ctlr;

	tw_osli_dbg_dprintf(5, sc, "entered");

	/* A request should only complete while it is posted (busy). */
	if (req->state != TW_OSLI_REQ_STATE_BUSY) {
		tw_osli_printf(sc, "request = %p, status = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201B,
			"Unposted command completed!!",
			req, req->state);
	}

	/*
	 * Remove request from the busy queue.  Just mark it complete.
	 * There's no need to move it into the complete queue as we are
	 * going to be done with it right now.
	 */
	req->state = TW_OSLI_REQ_STATE_COMPLETE;
	tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q);

	/* Undo the DMA mapping (and bounce-buffer copy-back, if any). */
	tw_osli_unmap_request(req);

	/*
	 * Don't do a wake up if there was an error even before the request
	 * was sent down to the Common Layer, and we hadn't gotten an
	 * EINPROGRESS.  The request originator will then be returned an
	 * error, and he can do the clean-up.
	 */
	if ((req->error_code) && (!(req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)))
		return;

	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
		if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) {
			/* Wake up the sleeping command originator. */
			tw_osli_dbg_dprintf(5, sc,
				"Waking up originator of request %p", req);
			req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
			wakeup_one(req);
		} else {
			/*
			 * If the request completed even before mtx_sleep
			 * was called, simply return.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED)
				return;

			/* Suppress the warning for reset-aborted requests. */
			if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
				return;

			/* No sleeper and not an expected early completion. */
			tw_osli_printf(sc, "request = %p",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x201C,
				"Passthru callback called, "
				"and caller not sleeping",
				req);
		}
	} else {
		/* This completion path is reserved for passthru requests. */
		tw_osli_printf(sc, "request = %p",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201D,
			"Passthru callback called for non-passthru request",
			req);
	}
}
1188
1189/*
1190 * Function name:	tw_osli_get_request
1191 * Description:		Gets a request pkt from the free queue.
1192 *
1193 * Input:		sc	-- ptr to OSL internal ctlr context
1194 * Output:		None
1195 * Return value:	ptr to request pkt	-- success
1196 *			NULL			-- failure
1197 */
1198struct tw_osli_req_context *
1199tw_osli_get_request(struct twa_softc *sc)
1200{
1201	struct tw_osli_req_context	*req;
1202
1203	tw_osli_dbg_dprintf(4, sc, "entered");
1204
1205	/* Get a free request packet. */
1206	req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q);
1207
1208	/* Initialize some fields to their defaults. */
1209	if (req) {
1210		req->req_handle.osl_req_ctxt = NULL;
1211		req->req_handle.cl_req_ctxt = NULL;
1212		req->req_handle.is_io = 0;
1213		req->data = NULL;
1214		req->length = 0;
1215		req->deadline = 0;
1216		req->real_data = NULL;
1217		req->real_length = 0;
1218		req->state = TW_OSLI_REQ_STATE_INIT;/* req being initialized */
1219		req->flags = 0;
1220		req->error_code = 0;
1221		req->orig_req = NULL;
1222
1223		bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet));
1224	}
1225	return(req);
1226}
1227
1228/*
1229 * Function name:	twa_map_load_data_callback
1230 * Description:		Callback of bus_dmamap_load for the buffer associated
1231 *			with data.  Updates the cmd pkt (size/sgl_entries
1232 *			fields, as applicable) to reflect the number of sg
1233 *			elements.
1234 *
1235 * Input:		arg	-- ptr to OSL internal request context
1236 *			segs	-- ptr to a list of segment descriptors
1237 *			nsegments--# of segments
1238 *			error	-- 0 if no errors encountered before callback,
1239 *				   non-zero if errors were encountered
1240 * Output:		None
1241 * Return value:	None
1242 */
static TW_VOID
twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs,
	TW_INT32 nsegments, TW_INT32 error)
{
	struct tw_osli_req_context	*req =
		(struct tw_osli_req_context *)arg;
	struct twa_softc		*sc = req->ctlr;
	struct tw_cl_req_packet		*req_pkt = &(req->req_pkt);

	tw_osli_dbg_dprintf(10, sc, "entered");

	/*
	 * EINVAL: the load failed outright and nothing was mapped.  Just
	 * record the error; the submitter will see it and clean up.
	 */
	if (error == EINVAL) {
		req->error_code = error;
		return;
	}

	/* Mark the request as currently being processed. */
	req->state = TW_OSLI_REQ_STATE_BUSY;
	/* Move the request into the busy queue. */
	tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);

	/* From here on, completion must unmap the request. */
	req->flags |= TW_OSLI_REQ_FLAGS_MAPPED;

	/* EFBIG: the buffer required too many DMA segments; fail via "out". */
	if (error == EFBIG) {
		req->error_code = error;
		goto out;
	}

	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
		struct tw_cl_passthru_req_packet	*pt_req;

		/* Make the buffer coherent before the device accesses it. */
		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/*
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREWRITE);
		}

		pt_req = &(req_pkt->gen_req_pkt.pt_req);
		pt_req->sg_list = (TW_UINT8 *)segs;
		/* sgl_entries was preset to 1 by the submitter; adjust it. */
		pt_req->sgl_entries += (nsegments - 1);
		error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	} else {
		struct tw_cl_scsi_req_packet	*scsi_req;

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/*
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREWRITE);
		}

		scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
		scsi_req->sg_list = (TW_UINT8 *)segs;
		/* Presumably preset to 1 like the passthru path -- adjust. */
		scsi_req->sgl_entries += (nsegments - 1);
		error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	}

out:
	if (error) {
		req->error_code = error;
		/* Fail the request through its normal completion callback. */
		req_pkt->tw_osl_callback(&(req->req_handle));
		/*
		 * If the caller had been returned EINPROGRESS, and he has
		 * registered a callback for handling completion, the callback
		 * will never get called because we were unable to submit the
		 * request.  So, free up the request right here.
		 */
		if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
			tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	}
}
1333
1334/*
1335 * Function name:	twa_map_load_callback
1336 * Description:		Callback of bus_dmamap_load for the buffer associated
1337 *			with a cmd pkt.
1338 *
1339 * Input:		arg	-- ptr to variable to hold phys addr
1340 *			segs	-- ptr to a list of segment descriptors
1341 *			nsegments--# of segments
1342 *			error	-- 0 if no errors encountered before callback,
1343 *				   non-zero if errors were encountered
1344 * Output:		None
1345 * Return value:	None
1346 */
1347static TW_VOID
1348twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs,
1349	TW_INT32 nsegments, TW_INT32 error)
1350{
1351	*((bus_addr_t *)arg) = segs[0].ds_addr;
1352}
1353
1354/*
1355 * Function name:	tw_osli_map_request
1356 * Description:		Maps a cmd pkt and data associated with it, into
1357 *			DMA'able memory.
1358 *
1359 * Input:		req	-- ptr to request pkt
1360 * Output:		None
1361 * Return value:	0	-- success
1362 *			non-zero-- failure
1363 */
TW_INT32
tw_osli_map_request(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;
	TW_INT32		error = 0;

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* If the command involves data, map that too. */
	if (req->data != NULL) {
		/*
		 * It's sufficient for the data pointer to be 4-byte aligned
		 * to work with 9000.  However, if 4-byte aligned addresses
		 * are passed to bus_dmamap_load, we can get back sg elements
		 * that are not 512-byte multiples in size.  So, we will let
		 * only those buffers that are 512-byte aligned to pass
		 * through, and bounce the rest, so as to make sure that we
		 * always get back sg elements that are 512-byte multiples
		 * in size.
		 */
		if (((vm_offset_t)req->data % sc->sg_size_factor) ||
			(req->length % sc->sg_size_factor)) {
			req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED;
			/* Save original data pointer and length. */
			req->real_data = req->data;
			req->real_length = req->length;
			/* Round length up to a sg_size_factor multiple. */
			req->length = (req->length +
				(sc->sg_size_factor - 1)) &
				~(sc->sg_size_factor - 1);
			/* M_NOWAIT may return NULL; failure goes to caller. */
			req->data = malloc(req->length, TW_OSLI_MALLOC_CLASS,
					M_NOWAIT);
			if (req->data == NULL) {
				tw_osli_printf(sc, "error = %d",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x201E,
					"Failed to allocate memory "
					"for bounce buffer",
					ENOMEM);
				/* Restore original data pointer and length. */
				req->data = req->real_data;
				req->length = req->real_length;
				return(ENOMEM);
			}
		}

		/*
		 * Map the data buffer into bus space and build the SG list.
		 */
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
			/* Lock against multiple simultaneous ioctl calls. */
			mtx_lock_spin(sc->io_lock);
			error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map,
				req->data, req->length,
				twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
			mtx_unlock_spin(sc->io_lock);
		} else if (req->flags & TW_OSLI_REQ_FLAGS_CCB) {
			/* CAM CCB path: let busdma extract the data buffer. */
			error = bus_dmamap_load_ccb(sc->dma_tag, req->dma_map,
				req->orig_req, twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
		} else {
			/*
			 * There's only one CAM I/O thread running at a time.
			 * So, there's no need to hold the io_lock.
			 */
			error = bus_dmamap_load(sc->dma_tag, req->dma_map,
				req->data, req->length,
				twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
		}

		/*
		 * On a successful load the callback already ran (or will
		 * run); propagate any error it recorded.
		 */
		if (!error)
			error = req->error_code;
		else {
			if (error == EINPROGRESS) {
				/*
				 * Specifying sc->io_lock as the lockfuncarg
				 * in ...tag_create should protect the access
				 * of ...FLAGS_MAPPED from the callback.
				 */
				mtx_lock_spin(sc->io_lock);
				if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED))
					req->flags |= TW_OSLI_REQ_FLAGS_IN_PROGRESS;
				tw_osli_disallow_new_requests(sc, &(req->req_handle));
				mtx_unlock_spin(sc->io_lock);
				/* Deferred load is not an error to the caller. */
				error = 0;
			} else {
				tw_osli_printf(sc, "error = %d",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x9999,
					"Failed to map DMA memory "
					"for I/O request",
					error);
				req->flags |= TW_OSLI_REQ_FLAGS_FAILED;
				/* Free alignment buffer if it was used. */
				if (req->flags &
					TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
					free(req->data, TW_OSLI_MALLOC_CLASS);
					/*
					 * Restore original data pointer
					 * and length.
					 */
					req->data = req->real_data;
					req->length = req->real_length;
				}
			}
		}

	} else {
		/* No data: submit the command to the Common Layer directly. */
		/* Mark the request as currently being processed. */
		req->state = TW_OSLI_REQ_STATE_BUSY;
		/* Move the request into the busy queue. */
		tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU)
			error = tw_cl_fw_passthru(&sc->ctlr_handle,
					&(req->req_pkt), &(req->req_handle));
		else
			error = tw_cl_start_io(&sc->ctlr_handle,
					&(req->req_pkt), &(req->req_handle));
		if (error) {
			req->error_code = error;
			/* Complete the failed request via its own callback. */
			req->req_pkt.tw_osl_callback(&(req->req_handle));
		}
	}
	return(error);
}
1492
1493/*
1494 * Function name:	tw_osli_unmap_request
1495 * Description:		Undoes the mapping done by tw_osli_map_request.
1496 *
1497 * Input:		req	-- ptr to request pkt
1498 * Output:		None
1499 * Return value:	None
1500 */
1501TW_VOID
1502tw_osli_unmap_request(struct tw_osli_req_context *req)
1503{
1504	struct twa_softc	*sc = req->ctlr;
1505
1506	tw_osli_dbg_dprintf(10, sc, "entered");
1507
1508	/* If the command involved data, unmap that too. */
1509	if (req->data != NULL) {
1510		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1511			/* Lock against multiple simultaneous ioctl calls. */
1512			mtx_lock_spin(sc->io_lock);
1513
1514			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
1515				bus_dmamap_sync(sc->ioctl_tag,
1516					sc->ioctl_map, BUS_DMASYNC_POSTREAD);
1517
1518				/*
1519				 * If we are using a bounce buffer, and we are
1520				 * reading data, copy the real data in.
1521				 */
1522				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1523					bcopy(req->data, req->real_data,
1524						req->real_length);
1525			}
1526
1527			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
1528				bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1529					BUS_DMASYNC_POSTWRITE);
1530
1531			bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map);
1532
1533			mtx_unlock_spin(sc->io_lock);
1534		} else {
1535			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
1536				bus_dmamap_sync(sc->dma_tag,
1537					req->dma_map, BUS_DMASYNC_POSTREAD);
1538
1539				/*
1540				 * If we are using a bounce buffer, and we are
1541				 * reading data, copy the real data in.
1542				 */
1543				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1544					bcopy(req->data, req->real_data,
1545						req->real_length);
1546			}
1547			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
1548				bus_dmamap_sync(sc->dma_tag, req->dma_map,
1549					BUS_DMASYNC_POSTWRITE);
1550
1551			bus_dmamap_unload(sc->dma_tag, req->dma_map);
1552		}
1553	}
1554
1555	/* Free alignment buffer if it was used. */
1556	if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
1557		free(req->data, TW_OSLI_MALLOC_CLASS);
1558		/* Restore original data pointer and length. */
1559		req->data = req->real_data;
1560		req->length = req->real_length;
1561	}
1562}
1563
1564#ifdef TW_OSL_DEBUG
1565
1566TW_VOID	twa_report_stats(TW_VOID);
1567TW_VOID	twa_reset_stats(TW_VOID);
1568TW_VOID	tw_osli_print_ctlr_stats(struct twa_softc *sc);
1569TW_VOID twa_print_req_info(struct tw_osli_req_context *req);
1570
1571/*
1572 * Function name:	twa_report_stats
1573 * Description:		For being called from ddb.  Calls functions that print
1574 *			OSL and CL internal stats for the controller.
1575 *
1576 * Input:		None
1577 * Output:		None
1578 * Return value:	None
1579 */
1580TW_VOID
1581twa_report_stats(TW_VOID)
1582{
1583	struct twa_softc	*sc;
1584	TW_INT32		i;
1585
1586	for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1587		tw_osli_print_ctlr_stats(sc);
1588		tw_cl_print_ctlr_stats(&sc->ctlr_handle);
1589	}
1590}
1591
1592/*
1593 * Function name:	tw_osli_print_ctlr_stats
1594 * Description:		For being called from ddb.  Prints OSL controller stats
1595 *
1596 * Input:		sc	-- ptr to OSL internal controller context
1597 * Output:		None
1598 * Return value:	None
1599 */
1600TW_VOID
1601tw_osli_print_ctlr_stats(struct twa_softc *sc)
1602{
1603	twa_printf(sc, "osl_ctlr_ctxt = %p\n", sc);
1604	twa_printf(sc, "OSLq type  current  max\n");
1605	twa_printf(sc, "free      %04d     %04d\n",
1606		sc->q_stats[TW_OSLI_FREE_Q].cur_len,
1607		sc->q_stats[TW_OSLI_FREE_Q].max_len);
1608	twa_printf(sc, "busy      %04d     %04d\n",
1609		sc->q_stats[TW_OSLI_BUSY_Q].cur_len,
1610		sc->q_stats[TW_OSLI_BUSY_Q].max_len);
1611}
1612
1613/*
1614 * Function name:	twa_print_req_info
1615 * Description:		For being called from ddb.  Calls functions that print
1616 *			OSL and CL internal details for the request.
1617 *
1618 * Input:		req	-- ptr to OSL internal request context
1619 * Output:		None
1620 * Return value:	None
1621 */
1622TW_VOID
1623twa_print_req_info(struct tw_osli_req_context *req)
1624{
1625	struct twa_softc	*sc = req->ctlr;
1626
1627	twa_printf(sc, "OSL details for request:\n");
1628	twa_printf(sc, "osl_req_ctxt = %p, cl_req_ctxt = %p\n"
1629		"data = %p, length = 0x%x, real_data = %p, real_length = 0x%x\n"
1630		"state = 0x%x, flags = 0x%x, error = 0x%x, orig_req = %p\n"
1631		"next_req = %p, prev_req = %p, dma_map = %p\n",
1632		req->req_handle.osl_req_ctxt, req->req_handle.cl_req_ctxt,
1633		req->data, req->length, req->real_data, req->real_length,
1634		req->state, req->flags, req->error_code, req->orig_req,
1635		req->link.next, req->link.prev, req->dma_map);
1636	tw_cl_print_req_info(&(req->req_handle));
1637}
1638
1639/*
1640 * Function name:	twa_reset_stats
1641 * Description:		For being called from ddb.
1642 *			Resets some OSL controller stats.
1643 *
1644 * Input:		None
1645 * Output:		None
1646 * Return value:	None
1647 */
1648TW_VOID
1649twa_reset_stats(TW_VOID)
1650{
1651	struct twa_softc	*sc;
1652	TW_INT32		i;
1653
1654	for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1655		sc->q_stats[TW_OSLI_FREE_Q].max_len = 0;
1656		sc->q_stats[TW_OSLI_BUSY_Q].max_len = 0;
1657		tw_cl_reset_stats(&sc->ctlr_handle);
1658	}
1659}
1660
1661#endif /* TW_OSL_DEBUG */
1662