/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
 * Copyright (c) 2004-05 Vinod Kashyap.
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/twa/tw_osl_freebsd.c 330897 2018-03-14 03:19:51Z eadler $");

/*
 * AMCC's 3ware driver for 9000 series storage controllers.
 *
 * Author: Vinod Kashyap
 * Modifications by: Adam Radford
 * Modifications by: Manjunath Ranganathaiah
 */


/*
 * FreeBSD specific functions not related to CAM, and other
 * miscellaneous functions.
 */


#include <dev/twa/tw_osl_includes.h>
#include <dev/twa/tw_cl_fwif.h>
#include <dev/twa/tw_cl_ioctl.h>
#include <dev/twa/tw_osl_ioctl.h>

#ifdef TW_OSL_DEBUG
TW_INT32	TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG;
TW_INT32	TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG;
#endif /* TW_OSL_DEBUG */

static MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");


static	d_open_t		twa_open;
static	d_close_t		twa_close;
static	d_ioctl_t		twa_ioctl;

static struct cdevsw twa_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	twa_open,
	.d_close =	twa_close,
	.d_ioctl =	twa_ioctl,
	.d_name =	"twa",
};
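
/*
 * These cdevsw entry points back the per-controller character device
 * ("/dev/twa<unit>") that twa_attach() creates with make_dev() below;
 * management applications open that node and issue the ioctls handled
 * in twa_ioctl().
 */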

static devclass_t	twa_devclass;


/*
 * Function name:	twa_open
 * Description:		Called when the controller is opened.
 *			Simply marks the controller as open.
 *
 * Input:		dev	-- control device corresponding to the ctlr
 *			flags	-- mode of open
 *			fmt	-- device type (character/block etc.)
 *			proc	-- current process
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_open(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc)
{
	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);

	tw_osli_dbg_dprintf(5, sc, "entered");
	sc->open = TW_CL_TRUE;
	return(0);
}



/*
 * Function name:	twa_close
 * Description:		Called when the controller is closed.
 *			Simply marks the controller as not open.
 *
 * Input:		dev	-- control device corresponding to the ctlr
 *			flags	-- mode of corresponding open
 *			fmt	-- device type (character/block etc.)
 *			proc	-- current process
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_close(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc)
{
	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);

	tw_osli_dbg_dprintf(5, sc, "entered");
	sc->open = TW_CL_FALSE;
	return(0);
}



/*
 * Function name:	twa_ioctl
 * Description:		Called when an ioctl is posted to the controller.
 *			Handles any OS Layer specific cmds, passes the rest
 *			on to the Common Layer.
 *
 * Input:		dev	-- control device corresponding to the ctlr
 *			cmd	-- ioctl cmd
 *			buf	-- ptr to buffer in kernel memory, which is
 *				   a copy of the input buffer in user-space
 *			flags	-- mode of corresponding open
 *			proc	-- current process
 * Output:		buf	-- ptr to buffer in kernel memory, which will
 *				   be copied to the output buffer in user-space
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, TW_INT32 flags, struct thread *proc)
{
	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);
	TW_INT32		error;

	tw_osli_dbg_dprintf(5, sc, "entered");

	switch (cmd) {
	case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
		tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru");
		error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf);
		break;

	case TW_OSL_IOCTL_SCAN_BUS:
		/* Request CAM for a bus scan. */
		tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus");
		error = tw_osli_request_bus_scan(sc);
		break;

	default:
		tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd);
		error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf);
		break;
	}
	return(error);
}



static TW_INT32	twa_probe(device_t dev);
static TW_INT32	twa_attach(device_t dev);
static TW_INT32	twa_detach(device_t dev);
static TW_INT32	twa_shutdown(device_t dev);
static TW_VOID	twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op);
static TW_VOID	twa_pci_intr(TW_VOID *arg);
static TW_VOID	twa_watchdog(TW_VOID *arg);
int twa_setup_intr(struct twa_softc *sc);
int twa_teardown_intr(struct twa_softc *sc);

static TW_INT32	tw_osli_alloc_mem(struct twa_softc *sc);
static TW_VOID	tw_osli_free_resources(struct twa_softc *sc);

static TW_VOID	twa_map_load_data_callback(TW_VOID *arg,
	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
static TW_VOID	twa_map_load_callback(TW_VOID *arg,
	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);


static device_method_t	twa_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		twa_probe),
	DEVMETHOD(device_attach,	twa_attach),
	DEVMETHOD(device_detach,	twa_detach),
	DEVMETHOD(device_shutdown,	twa_shutdown),

	DEVMETHOD_END
};

static driver_t	twa_pci_driver = {
	"twa",
	twa_methods,
	sizeof(struct twa_softc)
};

DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, 0, 0);
MODULE_DEPEND(twa, cam, 1, 1, 1);
MODULE_DEPEND(twa, pci, 1, 1, 1);


/*
 * Function name:	twa_probe
 * Description:		Called at driver load time.  Claims 9000 ctlrs.
 *
 * Input:		dev	-- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	<= 0	-- success
 *			> 0	-- failure
 */
static TW_INT32
twa_probe(device_t dev)
{
	static TW_UINT8	first_ctlr = 1;

	tw_osli_dbg_printf(3, "entered");

	if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) {
		device_set_desc(dev, TW_OSLI_DEVICE_NAME);
		/* Print the driver version only once. */
		if (first_ctlr) {
			printf("3ware device driver for 9000 series storage "
				"controllers, version: %s\n",
				TW_OSL_DRIVER_VERSION_STRING);
			first_ctlr = 0;
		}
		return(0);
	}
	return(ENXIO);
}

int twa_setup_intr(struct twa_softc *sc)
{
	int error = 0;

	if (!(sc->intr_handle) && (sc->irq_res)) {
		error = bus_setup_intr(sc->bus_dev, sc->irq_res,
					INTR_TYPE_CAM | INTR_MPSAFE,
					NULL, twa_pci_intr,
					sc, &sc->intr_handle);
	}
	return( error );
}


int twa_teardown_intr(struct twa_softc *sc)
{
	int error = 0;

	if ((sc->intr_handle) && (sc->irq_res)) {
		error = bus_teardown_intr(sc->bus_dev,
						sc->irq_res, sc->intr_handle);
		sc->intr_handle = NULL;
	}
	return( error );
}



/*
 * Function name:	twa_attach
 * Description:		Allocates pci resources; updates sc; adds a node to the
 *			sysctl tree to expose the driver version; makes calls
 *			(to the Common Layer) to initialize ctlr, and to
 *			attach to CAM.
 *
 * Input:		dev	-- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_attach(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	TW_INT32		bar_num;
	TW_INT32		bar0_offset;
	TW_INT32		bar_size;
	TW_INT32		error;

	tw_osli_dbg_dprintf(3, sc, "entered");

	sc->ctlr_handle.osl_ctlr_ctxt = sc;

	/* Initialize the softc structure. */
	sc->bus_dev = dev;
	sc->device_id = pci_get_device(dev);

	/* Initialize the mutexes right here. */
	sc->io_lock = &(sc->io_lock_handle);
	mtx_init(sc->io_lock, "tw_osl_io_lock", NULL, MTX_SPIN);
	sc->q_lock = &(sc->q_lock_handle);
	mtx_init(sc->q_lock, "tw_osl_q_lock", NULL, MTX_SPIN);
	sc->sim_lock = &(sc->sim_lock_handle);
	mtx_init(sc->sim_lock, "tw_osl_sim_lock", NULL, MTX_DEF | MTX_RECURSE);
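	/*
	 * io_lock and q_lock are spin mutexes; io_lock is taken with
	 * mtx_lock_spin() around ioctl DMA mappings and doubles as the
	 * busdma lockfunc argument in tw_osli_alloc_mem(), while q_lock
	 * presumably guards the OSL request queues.  sim_lock is a regular,
	 * recursable sleep mutex used on the CAM side of the driver.
	 */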

	sysctl_ctx_init(&sc->sysctl_ctxt);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctxt,
		SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
		device_get_nameunit(dev), CTLFLAG_RD, 0, "");
	if (sc->sysctl_tree == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2000,
			"Cannot add sysctl tree node",
			ENXIO);
		return(ENXIO);
	}
	SYSCTL_ADD_STRING(&sc->sysctl_ctxt, SYSCTL_CHILDREN(sc->sysctl_tree),
		OID_AUTO, "driver_version", CTLFLAG_RD,
		TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");
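	/*
	 * The node is rooted at hw.<nameunit>, so the version can be read
	 * with, e.g., "sysctl hw.twa0.driver_version" for the first
	 * controller.
	 */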

	/* Force the busmaster enable bit on, in case the BIOS forgot. */
	pci_enable_busmaster(dev);

	/* Allocate the PCI register window. */
	if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
		&bar_num, &bar0_offset, &bar_size))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201F,
			"Can't get PCI BAR info",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}
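	/*
	 * The resource id of a memory BAR is its offset in PCI config
	 * space: PCIR_BARS is the offset of BAR 0, and bar0_offset (as
	 * reported by the Common Layer) is added to it to select the BAR
	 * to map.
	 */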
	sc->reg_res_id = PCIR_BARS + bar0_offset;
	if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
				&(sc->reg_res_id), RF_ACTIVE))
				== NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2002,
			"Can't allocate register window",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Allocate and register our interrupt. */
	sc->irq_res_id = 0;
	if ((sc->irq_res = bus_alloc_resource_any(sc->bus_dev, SYS_RES_IRQ,
				&(sc->irq_res_id),
				RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2003,
			"Can't allocate interrupt",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	if ((error = twa_setup_intr(sc))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2004,
			"Can't set up interrupt",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	if ((error = tw_osli_alloc_mem(sc))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2005,
			"Memory allocation failure",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Initialize the Common Layer for this controller. */
	if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
			TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
			sc->non_dma_mem, sc->dma_mem,
			sc->dma_mem_phys
			))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2006,
			"Failed to initialize Common Layer/controller",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Create the control device. */
	sc->ctrl_dev = make_dev(&twa_cdevsw, device_get_unit(sc->bus_dev),
			UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
			"twa%d", device_get_unit(sc->bus_dev));
	sc->ctrl_dev->si_drv1 = sc;

	if ((error = tw_osli_cam_attach(sc))) {
		tw_osli_free_resources(sc);
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2007,
			"Failed to initialize CAM",
			error);
		return(error);
	}

	sc->watchdog_index = 0;
	callout_init(&(sc->watchdog_callout[0]), 1);
	callout_init(&(sc->watchdog_callout[1]), 1);
	callout_reset(&(sc->watchdog_callout[0]), 5*hz, twa_watchdog, &sc->ctlr_handle);

	return(0);
}


static TW_VOID
twa_watchdog(TW_VOID *arg)
{
	struct tw_cl_ctlr_handle *ctlr_handle =
		(struct tw_cl_ctlr_handle *)arg;
	struct twa_softc		*sc = ctlr_handle->osl_ctlr_ctxt;
	int				i;
	int				i_need_a_reset = 0;
	int				driver_is_active = 0;
	int				my_watchdog_was_pending = 1234;
	TW_UINT64			current_time;
	struct tw_osli_req_context	*my_req;


//==============================================================================
	current_time = (TW_UINT64) (tw_osl_get_local_time());

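	/*
	 * Scan for timed-out requests.  req->deadline is an absolute expiry
	 * time (compared against tw_osl_get_local_time()); any busy request
	 * with a non-zero deadline in the past flags the controller for a
	 * reset below.
	 */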
	for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
		my_req = &(sc->req_ctx_buf[i]);

		if ((my_req->state == TW_OSLI_REQ_STATE_BUSY) &&
			(my_req->deadline) &&
			(my_req->deadline < current_time)) {
			tw_cl_set_reset_needed(ctlr_handle);
#ifdef    TW_OSL_DEBUG
			device_printf((sc)->bus_dev, "Request %d timed out! d = %llu, c = %llu\n", i, my_req->deadline, current_time);
#else  /* TW_OSL_DEBUG */
			device_printf((sc)->bus_dev, "Request %d timed out!\n", i);
#endif /* TW_OSL_DEBUG */
			break;
		}
	}
//==============================================================================

	i_need_a_reset = tw_cl_is_reset_needed(ctlr_handle);

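	/*
	 * watchdog_index toggles between 0 and 1, so each rescheduling below
	 * arms the alternate callout instance; the next tick is thus armed
	 * on an idle callout before the (potentially long) tw_cl_reset_ctlr()
	 * call runs.
	 */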
	i = (int) ((sc->watchdog_index++) & 1);

	driver_is_active = tw_cl_is_active(ctlr_handle);

	if (i_need_a_reset) {
#ifdef    TW_OSL_DEBUG
		device_printf((sc)->bus_dev, "Watchdog rescheduled in 70 seconds\n");
#endif /* TW_OSL_DEBUG */
		my_watchdog_was_pending =
			callout_reset(&(sc->watchdog_callout[i]), 70*hz, twa_watchdog, &sc->ctlr_handle);
		tw_cl_reset_ctlr(ctlr_handle);
#ifdef    TW_OSL_DEBUG
		device_printf((sc)->bus_dev, "Watchdog reset completed!\n");
#endif /* TW_OSL_DEBUG */
	} else if (driver_is_active) {
		my_watchdog_was_pending =
			callout_reset(&(sc->watchdog_callout[i]),  5*hz, twa_watchdog, &sc->ctlr_handle);
	}
#ifdef    TW_OSL_DEBUG
	if (i_need_a_reset || my_watchdog_was_pending)
		device_printf((sc)->bus_dev, "i_need_a_reset = %d, "
		"driver_is_active = %d, my_watchdog_was_pending = %d\n",
		i_need_a_reset, driver_is_active, my_watchdog_was_pending);
#endif /* TW_OSL_DEBUG */
}


/*
 * Function name:	tw_osli_alloc_mem
 * Description:		Allocates memory needed both by CL and OSL.
 *
 * Input:		sc	-- OSL internal controller context
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
tw_osli_alloc_mem(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_UINT32			max_sg_elements;
	TW_UINT32			non_dma_mem_size;
	TW_UINT32			dma_mem_size;
	TW_INT32			error;
	TW_INT32			i;

	tw_osli_dbg_dprintf(3, sc, "entered");

	sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0;
	sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0;

	max_sg_elements = (sizeof(bus_addr_t) == 8) ?
		TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS;

	if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags,
			sc->device_id, TW_OSLI_MAX_NUM_REQUESTS,  TW_OSLI_MAX_NUM_AENS,
			&(sc->alignment), &(sc->sg_size_factor),
			&non_dma_mem_size, &dma_mem_size
			))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2008,
			"Can't get Common Layer's memory requirements",
			error);
		return(error);
	}

	if ((sc->non_dma_mem = malloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS,
				M_WAITOK)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2009,
			"Can't allocate non-dma memory",
			ENOMEM);
		return(ENOMEM);
	}

	/* Create the parent dma tag. */
	if (bus_dma_tag_create(bus_get_dma_tag(sc->bus_dev), /* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				TW_CL_MAX_IO_SIZE,	/* maxsize */
				max_sg_elements,	/* nsegments */
				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockfuncarg */
				&sc->parent_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200A,
			"Can't allocate parent DMA tag",
			ENOMEM);
		return(ENOMEM);
	}

	/* Create a dma tag for Common Layer's DMA'able memory (dma_mem). */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				dma_mem_size,		/* maxsize */
				1,			/* nsegments */
				BUS_SPACE_MAXSIZE,	/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockfuncarg */
				&sc->cmd_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200B,
			"Can't allocate DMA tag for Common Layer's "
			"DMA'able memory",
			ENOMEM);
		return(ENOMEM);
	}

	if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
		BUS_DMA_NOWAIT, &sc->cmd_map)) {
		/* Try a second time. */
		if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
			BUS_DMA_NOWAIT, &sc->cmd_map)) {
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x200C,
				"Can't allocate DMA'able memory for the "
				"Common Layer",
				ENOMEM);
			return(ENOMEM);
		}
	}

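	/*
	 * cmd_tag was created with nsegments = 1, so the load below produces
	 * a single segment; twa_map_load_callback() simply records its bus
	 * address in sc->dma_mem_phys, which twa_attach() later hands to
	 * tw_cl_init_ctlr().
	 */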
	bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
		dma_mem_size, twa_map_load_callback,
		&sc->dma_mem_phys, 0);

	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (128kB).
	 */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				TW_CL_MAX_IO_SIZE,	/* maxsize */
				max_sg_elements,	/* nsegments */
				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				twa_busdma_lock,	/* lockfunc */
				sc->io_lock,		/* lockfuncarg */
				&sc->dma_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200F,
			"Can't allocate DMA tag for data buffers",
			ENOMEM);
		return(ENOMEM);
	}

	/*
	 * Create a dma tag for ioctl data buffers; size will be the maximum
	 * possible I/O size (128kB).
	 */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				TW_CL_MAX_IO_SIZE,	/* maxsize */
				max_sg_elements,	/* nsegments */
				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				twa_busdma_lock,	/* lockfunc */
				sc->io_lock,		/* lockfuncarg */
				&sc->ioctl_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2010,
			"Can't allocate DMA tag for ioctl data buffers",
			ENOMEM);
		return(ENOMEM);
	}

	/* Create just one map for all ioctl request data buffers. */
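	/*
	 * Sharing one map means only one ioctl data buffer can be mapped at
	 * any time; tw_osli_map_request() serializes passthru mappings by
	 * taking io_lock around loads on ioctl_tag.
	 */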
	if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2011,
			"Can't create ioctl map",
			ENOMEM);
		return(ENOMEM);
	}


	/* Initialize request queues. */
	tw_osli_req_q_init(sc, TW_OSLI_FREE_Q);
	tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q);

	if ((sc->req_ctx_buf = (struct tw_osli_req_context *)
			malloc((sizeof(struct tw_osli_req_context) *
				TW_OSLI_MAX_NUM_REQUESTS),
				TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2012,
			"Failed to allocate request packets",
			ENOMEM);
		return(ENOMEM);
	}
	bzero(sc->req_ctx_buf,
		sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_REQUESTS);

	for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
		req = &(sc->req_ctx_buf[i]);
		req->ctlr = sc;
		if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) {
			tw_osli_printf(sc, "request # = %d, error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2013,
				"Can't create dma map",
				i, ENOMEM);
			return(ENOMEM);
		}

		/* Initialize the ioctl wakeup/ timeout mutex */
		req->ioctl_wake_timeout_lock = &(req->ioctl_wake_timeout_lock_handle);
		mtx_init(req->ioctl_wake_timeout_lock, "tw_ioctl_wake_timeout_lock", NULL, MTX_DEF);

		/* Insert request into the free queue. */
		tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	}

	return(0);
}



/*
 * Function name:	tw_osli_free_resources
 * Description:		Performs clean-up at the time of going down.
 *
 * Input:		sc	-- ptr to OSL internal ctlr context
 * Output:		None
 * Return value:	None
 */
static TW_VOID
tw_osli_free_resources(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_INT32			error = 0;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Detach from CAM */
	tw_osli_cam_detach(sc);

	if (sc->req_ctx_buf)
		while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
			NULL) {
			mtx_destroy(req->ioctl_wake_timeout_lock);

			if ((error = bus_dmamap_destroy(sc->dma_tag,
					req->dma_map)))
				tw_osli_dbg_dprintf(1, sc,
					"dmamap_destroy(dma) returned %d",
					error);
		}

	if ((sc->ioctl_tag) && (sc->ioctl_map))
		if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
			tw_osli_dbg_dprintf(1, sc,
				"dmamap_destroy(ioctl) returned %d", error);

	/* Free all memory allocated so far. */
	if (sc->req_ctx_buf)
		free(sc->req_ctx_buf, TW_OSLI_MALLOC_CLASS);

	if (sc->non_dma_mem)
		free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);

	if (sc->dma_mem) {
		bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
		bus_dmamem_free(sc->cmd_tag, sc->dma_mem,
			sc->cmd_map);
	}
	if (sc->cmd_tag)
		if ((error = bus_dma_tag_destroy(sc->cmd_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(cmd) returned %d", error);

	if (sc->dma_tag)
		if ((error = bus_dma_tag_destroy(sc->dma_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(dma) returned %d", error);

	if (sc->ioctl_tag)
		if ((error = bus_dma_tag_destroy(sc->ioctl_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(ioctl) returned %d", error);

	if (sc->parent_tag)
		if ((error = bus_dma_tag_destroy(sc->parent_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(parent) returned %d", error);


	/* Disconnect the interrupt handler. */
	if ((error = twa_teardown_intr(sc)))
			tw_osli_dbg_dprintf(1, sc,
				"teardown_intr returned %d", error);

	if (sc->irq_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_IRQ, sc->irq_res_id, sc->irq_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(irq) returned %d", error);


	/* Release the register window mapping. */
	if (sc->reg_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(io) returned %d", error);


	/* Destroy the control device. */
	if (sc->ctrl_dev != (struct cdev *)NULL)
		destroy_dev(sc->ctrl_dev);

	if ((error = sysctl_ctx_free(&sc->sysctl_ctxt)))
		tw_osli_dbg_dprintf(1, sc,
			"sysctl_ctx_free returned %d", error);

}



/*
 * Function name:	twa_detach
 * Description:		Called when the controller is being detached from
 *			the pci bus.
 *
 * Input:		dev	-- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_detach(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	TW_INT32		error;

	tw_osli_dbg_dprintf(3, sc, "entered");

	error = EBUSY;
	if (sc->open) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2014,
			"Device open",
			error);
		goto out;
	}

	/* Shut the controller down. */
	if ((error = twa_shutdown(dev)))
		goto out;

	/* Free all resources associated with this controller. */
	tw_osli_free_resources(sc);
	error = 0;

out:
	return(error);
}



/*
 * Function name:	twa_shutdown
 * Description:		Called at unload/shutdown time.  Lets the controller
 *			know that we are going down.
 *
 * Input:		dev	-- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_shutdown(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	TW_INT32		error = 0;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Disconnect interrupts. */
	error = twa_teardown_intr(sc);

	/* Stop watchdog task. */
	callout_drain(&(sc->watchdog_callout[0]));
	callout_drain(&(sc->watchdog_callout[1]));

	/* Disconnect from the controller. */
	if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2015,
			"Failed to shutdown Common Layer/controller",
			error);
	}
	return(error);
}



/*
 * Function name:	twa_busdma_lock
 * Description:		Function to provide synchronization during busdma_swi.
 *
 * Input:		lock_arg -- lock mutex sent as argument
 *			op -- operation (lock/unlock) expected of the function
 * Output:		None
 * Return value:	None
 */
TW_VOID
twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op)
{
	struct mtx	*lock;

	lock = (struct mtx *)lock_arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock_spin(lock);
		break;

	case BUS_DMA_UNLOCK:
		mtx_unlock_spin(lock);
		break;

	default:
		panic("Unknown operation 0x%x for twa_busdma_lock!", op);
	}
}


/*
 * Function name:	twa_pci_intr
 * Description:		Interrupt handler.  Wrapper for twa_interrupt.
 *
 * Input:		arg	-- ptr to OSL internal ctlr context
 * Output:		None
 * Return value:	None
 */
static TW_VOID
twa_pci_intr(TW_VOID *arg)
{
	struct twa_softc	*sc = (struct twa_softc *)arg;

	tw_osli_dbg_dprintf(10, sc, "entered");
	tw_cl_interrupt(&(sc->ctlr_handle));
}


/*
 * Function name:	tw_osli_fw_passthru
 * Description:		Builds a fw passthru cmd pkt, and submits it to CL.
 *
 * Input:		sc	-- ptr to OSL internal ctlr context
 *			buf	-- ptr to ioctl pkt understood by CL
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
{
	struct tw_osli_req_context		*req;
	struct tw_osli_ioctl_no_data_buf	*user_buf =
		(struct tw_osli_ioctl_no_data_buf *)buf;
	TW_TIME					end_time;
	TW_UINT32				timeout = 60;
	TW_UINT32				data_buf_size_adjusted;
	struct tw_cl_req_packet			*req_pkt;
	struct tw_cl_passthru_req_packet	*pt_req;
	TW_INT32				error;

	tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");

	if ((req = tw_osli_get_request(sc)) == NULL)
		return(EBUSY);

	req->req_handle.osl_req_ctxt = req;
	req->orig_req = buf;
	req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;

	req_pkt = &(req->req_pkt);
	req_pkt->status = 0;
	req_pkt->tw_osl_callback = tw_osl_complete_passthru;
	/* Let the Common Layer retry the request on cmd queue full. */
	req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;

	pt_req = &(req_pkt->gen_req_pkt.pt_req);
	/*
	 * Make sure that the data buffer sent to firmware is a
	 * 512 byte multiple in size.
	 */
	data_buf_size_adjusted =
		(user_buf->driver_pkt.buffer_length +
		(sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1);
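	/*
	 * The rounding above works out to (len + (factor - 1)) & ~(factor - 1);
	 * e.g. with the usual 512-byte factor, a 1000-byte user buffer is
	 * padded to 1024 bytes.
	 */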
	if ((req->length = data_buf_size_adjusted)) {
		if ((req->data = malloc(data_buf_size_adjusted,
			TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
			error = ENOMEM;
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2016,
				"Could not alloc mem for "
				"fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		/* Copy the payload. */
		if ((error = copyin((TW_VOID *)(user_buf->pdata),
			req->data,
			user_buf->driver_pkt.buffer_length)) != 0) {
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2017,
				"Could not copyin fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		pt_req->sgl_entries = 1; /* will be updated during mapping */
		req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
			TW_OSLI_REQ_FLAGS_DATA_OUT);
	} else
		pt_req->sgl_entries = 0; /* no payload */

	pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
	pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);

	if ((error = tw_osli_map_request(req)))
		goto fw_passthru_err;

	end_time = tw_osl_get_local_time() + timeout;
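	/*
	 * Sleep until tw_osl_complete_passthru() wakes us up.  mtx_sleep()
	 * returning EWOULDBLOCK means the timeout expired: either the
	 * request completed just in time (a "false" timeout), or the
	 * controller gets reset and we go back to waiting; after a reset the
	 * completion callback unmaps the request and any associated data.
	 */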
	while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
		mtx_lock(req->ioctl_wake_timeout_lock);
		req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;

		error = mtx_sleep(req, req->ioctl_wake_timeout_lock, 0,
			    "twa_passthru", timeout*hz);
		mtx_unlock(req->ioctl_wake_timeout_lock);

		if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
			error = 0;
		req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;

		if (! error) {
			if (((error = req->error_code)) ||
				((error = (req->state !=
				TW_OSLI_REQ_STATE_COMPLETE))) ||
				((error = req_pkt->status)))
				goto fw_passthru_err;
			break;
		}

		if (req_pkt->status) {
			error = req_pkt->status;
			goto fw_passthru_err;
		}

		if (error == EWOULDBLOCK) {
			/* Time out! */
			if ((!(req->error_code))                       &&
			    (req->state == TW_OSLI_REQ_STATE_COMPLETE) &&
			    (!(req_pkt->status))			  ) {
#ifdef    TW_OSL_DEBUG
				tw_osli_printf(sc, "request = %p",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x7777,
					"FALSE Passthru timeout!",
					req);
#endif /* TW_OSL_DEBUG */
				error = 0; /* False error */
				break;
			}
			if (!(tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) {
#ifdef    TW_OSL_DEBUG
				tw_osli_printf(sc, "request = %p",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x2018,
					"Passthru request timed out!",
					req);
#else  /* TW_OSL_DEBUG */
			device_printf((sc)->bus_dev, "Passthru request timed out!\n");
#endif /* TW_OSL_DEBUG */
				tw_cl_reset_ctlr(&(req->ctlr->ctlr_handle));
			}

			error = 0;
			end_time = tw_osl_get_local_time() + timeout;
			continue;
			/*
			 * Don't touch req after a reset.  It (and any
			 * associated data) will be
			 * unmapped by the callback.
			 */
		}
		/*
		 * Either the request got completed, or we were woken up by a
		 * signal.  Calculate the new timeout, in case it was the latter.
		 */
		timeout = (end_time - tw_osl_get_local_time());
	} /* End of while loop */

	/* If there was a payload, copy it back. */
	if ((!error) && (req->length))
		if ((error = copyout(req->data, user_buf->pdata,
			user_buf->driver_pkt.buffer_length)))
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2019,
				"Could not copyout fw_passthru data_buf",
				error);

fw_passthru_err:

	if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
		error = EBUSY;

	user_buf->driver_pkt.os_status = error;
	/* Free resources. */
	if (req->data)
		free(req->data, TW_OSLI_MALLOC_CLASS);
	tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	return(error);
}



/*
 * Function name:	tw_osl_complete_passthru
 * Description:		Called to complete passthru requests.
 *
 * Input:		req_handle	-- ptr to request handle
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle)
{
	struct tw_osli_req_context	*req = req_handle->osl_req_ctxt;
	struct tw_cl_req_packet		*req_pkt =
		(struct tw_cl_req_packet *)(&req->req_pkt);
	struct twa_softc		*sc = req->ctlr;

	tw_osli_dbg_dprintf(5, sc, "entered");

	if (req->state != TW_OSLI_REQ_STATE_BUSY) {
		tw_osli_printf(sc, "request = %p, status = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201B,
			"Unposted command completed!!",
			req, req->state);
	}

	/*
	 * Remove request from the busy queue.  Just mark it complete.
	 * There's no need to move it into the complete queue as we are
	 * going to be done with it right now.
	 */
	req->state = TW_OSLI_REQ_STATE_COMPLETE;
	tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q);

	tw_osli_unmap_request(req);

	/*
	 * Don't do a wake up if there was an error even before the request
	 * was sent down to the Common Layer, and we hadn't gotten an
	 * EINPROGRESS.  The request originator will then be returned an
	 * error, and he can do the clean-up.
	 */
	if ((req->error_code) && (!(req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)))
		return;

	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
		if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) {
			/* Wake up the sleeping command originator. */
			tw_osli_dbg_dprintf(5, sc,
				"Waking up originator of request %p", req);
			req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
			wakeup_one(req);
		} else {
			/*
			 * If the request completed even before mtx_sleep
			 * was called, simply return.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED)
				return;

			if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
				return;

			tw_osli_printf(sc, "request = %p",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x201C,
				"Passthru callback called, "
				"and caller not sleeping",
				req);
		}
	} else {
		tw_osli_printf(sc, "request = %p",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201D,
			"Passthru callback called for non-passthru request",
			req);
	}
}



/*
 * Function name:	tw_osli_get_request
 * Description:		Gets a request pkt from the free queue.
 *
 * Input:		sc	-- ptr to OSL internal ctlr context
 * Output:		None
 * Return value:	ptr to request pkt	-- success
 *			NULL			-- failure
 */
struct tw_osli_req_context *
tw_osli_get_request(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;

	tw_osli_dbg_dprintf(4, sc, "entered");

	/* Get a free request packet. */
	req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q);

	/* Initialize some fields to their defaults. */
	if (req) {
		req->req_handle.osl_req_ctxt = NULL;
		req->req_handle.cl_req_ctxt = NULL;
		req->req_handle.is_io = 0;
		req->data = NULL;
		req->length = 0;
		req->deadline = 0;
		req->real_data = NULL;
		req->real_length = 0;
		req->state = TW_OSLI_REQ_STATE_INIT;/* req being initialized */
		req->flags = 0;
		req->error_code = 0;
		req->orig_req = NULL;

		bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet));

	}
	return(req);
}



/*
 * Function name:	twa_map_load_data_callback
 * Description:		Callback of bus_dmamap_load for the buffer associated
 *			with data.  Updates the cmd pkt (size/sgl_entries
 *			fields, as applicable) to reflect the number of sg
 *			elements.
 *
 * Input:		arg	-- ptr to OSL internal request context
 *			segs	-- ptr to a list of segment descriptors
 *			nsegments--# of segments
 *			error	-- 0 if no errors encountered before callback,
 *				   non-zero if errors were encountered
 * Output:		None
 * Return value:	None
 */
static TW_VOID
twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs,
	TW_INT32 nsegments, TW_INT32 error)
{
	struct tw_osli_req_context	*req =
		(struct tw_osli_req_context *)arg;
	struct twa_softc		*sc = req->ctlr;
	struct tw_cl_req_packet		*req_pkt = &(req->req_pkt);

	tw_osli_dbg_dprintf(10, sc, "entered");

	if (error == EINVAL) {
		req->error_code = error;
		return;
	}

	/* Mark the request as currently being processed. */
	req->state = TW_OSLI_REQ_STATE_BUSY;
	/* Move the request into the busy queue. */
	tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);

	req->flags |= TW_OSLI_REQ_FLAGS_MAPPED;

	if (error == EFBIG) {
		req->error_code = error;
		goto out;
	}

	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
		struct tw_cl_passthru_req_packet	*pt_req;

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/*
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREWRITE);
		}

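		/*
		 * sgl_entries was seeded to 1 by tw_osli_fw_passthru(), so
		 * adding (nsegments - 1) leaves it equal to the number of SG
		 * elements busdma actually built.
		 */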
		pt_req = &(req_pkt->gen_req_pkt.pt_req);
		pt_req->sg_list = (TW_UINT8 *)segs;
		pt_req->sgl_entries += (nsegments - 1);
		error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	} else {
		struct tw_cl_scsi_req_packet	*scsi_req;

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/*
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREWRITE);
		}

		scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
		scsi_req->sg_list = (TW_UINT8 *)segs;
		scsi_req->sgl_entries += (nsegments - 1);
		error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	}

out:
	if (error) {
		req->error_code = error;
		req_pkt->tw_osl_callback(&(req->req_handle));
		/*
		 * If the caller had been returned EINPROGRESS, and he has
		 * registered a callback for handling completion, the callback
		 * will never get called because we were unable to submit the
		 * request.  So, free up the request right here.
		 */
		if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
			tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	}
}



/*
 * Function name:	twa_map_load_callback
 * Description:		Callback of bus_dmamap_load for the buffer associated
 *			with a cmd pkt.
 *
 * Input:		arg	-- ptr to variable to hold phys addr
 *			segs	-- ptr to a list of segment descriptors
 *			nsegments--# of segments
 *			error	-- 0 if no errors encountered before callback,
 *				   non-zero if errors were encountered
 * Output:		None
 * Return value:	None
 */
static TW_VOID
twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs,
	TW_INT32 nsegments, TW_INT32 error)
{
	*((bus_addr_t *)arg) = segs[0].ds_addr;
}



/*
 * Function name:	tw_osli_map_request
 * Description:		Maps a cmd pkt and data associated with it, into
 *			DMA'able memory.
 *
 * Input:		req	-- ptr to request pkt
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_osli_map_request(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;
	TW_INT32		error = 0;

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* If the command involves data, map that too. */
	if (req->data != NULL) {
		/*
		 * It's sufficient for the data pointer to be 4-byte aligned
		 * to work with 9000.  However, if 4-byte aligned addresses
		 * are passed to bus_dmamap_load, we can get back sg elements
		 * that are not 512-byte multiples in size.  So, we will let
		 * only those buffers that are 512-byte aligned to pass
		 * through, and bounce the rest, so as to make sure that we
		 * always get back sg elements that are 512-byte multiples
		 * in size.
		 */
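		/*
		 * For example, with the usual 512-byte sg_size_factor, a
		 * buffer that is only 4-byte aligned (or whose length is not
		 * a multiple of 512) is copied through a bounce buffer whose
		 * length is rounded up to a 512-byte multiple; the original
		 * pointer and length are restored when the request is
		 * unmapped.
		 */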
		if (((vm_offset_t)req->data % sc->sg_size_factor) ||
			(req->length % sc->sg_size_factor)) {
			req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED;
			/* Save original data pointer and length. */
			req->real_data = req->data;
			req->real_length = req->length;
			req->length = (req->length +
				(sc->sg_size_factor - 1)) &
				~(sc->sg_size_factor - 1);
			req->data = malloc(req->length, TW_OSLI_MALLOC_CLASS,
					M_NOWAIT);
			if (req->data == NULL) {
				tw_osli_printf(sc, "error = %d",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x201E,
					"Failed to allocate memory "
					"for bounce buffer",
					ENOMEM);
				/* Restore original data pointer and length. */
				req->data = req->real_data;
				req->length = req->real_length;
				return(ENOMEM);
			}
		}

		/*
		 * Map the data buffer into bus space and build the SG list.
		 */
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
			/* Lock against multiple simultaneous ioctl calls. */
			mtx_lock_spin(sc->io_lock);
			error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map,
				req->data, req->length,
				twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
			mtx_unlock_spin(sc->io_lock);
		} else if (req->flags & TW_OSLI_REQ_FLAGS_CCB) {
			error = bus_dmamap_load_ccb(sc->dma_tag, req->dma_map,
				req->orig_req, twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
		} else {
			/*
			 * There's only one CAM I/O thread running at a time.
			 * So, there's no need to hold the io_lock.
			 */
			error = bus_dmamap_load(sc->dma_tag, req->dma_map,
				req->data, req->length,
				twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
		}

		if (!error)
			error = req->error_code;
		else {
			if (error == EINPROGRESS) {
				/*
				 * Specifying sc->io_lock as the lockfuncarg
				 * in ...tag_create should protect the access
				 * of ...FLAGS_MAPPED from the callback.
				 */
				mtx_lock_spin(sc->io_lock);
				if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED))
					req->flags |= TW_OSLI_REQ_FLAGS_IN_PROGRESS;
				tw_osli_disallow_new_requests(sc, &(req->req_handle));
				mtx_unlock_spin(sc->io_lock);
				error = 0;
			} else {
				tw_osli_printf(sc, "error = %d",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x9999,
					"Failed to map DMA memory "
					"for I/O request",
					error);
				req->flags |= TW_OSLI_REQ_FLAGS_FAILED;
				/* Free alignment buffer if it was used. */
				if (req->flags &
					TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
					free(req->data, TW_OSLI_MALLOC_CLASS);
					/*
					 * Restore original data pointer
					 * and length.
					 */
					req->data = req->real_data;
					req->length = req->real_length;
				}
			}
		}

	} else {
		/* Mark the request as currently being processed. */
		req->state = TW_OSLI_REQ_STATE_BUSY;
		/* Move the request into the busy queue. */
		tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU)
			error = tw_cl_fw_passthru(&sc->ctlr_handle,
					&(req->req_pkt), &(req->req_handle));
		else
			error = tw_cl_start_io(&sc->ctlr_handle,
					&(req->req_pkt), &(req->req_handle));
		if (error) {
			req->error_code = error;
			req->req_pkt.tw_osl_callback(&(req->req_handle));
		}
	}
	return(error);
}



/*
 * Function name:	tw_osli_unmap_request
 * Description:		Undoes the mapping done by tw_osli_map_request.
 *
 * Input:		req	-- ptr to request pkt
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osli_unmap_request(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* If the command involved data, unmap that too. */
	if (req->data != NULL) {
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
			/* Lock against multiple simultaneous ioctl calls. */
			mtx_lock_spin(sc->io_lock);

			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
				bus_dmamap_sync(sc->ioctl_tag,
					sc->ioctl_map, BUS_DMASYNC_POSTREAD);

				/*
				 * If we are using a bounce buffer, and we are
				 * reading data, copy the real data in.
				 */
				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
					bcopy(req->data, req->real_data,
						req->real_length);
			}

			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
				bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
					BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map);

			mtx_unlock_spin(sc->io_lock);
		} else {
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
				bus_dmamap_sync(sc->dma_tag,
					req->dma_map, BUS_DMASYNC_POSTREAD);

				/*
				 * If we are using a bounce buffer, and we are
				 * reading data, copy the real data in.
				 */
				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
					bcopy(req->data, req->real_data,
						req->real_length);
			}
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
				bus_dmamap_sync(sc->dma_tag, req->dma_map,
					BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->dma_tag, req->dma_map);
		}
	}

	/* Free alignment buffer if it was used. */
	if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
		free(req->data, TW_OSLI_MALLOC_CLASS);
		/* Restore original data pointer and length. */
		req->data = req->real_data;
		req->length = req->real_length;
	}
}



#ifdef TW_OSL_DEBUG

TW_VOID	twa_report_stats(TW_VOID);
TW_VOID	twa_reset_stats(TW_VOID);
TW_VOID	tw_osli_print_ctlr_stats(struct twa_softc *sc);
TW_VOID twa_print_req_info(struct tw_osli_req_context *req);

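/*
 * These helpers are intended to be invoked by hand from the in-kernel
 * debugger, e.g. "call twa_report_stats" at the ddb prompt.
 */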

/*
 * Function name:	twa_report_stats
 * Description:		For being called from ddb.  Calls functions that print
 *			OSL and CL internal stats for the controller.
 *
 * Input:		None
 * Output:		None
 * Return value:	None
 */
TW_VOID
twa_report_stats(TW_VOID)
{
	struct twa_softc	*sc;
	TW_INT32		i;

	for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
		tw_osli_print_ctlr_stats(sc);
		tw_cl_print_ctlr_stats(&sc->ctlr_handle);
	}
}



/*
 * Function name:	tw_osli_print_ctlr_stats
 * Description:		For being called from ddb.  Prints OSL controller stats
 *
 * Input:		sc	-- ptr to OSL internal controller context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osli_print_ctlr_stats(struct twa_softc *sc)
{
	twa_printf(sc, "osl_ctlr_ctxt = %p\n", sc);
	twa_printf(sc, "OSLq type  current  max\n");
	twa_printf(sc, "free      %04d     %04d\n",
		sc->q_stats[TW_OSLI_FREE_Q].cur_len,
		sc->q_stats[TW_OSLI_FREE_Q].max_len);
	twa_printf(sc, "busy      %04d     %04d\n",
		sc->q_stats[TW_OSLI_BUSY_Q].cur_len,
		sc->q_stats[TW_OSLI_BUSY_Q].max_len);
}



/*
 * Function name:	twa_print_req_info
 * Description:		For being called from ddb.  Calls functions that print
 *			OSL and CL internal details for the request.
 *
 * Input:		req	-- ptr to OSL internal request context
 * Output:		None
 * Return value:	None
 */
TW_VOID
twa_print_req_info(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;

	twa_printf(sc, "OSL details for request:\n");
	twa_printf(sc, "osl_req_ctxt = %p, cl_req_ctxt = %p\n"
		"data = %p, length = 0x%x, real_data = %p, real_length = 0x%x\n"
		"state = 0x%x, flags = 0x%x, error = 0x%x, orig_req = %p\n"
		"next_req = %p, prev_req = %p, dma_map = %p\n",
		req->req_handle.osl_req_ctxt, req->req_handle.cl_req_ctxt,
		req->data, req->length, req->real_data, req->real_length,
		req->state, req->flags, req->error_code, req->orig_req,
		req->link.next, req->link.prev, req->dma_map);
	tw_cl_print_req_info(&(req->req_handle));
}



/*
 * Function name:	twa_reset_stats
 * Description:		For being called from ddb.
 *			Resets some OSL controller stats.
 *
 * Input:		None
 * Output:		None
 * Return value:	None
 */
TW_VOID
twa_reset_stats(TW_VOID)
{
	struct twa_softc	*sc;
	TW_INT32		i;

	for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
		sc->q_stats[TW_OSLI_FREE_Q].max_len = 0;
		sc->q_stats[TW_OSLI_BUSY_Q].max_len = 0;
		tw_cl_reset_stats(&sc->ctlr_handle);
	}
}

#endif /* TW_OSL_DEBUG */
