/* mpt.h revision 164837 */
1193645Ssimon/* $FreeBSD: head/sys/dev/mpt/mpt.h 164837 2006-12-03 00:28:11Z mjacob $ */
2193645Ssimon/*-
3193645Ssimon * Generic defines for LSI '909 FC  adapters.
4193645Ssimon * FreeBSD Version.
5193645Ssimon *
6193645Ssimon * Copyright (c)  2000, 2001 by Greg Ansley
7193645Ssimon *
8193645Ssimon * Redistribution and use in source and binary forms, with or without
9193645Ssimon * modification, are permitted provided that the following conditions
10193645Ssimon * are met:
11193645Ssimon * 1. Redistributions of source code must retain the above copyright
12193645Ssimon *    notice immediately at the beginning of the file, without modification,
13193645Ssimon *    this list of conditions, and the following disclaimer.
14193645Ssimon * 2. The name of the author may not be used to endorse or promote products
15193645Ssimon *    derived from this software without specific prior written permission.
16193645Ssimon *
17193645Ssimon * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18193645Ssimon * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19193645Ssimon * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20193645Ssimon * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21193645Ssimon * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22193645Ssimon * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23193645Ssimon * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24193645Ssimon * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25193645Ssimon * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26193645Ssimon * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27193645Ssimon * SUCH DAMAGE.
28193645Ssimon */
29193645Ssimon/*-
30193645Ssimon * Copyright (c) 2002, 2006 by Matthew Jacob
31193645Ssimon * All rights reserved.
32193645Ssimon *
33193645Ssimon * Redistribution and use in source and binary forms, with or without
34193645Ssimon * modification, are permitted provided that the following conditions are
35193645Ssimon * met:
36193645Ssimon * 1. Redistributions of source code must retain the above copyright
37193645Ssimon *    notice, this list of conditions and the following disclaimer.
38193645Ssimon * 2. Redistributions in binary form must reproduce at minimum a disclaimer
39193645Ssimon *    substantially similar to the "NO WARRANTY" disclaimer below
40193645Ssimon *    ("Disclaimer") and any redistribution must be conditioned upon including
41193645Ssimon *    a substantially similar Disclaimer requirement for further binary
42193645Ssimon *    redistribution.
43193645Ssimon * 3. Neither the names of the above listed copyright holders nor the names
44193645Ssimon *    of any contributors may be used to endorse or promote products derived
45193645Ssimon *    from this software without specific prior written permission.
46193645Ssimon *
47193645Ssimon * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
48193645Ssimon * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49193645Ssimon * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50193645Ssimon * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
51193645Ssimon * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
52193645Ssimon * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
53193645Ssimon * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
54193645Ssimon * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
55193645Ssimon * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
56193645Ssimon * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
57193645Ssimon * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58193645Ssimon *
59193645Ssimon * Support from Chris Ellsworth in order to make SAS adapters work
60193645Ssimon * is gratefully acknowledged.
61193645Ssimon *
62193645Ssimon *
63193645Ssimon * Support from LSI-Logic has also gone a great deal toward making this a
64193645Ssimon * workable subsystem and is gratefully acknowledged.
65193645Ssimon */
66193645Ssimon/*
67193645Ssimon * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
68193645Ssimon * Copyright (c) 2004, 2005 Justin T. Gibbs
69193645Ssimon * Copyright (c) 2005, WHEEL Sp. z o.o.
70193645Ssimon * All rights reserved.
71193645Ssimon *
72193645Ssimon * Redistribution and use in source and binary forms, with or without
73193645Ssimon * modification, are permitted provided that the following conditions are
74193645Ssimon * met:
75193645Ssimon * 1. Redistributions of source code must retain the above copyright
76193645Ssimon *    notice, this list of conditions and the following disclaimer.
77193645Ssimon * 2. Redistributions in binary form must reproduce at minimum a disclaimer
78193645Ssimon *    substantially similar to the "NO WARRANTY" disclaimer below
79193645Ssimon *    ("Disclaimer") and any redistribution must be conditioned upon including
80193645Ssimon *    a substantially similar Disclaimer requirement for further binary
81193645Ssimon *    redistribution.
82193645Ssimon * 3. Neither the names of the above listed copyright holders nor the names
83193645Ssimon *    of any contributors may be used to endorse or promote products derived
84193645Ssimon *    from this software without specific prior written permission.
85193645Ssimon *
86193645Ssimon * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
87 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
88 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
89 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
90 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
91 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
92 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
93 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
94 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
95 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
96 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97 */
98
99#ifndef _MPT_H_
100#define _MPT_H_
101
102/********************************* OS Includes ********************************/
103#include <sys/types.h>
104#include <sys/param.h>
105#include <sys/systm.h>
106#include <sys/endian.h>
107#include <sys/eventhandler.h>
108#if __FreeBSD_version < 500000
109#include <sys/kernel.h>
110#include <sys/queue.h>
111#include <sys/malloc.h>
112#else
113#include <sys/lock.h>
114#include <sys/kernel.h>
115#include <sys/queue.h>
116#include <sys/malloc.h>
117#include <sys/mutex.h>
118#include <sys/condvar.h>
119#endif
120#include <sys/proc.h>
121#include <sys/bus.h>
122#include <sys/module.h>
123
124#include <machine/cpu.h>
125#include <machine/resource.h>
126
127#if __FreeBSD_version < 500000
128#include <machine/bus.h>
129#include <machine/clock.h>
130#endif
131
132#include <sys/rman.h>
133
134#if __FreeBSD_version < 500000
135#include <pci/pcireg.h>
136#include <pci/pcivar.h>
137#else
138#include <dev/pci/pcireg.h>
139#include <dev/pci/pcivar.h>
140#endif
141
142#include <machine/bus.h>
143#include "opt_ddb.h"
144
145/**************************** Register Definitions ****************************/
146#include <dev/mpt/mpt_reg.h>
147
148/******************************* MPI Definitions ******************************/
149#include <dev/mpt/mpilib/mpi_type.h>
150#include <dev/mpt/mpilib/mpi.h>
151#include <dev/mpt/mpilib/mpi_cnfg.h>
152#include <dev/mpt/mpilib/mpi_ioc.h>
153#include <dev/mpt/mpilib/mpi_raid.h>
154
155/* XXX For mpt_debug.c */
156#include <dev/mpt/mpilib/mpi_init.h>
157
158/****************************** Misc Definitions ******************************/
159#define MPT_OK (0)
160#define MPT_FAIL (0x10000)
161
162#define NUM_ELEMENTS(array) (sizeof(array) / sizeof(*array))
163
164#define	MPT_ROLE_NONE		0
165#define	MPT_ROLE_INITIATOR	1
166#define	MPT_ROLE_TARGET		2
167#define	MPT_ROLE_BOTH		3
168#define	MPT_ROLE_DEFAULT	MPT_ROLE_INITIATOR
169
170/**************************** Forward Declarations ****************************/
171struct mpt_softc;
172struct mpt_personality;
173typedef struct req_entry request_t;
174
175/************************* Personality Module Support *************************/
176typedef int mpt_load_handler_t(struct mpt_personality *);
177typedef int mpt_probe_handler_t(struct mpt_softc *);
178typedef int mpt_attach_handler_t(struct mpt_softc *);
179typedef int mpt_enable_handler_t(struct mpt_softc *);
180typedef void mpt_ready_handler_t(struct mpt_softc *);
181typedef int mpt_event_handler_t(struct mpt_softc *, request_t *,
182				MSG_EVENT_NOTIFY_REPLY *);
183typedef void mpt_reset_handler_t(struct mpt_softc *, int /*type*/);
184/* XXX Add return value and use for veto? */
185typedef void mpt_shutdown_handler_t(struct mpt_softc *);
186typedef void mpt_detach_handler_t(struct mpt_softc *);
187typedef int mpt_unload_handler_t(struct mpt_personality *);
188
/*
 * A personality module encapsulates one protocol/feature layer of the
 * driver.  MPT_PERS_FIRST_HANDLER and MPT_PERS_LAST_HANDLER bound the
 * run of handler pointers, so 'load' must remain the first handler
 * member and 'unload' the last.
 */
struct mpt_personality
{
	const char		*name;		/* Personality name */
	uint32_t		 id;		/* Assigned identifier. */
	u_int			 use_count;	/* Instances using personality*/
	mpt_load_handler_t	*load;		/* configure personality */
#define MPT_PERS_FIRST_HANDLER(pers) (&(pers)->load)
	mpt_probe_handler_t	*probe;		/* configure personality */
	mpt_attach_handler_t	*attach;	/* initialize device instance */
	mpt_enable_handler_t	*enable;	/* enable device */
	mpt_ready_handler_t	*ready;		/* final open for business */
	mpt_event_handler_t	*event;		/* Handle MPI event. */
	mpt_reset_handler_t	*reset;		/* Re-init after reset. */
	mpt_shutdown_handler_t	*shutdown;	/* Shutdown instance. */
	mpt_detach_handler_t	*detach;	/* release device instance */
	mpt_unload_handler_t	*unload;	/* Shutdown personality */
#define MPT_PERS_LAST_HANDLER(pers) (&(pers)->unload)
};
207
208int mpt_modevent(module_t, int, void *);
209
210/* Maximum supported number of personalities. */
211#define MPT_MAX_PERSONALITIES	(15)
212
213#define MPT_PERSONALITY_DEPEND(name, dep, vmin, vpref, vmax) \
214	MODULE_DEPEND(name, dep, vmin, vpref, vmax)
215
216#define DECLARE_MPT_PERSONALITY(name, order)				  \
217	static moduledata_t name##_mod = {				  \
218		#name, mpt_modevent, &name##_personality		  \
219	};								  \
220	DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, order);	  \
221	MODULE_VERSION(name, 1);					  \
222	MPT_PERSONALITY_DEPEND(name, mpt_core, 1, 1, 1)
223
224/******************************* Bus DMA Support ******************************/
225/* XXX Need to update bus_dmamap_sync to take a range argument. */
226#define bus_dmamap_sync_range(dma_tag, dmamap, offset, len, op)	\
227	bus_dmamap_sync(dma_tag, dmamap, op)
228
229#if __FreeBSD_version >= 501102
230#define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary,	\
231			   lowaddr, highaddr, filter, filterarg,	\
232			   maxsize, nsegments, maxsegsz, flags,		\
233			   dma_tagp)					\
234	bus_dma_tag_create(parent_tag, alignment, boundary,		\
235			   lowaddr, highaddr, filter, filterarg,	\
236			   maxsize, nsegments, maxsegsz, flags,		\
237			   busdma_lock_mutex, &Giant,			\
238			   dma_tagp)
239#else
240#define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary,	\
241			   lowaddr, highaddr, filter, filterarg,	\
242			   maxsize, nsegments, maxsegsz, flags,		\
243			   dma_tagp)					\
244	bus_dma_tag_create(parent_tag, alignment, boundary,		\
245			   lowaddr, highaddr, filter, filterarg,	\
246			   maxsize, nsegments, maxsegsz, flags,		\
247			   dma_tagp)
248#endif
249
/* Callback argument for mpt_map_rquest(): records the busdma mapping result. */
struct mpt_map_info {
	struct mpt_softc *mpt;		/* owning controller */
	int		  error;	/* busdma callback error status */
	uint32_t	  phys;		/* mapped bus address (32 bits) */
};
255
256void mpt_map_rquest(void *, bus_dma_segment_t *, int, int);
257
258/**************************** Kernel Thread Support ***************************/
259#if __FreeBSD_version > 500005
260#define mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
261	kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
262#else
263#define mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
264	kthread_create(func, farg, proc_ptr, fmtstr, arg)
265#endif
266
267/****************************** Timer Facilities ******************************/
268#if __FreeBSD_version > 500000
269#define mpt_callout_init(c)	callout_init(c, /*mpsafe*/0);
270#else
271#define mpt_callout_init(c)	callout_init(c);
272#endif
273
/********************************* Endianness *********************************/
275static __inline uint64_t
276u64toh(U64 s)
277{
278	uint64_t result;
279
280	result = le32toh(s.Low);
281	result |= ((uint64_t)le32toh(s.High)) << 32;
282	return (result);
283}
284
285/**************************** MPI Transaction State ***************************/
/*
 * Request lifecycle state.  These are flag bits, not a simple sequence:
 * a request may carry several at once (REQ_STATE_MASK covers them all).
 */
typedef enum {
	REQ_STATE_NIL		= 0x00,
	REQ_STATE_FREE		= 0x01,
	REQ_STATE_ALLOCATED	= 0x02,
	REQ_STATE_QUEUED	= 0x04,
	REQ_STATE_DONE		= 0x08,
	REQ_STATE_TIMEDOUT	= 0x10,
	REQ_STATE_NEED_WAKEUP	= 0x20,
	REQ_STATE_LOCKED	= 0x80,	/* can't be freed */
	REQ_STATE_MASK		= 0xFF
} mpt_req_state_t;
297
/*
 * Tracking structure for a single request/reply transaction with the IOC.
 */
struct req_entry {
	TAILQ_ENTRY(req_entry) links;	/* Pointer to next in list */
	mpt_req_state_t	state;		/* Request State Information */
	uint16_t	index;		/* Index of this entry */
	uint16_t	IOCStatus;	/* Completion status */
	uint16_t	ResponseCode;	/* TMF Response Code */
	uint16_t	serno;		/* serial number */
	union ccb      *ccb;		/* CAM request */
	void	       *req_vbuf;	/* Virtual Address of Entry */
	void	       *sense_vbuf;	/* Virtual Address of sense data */
	bus_addr_t	req_pbuf;	/* Physical Address of Entry */
	bus_addr_t	sense_pbuf;	/* Physical Address of sense data */
	bus_dmamap_t	dmap;		/* DMA map for data buffers */
	struct req_entry *chain;	/* for SGE overallocations */
};
313
314/**************************** MPI Target State Info ***************************/
315
/*
 * Per-command target mode state, stored in the tail of a request
 * frame (see MPT_TGT_STATE()).
 */
typedef struct {
	uint32_t reply_desc;	/* current reply descriptor */
	uint32_t resid;		/* current data residual */
	uint32_t bytes_xfered;	/* current relative offset */
	union ccb *ccb;		/* pointer to currently active ccb */
	request_t *req;		/* pointer to currently active assist request */
	uint32_t
		is_local : 1,	/* NOTE(review): presumably a driver-internal command — confirm */
		nxfers	 : 31;
	uint32_t tag_id;	/* presumably built via MPT_MAKE_TAGID — verify */
	enum {
		TGT_STATE_NIL,
		TGT_STATE_LOADING,
		TGT_STATE_LOADED,
		TGT_STATE_IN_CAM,
		TGT_STATE_SETTING_UP_FOR_DATA,
		TGT_STATE_MOVING_DATA,
		TGT_STATE_MOVING_DATA_AND_STATUS,
		TGT_STATE_SENDING_STATUS
	} state;
} mpt_tgt_state_t;
337
338/*
339 * When we get an incoming command it has its own tag which is called the
340 * IoIndex. This is the value we gave that particular command buffer when
341 * we originally assigned it. It's just a number, really. The FC card uses
342 * it as an RX_ID. We can use it to index into mpt->tgt_cmd_ptrs, which
343 * contains pointers the request_t structures related to that IoIndex.
344 *
345 * What *we* do is construct a tag out of the index for the target command
346 * which owns the incoming ATIO plus a rolling sequence number.
347 */
348#define	MPT_MAKE_TAGID(mpt, req, ioindex)	\
349 ((ioindex << 18) | (((mpt->sequence++) & 0x3f) << 12) | (req->index & 0xfff))
350
351#ifdef	INVARIANTS
352#define	MPT_TAG_2_REQ(a, b)		mpt_tag_2_req(a, (uint32_t) b)
353#else
354#define	MPT_TAG_2_REQ(mpt, tag)		mpt->tgt_cmd_ptrs[tag >> 18]
355#endif
356
357#define	MPT_TGT_STATE(mpt, req) ((mpt_tgt_state_t *) \
358    (&((uint8_t *)req->req_vbuf)[MPT_RQSL(mpt) - sizeof (mpt_tgt_state_t)]))
359
360STAILQ_HEAD(mpt_hdr_stailq, ccb_hdr);
361#define	MPT_MAX_LUNS	256
/* Per-lun target mode resources (see mpt_softc.trt / trt_wildcard). */
typedef struct {
	struct mpt_hdr_stailq	atios;	/* queued ATIO ccb headers */
	struct mpt_hdr_stailq	inots;	/* queued immediate-notify ccb headers */
	int enabled;			/* nonzero when enabled for target mode */
} tgt_resource_t;
367#define	MPT_MAX_ELS	64
368
369/**************************** Handler Registration ****************************/
370/*
371 * Global table of registered reply handlers.  The
372 * handler is indicated by byte 3 of the request
373 * index submitted to the IOC.  This allows the
374 * driver core to perform generic processing without
375 * any knowledge of per-personality behavior.
376 *
377 * MPT_NUM_REPLY_HANDLERS must be a power of 2
378 * to allow the easy generation of a mask.
379 *
380 * The handler offsets used by the core are hard coded
381 * allowing faster code generation when assigning a handler
382 * to a request.  All "personalities" must use the
383 * the handler registration mechanism.
384 *
385 * The IOC handlers that are rarely executed are placed
386 * at the tail of the table to make it more likely that
387 * all commonly executed handlers fit in a single cache
388 * line.
389 */
390#define MPT_NUM_REPLY_HANDLERS		(32)
391#define MPT_REPLY_HANDLER_EVENTS	MPT_CBI_TO_HID(0)
392#define MPT_REPLY_HANDLER_CONFIG	MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-1)
393#define MPT_REPLY_HANDLER_HANDSHAKE	MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-2)
394typedef int mpt_reply_handler_t(struct mpt_softc *mpt, request_t *request,
395    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame);
396typedef union {
397	mpt_reply_handler_t	*reply_handler;
398} mpt_handler_t;
399
/* Classes of handlers accepted by mpt_register_handler(). */
typedef enum {
	MPT_HANDLER_REPLY,
	MPT_HANDLER_EVENT,
	MPT_HANDLER_RESET,
	MPT_HANDLER_SHUTDOWN
} mpt_handler_type;
406
/* List node wrapping one registered handler. */
struct mpt_handler_record
{
	LIST_ENTRY(mpt_handler_record)	links;
	mpt_handler_t			handler;
};
412
413LIST_HEAD(mpt_handler_list, mpt_handler_record);
414
415/*
416 * The handler_id is currently unused but would contain the
417 * handler ID used in the MsgContext field to allow direction
418 * of replies to the handler.  Registrations that don't require
419 * a handler id can pass in NULL for the handler_id.
420 *
421 * Deregistrations for handlers without a handler id should
422 * pass in MPT_HANDLER_ID_NONE.
423 */
424#define MPT_HANDLER_ID_NONE		(0xFFFFFFFF)
425int mpt_register_handler(struct mpt_softc *, mpt_handler_type,
426			 mpt_handler_t, uint32_t *);
427int mpt_deregister_handler(struct mpt_softc *, mpt_handler_type,
428			   mpt_handler_t, uint32_t);
429
430/******************* Per-Controller Instance Data Structures ******************/
431TAILQ_HEAD(req_queue, req_entry);
432
/* Structure for saving proper values for modifiable PCI config registers */
struct mpt_pci_cfg {
	uint16_t Command;
	uint16_t LatencyTimer_LineSize;
	uint32_t IO_BAR;
	uint32_t Mem0_BAR[2];
	uint32_t Mem1_BAR[2];
	uint32_t ROM_BAR;
	uint8_t  IntLine;
	uint32_t PMCSR;
};
444
/* Flag bits for mpt_raid_volume.flags. */
typedef enum {
	MPT_RVF_NONE		= 0x0,
	MPT_RVF_ACTIVE		= 0x1,
	MPT_RVF_ANNOUNCED	= 0x2,
	MPT_RVF_UP2DATE		= 0x4,
	MPT_RVF_REFERENCED	= 0x8,
	MPT_RVF_WCE_CHANGED	= 0x10
} mpt_raid_volume_flags;
453
/* Per-volume RAID state. */
struct mpt_raid_volume {
	CONFIG_PAGE_RAID_VOL_0	       *config_page;	/* cached RAID volume page 0 */
	MPI_RAID_VOL_INDICATOR		sync_progress;	/* resync progress indicator */
	mpt_raid_volume_flags		flags;		/* MPT_RVF_* bits */
	u_int				quiesced_disks;
};
460
/* Flag bits for mpt_raid_disk.flags. */
typedef enum {
	MPT_RDF_NONE		= 0x00,
	MPT_RDF_ACTIVE		= 0x01,
	MPT_RDF_ANNOUNCED	= 0x02,
	MPT_RDF_UP2DATE		= 0x04,
	MPT_RDF_REFERENCED	= 0x08,
	MPT_RDF_QUIESCING	= 0x10,
	MPT_RDF_QUIESCED	= 0x20
} mpt_raid_disk_flags;
470
/* Per-physical-disk RAID state. */
struct mpt_raid_disk {
	CONFIG_PAGE_RAID_PHYS_DISK_0	config_page;	/* cached phys disk page 0 */
	struct mpt_raid_volume	       *volume;		/* owning volume, if any */
	u_int				member_number;
	u_int				pass_thru_active;
	mpt_raid_disk_flags		flags;		/* MPT_RDF_* bits */
};
478
/* An event-notify reply whose acknowledgement to the IOC was deferred. */
struct mpt_evtf_record {
	MSG_EVENT_NOTIFY_REPLY		reply;		/* copy of the event reply */
	uint32_t			context;	/* message context to ack */
	LIST_ENTRY(mpt_evtf_record)	links;		/* ack_frames linkage */
};
484
485LIST_HEAD(mpt_evtf_list, mpt_evtf_record);
486
/*
 * Per-controller instance data: locking state, IOC/port facts,
 * configuration pages, RAID data, PCI/DMA resources, the request pool,
 * and target mode bookkeeping.
 */
struct mpt_softc {
	device_t		dev;
#if __FreeBSD_version < 500000
	uint32_t		mpt_islocked;	/* spl-based lock recursion count */
	int			mpt_splsaved;	/* spl mask saved by mpt_lockspl() */
#else
	struct mtx		mpt_lock;
	int			mpt_locksetup;
#endif
	uint32_t		mpt_pers_mask;	/* attached personality id bits */
	uint32_t
		unit		: 8,
				: 2,
		msi_enable	: 1,
		twildcard	: 1,
		tenabled	: 1,
		do_cfg_role	: 1,
		raid_enabled	: 1,
		raid_mwce_set	: 1,
		getreqwaiter	: 1,
		shutdwn_raid    : 1,
		shutdwn_recovery: 1,
		outofbeer	: 1,
		disabled	: 1,
		is_spi		: 1,
		is_sas		: 1,
		is_fc		: 1;

	u_int			cfg_role;
	u_int			role;	/* role: none, ini, target, both */

	u_int			verbose;	/* MPT_PRT_* diagnostic level */

	/*
	 * IOC Facts
	 */
	uint16_t	mpt_global_credits;
	uint16_t	request_frame_size;
	uint16_t	mpt_max_devices;
	uint8_t		mpt_max_buses;
	uint8_t		ioc_facts_flags;

	/*
	 * Port Facts
	 * XXX - Add multi-port support!.
	 */
	uint16_t	mpt_ini_id;
	uint16_t	mpt_port_type;
	uint16_t	mpt_proto_flags;
	uint16_t	mpt_max_tgtcmds;

	/*
	 * Device Configuration Information
	 */
	union {
		struct mpt_spi_cfg {
			CONFIG_PAGE_SCSI_PORT_0		_port_page0;
			CONFIG_PAGE_SCSI_PORT_1		_port_page1;
			CONFIG_PAGE_SCSI_PORT_2		_port_page2;
			CONFIG_PAGE_SCSI_DEVICE_0	_dev_page0[16];
			CONFIG_PAGE_SCSI_DEVICE_1	_dev_page1[16];
			uint16_t			_tag_enable;
			uint16_t			_disc_enable;
		} spi;
#define	mpt_port_page0		cfg.spi._port_page0
#define	mpt_port_page1		cfg.spi._port_page1
#define	mpt_port_page2		cfg.spi._port_page2
#define	mpt_dev_page0		cfg.spi._dev_page0
#define	mpt_dev_page1		cfg.spi._dev_page1
#define	mpt_tag_enable		cfg.spi._tag_enable
#define	mpt_disc_enable		cfg.spi._disc_enable
		struct mpi_fc_cfg {
			CONFIG_PAGE_FC_PORT_0 _port_page0;
			uint32_t _port_speed;
#define	mpt_fcport_page0	cfg.fc._port_page0
#define	mpt_fcport_speed	cfg.fc._port_speed
		} fc;
	} cfg;
#if __FreeBSD_version >= 500000
	/*
	 * Device config information stored up for sysctl to access
	 */
	union {
		struct {
			unsigned int initiator_id;
		} spi;
		struct {
			char wwnn[19];
			char wwpn[19];
		} fc;
	} scinfo;
#endif

	/* Controller Info for RAID information */
	CONFIG_PAGE_IOC_2 *	ioc_page2;
	CONFIG_PAGE_IOC_3 *	ioc_page3;

	/* Raid Data */
	struct mpt_raid_volume* raid_volumes;
	struct mpt_raid_disk*	raid_disks;
	u_int			raid_max_volumes;
	u_int			raid_max_disks;
	u_int			raid_page0_len;
	u_int			raid_wakeup;
	u_int			raid_rescan;
	u_int			raid_resync_rate;
	u_int			raid_mwce_setting;
	u_int			raid_queue_depth;
	u_int			raid_nonopt_volumes;
	struct proc	       *raid_thread;
	struct callout		raid_timer;

	/*
	 * PCI Hardware info
	 */
	int			pci_msi_count;
	struct resource *	pci_irq;	/* Interrupt map for chip */
	void *			ih;		/* Interrupt handle */
	struct mpt_pci_cfg	pci_cfg;	/* saved PCI conf registers */

	/*
	 * DMA Mapping Stuff
	 */
	struct resource *	pci_reg;	/* Register map for chip */
	int			pci_mem_rid;	/* Resource ID */
	bus_space_tag_t		pci_st;		/* Bus tag for registers */
	bus_space_handle_t	pci_sh;		/* Bus handle for registers */
	/* PIO versions of above. */
	int			pci_pio_rid;
	struct resource *	pci_pio_reg;
	bus_space_tag_t		pci_pio_st;
	bus_space_handle_t	pci_pio_sh;

	bus_dma_tag_t		parent_dmat;	/* DMA tag for parent PCI bus */
	bus_dma_tag_t		reply_dmat;	/* DMA tag for reply memory */
	bus_dmamap_t		reply_dmap;	/* DMA map for reply memory */
	uint8_t		       *reply;		/* KVA of reply memory */
	bus_addr_t		reply_phys;	/* BusAddr of reply memory */

	bus_dma_tag_t		buffer_dmat;	/* DMA tag for buffers */
	bus_dma_tag_t		request_dmat;	/* DMA tag for request memory */
	bus_dmamap_t		request_dmap;	/* DMA map for request memory */
	uint8_t		       *request;	/* KVA of Request memory */
	bus_addr_t		request_phys;	/* BusAddr of request memory */

	uint32_t		max_seg_cnt;	/* calculated after IOC facts */

	/*
	 * Hardware management
	 */
	u_int			reset_cnt;

	/*
	 * CAM && Software Management
	 */
	request_t	       *request_pool;
	struct req_queue	request_free_list;
	struct req_queue	request_pending_list;
	struct req_queue	request_timeout_list;


	struct cam_sim	       *sim;
	struct cam_path	       *path;

	struct cam_sim	       *phydisk_sim;
	struct cam_path	       *phydisk_path;

	struct proc	       *recovery_thread;
	request_t	       *tmf_req;

	/*
	 * Deferred frame acks due to resource shortage.
	 */
	struct mpt_evtf_list	ack_frames;

	/*
	 * Target Mode Support
	 */
	uint32_t		scsi_tgt_handler_id;
	request_t **		tgt_cmd_ptrs;
	request_t **		els_cmd_ptrs;	/* FC only */

	/*
	 * *snork*- this is chosen to be here *just in case* somebody
	 * forgets to point to it exactly and we index off of trt with
	 * CAM_LUN_WILDCARD.
	 */
	tgt_resource_t		trt_wildcard;		/* wildcard luns */
	tgt_resource_t		trt[MPT_MAX_LUNS];
	uint16_t		tgt_cmds_allocated;
	uint16_t		els_cmds_allocated;	/* FC only */

	uint16_t		timeouts;	/* timeout count */
	uint16_t		success;	/* successes after timeout */
	uint16_t		sequence;	/* Sequence Number */
	uint16_t		pad3;


	/* Paired port in some dual adapters configurations */
	struct mpt_softc *	mpt2;

	/* FW Image management */
	uint32_t		fw_image_size;
	uint8_t		       *fw_image;
	bus_dma_tag_t		fw_dmat;	/* DMA tag for firmware image */
	bus_dmamap_t		fw_dmap;	/* DMA map for firmware image */
	bus_addr_t		fw_phys;	/* BusAddr of firmware image */

	/* Shutdown Event Handler. */
	eventhandler_tag         eh;

	TAILQ_ENTRY(mpt_softc)	links;
};
700
701static __inline void mpt_assign_serno(struct mpt_softc *, request_t *);
702
703static __inline void
704mpt_assign_serno(struct mpt_softc *mpt, request_t *req)
705{
706	if ((req->serno = mpt->sequence++) == 0) {
707		req->serno = mpt->sequence++;
708	}
709}
710
711/***************************** Locking Primitives *****************************/
712#if __FreeBSD_version < 500000
713#define	MPT_IFLAGS		INTR_TYPE_CAM
714#define	MPT_LOCK(mpt)		mpt_lockspl(mpt)
715#define	MPT_UNLOCK(mpt)		mpt_unlockspl(mpt)
716#define	MPT_OWNED(mpt)		mpt->mpt_islocked
717#define	MPTLOCK_2_CAMLOCK	MPT_UNLOCK
718#define	CAMLOCK_2_MPTLOCK	MPT_LOCK
719#define	MPT_LOCK_SETUP(mpt)
720#define	MPT_LOCK_DESTROY(mpt)
721
722static __inline void mpt_lockspl(struct mpt_softc *mpt);
723static __inline void mpt_unlockspl(struct mpt_softc *mpt);
724
725static __inline void
726mpt_lockspl(struct mpt_softc *mpt)
727{
728       int s;
729
730       s = splcam();
731       if (mpt->mpt_islocked++ == 0) {
732               mpt->mpt_splsaved = s;
733       } else {
734               splx(s);
735	       panic("Recursed lock with mask: 0x%x\n", s);
736       }
737}
738
739static __inline void
740mpt_unlockspl(struct mpt_softc *mpt)
741{
742       if (mpt->mpt_islocked) {
743               if (--mpt->mpt_islocked == 0) {
744                       splx(mpt->mpt_splsaved);
745               }
746       } else
747	       panic("Negative lock count\n");
748}
749
750static __inline int
751mpt_sleep(struct mpt_softc *mpt, void *ident, int priority,
752	   const char *wmesg, int timo)
753{
754	int saved_cnt;
755	int saved_spl;
756	int error;
757
758	KASSERT(mpt->mpt_islocked <= 1, ("Invalid lock count on tsleep"));
759	saved_cnt = mpt->mpt_islocked;
760	saved_spl = mpt->mpt_splsaved;
761	mpt->mpt_islocked = 0;
762	error = tsleep(ident, priority, wmesg, timo);
763	KASSERT(mpt->mpt_islocked == 0, ("Invalid lock count on wakeup"));
764	mpt->mpt_islocked = saved_cnt;
765	mpt->mpt_splsaved = saved_spl;
766	return (error);
767}
768
769#else
770#ifdef	LOCKING_WORKED_AS_IT_SHOULD
771#error "Shouldn't Be Here!"
772#define	MPT_IFLAGS		INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE
773#define	MPT_LOCK_SETUP(mpt)						\
774		mtx_init(&mpt->mpt_lock, "mpt", NULL, MTX_DEF);		\
775		mpt->mpt_locksetup = 1
776#define	MPT_LOCK_DESTROY(mpt)						\
777	if (mpt->mpt_locksetup) {					\
778		mtx_destroy(&mpt->mpt_lock);				\
779		mpt->mpt_locksetup = 0;					\
780	}
781
782#define	MPT_LOCK(mpt)		mtx_lock(&(mpt)->mpt_lock)
783#define	MPT_UNLOCK(mpt)		mtx_unlock(&(mpt)->mpt_lock)
784#define	MPT_OWNED(mpt)		mtx_owned(&(mpt)->mpt_lock)
785#define	MPTLOCK_2_CAMLOCK(mpt)	\
786	mtx_unlock(&(mpt)->mpt_lock); mtx_lock(&Giant)
787#define	CAMLOCK_2_MPTLOCK(mpt)	\
788	mtx_unlock(&Giant); mtx_lock(&(mpt)->mpt_lock)
789#define mpt_sleep(mpt, ident, priority, wmesg, timo) \
790	msleep(ident, &(mpt)->mpt_lock, priority, wmesg, timo)
791
792#else
793
794#define	MPT_IFLAGS		INTR_TYPE_CAM | INTR_ENTROPY
795#define	MPT_LOCK_SETUP(mpt)	do { } while (0)
796#define	MPT_LOCK_DESTROY(mpt)	do { } while (0)
797#if	0
798#define	MPT_LOCK(mpt)		\
799	device_printf(mpt->dev, "LOCK %s:%d\n", __FILE__, __LINE__); 	\
800	KASSERT(mpt->mpt_locksetup == 0,				\
801	    ("recursive lock acquire at %s:%d", __FILE__, __LINE__));	\
802	mpt->mpt_locksetup = 1
803#define	MPT_UNLOCK(mpt)		\
804	device_printf(mpt->dev, "UNLK %s:%d\n", __FILE__, __LINE__); 	\
805	KASSERT(mpt->mpt_locksetup == 1,				\
806	    ("release unowned lock at %s:%d", __FILE__, __LINE__));	\
807	mpt->mpt_locksetup = 0
808#else
809#define	MPT_LOCK(mpt)							\
810	KASSERT(mpt->mpt_locksetup == 0,				\
811	    ("recursive lock acquire at %s:%d", __FILE__, __LINE__));	\
812	mpt->mpt_locksetup = 1
813#define	MPT_UNLOCK(mpt)							\
814	KASSERT(mpt->mpt_locksetup == 1,				\
815	    ("release unowned lock at %s:%d", __FILE__, __LINE__));	\
816	mpt->mpt_locksetup = 0
817#endif
818#define	MPT_OWNED(mpt)		mpt->mpt_locksetup
819#define	MPTLOCK_2_CAMLOCK(mpt)	MPT_UNLOCK(mpt)
820#define	CAMLOCK_2_MPTLOCK(mpt)	MPT_LOCK(mpt)
821
822static __inline int
823mpt_sleep(struct mpt_softc *, void *, int, const char *, int);
824
825static __inline int
826mpt_sleep(struct mpt_softc *mpt, void *i, int p, const char *w, int t)
827{
828	int r;
829	MPT_UNLOCK(mpt);
830	r = tsleep(i, p, w, t);
831	MPT_LOCK(mpt);
832	return (r);
833}
834#endif
835#endif
836
837/******************************* Register Access ******************************/
838static __inline void mpt_write(struct mpt_softc *, size_t, uint32_t);
839static __inline uint32_t mpt_read(struct mpt_softc *, int);
840static __inline void mpt_pio_write(struct mpt_softc *, size_t, uint32_t);
841static __inline uint32_t mpt_pio_read(struct mpt_softc *, int);
842
843static __inline void
844mpt_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
845{
846	bus_space_write_4(mpt->pci_st, mpt->pci_sh, offset, val);
847}
848
849static __inline uint32_t
850mpt_read(struct mpt_softc *mpt, int offset)
851{
852	return (bus_space_read_4(mpt->pci_st, mpt->pci_sh, offset));
853}
854
/*
 * Some operations (e.g. diagnostic register writes while the ARM processor
 * is disabled) must be performed using "PCI pio" operations.  On non-PCI
 * busses, these operations likely map to normal register accesses.
 */
860static __inline void
861mpt_pio_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
862{
863	bus_space_write_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset, val);
864}
865
866static __inline uint32_t
867mpt_pio_read(struct mpt_softc *mpt, int offset)
868{
869	return (bus_space_read_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset));
870}
871/*********************** Reply Frame/Request Management ***********************/
872/* Max MPT Reply we are willing to accept (must be power of 2) */
#define MPT_REPLY_SIZE   	256

/*
 * Must be less than 16384 in order for target mode to work
 */
/* Fixed request count; the 'mpt' argument is currently unused. */
#define MPT_MAX_REQUESTS(mpt)	512
/* Bytes reserved per request (see MPT_REQ_MEM_SIZE and MPT_NRFM). */
#define MPT_REQUEST_AREA	512
#define MPT_SENSE_SIZE		32	/* included in MPT_REQUEST_AREA */
/* Total size of the request memory pool allocation. */
#define MPT_REQ_MEM_SIZE(mpt)	(MPT_MAX_REQUESTS(mpt) * MPT_REQUEST_AREA)
882
#define MPT_CONTEXT_CB_SHIFT	(16)
/*
 * Extract the callback index from a context handle.  The argument is
 * parenthesized so that low-precedence expressions expand correctly.
 */
#define MPT_CBI(handle)		((handle) >> MPT_CONTEXT_CB_SHIFT)
/* Convert a callback index back into the handler-id field of a handle. */
#define MPT_CBI_TO_HID(cbi)	((cbi) << MPT_CONTEXT_CB_SHIFT)
#define MPT_CONTEXT_TO_CBI(x)	\
    (((x) >> MPT_CONTEXT_CB_SHIFT) & (MPT_NUM_REPLY_HANDLERS - 1))
/* The low 16 bits of a context handle hold the request index. */
#define MPT_CONTEXT_REQI_MASK	0xFFFF
#define MPT_CONTEXT_TO_REQI(x)	((x) & MPT_CONTEXT_REQI_MASK)
890
891/*
892 * Convert a 32bit physical address returned from IOC to an
893 * offset into our reply frame memory or the kvm address needed
894 * to access the data.  The returned address is only the low
895 * 32 bits, so mask our base physical address accordingly.
896 */
/* Arguments parenthesized for safe expansion of arbitrary expressions. */
#define MPT_REPLY_BADDR(x)		\
	((x) << 1)
#define MPT_REPLY_OTOV(m, i) 		\
	((void *)(&(m)->reply[(i)]))
901
/*
 * Dump a reply frame iff verbosity is above MPT_PRT_DEBUG.
 * Arguments are parenthesized so any caller expression is safe.
 */
#define	MPT_DUMP_REPLY_FRAME(mpt, reply_frame)			\
do {								\
	if ((mpt)->verbose > MPT_PRT_DEBUG)			\
		mpt_dump_reply_frame((mpt), (reply_frame));	\
} while(0)
907
908static __inline uint32_t mpt_pop_reply_queue(struct mpt_softc *mpt);
909static __inline void mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr);
910
911/*
912 * Give the reply buffer back to the IOC after we have
913 * finished processing it.
914 */
915static __inline void
916mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr)
917{
918     mpt_write(mpt, MPT_OFFSET_REPLY_Q, ptr);
919}
920
921/* Get a reply from the IOC */
922static __inline uint32_t
923mpt_pop_reply_queue(struct mpt_softc *mpt)
924{
925     return mpt_read(mpt, MPT_OFFSET_REPLY_Q);
926}
927
928void
929mpt_complete_request_chain(struct mpt_softc *, struct req_queue *, u_int);
930
931/************************** Scatter Gather Managment **************************/
/* MPT_RQSL- size of request frame, in bytes (argument parenthesized) */
#define	MPT_RQSL(mpt)		((mpt)->request_frame_size << 2)

/* MPT_NSGL- how many SG entries can fit in a request frame size */
#define	MPT_NSGL(mpt)		(MPT_RQSL(mpt) / sizeof (SGE_IO_UNION))

/* MPT_NRFM- how many request frames can fit in each request alloc we make */
#define	MPT_NRFM(mpt)		(MPT_REQUEST_AREA / MPT_RQSL(mpt))

/*
 * MPT_NSGL_FIRST- # of SG elements that can fit after
 * an I/O request but still within the request frame.
 * Do this safely based upon SGE_IO_UNION.
 *
 * Note that the first element is *within* the SCSI request.
 */
#define	MPT_NSGL_FIRST(mpt)	\
    ((MPT_RQSL(mpt) - sizeof (MSG_SCSI_IO_REQUEST) + sizeof (SGE_IO_UNION)) / \
    sizeof (SGE_IO_UNION))
951
952/***************************** IOC Initialization *****************************/
953int mpt_reset(struct mpt_softc *, int /*reinit*/);
954
955/****************************** Debugging ************************************/
/* One row of a value-decode table consumed by mpt_decode_value(). */
typedef struct mpt_decode_entry {
	char    *name;		/* printable name for this entry */
	u_int	 value;		/* value to match */
	u_int	 mask;		/* presumably (input & mask) is compared
				 * against 'value' -- confirm in
				 * mpt_decode_value() */
} mpt_decode_entry_t;
961
962int mpt_decode_value(mpt_decode_entry_t *table, u_int num_entries,
963		     const char *name, u_int value, u_int *cur_column,
964		     u_int wrap_point);
965
966void mpt_dump_data(struct mpt_softc *, const char *, void *, int);
967void mpt_dump_request(struct mpt_softc *, request_t *);
968
/*
 * Message verbosity levels, in increasing order of detail.  A message is
 * emitted when its level is <= the adapter's 'verbose' setting (see the
 * mpt_lprt macro below).
 */
enum {
	MPT_PRT_ALWAYS,		/* always printed */
	MPT_PRT_FATAL,
	MPT_PRT_ERROR,
	MPT_PRT_WARN,
	MPT_PRT_INFO,
	MPT_PRT_NEGOTIATION,
	MPT_PRT_DEBUG,
	MPT_PRT_DEBUG1,
	MPT_PRT_DEBUG2,
	MPT_PRT_DEBUG3,
	MPT_PRT_TRACE,
	MPT_PRT_NONE=100	/* sentinel: higher than any real level */
};
983
984#if __FreeBSD_version > 500000
985#define mpt_lprt(mpt, level, ...)		\
986do {						\
987	if (level <= (mpt)->verbose)		\
988		mpt_prt(mpt, __VA_ARGS__);	\
989} while (0)
990
991#define mpt_lprtc(mpt, level, ...)		 \
992do {						 \
993	if (level <= (mpt)->debug_level)	 \
994		mpt_prtc(mpt, __VA_ARGS__);	 \
995} while (0)
996#else
997void mpt_lprt(struct mpt_softc *, int, const char *, ...)
998	__printflike(3, 4);
999void mpt_lprtc(struct mpt_softc *, int, const char *, ...)
1000	__printflike(3, 4);
1001#endif
1002void mpt_prt(struct mpt_softc *, const char *, ...)
1003	__printflike(2, 3);
1004void mpt_prtc(struct mpt_softc *, const char *, ...)
1005	__printflike(2, 3);
1006
1007/**************************** Target Mode Related ***************************/
static __inline int mpt_cdblen(uint8_t, int);
/*
 * Return the CDB length implied by the group code (top three bits) of
 * the first CDB byte: group 0 -> 6 bytes, 1 -> 10, 4 and 5 -> 12, and
 * everything else -> 16.  'maxlen' is accepted but not consulted.
 * NOTE(review): SPC defines group 4 as 16-byte CDBs (e.g. READ(16));
 * this table preserves the driver's historical 12 -- confirm before
 * changing.
 */
static __inline int
mpt_cdblen(uint8_t cdb0, int maxlen)
{
	static const int cdblen_by_group[8] = {
		6, 10, 16, 16, 12, 12, 16, 16
	};

	return (cdblen_by_group[cdb0 >> 5]);
}
1025#ifdef	INVARIANTS
static __inline request_t * mpt_tag_2_req(struct mpt_softc *, uint32_t);
/*
 * Map a target-command tag back to its request via the tgt_cmd_ptrs
 * backpointer array.  The array index is taken from bits 18 and up of
 * the tag (NOTE(review): assumed tag layout -- confirm against the
 * code that composes these tags).  KASSERTs on any inconsistency;
 * compiled only under INVARIANTS.
 */
static __inline request_t *
mpt_tag_2_req(struct mpt_softc *mpt, uint32_t tag)
{
	uint16_t rtg = (tag >> 18);
	KASSERT(rtg < mpt->tgt_cmds_allocated, ("bad tag %d\n", tag));
	KASSERT(mpt->tgt_cmd_ptrs, ("no cmd backpointer array"));
	KASSERT(mpt->tgt_cmd_ptrs[rtg], ("no cmd backpointer"));
	return (mpt->tgt_cmd_ptrs[rtg]);
}
1036
1037
1038static __inline int
1039mpt_req_on_free_list(struct mpt_softc *, request_t *);
1040static __inline int
1041mpt_req_on_pending_list(struct mpt_softc *, request_t *);
1042
1043static __inline void
1044mpt_req_spcl(struct mpt_softc *, request_t *, const char *, int);
1045static __inline void
1046mpt_req_not_spcl(struct mpt_softc *, request_t *, const char *, int);
1047
1048
1049/*
1050 * Is request on freelist?
1051 */
1052static __inline int
1053mpt_req_on_free_list(struct mpt_softc *mpt, request_t *req)
1054{
1055	request_t *lrq;
1056
1057	TAILQ_FOREACH(lrq, &mpt->request_free_list, links) {
1058		if (lrq == req) {
1059			return (1);
1060		}
1061	}
1062	return (0);
1063}
1064
1065/*
1066 * Is request on pending list?
1067 */
1068static __inline int
1069mpt_req_on_pending_list(struct mpt_softc *mpt, request_t *req)
1070{
1071	request_t *lrq;
1072
1073	TAILQ_FOREACH(lrq, &mpt->request_pending_list, links) {
1074		if (lrq == req) {
1075			return (1);
1076		}
1077	}
1078	return (0);
1079}
1080
1081/*
1082 * Make sure that req *is* part of one of the special lists
1083 */
1084static __inline void
1085mpt_req_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
1086{
1087	int i;
1088	for (i = 0; i < mpt->els_cmds_allocated; i++) {
1089		if (req == mpt->els_cmd_ptrs[i]) {
1090			return;
1091		}
1092	}
1093	for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
1094		if (req == mpt->tgt_cmd_ptrs[i]) {
1095			return;
1096		}
1097	}
1098	panic("%s(%d): req %p:%u function %x not in els or tgt ptrs\n",
1099	    s, line, req, req->serno,
1100	    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function);
1101}
1102
1103/*
1104 * Make sure that req is *not* part of one of the special lists.
1105 */
1106static __inline void
1107mpt_req_not_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
1108{
1109	int i;
1110	for (i = 0; i < mpt->els_cmds_allocated; i++) {
1111		KASSERT(req != mpt->els_cmd_ptrs[i],
1112		    ("%s(%d): req %p:%u func %x in els ptrs at ioindex %d\n",
1113		    s, line, req, req->serno,
1114		    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
1115	}
1116	for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
1117		KASSERT(req != mpt->tgt_cmd_ptrs[i],
1118		    ("%s(%d): req %p:%u func %x in tgt ptrs at ioindex %d\n",
1119		    s, line, req, req->serno,
1120		    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
1121	}
1122}
1123#endif
1124
1125/*
1126 * Task Management Types, purely for internal consumption
1127 */
typedef enum {
	/*
	 * Values start away from zero (1234/5678), presumably so that
	 * zeroed or garbage values never match a real TMT -- confirm.
	 */
	MPT_ABORT_TASK_SET=1234,
	MPT_CLEAR_TASK_SET,
	MPT_TARGET_RESET,
	MPT_CLEAR_ACA,
	MPT_TERMINATE_TASK,
	MPT_NIL_TMT_VALUE=5678	/* "no task management action" sentinel */
} mpt_task_mgmt_t;
1136
1137/**************************** Unclassified Routines ***************************/
1138void		mpt_send_cmd(struct mpt_softc *mpt, request_t *req);
1139int		mpt_recv_handshake_reply(struct mpt_softc *mpt,
1140					 size_t reply_len, void *reply);
1141int		mpt_wait_req(struct mpt_softc *mpt, request_t *req,
1142			     mpt_req_state_t state, mpt_req_state_t mask,
1143			     int sleep_ok, int time_ms);
1144void		mpt_enable_ints(struct mpt_softc *mpt);
1145void		mpt_disable_ints(struct mpt_softc *mpt);
1146int		mpt_attach(struct mpt_softc *mpt);
1147int		mpt_shutdown(struct mpt_softc *mpt);
1148int		mpt_detach(struct mpt_softc *mpt);
1149int		mpt_send_handshake_cmd(struct mpt_softc *mpt,
1150				       size_t len, void *cmd);
1151request_t *	mpt_get_request(struct mpt_softc *mpt, int sleep_ok);
1152void		mpt_free_request(struct mpt_softc *mpt, request_t *req);
1153void		mpt_intr(void *arg);
1154void		mpt_check_doorbell(struct mpt_softc *mpt);
1155void		mpt_dump_reply_frame(struct mpt_softc *mpt,
1156				     MSG_DEFAULT_REPLY *reply_frame);
1157
1158void		mpt_set_config_regs(struct mpt_softc *);
1159int		mpt_issue_cfg_req(struct mpt_softc */*mpt*/, request_t */*req*/,
1160				  u_int /*Action*/, u_int /*PageVersion*/,
1161				  u_int /*PageLength*/, u_int /*PageNumber*/,
1162				  u_int /*PageType*/, uint32_t /*PageAddress*/,
1163				  bus_addr_t /*addr*/, bus_size_t/*len*/,
1164				  int /*sleep_ok*/, int /*timeout_ms*/);
1165int		mpt_read_cfg_header(struct mpt_softc *, int /*PageType*/,
1166				    int /*PageNumber*/,
1167				    uint32_t /*PageAddress*/,
1168				    CONFIG_PAGE_HEADER *,
1169				    int /*sleep_ok*/, int /*timeout_ms*/);
1170int		mpt_read_cfg_page(struct mpt_softc *t, int /*Action*/,
1171				  uint32_t /*PageAddress*/,
1172				  CONFIG_PAGE_HEADER *, size_t /*len*/,
1173				  int /*sleep_ok*/, int /*timeout_ms*/);
1174int		mpt_write_cfg_page(struct mpt_softc *, int /*Action*/,
1175				   uint32_t /*PageAddress*/,
1176				   CONFIG_PAGE_HEADER *, size_t /*len*/,
1177				   int /*sleep_ok*/, int /*timeout_ms*/);
1178static __inline int
1179mpt_read_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
1180		      CONFIG_PAGE_HEADER *hdr, size_t len,
1181		      int sleep_ok, int timeout_ms)
1182{
1183	return (mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
1184				  PageAddress, hdr, len, sleep_ok, timeout_ms));
1185}
1186
1187static __inline int
1188mpt_write_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
1189		       CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1190		       int timeout_ms)
1191{
1192	return (mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT,
1193				   PageAddress, hdr, len, sleep_ok,
1194				   timeout_ms));
1195}
1196
1197/* mpt_debug.c functions */
1198void mpt_print_reply(void *vmsg);
1199void mpt_print_db(uint32_t mb);
1200void mpt_print_config_reply(void *vmsg);
1201char *mpt_ioc_diag(uint32_t diag);
1202void mpt_req_state(mpt_req_state_t state);
1203void mpt_print_config_request(void *vmsg);
1204void mpt_print_request(void *vmsg);
1205void mpt_print_scsi_io_request(MSG_SCSI_IO_REQUEST *msg);
1206void mpt_dump_sgl(SGE_IO_UNION *se, int offset);
1207#endif /* _MPT_H_ */
1208