/* $FreeBSD: stable/10/sys/dev/mpt/mpt.h 275982 2014-12-21 03:06:11Z smh $ */
/*-
 * Generic defines for LSI '909 FC  adapters.
 * FreeBSD Version.
 *
 * Copyright (c)  2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MPT_H_
#define _MPT_H_

/********************************* OS Includes ********************************/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/types.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/resource.h>

#ifdef __sparc64__
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "opt_ddb.h"

/**************************** Register Definitions ****************************/
#include <dev/mpt/mpt_reg.h>

/******************************* MPI Definitions ******************************/
#include <dev/mpt/mpilib/mpi_type.h>
#include <dev/mpt/mpilib/mpi.h>
#include <dev/mpt/mpilib/mpi_cnfg.h>
#include <dev/mpt/mpilib/mpi_ioc.h>
#include <dev/mpt/mpilib/mpi_raid.h>

/* XXX For mpt_debug.c */
#include <dev/mpt/mpilib/mpi_init.h>

#define	MPT_S64_2_SCALAR(y)	((((int64_t)y.High) << 32) | (y.Low))
#define	MPT_U64_2_SCALAR(y)	((((uint64_t)y.High) << 32) | (y.Low))

/****************************** Misc Definitions ******************************/
/* #define MPT_TEST_MULTIPATH	1 */
#define MPT_OK (0)
#define MPT_FAIL (0x10000)

#define NUM_ELEMENTS(array) (sizeof(array) / sizeof(*array))

#define	MPT_ROLE_NONE		0
#define	MPT_ROLE_INITIATOR	1
#define	MPT_ROLE_TARGET		2
#define	MPT_ROLE_BOTH		3
#define	MPT_ROLE_DEFAULT	MPT_ROLE_INITIATOR

#define	MPT_INI_ID_NONE		-1

/**************************** Forward Declarations ****************************/
struct mpt_softc;
struct mpt_personality;
typedef struct req_entry request_t;

/************************* Personality Module Support *************************/
typedef int mpt_load_handler_t(struct mpt_personality *);
typedef int mpt_probe_handler_t(struct mpt_softc *);
typedef int mpt_attach_handler_t(struct mpt_softc *);
typedef int mpt_enable_handler_t(struct mpt_softc *);
typedef void mpt_ready_handler_t(struct mpt_softc *);
typedef int mpt_event_handler_t(struct mpt_softc *, request_t *,
				MSG_EVENT_NOTIFY_REPLY *);
typedef void mpt_reset_handler_t(struct mpt_softc *, int /*type*/);
/* XXX Add return value and use for veto? */
typedef void mpt_shutdown_handler_t(struct mpt_softc *);
typedef void mpt_detach_handler_t(struct mpt_softc *);
typedef int mpt_unload_handler_t(struct mpt_personality *);

struct mpt_personality
{
	const char		*name;
	uint32_t		 id;		/* Assigned identifier. */
	u_int			 use_count;	/* Instances using personality */
	mpt_load_handler_t	*load;		/* configure personality */
#define MPT_PERS_FIRST_HANDLER(pers) (&(pers)->load)
	mpt_probe_handler_t	*probe;		/* probe if personality applies */
	mpt_attach_handler_t	*attach;	/* initialize device instance */
	mpt_enable_handler_t	*enable;	/* enable device */
	mpt_ready_handler_t	*ready;		/* final open for business */
	mpt_event_handler_t	*event;		/* Handle MPI event. */
	mpt_reset_handler_t	*reset;		/* Re-init after reset. */
	mpt_shutdown_handler_t	*shutdown;	/* Shutdown instance. */
	mpt_detach_handler_t	*detach;	/* release device instance */
	mpt_unload_handler_t	*unload;	/* Shutdown personality */
#define MPT_PERS_LAST_HANDLER(pers) (&(pers)->unload)
};

int mpt_modevent(module_t, int, void *);

/* Maximum supported number of personalities. */
#define MPT_MAX_PERSONALITIES	(15)

#define MPT_PERSONALITY_DEPEND(name, dep, vmin, vpref, vmax) \
	MODULE_DEPEND(name, dep, vmin, vpref, vmax)

#define DECLARE_MPT_PERSONALITY(name, order)				  \
	static moduledata_t name##_mod = {				  \
		#name, mpt_modevent, &name##_personality		  \
	};								  \
	DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, order);	  \
	MODULE_VERSION(name, 1);					  \
	MPT_PERSONALITY_DEPEND(name, mpt_core, 1, 1, 1)
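
/*
 * Sketch of how a personality is expected to declare itself (illustrative
 * only; the names mpt_foo, mpt_foo_probe and mpt_foo_attach are made up
 * for the example):
 *
 *	static struct mpt_personality mpt_foo_personality = {
 *		.name	= "mpt_foo",
 *		.probe	= mpt_foo_probe,
 *		.attach	= mpt_foo_attach,
 *	};
 *	DECLARE_MPT_PERSONALITY(mpt_foo, SI_ORDER_SECOND);
 *
 * mpt_modevent() is then responsible for assigning the personality an id
 * and a slot in the core's personality table when the module loads.
 */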

/******************************* Bus DMA Support ******************************/
/* XXX Need to update bus_dmamap_sync to take a range argument. */
#define bus_dmamap_sync_range(dma_tag, dmamap, offset, len, op)	\
	bus_dmamap_sync(dma_tag, dmamap, op)

#define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary,	\
			   lowaddr, highaddr, filter, filterarg,	\
			   maxsize, nsegments, maxsegsz, flags,		\
			   dma_tagp)					\
	bus_dma_tag_create(parent_tag, alignment, boundary,		\
			   lowaddr, highaddr, filter, filterarg,	\
			   maxsize, nsegments, maxsegsz, flags,		\
			   busdma_lock_mutex, &(mpt)->mpt_lock,		\
			   dma_tagp)
struct mpt_map_info {
	struct mpt_softc *mpt;
	int		  error;
	uint32_t	  phys;
};

void mpt_map_rquest(void *, bus_dma_segment_t *, int, int);

/********************************* Endianness *********************************/
#define	MPT_2_HOST64(ptr, tag)	ptr->tag = le64toh(ptr->tag)
#define	MPT_2_HOST32(ptr, tag)	ptr->tag = le32toh(ptr->tag)
#define	MPT_2_HOST16(ptr, tag)	ptr->tag = le16toh(ptr->tag)

#define	HOST_2_MPT64(ptr, tag)	ptr->tag = htole64(ptr->tag)
#define	HOST_2_MPT32(ptr, tag)	ptr->tag = htole32(ptr->tag)
#define	HOST_2_MPT16(ptr, tag)	ptr->tag = htole16(ptr->tag)

#if	_BYTE_ORDER == _BIG_ENDIAN
void mpt2host_sge_simple_union(SGE_SIMPLE_UNION *);
void mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *);
void mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *);
void mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *);
void mpt2host_config_page_ioc3(CONFIG_PAGE_IOC_3 *);
void mpt2host_config_page_scsi_port_0(CONFIG_PAGE_SCSI_PORT_0 *);
void mpt2host_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *);
void host2mpt_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *);
void mpt2host_config_page_scsi_port_2(CONFIG_PAGE_SCSI_PORT_2 *);
void mpt2host_config_page_scsi_device_0(CONFIG_PAGE_SCSI_DEVICE_0 *);
void mpt2host_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *);
void host2mpt_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *);
void mpt2host_config_page_fc_port_0(CONFIG_PAGE_FC_PORT_0 *);
void mpt2host_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *);
void host2mpt_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *);
void mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *);
void mpt2host_config_page_raid_phys_disk_0(CONFIG_PAGE_RAID_PHYS_DISK_0 *);
void mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *);
#else
#define	mpt2host_sge_simple_union(x)		do { ; } while (0)
#define	mpt2host_iocfacts_reply(x)		do { ; } while (0)
#define	mpt2host_portfacts_reply(x)		do { ; } while (0)
#define	mpt2host_config_page_ioc2(x)		do { ; } while (0)
#define	mpt2host_config_page_ioc3(x)		do { ; } while (0)
#define	mpt2host_config_page_scsi_port_0(x)	do { ; } while (0)
#define	mpt2host_config_page_scsi_port_1(x)	do { ; } while (0)
#define	host2mpt_config_page_scsi_port_1(x)	do { ; } while (0)
#define	mpt2host_config_page_scsi_port_2(x)	do { ; } while (0)
#define	mpt2host_config_page_scsi_device_0(x)	do { ; } while (0)
#define	mpt2host_config_page_scsi_device_1(x)	do { ; } while (0)
#define	host2mpt_config_page_scsi_device_1(x)	do { ; } while (0)
#define	mpt2host_config_page_fc_port_0(x)	do { ; } while (0)
#define	mpt2host_config_page_fc_port_1(x)	do { ; } while (0)
#define	host2mpt_config_page_fc_port_1(x)	do { ; } while (0)
#define	mpt2host_config_page_raid_vol_0(x)	do { ; } while (0)
#define	mpt2host_config_page_raid_phys_disk_0(x)			\
	do { ; } while (0)
#define	mpt2host_mpi_raid_vol_indicator(x)	do { ; } while (0)
#endif
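
/*
 * Example usage (illustrative only): MPI data is little-endian, so reply
 * fields are byte-swapped in place before use on big-endian hosts, e.g.
 *
 *	MPT_2_HOST16(reply_frame, IOCStatus);
 *	MPT_2_HOST32(reply_frame, MsgContext);
 *
 * On little-endian hosts the le*toh()/htole*() calls compile away and the
 * mpt2host_*()/host2mpt_*() helpers above become no-ops.
 */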

/**************************** MPI Transaction State ***************************/
typedef enum {
	REQ_STATE_NIL		= 0x00,
	REQ_STATE_FREE		= 0x01,
	REQ_STATE_ALLOCATED	= 0x02,
	REQ_STATE_QUEUED	= 0x04,
	REQ_STATE_DONE		= 0x08,
	REQ_STATE_TIMEDOUT	= 0x10,
	REQ_STATE_NEED_WAKEUP	= 0x20,
	REQ_STATE_LOCKED	= 0x80,	/* can't be freed */
	REQ_STATE_MASK		= 0xFF
} mpt_req_state_t;

struct req_entry {
	TAILQ_ENTRY(req_entry) links;	/* Pointer to next in list */
	mpt_req_state_t	state;		/* Request State Information */
	uint16_t	index;		/* Index of this entry */
	uint16_t	IOCStatus;	/* Completion status */
	uint16_t	ResponseCode;	/* TMF Response Code */
	uint16_t	serno;		/* serial number */
	union ccb      *ccb;		/* CAM request */
	void	       *req_vbuf;	/* Virtual Address of Entry */
	void	       *sense_vbuf;	/* Virtual Address of sense data */
	bus_addr_t	req_pbuf;	/* Physical Address of Entry */
	bus_addr_t	sense_pbuf;	/* Physical Address of sense data */
	bus_dmamap_t	dmap;		/* DMA map for data buffers */
	struct req_entry *chain;	/* for SGE overallocations */
	struct callout  callout;	/* Timeout for the request */
};

typedef struct mpt_config_params {
	u_int		Action;
	u_int		PageVersion;
	u_int		PageLength;
	u_int		PageNumber;
	u_int		PageType;
	u_int		PageAddress;
	u_int		ExtPageLength;
	u_int		ExtPageType;
} cfgparms_t;

/**************************** MPI Target State Info ***************************/

typedef struct {
	uint32_t reply_desc;	/* current reply descriptor */
	uint32_t resid;		/* current data residual */
	uint32_t bytes_xfered;	/* current relative offset */
	union ccb *ccb;		/* pointer to currently active ccb */
	request_t *req;		/* pointer to currently active assist request */
	uint32_t
		is_local : 1,
		nxfers	 : 31;
	uint32_t tag_id;
	enum {
		TGT_STATE_NIL,
		TGT_STATE_LOADING,
		TGT_STATE_LOADED,
		TGT_STATE_IN_CAM,
		TGT_STATE_SETTING_UP_FOR_DATA,
		TGT_STATE_MOVING_DATA,
		TGT_STATE_MOVING_DATA_AND_STATUS,
		TGT_STATE_SENDING_STATUS
	} state;
} mpt_tgt_state_t;

/*
 * When we get an incoming command it has its own tag which is called the
 * IoIndex. This is the value we gave that particular command buffer when
 * we originally assigned it. It's just a number, really. The FC card uses
 * it as an RX_ID. We can use it to index into mpt->tgt_cmd_ptrs, which
 * contains pointers to the request_t structures related to that IoIndex.
 *
 * What *we* do is construct a tag out of the index for the target command
 * which owns the incoming ATIO plus a rolling sequence number.
 */
#define	MPT_MAKE_TAGID(mpt, req, ioindex)	\
 ((ioindex << 18) | (((mpt->sequence++) & 0x3f) << 12) | (req->index & 0xfff))
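
/*
 * Illustrative reading of the macro above (not authoritative): bits 31..18
 * carry the IoIndex, bits 17..12 a 6-bit rolling sequence number, and bits
 * 11..0 the request index.  For example, ioindex 3, sequence 0x21 and
 * req->index 0x045 would yield (3 << 18) | (0x21 << 12) | 0x045 == 0xe1045,
 * and MPT_TAG_2_REQ() recovers the owning command by shifting the tag right
 * by 18 again.
 */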

#ifdef	INVARIANTS
#define	MPT_TAG_2_REQ(a, b)		mpt_tag_2_req(a, (uint32_t) b)
#else
#define	MPT_TAG_2_REQ(mpt, tag)		mpt->tgt_cmd_ptrs[tag >> 18]
#endif

#define	MPT_TGT_STATE(mpt, req) ((mpt_tgt_state_t *) \
    (&((uint8_t *)req->req_vbuf)[MPT_RQSL(mpt) - sizeof (mpt_tgt_state_t)]))

STAILQ_HEAD(mpt_hdr_stailq, ccb_hdr);
#define	MPT_MAX_LUNS	256
typedef struct {
	struct mpt_hdr_stailq	atios;
	struct mpt_hdr_stailq	inots;
	int enabled;
} tgt_resource_t;
#define	MPT_MAX_ELS	64

/**************************** Handler Registration ****************************/
/*
 * Global table of registered reply handlers.  The
 * handler is indicated by byte 3 of the request
 * index submitted to the IOC.  This allows the
 * driver core to perform generic processing without
 * any knowledge of per-personality behavior.
 *
 * MPT_NUM_REPLY_HANDLERS must be a power of 2
 * to allow the easy generation of a mask.
 *
 * The handler offsets used by the core are hard coded,
 * allowing faster code generation when assigning a handler
 * to a request.  All "personalities" must use the
 * handler registration mechanism.
 *
 * The IOC handlers that are rarely executed are placed
 * at the tail of the table to make it more likely that
 * all commonly executed handlers fit in a single cache
 * line.
 */
#define MPT_NUM_REPLY_HANDLERS		(32)
#define MPT_REPLY_HANDLER_EVENTS	MPT_CBI_TO_HID(0)
#define MPT_REPLY_HANDLER_CONFIG	MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-1)
#define MPT_REPLY_HANDLER_HANDSHAKE	MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-2)
typedef int mpt_reply_handler_t(struct mpt_softc *mpt, request_t *request,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame);
typedef union {
	mpt_reply_handler_t	*reply_handler;
} mpt_handler_t;

typedef enum {
	MPT_HANDLER_REPLY,
	MPT_HANDLER_EVENT,
	MPT_HANDLER_RESET,
	MPT_HANDLER_SHUTDOWN
} mpt_handler_type;

struct mpt_handler_record
{
	LIST_ENTRY(mpt_handler_record)	links;
	mpt_handler_t			handler;
};

LIST_HEAD(mpt_handler_list, mpt_handler_record);

/*
 * The handler_id is currently unused but would contain the
 * handler ID used in the MsgContext field to allow direction
 * of replies to the handler.  Registrations that don't require
 * a handler id can pass in NULL for the handler_id.
 *
 * Deregistrations for handlers without a handler id should
 * pass in MPT_HANDLER_ID_NONE.
 */
#define MPT_HANDLER_ID_NONE		(0xFFFFFFFF)
int mpt_register_handler(struct mpt_softc *, mpt_handler_type,
			 mpt_handler_t, uint32_t *);
int mpt_deregister_handler(struct mpt_softc *, mpt_handler_type,
			   mpt_handler_t, uint32_t);
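
/*
 * Sketch of typical registration (illustrative; loosely modeled on how the
 * CAM personality hooks in its reply handler, and the local variable names
 * are made up):
 *
 *	mpt_handler_t handler;
 *	uint32_t handler_id;
 *
 *	handler.reply_handler = mpt_scsi_reply_handler;
 *	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
 *	    &handler_id);
 *
 * The returned handler_id is folded into the MsgContext of outgoing
 * requests so the core can route matching replies back to this handler.
 */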

/******************* Per-Controller Instance Data Structures ******************/
TAILQ_HEAD(req_queue, req_entry);

/* Structure for saving proper values for modifiable PCI config registers */
struct mpt_pci_cfg {
	uint16_t Command;
	uint16_t LatencyTimer_LineSize;
	uint32_t IO_BAR;
	uint32_t Mem0_BAR[2];
	uint32_t Mem1_BAR[2];
	uint32_t ROM_BAR;
	uint8_t  IntLine;
	uint32_t PMCSR;
};

typedef enum {
	MPT_RVF_NONE		= 0x0,
	MPT_RVF_ACTIVE		= 0x1,
	MPT_RVF_ANNOUNCED	= 0x2,
	MPT_RVF_UP2DATE		= 0x4,
	MPT_RVF_REFERENCED	= 0x8,
	MPT_RVF_WCE_CHANGED	= 0x10
} mpt_raid_volume_flags;

struct mpt_raid_volume {
	CONFIG_PAGE_RAID_VOL_0	       *config_page;
	MPI_RAID_VOL_INDICATOR		sync_progress;
	mpt_raid_volume_flags		flags;
	u_int				quiesced_disks;
};

typedef enum {
	MPT_RDF_NONE		= 0x00,
	MPT_RDF_ACTIVE		= 0x01,
	MPT_RDF_ANNOUNCED	= 0x02,
	MPT_RDF_UP2DATE		= 0x04,
	MPT_RDF_REFERENCED	= 0x08,
	MPT_RDF_QUIESCING	= 0x10,
	MPT_RDF_QUIESCED	= 0x20
} mpt_raid_disk_flags;

struct mpt_raid_disk {
	CONFIG_PAGE_RAID_PHYS_DISK_0	config_page;
	struct mpt_raid_volume	       *volume;
	u_int				member_number;
	u_int				pass_thru_active;
	mpt_raid_disk_flags		flags;
};

struct mpt_evtf_record {
	MSG_EVENT_NOTIFY_REPLY		reply;
	uint32_t			context;
	LIST_ENTRY(mpt_evtf_record)	links;
};

LIST_HEAD(mpt_evtf_list, mpt_evtf_record);

struct mptsas_devinfo {
	uint16_t	dev_handle;
	uint16_t	parent_dev_handle;
	uint16_t	enclosure_handle;
	uint16_t	slot;
	uint8_t		phy_num;
	uint8_t		physical_port;
	uint8_t		target_id;
	uint8_t		bus;
	uint64_t	sas_address;
	uint32_t	device_info;
};

struct mptsas_phyinfo {
	uint16_t	handle;
	uint8_t		phy_num;
	uint8_t		port_id;
	uint8_t		negotiated_link_rate;
	uint8_t		hw_link_rate;
	uint8_t		programmed_link_rate;
	uint8_t		sas_port_add_phy;
	struct mptsas_devinfo identify;
	struct mptsas_devinfo attached;
};

struct mptsas_portinfo {
	uint16_t			num_phys;
	struct mptsas_phyinfo		*phy_info;
};

struct mpt_softc {
	device_t		dev;
	struct mtx		mpt_lock;
	int			mpt_locksetup;
	uint32_t		mpt_pers_mask;
	uint32_t
				: 7,
		unit		: 8,
		ready		: 1,
		fw_uploaded	: 1,
		msi_enable	: 1,
		twildcard	: 1,
		tenabled	: 1,
		do_cfg_role	: 1,
		raid_enabled	: 1,
		raid_mwce_set	: 1,
		getreqwaiter	: 1,
		shutdwn_raid    : 1,
		shutdwn_recovery: 1,
		outofbeer	: 1,
		disabled	: 1,
		is_spi		: 1,
		is_sas		: 1,
		is_fc		: 1,
		is_1078		: 1;

	u_int			cfg_role;
	u_int			role;	/* role: none, ini, target, both */

	u_int			verbose;
#ifdef	MPT_TEST_MULTIPATH
	int			failure_id;
#endif

	/*
	 * IOC Facts
	 */
	MSG_IOC_FACTS_REPLY	ioc_facts;

	/*
	 * Port Facts
	 */
	MSG_PORT_FACTS_REPLY *	port_facts;
#define	mpt_max_tgtcmds	port_facts[0].MaxPostedCmdBuffers

	/*
	 * Device Configuration Information
	 */
	union {
		struct mpt_spi_cfg {
			CONFIG_PAGE_SCSI_PORT_0		_port_page0;
			CONFIG_PAGE_SCSI_PORT_1		_port_page1;
			CONFIG_PAGE_SCSI_PORT_2		_port_page2;
			CONFIG_PAGE_SCSI_DEVICE_0	_dev_page0[16];
			CONFIG_PAGE_SCSI_DEVICE_1	_dev_page1[16];
			int				_ini_id;
			uint16_t			_tag_enable;
			uint16_t			_disc_enable;
		} spi;
#define	mpt_port_page0		cfg.spi._port_page0
#define	mpt_port_page1		cfg.spi._port_page1
#define	mpt_port_page2		cfg.spi._port_page2
#define	mpt_dev_page0		cfg.spi._dev_page0
#define	mpt_dev_page1		cfg.spi._dev_page1
#define	mpt_ini_id		cfg.spi._ini_id
#define	mpt_tag_enable		cfg.spi._tag_enable
#define	mpt_disc_enable		cfg.spi._disc_enable
		struct mpi_fc_cfg {
			CONFIG_PAGE_FC_PORT_0 _port_page0;
			uint32_t _port_speed;
#define	mpt_fcport_page0	cfg.fc._port_page0
#define	mpt_fcport_speed	cfg.fc._port_speed
		} fc;
	} cfg;
	/*
	 * Device config information stored up for sysctl to access
	 */
	union {
		struct {
			unsigned int initiator_id;
		} spi;
		struct {
			char wwnn[19];
			char wwpn[19];
		} fc;
	} scinfo;

	/* Controller Info for RAID information */
	CONFIG_PAGE_IOC_2 *	ioc_page2;
	CONFIG_PAGE_IOC_3 *	ioc_page3;

	/* Raid Data */
	struct mpt_raid_volume* raid_volumes;
	struct mpt_raid_disk*	raid_disks;
	u_int			raid_max_volumes;
	u_int			raid_max_disks;
	u_int			raid_page0_len;
	u_int			raid_wakeup;
	u_int			raid_rescan;
	u_int			raid_resync_rate;
	u_int			raid_mwce_setting;
	u_int			raid_queue_depth;
	u_int			raid_nonopt_volumes;
	struct proc	       *raid_thread;
	struct callout		raid_timer;

	/*
	 * PCI Hardware info
	 */
	struct resource *	pci_irq;	/* Interrupt map for chip */
	void *			ih;		/* Interrupt handle */
#if 0
	struct mpt_pci_cfg	pci_cfg;	/* saved PCI conf registers */
#endif

	/*
	 * DMA Mapping Stuff
	 */
	struct resource *	pci_reg;	/* Register map for chip */
	bus_space_tag_t		pci_st;		/* Bus tag for registers */
	bus_space_handle_t	pci_sh;		/* Bus handle for registers */
	/* PIO versions of above. */
	struct resource *	pci_pio_reg;
	bus_space_tag_t		pci_pio_st;
	bus_space_handle_t	pci_pio_sh;

	bus_dma_tag_t		parent_dmat;	/* DMA tag for parent PCI bus */
	bus_dma_tag_t		reply_dmat;	/* DMA tag for reply memory */
	bus_dmamap_t		reply_dmap;	/* DMA map for reply memory */
	uint8_t		       *reply;		/* KVA of reply memory */
	bus_addr_t		reply_phys;	/* BusAddr of reply memory */

	bus_dma_tag_t		buffer_dmat;	/* DMA tag for buffers */
	bus_dma_tag_t		request_dmat;	/* DMA tag for request memory */
	bus_dmamap_t		request_dmap;	/* DMA map for request memory */
	uint8_t		       *request;	/* KVA of Request memory */
	bus_addr_t		request_phys;	/* BusAddr of request memory */

	uint32_t		max_seg_cnt;	/* calculated after IOC facts */
	uint32_t		max_cam_seg_cnt;/* calculated from MAXPHYS */

	/*
	 * Hardware management
	 */
	u_int			reset_cnt;

	/*
	 * CAM && Software Management
	 */
	request_t	       *request_pool;
	struct req_queue	request_free_list;
	struct req_queue	request_pending_list;
	struct req_queue	request_timeout_list;


	struct cam_sim	       *sim;
	struct cam_path	       *path;

	struct cam_sim	       *phydisk_sim;
	struct cam_path	       *phydisk_path;

	struct proc	       *recovery_thread;
	request_t	       *tmf_req;

	/*
	 * Deferred frame acks due to resource shortage.
	 */
	struct mpt_evtf_list	ack_frames;

	/*
	 * Target Mode Support
	 */
	uint32_t		scsi_tgt_handler_id;
	request_t **		tgt_cmd_ptrs;
	request_t **		els_cmd_ptrs;	/* FC only */

	/*
	 * *snork*- this is chosen to be here *just in case* somebody
	 * forgets to point to it exactly and we index off of trt with
	 * CAM_LUN_WILDCARD.
	 */
	tgt_resource_t		trt_wildcard;		/* wildcard luns */
	tgt_resource_t		trt[MPT_MAX_LUNS];
	uint16_t		tgt_cmds_allocated;
	uint16_t		els_cmds_allocated;	/* FC only */

	uint16_t		timeouts;	/* timeout count */
	uint16_t		success;	/* successes after timeout */
	uint16_t		sequence;	/* Sequence Number */
	uint16_t		pad3;

#if 0
	/* Paired port in some dual adapter configurations */
	struct mpt_softc *	mpt2;
#endif

	/* FW Image management */
	uint32_t		fw_image_size;
	uint8_t		       *fw_image;
	bus_dma_tag_t		fw_dmat;	/* DMA tag for firmware image */
	bus_dmamap_t		fw_dmap;	/* DMA map for firmware image */
	bus_addr_t		fw_phys;	/* BusAddr of firmware image */

	/* SAS Topology */
	struct mptsas_portinfo	*sas_portinfo;

	/* Shutdown Event Handler. */
	eventhandler_tag         eh;

	/* Userland management interface. */
	struct cdev		*cdev;

	TAILQ_ENTRY(mpt_softc)	links;
};

static __inline void mpt_assign_serno(struct mpt_softc *, request_t *);

static __inline void
mpt_assign_serno(struct mpt_softc *mpt, request_t *req)
{
	if ((req->serno = mpt->sequence++) == 0) {
		req->serno = mpt->sequence++;
	}
}

/***************************** Locking Primitives *****************************/
#define	MPT_IFLAGS		INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE
#define	MPT_LOCK_SETUP(mpt)						\
		mtx_init(&mpt->mpt_lock, "mpt", NULL, MTX_DEF);		\
		mpt->mpt_locksetup = 1
#define	MPT_LOCK_DESTROY(mpt)						\
	if (mpt->mpt_locksetup) {					\
		mtx_destroy(&mpt->mpt_lock);				\
		mpt->mpt_locksetup = 0;					\
	}

#define	MPT_LOCK(mpt)		mtx_lock(&(mpt)->mpt_lock)
#define	MPT_UNLOCK(mpt)		mtx_unlock(&(mpt)->mpt_lock)
#define	MPT_OWNED(mpt)		mtx_owned(&(mpt)->mpt_lock)
#define	MPT_LOCK_ASSERT(mpt)	mtx_assert(&(mpt)->mpt_lock, MA_OWNED)
#define mpt_sleep(mpt, ident, priority, wmesg, sbt) \
    msleep_sbt(ident, &(mpt)->mpt_lock, priority, wmesg, sbt, 0, 0)
#define mpt_req_timeout(req, sbt, func, arg) \
    callout_reset_sbt(&(req)->callout, (sbt), 0, (func), (arg), 0)
#define mpt_req_untimeout(req, func, arg) \
	callout_stop(&(req)->callout)
#define mpt_callout_init(mpt, c) \
	callout_init_mtx(c, &(mpt)->mpt_lock, 0)
#define mpt_callout_drain(mpt, c) \
	callout_drain(c)
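
/*
 * Illustrative use of the primitives above (a sketch, not a real code
 * path; req and error are made-up locals): the softc lock is held around
 * request submission, and waiters drop it via mpt_sleep()/mpt_wait_req().
 *
 *	MPT_LOCK(mpt);
 *	req = mpt_get_request(mpt, TRUE);
 *	...
 *	mpt_send_cmd(mpt, req);
 *	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
 *	    TRUE, 5000);
 *	MPT_UNLOCK(mpt);
 */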

/******************************* Register Access ******************************/
static __inline void mpt_write(struct mpt_softc *, size_t, uint32_t);
static __inline uint32_t mpt_read(struct mpt_softc *, int);
static __inline void mpt_pio_write(struct mpt_softc *, size_t, uint32_t);
static __inline uint32_t mpt_pio_read(struct mpt_softc *, int);

static __inline void
mpt_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
{
	bus_space_write_4(mpt->pci_st, mpt->pci_sh, offset, val);
}

static __inline uint32_t
mpt_read(struct mpt_softc *mpt, int offset)
{
	return (bus_space_read_4(mpt->pci_st, mpt->pci_sh, offset));
}

/*
 * Some operations (e.g. diagnostic register writes while the ARM processor
 * is disabled) must be performed using "PCI pio" operations.  On non-PCI
 * busses, these operations likely map to normal register accesses.
 */
static __inline void
mpt_pio_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
{
	KASSERT(mpt->pci_pio_reg != NULL, ("no PIO resource"));
	bus_space_write_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset, val);
}

static __inline uint32_t
mpt_pio_read(struct mpt_softc *mpt, int offset)
{
	KASSERT(mpt->pci_pio_reg != NULL, ("no PIO resource"));
	return (bus_space_read_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset));
}
/*********************** Reply Frame/Request Management ***********************/
/* Max MPT Reply we are willing to accept (must be power of 2) */
#define MPT_REPLY_SIZE   	256

/*
 * Must be less than 16384 in order for target mode to work
 */
#define MPT_MAX_REQUESTS(mpt)	512
#define MPT_REQUEST_AREA	512
#define MPT_SENSE_SIZE		32	/* included in MPT_REQUEST_AREA */
#define MPT_REQ_MEM_SIZE(mpt)	(MPT_MAX_REQUESTS(mpt) * MPT_REQUEST_AREA)
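
/*
 * Worked example (arithmetic on the constants above only): with
 * MPT_MAX_REQUESTS at 512 and MPT_REQUEST_AREA at 512 bytes, the request
 * region spans 512 * 512 == 262144 bytes (256KB), with MPT_SENSE_SIZE
 * bytes of sense data carved out of (not added to) each 512-byte slot.
 */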

#define MPT_CONTEXT_CB_SHIFT	(16)
#define MPT_CBI(handle)		(handle >> MPT_CONTEXT_CB_SHIFT)
#define MPT_CBI_TO_HID(cbi)	((cbi) << MPT_CONTEXT_CB_SHIFT)
#define MPT_CONTEXT_TO_CBI(x)	\
    (((x) >> MPT_CONTEXT_CB_SHIFT) & (MPT_NUM_REPLY_HANDLERS - 1))
#define MPT_CONTEXT_REQI_MASK	0xFFFF
#define MPT_CONTEXT_TO_REQI(x)	((x) & MPT_CONTEXT_REQI_MASK)
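
/*
 * Illustrative decomposition (just a reading of the macros above): a
 * MsgContext of 0x001f0004 splits into callback index 0x1f
 * (MPT_CONTEXT_TO_CBI), i.e. the config reply handler slot, and request
 * index 0x0004 (MPT_CONTEXT_TO_REQI), i.e. request_pool entry 4.
 */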

/*
 * Convert a 32bit physical address returned from IOC to an
 * offset into our reply frame memory or the kvm address needed
 * to access the data.  The returned address is only the low
 * 32 bits, so mask our base physical address accordingly.
 */
#define MPT_REPLY_BADDR(x)		\
	(x << 1)
#define MPT_REPLY_OTOV(m, i) 		\
	((void *)(&m->reply[i]))

#define	MPT_DUMP_REPLY_FRAME(mpt, reply_frame)		\
do {							\
	if (mpt->verbose > MPT_PRT_DEBUG)		\
		mpt_dump_reply_frame(mpt, reply_frame);	\
} while(0)

static __inline uint32_t mpt_pop_reply_queue(struct mpt_softc *mpt);
static __inline void mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr);

/*
 * Give the reply buffer back to the IOC after we have
 * finished processing it.
 */
static __inline void
mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr)
{
	mpt_write(mpt, MPT_OFFSET_REPLY_Q, ptr);
}

/* Get a reply from the IOC */
static __inline uint32_t
mpt_pop_reply_queue(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_REPLY_Q);
}

void
mpt_complete_request_chain(struct mpt_softc *, struct req_queue *, u_int);

/************************** Scatter Gather Management **************************/
/* MPT_RQSL- size of request frame, in bytes */
#define	MPT_RQSL(mpt)		(mpt->ioc_facts.RequestFrameSize << 2)

/* MPT_NSGL- how many SG entries can fit in a request frame size */
#define	MPT_NSGL(mpt)		(MPT_RQSL(mpt) / sizeof (SGE_IO_UNION))

/* MPT_NRFM- how many request frames can fit in each request alloc we make */
#define	MPT_NRFM(mpt)		(MPT_REQUEST_AREA / MPT_RQSL(mpt))

/*
 * MPT_NSGL_FIRST- # of SG elements that can fit after
 * an I/O request but still within the request frame.
 * Do this safely based upon SGE_IO_UNION.
 *
 * Note that the first element is *within* the SCSI request.
 */
#define	MPT_NSGL_FIRST(mpt)	\
    ((MPT_RQSL(mpt) - sizeof (MSG_SCSI_IO_REQUEST) + sizeof (SGE_IO_UNION)) / \
    sizeof (SGE_IO_UNION))
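
/*
 * Worked example (assumed values, not read from any particular IOC): if
 * ioc_facts.RequestFrameSize is 32 32-bit words, MPT_RQSL is 32 << 2 ==
 * 128 bytes, MPT_NRFM is 512 / 128 == 4 request frames per request area
 * slot, and MPT_NSGL is 128 / sizeof (SGE_IO_UNION) SG entries per frame.
 * MPT_NSGL_FIRST is smaller still, since most of the first frame is taken
 * up by the MSG_SCSI_IO_REQUEST itself.
 */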

/***************************** IOC Initialization *****************************/
int mpt_reset(struct mpt_softc *, int /*reinit*/);

/****************************** Debugging ************************************/
void mpt_dump_data(struct mpt_softc *, const char *, void *, int);
void mpt_dump_request(struct mpt_softc *, request_t *);

enum {
	MPT_PRT_ALWAYS,
	MPT_PRT_FATAL,
	MPT_PRT_ERROR,
	MPT_PRT_WARN,
	MPT_PRT_INFO,
	MPT_PRT_NEGOTIATION,
	MPT_PRT_DEBUG,
	MPT_PRT_DEBUG1,
	MPT_PRT_DEBUG2,
	MPT_PRT_DEBUG3,
	MPT_PRT_TRACE,
	MPT_PRT_NONE=100
};

#define mpt_lprt(mpt, level, ...)		\
do {						\
	if (level <= (mpt)->verbose)		\
		mpt_prt(mpt, __VA_ARGS__);	\
} while (0)

#if 0
#define mpt_lprtc(mpt, level, ...)		\
do {						\
	if (level <= (mpt)->verbose)		\
		mpt_prtc(mpt, __VA_ARGS__);	\
} while (0)
#endif

void mpt_prt(struct mpt_softc *, const char *, ...)
	__printflike(2, 3);
void mpt_prtc(struct mpt_softc *, const char *, ...)
	__printflike(2, 3);

/**************************** Target Mode Related ***************************/
static __inline int mpt_cdblen(uint8_t, int);
static __inline int
mpt_cdblen(uint8_t cdb0, int maxlen)
{
	int group = cdb0 >> 5;
	switch (group) {
	case 0:
		return (6);
	case 1:
		return (10);
	case 4:
	case 5:
		return (12);
	default:
		return (16);
	}
}
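
/*
 * Example (standard SCSI CDB group encoding): READ(10) has opcode 0x28, so
 * 0x28 >> 5 == 1 and mpt_cdblen() reports a 10-byte CDB; a group 0 opcode
 * such as INQUIRY (0x12) reports 6 bytes.  The maxlen argument is currently
 * unused.
 */
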
#ifdef	INVARIANTS
static __inline request_t * mpt_tag_2_req(struct mpt_softc *, uint32_t);
static __inline request_t *
mpt_tag_2_req(struct mpt_softc *mpt, uint32_t tag)
{
	uint16_t rtg = (tag >> 18);
	KASSERT(rtg < mpt->tgt_cmds_allocated, ("bad tag %d", tag));
	KASSERT(mpt->tgt_cmd_ptrs, ("no cmd backpointer array"));
	KASSERT(mpt->tgt_cmd_ptrs[rtg], ("no cmd backpointer"));
	return (mpt->tgt_cmd_ptrs[rtg]);
}
#endif

static __inline int
mpt_req_on_free_list(struct mpt_softc *, request_t *);
static __inline int
mpt_req_on_pending_list(struct mpt_softc *, request_t *);

/*
 * Is request on freelist?
 */
static __inline int
mpt_req_on_free_list(struct mpt_softc *mpt, request_t *req)
{
	request_t *lrq;

	TAILQ_FOREACH(lrq, &mpt->request_free_list, links) {
		if (lrq == req) {
			return (1);
		}
	}
	return (0);
}

/*
 * Is request on pending list?
 */
static __inline int
mpt_req_on_pending_list(struct mpt_softc *mpt, request_t *req)
{
	request_t *lrq;

	TAILQ_FOREACH(lrq, &mpt->request_pending_list, links) {
		if (lrq == req) {
			return (1);
		}
	}
	return (0);
}

#ifdef	INVARIANTS
static __inline void
mpt_req_spcl(struct mpt_softc *, request_t *, const char *, int);
static __inline void
mpt_req_not_spcl(struct mpt_softc *, request_t *, const char *, int);

/*
 * Make sure that req *is* part of one of the special lists
 */
static __inline void
mpt_req_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
{
	int i;
	for (i = 0; i < mpt->els_cmds_allocated; i++) {
		if (req == mpt->els_cmd_ptrs[i]) {
			return;
		}
	}
	for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
		if (req == mpt->tgt_cmd_ptrs[i]) {
			return;
		}
	}
	panic("%s(%d): req %p:%u function %x not in els or tgt ptrs",
	    s, line, req, req->serno,
	    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function);
}

/*
 * Make sure that req is *not* part of one of the special lists.
 */
static __inline void
mpt_req_not_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
{
	int i;
	for (i = 0; i < mpt->els_cmds_allocated; i++) {
		KASSERT(req != mpt->els_cmd_ptrs[i],
		    ("%s(%d): req %p:%u func %x in els ptrs at ioindex %d",
		    s, line, req, req->serno,
		    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
	}
	for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
		KASSERT(req != mpt->tgt_cmd_ptrs[i],
		    ("%s(%d): req %p:%u func %x in tgt ptrs at ioindex %d",
		    s, line, req, req->serno,
		    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
	}
}
#endif

/*
 * Task Management Types, purely for internal consumption
 */
typedef enum {
	MPT_ABORT_TASK_SET=1234,
	MPT_CLEAR_TASK_SET,
	MPT_TARGET_RESET,
	MPT_CLEAR_ACA,
	MPT_TERMINATE_TASK,
	MPT_NIL_TMT_VALUE=5678
} mpt_task_mgmt_t;

/**************************** Unclassified Routines ***************************/
void		mpt_send_cmd(struct mpt_softc *mpt, request_t *req);
int		mpt_recv_handshake_reply(struct mpt_softc *mpt,
					 size_t reply_len, void *reply);
int		mpt_wait_req(struct mpt_softc *mpt, request_t *req,
			     mpt_req_state_t state, mpt_req_state_t mask,
			     int sleep_ok, int time_ms);
void		mpt_enable_ints(struct mpt_softc *mpt);
void		mpt_disable_ints(struct mpt_softc *mpt);
int		mpt_attach(struct mpt_softc *mpt);
int		mpt_shutdown(struct mpt_softc *mpt);
int		mpt_detach(struct mpt_softc *mpt);
int		mpt_send_handshake_cmd(struct mpt_softc *mpt,
				       size_t len, void *cmd);
request_t *	mpt_get_request(struct mpt_softc *mpt, int sleep_ok);
void		mpt_free_request(struct mpt_softc *mpt, request_t *req);
void		mpt_intr(void *arg);
void		mpt_check_doorbell(struct mpt_softc *mpt);
void		mpt_dump_reply_frame(struct mpt_softc *mpt,
				     MSG_DEFAULT_REPLY *reply_frame);

int		mpt_issue_cfg_req(struct mpt_softc */*mpt*/, request_t */*req*/,
				  cfgparms_t *params,
				  bus_addr_t /*addr*/, bus_size_t/*len*/,
				  int /*sleep_ok*/, int /*timeout_ms*/);
int		mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion,
				       int PageNumber, uint32_t PageAddress,
				       int ExtPageType,
				       CONFIG_EXTENDED_PAGE_HEADER *rslt,
				       int sleep_ok, int timeout_ms);
int		mpt_read_extcfg_page(struct mpt_softc *mpt, int Action,
				     uint32_t PageAddress,
				     CONFIG_EXTENDED_PAGE_HEADER *hdr,
				     void *buf, size_t len, int sleep_ok,
				     int timeout_ms);
int		mpt_read_cfg_header(struct mpt_softc *, int /*PageType*/,
				    int /*PageNumber*/,
				    uint32_t /*PageAddress*/,
				    CONFIG_PAGE_HEADER *,
				    int /*sleep_ok*/, int /*timeout_ms*/);
int		mpt_read_cfg_page(struct mpt_softc *t, int /*Action*/,
				  uint32_t /*PageAddress*/,
				  CONFIG_PAGE_HEADER *, size_t /*len*/,
				  int /*sleep_ok*/, int /*timeout_ms*/);
int		mpt_write_cfg_page(struct mpt_softc *, int /*Action*/,
				   uint32_t /*PageAddress*/,
				   CONFIG_PAGE_HEADER *, size_t /*len*/,
				   int /*sleep_ok*/, int /*timeout_ms*/);
static __inline int
mpt_read_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
		      CONFIG_PAGE_HEADER *hdr, size_t len,
		      int sleep_ok, int timeout_ms)
{
	return (mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
				  PageAddress, hdr, len, sleep_ok, timeout_ms));
}

static __inline int
mpt_write_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
		       CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
		       int timeout_ms)
{
	return (mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT,
				   PageAddress, hdr, len, sleep_ok,
				   timeout_ms));
}
/* mpt_debug.c functions */
void mpt_print_reply(void *vmsg);
void mpt_print_db(uint32_t mb);
void mpt_print_config_reply(void *vmsg);
char *mpt_ioc_diag(uint32_t diag);
void mpt_req_state(mpt_req_state_t state);
void mpt_print_config_request(void *vmsg);
void mpt_print_request(void *vmsg);
void mpt_dump_sgl(SGE_IO_UNION *se, int offset);
#endif /* _MPT_H_ */