/*-
 * Generic defines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * SPDX-License-Identifier: BSD-2-Clause AND BSD-3-Clause
 *
 * Copyright (c)  2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MPT_H_
#define _MPT_H_

/********************************* OS Includes ********************************/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/types.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "opt_ddb.h"

/**************************** Register Definitions ****************************/
#include <dev/mpt/mpt_reg.h>

/******************************* MPI Definitions ******************************/
#include <dev/mpt/mpilib/mpi_type.h>
#include <dev/mpt/mpilib/mpi.h>
#include <dev/mpt/mpilib/mpi_cnfg.h>
#include <dev/mpt/mpilib/mpi_ioc.h>
#include <dev/mpt/mpilib/mpi_raid.h>

/* XXX For mpt_debug.c */
#include <dev/mpt/mpilib/mpi_init.h>

#define	MPT_S64_2_SCALAR(y)	((((int64_t)y.High) << 32) | (y.Low))
#define	MPT_U64_2_SCALAR(y)	((((uint64_t)y.High) << 32) | (y.Low))

/****************************** Misc Definitions ******************************/
/* #define MPT_TEST_MULTIPATH	1 */
#define MPT_OK (0)
#define MPT_FAIL (0x10000)

#define NUM_ELEMENTS(array) (sizeof(array) / sizeof(*array))

#define	MPT_ROLE_NONE		0
#define	MPT_ROLE_INITIATOR	1
#define	MPT_ROLE_TARGET		2
#define	MPT_ROLE_BOTH		3
#define	MPT_ROLE_DEFAULT	MPT_ROLE_INITIATOR

#define	MPT_INI_ID_NONE		-1

/**************************** Forward Declarations ****************************/
struct mpt_softc;
struct mpt_personality;
typedef struct req_entry request_t;

/************************* Personality Module Support *************************/
typedef int mpt_load_handler_t(struct mpt_personality *);
typedef int mpt_probe_handler_t(struct mpt_softc *);
typedef int mpt_attach_handler_t(struct mpt_softc *);
typedef int mpt_enable_handler_t(struct mpt_softc *);
typedef void mpt_ready_handler_t(struct mpt_softc *);
typedef int mpt_event_handler_t(struct mpt_softc *, request_t *,
				MSG_EVENT_NOTIFY_REPLY *);
typedef void mpt_reset_handler_t(struct mpt_softc *, int /*type*/);
/* XXX Add return value and use for veto? */
typedef void mpt_shutdown_handler_t(struct mpt_softc *);
typedef void mpt_detach_handler_t(struct mpt_softc *);
typedef int mpt_unload_handler_t(struct mpt_personality *);
struct mpt_personality
{
	const char		*name;
	uint32_t		 id;		/* Assigned identifier. */
	u_int			 use_count;	/* Instances using personality */
	mpt_load_handler_t	*load;		/* configure personality */
#define MPT_PERS_FIRST_HANDLER(pers) (&(pers)->load)
	mpt_probe_handler_t	*probe;		/* probe for device instance */
	mpt_attach_handler_t	*attach;	/* initialize device instance */
	mpt_enable_handler_t	*enable;	/* enable device */
	mpt_ready_handler_t	*ready;		/* final open for business */
	mpt_event_handler_t	*event;		/* Handle MPI event. */
	mpt_reset_handler_t	*reset;		/* Re-init after reset. */
	mpt_shutdown_handler_t	*shutdown;	/* Shutdown instance. */
	mpt_detach_handler_t	*detach;	/* release device instance */
	mpt_unload_handler_t	*unload;	/* Shutdown personality */
#define MPT_PERS_LAST_HANDLER(pers) (&(pers)->unload)
};

int mpt_modevent(module_t, int, void *);

/* Maximum supported number of personalities. */
#define MPT_MAX_PERSONALITIES	(15)

#define MPT_PERSONALITY_DEPEND(name, dep, vmin, vpref, vmax) \
	MODULE_DEPEND(name, dep, vmin, vpref, vmax)

#define DECLARE_MPT_PERSONALITY(name, order)				  \
	static moduledata_t name##_mod = {				  \
		#name, mpt_modevent, &name##_personality		  \
	};								  \
	DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, order);	  \
	MODULE_VERSION(name, 1);					  \
	MPT_PERSONALITY_DEPEND(name, mpt_core, 1, 1, 1)

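/*
 * Illustrative (hypothetical) use of the personality hooks and the
 * DECLARE_MPT_PERSONALITY() macro by a personality module; the "mpt_foo"
 * names below are examples only and do not exist in the driver:
 *
 *	static struct mpt_personality mpt_foo_personality = {
 *		.name	= "mpt_foo",
 *		.load	= mpt_foo_load,
 *		.attach	= mpt_foo_attach,
 *		.event	= mpt_foo_event,
 *		.reset	= mpt_foo_reset,
 *		.detach	= mpt_foo_detach,
 *		.unload	= mpt_foo_unload,
 *	};
 *	DECLARE_MPT_PERSONALITY(mpt_foo, SI_ORDER_SECOND);
 */
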
/******************************* Bus DMA Support ******************************/
/* XXX Need to update bus_dmamap_sync to take a range argument. */
#define bus_dmamap_sync_range(dma_tag, dmamap, offset, len, op)	\
	bus_dmamap_sync(dma_tag, dmamap, op)

#define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary,	\
			   lowaddr, highaddr, filter, filterarg,	\
			   maxsize, nsegments, maxsegsz, flags,		\
			   dma_tagp)					\
	bus_dma_tag_create(parent_tag, alignment, boundary,		\
			   lowaddr, highaddr, filter, filterarg,	\
			   maxsize, nsegments, maxsegsz, flags,		\
			   busdma_lock_mutex, &(mpt)->mpt_lock,		\
			   dma_tagp)
struct mpt_map_info {
	struct mpt_softc *mpt;
	int		  error;
	uint32_t	  phys;
};

void mpt_map_rquest(void *, bus_dma_segment_t *, int, int);

/********************************* Endianness *********************************/
#define	MPT_2_HOST64(ptr, tag)	ptr->tag = le64toh(ptr->tag)
#define	MPT_2_HOST32(ptr, tag)	ptr->tag = le32toh(ptr->tag)
#define	MPT_2_HOST16(ptr, tag)	ptr->tag = le16toh(ptr->tag)

#define	HOST_2_MPT64(ptr, tag)	ptr->tag = htole64(ptr->tag)
#define	HOST_2_MPT32(ptr, tag)	ptr->tag = htole32(ptr->tag)
#define	HOST_2_MPT16(ptr, tag)	ptr->tag = htole16(ptr->tag)

#if	_BYTE_ORDER == _BIG_ENDIAN
void mpt2host_sge_simple_union(SGE_SIMPLE_UNION *);
void mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *);
void mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *);
void mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *);
void mpt2host_config_page_ioc3(CONFIG_PAGE_IOC_3 *);
void mpt2host_config_page_scsi_port_0(CONFIG_PAGE_SCSI_PORT_0 *);
void mpt2host_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *);
void host2mpt_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *);
void mpt2host_config_page_scsi_port_2(CONFIG_PAGE_SCSI_PORT_2 *);
void mpt2host_config_page_scsi_device_0(CONFIG_PAGE_SCSI_DEVICE_0 *);
void mpt2host_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *);
void host2mpt_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *);
void mpt2host_config_page_fc_port_0(CONFIG_PAGE_FC_PORT_0 *);
void mpt2host_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *);
void host2mpt_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *);
void mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *);
void mpt2host_config_page_raid_phys_disk_0(CONFIG_PAGE_RAID_PHYS_DISK_0 *);
void mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *);
#else
#define	mpt2host_sge_simple_union(x)		do { ; } while (0)
#define	mpt2host_iocfacts_reply(x)		do { ; } while (0)
#define	mpt2host_portfacts_reply(x)		do { ; } while (0)
#define	mpt2host_config_page_ioc2(x)		do { ; } while (0)
#define	mpt2host_config_page_ioc3(x)		do { ; } while (0)
#define	mpt2host_config_page_scsi_port_0(x)	do { ; } while (0)
#define	mpt2host_config_page_scsi_port_1(x)	do { ; } while (0)
#define	host2mpt_config_page_scsi_port_1(x)	do { ; } while (0)
#define	mpt2host_config_page_scsi_port_2(x)	do { ; } while (0)
#define	mpt2host_config_page_scsi_device_0(x)	do { ; } while (0)
#define	mpt2host_config_page_scsi_device_1(x)	do { ; } while (0)
#define	host2mpt_config_page_scsi_device_1(x)	do { ; } while (0)
#define	mpt2host_config_page_fc_port_0(x)	do { ; } while (0)
#define	mpt2host_config_page_fc_port_1(x)	do { ; } while (0)
#define	host2mpt_config_page_fc_port_1(x)	do { ; } while (0)
#define	mpt2host_config_page_raid_vol_0(x)	do { ; } while (0)
#define	mpt2host_config_page_raid_phys_disk_0(x)			\
	do { ; } while (0)
#define	mpt2host_mpi_raid_vol_indicator(x)	do { ; } while (0)
#endif

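/*
 * Illustrative use: callers run these helpers on MPI structures after
 * reading them from (or before handing them to) the IOC, e.g.
 *
 *	mpt2host_iocfacts_reply(&mpt->ioc_facts);
 *
 * On little-endian hosts the helpers above are defined away to no-ops.
 */
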
/**************************** MPI Transaction State ***************************/
typedef enum {
	REQ_STATE_NIL		= 0x00,
	REQ_STATE_FREE		= 0x01,
	REQ_STATE_ALLOCATED	= 0x02,
	REQ_STATE_QUEUED	= 0x04,
	REQ_STATE_DONE		= 0x08,
	REQ_STATE_TIMEDOUT	= 0x10,
	REQ_STATE_NEED_WAKEUP	= 0x20,
	REQ_STATE_LOCKED	= 0x80,	/* can't be freed */
	REQ_STATE_MASK		= 0xFF
} mpt_req_state_t;

struct req_entry {
	TAILQ_ENTRY(req_entry) links;	/* Pointer to next in list */
	mpt_req_state_t	state;		/* Request State Information */
	uint16_t	index;		/* Index of this entry */
	uint16_t	IOCStatus;	/* Completion status */
	uint16_t	ResponseCode;	/* TMF Response Code */
	uint16_t	serno;		/* serial number */
	union ccb      *ccb;		/* CAM request */
	void	       *req_vbuf;	/* Virtual Address of Entry */
	void	       *sense_vbuf;	/* Virtual Address of sense data */
	bus_addr_t	req_pbuf;	/* Physical Address of Entry */
	bus_addr_t	sense_pbuf;	/* Physical Address of sense data */
	bus_dmamap_t	dmap;		/* DMA map for data buffers */
	struct req_entry *chain;	/* for SGE overallocations */
	struct callout  callout;	/* Timeout for the request */
};

typedef struct mpt_config_params {
	u_int		Action;
	u_int		PageVersion;
	u_int		PageLength;
	u_int		PageNumber;
	u_int		PageType;
	u_int		PageAddress;
	u_int		ExtPageLength;
	u_int		ExtPageType;
} cfgparms_t;

/**************************** MPI Target State Info ***************************/
typedef struct {
	uint32_t reply_desc;	/* current reply descriptor */
	uint32_t bytes_xfered;	/* current relative offset */
	int resid;		/* current data residual */
	union ccb *ccb;		/* pointer to currently active ccb */
	request_t *req;		/* pointer to currently active assist request */
	uint32_t
		is_local : 1,
		nxfers	 : 31;
	uint32_t tag_id;	/* Our local tag. */
	uint16_t itag;		/* Initiator tag. */
	enum {
		TGT_STATE_NIL,
		TGT_STATE_LOADING,
		TGT_STATE_LOADED,
		TGT_STATE_IN_CAM,
		TGT_STATE_SETTING_UP_FOR_DATA,
		TGT_STATE_MOVING_DATA,
		TGT_STATE_MOVING_DATA_AND_STATUS,
		TGT_STATE_SENDING_STATUS
	} state;
} mpt_tgt_state_t;

/*
 * When we get an incoming command it has its own tag which is called the
 * IoIndex. This is the value we gave that particular command buffer when
 * we originally assigned it. It's just a number, really. The FC card uses
 * it as an RX_ID. We can use it to index into mpt->tgt_cmd_ptrs, which
 * contains pointers to the request_t structures related to that IoIndex.
 *
 * What *we* do is construct a tag out of the index for the target command
 * which owns the incoming ATIO plus a rolling sequence number.
 */
#define	MPT_MAKE_TAGID(mpt, req, ioindex)	\
 ((ioindex << 18) | (((mpt->sequence++) & 0x3f) << 12) | (req->index & 0xfff))

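/*
 * For example (illustrative values only): IoIndex 2, sequence number 0x15
 * and request index 0x01f yield the tag
 *
 *	(2 << 18) | (0x15 << 12) | 0x01f == 0x9501f
 *
 * MPT_TAG_2_REQ() recovers the owning command by shifting the tag back
 * down by 18 bits.
 */
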
#ifdef	INVARIANTS
#define	MPT_TAG_2_REQ(a, b)		mpt_tag_2_req(a, (uint32_t) b)
#else
#define	MPT_TAG_2_REQ(mpt, tag)		mpt->tgt_cmd_ptrs[tag >> 18]
#endif

#define	MPT_TGT_STATE(mpt, req) ((mpt_tgt_state_t *) \
    (&((uint8_t *)req->req_vbuf)[MPT_RQSL(mpt) - sizeof (mpt_tgt_state_t)]))

STAILQ_HEAD(mpt_hdr_stailq, ccb_hdr);
#define	MPT_MAX_LUNS	256
typedef struct {
	struct mpt_hdr_stailq	atios;
	struct mpt_hdr_stailq	inots;
	int enabled;
} tgt_resource_t;
#define	MPT_MAX_ELS	64

/**************************** Handler Registration ****************************/
/*
 * Global table of registered reply handlers.  The
 * handler is indicated by byte 3 of the request
 * index submitted to the IOC.  This allows the
 * driver core to perform generic processing without
 * any knowledge of per-personality behavior.
 *
 * MPT_NUM_REPLY_HANDLERS must be a power of 2
 * to allow the easy generation of a mask.
 *
 * The handler offsets used by the core are hard coded
 * allowing faster code generation when assigning a handler
 * to a request.  All "personalities" must use the
 * handler registration mechanism.
 *
 * The IOC handlers that are rarely executed are placed
 * at the tail of the table to make it more likely that
 * all commonly executed handlers fit in a single cache
 * line.
 */
#define MPT_NUM_REPLY_HANDLERS		(32)
#define MPT_REPLY_HANDLER_EVENTS	MPT_CBI_TO_HID(0)
#define MPT_REPLY_HANDLER_CONFIG	MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-1)
#define MPT_REPLY_HANDLER_HANDSHAKE	MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-2)
typedef int mpt_reply_handler_t(struct mpt_softc *mpt, request_t *request,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame);
typedef union {
	mpt_reply_handler_t	*reply_handler;
} mpt_handler_t;

typedef enum {
	MPT_HANDLER_REPLY,
	MPT_HANDLER_EVENT,
	MPT_HANDLER_RESET,
	MPT_HANDLER_SHUTDOWN
} mpt_handler_type;

struct mpt_handler_record
{
	LIST_ENTRY(mpt_handler_record)	links;
	mpt_handler_t			handler;
};

LIST_HEAD(mpt_handler_list, mpt_handler_record);

/*
 * The handler_id is currently unused but would contain the
 * handler ID used in the MsgContext field to allow direction
 * of replies to the handler.  Registrations that don't require
 * a handler id can pass in NULL for the handler_id.
 *
 * Deregistrations for handlers without a handler id should
 * pass in MPT_HANDLER_ID_NONE.
 */
#define MPT_HANDLER_ID_NONE		(0xFFFFFFFF)
int mpt_register_handler(struct mpt_softc *, mpt_handler_type,
			 mpt_handler_t, uint32_t *);
int mpt_deregister_handler(struct mpt_softc *, mpt_handler_type,
			   mpt_handler_t, uint32_t);

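/*
 * Illustrative registration of a reply handler from a personality's
 * attach routine (the "foo" names are hypothetical):
 *
 *	mpt_handler_t handler;
 *
 *	handler.reply_handler = mpt_foo_reply_handler;
 *	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
 *	    &foo_reply_handler_id);
 */
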
/******************* Per-Controller Instance Data Structures ******************/
TAILQ_HEAD(req_queue, req_entry);

/* Structure for saving proper values for modifiable PCI config registers */
struct mpt_pci_cfg {
	uint16_t Command;
	uint16_t LatencyTimer_LineSize;
	uint32_t IO_BAR;
	uint32_t Mem0_BAR[2];
	uint32_t Mem1_BAR[2];
	uint32_t ROM_BAR;
	uint8_t  IntLine;
	uint32_t PMCSR;
};

typedef enum {
	MPT_RVF_NONE		= 0x0,
	MPT_RVF_ACTIVE		= 0x1,
	MPT_RVF_ANNOUNCED	= 0x2,
	MPT_RVF_UP2DATE		= 0x4,
	MPT_RVF_REFERENCED	= 0x8,
	MPT_RVF_WCE_CHANGED	= 0x10
} mpt_raid_volume_flags;

struct mpt_raid_volume {
	CONFIG_PAGE_RAID_VOL_0	       *config_page;
	MPI_RAID_VOL_INDICATOR		sync_progress;
	mpt_raid_volume_flags		flags;
	u_int				quiesced_disks;
};

typedef enum {
	MPT_RDF_NONE		= 0x00,
	MPT_RDF_ACTIVE		= 0x01,
	MPT_RDF_ANNOUNCED	= 0x02,
	MPT_RDF_UP2DATE		= 0x04,
	MPT_RDF_REFERENCED	= 0x08,
	MPT_RDF_QUIESCING	= 0x10,
	MPT_RDF_QUIESCED	= 0x20
} mpt_raid_disk_flags;

struct mpt_raid_disk {
	CONFIG_PAGE_RAID_PHYS_DISK_0	config_page;
	struct mpt_raid_volume	       *volume;
	u_int				member_number;
	u_int				pass_thru_active;
	mpt_raid_disk_flags		flags;
};

struct mpt_evtf_record {
	MSG_EVENT_NOTIFY_REPLY		reply;
	uint32_t			context;
	LIST_ENTRY(mpt_evtf_record)	links;
};

LIST_HEAD(mpt_evtf_list, mpt_evtf_record);

struct mptsas_devinfo {
	uint16_t	dev_handle;
	uint16_t	parent_dev_handle;
	uint16_t	enclosure_handle;
	uint16_t	slot;
	uint8_t		phy_num;
	uint8_t		physical_port;
	uint8_t		target_id;
	uint8_t		bus;
	uint64_t	sas_address;
	uint32_t	device_info;
};

struct mptsas_phyinfo {
	uint16_t	handle;
	uint8_t		phy_num;
	uint8_t		port_id;
	uint8_t		negotiated_link_rate;
	uint8_t		hw_link_rate;
	uint8_t		programmed_link_rate;
	uint8_t		sas_port_add_phy;
	struct mptsas_devinfo identify;
	struct mptsas_devinfo attached;
};

struct mptsas_portinfo {
	uint16_t			num_phys;
	struct mptsas_phyinfo		*phy_info;
};

struct mpt_softc {
	device_t		dev;
	struct mtx		mpt_lock;
	int			mpt_locksetup;
	uint32_t		mpt_pers_mask;
	uint32_t
				: 7,
		unit		: 8,
		ready		: 1,
		fw_uploaded	: 1,
		msi_enable	: 1,
		twildcard	: 1,
		tenabled	: 1,
		do_cfg_role	: 1,
		raid_enabled	: 1,
		raid_mwce_set	: 1,
		getreqwaiter	: 1,
		shutdwn_raid	: 1,
		shutdwn_recovery: 1,
		outofbeer	: 1,
		disabled	: 1,
		is_spi		: 1,
		is_sas		: 1,
		is_fc		: 1,
		is_1078		: 1;

	u_int			cfg_role;
	u_int			role;	/* role: none, ini, target, both */

	u_int			verbose;
#ifdef	MPT_TEST_MULTIPATH
	int			failure_id;
#endif

	/*
	 * IOC Facts
	 */
	MSG_IOC_FACTS_REPLY	ioc_facts;

	/*
	 * Port Facts
	 */
	MSG_PORT_FACTS_REPLY *	port_facts;
#define	mpt_max_tgtcmds	port_facts[0].MaxPostedCmdBuffers

	/*
	 * Device Configuration Information
	 */
	union {
		struct mpt_spi_cfg {
			CONFIG_PAGE_SCSI_PORT_0		_port_page0;
			CONFIG_PAGE_SCSI_PORT_1		_port_page1;
			CONFIG_PAGE_SCSI_PORT_2		_port_page2;
			CONFIG_PAGE_SCSI_DEVICE_0	_dev_page0[16];
			CONFIG_PAGE_SCSI_DEVICE_1	_dev_page1[16];
			int				_ini_id;
			uint16_t			_tag_enable;
			uint16_t			_disc_enable;
		} spi;
#define	mpt_port_page0		cfg.spi._port_page0
#define	mpt_port_page1		cfg.spi._port_page1
#define	mpt_port_page2		cfg.spi._port_page2
#define	mpt_dev_page0		cfg.spi._dev_page0
#define	mpt_dev_page1		cfg.spi._dev_page1
#define	mpt_ini_id		cfg.spi._ini_id
#define	mpt_tag_enable		cfg.spi._tag_enable
#define	mpt_disc_enable		cfg.spi._disc_enable
		struct mpi_fc_cfg {
			CONFIG_PAGE_FC_PORT_0 _port_page0;
			uint32_t _port_speed;
#define	mpt_fcport_page0	cfg.fc._port_page0
#define	mpt_fcport_speed	cfg.fc._port_speed
		} fc;
	} cfg;
	/*
	 * Device config information stored up for sysctl to access
	 */
	union {
		struct {
			unsigned int initiator_id;
		} spi;
		struct {
			uint64_t wwnn;
			uint64_t wwpn;
			uint32_t portid;
		} fc;
	} scinfo;

	/* Controller Info for RAID information */
	CONFIG_PAGE_IOC_2 *	ioc_page2;
	CONFIG_PAGE_IOC_3 *	ioc_page3;

	/* Raid Data */
	struct mpt_raid_volume* raid_volumes;
	struct mpt_raid_disk*	raid_disks;
	u_int			raid_max_volumes;
	u_int			raid_max_disks;
	u_int			raid_page0_len;
	u_int			raid_wakeup;
	u_int			raid_rescan;
	u_int			raid_resync_rate;
	u_int			raid_mwce_setting;
	u_int			raid_queue_depth;
	u_int			raid_nonopt_volumes;
	struct proc	       *raid_thread;
	struct callout		raid_timer;

	/*
	 * PCI Hardware info
	 */
	struct resource *	pci_irq;	/* Interrupt map for chip */
	void *			ih;		/* Interrupt handle */
#if 0
	struct mpt_pci_cfg	pci_cfg;	/* saved PCI conf registers */
#endif

	/*
	 * DMA Mapping Stuff
	 */
	struct resource *	pci_reg;	/* Register map for chip */
	bus_space_tag_t		pci_st;		/* Bus tag for registers */
	bus_space_handle_t	pci_sh;		/* Bus handle for registers */
	/* PIO versions of above. */
	struct resource *	pci_pio_reg;
	bus_space_tag_t		pci_pio_st;
	bus_space_handle_t	pci_pio_sh;

	bus_dma_tag_t		parent_dmat;	/* DMA tag for parent PCI bus */
	bus_dma_tag_t		reply_dmat;	/* DMA tag for reply memory */
	bus_dmamap_t		reply_dmap;	/* DMA map for reply memory */
	uint8_t		       *reply;		/* KVA of reply memory */
	bus_addr_t		reply_phys;	/* BusAddr of reply memory */

	bus_dma_tag_t		buffer_dmat;	/* DMA tag for buffers */
	bus_dma_tag_t		request_dmat;	/* DMA tag for request memory */
	bus_dmamap_t		request_dmap;	/* DMA map for request memory */
	uint8_t		       *request;	/* KVA of Request memory */
	bus_addr_t		request_phys;	/* BusAddr of request memory */

	uint32_t		max_seg_cnt;	/* calculated after IOC facts */
	uint32_t		max_cam_seg_cnt;/* calculated from maxphys */

	/*
	 * Hardware management
	 */
	u_int			reset_cnt;

	/*
	 * CAM && Software Management
	 */
	request_t	       *request_pool;
	struct req_queue	request_free_list;
	struct req_queue	request_pending_list;
	struct req_queue	request_timeout_list;

	struct cam_sim	       *sim;
	struct cam_path	       *path;

	struct cam_sim	       *phydisk_sim;
	struct cam_path	       *phydisk_path;

	struct proc	       *recovery_thread;
	request_t	       *tmf_req;

	/*
	 * Deferred frame acks due to resource shortage.
	 */
	struct mpt_evtf_list	ack_frames;

	/*
	 * Target Mode Support
	 */
	uint32_t		scsi_tgt_handler_id;
	request_t **		tgt_cmd_ptrs;
	request_t **		els_cmd_ptrs;	/* FC only */

	/*
	 * *snork*- this is chosen to be here *just in case* somebody
	 * forgets to point to it exactly and we index off of trt with
	 * CAM_LUN_WILDCARD.
	 */
	tgt_resource_t		trt_wildcard;		/* wildcard luns */
	tgt_resource_t		trt[MPT_MAX_LUNS];
	uint16_t		tgt_cmds_allocated;
	uint16_t		els_cmds_allocated;	/* FC only */

	uint16_t		timeouts;	/* timeout count */
	uint16_t		success;	/* successes after timeout */
	uint16_t		sequence;	/* Sequence Number */
	uint16_t		pad3;

#if 0
	/* Paired port in some dual-adapter configurations */
	struct mpt_softc *	mpt2;
#endif

	/* FW Image management */
	uint32_t		fw_image_size;
	uint8_t		       *fw_image;
	bus_dma_tag_t		fw_dmat;	/* DMA tag for firmware image */
	bus_dmamap_t		fw_dmap;	/* DMA map for firmware image */
	bus_addr_t		fw_phys;	/* BusAddr of firmware image */

	/* SAS Topology */
	struct mptsas_portinfo	*sas_portinfo;

	/* Shutdown Event Handler. */
	eventhandler_tag	 eh;

	/* Userland management interface. */
	struct cdev		*cdev;

	TAILQ_ENTRY(mpt_softc)	links;
};

static __inline void mpt_assign_serno(struct mpt_softc *, request_t *);

static __inline void
mpt_assign_serno(struct mpt_softc *mpt, request_t *req)
{
	if ((req->serno = mpt->sequence++) == 0) {
		req->serno = mpt->sequence++;
	}
}

/***************************** Locking Primitives *****************************/
#define	MPT_IFLAGS		INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE
#define	MPT_LOCK_SETUP(mpt)						\
		mtx_init(&mpt->mpt_lock, "mpt", NULL, MTX_DEF);		\
		mpt->mpt_locksetup = 1
#define	MPT_LOCK_DESTROY(mpt)						\
	if (mpt->mpt_locksetup) {					\
		mtx_destroy(&mpt->mpt_lock);				\
		mpt->mpt_locksetup = 0;					\
	}

#define	MPT_LOCK(mpt)		mtx_lock(&(mpt)->mpt_lock)
#define	MPT_UNLOCK(mpt)		mtx_unlock(&(mpt)->mpt_lock)
#define	MPT_OWNED(mpt)		mtx_owned(&(mpt)->mpt_lock)
#define	MPT_LOCK_ASSERT(mpt)	mtx_assert(&(mpt)->mpt_lock, MA_OWNED)
#define mpt_sleep(mpt, ident, priority, wmesg, sbt) \
    msleep_sbt(ident, &(mpt)->mpt_lock, priority, wmesg, sbt, 0, 0)
#define mpt_req_timeout(req, sbt, func, arg) \
    callout_reset_sbt(&(req)->callout, (sbt), 0, (func), (arg), 0)
#define mpt_req_untimeout(req, func, arg) \
	callout_stop(&(req)->callout)
#define mpt_callout_init(mpt, c) \
	callout_init_mtx(c, &(mpt)->mpt_lock, 0)
#define mpt_callout_drain(mpt, c) \
	callout_drain(c)

/******************************* Register Access ******************************/
static __inline void mpt_write(struct mpt_softc *, size_t, uint32_t);
static __inline void mpt_write_stream(struct mpt_softc *, size_t, uint32_t);
static __inline uint32_t mpt_read(struct mpt_softc *, int);
static __inline void mpt_pio_write(struct mpt_softc *, size_t, uint32_t);
static __inline uint32_t mpt_pio_read(struct mpt_softc *, int);

static __inline void
mpt_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
{
	bus_space_write_4(mpt->pci_st, mpt->pci_sh, offset, val);
}

static __inline void
mpt_write_stream(struct mpt_softc *mpt, size_t offset, uint32_t val)
{
	bus_space_write_stream_4(mpt->pci_st, mpt->pci_sh, offset, val);
}

static __inline uint32_t
mpt_read(struct mpt_softc *mpt, int offset)
{
	return (bus_space_read_4(mpt->pci_st, mpt->pci_sh, offset));
}

/*
 * Some operations (e.g. diagnostic register writes while the ARM processor
 * is disabled) must be performed using "PCI pio" operations.  On non-PCI
 * buses, these operations likely map to normal register accesses.
 */
static __inline void
mpt_pio_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
{
	KASSERT(mpt->pci_pio_reg != NULL, ("no PIO resource"));
	bus_space_write_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset, val);
}

static __inline uint32_t
mpt_pio_read(struct mpt_softc *mpt, int offset)
{
	KASSERT(mpt->pci_pio_reg != NULL, ("no PIO resource"));
	return (bus_space_read_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset));
}

/*********************** Reply Frame/Request Management ***********************/
/* Max MPT Reply we are willing to accept (must be power of 2) */
#define MPT_REPLY_SIZE		256

/*
 * Must be less than 16384 in order for target mode to work
 */
#define MPT_MAX_REQUESTS(mpt)	512
#define MPT_REQUEST_AREA	512
#define MPT_SENSE_SIZE		32	/* included in MPT_REQUEST_AREA */
#define MPT_REQ_MEM_SIZE(mpt)	(MPT_MAX_REQUESTS(mpt) * MPT_REQUEST_AREA)

#define MPT_CONTEXT_CB_SHIFT	(16)
#define MPT_CBI(handle)		(handle >> MPT_CONTEXT_CB_SHIFT)
#define MPT_CBI_TO_HID(cbi)	((cbi) << MPT_CONTEXT_CB_SHIFT)
#define MPT_CONTEXT_TO_CBI(x)	\
    (((x) >> MPT_CONTEXT_CB_SHIFT) & (MPT_NUM_REPLY_HANDLERS - 1))
#define MPT_CONTEXT_REQI_MASK	0xFFFF
#define MPT_CONTEXT_TO_REQI(x)	((x) & MPT_CONTEXT_REQI_MASK)

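/*
 * For example (illustrative value): a message context of 0x001f0007
 * decomposes into callback index 0x1f (MPT_CONTEXT_TO_CBI) and request
 * index 0x0007 (MPT_CONTEXT_TO_REQI).
 */
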
/*
 * Convert a 32bit physical address returned from IOC to an
 * offset into our reply frame memory or the kvm address needed
 * to access the data.  The returned address is only the low
 * 32 bits, so mask our base physical address accordingly.
 */
#define MPT_REPLY_BADDR(x)		\
	(x << 1)
#define MPT_REPLY_OTOV(m, i)		\
	((void *)(&m->reply[i]))

#define	MPT_DUMP_REPLY_FRAME(mpt, reply_frame)		\
do {							\
	if (mpt->verbose > MPT_PRT_DEBUG)		\
		mpt_dump_reply_frame(mpt, reply_frame);	\
} while (0)

static __inline uint32_t mpt_pop_reply_queue(struct mpt_softc *mpt);
static __inline void mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr);

/*
 * Give the reply buffer back to the IOC after we have
 * finished processing it.
 */
static __inline void
mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr)
{
	mpt_write(mpt, MPT_OFFSET_REPLY_Q, ptr);
}

/* Get a reply from the IOC */
static __inline uint32_t
mpt_pop_reply_queue(struct mpt_softc *mpt)
{
	return (mpt_read(mpt, MPT_OFFSET_REPLY_Q));
}

void mpt_complete_request_chain(struct mpt_softc *, struct req_queue *, u_int);

/************************** Scatter Gather Management **************************/
/* MPT_RQSL- size of request frame, in bytes */
#define	MPT_RQSL(mpt)		(mpt->ioc_facts.RequestFrameSize << 2)

/* MPT_NSGL- how many SG entries can fit in a request frame size */
#define	MPT_NSGL(mpt)		(MPT_RQSL(mpt) / sizeof (SGE_IO_UNION))

/* MPT_NRFM- how many request frames can fit in each request alloc we make */
#define	MPT_NRFM(mpt)		(MPT_REQUEST_AREA / MPT_RQSL(mpt))

/*
 * MPT_NSGL_FIRST- # of SG elements that can fit after
 * an I/O request but still within the request frame.
 * Do this safely based upon SGE_IO_UNION.
 *
 * Note that the first element is *within* the SCSI request.
 */
#define	MPT_NSGL_FIRST(mpt)	\
    ((MPT_RQSL(mpt) - sizeof (MSG_SCSI_IO_REQUEST) + sizeof (SGE_IO_UNION)) / \
    sizeof (SGE_IO_UNION))

/***************************** IOC Initialization *****************************/
int mpt_reset(struct mpt_softc *, int /*reinit*/);

/****************************** Debugging ************************************/
void mpt_dump_data(struct mpt_softc *, const char *, void *, int);
void mpt_dump_request(struct mpt_softc *, request_t *);

enum {
	MPT_PRT_ALWAYS,
	MPT_PRT_FATAL,
	MPT_PRT_ERROR,
	MPT_PRT_WARN,
	MPT_PRT_INFO,
	MPT_PRT_NEGOTIATION,
	MPT_PRT_DEBUG,
	MPT_PRT_DEBUG1,
	MPT_PRT_DEBUG2,
	MPT_PRT_DEBUG3,
	MPT_PRT_TRACE,
	MPT_PRT_NONE=100
};

#define mpt_lprt(mpt, level, ...)		\
do {						\
	if ((level) <= (mpt)->verbose)		\
		mpt_prt(mpt, __VA_ARGS__);	\
} while (0)

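/*
 * Example (illustrative): emit a message only when the unit's verbosity
 * is at least MPT_PRT_DEBUG:
 *
 *	mpt_lprt(mpt, MPT_PRT_DEBUG, "reply frame %p ignored\n", reply_frame);
 */
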
#if 0
#define mpt_lprtc(mpt, level, ...)		\
do {						\
	if ((level) <= (mpt)->verbose)		\
		mpt_prtc(mpt, __VA_ARGS__);	\
} while (0)
#endif

void mpt_prt(struct mpt_softc *, const char *, ...)
	__printflike(2, 3);
void mpt_prtc(struct mpt_softc *, const char *, ...)
	__printflike(2, 3);

/**************************** Target Mode Related ***************************/
#ifdef	INVARIANTS
static __inline request_t * mpt_tag_2_req(struct mpt_softc *, uint32_t);
static __inline request_t *
mpt_tag_2_req(struct mpt_softc *mpt, uint32_t tag)
{
	uint16_t rtg = (tag >> 18);
	KASSERT(rtg < mpt->tgt_cmds_allocated, ("bad tag %u", tag));
	KASSERT(mpt->tgt_cmd_ptrs, ("no cmd backpointer array"));
	KASSERT(mpt->tgt_cmd_ptrs[rtg], ("no cmd backpointer"));
	return (mpt->tgt_cmd_ptrs[rtg]);
}
#endif

static __inline int
mpt_req_on_free_list(struct mpt_softc *, request_t *);
static __inline int
mpt_req_on_pending_list(struct mpt_softc *, request_t *);

/*
 * Is request on freelist?
 */
static __inline int
mpt_req_on_free_list(struct mpt_softc *mpt, request_t *req)
{
	request_t *lrq;

	TAILQ_FOREACH(lrq, &mpt->request_free_list, links) {
		if (lrq == req) {
			return (1);
		}
	}
	return (0);
}

/*
 * Is request on pending list?
 */
static __inline int
mpt_req_on_pending_list(struct mpt_softc *mpt, request_t *req)
{
	request_t *lrq;

	TAILQ_FOREACH(lrq, &mpt->request_pending_list, links) {
		if (lrq == req) {
			return (1);
		}
	}
	return (0);
}

#ifdef	INVARIANTS
static __inline void
mpt_req_spcl(struct mpt_softc *, request_t *, const char *, int);
static __inline void
mpt_req_not_spcl(struct mpt_softc *, request_t *, const char *, int);

/*
 * Make sure that req *is* part of one of the special lists
 */
static __inline void
mpt_req_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
{
	int i;
	for (i = 0; i < mpt->els_cmds_allocated; i++) {
		if (req == mpt->els_cmd_ptrs[i]) {
			return;
		}
	}
	for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
		if (req == mpt->tgt_cmd_ptrs[i]) {
			return;
		}
	}
	panic("%s(%d): req %p:%u function %x not in els or tgt ptrs",
	    s, line, req, req->serno,
	    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function);
}

/*
 * Make sure that req is *not* part of one of the special lists.
 */
static __inline void
mpt_req_not_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
{
	int i;
	for (i = 0; i < mpt->els_cmds_allocated; i++) {
		KASSERT(req != mpt->els_cmd_ptrs[i],
		    ("%s(%d): req %p:%u func %x in els ptrs at ioindex %d",
		    s, line, req, req->serno,
		    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
	}
	for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
		KASSERT(req != mpt->tgt_cmd_ptrs[i],
		    ("%s(%d): req %p:%u func %x in tgt ptrs at ioindex %d",
		    s, line, req, req->serno,
		    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
	}
}
#endif

/*
 * Task Management Types, purely for internal consumption
 */
typedef enum {
	MPT_QUERY_TASK_SET=1234,
	MPT_ABORT_TASK_SET,
	MPT_CLEAR_TASK_SET,
	MPT_QUERY_ASYNC_EVENT,
	MPT_LOGICAL_UNIT_RESET,
	MPT_TARGET_RESET,
	MPT_CLEAR_ACA,
	MPT_NIL_TMT_VALUE=5678
} mpt_task_mgmt_t;

/**************************** Unclassified Routines ***************************/
void		mpt_send_cmd(struct mpt_softc *mpt, request_t *req);
int		mpt_recv_handshake_reply(struct mpt_softc *mpt,
					 size_t reply_len, void *reply);
int		mpt_wait_req(struct mpt_softc *mpt, request_t *req,
			     mpt_req_state_t state, mpt_req_state_t mask,
			     int sleep_ok, int time_ms);
void		mpt_enable_ints(struct mpt_softc *mpt);
void		mpt_disable_ints(struct mpt_softc *mpt);
int		mpt_attach(struct mpt_softc *mpt);
int		mpt_shutdown(struct mpt_softc *mpt);
int		mpt_detach(struct mpt_softc *mpt);
int		mpt_send_handshake_cmd(struct mpt_softc *mpt,
				       size_t len, void *cmd);
request_t *	mpt_get_request(struct mpt_softc *mpt, int sleep_ok);
void		mpt_free_request(struct mpt_softc *mpt, request_t *req);
void		mpt_intr(void *arg);
void		mpt_check_doorbell(struct mpt_softc *mpt);
void		mpt_dump_reply_frame(struct mpt_softc *mpt,
				     MSG_DEFAULT_REPLY *reply_frame);

int		mpt_issue_cfg_req(struct mpt_softc */*mpt*/, request_t */*req*/,
				  cfgparms_t *params,
				  bus_addr_t /*addr*/, bus_size_t/*len*/,
				  int /*sleep_ok*/, int /*timeout_ms*/);
int		mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion,
				       int PageNumber, uint32_t PageAddress,
				       int ExtPageType,
				       CONFIG_EXTENDED_PAGE_HEADER *rslt,
				       int sleep_ok, int timeout_ms);
int		mpt_read_extcfg_page(struct mpt_softc *mpt, int Action,
				     uint32_t PageAddress,
				     CONFIG_EXTENDED_PAGE_HEADER *hdr,
				     void *buf, size_t len, int sleep_ok,
				     int timeout_ms);
int		mpt_read_cfg_header(struct mpt_softc *, int /*PageType*/,
				    int /*PageNumber*/,
				    uint32_t /*PageAddress*/,
				    CONFIG_PAGE_HEADER *,
				    int /*sleep_ok*/, int /*timeout_ms*/);
int		mpt_read_cfg_page(struct mpt_softc *t, int /*Action*/,
				  uint32_t /*PageAddress*/,
				  CONFIG_PAGE_HEADER *, size_t /*len*/,
				  int /*sleep_ok*/, int /*timeout_ms*/);
int		mpt_write_cfg_page(struct mpt_softc *, int /*Action*/,
				   uint32_t /*PageAddress*/,
				   CONFIG_PAGE_HEADER *, size_t /*len*/,
				   int /*sleep_ok*/, int /*timeout_ms*/);
static __inline int
mpt_read_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
		      CONFIG_PAGE_HEADER *hdr, size_t len,
		      int sleep_ok, int timeout_ms)
{
	return (mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
				  PageAddress, hdr, len, sleep_ok, timeout_ms));
}

static __inline int
mpt_write_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
		       CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
		       int timeout_ms)
{
	return (mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT,
				   PageAddress, hdr, len, sleep_ok,
				   timeout_ms));
}

/* mpt_debug.c functions */
void mpt_print_reply(void *vmsg);
void mpt_print_db(uint32_t mb);
void mpt_print_config_reply(void *vmsg);
char *mpt_ioc_diag(uint32_t diag);
void mpt_req_state(mpt_req_state_t state);
void mpt_print_config_request(void *vmsg);
void mpt_print_request(void *vmsg);
void mpt_dump_sgl(SGE_IO_UNION *se, int offset);

#endif /* _MPT_H_ */