/*-
 * Copyright (c) 2008 Yahoo!, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LSI MPT-Fusion Host Adapter FreeBSD userland interface
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/mpt/mpt_user.c 192450 2009-05-20 17:29:21Z imp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/ioccom.h>
#include <sys/mpt_ioctl.h>

#include <dev/mpt/mpt.h>

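/*
 * RAID action status and data returned by the controller; it is stored in
 * the request buffer just past the request frame (see
 * mpt_user_reply_handler() and mpt_user_raid_action() below).
 */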
struct mpt_user_raid_action_result {
	uint32_t	volume_status;
	uint32_t	action_data[4];
	uint16_t	action_status;
};

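/*
 * A single bus_dma(9) mapped buffer used to stage configuration pages and
 * RAID action data between userland and the controller.
 */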
struct mpt_page_memory {
	bus_dma_tag_t	tag;
	bus_dmamap_t	map;
	bus_addr_t	paddr;
	void		*vaddr;
};

static mpt_probe_handler_t	mpt_user_probe;
static mpt_attach_handler_t	mpt_user_attach;
static mpt_enable_handler_t	mpt_user_enable;
static mpt_ready_handler_t	mpt_user_ready;
static mpt_event_handler_t	mpt_user_event;
static mpt_reset_handler_t	mpt_user_reset;
static mpt_detach_handler_t	mpt_user_detach;

static struct mpt_personality mpt_user_personality = {
	.name		= "mpt_user",
	.probe		= mpt_user_probe,
	.attach		= mpt_user_attach,
	.enable		= mpt_user_enable,
	.ready		= mpt_user_ready,
	.event		= mpt_user_event,
	.reset		= mpt_user_reset,
	.detach		= mpt_user_detach,
};

DECLARE_MPT_PERSONALITY(mpt_user, SI_ORDER_SECOND);

static mpt_reply_handler_t	mpt_user_reply_handler;

static d_open_t		mpt_open;
static d_close_t	mpt_close;
static d_ioctl_t	mpt_ioctl;

static struct cdevsw mpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mpt_open,
	.d_close =	mpt_close,
	.d_ioctl =	mpt_ioctl,
	.d_name =	"mpt",
};

static MALLOC_DEFINE(M_MPTUSER, "mpt_user", "Buffers for mpt(4) ioctls");

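/* Reply handler id assigned by mpt_register_handler() at attach time. */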
static uint32_t user_handler_id = MPT_HANDLER_ID_NONE;

int
mpt_user_probe(struct mpt_softc *mpt)
{

	/* Attach to every controller. */
	return (0);
}

int
mpt_user_attach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;
	int error, unit;

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &user_handler_id);
	MPT_UNLOCK(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Unable to register user handler!\n");
		return (error);
	}
	unit = device_get_unit(mpt->dev);
	mpt->cdev = make_dev(&mpt_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
	    "mpt%d", unit);
	if (mpt->cdev == NULL) {
		MPT_LOCK(mpt);
		mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
		    user_handler_id);
		MPT_UNLOCK(mpt);
		return (ENOMEM);
	}
	mpt->cdev->si_drv1 = mpt;
	return (0);
}

int
mpt_user_enable(struct mpt_softc *mpt)
{

	return (0);
}

void
mpt_user_ready(struct mpt_softc *mpt)
{
}

int
mpt_user_event(struct mpt_softc *mpt, request_t *req,
    MSG_EVENT_NOTIFY_REPLY *msg)
{

	/* Someday we may want to let a user daemon listen for events? */
	return (0);
}

void
mpt_user_reset(struct mpt_softc *mpt, int type)
{
}

void
mpt_user_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	/* XXX: do a purge of pending requests? */
	destroy_dev(mpt->cdev);

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    user_handler_id);
	MPT_UNLOCK(mpt);
}

static int
mpt_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	return (0);
}

static int
mpt_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	return (0);
}

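/*
 * Allocate and map a DMA-able buffer (capped at 16MB) large enough to hold
 * the page or RAID action data for a single ioctl request.
 */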
static int
mpt_alloc_buffer(struct mpt_softc *mpt, struct mpt_page_memory *page_mem,
    size_t len)
{
	struct mpt_map_info mi;
	int error;

	page_mem->vaddr = NULL;

	/* Limit requests to 16M. */
	if (len > 16 * 1024 * 1024)
		return (ENOSPC);
	error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, &page_mem->tag);
	if (error)
		return (error);
	error = bus_dmamem_alloc(page_mem->tag, &page_mem->vaddr,
	    BUS_DMA_NOWAIT, &page_mem->map);
	if (error) {
		bus_dma_tag_destroy(page_mem->tag);
		return (error);
	}
	mi.mpt = mpt;
	error = bus_dmamap_load(page_mem->tag, page_mem->map, page_mem->vaddr,
	    len, mpt_map_rquest, &mi, BUS_DMA_NOWAIT);
	if (error == 0)
		error = mi.error;
	if (error) {
		bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
		bus_dma_tag_destroy(page_mem->tag);
		page_mem->vaddr = NULL;
		return (error);
	}
	page_mem->paddr = mi.phys;
	return (0);
}

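/* Release the DMA buffer set up by mpt_alloc_buffer(). */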
static void
mpt_free_buffer(struct mpt_page_memory *page_mem)
{

	if (page_mem->vaddr == NULL)
		return;
	bus_dmamap_unload(page_mem->tag, page_mem->map);
	bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
	bus_dma_tag_destroy(page_mem->tag);
	page_mem->vaddr = NULL;
}

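/*
 * Issue a CONFIG page-header request on behalf of the user and copy the
 * returned header back into the ioctl argument.
 */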
static int
mpt_user_read_cfg_header(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG *cfgp;
	int	    error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = 0;
	params.PageLength = 0;
	params.PageNumber = page_req->header.PageNumber;
	params.PageType = page_req->header.PageType;
	params.PageAddress = le32toh(page_req->page_address);
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, &page_req->header,
		    sizeof(page_req->header));
	}
	mpt_free_request(mpt, req);
	return (0);
}

static int
mpt_user_read_cfg_page(struct mpt_softc *mpt, struct mpt_cfg_page_req *page_req,
    struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	int	      error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
	params.PageAddress = le32toh(page_req->page_address);
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_read_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD);
	mpt_free_request(mpt, req);
	return (0);
}

static int
mpt_user_read_extcfg_header(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG_REPLY *cfgp;
	int	    error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = ext_page_req->header.PageVersion;
	params.PageLength = 0;
	params.PageNumber = ext_page_req->header.PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = ext_page_req->header.ExtPageType;
	params.ExtPageLength = 0;
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "mpt_user_read_extcfg_header timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		ext_page_req->header.PageVersion = cfgp->Header.PageVersion;
		ext_page_req->header.PageNumber = cfgp->Header.PageNumber;
		ext_page_req->header.PageType = cfgp->Header.PageType;
		ext_page_req->header.ExtPageLength = cfgp->ExtPageLength;
		ext_page_req->header.ExtPageType = cfgp->ExtPageType;
	}
	mpt_free_request(mpt, req);
	return (0);
}

static int
mpt_user_read_extcfg_page(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_EXTENDED_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	int	      error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = 0;
	params.PageNumber = hdr->PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = hdr->ExtPageType;
	params.ExtPageLength = hdr->ExtPageLength;
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(ext_page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD);
	mpt_free_request(mpt, req);
	return (0);
}

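/*
 * Write the user-supplied current values of a configuration page.  Only
 * pages marked changeable or persistent are accepted.
 */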
static int
mpt_user_write_cfg_page(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	u_int	      hdr_attr;
	int	      error;

	hdr = mpt_page->vaddr;
	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
			hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (EINVAL);
	}

#if	0
	/*
	 * We shouldn't mask off other bits here.
	 */
	hdr->PageType &= ~MPI_CONFIG_PAGETYPE_MASK;
#endif

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);

	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_PREWRITE);

	/*
	 * There isn't any point in restoring stripped out attributes
	 * if you then mask them going down to issue the request.
	 */

	params.Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageAddress = le32toh(page_req->page_address);
#if	0
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
	params.PageType = hdr->PageType;
#endif
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	mpt_free_request(mpt, req);
	return (0);
}

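/*
 * Completion handler for user-initiated requests.  For RAID actions the
 * interesting parts of the reply are stashed past the request frame so
 * mpt_user_raid_action() can return them after being woken up.
 */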
static int
mpt_user_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_RAID_ACTION_REPLY *reply;
	struct mpt_user_raid_action_result *res;

	if (req == NULL)
		return (TRUE);

	if (reply_frame != NULL) {
		bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
		    BUS_DMASYNC_POSTREAD);
		reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
		req->IOCStatus = le16toh(reply->IOCStatus);
		res = (struct mpt_user_raid_action_result *)
		    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
		res->action_status = reply->ActionStatus;
		res->volume_status = reply->VolumeStatus;
		bcopy(&reply->ActionData, res->action_data,
		    sizeof(res->action_data));
	}

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
		/*
		 * Whew- we can free this request (late completion)
		 */
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}

/*
 * We use the first part of the request buffer after the request frame
 * to hold the action data and action status from the RAID reply.  The
 * rest of the request buffer is used to hold the buffer for the
 * action SGE.
 */
static int
mpt_user_raid_action(struct mpt_softc *mpt, struct mpt_raid_action *raid_act,
	struct mpt_page_memory *mpt_page)
{
	request_t *req;
	struct mpt_user_raid_action_result *res;
	MSG_RAID_ACTION_REQUEST *rap;
	SGE_SIMPLE32 *se;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);
	rap = req->req_vbuf;
	memset(rap, 0, sizeof *rap);
	rap->Action = raid_act->action;
	rap->ActionDataWord = raid_act->action_data_word;
	rap->Function = MPI_FUNCTION_RAID_ACTION;
	rap->VolumeID = raid_act->volume_id;
	rap->VolumeBus = raid_act->volume_bus;
	rap->PhysDiskNum = raid_act->phys_disk_num;
	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
	if (mpt_page->vaddr != NULL && raid_act->len != 0) {
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_PREWRITE);
		se->Address = htole32(mpt_page->paddr);
		MPI_pSGE_SET_LENGTH(se, le32toh(raid_act->len));
		MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_END_OF_LIST |
		    (raid_act->write ? MPI_SGE_FLAGS_HOST_TO_IOC :
		    MPI_SGE_FLAGS_IOC_TO_HOST)));
	}
	se->FlagsLength = htole32(se->FlagsLength);
	rap->MsgContext = htole32(req->index | user_handler_id);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);

	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE,
	    2000);
	if (error != 0) {
		/*
		 * Leave request so it can be cleaned up later.
		 */
		mpt_prt(mpt, "mpt_user_raid_action timed out\n");
		return (error);
	}

	raid_act->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		return (0);
	}

	res = (struct mpt_user_raid_action_result *)
	    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
	raid_act->volume_status = res->volume_status;
	raid_act->action_status = res->action_status;
	bcopy(res->action_data, raid_act->action_data,
	    sizeof(res->action_data));
	if (mpt_page->vaddr != NULL)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD);
	mpt_free_request(mpt, req);
	return (0);
}

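/*
 * Pointer conversion helpers for the 32-bit versions of the ioctl
 * structures used by 32-bit consumers on amd64.
 */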
#ifdef __amd64__
#define	PTRIN(p)		((void *)(uintptr_t)(p))
#define	PTROUT(v)		((u_int32_t)(uintptr_t)(v))
#endif

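/*
 * Dispatch MPTIO_* requests from userland.  On amd64 the 32-bit variants
 * are converted to the native layout on entry and converted back before
 * returning.
 *
 * Minimal userland usage sketch (illustrative only, not compiled here;
 * assumes <sys/mpt_ioctl.h> plus the usual libc headers and an adapter at
 * unit 0):
 *
 *	struct mpt_cfg_page_req req;
 *	int fd;
 *
 *	fd = open("/dev/mpt0", O_RDWR);
 *	memset(&req, 0, sizeof(req));
 *	req.header.PageType = MPI_CONFIG_PAGETYPE_IOC;
 *	req.header.PageNumber = 2;
 *	if (fd >= 0 && ioctl(fd, MPTIO_READ_CFG_HEADER, &req) == 0)
 *		printf("IOC page 2 length: %u\n", req.header.PageLength);
 */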
static int
mpt_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct mpt_softc *mpt;
	struct mpt_cfg_page_req *page_req;
	struct mpt_ext_cfg_page_req *ext_page_req;
	struct mpt_raid_action *raid_act;
	struct mpt_page_memory mpt_page;
#ifdef __amd64__
	struct mpt_cfg_page_req32 *page_req32;
	struct mpt_cfg_page_req page_req_swab;
	struct mpt_ext_cfg_page_req32 *ext_page_req32;
	struct mpt_ext_cfg_page_req ext_page_req_swab;
	struct mpt_raid_action32 *raid_act32;
	struct mpt_raid_action raid_act_swab;
#endif
	int error;

	mpt = dev->si_drv1;
	page_req = (void *)arg;
	ext_page_req = (void *)arg;
	raid_act = (void *)arg;
	mpt_page.vaddr = NULL;

#ifdef __amd64__
	/* Convert 32-bit structs to native ones. */
	page_req32 = (void *)arg;
	ext_page_req32 = (void *)arg;
	raid_act32 = (void *)arg;
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req = &page_req_swab;
		page_req->header = page_req32->header;
		page_req->page_address = page_req32->page_address;
		page_req->buf = PTRIN(page_req32->buf);
		page_req->len = page_req32->len;
		page_req->ioc_status = page_req32->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req = &ext_page_req_swab;
		ext_page_req->header = ext_page_req32->header;
		ext_page_req->page_address = ext_page_req32->page_address;
		ext_page_req->buf = PTRIN(ext_page_req32->buf);
		ext_page_req->len = ext_page_req32->len;
		ext_page_req->ioc_status = ext_page_req32->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act = &raid_act_swab;
		raid_act->action = raid_act32->action;
		raid_act->volume_bus = raid_act32->volume_bus;
		raid_act->volume_id = raid_act32->volume_id;
		raid_act->phys_disk_num = raid_act32->phys_disk_num;
		raid_act->action_data_word = raid_act32->action_data_word;
		raid_act->buf = PTRIN(raid_act32->buf);
		raid_act->len = raid_act32->len;
		raid_act->volume_status = raid_act32->volume_status;
		bcopy(raid_act32->action_data, raid_act->action_data,
		    sizeof(raid_act->action_data));
		raid_act->action_status = raid_act32->action_status;
		raid_act->ioc_status = raid_act32->ioc_status;
		raid_act->write = raid_act32->write;
		break;
	}
#endif

	switch (cmd) {
#ifdef __amd64__
	case MPTIO_READ_CFG_HEADER32:
#endif
	case MPTIO_READ_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_header(mpt, page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_READ_CFG_PAGE32:
#endif
	case MPTIO_READ_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		error = copyin(page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, page_req->buf, page_req->len);
		break;
#ifdef __amd64__
	case MPTIO_READ_EXT_CFG_HEADER32:
#endif
	case MPTIO_READ_EXT_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_header(mpt, ext_page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_READ_EXT_CFG_PAGE32:
#endif
	case MPTIO_READ_EXT_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, ext_page_req->len);
		if (error)
			break;
		error = copyin(ext_page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_EXTENDED_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_page(mpt, ext_page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, ext_page_req->buf,
		    ext_page_req->len);
		break;
#ifdef __amd64__
	case MPTIO_WRITE_CFG_PAGE32:
#endif
	case MPTIO_WRITE_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		error = copyin(page_req->buf, mpt_page.vaddr, page_req->len);
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_write_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_RAID_ACTION32:
#endif
	case MPTIO_RAID_ACTION:
		if (raid_act->buf != NULL) {
			error = mpt_alloc_buffer(mpt, &mpt_page, raid_act->len);
			if (error)
				break;
			error = copyin(raid_act->buf, mpt_page.vaddr,
			    raid_act->len);
			if (error)
				break;
		}
		MPT_LOCK(mpt);
		error = mpt_user_raid_action(mpt, raid_act, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		if (raid_act->buf != NULL)
			error = copyout(mpt_page.vaddr, raid_act->buf,
			    raid_act->len);
		break;
	default:
		error = ENOIOCTL;
		break;
	}

	mpt_free_buffer(&mpt_page);

	if (error)
		return (error);

#ifdef __amd64__
	/* Convert native structs to 32-bit ones. */
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req32->header = page_req->header;
		page_req32->page_address = page_req->page_address;
		page_req32->buf = PTROUT(page_req->buf);
		page_req32->len = page_req->len;
		page_req32->ioc_status = page_req->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req32->header = ext_page_req->header;
		ext_page_req32->page_address = ext_page_req->page_address;
		ext_page_req32->buf = PTROUT(ext_page_req->buf);
		ext_page_req32->len = ext_page_req->len;
		ext_page_req32->ioc_status = ext_page_req->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act32->action = raid_act->action;
		raid_act32->volume_bus = raid_act->volume_bus;
		raid_act32->volume_id = raid_act->volume_id;
		raid_act32->phys_disk_num = raid_act->phys_disk_num;
		raid_act32->action_data_word = raid_act->action_data_word;
		raid_act32->buf = PTROUT(raid_act->buf);
		raid_act32->len = raid_act->len;
		raid_act32->volume_status = raid_act->volume_status;
		bcopy(raid_act->action_data, raid_act32->action_data,
		    sizeof(raid_act->action_data));
		raid_act32->action_status = raid_act->action_status;
		raid_act32->ioc_status = raid_act->ioc_status;
		raid_act32->write = raid_act->write;
		break;
	}
#endif

	return (0);
}