/*-
 * Copyright (c) 2008 Yahoo!, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LSI MPT-Fusion Host Adapter FreeBSD userland interface
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/mpt/mpt_user.c 360451 2020-04-28 20:14:38Z brooks $");

#include <sys/param.h>
#ifdef __amd64__
#include <sys/abi_compat.h>
#endif
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/ioccom.h>
#include <sys/mpt_ioctl.h>

#include <dev/mpt/mpt.h>

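/*
 * RAID action results are stashed in the request buffer just past the
 * request frame (see mpt_user_reply_handler()) so that the sleeping
 * ioctl path can copy them back out to userland.
 */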
struct mpt_user_raid_action_result {
	uint32_t	volume_status;
	uint32_t	action_data[4];
	uint16_t	action_status;
};

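/*
 * A single bus_dma(9) buffer used to stage configuration pages and RAID
 * action data between userland and the controller.
 */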
struct mpt_page_memory {
	bus_dma_tag_t	tag;
	bus_dmamap_t	map;
	bus_addr_t	paddr;
	void		*vaddr;
};

static mpt_probe_handler_t	mpt_user_probe;
static mpt_attach_handler_t	mpt_user_attach;
static mpt_enable_handler_t	mpt_user_enable;
static mpt_ready_handler_t	mpt_user_ready;
static mpt_event_handler_t	mpt_user_event;
static mpt_reset_handler_t	mpt_user_reset;
static mpt_detach_handler_t	mpt_user_detach;

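/*
 * The "mpt_user" personality hooks this interface into the core mpt(4)
 * driver; the probe routine accepts every controller the core discovers.
 */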
static struct mpt_personality mpt_user_personality = {
	.name		= "mpt_user",
	.probe		= mpt_user_probe,
	.attach		= mpt_user_attach,
	.enable		= mpt_user_enable,
	.ready		= mpt_user_ready,
	.event		= mpt_user_event,
	.reset		= mpt_user_reset,
	.detach		= mpt_user_detach,
};

DECLARE_MPT_PERSONALITY(mpt_user, SI_ORDER_SECOND);

static mpt_reply_handler_t	mpt_user_reply_handler;

static d_open_t		mpt_open;
static d_close_t	mpt_close;
static d_ioctl_t	mpt_ioctl;

static struct cdevsw mpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mpt_open,
	.d_close =	mpt_close,
	.d_ioctl =	mpt_ioctl,
	.d_name =	"mpt",
};

static MALLOC_DEFINE(M_MPTUSER, "mpt_user", "Buffers for mpt(4) ioctls");

static uint32_t user_handler_id = MPT_HANDLER_ID_NONE;

static int
mpt_user_probe(struct mpt_softc *mpt)
{

	/* Attach to every controller. */
	return (0);
}

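/*
 * Register a reply handler for user-initiated requests and create the
 * /dev/mpt<unit> character device through which the ioctls are issued.
 */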
static int
mpt_user_attach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;
	int error, unit;

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &user_handler_id);
	MPT_UNLOCK(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Unable to register user handler!\n");
		return (error);
	}
	unit = device_get_unit(mpt->dev);
	mpt->cdev = make_dev(&mpt_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
	    "mpt%d", unit);
	if (mpt->cdev == NULL) {
		MPT_LOCK(mpt);
		mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
		    user_handler_id);
		MPT_UNLOCK(mpt);
		return (ENOMEM);
	}
	mpt->cdev->si_drv1 = mpt;
	return (0);
}

static int
mpt_user_enable(struct mpt_softc *mpt)
{

	return (0);
}

static void
mpt_user_ready(struct mpt_softc *mpt)
{

}

static int
mpt_user_event(struct mpt_softc *mpt, request_t *req,
    MSG_EVENT_NOTIFY_REPLY *msg)
{

	/* Someday we may want to let a user daemon listen for events? */
	return (0);
}

static void
mpt_user_reset(struct mpt_softc *mpt, int type)
{

}

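/*
 * Destroy the character device and deregister the reply handler when the
 * controller goes away.
 */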
static void
mpt_user_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	/* XXX: do a purge of pending requests? */
	destroy_dev(mpt->cdev);

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    user_handler_id);
	MPT_UNLOCK(mpt);
}

static int
mpt_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	return (0);
}

static int
mpt_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	return (0);
}

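/*
 * Allocate a DMA-able bounce buffer (at most 16MB, below 4GB physical)
 * for staging a configuration page or RAID action data transfer.
 */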
static int
mpt_alloc_buffer(struct mpt_softc *mpt, struct mpt_page_memory *page_mem,
    size_t len)
{
	struct mpt_map_info mi;
	int error;

	page_mem->vaddr = NULL;

	/* Limit requests to 16M. */
	if (len > 16 * 1024 * 1024)
		return (ENOSPC);
	error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, &page_mem->tag);
	if (error)
		return (error);
	error = bus_dmamem_alloc(page_mem->tag, &page_mem->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &page_mem->map);
	if (error) {
		bus_dma_tag_destroy(page_mem->tag);
		return (error);
	}
	mi.mpt = mpt;
	error = bus_dmamap_load(page_mem->tag, page_mem->map, page_mem->vaddr,
	    len, mpt_map_rquest, &mi, BUS_DMA_NOWAIT);
	if (error == 0)
		error = mi.error;
	if (error) {
		bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
		bus_dma_tag_destroy(page_mem->tag);
		page_mem->vaddr = NULL;
		return (error);
	}
	page_mem->paddr = mi.phys;
	return (0);
}

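/*
 * Release a buffer obtained from mpt_alloc_buffer().  Safe to call even
 * if no allocation was made (vaddr == NULL).
 */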
static void
mpt_free_buffer(struct mpt_page_memory *page_mem)
{

	if (page_mem->vaddr == NULL)
		return;
	bus_dmamap_unload(page_mem->tag, page_mem->map);
	bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
	bus_dma_tag_destroy(page_mem->tag);
	page_mem->vaddr = NULL;
}

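/*
 * Ask the IOC for the header of the configuration page identified in
 * page_req and copy it back into the request on success.
 */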
static int
mpt_user_read_cfg_header(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG *cfgp;
	int	    error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = 0;
	params.PageLength = 0;
	params.PageNumber = page_req->header.PageNumber;
	params.PageType = page_req->header.PageType;
	params.PageAddress = le32toh(page_req->page_address);
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, &page_req->header,
		    sizeof(page_req->header));
	}
	mpt_free_request(mpt, req);
	return (0);
}

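/*
 * Read the current values of a configuration page into the DMA buffer.
 * The caller supplies the page header (previously fetched via
 * MPTIO_READ_CFG_HEADER) at the start of the buffer.
 */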
static int
mpt_user_read_cfg_page(struct mpt_softc *mpt, struct mpt_cfg_page_req *page_req,
    struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	int	      error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
	params.PageAddress = le32toh(page_req->page_address);
	bus_dmamap_sync(mpt_page->tag, mpt_page->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_read_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}

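/*
 * Like mpt_user_read_cfg_header(), but for extended configuration pages
 * (PageType MPI_CONFIG_PAGETYPE_EXTENDED).
 */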
static int
mpt_user_read_extcfg_header(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG_REPLY *cfgp;
	int	    error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = ext_page_req->header.PageVersion;
	params.PageLength = 0;
	params.PageNumber = ext_page_req->header.PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = ext_page_req->header.ExtPageType;
	params.ExtPageLength = 0;
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "mpt_user_read_extcfg_header timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		ext_page_req->header.PageVersion = cfgp->Header.PageVersion;
		ext_page_req->header.PageNumber = cfgp->Header.PageNumber;
		ext_page_req->header.PageType = cfgp->Header.PageType;
		ext_page_req->header.ExtPageLength = cfgp->ExtPageLength;
		ext_page_req->header.ExtPageType = cfgp->ExtPageType;
	}
	mpt_free_request(mpt, req);
	return (0);
}

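/*
 * Like mpt_user_read_cfg_page(), but for extended configuration pages.
 */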
static int
mpt_user_read_extcfg_page(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_EXTENDED_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	int	      error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = 0;
	params.PageNumber = hdr->PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = hdr->ExtPageType;
	params.ExtPageLength = hdr->ExtPageLength;
	bus_dmamap_sync(mpt_page->tag, mpt_page->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(ext_page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}

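/*
 * Write the current values of a configuration page from the DMA buffer.
 * Only pages marked changeable or persistent may be written.
 */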
static int
mpt_user_write_cfg_page(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	u_int	      hdr_attr;
	int	      error;

	hdr = mpt_page->vaddr;
	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
			hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (EINVAL);
	}

#if	0
	/*
	 * We shouldn't mask off other bits here.
	 */
	hdr->PageType &= ~MPI_CONFIG_PAGETYPE_MASK;
#endif

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);

	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/*
	 * There isn't any point in restoring stripped out attributes
	 * if you then mask them going down to issue the request.
	 */

	params.Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageAddress = le32toh(page_req->page_address);
#if	0
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
	params.PageType = hdr->PageType;
#endif
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}

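/*
 * Completion handler for user-initiated requests.  For RAID actions the
 * interesting bits of the reply are copied into the result area that
 * follows the request frame before the sleeping ioctl is woken up.
 * Requests that already timed out are freed here instead.
 */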
static int
mpt_user_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_RAID_ACTION_REPLY *reply;
	struct mpt_user_raid_action_result *res;

	if (req == NULL)
		return (TRUE);

	if (reply_frame != NULL) {
		reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
		req->IOCStatus = le16toh(reply->IOCStatus);
		res = (struct mpt_user_raid_action_result *)
		    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
		res->action_status = reply->ActionStatus;
		res->volume_status = reply->VolumeStatus;
		bcopy(&reply->ActionData, res->action_data,
		    sizeof(res->action_data));
	}

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
		/*
		 * Whew- we can free this request (late completion)
		 */
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}

/*
 * We use the first part of the request buffer after the request frame
 * to hold the action data and action status from the RAID reply.  The
 * rest of the request buffer is used to hold the buffer for the
 * action SGE.
 */
static int
mpt_user_raid_action(struct mpt_softc *mpt, struct mpt_raid_action *raid_act,
	struct mpt_page_memory *mpt_page)
{
	request_t *req;
	struct mpt_user_raid_action_result *res;
	MSG_RAID_ACTION_REQUEST *rap;
	SGE_SIMPLE32 *se;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);
	rap = req->req_vbuf;
	memset(rap, 0, sizeof *rap);
	rap->Action = raid_act->action;
	rap->ActionDataWord = raid_act->action_data_word;
	rap->Function = MPI_FUNCTION_RAID_ACTION;
	rap->VolumeID = raid_act->volume_id;
	rap->VolumeBus = raid_act->volume_bus;
	rap->PhysDiskNum = raid_act->phys_disk_num;
	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
	if (mpt_page->vaddr != NULL && raid_act->len != 0) {
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		se->Address = htole32(mpt_page->paddr);
		MPI_pSGE_SET_LENGTH(se, le32toh(raid_act->len));
		MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_END_OF_LIST |
		    (raid_act->write ? MPI_SGE_FLAGS_HOST_TO_IOC :
		    MPI_SGE_FLAGS_IOC_TO_HOST)));
	}
	se->FlagsLength = htole32(se->FlagsLength);
	rap->MsgContext = htole32(req->index | user_handler_id);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);

	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE,
	    2000);
	if (error != 0) {
		/*
		 * Leave request so it can be cleaned up later.
		 */
		mpt_prt(mpt, "mpt_user_raid_action timed out\n");
		return (error);
	}

	raid_act->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		return (0);
	}

	res = (struct mpt_user_raid_action_result *)
	    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
	raid_act->volume_status = res->volume_status;
	raid_act->action_status = res->action_status;
	bcopy(res->action_data, raid_act->action_data,
	    sizeof(res->action_data));
	if (mpt_page->vaddr != NULL)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}

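/*
 * ioctl entry point.  On amd64, requests from 32-bit processes (the
 * MPTIO_*32 commands) are converted to the native layout on the way in
 * and back again on the way out; any buffer referenced by a request is
 * bounced through a DMA-able allocation with copyin()/copyout().
 *
 * A minimal userland sketch (hypothetical values, error handling omitted):
 *
 *	struct mpt_cfg_page_req req = { 0 };
 *	int fd = open("/dev/mpt0", O_RDWR);
 *
 *	req.header.PageType = MPI_CONFIG_PAGETYPE_IOC;
 *	req.header.PageNumber = 2;
 *	ioctl(fd, MPTIO_READ_CFG_HEADER, &req);
 */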
static int
mpt_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
	struct mpt_softc *mpt;
	struct mpt_cfg_page_req *page_req;
	struct mpt_ext_cfg_page_req *ext_page_req;
	struct mpt_raid_action *raid_act;
	struct mpt_page_memory mpt_page;
#ifdef __amd64__
	struct mpt_cfg_page_req32 *page_req32;
	struct mpt_cfg_page_req page_req_swab;
	struct mpt_ext_cfg_page_req32 *ext_page_req32;
	struct mpt_ext_cfg_page_req ext_page_req_swab;
	struct mpt_raid_action32 *raid_act32;
	struct mpt_raid_action raid_act_swab;
#endif
	int error;

	mpt = dev->si_drv1;
	page_req = (void *)arg;
	ext_page_req = (void *)arg;
	raid_act = (void *)arg;
	mpt_page.vaddr = NULL;

#ifdef __amd64__
	/* Convert 32-bit structs to native ones. */
	page_req32 = (void *)arg;
	ext_page_req32 = (void *)arg;
	raid_act32 = (void *)arg;
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req = &page_req_swab;
		page_req->header = page_req32->header;
		page_req->page_address = page_req32->page_address;
		page_req->buf = PTRIN(page_req32->buf);
		page_req->len = page_req32->len;
		page_req->ioc_status = page_req32->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req = &ext_page_req_swab;
		ext_page_req->header = ext_page_req32->header;
		ext_page_req->page_address = ext_page_req32->page_address;
		ext_page_req->buf = PTRIN(ext_page_req32->buf);
		ext_page_req->len = ext_page_req32->len;
		ext_page_req->ioc_status = ext_page_req32->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act = &raid_act_swab;
		raid_act->action = raid_act32->action;
		raid_act->volume_bus = raid_act32->volume_bus;
		raid_act->volume_id = raid_act32->volume_id;
		raid_act->phys_disk_num = raid_act32->phys_disk_num;
		raid_act->action_data_word = raid_act32->action_data_word;
		raid_act->buf = PTRIN(raid_act32->buf);
		raid_act->len = raid_act32->len;
		raid_act->volume_status = raid_act32->volume_status;
		bcopy(raid_act32->action_data, raid_act->action_data,
		    sizeof(raid_act->action_data));
		raid_act->action_status = raid_act32->action_status;
		raid_act->ioc_status = raid_act32->ioc_status;
		raid_act->write = raid_act32->write;
		break;
	}
#endif

	switch (cmd) {
#ifdef __amd64__
	case MPTIO_READ_CFG_HEADER32:
#endif
	case MPTIO_READ_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_header(mpt, page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_READ_CFG_PAGE32:
#endif
	case MPTIO_READ_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		error = copyin(page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, page_req->buf, page_req->len);
		break;
#ifdef __amd64__
	case MPTIO_READ_EXT_CFG_HEADER32:
#endif
	case MPTIO_READ_EXT_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_header(mpt, ext_page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_READ_EXT_CFG_PAGE32:
#endif
	case MPTIO_READ_EXT_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, ext_page_req->len);
		if (error)
			break;
		error = copyin(ext_page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_EXTENDED_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_page(mpt, ext_page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, ext_page_req->buf,
		    ext_page_req->len);
		break;
#ifdef __amd64__
	case MPTIO_WRITE_CFG_PAGE32:
#endif
	case MPTIO_WRITE_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		error = copyin(page_req->buf, mpt_page.vaddr, page_req->len);
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_write_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_RAID_ACTION32:
#endif
	case MPTIO_RAID_ACTION:
		if (raid_act->buf != NULL) {
			error = mpt_alloc_buffer(mpt, &mpt_page, raid_act->len);
			if (error)
				break;
			error = copyin(raid_act->buf, mpt_page.vaddr,
			    raid_act->len);
			if (error)
				break;
		}
		MPT_LOCK(mpt);
		error = mpt_user_raid_action(mpt, raid_act, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		if (raid_act->buf != NULL)
			error = copyout(mpt_page.vaddr, raid_act->buf,
			    raid_act->len);
		break;
	default:
		error = ENOIOCTL;
		break;
	}

	mpt_free_buffer(&mpt_page);

	if (error)
		return (error);

#ifdef __amd64__
	/* Convert native structs to 32-bit ones. */
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req32->header = page_req->header;
		page_req32->page_address = page_req->page_address;
		page_req32->buf = PTROUT(page_req->buf);
		page_req32->len = page_req->len;
		page_req32->ioc_status = page_req->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req32->header = ext_page_req->header;
		ext_page_req32->page_address = ext_page_req->page_address;
		ext_page_req32->buf = PTROUT(ext_page_req->buf);
		ext_page_req32->len = ext_page_req->len;
		ext_page_req32->ioc_status = ext_page_req->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act32->action = raid_act->action;
		raid_act32->volume_bus = raid_act->volume_bus;
		raid_act32->volume_id = raid_act->volume_id;
		raid_act32->phys_disk_num = raid_act->phys_disk_num;
		raid_act32->action_data_word = raid_act->action_data_word;
		raid_act32->buf = PTROUT(raid_act->buf);
		raid_act32->len = raid_act->len;
		raid_act32->volume_status = raid_act->volume_status;
		bcopy(raid_act->action_data, raid_act32->action_data,
		    sizeof(raid_act->action_data));
		raid_act32->action_status = raid_act->action_status;
		raid_act32->ioc_status = raid_act->ioc_status;
		raid_act32->write = raid_act->write;
		break;
	}
#endif

	return (0);
}