1/*	$NetBSD: ips.c,v 1.7 2024/01/08 18:38:25 chs Exp $	*/
2/*	$OpenBSD: ips.c,v 1.113 2016/08/14 04:08:03 dlg Exp $	*/
3
4/*-
5 * Copyright (c) 2017 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/*
31 * Copyright (c) 2006, 2007, 2009 Alexander Yurchenko <grange@openbsd.org>
32 *
33 * Permission to use, copy, modify, and distribute this software for any
34 * purpose with or without fee is hereby granted, provided that the above
35 * copyright notice and this permission notice appear in all copies.
36 *
37 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
38 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
39 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
40 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
41 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
42 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
43 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
44 */
45
46/*
47 * IBM (Adaptec) ServeRAID controllers driver.
48 */
49
50#include <sys/cdefs.h>
51__KERNEL_RCSID(0, "$NetBSD: ips.c,v 1.7 2024/01/08 18:38:25 chs Exp $");
52
53#include "bio.h"
54
55#include <sys/param.h>
56#include <sys/systm.h>
57#include <sys/device.h>
58#include <sys/kernel.h>
59#include <sys/queue.h>
60#include <sys/buf.h>
61#include <sys/endian.h>
62#include <sys/conf.h>
63#include <sys/malloc.h>
64#include <sys/ioctl.h>
65#include <sys/kthread.h>
66
67#include <sys/bus.h>
68#include <sys/intr.h>
69
70#include <dev/scsipi/scsi_all.h>
71#include <dev/scsipi/scsipi_all.h>
72#include <dev/scsipi/scsi_disk.h>
73#include <dev/scsipi/scsipi_disk.h>
74#include <dev/scsipi/scsiconf.h>
75
76#include <dev/biovar.h>
77#include <dev/sysmon/sysmonvar.h>
78#include <sys/envsys.h>
79
80#include <dev/pci/pcireg.h>
81#include <dev/pci/pcivar.h>
82#include <dev/pci/pcidevs.h>
83
84/* Debug levels */
85#define IPS_D_ERR	0x0001	/* errors */
86#define IPS_D_INFO	0x0002	/* information */
87#define IPS_D_XFER	0x0004	/* transfers */
88
89#ifdef IPS_DEBUG
90#define DPRINTF(a, b)	do { if (ips_debug & (a)) printf b; } while (0)
91int ips_debug = IPS_D_ERR;
92#else
93#define DPRINTF(a, b)
94#endif
95
96#define IPS_MAXDRIVES		8
97#define IPS_MAXCHANS		4
98#define IPS_MAXTARGETS		16
99#define IPS_MAXCHUNKS		16
100#define IPS_MAXCMDS		128
101
102#define IPS_MAXFER		(64 * 1024)
103#define IPS_MAXSGS		16
104#define IPS_MAXCDB		12
105
106#define IPS_SECSZ		512
107#define IPS_NVRAMPGSZ		128
108#define IPS_SQSZ		(IPS_MAXCMDS * sizeof(u_int32_t))
109
110#define	IPS_TIMEOUT		60000	/* ms */
111
112/* Command codes */
113#define IPS_CMD_READ		0x02
114#define IPS_CMD_WRITE		0x03
115#define IPS_CMD_DCDB		0x04
116#define IPS_CMD_GETADAPTERINFO	0x05
117#define IPS_CMD_FLUSH		0x0a
118#define IPS_CMD_REBUILDSTATUS	0x0c
119#define IPS_CMD_SETSTATE	0x10
120#define IPS_CMD_REBUILD		0x16
121#define IPS_CMD_ERRORTABLE	0x17
122#define IPS_CMD_GETDRIVEINFO	0x19
123#define IPS_CMD_RESETCHAN	0x1a
124#define IPS_CMD_DOWNLOAD	0x20
125#define IPS_CMD_RWBIOSFW	0x22
126#define IPS_CMD_READCONF	0x38
127#define IPS_CMD_GETSUBSYS	0x40
128#define IPS_CMD_CONFIGSYNC	0x58
129#define IPS_CMD_READ_SG		0x82
130#define IPS_CMD_WRITE_SG	0x83
131#define IPS_CMD_DCDB_SG		0x84
132#define IPS_CMD_EDCDB		0x95
133#define IPS_CMD_EDCDB_SG	0x96
134#define IPS_CMD_RWNVRAMPAGE	0xbc
135#define IPS_CMD_GETVERINFO	0xc6
136#define IPS_CMD_FFDC		0xd7
137#define IPS_CMD_SG		0x80
138#define IPS_CMD_RWNVRAM		0xbc
139
140/* DCDB attributes */
141#define IPS_DCDB_DATAIN		0x01	/* data input */
142#define IPS_DCDB_DATAOUT	0x02	/* data output */
143#define IPS_DCDB_XFER64K	0x08	/* 64K transfer */
144#define IPS_DCDB_TIMO10		0x10	/* 10 secs timeout */
145#define IPS_DCDB_TIMO60		0x20	/* 60 secs timeout */
146#define IPS_DCDB_TIMO20M	0x30	/* 20 mins timeout */
147#define IPS_DCDB_NOAUTOREQSEN	0x40	/* no auto request sense */
148#define IPS_DCDB_DISCON		0x80	/* disconnect allowed */
149
150/* Register definitions */
151#define IPS_REG_HIS		0x08	/* host interrupt status */
152#define IPS_REG_HIS_SCE			0x01	/* status channel enqueue */
153#define IPS_REG_HIS_EN			0x80	/* enable interrupts */
154#define IPS_REG_CCSA		0x10	/* command channel system address */
155#define IPS_REG_CCC		0x14	/* command channel control */
156#define IPS_REG_CCC_SEM			0x0008	/* semaphore */
157#define IPS_REG_CCC_START		0x101a	/* start command */
158#define IPS_REG_SQH		0x20	/* status queue head */
159#define IPS_REG_SQT		0x24	/* status queue tail */
160#define IPS_REG_SQE		0x28	/* status queue end */
161#define IPS_REG_SQS		0x2c	/* status queue start */
162
163#define IPS_REG_OIS		0x30	/* outbound interrupt status */
164#define IPS_REG_OIS_PEND		0x0008	/* interrupt is pending */
165#define IPS_REG_OIM		0x34	/* outbound interrupt mask */
166#define IPS_REG_OIM_DS			0x0008	/* disable interrupts */
167#define IPS_REG_IQP		0x40	/* inbound queue port */
168#define IPS_REG_OQP		0x44	/* outbound queue port */
169
170/* Status word fields */
171#define IPS_STAT_ID(x)		(((x) >> 8) & 0xff)	/* command id */
172#define IPS_STAT_BASIC(x)	(((x) >> 16) & 0xff)	/* basic status */
173#define IPS_STAT_EXT(x)		(((x) >> 24) & 0xff)	/* ext status */
174#define IPS_STAT_GSC(x)		((x) & 0x0f)
175
176/* Basic status codes */
177#define IPS_STAT_OK		0x00	/* success */
178#define IPS_STAT_RECOV		0x01	/* recovered error */
179#define IPS_STAT_INVOP		0x03	/* invalid opcode */
180#define IPS_STAT_INVCMD		0x04	/* invalid command block */
181#define IPS_STAT_INVPARM	0x05	/* invalid parameters block */
182#define IPS_STAT_BUSY		0x08	/* busy */
183#define IPS_STAT_CMPLERR	0x0c	/* completed with error */
184#define IPS_STAT_LDERR		0x0d	/* logical drive error */
185#define IPS_STAT_TIMO		0x0e	/* timeout */
186#define IPS_STAT_PDRVERR	0x0f	/* physical drive error */
187
188/* Extended status codes */
189#define IPS_ESTAT_SELTIMO	0xf0	/* select timeout */
190#define IPS_ESTAT_OURUN		0xf2	/* over/underrun */
191#define IPS_ESTAT_HOSTRST	0xf7	/* host reset */
192#define IPS_ESTAT_DEVRST	0xf8	/* device reset */
193#define IPS_ESTAT_RECOV		0xfc	/* recovered error */
194#define IPS_ESTAT_CKCOND	0xff	/* check condition */
195
196#define IPS_IOSIZE		128	/* max space size to map */
197
/*
 * Command frame, the fixed part of the command block handed to the
 * controller.  Multi-byte fields are little-endian (see the htole32()/
 * htole16() stores in ips_scsi_cmd()).
 */
struct ips_cmd {
	u_int8_t	code;		/* opcode, IPS_CMD_*; IPS_CMD_SG bit
					   ORed in when S/G list is used */
	u_int8_t	id;		/* command id, copied from ccb c_id */
	u_int8_t	drive;		/* logical drive number */
	u_int8_t	sgcnt;		/* scatter-gather element count */
	u_int32_t	lba;		/* start sector (LE) */
	u_int32_t	sgaddr;		/* data buffer or S/G list phys addr */
	u_int16_t	seccnt;		/* sector count (LE) */
	u_int8_t	seg4g;
	u_int8_t	esg;
	u_int32_t	ccsar;
	u_int32_t	cccr;
};
212
/*
 * Direct CDB (SCSI pass-through) frame: carries a raw SCSI command to a
 * physical device on one of the adapter's channels.
 */
struct ips_dcdb {
	u_int8_t	device;		/* target address on the channel */
	u_int8_t	attr;		/* IPS_DCDB_* attribute flags */
	u_int16_t	datalen;	/* data transfer length */
	u_int32_t	sgaddr;		/* data buffer or S/G list phys addr */
	u_int8_t	cdblen;		/* valid bytes in cdb[] */
	u_int8_t	senselen;	/* valid bytes in sense[] */
	u_int8_t	sgcnt;		/* scatter-gather element count */
	u_int8_t	__reserved1;
	u_int8_t	cdb[IPS_MAXCDB];	/* the SCSI command itself */
	u_int8_t	sense[64];	/* auto request-sense data */
	u_int8_t	status;		/* SCSI status byte from the target */
	u_int8_t	__reserved2[3];
};
228
/* Scatter-gather array element */
struct ips_sg {
	u_int32_t	addr;		/* segment physical address */
	u_int32_t	size;		/* segment length in bytes */
};
234
/*
 * Command block: one per CCB, allocated contiguously in DMA-able memory
 * (sc_cmdbm holds IPS_MAXCMDS of these).
 */
struct ips_cmdb {
	struct ips_cmd	cmd;
	struct ips_dcdb	dcdb;
	struct ips_sg	sg[IPS_MAXSGS];
};
241
/* Data frames */

/* IPS_CMD_GETADAPTERINFO response */
struct ips_adapterinfo {
	u_int8_t	drivecnt;
	u_int8_t	miscflag;
	u_int8_t	sltflag;
	u_int8_t	bstflag;
	u_int8_t	pwrchgcnt;
	u_int8_t	wrongaddrcnt;
	u_int8_t	unidentcnt;
	u_int8_t	nvramdevchgcnt;
	u_int8_t	firmware[8];	/* firmware revision, printed at attach */
	u_int8_t	bios[8];	/* BIOS revision, printed at attach */
	u_int32_t	drivesize[IPS_MAXDRIVES];
	u_int8_t	cmdcnt;		/* max concurrent commands; sizes the
					   CCB pool in ips_attach() */
	u_int8_t	maxphysdevs;
	u_int16_t	flashrepgmcnt;
	u_int8_t	defunctdiskcnt;
	u_int8_t	rebuildflag;
	u_int8_t	offdrivecnt;
	u_int8_t	critdrivecnt;
	u_int16_t	confupdcnt;
	u_int8_t	blkflag;
	u_int8_t	__reserved;
	u_int16_t	deaddisk[IPS_MAXCHANS][IPS_MAXTARGETS];
};
267
/* IPS_CMD_GETDRIVEINFO response: logical drive table */
struct ips_driveinfo {
	u_int8_t	drivecnt;	/* number of logical drives; becomes
					   sc_nunits at attach */
	u_int8_t	__reserved[3];
	struct ips_drive {
		u_int8_t	id;
		u_int8_t	__reserved;
		u_int8_t	raid;	/* RAID level, reported via INQUIRY
					   and bioc_vol */
		u_int8_t	state;	/* IPS_DS_* */
#define IPS_DS_FREE	0x00
#define IPS_DS_OFFLINE	0x02
#define IPS_DS_ONLINE	0x03
#define IPS_DS_DEGRADED	0x04
#define IPS_DS_SYS	0x06
#define IPS_DS_CRS	0x24

		u_int32_t	seccnt;	/* drive capacity in sectors (LE) */
	}		drive[IPS_MAXDRIVES];
};
286
/* IPS_CMD_READCONF response: full adapter configuration */
struct ips_conf {
	u_int8_t	ldcnt;
	u_int8_t	day;
	u_int8_t	month;
	u_int8_t	year;
	u_int8_t	initid[4];
	u_int8_t	hostid[12];
	u_int8_t	time[8];
	u_int32_t	useropt;
	u_int16_t	userfield;
	u_int8_t	rebuildrate;
	u_int8_t	__reserved1;

	/* Controller hardware description */
	struct ips_hw {
		u_int8_t	board[8];
		u_int8_t	cpu[8];
		u_int8_t	nchantype;
		u_int8_t	nhostinttype;
		u_int8_t	compression;
		u_int8_t	nvramtype;
		u_int32_t	nvramsize;
	}		hw;

	/* Per-logical-drive configuration */
	struct ips_ld {
		u_int16_t	userfield;
		u_int8_t	state;		/* IPS_DS_* */
		u_int8_t	raidcacheparam;
		u_int8_t	chunkcnt;	/* physical chunks in this LD */
		u_int8_t	stripesize;
		u_int8_t	params;
		u_int8_t	__reserved;
		u_int32_t	size;		/* LD size in sectors (LE) */

		/* Physical extent backing part of the logical drive */
		struct ips_chunk {
			u_int8_t	channel;
			u_int8_t	target;
			u_int16_t	__reserved;
			u_int32_t	startsec;
			u_int32_t	seccnt;
		}		chunk[IPS_MAXCHUNKS];
	}		ld[IPS_MAXDRIVES];

	/* Per-physical-device state, indexed by [channel][target] */
	struct ips_dev {
		u_int8_t	initiator;
		u_int8_t	params;		/* low bits hold the SCSI
						   device type (SID_TYPE) */
		u_int8_t	miscflag;
		u_int8_t	state;		/* IPS_DVS_* flags; 0 = absent */
#define IPS_DVS_STANDBY	0x01
#define IPS_DVS_REBUILD	0x02
#define IPS_DVS_SPARE	0x04
#define IPS_DVS_MEMBER	0x08
#define IPS_DVS_ONLINE	0x80
#define IPS_DVS_READY	(IPS_DVS_STANDBY | IPS_DVS_ONLINE)

		u_int32_t	seccnt;
		u_int8_t	devid[28];
	}		dev[IPS_MAXCHANS][IPS_MAXTARGETS];

	u_int8_t	reserved[512];
};
347
/*
 * IPS_CMD_REBUILDSTATUS response: per-LD rebuild progress,
 * consumed by ips_ioctl_vol() to compute a percentage.
 */
struct ips_rblstat {
	u_int8_t	__unknown[20];
	struct {
		u_int8_t	__unknown[4];
		u_int32_t	total;		/* total sectors to rebuild (LE) */
		u_int32_t	remain;		/* sectors still remaining (LE) */
	}		ld[IPS_MAXDRIVES];
};
356
/* NVRAM page 5 contents (IPS_CMD_RWNVRAMPAGE) */
struct ips_pg5 {
	u_int32_t	signature;
	u_int8_t	__reserved1;
	u_int8_t	slot;
	u_int16_t	type;		/* adapter model (LE); indexes the
					   ips_names[] table at attach */
	u_int8_t	bioshi[4];
	u_int8_t	bioslo[4];
	u_int16_t	__reserved2;
	u_int8_t	__reserved3;
	u_int8_t	os;
	u_int8_t	driverhi[4];
	u_int8_t	driverlo[4];
	u_int8_t	__reserved4[100];
};
371
/*
 * All management data frames gathered in one structure; a single DMA
 * buffer (sc_infom) of this layout is shared by the info commands.
 */
struct ips_info {
	struct ips_adapterinfo	adapter;
	struct ips_driveinfo	drive;
	struct ips_conf		conf;
	struct ips_rblstat	rblstat;
	struct ips_pg5		pg5;
};
379
/*
 * Command control block: per-command driver bookkeeping.  Each CCB owns a
 * slice of the shared command-block DMA area (c_cmdbva/c_cmdbpa) and a DMA
 * map for the caller's data buffer.
 */
struct ips_softc;
struct ips_ccb {
	struct ips_softc *	c_sc;		/* driver softc */
	int			c_id;		/* command id */
	int			c_flags;	/* SCSI_* flags */
	enum {
		IPS_CCB_FREE,			/* on sc_ccbq_free */
		IPS_CCB_QUEUED,			/* handed to the adapter */
		IPS_CCB_DONE			/* completed, awaiting reap */
	}			c_state;	/* command state */

	void *			c_cmdbva;	/* command block virt addr */
	paddr_t			c_cmdbpa;	/* command block phys addr */
	bus_dmamap_t		c_dmam;		/* data buffer DMA map */

	struct scsipi_xfer *	c_xfer;		/* corresponding SCSI xfer */

	u_int8_t		c_stat;		/* status byte copy */
	u_int8_t		c_estat;	/* ext status byte copy */
	int			c_error;	/* completion error */

	void			(*c_done)(struct ips_softc *,	/* cmd done */
				    struct ips_ccb *);		/* callback */

	SLIST_ENTRY(ips_ccb)	c_link;		/* queue link */
};
407
408/* CCB queue */
409SLIST_HEAD(ips_ccbq, ips_ccb);
410
/*
 * DMA-able chunk of memory: wraps the tag/map/segment triple needed by
 * bus_dma(9) plus the kernel mapping of the single contiguous segment.
 */
struct dmamem {
	bus_dma_tag_t		dm_tag;
	bus_dmamap_t		dm_map;
	bus_dma_segment_t	dm_seg;
	bus_size_t		dm_size;	/* size of the allocation */
	void *			dm_vaddr;	/* kernel virtual address */
#define dm_paddr dm_seg.ds_addr			/* bus/physical address */
};
420
/* Per-controller state */
struct ips_softc {
	device_t		sc_dev;

	/* SCSI mid-layer connection. */
	struct scsipi_adapter   sc_adapt;

	/* One pass-through bus per adapter channel */
	struct ips_pt {
		struct scsipi_channel	pt_chan;
		int			pt_nchan;	/* channel number */
		struct ips_softc *	pt_sc;		/* back-pointer */

		int			pt_proctgt;	/* processor/enclosure
							   target, -1 if none */
		char			pt_procdev[16];
	}			sc_pt[IPS_MAXCHANS];

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_dma_tag_t		sc_dmat;

	const struct ips_chipset *sc_chip;	/* Copperhead or Morpheus ops */

	struct ips_info *	sc_info;	/* mapping of sc_infom */
	struct dmamem		sc_infom;	/* info buffer DMA memory */

	int			sc_nunits;	/* number of logical drives */

	struct dmamem		sc_cmdbm;	/* command blocks DMA memory */

	struct ips_ccb *	sc_ccb;		/* CCB array */
	int			sc_nccbs;	/* CCB count */
	struct ips_ccbq		sc_ccbq_free;	/* free CCB list */
	struct kmutex		sc_ccb_mtx;	/* protects sc_ccbq_free */

	/* Copperhead-only status queue ring */
	struct dmamem		sc_sqm;		/* status queue DMA memory */
	paddr_t			sc_sqtail;	/* last seen tail address */
	u_int32_t *		sc_sqbuf;	/* status queue mapping */
	int			sc_sqidx;	/* current queue index */
};
459
460int	ips_match(device_t, cfdata_t, void *);
461void	ips_attach(device_t, device_t, void *);
462
463void	ips_scsi_cmd(struct ips_ccb *);
464void	ips_scsi_pt_cmd(struct scsipi_xfer *);
465static void ips_scsipi_request(struct scsipi_channel *,
466	    scsipi_adapter_req_t, void *);
467int	ips_scsi_ioctl(struct scsipi_channel *, u_long, void *,
468	    int, struct proc *);
469
470#if NBIO > 0
471int	ips_ioctl(device_t, u_long, void *);
472int	ips_ioctl_inq(struct ips_softc *, struct bioc_inq *);
473int	ips_ioctl_vol(struct ips_softc *, struct bioc_vol *);
474int	ips_ioctl_disk(struct ips_softc *, struct bioc_disk *);
475int	ips_ioctl_setstate(struct ips_softc *, struct bioc_setstate *);
476#endif
477
478int	ips_load_xs(struct ips_softc *, struct ips_ccb *, struct scsipi_xfer *);
479void	ips_start_xs(struct ips_softc *, struct ips_ccb *, struct scsipi_xfer *);
480
481int	ips_cmd(struct ips_softc *, struct ips_ccb *);
482int	ips_poll(struct ips_softc *, struct ips_ccb *);
483void	ips_done(struct ips_softc *, struct ips_ccb *);
484void	ips_done_xs(struct ips_softc *, struct ips_ccb *);
485void	ips_done_pt(struct ips_softc *, struct ips_ccb *);
486void	ips_done_mgmt(struct ips_softc *, struct ips_ccb *);
487int	ips_error(struct ips_softc *, struct ips_ccb *);
488int	ips_error_xs(struct ips_softc *, struct ips_ccb *);
489int	ips_intr(void *);
490void	ips_timeout(void *);
491
492int	ips_getadapterinfo(struct ips_softc *, int);
493int	ips_getdriveinfo(struct ips_softc *, int);
494int	ips_getconf(struct ips_softc *, int);
495int	ips_getpg5(struct ips_softc *, int);
496
497#if NBIO > 0
498int	ips_getrblstat(struct ips_softc *, int);
499int	ips_setstate(struct ips_softc *, int, int, int, int);
500int	ips_rebuild(struct ips_softc *, int, int, int, int, int);
501#endif
502
503void	ips_copperhead_exec(struct ips_softc *, struct ips_ccb *);
504void	ips_copperhead_intren(struct ips_softc *);
505int	ips_copperhead_isintr(struct ips_softc *);
506u_int32_t ips_copperhead_status(struct ips_softc *);
507
508void	ips_morpheus_exec(struct ips_softc *, struct ips_ccb *);
509void	ips_morpheus_intren(struct ips_softc *);
510int	ips_morpheus_isintr(struct ips_softc *);
511u_int32_t ips_morpheus_status(struct ips_softc *);
512
513struct ips_ccb *ips_ccb_alloc(struct ips_softc *, int);
514void	ips_ccb_free(struct ips_softc *, struct ips_ccb *, int);
515struct ips_ccb *ips_ccb_get(struct ips_softc *);
516void	ips_ccb_put(struct ips_softc *, struct ips_ccb *);
517
518int	ips_dmamem_alloc(struct dmamem *, bus_dma_tag_t, bus_size_t);
519void	ips_dmamem_free(struct dmamem *);
520
521extern struct  cfdriver ips_cd;
522
523CFATTACH_DECL_NEW(ips, sizeof(struct ips_softc),
524    ips_match, ips_attach, NULL, NULL);
525
526static struct ips_ident {
527        pci_vendor_id_t vendor;
528        pci_product_id_t product;
529} const ips_ids[] = {
530	{ PCI_VENDOR_IBM,	PCI_PRODUCT_IBM_SERVERAID },
531	{ PCI_VENDOR_IBM,	PCI_PRODUCT_IBM_SERVERAID4 },
532	{ PCI_VENDOR_ADP2,	PCI_PRODUCT_ADP2_SERVERAID }
533};
534
535static const struct ips_chipset {
536	enum {
537		IPS_CHIP_COPPERHEAD = 0,
538		IPS_CHIP_MORPHEUS
539	}		ic_id;
540
541	int		ic_bar;
542
543	void		(*ic_exec)(struct ips_softc *, struct ips_ccb *);
544	void		(*ic_intren)(struct ips_softc *);
545	int		(*ic_isintr)(struct ips_softc *);
546	u_int32_t	(*ic_status)(struct ips_softc *);
547} ips_chips[] = {
548	{
549		IPS_CHIP_COPPERHEAD,
550		0x14,
551		ips_copperhead_exec,
552		ips_copperhead_intren,
553		ips_copperhead_isintr,
554		ips_copperhead_status
555	},
556	{
557		IPS_CHIP_MORPHEUS,
558		0x10,
559		ips_morpheus_exec,
560		ips_morpheus_intren,
561		ips_morpheus_isintr,
562		ips_morpheus_status
563	}
564};
565
566#define ips_exec(s, c)	(s)->sc_chip->ic_exec((s), (c))
567#define ips_intren(s)	(s)->sc_chip->ic_intren((s))
568#define ips_isintr(s)	(s)->sc_chip->ic_isintr((s))
569#define ips_status(s)	(s)->sc_chip->ic_status((s))
570
571static const char *ips_names[] = {
572	NULL,
573	NULL,
574	"II",
575	"onboard",
576	"onboard",
577	"3H",
578	"3L",
579	"4H",
580	"4M",
581	"4L",
582	"4Mx",
583	"4Lx",
584	"5i",
585	"5i",
586	"6M",
587	"6i",
588	"7t",
589	"7k",
590	"7M"
591};
592
593/* Lookup supported device table */
594static const struct ips_ident *
595ips_lookup(const struct pci_attach_args *pa)
596{
597        const struct ips_ident *imp;
598	int i;
599
600	for (i = 0, imp = ips_ids; i < __arraycount(ips_ids); i++, imp++) {
601                if (PCI_VENDOR(pa->pa_id) == imp->vendor &&
602                    PCI_PRODUCT(pa->pa_id) == imp->product)
603                        return imp;
604        }
605        return NULL;
606}
607
608int
609ips_match(device_t parent, cfdata_t cfdata, void *aux)
610{
611	struct pci_attach_args *pa = aux;
612
613	if (ips_lookup(pa) != NULL)
614		return 1;
615
616	return 0;
617}
618
619void
620ips_attach(device_t parent, device_t self, void *aux)
621{
622	struct ips_softc *sc = device_private(self);
623	struct pci_attach_args *pa = aux;
624	struct ips_ccb ccb0;
625	struct ips_adapterinfo *ai;
626	struct ips_driveinfo *di;
627	struct ips_pg5 *pg5;
628	pcireg_t maptype;
629	bus_size_t iosize;
630	pci_intr_handle_t ih;
631	const char *intrstr;
632	int type, i;
633	struct scsipi_adapter *adapt;
634	struct scsipi_channel *chan;
635	char intrbuf[PCI_INTRSTR_LEN];
636
637	sc->sc_dev = self;
638	sc->sc_dmat = pa->pa_dmat;
639
640	/* Identify chipset */
641	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_IBM_SERVERAID)
642		sc->sc_chip = &ips_chips[IPS_CHIP_COPPERHEAD];
643	else
644		sc->sc_chip = &ips_chips[IPS_CHIP_MORPHEUS];
645
646	/* Map registers */
647	// XXX check IPS_IOSIZE as old code used to do?
648	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_chip->ic_bar);
649	if (pci_mapreg_map(pa, sc->sc_chip->ic_bar, maptype, 0, &sc->sc_iot,
650	    &sc->sc_ioh, NULL, &iosize)) {
651		printf(": can't map regs\n");
652		return;
653	}
654
655	/* Allocate command buffer */
656	if (ips_dmamem_alloc(&sc->sc_cmdbm, sc->sc_dmat,
657	    IPS_MAXCMDS * sizeof(struct ips_cmdb))) {
658		printf(": can't alloc cmd buffer\n");
659		goto fail1;
660	}
661
662	/* Allocate info buffer */
663	if (ips_dmamem_alloc(&sc->sc_infom, sc->sc_dmat,
664	    sizeof(struct ips_info))) {
665		printf(": can't alloc info buffer\n");
666		goto fail2;
667	}
668	sc->sc_info = sc->sc_infom.dm_vaddr;
669	ai = &sc->sc_info->adapter;
670	di = &sc->sc_info->drive;
671	pg5 = &sc->sc_info->pg5;
672
673	/* Allocate status queue for the Copperhead chipset */
674	if (sc->sc_chip->ic_id == IPS_CHIP_COPPERHEAD) {
675		if (ips_dmamem_alloc(&sc->sc_sqm, sc->sc_dmat, IPS_SQSZ)) {
676			printf(": can't alloc status queue\n");
677			goto fail3;
678		}
679		sc->sc_sqtail = sc->sc_sqm.dm_paddr;
680		sc->sc_sqbuf = sc->sc_sqm.dm_vaddr;
681		sc->sc_sqidx = 0;
682		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQS,
683		    sc->sc_sqm.dm_paddr);
684		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQE,
685		    sc->sc_sqm.dm_paddr + IPS_SQSZ);
686		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQH,
687		    sc->sc_sqm.dm_paddr + sizeof(u_int32_t));
688		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQT,
689		    sc->sc_sqm.dm_paddr);
690	}
691
692	/* Bootstrap CCB queue */
693	sc->sc_nccbs = 1;
694	sc->sc_ccb = &ccb0;
695	bzero(&ccb0, sizeof(ccb0));
696	ccb0.c_cmdbva = sc->sc_cmdbm.dm_vaddr;
697	ccb0.c_cmdbpa = sc->sc_cmdbm.dm_paddr;
698	SLIST_INIT(&sc->sc_ccbq_free);
699	SLIST_INSERT_HEAD(&sc->sc_ccbq_free, &ccb0, c_link);
700	mutex_init(&sc->sc_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
701
702	/* Get adapter info */
703	if (ips_getadapterinfo(sc, XS_CTL_NOSLEEP)) {
704		printf(": can't get adapter info\n");
705		goto fail4;
706	}
707
708	/* Get logical drives info */
709	if (ips_getdriveinfo(sc, XS_CTL_NOSLEEP)) {
710		printf(": can't get ld info\n");
711		goto fail4;
712	}
713	sc->sc_nunits = di->drivecnt;
714
715	/* Get configuration */
716	if (ips_getconf(sc, XS_CTL_NOSLEEP)) {
717		printf(": can't get config\n");
718		goto fail4;
719	}
720
721	/* Read NVRAM page 5 for additional info */
722	(void)ips_getpg5(sc, XS_CTL_NOSLEEP);
723
724	/* Initialize CCB queue */
725	sc->sc_nccbs = ai->cmdcnt;
726	if ((sc->sc_ccb = ips_ccb_alloc(sc, sc->sc_nccbs)) == NULL) {
727		printf(": can't alloc ccb queue\n");
728		goto fail4;
729	}
730	SLIST_INIT(&sc->sc_ccbq_free);
731	for (i = 0; i < sc->sc_nccbs; i++)
732		SLIST_INSERT_HEAD(&sc->sc_ccbq_free,
733		    &sc->sc_ccb[i], c_link);
734
735	/* Install interrupt handler */
736	if (pci_intr_map(pa, &ih)) {
737		printf(": can't map interrupt\n");
738		goto fail5;
739	}
740	intrstr = pci_intr_string(pa->pa_pc, ih, intrbuf, sizeof(intrbuf));
741	if (pci_intr_establish_xname(pa->pa_pc, ih, IPL_BIO, ips_intr, sc,
742		device_xname(sc->sc_dev)) == NULL) {
743		printf(": can't establish interrupt");
744		if (intrstr != NULL)
745			printf(" at %s", intrstr);
746		printf("\n");
747		goto fail5;
748	}
749	printf(": %s\n", intrstr);
750
751	/* Display adapter info */
752	device_printf(sc->sc_dev, "ServeRAID");
753	type = htole16(pg5->type);
754	if (type < sizeof(ips_names) / sizeof(ips_names[0]) && ips_names[type])
755		printf(" %s", ips_names[type]);
756	printf(", FW %c%c%c%c%c%c%c", ai->firmware[0], ai->firmware[1],
757	    ai->firmware[2], ai->firmware[3], ai->firmware[4], ai->firmware[5],
758	    ai->firmware[6]);
759	printf(", BIOS %c%c%c%c%c%c%c", ai->bios[0], ai->bios[1], ai->bios[2],
760	    ai->bios[3], ai->bios[4], ai->bios[5], ai->bios[6]);
761	printf(", %d cmds, %d LD%s", sc->sc_nccbs, sc->sc_nunits,
762	    (sc->sc_nunits == 1 ? "" : "s"));
763	printf("\n");
764
765	/*
766	 * Attach to scsipi.
767	 */
768	adapt = &sc->sc_adapt;
769	memset(adapt, 0, sizeof(*adapt));
770	adapt->adapt_dev = self;
771	adapt->adapt_nchannels = IPS_MAXCHANS;
772	if (sc->sc_nunits > 0)
773		adapt->adapt_openings = sc->sc_nccbs / sc->sc_nunits;
774	adapt->adapt_max_periph = adapt->adapt_openings;
775	adapt->adapt_request = ips_scsipi_request;
776	adapt->adapt_minphys = minphys;
777	adapt->adapt_ioctl = ips_scsi_ioctl;
778
779	/* For each channel attach SCSI pass-through bus */
780	for (i = 0; i < IPS_MAXCHANS; i++) {
781		struct ips_pt *pt;
782		int target, lastarget;
783
784		pt = &sc->sc_pt[i];
785		pt->pt_sc = sc;
786		pt->pt_nchan = i;
787		pt->pt_proctgt = -1;
788
789		/* Check if channel has any devices besides disks */
790		for (target = 0, lastarget = -1; target < IPS_MAXTARGETS;
791		    target++) {
792			struct ips_dev *idev;
793			int dev_type;
794
795			idev = &sc->sc_info->conf.dev[i][target];
796			dev_type = idev->params & SID_TYPE;
797			if (idev->state && dev_type != T_DIRECT) {
798				lastarget = target;
799				if (type == T_PROCESSOR ||
800				    type == T_ENCLOSURE)
801					/* remember enclosure address */
802					pt->pt_proctgt = target;
803			}
804		}
805		if (lastarget == -1)
806			continue;
807
808		chan = &pt->pt_chan;
809		memset(chan, 0, sizeof(*chan));
810		chan->chan_adapter = adapt;
811		chan->chan_bustype = &scsi_bustype;
812		chan->chan_channel = i;
813		chan->chan_ntargets = IPS_MAXTARGETS;
814		chan->chan_nluns = lastarget + 1;
815		chan->chan_id = i;
816		chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
817		config_found(self, chan, scsiprint, CFARGS_NONE);
818	}
819
820	/* Enable interrupts */
821	ips_intren(sc);
822
823#if NBIO > 0
824	/* Install ioctl handler */
825	if (bio_register(sc->sc_dev, ips_ioctl))
826		device_printf(sc->sc_dev, "no ioctl support\n");
827#endif
828
829	return;
830fail5:
831	ips_ccb_free(sc, sc->sc_ccb, sc->sc_nccbs);
832fail4:
833	if (sc->sc_chip->ic_id == IPS_CHIP_COPPERHEAD)
834		ips_dmamem_free(&sc->sc_sqm);
835fail3:
836	ips_dmamem_free(&sc->sc_infom);
837fail2:
838	ips_dmamem_free(&sc->sc_cmdbm);
839fail1:
840	bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
841}
842
/*
 * Execute a SCSI command addressed to a logical drive.  READ/WRITE and
 * SYNCHRONIZE CACHE are translated to controller commands and started
 * asynchronously; INQUIRY, READ CAPACITY, REQUEST SENSE, and a few no-op
 * commands are emulated in the driver and completed immediately.  The CCB
 * is released and scsipi_done() called on every path that does not start
 * a real controller command.
 */
void
ips_scsi_cmd(struct ips_ccb *ccb)
{
	struct scsipi_xfer *xs = ccb->c_xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct ips_softc *sc = device_private(chan->chan_adapter->adapt_dev);
	struct ips_driveinfo *di = &sc->sc_info->drive;
	struct ips_drive *drive;
	struct ips_cmd *cmd;
	int target = periph->periph_target;
	u_int32_t blkno, blkcnt;
	int code;

	DPRINTF(IPS_D_XFER, ("%s: ips_scsi_cmd: xs %p, target %d, "
	    "opcode 0x%02x, flags 0x%x\n", device_xname(sc->sc_dev), xs, target,
	    xs->cmd->opcode, xs->xs_control));

	/* Targets map 1:1 to logical drives; only LUN 0 exists. */
	if (target >= sc->sc_nunits || periph->periph_lun != 0) {
		DPRINTF(IPS_D_INFO, ("%s: ips_scsi_cmd: invalid params "
		    "target %d, lun %d\n", device_xname(sc->sc_dev),
		    target, periph->periph_lun));
		xs->error = XS_DRIVER_STUFFUP;
		ips_ccb_put(sc, ccb);
		scsipi_done(xs);
		return;
	}

	drive = &di->drive[target];
	xs->error = XS_NOERROR;

	/* Fake SCSI commands */
	switch (xs->cmd->opcode) {
	case READ_10:
	case SCSI_READ_6_COMMAND:
	case WRITE_10:
	case SCSI_WRITE_6_COMMAND: {
		struct scsi_rw_6 *rw;
		struct scsipi_rw_10 *rwb;

		/* Decode LBA and length from the 6- or 10-byte CDB. */
		if (xs->cmdlen == sizeof(struct scsi_rw_6)) {
			rw = (void *)xs->cmd;
			blkno = _3btol(rw->addr) &
			    (SRW_TOPADDR << 16 | 0xffff);
			/* 6-byte CDB: length 0 means 256 blocks */
			blkcnt = rw->length ? rw->length : 0x100;
		} else {
			rwb = (void *)xs->cmd;
			blkno = _4btol(rwb->addr);
			blkcnt = _2btol(rwb->length);
		}

		/*
		 * Bounds-check against the drive capacity.
		 * NOTE(review): seccnt is little-endian device order;
		 * htole32() here acts as le32toh() (same byte swap), but
		 * le32toh() would state the intended direction.
		 */
		if (blkno >= htole32(drive->seccnt) || blkno + blkcnt >
		    htole32(drive->seccnt)) {
			DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: invalid params "
			    "blkno %u, blkcnt %u\n", device_xname(sc->sc_dev),
			    blkno, blkcnt));
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}

		if (xs->xs_control & XS_CTL_DATA_IN)
			code = IPS_CMD_READ;
		else
			code = IPS_CMD_WRITE;

		/* Build the controller command in this CCB's command block. */
		cmd = ccb->c_cmdbva;
		cmd->code = code;
		cmd->drive = target;
		cmd->lba = htole32(blkno);
		cmd->seccnt = htole16(blkcnt);

		if (ips_load_xs(sc, ccb, xs)) {
			DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: ips_load_xs "
			    "failed\n", device_xname(sc->sc_dev)));
			xs->error = XS_DRIVER_STUFFUP;
			ips_ccb_put(sc, ccb);
			scsipi_done(xs);
			return;
		}

		/* Switch to the scatter-gather opcode variant if needed. */
		if (cmd->sgcnt > 0)
			cmd->code |= IPS_CMD_SG;

		ccb->c_done = ips_done_xs;
		ips_start_xs(sc, ccb, xs);
		return;
	}
	case INQUIRY: {
		/* Emulated: synthesize inquiry data for the logical drive. */
		struct scsipi_inquiry_data inq;

		bzero(&inq, sizeof(inq));
		inq.device = T_DIRECT;
		inq.version = 2;
		inq.response_format = 2;
		inq.additional_length = 32;
		inq.flags3 |= SID_CmdQue;
		strlcpy(inq.vendor, "IBM", sizeof(inq.vendor));
		snprintf(inq.product, sizeof(inq.product),
		    "LD%d RAID%d", target, drive->raid);
		strlcpy(inq.revision, "1.0", sizeof(inq.revision));
		memcpy(xs->data, &inq, MIN(xs->datalen, sizeof(inq)));
		break;
	}
	case READ_CAPACITY_10: {
		/* Emulated: report last LBA and fixed 512-byte sector size. */
		struct scsipi_read_capacity_10_data rcd;

		bzero(&rcd, sizeof(rcd));
		_lto4b(htole32(drive->seccnt) - 1, rcd.addr);
		_lto4b(IPS_SECSZ, rcd.length);
		memcpy(xs->data, &rcd, MIN(xs->datalen, sizeof(rcd)));
		break;
	}
	case SCSI_REQUEST_SENSE: {
		/* Emulated: always report "no sense". */
		struct scsi_sense_data sd;

		bzero(&sd, sizeof(sd));
		sd.response_code = SSD_RCODE_CURRENT;
		sd.flags = SKEY_NO_SENSE;
		memcpy(xs->data, &sd, MIN(xs->datalen, sizeof(sd)));
		break;
	}
	case SCSI_SYNCHRONIZE_CACHE_10:
		/* Translate to a controller-wide cache flush. */
		cmd = ccb->c_cmdbva;
		cmd->code = IPS_CMD_FLUSH;

		ccb->c_done = ips_done_xs;
		ips_start_xs(sc, ccb, xs);
		return;
	case SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL:
	case START_STOP:
	case SCSI_TEST_UNIT_READY:
		/* No-ops: complete successfully without touching hardware. */
		break;
	default:
		DPRINTF(IPS_D_INFO, ("%s: unsupported scsi command 0x%02x\n",
		    device_xname(sc->sc_dev), xs->cmd->opcode));
		xs->error = XS_DRIVER_STUFFUP;
	}

	/* Emulated/failed commands complete here. */
	ips_ccb_put(sc, ccb);
	scsipi_done(xs);
}
984
985/*
986 * Start a SCSI command.
987 */
988static void
989ips_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
990		   void *arg)
991{
992	switch (req) {
993	case ADAPTER_REQ_RUN_XFER: {
994		struct ips_ccb *ccb;
995		struct scsipi_xfer *xs;
996		struct ips_softc *sc;
997
998		sc = device_private(chan->chan_adapter->adapt_dev);
999		xs = (struct scsipi_xfer *)arg;
1000
1001		if ((ccb = ips_ccb_get(sc)) == NULL) {
1002			xs->error = XS_RESOURCE_SHORTAGE;
1003			scsipi_done(xs);
1004			break;
1005		}
1006
1007		ccb->c_xfer = xs;
1008		ips_scsi_cmd(ccb);
1009
1010		break;
1011	}
1012
1013	case ADAPTER_REQ_SET_XFER_MODE: {
1014		struct scsipi_xfer_mode *xm = arg;
1015		xm->xm_mode = PERIPH_CAP_TQING;
1016		xm->xm_period = 0;
1017		xm->xm_offset = 0;
1018		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
1019		return;
1020	}
1021
1022	case ADAPTER_REQ_GROW_RESOURCES:
1023		/*
1024		 * Not supported.
1025		 */
1026		break;
1027	}
1028}
1029
1030int
1031ips_scsi_ioctl(struct scsipi_channel *chan, u_long cmd, void *data,
1032    int flag, struct proc *p)
1033{
1034#if NBIO > 0
1035	return (ips_ioctl(chan->chan_adapter->adapt_dev, cmd, data));
1036#else
1037	return (ENOTTY);
1038#endif
1039}
1040
1041#if NBIO > 0
1042int
1043ips_ioctl(device_t dev, u_long cmd, void *data)
1044{
1045	struct ips_softc *sc = device_private(dev);
1046
1047	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl: cmd %lu\n",
1048	    device_xname(sc->sc_dev), cmd));
1049
1050	switch (cmd) {
1051	case BIOCINQ:
1052		return (ips_ioctl_inq(sc, (struct bioc_inq *)data));
1053	case BIOCVOL:
1054		return (ips_ioctl_vol(sc, (struct bioc_vol *)data));
1055	case BIOCDISK:
1056		return (ips_ioctl_disk(sc, (struct bioc_disk *)data));
1057	case BIOCSETSTATE:
1058		return (ips_ioctl_setstate(sc, (struct bioc_setstate *)data));
1059	default:
1060		return (ENOTTY);
1061	}
1062}
1063
1064int
1065ips_ioctl_inq(struct ips_softc *sc, struct bioc_inq *bi)
1066{
1067	struct ips_conf *conf = &sc->sc_info->conf;
1068	int i;
1069
1070	strlcpy(bi->bi_dev, device_xname(sc->sc_dev), sizeof(bi->bi_dev));
1071	bi->bi_novol = sc->sc_nunits;
1072	for (i = 0, bi->bi_nodisk = 0; i < sc->sc_nunits; i++)
1073		bi->bi_nodisk += conf->ld[i].chunkcnt;
1074
1075	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_inq: novol %d, nodisk %d\n",
1076	    bi->bi_dev, bi->bi_novol, bi->bi_nodisk));
1077
1078	return (0);
1079}
1080
/*
 * BIOCVOL: report the status, size, RAID level and disk count of one
 * logical volume.  Refreshes the configuration from the controller
 * before reading it.
 */
int
ips_ioctl_vol(struct ips_softc *sc, struct bioc_vol *bv)
{
	struct ips_driveinfo *di = &sc->sc_info->drive;
	struct ips_conf *conf = &sc->sc_info->conf;
	struct ips_rblstat *rblstat = &sc->sc_info->rblstat;
	struct ips_ld *ld;
	int vid = bv->bv_volid;
	device_t dv;
	int error, rebuild = 0;
	u_int32_t total = 0, done = 0;

	if (vid >= sc->sc_nunits)
		return (EINVAL);
	/* Re-read the config so the reported state is current. */
	if ((error = ips_getconf(sc, 0)))
		return (error);
	ld = &conf->ld[vid];

	/* Translate controller drive state to BIO volume status. */
	switch (ld->state) {
	case IPS_DS_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case IPS_DS_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		rebuild++;	/* check below whether a rebuild is running */
		break;
	case IPS_DS_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	default:
		bv->bv_status = BIOC_SVINVALID;
	}

	/*
	 * A degraded volume may actually be rebuilding; if so, report
	 * BIOC_SVREBUILD with a completion percentage.  htole32() here
	 * converts the little-endian controller values (the swap is
	 * symmetric, so it doubles as le32toh).
	 */
	if (rebuild && ips_getrblstat(sc, 0) == 0) {
		total = htole32(rblstat->ld[vid].total);
		done = total - htole32(rblstat->ld[vid].remain);
		if (total && total > done) {
			bv->bv_status = BIOC_SVREBUILD;
			bv->bv_percent = 100 * done / total;
		}
	}

	bv->bv_size = (uint64_t)htole32(ld->size) * IPS_SECSZ;
	bv->bv_level = di->drive[vid].raid;
	bv->bv_nodisk = ld->chunkcnt;

	/* Associate all unused and spare drives with first volume */
	if (vid == 0) {
		struct ips_dev *dev;
		int chan, target;

		for (chan = 0; chan < IPS_MAXCHANS; chan++)
			for (target = 0; target < IPS_MAXTARGETS; target++) {
				dev = &conf->dev[chan][target];
				/* present, not a volume member, direct-access */
				if (dev->state && !(dev->state &
				    IPS_DVS_MEMBER) &&
				    (dev->params & SID_TYPE) == T_DIRECT)
					bv->bv_nodisk++;
			}
	}

	dv = sc->sc_dev;
	strlcpy(bv->bv_dev, device_xname(dv), sizeof(bv->bv_dev));
	strlcpy(bv->bv_vendor, "IBM", sizeof(bv->bv_vendor));

	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_vol: vid %d, state 0x%02x, "
	    "total %u, done %u, size %llu, level %d, nodisk %d, dev %s\n",
	    device_xname(sc->sc_dev), vid, ld->state, total, done, bv->bv_size,
	    bv->bv_level, bv->bv_nodisk, bv->bv_dev));

	return (0);
}
1153
/*
 * BIOCDISK: report channel/target, size and status of one physical
 * disk of a volume.  Disk ids beyond the volume's chunk count refer
 * to the unused/spare drives that ips_ioctl_vol() attributed to
 * volume 0.
 */
int
ips_ioctl_disk(struct ips_softc *sc, struct bioc_disk *bd)
{
	struct ips_conf *conf = &sc->sc_info->conf;
	struct ips_ld *ld;
	struct ips_chunk *chunk;
	struct ips_dev *dev;
	int vid = bd->bd_volid, did = bd->bd_diskid;
	int chan, target, error, i;

	if (vid >= sc->sc_nunits)
		return (EINVAL);
	if ((error = ips_getconf(sc, 0)))
		return (error);
	ld = &conf->ld[vid];

	if (did >= ld->chunkcnt) {
		/* Probably unused or spare drives */
		if (vid != 0)
			return (EINVAL);

		/*
		 * Enumerate non-member direct-access devices in the same
		 * order ips_ioctl_vol() counted them, starting after the
		 * volume's own chunks, until we reach index 'did'.
		 */
		i = ld->chunkcnt;
		for (chan = 0; chan < IPS_MAXCHANS; chan++)
			for (target = 0; target < IPS_MAXTARGETS; target++) {
				dev = &conf->dev[chan][target];
				if (dev->state && !(dev->state &
				    IPS_DVS_MEMBER) &&
				    (dev->params & SID_TYPE) == T_DIRECT)
					if (i++ == did)
						goto out;
			}
	} else {
		/* Regular member disk: location comes from the chunk table. */
		chunk = &ld->chunk[did];
		chan = chunk->channel;
		target = chunk->target;
	}

out:
	/*
	 * If the search loops above ran to completion without a match,
	 * chan ends up at IPS_MAXCHANS and this range check rejects it.
	 */
	if (chan >= IPS_MAXCHANS || target >= IPS_MAXTARGETS)
		return (EINVAL);
	dev = &conf->dev[chan][target];

	bd->bd_channel = chan;
	bd->bd_target = target;
	bd->bd_lun = 0;
	bd->bd_size = (uint64_t)htole32(dev->seccnt) * IPS_SECSZ;

	bzero(bd->bd_vendor, sizeof(bd->bd_vendor));
	memcpy(bd->bd_vendor, dev->devid, MIN(sizeof(bd->bd_vendor),
	    sizeof(dev->devid)));
	strlcpy(bd->bd_procdev, sc->sc_pt[chan].pt_procdev,
	    sizeof(bd->bd_procdev));

	/* Map device state bits to BIO disk status, most specific last. */
	if (dev->state & IPS_DVS_READY) {
		bd->bd_status = BIOC_SDUNUSED;
		if (dev->state & IPS_DVS_MEMBER)
			bd->bd_status = BIOC_SDONLINE;
		if (dev->state & IPS_DVS_SPARE)
			bd->bd_status = BIOC_SDHOTSPARE;
		if (dev->state & IPS_DVS_REBUILD)
			bd->bd_status = BIOC_SDREBUILD;
	} else {
		bd->bd_status = BIOC_SDOFFLINE;
	}

	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_disk: vid %d, did %d, channel %d, "
	    "target %d, size %llu, state 0x%02x\n", device_xname(sc->sc_dev),
	    vid, did, bd->bd_channel, bd->bd_target, bd->bd_size, dev->state));

	return (0);
}
1225
/*
 * BIOCSETSTATE: change the state of a physical device (online,
 * offline, hot-spare) or kick off a rebuild onto the same device.
 */
int
ips_ioctl_setstate(struct ips_softc *sc, struct bioc_setstate *bs)
{
	struct ips_conf *conf = &sc->sc_info->conf;
	struct ips_dev *dev;
	int state, error;

	if (bs->bs_channel >= IPS_MAXCHANS || bs->bs_target >= IPS_MAXTARGETS)
		return (EINVAL);
	if ((error = ips_getconf(sc, 0)))
		return (error);
	dev = &conf->dev[bs->bs_channel][bs->bs_target];
	/* Start from the current state and adjust only the relevant bits. */
	state = dev->state;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		state |= IPS_DVS_READY;
		break;
	case BIOC_SSOFFLINE:
		state &= ~IPS_DVS_READY;
		break;
	case BIOC_SSHOTSPARE:
		state |= IPS_DVS_SPARE;
		break;
	case BIOC_SSREBUILD:
		/* Rebuild in place: source and destination are the same. */
		return (ips_rebuild(sc, bs->bs_channel, bs->bs_target,
		    bs->bs_channel, bs->bs_target, 0));
	default:
		return (EINVAL);
	}

	return (ips_setstate(sc, bs->bs_channel, bs->bs_target, state, 0));
}
1259#endif	/* NBIO > 0 */
1260
1261int
1262ips_load_xs(struct ips_softc *sc, struct ips_ccb *ccb, struct scsipi_xfer *xs)
1263{
1264	struct ips_cmdb *cmdb = ccb->c_cmdbva;
1265	struct ips_cmd *cmd = &cmdb->cmd;
1266	struct ips_sg *sg = cmdb->sg;
1267	int nsegs, i;
1268
1269	if (xs->datalen == 0)
1270		return (0);
1271
1272	/* Map data buffer into DMA segments */
1273	if (bus_dmamap_load(sc->sc_dmat, ccb->c_dmam, xs->data, xs->datalen,
1274	    NULL, (xs->xs_control & XS_CTL_NOSLEEP ? BUS_DMA_NOWAIT : 0)))
1275		return (1);
1276	bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,ccb->c_dmam->dm_mapsize,
1277	    xs->xs_control & XS_CTL_DATA_IN ? BUS_DMASYNC_PREREAD :
1278	    BUS_DMASYNC_PREWRITE);
1279
1280	if ((nsegs = ccb->c_dmam->dm_nsegs) > IPS_MAXSGS)
1281		return (1);
1282
1283	if (nsegs > 1) {
1284		cmd->sgcnt = nsegs;
1285		cmd->sgaddr = htole32(ccb->c_cmdbpa + offsetof(struct ips_cmdb,
1286		    sg));
1287
1288		/* Fill in scatter-gather array */
1289		for (i = 0; i < nsegs; i++) {
1290			sg[i].addr = htole32(ccb->c_dmam->dm_segs[i].ds_addr);
1291			sg[i].size = htole32(ccb->c_dmam->dm_segs[i].ds_len);
1292		}
1293	} else {
1294		cmd->sgcnt = 0;
1295		cmd->sgaddr = htole32(ccb->c_dmam->dm_segs[0].ds_addr);
1296	}
1297
1298	return (0);
1299}
1300
1301void
1302ips_start_xs(struct ips_softc *sc, struct ips_ccb *ccb, struct scsipi_xfer *xs)
1303{
1304	ccb->c_flags = xs->xs_control;
1305	ccb->c_xfer = xs;
1306	int ispoll = xs->xs_control & XS_CTL_POLL;
1307
1308	if (!ispoll) {
1309		int timeout = mstohz(xs->timeout);
1310		if (timeout == 0)
1311			timeout = 1;
1312
1313		callout_reset(&xs->xs_callout, timeout, ips_timeout, ccb);
1314	}
1315
1316	/*
1317	 * Return value not used here because ips_cmd() must complete
1318	 * scsipi_xfer on any failure and SCSI layer will handle possible
1319	 * errors.
1320	 */
1321	ips_cmd(sc, ccb);
1322}
1323
/*
 * Post a command to the controller.  For XS_CTL_POLL commands the
 * completion is awaited via ips_poll() and its error returned;
 * otherwise 0 is returned and completion happens in ips_intr().
 */
int
ips_cmd(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct ips_cmd *cmd = ccb->c_cmdbva;
	int s, error = 0;

	DPRINTF(IPS_D_XFER, ("%s: ips_cmd: id 0x%02x, flags 0x%x, xs %p, "
	    "code 0x%02x, drive %d, sgcnt %d, lba %d, sgaddr 0x%08x, "
	    "seccnt %d\n", device_xname(sc->sc_dev), ccb->c_id, ccb->c_flags,
	    ccb->c_xfer, cmd->code, cmd->drive, cmd->sgcnt, htole32(cmd->lba),
	    htole32(cmd->sgaddr), htole16(cmd->seccnt)));

	/* The id field lets ips_intr() find the CCB from the status word. */
	cmd->id = ccb->c_id;

	/* Post command to controller and optionally wait for completion */
	/*
	 * splbio blocks the interrupt handler so it cannot process the
	 * command before c_state is set to IPS_CCB_QUEUED.
	 */
	s = splbio();
	ips_exec(sc, ccb);
	ccb->c_state = IPS_CCB_QUEUED;
	if (ccb->c_flags & XS_CTL_POLL)
		error = ips_poll(sc, ccb);
	splx(s);

	return (error);
}
1348
/*
 * Wait for a queued command to complete.  XS_CTL_NOSLEEP callers
 * busy-wait driving the interrupt handler by hand; others tsleep()
 * and are woken from ips_intr().  On timeout a fake hardware timeout
 * status is recorded before completing the command.
 */
int
ips_poll(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct timeval tv;
	int error, timo;

	if (ccb->c_flags & XS_CTL_NOSLEEP) {
		/* busy-wait */
		DPRINTF(IPS_D_XFER, ("%s: ips_poll: busy-wait\n",
		    device_xname(sc->sc_dev)));

		/* 10000 * delay(100) — roughly one second total. */
		for (timo = 10000; timo > 0; timo--) {
			delay(100);
			ips_intr(sc);
			if (ccb->c_state == IPS_CCB_DONE)
				break;
		}
	} else {
		/* sleep */
		/* Use the transfer's own timeout (ms) when there is one. */
		timo = ccb->c_xfer ? ccb->c_xfer->timeout : IPS_TIMEOUT;
		tv.tv_sec = timo / 1000;
		tv.tv_usec = (timo % 1000) * 1000;
		timo = tvtohz(&tv);

		DPRINTF(IPS_D_XFER, ("%s: ips_poll: sleep %d hz\n",
		    device_xname(sc->sc_dev), timo));
		tsleep(ccb, PRIBIO + 1, "ipscmd", timo);
	}
	DPRINTF(IPS_D_XFER, ("%s: ips_poll: state %d\n",
	    device_xname(sc->sc_dev),
	    ccb->c_state));

	if (ccb->c_state != IPS_CCB_DONE)
		/*
		 * Command never completed. Fake hardware status byte
		 * to indicate timeout.
		 */
		ccb->c_stat = IPS_STAT_TIMO;

	ips_done(sc, ccb);
	error = ccb->c_error;

	return (error);
}
1393
/*
 * Common completion path: translate the hardware status into an errno
 * and invoke the CCB's type-specific completion callback.
 */
void
ips_done(struct ips_softc *sc, struct ips_ccb *ccb)
{
	DPRINTF(IPS_D_XFER, ("%s: ips_done: id 0x%02x, flags 0x%x, xs %p\n",
	    device_xname(sc->sc_dev), ccb->c_id, ccb->c_flags, ccb->c_xfer));

	ccb->c_error = ips_error(sc, ccb);
	ccb->c_done(sc, ccb);
}
1403
/*
 * Completion callback for logical-drive I/O: stop the watchdog, tear
 * down the DMA mapping, return the CCB and finish the scsipi transfer.
 */
void
ips_done_xs(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct scsipi_xfer *xs = ccb->c_xfer;

	/* The callout is only armed for non-polled commands. */
	if (!(xs->xs_control & XS_CTL_POLL))
		callout_stop(&xs->xs_callout);

	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
		bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,
		    ccb->c_dmam->dm_mapsize, xs->xs_control & XS_CTL_DATA_IN ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam);
	}

	xs->resid = 0;
	xs->error = ips_error_xs(sc, ccb);
	ips_ccb_put(sc, ccb);
	scsipi_done(xs);
}
1424
/*
 * Completion callback for pass-through (DCDB) commands: like
 * ips_done_xs() but also propagates residual count, SCSI status and
 * sense data, and hides physical direct-access drives from INQUIRY.
 */
void
ips_done_pt(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct scsipi_xfer *xs = ccb->c_xfer;
	struct ips_cmdb *cmdb = ccb->c_cmdbva;
	struct ips_dcdb *dcdb = &cmdb->dcdb;
	/* Byte count actually transferred (little-endian; swap is symmetric). */
	int done = htole16(dcdb->datalen);

	if (!(xs->xs_control & XS_CTL_POLL))
		callout_stop(&xs->xs_callout);

	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
		bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,
		    ccb->c_dmam->dm_mapsize, xs->xs_control & XS_CTL_DATA_IN ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam);
	}

	/* Report an underrun only when the device moved fewer bytes. */
	if (done && done < xs->datalen)
		xs->resid = xs->datalen - done;
	else
		xs->resid = 0;
	xs->error = ips_error_xs(sc, ccb);
	xs->status = dcdb->status;

	if (xs->error == XS_SENSE)
		memcpy(&xs->sense, dcdb->sense, MIN(sizeof(xs->sense),
		    sizeof(dcdb->sense)));

	if (xs->cmd->opcode == INQUIRY && xs->error == XS_NOERROR) {
		int type = ((struct scsipi_inquiry_data *)xs->data)->device &
		    SID_TYPE;

		if (type == T_DIRECT)
			/* mask physical drives */
			xs->error = XS_DRIVER_STUFFUP;
	}

	ips_ccb_put(sc, ccb);
	scsipi_done(xs);
}
1466
/*
 * Completion callback for management commands: sync the shared info
 * buffer (mapped once at attach) and return the CCB.  No scsipi
 * transfer is involved.
 */
void
ips_done_mgmt(struct ips_softc *sc, struct ips_ccb *ccb)
{
	if (ccb->c_flags & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
		bus_dmamap_sync(sc->sc_dmat, sc->sc_infom.dm_map, 0,
		    sc->sc_infom.dm_map->dm_mapsize,
		    ccb->c_flags & XS_CTL_DATA_IN ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);

	ips_ccb_put(sc, ccb);
}
1478
/*
 * Translate the controller's basic/extended status bytes into an
 * errno value.  Returns 0 for success and recovered errors, and EIO
 * for anything not specifically mapped.
 */
int
ips_error(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct ips_cmdb *cmdb = ccb->c_cmdbva;
	struct ips_cmd *cmd = &cmdb->cmd;
	struct ips_dcdb *dcdb = &cmdb->dcdb;
	struct scsipi_xfer *xs = ccb->c_xfer;
	u_int8_t gsc = IPS_STAT_GSC(ccb->c_stat);

	if (gsc == IPS_STAT_OK)
		return (0);

	/* Dump command, DCDB and sense details for debugging. */
	DPRINTF(IPS_D_ERR, ("%s: ips_error: stat 0x%02x, estat 0x%02x, "
	    "cmd code 0x%02x, drive %d, sgcnt %d, lba %u, seccnt %d",
	    device_xname(sc->sc_dev), ccb->c_stat, ccb->c_estat, cmd->code,
	    cmd->drive, cmd->sgcnt, htole32(cmd->lba), htole16(cmd->seccnt)));
	if (cmd->code == IPS_CMD_DCDB || cmd->code == IPS_CMD_DCDB_SG) {
		int i;

		DPRINTF(IPS_D_ERR, (", dcdb device 0x%02x, attr 0x%02x, "
		    "datalen %d, sgcnt %d, status 0x%02x",
		    dcdb->device, dcdb->attr, htole16(dcdb->datalen),
		    dcdb->sgcnt, dcdb->status));

		DPRINTF(IPS_D_ERR, (", cdb"));
		for (i = 0; i < dcdb->cdblen; i++)
			DPRINTF(IPS_D_ERR, (" %x", dcdb->cdb[i]));
		if (ccb->c_estat == IPS_ESTAT_CKCOND) {
			DPRINTF(IPS_D_ERR, (", sense"));
			for (i = 0; i < dcdb->senselen; i++)
				DPRINTF(IPS_D_ERR, (" %x", dcdb->sense[i]));
		}
	}
	DPRINTF(IPS_D_ERR, ("\n"));

	switch (gsc) {
	case IPS_STAT_RECOV:
		/* Recovered by the controller; not an error to callers. */
		return (0);
	case IPS_STAT_INVOP:
	case IPS_STAT_INVCMD:
	case IPS_STAT_INVPARM:
		return (EINVAL);
	case IPS_STAT_BUSY:
		return (EBUSY);
	case IPS_STAT_TIMO:
		return (ETIMEDOUT);
	case IPS_STAT_PDRVERR:
		switch (ccb->c_estat) {
		case IPS_ESTAT_SELTIMO:
			return (ENODEV);
		case IPS_ESTAT_OURUN:
			if (xs && htole16(dcdb->datalen) < xs->datalen)
				/* underrun */
				return (0);
			break;
		case IPS_ESTAT_RECOV:
			return (0);
		}
		break;
	}

	return (EIO);
}
1542
1543int
1544ips_error_xs(struct ips_softc *sc, struct ips_ccb *ccb)
1545{
1546	struct ips_cmdb *cmdb = ccb->c_cmdbva;
1547	struct ips_dcdb *dcdb = &cmdb->dcdb;
1548	struct scsipi_xfer *xs = ccb->c_xfer;
1549	u_int8_t gsc = IPS_STAT_GSC(ccb->c_stat);
1550
1551	/* Map hardware error codes to SCSI ones */
1552	switch (gsc) {
1553	case IPS_STAT_OK:
1554	case IPS_STAT_RECOV:
1555		return (XS_NOERROR);
1556	case IPS_STAT_BUSY:
1557		return (XS_BUSY);
1558	case IPS_STAT_TIMO:
1559		return (XS_TIMEOUT);
1560	case IPS_STAT_PDRVERR:
1561		switch (ccb->c_estat) {
1562		case IPS_ESTAT_SELTIMO:
1563			return (XS_SELTIMEOUT);
1564		case IPS_ESTAT_OURUN:
1565			if (xs && htole16(dcdb->datalen) < xs->datalen)
1566				/* underrun */
1567				return (XS_NOERROR);
1568			break;
1569		case IPS_ESTAT_HOSTRST:
1570		case IPS_ESTAT_DEVRST:
1571			return (XS_RESET);
1572		case IPS_ESTAT_RECOV:
1573			return (XS_NOERROR);
1574		case IPS_ESTAT_CKCOND:
1575			return (XS_SENSE);
1576		}
1577		break;
1578	}
1579
1580	return (XS_DRIVER_STUFFUP);
1581}
1582
/*
 * Interrupt handler: drain the controller's completion status until
 * it reports empty (0xffffffff), completing each referenced CCB.
 * Returns non-zero if the interrupt was ours.
 */
int
ips_intr(void *arg)
{
	struct ips_softc *sc = arg;
	struct ips_ccb *ccb;
	u_int32_t status;
	int id;

	DPRINTF(IPS_D_XFER, ("%s: ips_intr", device_xname(sc->sc_dev)));
	if (!ips_isintr(sc)) {
		DPRINTF(IPS_D_XFER, (": not ours\n"));
		return (0);
	}
	DPRINTF(IPS_D_XFER, ("\n"));

	/* Process completed commands */
	while ((status = ips_status(sc)) != 0xffffffff) {
		DPRINTF(IPS_D_XFER, ("%s: ips_intr: status 0x%08x\n",
		    device_xname(sc->sc_dev), status));

		/* Sanity-check the CCB id reported by the hardware. */
		id = IPS_STAT_ID(status);
		if (id >= sc->sc_nccbs) {
			DPRINTF(IPS_D_ERR, ("%s: ips_intr: invalid id %d\n",
			    device_xname(sc->sc_dev), id));
			continue;
		}

		/* Ignore completions for CCBs we never queued. */
		ccb = &sc->sc_ccb[id];
		if (ccb->c_state != IPS_CCB_QUEUED) {
			DPRINTF(IPS_D_ERR, ("%s: ips_intr: cmd 0x%02x not "
			    "queued, state %d, status 0x%08x\n",
			    device_xname(sc->sc_dev), ccb->c_id, ccb->c_state,
			    status));
			continue;
		}

		ccb->c_state = IPS_CCB_DONE;
		ccb->c_stat = IPS_STAT_BASIC(status);
		ccb->c_estat = IPS_STAT_EXT(status);

		/*
		 * Polled commands are finished by the waiter in
		 * ips_poll(); just wake it.  Others complete here.
		 */
		if (ccb->c_flags & XS_CTL_POLL) {
			wakeup(ccb);
		} else {
			ips_done(sc, ccb);
		}
	}

	return (1);
}
1632
/*
 * Watchdog callout: a non-polled command did not complete within the
 * transfer's timeout.  Complete it with a fake timeout status.
 */
void
ips_timeout(void *arg)
{
	struct ips_ccb *ccb = arg;
	struct ips_softc *sc = ccb->c_sc;
	struct scsipi_xfer *xs = ccb->c_xfer;
	int s;

	/* Block the interrupt handler while completing the CCB. */
	s = splbio();
	if (xs)
		scsi_print_addr(xs->xs_periph);
	else
		printf("%s: ", device_xname(sc->sc_dev));
	printf("timeout\n");

	/*
	 * Command never completed. Fake hardware status byte
	 * to indicate timeout.
	 * XXX: need to remove command from controller.
	 */
	ccb->c_stat = IPS_STAT_TIMO;
	ips_done(sc, ccb);
	splx(s);
}
1657
/*
 * Issue a polled GETADAPTERINFO command, reading the result into the
 * adapter member of the shared info buffer.  Returns non-zero on
 * failure (no free CCB or command error).
 */
int
ips_getadapterinfo(struct ips_softc *sc, int flags)
{
	struct ips_ccb *ccb;
	struct ips_cmd *cmd;

	ccb = ips_ccb_get(sc);
	if (ccb == NULL)
		return (1);

	ccb->c_flags = XS_CTL_DATA_IN | XS_CTL_POLL | flags;
	ccb->c_done = ips_done_mgmt;

	cmd = ccb->c_cmdbva;
	cmd->code = IPS_CMD_GETADAPTERINFO;
	/* DMA destination: &sc_info->adapter in the shared buffer. */
	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
	    adapter));

	return (ips_cmd(sc, ccb));
}
1678
/*
 * Issue a polled GETDRIVEINFO command, reading the result into the
 * drive member of the shared info buffer.  Returns non-zero on
 * failure.
 */
int
ips_getdriveinfo(struct ips_softc *sc, int flags)
{
	struct ips_ccb *ccb;
	struct ips_cmd *cmd;

	ccb = ips_ccb_get(sc);
	if (ccb == NULL)
		return (1);

	ccb->c_flags = XS_CTL_DATA_IN | XS_CTL_POLL | flags;
	ccb->c_done = ips_done_mgmt;

	cmd = ccb->c_cmdbva;
	cmd->code = IPS_CMD_GETDRIVEINFO;
	/* DMA destination: &sc_info->drive in the shared buffer. */
	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
	    drive));

	return (ips_cmd(sc, ccb));
}
1699
/*
 * Issue a polled READCONF command, reading the RAID configuration
 * into the conf member of the shared info buffer.  Returns non-zero
 * on failure.
 */
int
ips_getconf(struct ips_softc *sc, int flags)
{
	struct ips_ccb *ccb;
	struct ips_cmd *cmd;

	ccb = ips_ccb_get(sc);
	if (ccb == NULL)
		return (1);

	ccb->c_flags = XS_CTL_DATA_IN | XS_CTL_POLL | flags;
	ccb->c_done = ips_done_mgmt;

	cmd = ccb->c_cmdbva;
	cmd->code = IPS_CMD_READCONF;
	/* DMA destination: &sc_info->conf in the shared buffer. */
	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
	    conf));

	return (ips_cmd(sc, ccb));
}
1720
/*
 * Issue a polled RWNVRAM command to read NVRAM page 5 into the pg5
 * member of the shared info buffer.  Returns non-zero on failure.
 */
int
ips_getpg5(struct ips_softc *sc, int flags)
{
	struct ips_ccb *ccb;
	struct ips_cmd *cmd;

	ccb = ips_ccb_get(sc);
	if (ccb == NULL)
		return (1);

	ccb->c_flags = XS_CTL_DATA_IN | XS_CTL_POLL | flags;
	ccb->c_done = ips_done_mgmt;

	cmd = ccb->c_cmdbva;
	cmd->code = IPS_CMD_RWNVRAM;
	/* For RWNVRAM the drive field carries the NVRAM page number. */
	cmd->drive = 5;
	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
	    pg5));

	return (ips_cmd(sc, ccb));
}
1742
1743#if NBIO > 0
/*
 * Issue a polled REBUILDSTATUS command, reading rebuild progress into
 * the rblstat member of the shared info buffer.  Returns non-zero on
 * failure.
 */
int
ips_getrblstat(struct ips_softc *sc, int flags)
{
	struct ips_ccb *ccb;
	struct ips_cmd *cmd;

	ccb = ips_ccb_get(sc);
	if (ccb == NULL)
		return (1);

	ccb->c_flags = XS_CTL_DATA_IN | XS_CTL_POLL | flags;
	ccb->c_done = ips_done_mgmt;

	cmd = ccb->c_cmdbva;
	cmd->code = IPS_CMD_REBUILDSTATUS;
	/* DMA destination: &sc_info->rblstat in the shared buffer. */
	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
	    rblstat));

	return (ips_cmd(sc, ccb));
}
1764
/*
 * Issue a polled SETSTATE command to change a physical device's state
 * bits.  Returns non-zero on failure.
 */
int
ips_setstate(struct ips_softc *sc, int chan, int target, int state, int flags)
{
	struct ips_ccb *ccb;
	struct ips_cmd *cmd;

	ccb = ips_ccb_get(sc);
	if (ccb == NULL)
		return (1);

	ccb->c_flags = XS_CTL_POLL | flags;
	ccb->c_done = ips_done_mgmt;

	/*
	 * The SETSTATE command reuses generic command fields: drive
	 * holds the channel, sgcnt the target and seg4g the new state.
	 */
	cmd = ccb->c_cmdbva;
	cmd->code = IPS_CMD_SETSTATE;
	cmd->drive = chan;
	cmd->sgcnt = target;
	cmd->seg4g = state;

	return (ips_cmd(sc, ccb));
}
1786
/*
 * Issue a polled REBUILD command rebuilding the device at
 * (chan, target) onto the device at (nchan, ntarget).  Returns
 * non-zero on failure.
 */
int
ips_rebuild(struct ips_softc *sc, int chan, int target, int nchan,
    int ntarget, int flags)
{
	struct ips_ccb *ccb;
	struct ips_cmd *cmd;

	ccb = ips_ccb_get(sc);
	if (ccb == NULL)
		return (1);

	ccb->c_flags = XS_CTL_POLL | flags;
	ccb->c_done = ips_done_mgmt;

	/*
	 * Field reuse: drive/sgcnt carry the source channel/target and
	 * seccnt packs the destination as (ntarget << 8) | nchan.
	 */
	cmd = ccb->c_cmdbva;
	cmd->code = IPS_CMD_REBUILD;
	cmd->drive = chan;
	cmd->sgcnt = target;
	cmd->seccnt = htole16(ntarget << 8 | nchan);

	return (ips_cmd(sc, ccb));
}
1809#endif	/* NBIO > 0 */
1810
/*
 * Copperhead: start a command by handing its physical command-buffer
 * address to the controller.  The CCC semaphore bit must clear before
 * a new address may be written; give up after ~10ms of polling.
 */
void
ips_copperhead_exec(struct ips_softc *sc, struct ips_ccb *ccb)
{
	u_int32_t reg;
	int timeout;

	/* Wait for the semaphore to clear; timeout hits -1 if it never does. */
	for (timeout = 100; timeout-- > 0; delay(100)) {
		reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC);
		if ((reg & IPS_REG_CCC_SEM) == 0)
			break;
	}
	if (timeout < 0) {
		device_printf(sc->sc_dev, "semaphore timeout\n");
		return;
	}

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCSA, ccb->c_cmdbpa);
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC,
	    IPS_REG_CCC_START);
}
1831
/*
 * Copperhead: enable host interrupts via the HIS register.
 */
void
ips_copperhead_intren(struct ips_softc *sc)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, IPS_REG_HIS_EN);
}
1837
/*
 * Copperhead: check whether the controller raised an interrupt.
 * Reading HIS and writing the value back acknowledges the condition;
 * 0xff is treated as a bogus (all-ones) read.
 */
int
ips_copperhead_isintr(struct ips_softc *sc)
{
	u_int8_t reg;

	reg = bus_space_read_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS);
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, reg);
	if (reg != 0xff && (reg & IPS_REG_HIS_SCE))
		return (1);

	return (0);
}
1850
/*
 * Copperhead: fetch the next completion status from the circular
 * status queue in host memory.  Returns 0xffffffff when the queue is
 * empty (tail has caught up with the hardware head).
 */
u_int32_t
ips_copperhead_status(struct ips_softc *sc)
{
	u_int32_t sqhead, sqtail, status;

	sqhead = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQH);
	DPRINTF(IPS_D_XFER, ("%s: sqhead 0x%08x, sqtail 0x%08x\n",
	    device_xname(sc->sc_dev), sqhead, sc->sc_sqtail));

	/* Advance the tail (physical address), wrapping at the queue end. */
	sqtail = sc->sc_sqtail + sizeof(u_int32_t);
	if (sqtail == sc->sc_sqm.dm_paddr + IPS_SQSZ)
		sqtail = sc->sc_sqm.dm_paddr;
	if (sqtail == sqhead)
		return (0xffffffff);

	/* Consume the entry and tell the hardware where our tail is now. */
	sc->sc_sqtail = sqtail;
	if (++sc->sc_sqidx == IPS_MAXCMDS)
		sc->sc_sqidx = 0;
	/* Queue entries are little-endian; the swap is symmetric. */
	status = htole32(sc->sc_sqbuf[sc->sc_sqidx]);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQT, sqtail);

	return (status);
}
1874
/*
 * Morpheus: start a command by posting its physical command-buffer
 * address to the inbound queue port.
 */
void
ips_morpheus_exec(struct ips_softc *sc, struct ips_ccb *ccb)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_IQP, ccb->c_cmdbpa);
}
1880
/*
 * Morpheus: enable interrupts by clearing the doorbell mask bit in
 * the outbound interrupt mask register.
 */
void
ips_morpheus_intren(struct ips_softc *sc)
{
	u_int32_t reg;

	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM);
	reg &= ~IPS_REG_OIM_DS;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM, reg);
}
1890
/*
 * Morpheus: non-zero when the outbound interrupt status register has
 * a pending interrupt.
 */
int
ips_morpheus_isintr(struct ips_softc *sc)
{
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIS) &
	    IPS_REG_OIS_PEND);
}
1897
/*
 * Morpheus: fetch the next completion status from the outbound queue
 * port.  The register reads 0xffffffff when the queue is empty, which
 * is exactly the sentinel ips_intr() checks for.
 */
u_int32_t
ips_morpheus_status(struct ips_softc *sc)
{
	u_int32_t reg;

	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OQP);
	DPRINTF(IPS_D_XFER, ("%s: status 0x%08x\n", device_xname(sc->sc_dev),
	    reg));

	return (reg);
}
1909
/*
 * Allocate an array of n CCBs, pointing each at its slice of the
 * pre-allocated command-buffer DMA area and creating a per-CCB data
 * DMA map.  Returns NULL if any map creation fails (all previously
 * created maps are destroyed).
 */
struct ips_ccb *
ips_ccb_alloc(struct ips_softc *sc, int n)
{
	struct ips_ccb *ccb;
	int i;

	ccb = malloc(n * sizeof(*ccb), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < n; i++) {
		ccb[i].c_sc = sc;
		ccb[i].c_id = i;
		/* Each CCB owns one ips_cmdb slot in the shared DMA area. */
		ccb[i].c_cmdbva = (char *)sc->sc_cmdbm.dm_vaddr +
		    i * sizeof(struct ips_cmdb);
		ccb[i].c_cmdbpa = sc->sc_cmdbm.dm_paddr +
		    i * sizeof(struct ips_cmdb);
		if (bus_dmamap_create(sc->sc_dmat, IPS_MAXFER, IPS_MAXSGS,
		    IPS_MAXFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb[i].c_dmam))
			goto fail;
	}

	return (ccb);
fail:
	/* Map i failed to create; destroy maps 0 .. i-1. */
	for (; i > 0; i--)
		bus_dmamap_destroy(sc->sc_dmat, ccb[i - 1].c_dmam);
	free(ccb, M_DEVBUF);
	return (NULL);
}
1937
1938void
1939ips_ccb_free(struct ips_softc *sc, struct ips_ccb *ccb, int n)
1940{
1941	int i;
1942
1943	for (i = 0; i < n; i++)
1944		bus_dmamap_destroy(sc->sc_dmat, ccb[i - 1].c_dmam);
1945	free(ccb, M_DEVBUF);
1946}
1947
/*
 * Take a CCB from the free list, resetting its per-command fields and
 * zeroing its command buffer.  Returns NULL if none are free.
 */
struct ips_ccb *
ips_ccb_get(struct ips_softc *sc)
{
	struct ips_ccb *ccb;

	mutex_enter(&sc->sc_ccb_mtx);
	if ((ccb = SLIST_FIRST(&sc->sc_ccbq_free)) != NULL) {
		SLIST_REMOVE_HEAD(&sc->sc_ccbq_free, c_link);
		ccb->c_flags = 0;
		ccb->c_xfer = NULL;
		bzero(ccb->c_cmdbva, sizeof(struct ips_cmdb));
	}
	mutex_exit(&sc->sc_ccb_mtx);

	return (ccb);
}
1964
/*
 * Return a CCB to the free list, marking it free.
 */
void
ips_ccb_put(struct ips_softc *sc, struct ips_ccb *ccb)
{
	ccb->c_state = IPS_CCB_FREE;
	mutex_enter(&sc->sc_ccb_mtx);
	SLIST_INSERT_HEAD(&sc->sc_ccbq_free, ccb, c_link);
	mutex_exit(&sc->sc_ccb_mtx);
}
1973
/*
 * Allocate, map and load a single-segment DMA-safe memory region of
 * the given size, recording everything in *dm.  Returns 0 on success,
 * 1 on failure with all partial resources released (goto-cleanup in
 * reverse order of acquisition).
 */
int
ips_dmamem_alloc(struct dmamem *dm, bus_dma_tag_t tag, bus_size_t size)
{
	int nsegs;

	dm->dm_tag = tag;
	dm->dm_size = size;

	if (bus_dmamap_create(tag, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dm->dm_map))
		return (1);
	if (bus_dmamem_alloc(tag, size, 0, 0, &dm->dm_seg, 1, &nsegs,
	    BUS_DMA_NOWAIT))
		goto fail1;
	if (bus_dmamem_map(tag, &dm->dm_seg, 1, size, &dm->dm_vaddr,
	    BUS_DMA_NOWAIT))
		goto fail2;
	if (bus_dmamap_load(tag, dm->dm_map, dm->dm_vaddr, size, NULL,
	    BUS_DMA_NOWAIT))
		goto fail3;

	return (0);

fail3:
	bus_dmamem_unmap(tag, dm->dm_vaddr, size);
fail2:
	bus_dmamem_free(tag, &dm->dm_seg, 1);
fail1:
	bus_dmamap_destroy(tag, dm->dm_map);
	return (1);
}
2005
/*
 * Release everything ips_dmamem_alloc() set up, in reverse order:
 * unload the map, unmap and free the memory, destroy the map.
 */
void
ips_dmamem_free(struct dmamem *dm)
{
	bus_dmamap_unload(dm->dm_tag, dm->dm_map);
	bus_dmamem_unmap(dm->dm_tag, dm->dm_vaddr, dm->dm_size);
	bus_dmamem_free(dm->dm_tag, &dm->dm_seg, 1);
	bus_dmamap_destroy(dm->dm_tag, dm->dm_map);
}
2014