scsi_sa.c revision 39884
1/*
2 * Implementation of SCSI Sequential Access Peripheral driver for CAM.
3 *
4 * Copyright (c) 1997 Justin T. Gibbs
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification, immediately at the beginning of the file.
13 * 2. The name of the author may not be used to endorse or promote products
14 *    derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 *      $Id: scsi_sa.c,v 1.1 1998/09/15 06:36:34 gibbs Exp $
29 */
30
31#include <sys/param.h>
32#include <sys/queue.h>
33#ifdef KERNEL
34#include <sys/systm.h>
35#include <sys/kernel.h>
36#endif
37#include <sys/types.h>
38#include <sys/buf.h>
39#include <sys/malloc.h>
40#include <sys/mtio.h>
41#include <sys/conf.h>
43#include <sys/devicestat.h>
44#include <machine/limits.h>
45
46#ifndef KERNEL
47#include <stdio.h>
48#include <string.h>
49#endif
50
51#include <cam/cam.h>
52#include <cam/cam_ccb.h>
53#include <cam/cam_extend.h>
54#include <cam/cam_periph.h>
55#include <cam/cam_xpt_periph.h>
56#include <cam/cam_debug.h>
57
58#include <cam/scsi/scsi_all.h>
59#include <cam/scsi/scsi_message.h>
60#include <cam/scsi/scsi_sa.h>
61
62#ifdef KERNEL
63
64#include <opt_sa.h>
65
66#ifndef SA_SPACE_TIMEOUT
67#define SA_SPACE_TIMEOUT	1 * 60
68#endif
69#ifndef SA_REWIND_TIMEOUT
70#define SA_REWIND_TIMEOUT	2 * 60
71#endif
72#ifndef SA_ERASE_TIMEOUT
73#define SA_ERASE_TIMEOUT	4 * 60
74#endif
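/*
 * The three timeouts above are expressed in minutes; they are scaled to
 * milliseconds (* 60 * 1000) where the corresponding commands are built
 * below.
 */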
75
79typedef enum {
80	SA_STATE_NORMAL
81} sa_state;
82
83typedef enum {
84	SA_CCB_BUFFER_IO,
85	SA_CCB_WAITING
86} sa_ccb_types;
87
88#define ccb_type ppriv_field0
89#define ccb_bp	 ppriv_ptr1
90
91typedef enum {
92	SA_FLAG_OPEN		= 0x0001,
93	SA_FLAG_FIXED		= 0x0002,
94	SA_FLAG_TAPE_LOCKED	= 0x0004,
95	SA_FLAG_TAPE_MOUNTED	= 0x0008,
96	SA_FLAG_TAPE_WP		= 0x0010,
97	SA_FLAG_TAPE_WRITTEN	= 0x0020,
98	SA_FLAG_2FM_AT_EOD	= 0x0040,
99	SA_FLAG_EOM_PENDING	= 0x0080,
100	SA_FLAG_EIO_PENDING	= 0x0100,
101	SA_FLAG_EOF_PENDING	= 0x0200,
102	SA_FLAG_ERR_PENDING	= (SA_FLAG_EOM_PENDING|SA_FLAG_EIO_PENDING|
103				   SA_FLAG_EOF_PENDING),
104	SA_FLAG_INVALID		= 0x0400,
105	SA_FLAG_COMP_ENABLED	= 0x0800,
106	SA_FLAG_COMP_UNSUPP	= 0x1000
107} sa_flags;
108
109typedef enum {
110	SA_MODE_REWIND		= 0x00,
111	SA_MODE_NOREWIND	= 0x01,
112	SA_MODE_OFFLINE		= 0x02
113} sa_mode;
114
115typedef enum {
116	SA_PARAM_NONE		= 0x00,
117	SA_PARAM_BLOCKSIZE	= 0x01,
118	SA_PARAM_DENSITY	= 0x02,
119	SA_PARAM_COMPRESSION	= 0x04,
120	SA_PARAM_BUFF_MODE	= 0x08,
121	SA_PARAM_NUMBLOCKS	= 0x10,
122	SA_PARAM_WP		= 0x20,
123	SA_PARAM_SPEED		= 0x40,
124	SA_PARAM_ALL		= 0x7f
125} sa_params;
126
127typedef enum {
128	SA_QUIRK_NONE		= 0x00,
129	SA_QUIRK_NOCOMP		= 0x01
130} sa_quirks;
131
132struct sa_softc {
133	sa_state	state;
134	sa_flags	flags;
135	sa_quirks	quirks;
136	struct		buf_queue_head buf_queue;
137	struct		devstat device_stats;
138	int		blk_gran;
139	int		blk_mask;
140	int		blk_shift;
141	u_int32_t	max_blk;
142	u_int32_t	min_blk;
143	u_int8_t	media_density;
144	u_int32_t	media_blksize;
145	u_int32_t	media_numblks;
146	u_int32_t	comp_algorithm;
147	u_int32_t	saved_comp_algorithm;
148	u_int8_t	speed;
149	int		buffer_mode;
150	int		filemarks;
151	union		ccb saved_ccb;
152};
153
154struct sa_quirk_entry {
155	struct scsi_inquiry_pattern inq_pat;
156	sa_quirks quirks;
157};
158
159static struct sa_quirk_entry sa_quirk_table[] =
160{
161	{
162		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE",
163		  "Python 25601*", "*"}, /*quirks*/SA_QUIRK_NOCOMP
164	}
165};
166
167static	d_open_t	saopen;
168static	d_read_t	saread;
169static	d_write_t	sawrite;
170static	d_close_t	saclose;
171static	d_strategy_t	sastrategy;
172static	d_ioctl_t	saioctl;
173static	periph_init_t	sainit;
174static	periph_ctor_t	saregister;
175static	periph_dtor_t	sacleanup;
176static	periph_start_t	sastart;
177static	void		saasync(void *callback_arg, u_int32_t code,
178				struct cam_path *path, void *arg);
179static	void		sadone(struct cam_periph *periph,
180			       union ccb *start_ccb);
181static  int		saerror(union ccb *ccb, u_int32_t cam_flags,
182				u_int32_t sense_flags);
183static int		sacheckeod(struct cam_periph *periph);
184static int		sagetparams(struct cam_periph *periph,
185				    sa_params params_to_get,
186				    u_int32_t *blocksize, u_int8_t *density,
187				    u_int32_t *numblocks, int *buff_mode,
188				    u_int8_t *write_protect, u_int8_t *speed,
189				    int *comp_supported, int *comp_enabled,
190				    u_int32_t *comp_algorithm,
191				  struct scsi_data_compression_page *comp_page);
192static int		sasetparams(struct cam_periph *periph,
193				    sa_params params_to_set,
194				    u_int32_t blocksize, u_int8_t density,
195				    u_int32_t comp_algorithm);
196static void		saprevent(struct cam_periph *periph, int action);
197static int		sarewind(struct cam_periph *periph);
198static int		saspace(struct cam_periph *periph, int count,
199				scsi_space_code code);
200static int		samount(struct cam_periph *periph);
201static int		saretension(struct cam_periph *periph);
202static int		sareservereleaseunit(struct cam_periph *periph,
203					     int reserve);
204static int		saloadunload(struct cam_periph *periph, int load);
205static int		saerase(struct cam_periph *periph, int longerase);
206static int		sawritefilemarks(struct cam_periph *periph,
207					 int nmarks, int setmarks);
208
209static struct periph_driver sadriver =
210{
211	sainit, "sa",
212	TAILQ_HEAD_INITIALIZER(sadriver.units), /* generation */ 0
213};
214
215DATA_SET(periphdriver_set, sadriver);
216
217#define SAUNIT(DEV) ((minor(DEV)&0xF0) >> 4)	/* 4 bit unit. */
218#define SASETUNIT(DEV, U) makedev(major(DEV), ((U) << 4))
219
220#define SAMODE(z) ((minor(z) & 0x03))
221#define SADENSITY(z) (((minor(z) >> 2) & 0x03))
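/*
 * Minor number layout, as encoded by the macros above: bits 0-1 select
 * the mode (rewind/no-rewind/offline), bits 2-3 select the density slot,
 * and bits 4-7 carry the unit number.
 */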
222
223/* For 2.2-stable support */
224#ifndef D_TAPE
225#define D_TAPE 0
226#endif
227
228#define CTLMODE	3
229#define SA_CDEV_MAJOR 14
230#define SA_BDEV_MAJOR 5
231
232static struct cdevsw sa_cdevsw =
233{
234	/*d_open*/	saopen,
235	/*d_close*/	saclose,
236	/*d_read*/	saread,
237	/*d_write*/	sawrite,
238	/*d_ioctl*/	saioctl,
239	/*d_stop*/	nostop,
240	/*d_reset*/	noreset,
241	/*d_devtotty*/	nodevtotty,
242	/*d_poll*/	seltrue,
243	/*d_mmap*/	nommap,
244	/*d_strategy*/	sastrategy,
245	/*d_name*/	"sa",
246	/*d_spare*/	NULL,
247	/*d_maj*/	-1,
248	/*d_dump*/	nodump,
249	/*d_psize*/	nopsize,
250	/*d_flags*/	D_TAPE,
251	/*d_maxio*/	0,
252	/*b_maj*/	-1
253};
254
255static struct extend_array *saperiphs;
256
257static int
258saopen(dev_t dev, int flags, int fmt, struct proc *p)
259{
260	struct cam_periph *periph;
261	struct sa_softc *softc;
262	int unit;
263	int mode;
264	int density;
265	int error;
266
267	unit = SAUNIT(dev);
268	mode = SAMODE(dev);
269	density = SADENSITY(dev);
270
271	periph = cam_extend_get(saperiphs, unit);
272	if (periph == NULL)
273		return (ENXIO);
274
275	softc = (struct sa_softc *)periph->softc;
276
277	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("saopen: dev=0x%x (unit %d, mode %d, density %d)\n", dev,
279	     unit, mode, density));
280
281	if (softc->flags & SA_FLAG_INVALID)
282		return(ENXIO);
283
284	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
285		return (error); /* error code from tsleep */
286	}
287
288	if ((softc->flags & SA_FLAG_OPEN) == 0) {
		if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
			cam_periph_unlock(periph);
			return(ENXIO);
		}
291
292		if ((error = sareservereleaseunit(periph, TRUE)) != 0) {
293			cam_periph_unlock(periph);
294			cam_periph_release(periph);
295			return(error);
296		}
297	}
298
299	if (error == 0) {
300		if ((softc->flags & SA_FLAG_OPEN) != 0) {
301			error = EBUSY;
302		}
303
304		if (error == 0) {
305			error = samount(periph);
306		}
307		/* Perform other checking... */
308	}
309
310	if (error == 0) {
311		saprevent(periph, PR_PREVENT);
312		softc->flags |= SA_FLAG_OPEN;
313	}
314
315	cam_periph_unlock(periph);
316	return (error);
317}
318
319static int
320saclose(dev_t dev, int flag, int fmt, struct proc *p)
321{
322	struct	cam_periph *periph;
323	struct	sa_softc *softc;
324	int	unit;
325	int	mode;
326	int	error;
327
328	unit = SAUNIT(dev);
329	mode = SAMODE(dev);
330	periph = cam_extend_get(saperiphs, unit);
331	if (periph == NULL)
332		return (ENXIO);
333
334	softc = (struct sa_softc *)periph->softc;
335
336	if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
337		return (error); /* error code from tsleep */
338	}
339
340	sacheckeod(periph);
341
342	saprevent(periph, PR_ALLOW);
343
344	switch (mode) {
345	case SA_MODE_REWIND:
346		sarewind(periph);
347		break;
348	case SA_MODE_OFFLINE:
349		sarewind(periph);
350		saloadunload(periph, /*load*/FALSE);
351		break;
352	case SA_MODE_NOREWIND:
353	default:
354		break;
355	}
356
357	softc->flags &= ~SA_FLAG_OPEN;
358
359	/* release the device */
360	sareservereleaseunit(periph, FALSE);
361
362	cam_periph_unlock(periph);
363	cam_periph_release(periph);
364
365	return (0);
366}
367
368static int
369saread(dev_t dev, struct uio *uio, int ioflag)
370{
371	return(physio(sastrategy, NULL, dev, 1, minphys, uio));
372}
373
374static int
375sawrite(dev_t dev, struct uio *uio, int ioflag)
376{
377	return(physio(sastrategy, NULL, dev, 0, minphys, uio));
378}
379
380/*
381 * Actually translate the requested transfer into one the physical driver
382 * can understand.  The transfer is described by a buf and will include
383 * only one physical transfer.
384 */
385static void
386sastrategy(struct buf *bp)
387{
388	struct cam_periph *periph;
389	struct sa_softc *softc;
390	u_int  unit;
391	int    s;
392
393	unit = SAUNIT(bp->b_dev);
394	periph = cam_extend_get(saperiphs, unit);
395	if (periph == NULL) {
396		bp->b_error = ENXIO;
397		goto bad;
398	}
399	softc = (struct sa_softc *)periph->softc;
400
401	/*
	 * If it's a null transfer, return immediately
403	 */
404	if (bp->b_bcount == 0)
405		goto done;
406
407	/* valid request?  */
408	if (softc->flags & SA_FLAG_FIXED) {
409		/*
410		 * Fixed block device.  The byte count must
411		 * be a multiple of our block size.
412		 */
413		if (((softc->blk_mask != ~0)
414		  && ((bp->b_bcount & softc->blk_mask) != 0))
415		 || ((softc->blk_mask == ~0)
416		  && ((bp->b_bcount % softc->min_blk) != 0))) {
417			xpt_print_path(periph->path);
418			printf("Invalid request.  Fixed block device "
419			       "requests must be a multiple "
420			       "of %d bytes\n", softc->min_blk);
421			bp->b_error = EINVAL;
422			goto bad;
423		}
424	} else if ((bp->b_bcount > softc->max_blk)
425		|| (bp->b_bcount < softc->min_blk)
426		|| (bp->b_bcount & softc->blk_mask) != 0) {
427
428		xpt_print_path(periph->path);
429		printf("Invalid request.  Variable block device "
430		       "requests must be ");
431		if (softc->blk_mask != 0) {
432			printf("a multiple of %d ",
433			       (0x1 << softc->blk_gran));
434		}
435		printf("between %d and %d bytes\n",
436		       softc->min_blk, softc->max_blk);
437		bp->b_error = EINVAL;
438		goto bad;
439        }
440
441	/*
442	 * Mask interrupts so that the pack cannot be invalidated until
443	 * after we are in the queue.  Otherwise, we might not properly
444	 * clean up one of the buffers.
445	 */
446	s = splbio();
447
448	/*
449	 * Place it in the queue of disk activities for this disk
450	 */
451	bufq_insert_tail(&softc->buf_queue, bp);
452
453	splx(s);
454
455	/*
456	 * Schedule ourselves for performing the work.
457	 */
458	xpt_schedule(periph, /* XXX priority */1);
459
460	return;
461bad:
462	bp->b_flags |= B_ERROR;
463done:
464
465	/*
466	 * Correctly set the buf to indicate a completed xfer
467	 */
468	bp->b_resid = bp->b_bcount;
469	biodone(bp);
470}
471
472static int
473saioctl(dev_t dev, u_long cmd, caddr_t arg, int flag, struct proc *p)
474{
475	struct cam_periph *periph;
476	struct sa_softc *softc;
477	int unit;
478	int mode;
479	int density;
480	int error;
481
482	unit = SAUNIT(dev);
483	mode = SAMODE(dev);
484	density = SADENSITY(dev);
485
486	periph = cam_extend_get(saperiphs, unit);
487	if (periph == NULL)
488		return (ENXIO);
489
490	softc = (struct sa_softc *)periph->softc;
491
492	/*
493	 * Find the device that the user is talking about
494	 */
495	switch (cmd) {
496	case MTIOCGET:
497	{
498		struct mtget *g = (struct mtget *)arg;
499
500		CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
			 ("saioctl: MTIOCGET\n"));
502
503		bzero(g, sizeof(struct mtget));
504		g->mt_type = 0x7;	/* Ultrix compat *//*? */
505		g->mt_density = softc->media_density;
506		g->mt_blksiz = softc->media_blksize;
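		/*
		 * The per-mode slots (0-3) below are all reported with the
		 * same values; this driver does not track separate settings
		 * for each density slot of the minor number.
		 */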
507		if (softc->flags & SA_FLAG_COMP_UNSUPP) {
508			g->mt_comp = MT_COMP_UNSUPP;
509			g->mt_comp0 = MT_COMP_UNSUPP;
510			g->mt_comp1 = MT_COMP_UNSUPP;
511			g->mt_comp2 = MT_COMP_UNSUPP;
512			g->mt_comp3 = MT_COMP_UNSUPP;
513		} else if ((softc->flags & SA_FLAG_COMP_ENABLED) == 0) {
514			g->mt_comp = MT_COMP_DISABLED;
515			g->mt_comp0 = MT_COMP_DISABLED;
516			g->mt_comp1 = MT_COMP_DISABLED;
517			g->mt_comp2 = MT_COMP_DISABLED;
518			g->mt_comp3 = MT_COMP_DISABLED;
519		} else {
520			g->mt_comp = softc->comp_algorithm;
521			g->mt_comp0 = softc->comp_algorithm;
522			g->mt_comp1 = softc->comp_algorithm;
523			g->mt_comp2 = softc->comp_algorithm;
524			g->mt_comp3 = softc->comp_algorithm;
525		}
526		g->mt_density0 = softc->media_density;
527		g->mt_density1 = softc->media_density;
528		g->mt_density2 = softc->media_density;
529		g->mt_density3 = softc->media_density;
530		g->mt_blksiz0 = softc->media_blksize;
531		g->mt_blksiz1 = softc->media_blksize;
532		g->mt_blksiz2 = softc->media_blksize;
533		g->mt_blksiz3 = softc->media_blksize;
534		error = 0;
535		break;
536	}
537	case MTIOCTOP:
538	{
539		struct mtop *mt;
540		int    count;
541
542		mt = (struct mtop *)arg;
543
544		CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
545			 ("saioctl: op=0x%x count=0x%x\n",
546			  mt->mt_op, mt->mt_count));
547
548		count = mt->mt_count;
549		switch (mt->mt_op) {
550		case MTWEOF:	/* write an end-of-file record */
551			error = sawritefilemarks(periph, count,
552						   /*setmarks*/FALSE);
553			break;
554		case MTBSR:	/* backward space record */
555		case MTFSR:	/* forward space record */
556		case MTBSF:	/* backward space file */
557		case MTFSF:	/* forward space file */
558		case MTEOD:	/* space to end of recorded medium */
559		{
560			int nmarks;
561			scsi_space_code spaceop;
562
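			/*
			 * Terminate any data we have written with the
			 * expected filemarks before moving, and adjust the
			 * space count to account for the marks just written.
			 */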
563			nmarks = softc->filemarks;
564			error = sacheckeod(periph);
565			nmarks -= softc->filemarks;
566
567			if ((mt->mt_op == MTBSR) || (mt->mt_op == MTBSF))
568				count = -count;
569
570			if ((mt->mt_op == MTBSF) || (mt->mt_op == MTFSF))
571				spaceop = SS_FILEMARKS;
572			else if ((mt->mt_op == MTBSR) || (mt->mt_op == MTFSR))
573				spaceop = SS_BLOCKS;
574			else {
575				spaceop = SS_EOD;
576				count = 0;
577				nmarks = 0;
578			}
579
583			if (error == 0)
584				error = saspace(periph, count - nmarks,
585						spaceop);
586			break;
587		}
588		case MTREW:	/* rewind */
589			error = sarewind(periph);
590			break;
591		case MTERASE:	/* erase */
592			error = saerase(periph, count);
593			break;
594		case MTRETENS:	/* re-tension tape */
595			error = saretension(periph);
596			break;
597		case MTOFFL:	/* rewind and put the drive offline */
598			/*
599			 * Be sure to allow media removal before
600			 * attempting the eject.
601			 */
602			saprevent(periph, PR_ALLOW);
603			error = sarewind(periph);
604
605			if (error == 0)
606				error = saloadunload(periph, /*load*/FALSE);
607			else
608				break;
609
610			/* XXX KDM */
611			softc->flags &= ~SA_FLAG_TAPE_LOCKED;
612			softc->flags &= ~SA_FLAG_TAPE_MOUNTED;
613			break;
614		case MTNOP:	/* no operation, sets status only */
615		case MTCACHE:	/* enable controller cache */
616		case MTNOCACHE:	/* disable controller cache */
617			error = 0;
618			break;
619		case MTSETBSIZ:	/* Set block size for device */
620
621			error = sasetparams(periph, SA_PARAM_BLOCKSIZE, count,
622					    0, 0);
623			break;
624		case MTSETDNSTY:	/* Set density for device and mode */
625			if (count > UCHAR_MAX) {
626				error = EINVAL;
627				break;
628			} else {
629				error = sasetparams(periph, SA_PARAM_DENSITY,
630						    0, count, 0);
631			}
632			break;
633		case MTCOMP:	/* enable compression */
634			/*
635			 * Some devices don't support compression, and
636			 * don't like it if you ask them for the
637			 * compression page.
638			 */
639			if ((softc->quirks & SA_QUIRK_NOCOMP)
640			 || (softc->flags & SA_FLAG_COMP_UNSUPP)) {
641				error = ENODEV;
642				break;
643			}
644			error = sasetparams(periph, SA_PARAM_COMPRESSION,
645					    0, 0, count);
646			break;
647		default:
648			error = EINVAL;
649		}
650		break;
651	}
652	case MTIOCIEOT:
653	case MTIOCEEOT:
654		error = 0;
655		break;
656	default:
657		error = cam_periph_ioctl(periph, cmd, arg, saerror);
658		break;
659	}
660	return (error);
661}
662
663static void
664sainit(void)
665{
666	cam_status status;
667	struct cam_path *path;
668
669	/*
670	 * Create our extend array for storing the devices we attach to.
671	 */
672	saperiphs = cam_extend_new();
673	if (saperiphs == NULL) {
674		printf("sa: Failed to alloc extend array!\n");
675		return;
676	}
677
678	/*
679	 * Install a global async callback.
680	 */
681	status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
682				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
683
684	if (status == CAM_REQ_CMP) {
		/* Register the async callbacks of interest */
686		struct ccb_setasync csa; /*
687					  * This is an immediate CCB,
688					  * so using the stack is OK
689					  */
690		xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
691		csa.ccb_h.func_code = XPT_SASYNC_CB;
692		csa.event_enable = AC_FOUND_DEVICE;
693		csa.callback = saasync;
694		csa.callback_arg = NULL;
695		xpt_action((union ccb *)&csa);
696		status = csa.ccb_h.status;
697		xpt_free_path(path);
698	}
699
700	if (status != CAM_REQ_CMP) {
701		printf("sa: Failed to attach master async callback "
702		       "due to status 0x%x!\n", status);
703	} else {
		/* If we were successful, register our devsw */
705		cdevsw_add_generic(SA_BDEV_MAJOR, SA_CDEV_MAJOR, &sa_cdevsw);
706	}
707}
708
709static void
710sacleanup(struct cam_periph *periph)
711{
712	cam_extend_release(saperiphs, periph->unit_number);
713	xpt_print_path(periph->path);
714	printf("removing device entry\n");
715	free(periph->softc, M_DEVBUF);
716}
717
718static void
719saasync(void *callback_arg, u_int32_t code,
720	struct cam_path *path, void *arg)
721{
722	struct cam_periph *periph;
723
724	periph = (struct cam_periph *)callback_arg;
725	switch (code) {
726	case AC_FOUND_DEVICE:
727	{
728		struct ccb_getdev *cgd;
729		cam_status status;
730
731		cgd = (struct ccb_getdev *)arg;
732
733		if (cgd->pd_type != T_SEQUENTIAL)
734			break;
735
736		/*
737		 * Allocate a peripheral instance for
738		 * this device and start the probe
739		 * process.
740		 */
741		status = cam_periph_alloc(saregister, sacleanup, sastart,
742					  "sa", CAM_PERIPH_BIO, cgd->ccb_h.path,
743					  saasync, AC_FOUND_DEVICE, cgd);
744
745		if (status != CAM_REQ_CMP
746		 && status != CAM_REQ_INPROG)
747			printf("saasync: Unable to probe new device "
748				"due to status 0x%x\n", status);
749		break;
750	}
751	case AC_LOST_DEVICE:
752	{
753		int s;
754		struct sa_softc *softc;
755		struct buf *q_bp;
756		struct ccb_setasync csa;
757
758		softc = (struct sa_softc *)periph->softc;
759
760		/*
		 * Ensure that no other async callbacks that
762		 * might affect this peripheral can come through.
763		 */
764		s = splcam();
765
766		/*
767		 * De-register any async callbacks.
768		 */
769		xpt_setup_ccb(&csa.ccb_h, periph->path,
770			      /* priority */ 5);
771		csa.ccb_h.func_code = XPT_SASYNC_CB;
772		csa.event_enable = 0;
773		csa.callback = saasync;
774		csa.callback_arg = periph;
775		xpt_action((union ccb *)&csa);
776
777		softc->flags |= SA_FLAG_INVALID;
778
779		/*
780		 * Return all queued I/O with ENXIO.
781		 * XXX Handle any transactions queued to the card
782		 *     with XPT_ABORT_CCB.
783		 */
784		while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){
785			bufq_remove(&softc->buf_queue, q_bp);
786			q_bp->b_resid = q_bp->b_bcount;
787			q_bp->b_error = ENXIO;
788			q_bp->b_flags |= B_ERROR;
789			biodone(q_bp);
790		}
791		devstat_remove_entry(&softc->device_stats);
792
793		xpt_print_path(periph->path);
794		printf("lost device\n");
795
796		splx(s);
797
798		cam_periph_invalidate(periph);
		break;
	}
800	case AC_TRANSFER_NEG:
801	case AC_SENT_BDR:
802	case AC_SCSI_AEN:
803	case AC_UNSOL_RESEL:
804	case AC_BUS_RESET:
805	default:
806		break;
807	}
808}
809
810static cam_status
811saregister(struct cam_periph *periph, void *arg)
812{
813	int s;
814	struct sa_softc *softc;
815	struct ccb_setasync csa;
816	struct ccb_getdev *cgd;
817	caddr_t match;
818
819	cgd = (struct ccb_getdev *)arg;
820	if (periph == NULL) {
821		printf("saregister: periph was NULL!!\n");
822		return(CAM_REQ_CMP_ERR);
823	}
824
825	if (cgd == NULL) {
826		printf("saregister: no getdev CCB, can't register device\n");
827		return(CAM_REQ_CMP_ERR);
828	}
829
830	softc = (struct sa_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);
831
832	if (softc == NULL) {
833		printf("saregister: Unable to probe new device. "
834		       "Unable to allocate softc\n");
835		return(CAM_REQ_CMP_ERR);
836	}
837
838	bzero(softc, sizeof(*softc));
839	softc->state = SA_STATE_NORMAL;
840	bufq_init(&softc->buf_queue);
841	periph->softc = softc;
842	cam_extend_set(saperiphs, periph->unit_number, periph);
843
844	/*
845	 * See if this device has any quirks.
846	 */
847	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
848			       (caddr_t)sa_quirk_table,
849			       sizeof(sa_quirk_table)/sizeof(*sa_quirk_table),
850			       sizeof(*sa_quirk_table), scsi_inquiry_match);
851
852	if (match != NULL)
853		softc->quirks = ((struct sa_quirk_entry *)match)->quirks;
854	else
855		softc->quirks = SA_QUIRK_NONE;
856
857	/*
858 	 * The SA driver supports a blocksize, but we don't know the
859	 * blocksize until we sense the media.  So, set a flag to
860	 * indicate that the blocksize is unavailable right now.
	 * We'll clear the flag as soon as we've sensed the media's blocksize.
862	 */
863	devstat_add_entry(&softc->device_stats, "sa",
864			  periph->unit_number, 0,
865			  DEVSTAT_BS_UNAVAILABLE,
866			  cgd->pd_type | DEVSTAT_TYPE_IF_SCSI);
867
868	/*
869	 * Add an async callback so that we get
870	 * notified if this device goes away.
871	 */
872	xpt_setup_ccb(&csa.ccb_h, periph->path, /* priority */ 5);
873	csa.ccb_h.func_code = XPT_SASYNC_CB;
874	csa.event_enable = AC_LOST_DEVICE;
875	csa.callback = saasync;
876	csa.callback_arg = periph;
877	xpt_action((union ccb *)&csa);
878
879	xpt_announce_periph(periph, NULL);
880
881	return(CAM_REQ_CMP);
882}
883
884static void
885sastart(struct cam_periph *periph, union ccb *start_ccb)
886{
887	struct sa_softc *softc;
888
889	softc = (struct sa_softc *)periph->softc;
890
891
892	switch (softc->state) {
893	case SA_STATE_NORMAL:
894	{
895		/* Pull a buffer from the queue and get going on it */
		struct buf *bp;
		struct buf *done_bp;
897		int s;
898
899		/*
900		 * See if there is a buf with work for us to do..
901		 */
902		s = splbio();
903		bp = bufq_first(&softc->buf_queue);
904		if (periph->immediate_priority <= periph->pinfo.priority) {
905			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
906					("queuing for immediate ccb\n"));
907			start_ccb->ccb_h.ccb_type = SA_CCB_WAITING;
908			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
909					  periph_links.sle);
910			periph->immediate_priority = CAM_PRIORITY_NONE;
911			splx(s);
912			wakeup(&periph->ccb_list);
913		} else if (bp == NULL) {
914			splx(s);
915			xpt_release_ccb(start_ccb);
916		} else if ((softc->flags & SA_FLAG_ERR_PENDING) != 0) {
917
918			bufq_remove(&softc->buf_queue, bp);
919			bp->b_resid = bp->b_bcount;
920			bp->b_flags |= B_ERROR;
921			if ((softc->flags & SA_FLAG_EOM_PENDING) != 0) {
922				if ((bp->b_flags & B_READ) == 0)
923					bp->b_error = ENOSPC;
924			}
925			if ((softc->flags & SA_FLAG_EIO_PENDING) != 0) {
926				bp->b_error = EIO;
927			}
			softc->flags &= ~SA_FLAG_ERR_PENDING;
			/*
			 * Complete the failed transfer before peeking at
			 * the next buffer in the queue.
			 */
			done_bp = bp;
			bp = bufq_first(&softc->buf_queue);
			splx(s);
			biodone(done_bp);
932		} else {
933			u_int32_t length;
934
935			bufq_remove(&softc->buf_queue, bp);
936
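			/*
			 * In fixed block mode the transfer length in the
			 * CDB is a block count; in variable mode it is a
			 * byte count.
			 */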
937			if ((softc->flags & SA_FLAG_FIXED) != 0) {
938				if (softc->blk_shift != 0) {
939					length =
940					    bp->b_bcount >> softc->blk_shift;
941				} else {
942					length =
943					    bp->b_bcount / softc->min_blk;
944				}
945			} else {
946				length = bp->b_bcount;
947			}
948
949			devstat_start_transaction(&softc->device_stats);
950
951			/*
952			 * XXX - Perhaps we should...
953			 * suppress illegal length indication if we are
954			 * running in variable block mode so that we don't
955			 * have to request sense every time our requested
956			 * block size is larger than the written block.
957			 * The residual information from the ccb allows
958			 * us to identify this situation anyway.  The only
959			 * problem with this is that we will not get
960			 * information about blocks that are larger than
961			 * our read buffer unless we set the block size
962			 * in the mode page to something other than 0.
963			 */
964			scsi_sa_read_write(&start_ccb->csio,
965					   /*retries*/4,
966					   sadone,
967					   MSG_SIMPLE_Q_TAG,
968					   bp->b_flags & B_READ,
969					   /*SILI*/FALSE,
970					   softc->flags & SA_FLAG_FIXED,
971					   length,
972					   bp->b_data,
973					   bp->b_bcount,
974					   SSD_FULL_SIZE,
					   120 * 60 * 1000); /* 120 minutes */
976			start_ccb->ccb_h.ccb_type = SA_CCB_BUFFER_IO;
977			start_ccb->ccb_h.ccb_bp = bp;
978			bp = bufq_first(&softc->buf_queue);
979			splx(s);
980
981			xpt_action(start_ccb);
982		}
983
984		if (bp != NULL) {
985			/* Have more work to do, so ensure we stay scheduled */
986			xpt_schedule(periph, /* XXX priority */1);
987		}
988		break;
989	}
990	}
991}
992
993
994static void
995sadone(struct cam_periph *periph, union ccb *done_ccb)
996{
997	struct sa_softc *softc;
998	struct ccb_scsiio *csio;
999
1000	softc = (struct sa_softc *)periph->softc;
1001	csio = &done_ccb->csio;
1002	switch (csio->ccb_h.ccb_type) {
1003	case SA_CCB_BUFFER_IO:
1004	{
1005		struct buf *bp;
1006		int error;
1007
1008		bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
1009		error = 0;
1010		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1011
1012			if ((error = saerror(done_ccb, 0, 0)) == ERESTART) {
1013				/*
				 * A retry was scheduled, so
1015				 * just return.
1016				 */
1017				return;
1018			}
1019		}
1020
1021		if (error == EIO) {
1022			int s;
1023			struct buf *q_bp;
1024
1025			/*
1026			 * Catastrophic error.  Mark our pack as invalid,
1027			 * return all queued I/O with EIO, and unfreeze
1028			 * our queue so that future transactions that
1029			 * attempt to fix this problem can get to the
1030			 * device.
1031			 *
1032			 */
1033
1034			s = splbio();
1035			softc->flags &= ~SA_FLAG_TAPE_MOUNTED;
1036
1037			while ((q_bp = bufq_first(&softc->buf_queue)) != NULL) {
1038				bufq_remove(&softc->buf_queue, q_bp);
1039				q_bp->b_resid = q_bp->b_bcount;
1040				q_bp->b_error = EIO;
1041				q_bp->b_flags |= B_ERROR;
1042				biodone(q_bp);
1043			}
1044			splx(s);
1045		}
1046		if (error != 0) {
1047			bp->b_resid = bp->b_bcount;
1048			bp->b_error = error;
1049			bp->b_flags |= B_ERROR;
1050			cam_release_devq(done_ccb->ccb_h.path,
1051					 /*relsim_flags*/0,
1052					 /*reduction*/0,
1053					 /*timeout*/0,
1054					 /*getcount_only*/0);
1055		} else {
1056			bp->b_resid = csio->resid;
1057			bp->b_error = 0;
1058			if (csio->resid != 0) {
1059				bp->b_flags |= B_ERROR;
1060			}
1061			if ((bp->b_flags & B_READ) == 0) {
1062				softc->flags |= SA_FLAG_TAPE_WRITTEN;
1063				softc->filemarks = 0;
1064			}
1065		}
1066
1067		devstat_end_transaction(&softc->device_stats,
1068					bp->b_bcount - bp->b_resid,
1069					done_ccb->csio.tag_action & 0xf,
1070					(bp->b_flags & B_READ) ? DEVSTAT_READ
1071							       : DEVSTAT_WRITE);
1072		biodone(bp);
1073		break;
1074	}
1075	case SA_CCB_WAITING:
1076	{
1077		/* Caller will release the CCB */
1078		wakeup(&done_ccb->ccb_h.cbfcnp);
1079		return;
1080	}
1081	}
1082	xpt_release_ccb(done_ccb);
1083}
1084
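/*
 * Mount the tape: a TEST UNIT READY eats any pending unit attentions,
 * READ BLOCK LIMITS decides between fixed and variable block mode, and
 * a mode sense picks up the current density, blocksize, write protect
 * and compression settings.
 */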
1085static int
1086samount(struct cam_periph *periph)
1087{
1088	struct	sa_softc *softc;
1089	union	ccb *ccb;
1090	struct	ccb_scsiio *csio;
1091	int	error;
1092
1093	softc = (struct sa_softc *)periph->softc;
1094	ccb = cam_periph_getccb(periph, /* priority */1);
1095	csio = &ccb->csio;
1096	error = 0;
1097
1098	/*
	 * Determine if something has happened since the last
1100	 * open/mount that would invalidate a mount.  This
1101	 * will also eat any pending UAs.
1102	 */
1103	scsi_test_unit_ready(csio,
1104			     /*retries*/1,
1105			     sadone,
1106			     MSG_SIMPLE_Q_TAG,
1107			     SSD_FULL_SIZE,
1108			     /*timeout*/5000);
1109
1110	cam_periph_runccb(ccb, /*error handler*/NULL, /*cam_flags*/0,
1111			  /*sense_flags*/0, &softc->device_stats);
1112
1113	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1114		cam_release_devq(ccb->ccb_h.path,
1115				 /*relsim_flags*/0,
1116				 /*reduction*/0,
1117				 /*timeout*/0,
1118				 /*getcount_only*/0);
1119		softc->flags &= ~SA_FLAG_TAPE_MOUNTED;
1120	}
1121
1122	if ((softc->flags & SA_FLAG_TAPE_MOUNTED) == 0) {
1123		struct	scsi_read_block_limits_data *rblim;
1124		int	buff_mode, comp_enabled, comp_supported;
1125		u_int8_t write_protect;
1126
1127		/*
1128		 * Clear out old state.
1129		 */
1130		softc->flags &= ~(SA_FLAG_TAPE_WP|SA_FLAG_TAPE_WRITTEN|
1131				  SA_FLAG_ERR_PENDING|SA_FLAG_COMP_ENABLED|
1132				  SA_FLAG_COMP_UNSUPP);
1133		softc->filemarks = 0;
1134
1135		/*
1136		 * First off, determine block limits.
1137		 */
1138		rblim = (struct  scsi_read_block_limits_data *)
1139		    malloc(sizeof(*rblim), M_TEMP, M_WAITOK);
1140
1141		scsi_read_block_limits(csio,
1142				       /*retries*/1,
1143				       sadone,
1144				       MSG_SIMPLE_Q_TAG,
1145				       rblim,
1146				       SSD_FULL_SIZE,
1147				       /*timeout*/5000);
1148
1149		error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0,
1150					  /*sense_flags*/SF_RETRY_UA,
1151					  &softc->device_stats);
1152
1153		xpt_release_ccb(ccb);
1154
1155		if (error != 0)
1156			goto exit;
1157
1158		softc->blk_gran = RBL_GRAN(rblim);
1159		softc->max_blk = scsi_3btoul(rblim->maximum);
1160		softc->min_blk = scsi_2btoul(rblim->minimum);
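		/*
		 * Equal limits mean the drive only does one block size, so
		 * run in fixed block mode; otherwise the drive accepts
		 * variable length blocks within these bounds.
		 */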
1161		if (softc->max_blk == softc->min_blk) {
1162			softc->flags |= SA_FLAG_FIXED;
1163			if (powerof2(softc->min_blk)) {
1164				softc->blk_mask = softc->min_blk - 1;
				softc->blk_shift = ffs(softc->min_blk) - 1;
1167			} else {
1168				softc->blk_mask = ~0;
1169				softc->blk_shift = 0;
1170			}
1171		} else {
1172			/*
1173			 * SCSI-III spec allows 0
1174			 * to mean "unspecified"
1175			 */
1176			if (softc->max_blk == 0) {
1177				softc->max_blk = ~0;
1178			}
1179			softc->blk_shift = 0;
1180			if (softc->blk_gran != 0) {
1181				softc->blk_mask = softc->blk_gran - 1;
1182			} else {
1183				softc->blk_mask = 0;
1184			}
1185		}
1186
1187		/*
1188		 * Next, perform a mode sense to determine
1189		 * current density, blocksize, compression etc.
1190		 */
1191		error = sagetparams(periph, SA_PARAM_ALL,
1192				    &softc->media_blksize,
1193				    &softc->media_density,
1194				    &softc->media_numblks,
1195				    &softc->buffer_mode, &write_protect,
1196				    &softc->speed, &comp_supported,
1197				    &comp_enabled, &softc->comp_algorithm,
1198				    NULL);
1199
1200		if (error != 0)
1201			goto exit;
1202
1203		if (write_protect)
1204			softc->flags |= SA_FLAG_TAPE_WP;
1205
1206		if (comp_supported) {
1207			if (comp_enabled) {
1208				softc->flags |= SA_FLAG_COMP_ENABLED;
1209
1210				if (softc->saved_comp_algorithm == 0)
1211					softc->saved_comp_algorithm =
1212						softc->comp_algorithm;
1213			}
1214		} else
1215			softc->flags |= SA_FLAG_COMP_UNSUPP;
1216
1217		if (softc->buffer_mode != SMH_SA_BUF_MODE_NOBUF)
1218			goto exit;
1219
1220		error = sasetparams(periph, SA_PARAM_BUFF_MODE, 0, 0, 0);
1221
1222		if (error == 0)
1223			softc->buffer_mode = SMH_SA_BUF_MODE_SIBUF;
1224exit:
1225		if (rblim != NULL)
1226			free(rblim, M_TEMP);
1227
1228		if (error != 0) {
1229			cam_release_devq(ccb->ccb_h.path,
1230					 /*relsim_flags*/0,
1231					 /*reduction*/0,
1232					 /*timeout*/0,
1233					 /*getcount_only*/0);
1234		}
1235	} else
1236		xpt_release_ccb(ccb);
1237
1238	return (error);
1239}
1240
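/*
 * If data has been written since the last filemark, terminate the tape
 * with the expected number of filemarks (one, or two when
 * SA_FLAG_2FM_AT_EOD is set) before the position changes.
 */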
1241static int
1242sacheckeod(struct cam_periph *periph)
1243{
1244	int	error;
1245	int	markswanted;
1246	struct	sa_softc *softc;
1247
1248	softc = (struct sa_softc *)periph->softc;
1249	markswanted = 0;
1250
1251	if ((softc->flags & SA_FLAG_TAPE_WRITTEN) != 0) {
1252		markswanted++;
1253
1254		if ((softc->flags & SA_FLAG_2FM_AT_EOD) != 0)
1255			markswanted++;
1256	}
1257
1258	if (softc->filemarks < markswanted) {
1259		markswanted -= softc->filemarks;
1260		error = sawritefilemarks(periph, markswanted,
1261					 /*setmarks*/FALSE);
1262	} else {
1263		error = 0;
1264	}
1265	return (error);
1266}
1267
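/*
 * Error handler for sequential access devices.  EOM, filemark and
 * illegal length indications reported with NO SENSE or BLANK CHECK are
 * translated into residuals and deferred error flags here; everything
 * else is passed on to the generic cam_periph_error() handler.
 */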
1268static int
1269saerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1270{
1271	struct	cam_periph *periph;
1272	struct	sa_softc *softc;
1273	struct	ccb_scsiio *csio;
1274	struct	scsi_sense_data *sense;
1275	int	error_code, sense_key, asc, ascq;
1276	int	error;
1277
1278	periph = xpt_path_periph(ccb->ccb_h.path);
1279	softc = (struct sa_softc *)periph->softc;
1280	csio = &ccb->csio;
1281	sense = &csio->sense_data;
1282	scsi_extract_sense(sense, &error_code, &sense_key, &asc, &ascq);
1283	error = 0;
1284
1285	if (((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR)
1286	 && ((sense->flags & (SSD_EOM|SSD_FILEMARK|SSD_ILI)) != 0)
1287	 && ((sense_key == SSD_KEY_NO_SENSE)
1288	  || (sense_key == SSD_KEY_BLANK_CHECK))) {
1289		u_int32_t info;
1290		u_int32_t resid;
1291		int	  defer_action;
1292
1293		/*
1294		 * Filter out some sense codes of interest.
1295		 */
1296		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
1297			info = scsi_4btoul(sense->info);
1298			resid = info;
1299			if ((softc->flags & SA_FLAG_FIXED) != 0)
1300				resid *= softc->media_blksize;
1301		} else {
1302			resid = csio->dxfer_len;
1303			info = resid;
1304			if ((softc->flags & SA_FLAG_FIXED) != 0)
1305				info /= softc->media_blksize;
1306		}
1307		if ((resid > 0 && resid < csio->dxfer_len)
1308		 && (softc->flags & SA_FLAG_FIXED) != 0)
1309			defer_action = TRUE;
1310		else
1311			defer_action = FALSE;
1312
1313		if ((sense->flags & SSD_EOM) != 0
		 || (sense_key == SSD_KEY_BLANK_CHECK)) {
1315			csio->resid = resid;
1316			if (defer_action) {
1317				softc->flags |= SA_FLAG_EOM_PENDING;
1318			} else {
1319				if (csio->cdb_io.cdb_bytes[0] == SA_WRITE)
1320					error = ENOSPC;
1321			}
1322		}
1323		if ((sense->flags & SSD_FILEMARK) != 0) {
1324			csio->resid = resid;
1325			if (defer_action)
1326				softc->flags |= SA_FLAG_EOF_PENDING;
1327		}
1328		if (sense->flags & SSD_ILI) {
			if ((int32_t)info < 0) {
1330				/*
1331				 * The record was too big.
1332				 */
1333				xpt_print_path(csio->ccb_h.path);
1334				printf("%d-byte tape record bigger "
				       "than supplied read buffer\n",
1336				       csio->dxfer_len - info);
1337				csio->resid = csio->dxfer_len;
1338				error = EIO;
1339			} else {
1340				csio->resid = resid;
1341				if ((softc->flags & SA_FLAG_FIXED) != 0) {
1342					if (defer_action)
1343						softc->flags |=
1344						    SA_FLAG_EIO_PENDING;
1345					else
1346						error = EIO;
1347				}
1348			}
1349		}
1350	}
1351	if (error == 0)
1352		error = cam_periph_error(ccb, cam_flags, sense_flags,
1353					 &softc->saved_ccb);
1354
1355	return (error);
1356}
1357
1358static int
1359sagetparams(struct cam_periph *periph, sa_params params_to_get,
1360	    u_int32_t *blocksize, u_int8_t *density, u_int32_t *numblocks,
1361	    int *buff_mode, u_int8_t *write_protect, u_int8_t *speed,
1362	    int *comp_supported, int *comp_enabled, u_int32_t *comp_algorithm,
1363	    struct scsi_data_compression_page *comp_page)
1364{
1365	union ccb *ccb;
1366	void *mode_buffer;
1367	struct scsi_mode_header_6 *mode_hdr;
1368	struct scsi_mode_blk_desc *mode_blk;
1369	struct scsi_data_compression_page *ncomp_page;
1370	int mode_buffer_len;
1371	struct sa_softc *softc;
1372	int error;
1373	cam_status status;
1374
1375	softc = (struct sa_softc *)periph->softc;
1376
1377	ccb = cam_periph_getccb(periph, /*priority*/ 1);
1378
1379retry:
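	/*
	 * The mode sense data comes back as a 6-byte mode header followed
	 * by a single block descriptor, optionally followed by the data
	 * compression page when compression information was requested.
	 */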
1380	mode_buffer_len = sizeof(*mode_hdr) + sizeof(*mode_blk);
1381
1382	if (params_to_get & SA_PARAM_COMPRESSION) {
1383		if (softc->quirks & SA_QUIRK_NOCOMP) {
1384			*comp_supported = FALSE;
1385			params_to_get &= ~SA_PARAM_COMPRESSION;
1386		} else
1387			mode_buffer_len +=
1388				sizeof(struct scsi_data_compression_page);
1389	}
1390
1391	mode_buffer = malloc(mode_buffer_len, M_TEMP, M_WAITOK);
1392
1393	bzero(mode_buffer, mode_buffer_len);
1394
1395	mode_hdr = (struct scsi_mode_header_6 *)mode_buffer;
1396	mode_blk = (struct scsi_mode_blk_desc *)&mode_hdr[1];
1397
1398	if (params_to_get & SA_PARAM_COMPRESSION)
1399		ncomp_page = (struct scsi_data_compression_page *)&mode_blk[1];
1400	else
1401		ncomp_page = NULL;
1402
1403	scsi_mode_sense(&ccb->csio,
1404			/*retries*/ 1,
1405			/*cbfcnp*/ sadone,
1406			/*tag_action*/ MSG_SIMPLE_Q_TAG,
1407			/*dbd*/ FALSE,
1408			/*page_code*/ SMS_PAGE_CTRL_CURRENT,
1409			/*page*/ (params_to_get & SA_PARAM_COMPRESSION) ?
1410				  SA_DATA_COMPRESSION_PAGE :
1411				  SMS_VENDOR_SPECIFIC_PAGE,
1412			/*param_buf*/ mode_buffer,
1413			/*param_len*/ mode_buffer_len,
1414			/*sense_len*/ SSD_FULL_SIZE,
1415			/*timeout*/ 5000);
1416
1417	error = cam_periph_runccb(ccb, saerror, /*cam_flags*/ 0,
1418				  /*sense_flags*/SF_NO_PRINT,
1419				  &softc->device_stats);
1420
1421	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1422		cam_release_devq(ccb->ccb_h.path,
1423				 /* relsim_flags */0,
1424				 /* opening_reduction */0,
1425				 /* timeout */0,
1426				 /* getcount_only */ FALSE);
1427
1428	status = ccb->ccb_h.status & CAM_STATUS_MASK;
1429
1430	if (error == EINVAL
1431	 && (params_to_get & SA_PARAM_COMPRESSION) != 0) {
1432		/*
1433		 * Most likely doesn't support the compression
		 * page.  Remember this for the future and attempt
1435		 * the request without asking for compression info.
1436		 */
1437		softc->quirks |= SA_QUIRK_NOCOMP;
1438		free(mode_buffer, M_TEMP);
1439		goto retry;
1440	} else if (error == 0) {
1441		struct scsi_data_compression_page *temp_comp_page;
1442
1443		temp_comp_page = NULL;
1444
1445		/*
1446		 * If the user only wants the compression information, and
1447		 * the device doesn't send back the block descriptor, it's
1448		 * no big deal.  If the user wants more than just
1449		 * compression, though, and the device doesn't pass back the
1450		 * block descriptor, we need to send another mode sense to
1451		 * get the block descriptor.
1452		 */
1453		if ((mode_hdr->blk_desc_len == 0)
1454		 && (params_to_get & SA_PARAM_COMPRESSION)
1455		 && ((params_to_get & ~(SA_PARAM_COMPRESSION)) != 0)) {
1456
1457			/*
1458			 * Decrease the mode buffer length by the size of
1459			 * the compression page, to make sure the data
1460			 * there doesn't get overwritten.
1461			 */
1462			mode_buffer_len -= sizeof(*ncomp_page);
1463
1464			/*
1465			 * Now move the compression page that we presumably
1466			 * got back down the memory chunk a little bit so
1467			 * it doesn't get spammed.
1468			 */
1469			temp_comp_page =
1470			      (struct scsi_data_compression_page *)&mode_hdr[1];
1471			bcopy(temp_comp_page, ncomp_page, sizeof(*ncomp_page));
1472
1473			/*
1474			 * Now, we issue another mode sense and just ask
1475			 * for the block descriptor, etc.
1476			 */
1477			scsi_mode_sense(&ccb->csio,
1478					/*retries*/ 1,
1479					/*cbfcnp*/ sadone,
1480					/*tag_action*/ MSG_SIMPLE_Q_TAG,
1481					/*dbd*/ FALSE,
1482					/*page_code*/ SMS_PAGE_CTRL_CURRENT,
1483					/*page*/ SMS_VENDOR_SPECIFIC_PAGE,
1484					/*param_buf*/ mode_buffer,
1485					/*param_len*/ mode_buffer_len,
1486					/*sense_len*/ SSD_FULL_SIZE,
1487					/*timeout*/ 5000);
1488
1489			error = cam_periph_runccb(ccb, saerror, /*cam_flags*/ 0,
1490						  /*sense_flags*/ 0,
1491						  &softc->device_stats);
1492
1493			if (error != 0)
1494				goto sagetparamsexit;
1495
1496		}
1497
1498		if (params_to_get & SA_PARAM_BLOCKSIZE)
1499			*blocksize = scsi_3btoul(mode_blk->blklen);
1500
1501		if (params_to_get & SA_PARAM_NUMBLOCKS)
1502			*numblocks = scsi_3btoul(mode_blk->nblocks);
1503
1504		if (params_to_get & SA_PARAM_BUFF_MODE)
1505			*buff_mode = mode_hdr->dev_spec & SMH_SA_BUF_MODE_MASK;
1506
1507		if (params_to_get & SA_PARAM_DENSITY)
1508			*density = mode_blk->density;
1509
1510		if (params_to_get & SA_PARAM_WP)
1511			*write_protect = (mode_hdr->dev_spec & SMH_SA_WP) ?
1512					 TRUE : FALSE;
1513		if (params_to_get & SA_PARAM_SPEED)
1514			*speed = mode_hdr->dev_spec & SMH_SA_SPEED_MASK;
1515
1516		if (params_to_get & SA_PARAM_COMPRESSION) {
1517			*comp_supported =(ncomp_page->dce_and_dcc & SA_DCP_DCC)?
1518					 TRUE : FALSE;
1519			*comp_enabled = (ncomp_page->dce_and_dcc & SA_DCP_DCE)?
1520					TRUE : FALSE;
1521			*comp_algorithm =
1522				scsi_4btoul(ncomp_page->comp_algorithm);
1523			if (comp_page != NULL)
1524				bcopy(ncomp_page, comp_page,sizeof(*comp_page));
1525		}
1526
1527	} else if (status == CAM_SCSI_STATUS_ERROR) {
1528		/* Tell the user about the fatal error. */
1529		scsi_sense_print(&ccb->csio);
1530	}
1531
1532sagetparamsexit:
1533
1534	xpt_release_ccb(ccb);
1535	free(mode_buffer, M_TEMP);
1536	return(error);
1537}
1538
1539/*
1540 * The purpose of this function is to set one of four different parameters
1541 * for a tape drive:
1542 *	- blocksize
1543 *	- density
1544 *	- compression / compression algorithm
1545 *	- buffering mode
1546 *
1547 * The assumption is that this will be called from saioctl(), and therefore
1548 * from a process context.  Thus the waiting malloc calls below.  If that
1549 * assumption ever changes, the malloc calls should be changed to be
1550 * NOWAIT mallocs.
1551 *
1552 * Any or all of the four parameters may be set when this function is
1553 * called.  It should handle setting more than one parameter at once.
1554 */
1555static int
1556sasetparams(struct cam_periph *periph, sa_params params_to_set,
1557	    u_int32_t blocksize, u_int8_t density, u_int32_t comp_algorithm)
1558{
1559	struct sa_softc *softc;
1560	u_int32_t current_blocksize;
1561	u_int32_t current_comp_algorithm;
1562	u_int8_t current_density;
1563	u_int8_t current_speed;
1564	int comp_enabled, comp_supported;
1565	void *mode_buffer;
1566	int mode_buffer_len;
1567	struct scsi_mode_header_6 *mode_hdr;
1568	struct scsi_mode_blk_desc *mode_blk;
1569	struct scsi_data_compression_page *comp_page;
1570	struct scsi_data_compression_page *current_comp_page;
1571	int buff_mode;
1572	union ccb *ccb;
1573	int error;
1574
1575	softc = (struct sa_softc *)periph->softc;
1576
1577	/* silence the compiler */
1578	ccb = NULL;
1579
1580	current_comp_page = malloc(sizeof(*current_comp_page),M_TEMP, M_WAITOK);
1581
1582	/*
1583	 * Since it doesn't make sense to set the number of blocks, or
1584	 * write protection, we won't try to get the current value.  We
1585	 * always want to get the blocksize, so we can set it back to the
1586	 * proper value.
1587	 */
1588	error = sagetparams(periph, params_to_set | SA_PARAM_BLOCKSIZE |
1589			    SA_PARAM_SPEED, &current_blocksize,
1590			    &current_density, NULL, &buff_mode, NULL,
1591			    &current_speed, &comp_supported, &comp_enabled,
1592			    &current_comp_algorithm, current_comp_page);
1593
1594	if (error != 0) {
1595		free(current_comp_page, M_TEMP);
1596		return(error);
1597	}
1598
1599	mode_buffer_len = sizeof(*mode_hdr) + sizeof(*mode_blk);
1600	if (params_to_set & SA_PARAM_COMPRESSION)
1601		mode_buffer_len += sizeof(struct scsi_data_compression_page);
1602
1603	mode_buffer = malloc(mode_buffer_len, M_TEMP, M_WAITOK);
1604
1605	bzero(mode_buffer, mode_buffer_len);
1606
1607	mode_hdr = (struct scsi_mode_header_6 *)mode_buffer;
1608	mode_blk = (struct scsi_mode_blk_desc *)&mode_hdr[1];
1609
1610	if (params_to_set & SA_PARAM_COMPRESSION) {
1611		comp_page = (struct scsi_data_compression_page *)&mode_blk[1];
1612		bcopy(current_comp_page, comp_page, sizeof(*comp_page));
1613	} else
1614		comp_page = NULL;
1615
1616	/*
1617	 * If the caller wants us to set the blocksize, use the one they
1618	 * pass in.  Otherwise, use the blocksize we got back from the
1619	 * mode select above.
1620	 */
1621	if (params_to_set & SA_PARAM_BLOCKSIZE)
1622		scsi_ulto3b(blocksize, mode_blk->blklen);
1623	else
1624		scsi_ulto3b(current_blocksize, mode_blk->blklen);
1625
1626	/*
1627	 * 0x7f means "same as before"
1628	 */
1629	if (params_to_set & SA_PARAM_DENSITY)
1630		mode_blk->density = density;
1631	else
1632		mode_blk->density = 0x7f;
1633
1634	/*
1635	 * For mode selects, these two fields must be zero.
1636	 */
1637	mode_hdr->data_length = 0;
1638	mode_hdr->medium_type = 0;
1639
1640	/* set the speed to the current value */
1641	mode_hdr->dev_spec = current_speed;
1642
1643	/* set single-initiator buffering mode */
1644	mode_hdr->dev_spec |= SMH_SA_BUF_MODE_SIBUF;
1645
1646	mode_hdr->blk_desc_len = sizeof(struct scsi_mode_blk_desc);
1647
1648	/*
1649	 * First, if the user wants us to set the compression algorithm or
1650	 * just turn compression on, check to make sure that this drive
1651	 * supports compression.
1652	 */
1653	if ((params_to_set & SA_PARAM_COMPRESSION)
1654	 && (current_comp_page->dce_and_dcc & SA_DCP_DCC)) {
1655
1656		/*
1657		 * If the compression algorithm is 0, disable compression.
1658		 * If the compression algorithm is non-zero, enable
1659		 * compression and set the compression type to the
1660		 * specified compression algorithm, unless the algorithm is
1661		 * MT_COMP_ENABLE.  In that case, we look at the
1662		 * compression algorithm that is currently set and if it is
1663		 * non-zero, we leave it as-is.  If it is zero, and we have
1664		 * saved a compression algorithm from a time when
1665		 * compression was enabled before, set the compression to
1666		 * the saved value.
1667		 */
1668		if (comp_algorithm == 0) {
1669			/* disable compression */
1670			comp_page->dce_and_dcc &= ~SA_DCP_DCE;
1671		} else {
1672			/* enable compression */
1673			comp_page->dce_and_dcc |= SA_DCP_DCE;
1674
1675			/* enable decompression */
1676			comp_page->dde_and_red |= SA_DCP_DDE;
1677
1678			if (comp_algorithm != MT_COMP_ENABLE) {
1679				/* set the compression algorithm */
1680				scsi_ulto4b(comp_algorithm,
1681					    comp_page->comp_algorithm);
1682
1683			} else if ((scsi_4btoul(comp_page->comp_algorithm) == 0)
1684				&& (softc->saved_comp_algorithm != 0)) {
1685				scsi_ulto4b(softc->saved_comp_algorithm,
1686					    comp_page->comp_algorithm);
1687			}
1688		}
1689	} else if (params_to_set & SA_PARAM_COMPRESSION) {
1690		/*
1691		 * The drive doesn't support compression, so turn off the
1692		 * set compression bit.
1693		 */
1694		params_to_set &= ~SA_PARAM_COMPRESSION;
1695
1696		/*
1697		 * Should probably do something other than a printf...like
1698		 * set a flag in the softc saying that this drive doesn't
1699		 * support compression.
1700		 */
1701		xpt_print_path(periph->path);
1702		printf("sasetparams: device does not support compression\n");
1703
1704		/*
1705		 * If that was the only thing the user wanted us to set,
1706		 * clean up allocated resources and return with 'operation
1707		 * not supported'.
1708		 */
		if (params_to_set == SA_PARAM_NONE) {
			free(current_comp_page, M_TEMP);
			free(mode_buffer, M_TEMP);
			return(ENODEV);
		}
1713
1714		/*
1715		 * That wasn't the only thing the user wanted us to set.
1716		 * So, decrease the stated mode buffer length by the size
1717		 * of the compression mode page.
1718		 */
1719		mode_buffer_len -= sizeof(*comp_page);
1720	}
1721
1722	ccb = cam_periph_getccb(periph, /*priority*/ 1);
1723
1724	scsi_mode_select(&ccb->csio,
1725			/*retries*/1,
1726			/*cbfcnp*/ sadone,
1727			/*tag_action*/ MSG_SIMPLE_Q_TAG,
1728			/*scsi_page_fmt*/(params_to_set & SA_PARAM_COMPRESSION)?
1729					 TRUE : FALSE,
1730			/*save_pages*/ FALSE,
1731			/*param_buf*/ mode_buffer,
1732			/*param_len*/ mode_buffer_len,
1733			/*sense_len*/ SSD_FULL_SIZE,
1734			/*timeout*/ 5000);
1735
1736	error = cam_periph_runccb(ccb, saerror, /*cam_flags*/ 0,
1737				  /*sense_flags*/ 0, &softc->device_stats);
1738
1739	if (error == 0) {
1740		xpt_release_ccb(ccb);
1741	} else {
1742		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1743			cam_release_devq(ccb->ccb_h.path,
1744					 /*relsim_flags*/0,
1745					 /*reduction*/0,
1746					 /*timeout*/0,
1747					 /*getcount_only*/0);
1748		/*
1749		 * If we were setting the blocksize, and that failed, we
1750		 * want to set it to its original value.  If we weren't
1751		 * setting the blocksize, we don't want to change it.
1752		 */
1753		scsi_ulto3b(current_blocksize, mode_blk->blklen);
1754
1755		/*
1756		 * 0x7f means "same as before".
1757		 */
1758		if (params_to_set & SA_PARAM_DENSITY)
1759			mode_blk->density = current_density;
1760		else
1761			mode_blk->density = 0x7f;
1762
1763		if (params_to_set & SA_PARAM_COMPRESSION)
1764			bcopy(current_comp_page, comp_page,
1765			      sizeof(struct scsi_data_compression_page));
1766
1767		/*
1768		 * The retry count is the only CCB field that might have been
1769		 * changed that we care about, so reset it back to 1.
1770		 */
1771		ccb->ccb_h.retry_count = 1;
1772		cam_periph_runccb(ccb, saerror, /*cam_flags*/ 0,
1773				  /*sense_flags*/ 0, &softc->device_stats);
1774
1775		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1776			cam_release_devq(ccb->ccb_h.path,
1777					 /*relsim_flags*/0,
1778					 /*reduction*/0,
1779					 /*timeout*/0,
1780					 /*getcount_only*/0);
1781
1782		xpt_release_ccb(ccb);
1783	}
1784
	/*
	 * current_comp_page is always allocated above, so free it
	 * unconditionally here.
	 */
	free(current_comp_page, M_TEMP);
1787
1788	free(mode_buffer, M_TEMP);
1789	return(error);
1790}
1791
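/*
 * Lock or unlock the media in the drive with PREVENT/ALLOW MEDIUM
 * REMOVAL, tracking the state in SA_FLAG_TAPE_LOCKED so the command is
 * only issued when the state actually changes.
 */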
1792static void
1793saprevent(struct cam_periph *periph, int action)
1794{
1795	struct	sa_softc *softc;
1796	union	ccb *ccb;
1797	int	error;
1798
1799	softc = (struct sa_softc *)periph->softc;
1800
1801	if (((action == PR_ALLOW)
1802	  && (softc->flags & SA_FLAG_TAPE_LOCKED) == 0)
1803	 || ((action == PR_PREVENT)
1804	  && (softc->flags & SA_FLAG_TAPE_LOCKED) != 0)) {
1805		return;
1806	}
1807
1808	ccb = cam_periph_getccb(periph, /*priority*/1);
1809
1810	scsi_prevent(&ccb->csio,
1811		     /*retries*/0,
1812		     /*cbcfp*/sadone,
1813		     MSG_SIMPLE_Q_TAG,
1814		     action,
1815		     SSD_FULL_SIZE,
1816		     60000);
1817
1818	error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0,
1819				  /*sense_flags*/0, &softc->device_stats);
1820
1821	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1822		cam_release_devq(ccb->ccb_h.path,
1823				 /*relsim_flags*/0,
1824				 /*reduction*/0,
1825				 /*timeout*/0,
1826				 /*getcount_only*/0);
1827
1828
1829	if (error == 0) {
1830		if (action == PR_ALLOW)
1831			softc->flags &= ~SA_FLAG_TAPE_LOCKED;
1832		else
1833			softc->flags |= SA_FLAG_TAPE_LOCKED;
1834	}
1835
1836	xpt_release_ccb(ccb);
1837}
1838
1839static int
1840sarewind(struct cam_periph *periph)
1841{
1842	union	ccb *ccb;
1843	struct	sa_softc *softc;
1844	int	error;
1845
1846	softc = (struct sa_softc *)periph->softc;
1847
1848	ccb = cam_periph_getccb(periph, /*priority*/1);
1849
1850	scsi_rewind(&ccb->csio,
1851		    /*retries*/1,
1852		    /*cbcfp*/sadone,
1853		    MSG_SIMPLE_Q_TAG,
1854		    /*immediate*/FALSE,
1855		    SSD_FULL_SIZE,
1856		    (SA_REWIND_TIMEOUT) * 60 * 1000);
1857
1858	error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0,
1859				  /*sense_flags*/0, &softc->device_stats);
1860
1861	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1862		cam_release_devq(ccb->ccb_h.path,
1863				 /*relsim_flags*/0,
1864				 /*reduction*/0,
1865				 /*timeout*/0,
1866				 /*getcount_only*/0);
1867
1868	xpt_release_ccb(ccb);
1869
1870	return (error);
1871}
1872
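/*
 * SPACE over filemarks, blocks or to end-of-data as selected by "code";
 * a negative count moves toward beginning-of-medium.
 */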
1873static int
1874saspace(struct cam_periph *periph, int count, scsi_space_code code)
1875{
1876	union	ccb *ccb;
1877	struct	sa_softc *softc;
1878	int	error;
1879
1880	softc = (struct sa_softc *)periph->softc;
1881
1882	ccb = cam_periph_getccb(periph, /*priority*/1);
1883
1884	scsi_space(&ccb->csio,
1885		   /*retries*/1,
1886		   /*cbcfp*/sadone,
1887		   MSG_SIMPLE_Q_TAG,
1888		   code, count,
1889		   SSD_FULL_SIZE,
1890		   (SA_SPACE_TIMEOUT) * 60 * 1000);
1891
1892	error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0,
1893				  /*sense_flags*/0, &softc->device_stats);
1894
1895	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1896		cam_release_devq(ccb->ccb_h.path,
1897				 /*relsim_flags*/0,
1898				 /*reduction*/0,
1899				 /*timeout*/0,
1900				 /*getcount_only*/0);
1901
1902	xpt_release_ccb(ccb);
1903
1904	return (error);
1905}
1906
1907static int
1908sawritefilemarks(struct cam_periph *periph, int nmarks, int setmarks)
1909{
1910	union	ccb *ccb;
1911	struct	sa_softc *softc;
1912	int	error;
1913
1914	softc = (struct sa_softc *)periph->softc;
1915
1916	ccb = cam_periph_getccb(periph, /*priority*/1);
1917
1918	scsi_write_filemarks(&ccb->csio,
1919			     /*retries*/1,
1920			     /*cbcfp*/sadone,
1921			     MSG_SIMPLE_Q_TAG,
1922			     /*immediate*/FALSE,
1923			     setmarks,
1924			     nmarks,
1925			     SSD_FULL_SIZE,
1926			     60000);
1927
1928	error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0,
1929				  /*sense_flags*/0, &softc->device_stats);
1930
1931	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1932		cam_release_devq(ccb->ccb_h.path,
1933				 /*relsim_flags*/0,
1934				 /*reduction*/0,
1935				 /*timeout*/0,
1936				 /*getcount_only*/0);
1937
1938	if (error == 0) {
1939		struct sa_softc *softc;
1940
1941		softc = (struct sa_softc *)periph->softc;
1942		softc->filemarks += nmarks;
1943	}
1944
1945	xpt_release_ccb(ccb);
1946
1947	return (error);
1948}
1949
1950static int
1951saretension(struct cam_periph *periph)
1952{
1953	union ccb *ccb;
1954	struct sa_softc *softc;
1955	int error;
1956
1957	softc = (struct sa_softc *)periph->softc;
1958
1959	ccb = cam_periph_getccb(periph, /*priority*/1);
1960
1961	scsi_load_unload(&ccb->csio,
1962			 /*retries*/ 1,
1963			 /*cbfcnp*/ sadone,
1964			 MSG_SIMPLE_Q_TAG,
1965			 /*immediate*/ FALSE,
1966			 /*eot*/ FALSE,
1967			 /*reten*/ TRUE,
1968			 /*load*/ TRUE,
1969			 SSD_FULL_SIZE,
1970			 60000);
1971
1972	error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0,
1973				  /*sense_flags*/0, &softc->device_stats);
1974
1975	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1976		cam_release_devq(ccb->ccb_h.path,
1977				 /*relsim_flags*/0,
1978				 /*reduction*/0,
1979				 /*timeout*/0,
1980				 /*getcount_only*/0);
1981
1982	xpt_release_ccb(ccb);
1983
1984	return(error);
1985}
1986
1987static int
1988sareservereleaseunit(struct cam_periph *periph, int reserve)
1989{
1990	union ccb *ccb;
1991	struct sa_softc *softc;
1992	int error;
1993
1994	softc = (struct sa_softc *)periph->softc;
1995
1996	ccb = cam_periph_getccb(periph, /*priority*/ 1);
1997
1998	scsi_reserve_release_unit(&ccb->csio,
1999				  /*retries*/ 1,
2000				  /*cbfcnp*/ sadone,
2001				  /*tag_action*/ MSG_SIMPLE_Q_TAG,
2002				  /*third_party*/ FALSE,
2003				  /*third_party_id*/ 0,
2004				  /*sense_len*/ SSD_FULL_SIZE,
2005				  /*timeout*/ 5000,
2006				  reserve);
2007
2008	/*
2009	 * We set SF_RETRY_UA, since this is often the first command run
2010	 * when a tape device is opened, and there may be a unit attention
2011	 * condition pending.
2012	 */
2013	error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0,
2014				  /*sense_flags*/SF_RETRY_UA,
2015				  &softc->device_stats);
2016
2017	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2018		cam_release_devq(ccb->ccb_h.path,
2019				 /*relsim_flags*/0,
2020				 /*reduction*/0,
2021				 /*timeout*/0,
2022				 /*getcount_only*/0);
2023
2024	xpt_release_ccb(ccb);
2025
2026	return (error);
2027}
2028
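/*
 * Load or unload the tape, as selected by the 'load' argument.
 */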
2029static int
2030saloadunload(struct cam_periph *periph, int load)
2031{
2032	union	ccb *ccb;
2033	struct	sa_softc *softc;
2034	int	error;
2035
2036	softc = (struct sa_softc *)periph->softc;
2037
2038	ccb = cam_periph_getccb(periph, /*priority*/1);
2039
2040	scsi_load_unload(&ccb->csio,
2041			 /*retries*/1,
2042			 /*cbfcnp*/sadone,
2043			 MSG_SIMPLE_Q_TAG,
2044			 /*immediate*/FALSE,
2045			 /*eot*/FALSE,
2046			 /*reten*/FALSE,
2047			 load,
2048			 SSD_FULL_SIZE,
2049			 60000);
2050
2051	error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0,
2052				  /*sense_flags*/0, &softc->device_stats);
2053
2054	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2055		cam_release_devq(ccb->ccb_h.path,
2056				 /*relsim_flags*/0,
2057				 /*reduction*/0,
2058				 /*timeout*/0,
2059				 /*getcount_only*/0);
2060
2061	xpt_release_ccb(ccb);
2062
2063	return (error);
2064}
2065
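/*
 * Erase the tape, using a long erase if 'longerase' is set.
 */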
2066static int
2067saerase(struct cam_periph *periph, int longerase)
2068{
2069
2070	union	ccb *ccb;
2071	struct	sa_softc *softc;
2072	int error;
2073
2074	softc = (struct sa_softc *)periph->softc;
2075
2076	ccb = cam_periph_getccb(periph, /*priority*/ 1);
2077
2078	scsi_erase(&ccb->csio,
2079		   /*retries*/ 1,
2080		   /*cbfcnp*/ sadone,
2081		   /*tag_action*/ MSG_SIMPLE_Q_TAG,
2082		   /*immediate*/ FALSE,
2083		   /*long_erase*/ longerase,
2084		   /*sense_len*/ SSD_FULL_SIZE,
2085		   /*timeout*/ (SA_ERASE_TIMEOUT) * 60 * 1000);
2086
2087	error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0,
2088				  /*sense_flags*/0, &softc->device_stats);
2089
2090	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2091		cam_release_devq(ccb->ccb_h.path,
2092				 /*relsim_flags*/0,
2093				 /*reduction*/0,
2094				 /*timeout*/0,
2095				 /*getcount_only*/0);
2096
2097	xpt_release_ccb(ccb);
2098
2099	return (error);
2100}
2101
2102#endif /* KERNEL */
2103
2104/*
2105 * Read tape block limits command.
2106 */
2107void
2108scsi_read_block_limits(struct ccb_scsiio *csio, u_int32_t retries,
2109		   void (*cbfcnp)(struct cam_periph *, union ccb *),
2110		   u_int8_t tag_action,
2111		   struct scsi_read_block_limits_data *rlimit_buf,
2112		   u_int8_t sense_len, u_int32_t timeout)
2113{
2114	struct scsi_read_block_limits *scsi_cmd;
2115
2116	cam_fill_csio(csio,
2117		      retries,
2118		      cbfcnp,
2119		      /*flags*/CAM_DIR_IN,
2120		      tag_action,
2121		      /*data_ptr*/(u_int8_t *)rlimit_buf,
2122		      /*dxfer_len*/sizeof(*rlimit_buf),
2123		      sense_len,
2124		      sizeof(*scsi_cmd),
2125		      timeout);
2126
2127	scsi_cmd = (struct scsi_read_block_limits *)&csio->cdb_io.cdb_bytes;
2128	bzero(scsi_cmd, sizeof(*scsi_cmd));
2129	scsi_cmd->opcode = READ_BLOCK_LIMITS;
2130}
2131
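/*
 * Sequential access read/write command.  In fixed block mode 'length' is a
 * count of blocks; otherwise it is a byte count.  The SLI bit is only set
 * for reads.
 */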
2132void
2133scsi_sa_read_write(struct ccb_scsiio *csio, u_int32_t retries,
2134		   void (*cbfcnp)(struct cam_periph *, union ccb *),
2135		   u_int8_t tag_action, int readop, int sli,
2136		   int fixed, u_int32_t length, u_int8_t *data_ptr,
2137		   u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout)
2138{
2139	struct scsi_sa_rw *scsi_cmd;
2140
2141	scsi_cmd = (struct scsi_sa_rw *)&csio->cdb_io.cdb_bytes;
2142	scsi_cmd->opcode = readop ? SA_READ : SA_WRITE;
2143	scsi_cmd->sli_fixed = 0;
2144	if (sli && readop)
2145		scsi_cmd->sli_fixed |= SAR_SLI;
2146	if (fixed)
2147		scsi_cmd->sli_fixed |= SARW_FIXED;
2148	scsi_ulto3b(length, scsi_cmd->length);
2149	scsi_cmd->control = 0;
2150
2151	cam_fill_csio(csio,
2152		      retries,
2153		      cbfcnp,
2154		      /*flags*/readop ? CAM_DIR_IN : CAM_DIR_OUT,
2155		      tag_action,
2156		      data_ptr,
2157		      dxfer_len,
2158		      sense_len,
2159		      sizeof(*scsi_cmd),
2160		      timeout);
2161}
2162
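/*
 * Load/unload command.  The eot, reten, and load arguments set the
 * corresponding bits in the CDB.
 */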
2163void
2164scsi_load_unload(struct ccb_scsiio *csio, u_int32_t retries,
2165		 void (*cbfcnp)(struct cam_periph *, union ccb *),
2166		 u_int8_t tag_action, int immediate, int eot,
2167		 int reten, int load, u_int8_t sense_len,
2168		 u_int32_t timeout)
2169{
2170	struct scsi_load_unload *scsi_cmd;
2171
2172	scsi_cmd = (struct scsi_load_unload *)&csio->cdb_io.cdb_bytes;
2173	bzero(scsi_cmd, sizeof(*scsi_cmd));
2174	scsi_cmd->opcode = LOAD_UNLOAD;
2175	if (immediate)
2176		scsi_cmd->immediate = SLU_IMMED;
2177	if (eot)
2178		scsi_cmd->eot_reten_load |= SLU_EOT;
2179	if (reten)
2180		scsi_cmd->eot_reten_load |= SLU_RETEN;
2181	if (load)
2182		scsi_cmd->eot_reten_load |= SLU_LOAD;
2183
2184	cam_fill_csio(csio,
2185		      retries,
2186		      cbfcnp,
2187		      /*flags*/CAM_DIR_NONE,
2188		      tag_action,
2189		      /*data_ptr*/NULL,
2190		      /*dxfer_len*/0,
2191		      sense_len,
2192		      sizeof(*scsi_cmd),
2193		      timeout);
2194}
2195
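/*
 * Rewind command.  If 'immediate' is set, status is returned before the
 * rewind completes.
 */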
2196void
2197scsi_rewind(struct ccb_scsiio *csio, u_int32_t retries,
2198	    void (*cbfcnp)(struct cam_periph *, union ccb *),
2199	    u_int8_t tag_action, int immediate, u_int8_t sense_len,
2200	    u_int32_t timeout)
2201{
2202	struct scsi_rewind *scsi_cmd;
2203
2204	scsi_cmd = (struct scsi_rewind *)&csio->cdb_io.cdb_bytes;
2205	bzero(scsi_cmd, sizeof(*scsi_cmd));
2206	scsi_cmd->opcode = REWIND;
2207	if (immediate)
2208		scsi_cmd->immediate = SREW_IMMED;
2209
2210	cam_fill_csio(csio,
2211		      retries,
2212		      cbfcnp,
2213		      /*flags*/CAM_DIR_NONE,
2214		      tag_action,
2215		      /*data_ptr*/NULL,
2216		      /*dxfer_len*/0,
2217		      sense_len,
2218		      sizeof(*scsi_cmd),
2219		      timeout);
2220}
2221
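/*
 * Space command: move the logical position by 'count' units of the type
 * selected by 'code'.
 */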
2222void
2223scsi_space(struct ccb_scsiio *csio, u_int32_t retries,
2224	   void (*cbfcnp)(struct cam_periph *, union ccb *),
2225	   u_int8_t tag_action, scsi_space_code code,
2226	   u_int32_t count, u_int8_t sense_len, u_int32_t timeout)
2227{
2228	struct scsi_space *scsi_cmd;
2229
2230	scsi_cmd = (struct scsi_space *)&csio->cdb_io.cdb_bytes;
2231	scsi_cmd->opcode = SPACE;
2232	scsi_cmd->code = code;
2233	scsi_ulto3b(count, scsi_cmd->count);
2234	scsi_cmd->control = 0;
2235
2236	cam_fill_csio(csio,
2237		      retries,
2238		      cbfcnp,
2239		      /*flags*/CAM_DIR_NONE,
2240		      tag_action,
2241		      /*data_ptr*/NULL,
2242		      /*dxfer_len*/0,
2243		      sense_len,
2244		      sizeof(*scsi_cmd),
2245		      timeout);
2246}
2247
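/*
 * Write filemarks command.  Writes 'num_marks' filemarks, or setmarks if
 * 'setmark' is set.
 */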
2248void
2249scsi_write_filemarks(struct ccb_scsiio *csio, u_int32_t retries,
2250		     void (*cbfcnp)(struct cam_periph *, union ccb *),
2251		     u_int8_t tag_action, int immediate, int setmark,
2252		     u_int32_t num_marks, u_int8_t sense_len,
2253		     u_int32_t timeout)
2254{
2255	struct scsi_write_filemarks *scsi_cmd;
2256
2257	scsi_cmd = (struct scsi_write_filemarks *)&csio->cdb_io.cdb_bytes;
2258	bzero(scsi_cmd, sizeof(*scsi_cmd));
2259	scsi_cmd->opcode = WRITE_FILEMARKS;
2260	if (immediate)
2261		scsi_cmd->byte2 |= SWFMRK_IMMED;
2262	if (setmark)
2263		scsi_cmd->byte2 |= SWFMRK_WSMK;
2264
2265	scsi_ulto3b(num_marks, scsi_cmd->num_marks);
2266
2267	cam_fill_csio(csio,
2268		      retries,
2269		      cbfcnp,
2270		      /*flags*/CAM_DIR_NONE,
2271		      tag_action,
2272		      /*data_ptr*/NULL,
2273		      /*dxfer_len*/0,
2274		      sense_len,
2275		      sizeof(*scsi_cmd),
2276		      timeout);
2277}
2278
2279/*
2280 * The reserve and release unit commands differ only by their opcodes.
2281 */
2282void
2283scsi_reserve_release_unit(struct ccb_scsiio *csio, u_int32_t retries,
2284			  void (*cbfcnp)(struct cam_periph *, union ccb *),
2285			  u_int8_t tag_action, int third_party,
2286			  int third_party_id, u_int8_t sense_len,
2287			  u_int32_t timeout, int reserve)
2288{
2289	struct scsi_reserve_release_unit *scsi_cmd;
2290
2291	scsi_cmd = (struct scsi_reserve_release_unit *)&csio->cdb_io.cdb_bytes;
2292	bzero(scsi_cmd, sizeof(*scsi_cmd));
2293
2294	if (reserve)
2295		scsi_cmd->opcode = RESERVE_UNIT;
2296	else
2297		scsi_cmd->opcode = RELEASE_UNIT;
2298
2299	if (third_party) {
2300		scsi_cmd->lun_thirdparty |= SRRU_3RD_PARTY;
2301		scsi_cmd->lun_thirdparty |=
2302			((third_party_id << SRRU_3RD_SHAMT) & SRRU_3RD_MASK);
2303	}
2304
2305	cam_fill_csio(csio,
2306		      retries,
2307		      cbfcnp,
2308		      /*flags*/ CAM_DIR_NONE,
2309		      tag_action,
2310		      /*data_ptr*/ NULL,
2311		      /*dxfer_len*/ 0,
2312		      sense_len,
2313		      sizeof(*scsi_cmd),
2314		      timeout);
2315}
2316
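/*
 * Erase command.  'long_erase' selects a long erase; 'immediate' returns
 * status before the erase completes.
 */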
2317void
2318scsi_erase(struct ccb_scsiio *csio, u_int32_t retries,
2319	   void (*cbfcnp)(struct cam_periph *, union ccb *),
2320	   u_int8_t tag_action, int immediate, int long_erase,
2321	   u_int8_t sense_len, u_int32_t timeout)
2322{
2323	struct scsi_erase *scsi_cmd;
2324
2325	scsi_cmd = (struct scsi_erase *)&csio->cdb_io.cdb_bytes;
2326	bzero(scsi_cmd, sizeof(*scsi_cmd));
2327
2328	scsi_cmd->opcode = ERASE;
2329
2330	if (immediate)
2331		scsi_cmd->lun_imm_long |= SE_IMMED;
2332
2333	if (long_erase)
2334		scsi_cmd->lun_imm_long |= SE_LONG;
2335
2336	cam_fill_csio(csio,
2337		      retries,
2338		      cbfcnp,
2339		      /*flags*/ CAM_DIR_NONE,
2340		      tag_action,
2341		      /*data_ptr*/ NULL,
2342		      /*dxfer_len*/ 0,
2343		      sense_len,
2344		      sizeof(*scsi_cmd),
2345		      timeout);
2346}
2347