1/*	$NetBSD$	*/
2
3/*-
4 * Copyright (c) 1998, 2003, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32/*
33 * Originally written by Julian Elischer (julian@dialix.oz.au)
34 * for TRW Financial Systems for use under the MACH(2.5) operating system.
35 *
36 * TRW Financial Systems, in accordance with their agreement with Carnegie
37 * Mellon University, makes this software available to CMU to distribute
38 * or use in any manner that they see fit as long as this message is kept with
39 * the software. For this reason TFS also grants any other persons or
40 * organisations permission to use or modify this software.
41 *
42 * TFS supplies this software to be publicly redistributed
43 * on the understanding that TFS is not responsible for the correct
44 * functioning of this software in any circumstances.
45 *
46 * Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
47 */
48
49#include <sys/cdefs.h>
50__KERNEL_RCSID(0, "$NetBSD$");
51
52#include "opt_scsi.h"
53
54#include <sys/param.h>
55#include <sys/systm.h>
56#include <sys/kernel.h>
57#include <sys/file.h>
58#include <sys/stat.h>
59#include <sys/ioctl.h>
60#include <sys/scsiio.h>
61#include <sys/buf.h>
62#include <sys/bufq.h>
63#include <sys/uio.h>
64#include <sys/malloc.h>
65#include <sys/errno.h>
66#include <sys/device.h>
67#include <sys/disklabel.h>
68#include <sys/disk.h>
69#include <sys/proc.h>
70#include <sys/conf.h>
71#include <sys/vnode.h>
72#include <sys/rnd.h>
73#include <sys/cprng.h>
74
75#include <dev/scsipi/scsi_spc.h>
76#include <dev/scsipi/scsipi_all.h>
77#include <dev/scsipi/scsi_all.h>
78#include <dev/scsipi/scsipi_disk.h>
79#include <dev/scsipi/scsi_disk.h>
80#include <dev/scsipi/scsiconf.h>
81#include <dev/scsipi/scsipi_base.h>
82#include <dev/scsipi/sdvar.h>
83
84#include <prop/proplib.h>
85
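/*
 * An sd minor number encodes both the unit and the partition; these
 * macros are thin wrappers around the generic disk macros that do the
 * packing and unpacking, plus one to build the raw-partition device
 * used for label I/O.
 */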
86#define	SDUNIT(dev)			DISKUNIT(dev)
87#define	SDPART(dev)			DISKPART(dev)
88#define	SDMINOR(unit, part)		DISKMINOR(unit, part)
89#define	MAKESDDEV(maj, unit, part)	MAKEDISKDEV(maj, unit, part)
90
91#define	SDLABELDEV(dev)	(MAKESDDEV(major(dev), SDUNIT(dev), RAW_PART))
92
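/*
 * Fallback logical block size, used when the device reports none or an
 * unsupported value (see sd_validate_blksize() below).
 */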
93#define	SD_DEFAULT_BLKSIZE	512
94
95static void	sdminphys(struct buf *);
96static void	sdgetdefaultlabel(struct sd_softc *, struct disklabel *);
97static int	sdgetdisklabel(struct sd_softc *);
98static void	sdstart(struct scsipi_periph *);
99static void	sdrestart(void *);
100static void	sddone(struct scsipi_xfer *, int);
101static bool	sd_suspend(device_t, const pmf_qual_t *);
102static bool	sd_shutdown(device_t, int);
103static int	sd_interpret_sense(struct scsipi_xfer *);
104static int	sdlastclose(device_t);
105
106static int	sd_mode_sense(struct sd_softc *, u_int8_t, void *, size_t, int,
107		    int, int *);
108static int	sd_mode_select(struct sd_softc *, u_int8_t, void *, size_t, int,
109		    int);
110static int	sd_validate_blksize(struct scsipi_periph *, int);
111static u_int64_t sd_read_capacity(struct scsipi_periph *, int *, int flags);
112static int	sd_get_simplifiedparms(struct sd_softc *, struct disk_parms *,
113		    int);
114static int	sd_get_capacity(struct sd_softc *, struct disk_parms *, int);
115static int	sd_get_parms(struct sd_softc *, struct disk_parms *, int);
116static int	sd_get_parms_page4(struct sd_softc *, struct disk_parms *,
117		    int);
118static int	sd_get_parms_page5(struct sd_softc *, struct disk_parms *,
119		    int);
120
121static int	sd_flush(struct sd_softc *, int);
122static int	sd_getcache(struct sd_softc *, int *);
123static int	sd_setcache(struct sd_softc *, int);
124
125static int	sdmatch(device_t, cfdata_t, void *);
126static void	sdattach(device_t, device_t, void *);
127static int	sddetach(device_t, int);
128static void	sd_set_properties(struct sd_softc *);
129
130CFATTACH_DECL3_NEW(sd, sizeof(struct sd_softc), sdmatch, sdattach, sddetach,
131    NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
132
133extern struct cfdriver sd_cd;
134
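/*
 * Inquiry patterns we attach to: any direct-access, simplified
 * direct-access, or optical device, whether fixed or removable.
 */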
135static const struct scsipi_inquiry_pattern sd_patterns[] = {
136	{T_DIRECT, T_FIXED,
137	 "",         "",                 ""},
138	{T_DIRECT, T_REMOV,
139	 "",         "",                 ""},
140	{T_OPTICAL, T_FIXED,
141	 "",         "",                 ""},
142	{T_OPTICAL, T_REMOV,
143	 "",         "",                 ""},
144	{T_SIMPLE_DIRECT, T_FIXED,
145	 "",         "",                 ""},
146	{T_SIMPLE_DIRECT, T_REMOV,
147	 "",         "",                 ""},
148};
149
150static dev_type_open(sdopen);
151static dev_type_close(sdclose);
152static dev_type_read(sdread);
153static dev_type_write(sdwrite);
154static dev_type_ioctl(sdioctl);
155static dev_type_strategy(sdstrategy);
156static dev_type_dump(sddump);
157static dev_type_size(sdsize);
158
159const struct bdevsw sd_bdevsw = {
160	sdopen, sdclose, sdstrategy, sdioctl, sddump, sdsize, D_DISK
161};
162
163const struct cdevsw sd_cdevsw = {
164	sdopen, sdclose, sdread, sdwrite, sdioctl,
165	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
166};
167
168static struct dkdriver sddkdriver = { sdstrategy, sdminphys };
169
170static const struct scsipi_periphsw sd_switch = {
171	sd_interpret_sense,	/* check our error handler first */
172	sdstart,		/* have a queue, served by this */
173	NULL,			/* have no async handler */
174	sddone,			/* deal with stats at interrupt time */
175};
176
177struct sd_mode_sense_data {
178	/*
179	 * XXX
180	 * We are not going to parse this as-is -- it just has to be large
181	 * enough.
182	 */
183	union {
184		struct scsi_mode_parameter_header_6 small;
185		struct scsi_mode_parameter_header_10 big;
186	} header;
187	struct scsi_general_block_descriptor blk_desc;
188	union scsi_disk_pages pages;
189};
190
/*
 * The routine called by the low-level scsi code when it discovers
 * a device suitable for this driver.
 */
195static int
196sdmatch(device_t parent, cfdata_t match,
197    void *aux)
198{
199	struct scsipibus_attach_args *sa = aux;
200	int priority;
201
202	(void)scsipi_inqmatch(&sa->sa_inqbuf,
203	    sd_patterns, sizeof(sd_patterns) / sizeof(sd_patterns[0]),
204	    sizeof(sd_patterns[0]), &priority);
205
206	return (priority);
207}
208
209/*
210 * Attach routine common to atapi & scsi.
211 */
212static void
213sdattach(device_t parent, device_t self, void *aux)
214{
215	struct sd_softc *sd = device_private(self);
216	struct scsipibus_attach_args *sa = aux;
217	struct scsipi_periph *periph = sa->sa_periph;
218	int error, result, rndval = cprng_strong32();
219	struct disk_parms *dp = &sd->params;
220	char pbuf[9];
221
222	SC_DEBUG(periph, SCSIPI_DB2, ("sdattach: "));
223
224	sd->sc_dev = self;
225	sd->type = (sa->sa_inqbuf.type & SID_TYPE);
226	strncpy(sd->name, sa->sa_inqbuf.product, sizeof(sd->name));
227	if (sd->type == T_SIMPLE_DIRECT)
228		periph->periph_quirks |= PQUIRK_ONLYBIG | PQUIRK_NOBIGMODESENSE;
229
230	if (SCSIPI_BUSTYPE_TYPE(scsipi_periph_bustype(sa->sa_periph)) ==
231	    SCSIPI_BUSTYPE_SCSI && periph->periph_version == 0)
232		sd->flags |= SDF_ANCIENT;
233
234	bufq_alloc(&sd->buf_queue, BUFQ_DISK_DEFAULT_STRAT, BUFQ_SORT_RAWBLOCK);
235
236	callout_init(&sd->sc_callout, 0);
237
238	/*
239	 * Store information needed to contact our base driver
240	 */
241	sd->sc_periph = periph;
242
243	periph->periph_dev = sd->sc_dev;
244	periph->periph_switch = &sd_switch;
245
	/*
	 * Increase our openings to the maximum-per-periph
	 * supported by the adapter.  This will either be
	 * clamped down or grown by the adapter if necessary.
	 */
251	periph->periph_openings =
252	    SCSIPI_CHAN_MAX_PERIPH(periph->periph_channel);
253	periph->periph_flags |= PERIPH_GROW_OPENINGS;
254
255	/*
256	 * Initialize and attach the disk structure.
257	 */
258	disk_init(&sd->sc_dk, device_xname(sd->sc_dev), &sddkdriver);
259	disk_attach(&sd->sc_dk);
260
261	/*
262	 * Use the subdriver to request information regarding the drive.
263	 */
264	aprint_naive("\n");
265	aprint_normal("\n");
266
267	error = scsipi_test_unit_ready(periph,
268	    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
269	    XS_CTL_IGNORE_MEDIA_CHANGE | XS_CTL_SILENT_NODEV);
270
271	if (error)
272		result = SDGP_RESULT_OFFLINE;
273	else
274		result = sd_get_parms(sd, &sd->params, XS_CTL_DISCOVERY);
275	aprint_normal_dev(sd->sc_dev, "");
276	switch (result) {
277	case SDGP_RESULT_OK:
278		format_bytes(pbuf, sizeof(pbuf),
279		    (u_int64_t)dp->disksize * dp->blksize);
280	        aprint_normal(
281		"%s, %ld cyl, %ld head, %ld sec, %ld bytes/sect x %llu sectors",
282		    pbuf, dp->cyls, dp->heads, dp->sectors, dp->blksize,
283		    (unsigned long long)dp->disksize);
284		break;
285
286	case SDGP_RESULT_OFFLINE:
287		aprint_normal("drive offline");
288		break;
289
290	case SDGP_RESULT_UNFORMATTED:
291		aprint_normal("unformatted media");
292		break;
293
294#ifdef DIAGNOSTIC
295	default:
296		panic("sdattach: unknown result from get_parms");
297		break;
298#endif
299	}
300	aprint_normal("\n");
301
	/*
	 * Establish a shutdown hook so that we can ensure that
	 * our data has actually made it onto the platter at
	 * shutdown time.  Note that this relies on the fact
	 * that the shutdown hooks at the "leaves" of the device tree
	 * are run first (thus guaranteeing that our hook runs before
	 * our ancestors').
	 */
310	if (!pmf_device_register1(self, sd_suspend, NULL, sd_shutdown))
311		aprint_error_dev(self, "couldn't establish power handler\n");
312
313	/*
314	 * attach the device into the random source list
315	 */
316	rnd_attach_source(&sd->rnd_source, device_xname(sd->sc_dev),
317			  RND_TYPE_DISK, 0);
318
319	/* Discover wedges on this disk. */
320	dkwedge_discover(&sd->sc_dk);
321
	/*
	 * Disk insertion and removal times can be a useful source
	 * of entropy, though the estimator should never _count_
	 * these bits on insertion, because the deltas to the
	 * (nonexistent) previous event should never allow it.
	 */
328	rnd_add_uint32(&sd->rnd_source, rndval);
329}
330
331static int
332sddetach(device_t self, int flags)
333{
334	struct sd_softc *sd = device_private(self);
335	int s, bmaj, cmaj, i, mn, rc, rndval = cprng_strong32();
336
337	rnd_add_uint32(&sd->rnd_source, rndval);
338
339	if ((rc = disk_begindetach(&sd->sc_dk, sdlastclose, self, flags)) != 0)
340		return rc;
341
342	/* locate the major number */
343	bmaj = bdevsw_lookup_major(&sd_bdevsw);
344	cmaj = cdevsw_lookup_major(&sd_cdevsw);
345
346	/* Nuke the vnodes for any open instances */
347	for (i = 0; i < MAXPARTITIONS; i++) {
348		mn = SDMINOR(device_unit(self), i);
349		vdevgone(bmaj, mn, mn, VBLK);
350		vdevgone(cmaj, mn, mn, VCHR);
351	}
352
353	/* kill any pending restart */
354	callout_stop(&sd->sc_callout);
355
356	/* Delete all of our wedges. */
357	dkwedge_delall(&sd->sc_dk);
358
359	s = splbio();
360
361	/* Kill off any queued buffers. */
362	bufq_drain(sd->buf_queue);
363
364	bufq_free(sd->buf_queue);
365
366	/* Kill off any pending commands. */
367	scsipi_kill_pending(sd->sc_periph);
368
369	splx(s);
370
371	/* Detach from the disk list. */
372	disk_detach(&sd->sc_dk);
373	disk_destroy(&sd->sc_dk);
374
375	callout_destroy(&sd->sc_callout);
376
377	pmf_device_deregister(self);
378
379	/* Unhook the entropy source. */
380	rnd_detach_source(&sd->rnd_source);
381
382	return (0);
383}
384
/*
 * Open the device. Make sure the partition info is as up-to-date as can be.
 */
388static int
389sdopen(dev_t dev, int flag, int fmt, struct lwp *l)
390{
391	struct sd_softc *sd;
392	struct scsipi_periph *periph;
393	struct scsipi_adapter *adapt;
394	int unit, part;
395	int error;
396
397	unit = SDUNIT(dev);
398	sd = device_lookup_private(&sd_cd, unit);
399	if (sd == NULL)
400		return (ENXIO);
401
402	if (!device_is_active(sd->sc_dev))
403		return (ENODEV);
404
405	part = SDPART(dev);
406
407	mutex_enter(&sd->sc_dk.dk_openlock);
408
409	/*
410	 * If there are wedges, and this is not RAW_PART, then we
411	 * need to fail.
412	 */
413	if (sd->sc_dk.dk_nwedges != 0 && part != RAW_PART) {
414		error = EBUSY;
415		goto bad1;
416	}
417
418	periph = sd->sc_periph;
419	adapt = periph->periph_channel->chan_adapter;
420
421	SC_DEBUG(periph, SCSIPI_DB1,
422	    ("sdopen: dev=0x%"PRIx64" (unit %d (of %d), partition %d)\n", dev, unit,
423	    sd_cd.cd_ndevs, part));
424
425	/*
426	 * If this is the first open of this device, add a reference
427	 * to the adapter.
428	 */
429	if (sd->sc_dk.dk_openmask == 0 &&
430	    (error = scsipi_adapter_addref(adapt)) != 0)
431		goto bad1;
432
433	if ((periph->periph_flags & PERIPH_OPEN) != 0) {
		/*
		 * If any partition is open, but the disk has been invalidated,
		 * disallow further opens of non-raw partitions.
		 */
438		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 &&
439		    (part != RAW_PART || fmt != S_IFCHR)) {
440			error = EIO;
441			goto bad2;
442		}
443	} else {
444		int silent;
445
446		if ((part == RAW_PART && fmt == S_IFCHR) || (flag & FSILENT))
447			silent = XS_CTL_SILENT;
448		else
449			silent = 0;
450
451		/* Check that it is still responding and ok. */
452		error = scsipi_test_unit_ready(periph,
453		    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE |
454		    silent);
455
		/*
		 * Start the pack spinning if necessary. Always allow the
		 * raw partition to be opened, for raw IOCTLs. Data transfers
		 * will check for PERIPH_MEDIA_LOADED.
		 */
461		if (error == EIO) {
462			int error2;
463
464			error2 = scsipi_start(periph, SSS_START, silent);
465			switch (error2) {
466			case 0:
467				error = 0;
468				break;
469			case EIO:
470			case EINVAL:
471				break;
472			default:
473				error = error2;
474				break;
475			}
476		}
477		if (error) {
478			if (silent && (flag & FSILENT) == 0)
479				goto out;
480			goto bad2;
481		}
482
483		periph->periph_flags |= PERIPH_OPEN;
484
485		if (periph->periph_flags & PERIPH_REMOVABLE) {
486			/* Lock the pack in. */
487			error = scsipi_prevent(periph, SPAMR_PREVENT_DT,
488			    XS_CTL_IGNORE_ILLEGAL_REQUEST |
489			    XS_CTL_IGNORE_MEDIA_CHANGE |
490			    XS_CTL_SILENT);
491			if (error)
492				goto bad3;
493		}
494
495		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
496			int param_error;
497			periph->periph_flags |= PERIPH_MEDIA_LOADED;
498
499			/*
500			 * Load the physical device parameters.
501			 *
502			 * Note that if media is present but unformatted,
503			 * we allow the open (so that it can be formatted!).
504			 * The drive should refuse real I/O, if the media is
505			 * unformatted.
506			 */
507			if ((param_error = sd_get_parms(sd, &sd->params, 0))
508			     == SDGP_RESULT_OFFLINE) {
509				error = ENXIO;
510				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
511				goto bad3;
512			}
513			SC_DEBUG(periph, SCSIPI_DB3, ("Params loaded "));
514
515			/* Load the partition info if not already loaded. */
516			if (param_error == 0) {
517				if ((sdgetdisklabel(sd) != 0) && (part != RAW_PART)) {
518					error = EIO;
519					goto bad3;
520				}
521				SC_DEBUG(periph, SCSIPI_DB3,
522				     ("Disklabel loaded "));
523			}
524		}
525	}
526
527	/* Check that the partition exists. */
528	if (part != RAW_PART &&
529	    (part >= sd->sc_dk.dk_label->d_npartitions ||
530	     sd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
531		error = ENXIO;
532		goto bad3;
533	}
534
 out:	/* Ensure only one open at a time. */
536	switch (fmt) {
537	case S_IFCHR:
538		sd->sc_dk.dk_copenmask |= (1 << part);
539		break;
540	case S_IFBLK:
541		sd->sc_dk.dk_bopenmask |= (1 << part);
542		break;
543	}
544	sd->sc_dk.dk_openmask =
545	    sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;
546
547	SC_DEBUG(periph, SCSIPI_DB3, ("open complete\n"));
548	mutex_exit(&sd->sc_dk.dk_openlock);
549	return (0);
550
551 bad3:
552	if (sd->sc_dk.dk_openmask == 0) {
553		if (periph->periph_flags & PERIPH_REMOVABLE)
554			scsipi_prevent(periph, SPAMR_ALLOW,
555			    XS_CTL_IGNORE_ILLEGAL_REQUEST |
556			    XS_CTL_IGNORE_MEDIA_CHANGE |
557			    XS_CTL_SILENT);
558		periph->periph_flags &= ~PERIPH_OPEN;
559	}
560
561 bad2:
562	if (sd->sc_dk.dk_openmask == 0)
563		scsipi_adapter_delref(adapt);
564
565 bad1:
566	mutex_exit(&sd->sc_dk.dk_openlock);
567	return (error);
568}
569
570/*
571 * Caller must hold sd->sc_dk.dk_openlock.
572 */
573static int
574sdlastclose(device_t self)
575{
576	struct sd_softc *sd = device_private(self);
577	struct scsipi_periph *periph = sd->sc_periph;
578	struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
579
580	/*
581	 * If the disk cache needs flushing, and the disk supports
582	 * it, do it now.
583	 */
584	if ((sd->flags & SDF_DIRTY) != 0) {
585		if (sd_flush(sd, 0)) {
586			aprint_error_dev(sd->sc_dev,
587				"cache synchronization failed\n");
588			sd->flags &= ~SDF_FLUSHING;
589		} else
590			sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
591	}
592
593	scsipi_wait_drain(periph);
594
595	if (periph->periph_flags & PERIPH_REMOVABLE)
596		scsipi_prevent(periph, SPAMR_ALLOW,
597		    XS_CTL_IGNORE_ILLEGAL_REQUEST |
598		    XS_CTL_IGNORE_NOT_READY |
599		    XS_CTL_SILENT);
600	periph->periph_flags &= ~PERIPH_OPEN;
601
602	scsipi_wait_drain(periph);
603
604	scsipi_adapter_delref(adapt);
605
606	return 0;
607}
608
/*
 * Close the device. Only called if we are the LAST occurrence of an open
 * device.  Convenient now but usually a pain.
 */
613static int
614sdclose(dev_t dev, int flag, int fmt, struct lwp *l)
615{
616	struct sd_softc *sd = device_lookup_private(&sd_cd, SDUNIT(dev));
617	int part = SDPART(dev);
618
619	mutex_enter(&sd->sc_dk.dk_openlock);
620	switch (fmt) {
621	case S_IFCHR:
622		sd->sc_dk.dk_copenmask &= ~(1 << part);
623		break;
624	case S_IFBLK:
625		sd->sc_dk.dk_bopenmask &= ~(1 << part);
626		break;
627	}
628	sd->sc_dk.dk_openmask =
629	    sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;
630
631	if (sd->sc_dk.dk_openmask == 0)
632		sdlastclose(sd->sc_dev);
633
634	mutex_exit(&sd->sc_dk.dk_openlock);
635	return (0);
636}
637
638/*
639 * Actually translate the requested transfer into one the physical driver
640 * can understand.  The transfer is described by a buf and will include
641 * only one physical transfer.
642 */
643static void
644sdstrategy(struct buf *bp)
645{
646	struct sd_softc *sd = device_lookup_private(&sd_cd, SDUNIT(bp->b_dev));
647	struct scsipi_periph *periph = sd->sc_periph;
648	struct disklabel *lp;
649	daddr_t blkno;
650	int s;
651	bool sector_aligned;
652
653	SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdstrategy "));
654	SC_DEBUG(sd->sc_periph, SCSIPI_DB1,
655	    ("%d bytes @ blk %" PRId64 "\n", bp->b_bcount, bp->b_blkno));
656	/*
657	 * If the device has been made invalid, error out
658	 */
659	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 ||
660	    !device_is_active(sd->sc_dev)) {
661		if (periph->periph_flags & PERIPH_OPEN)
662			bp->b_error = EIO;
663		else
664			bp->b_error = ENODEV;
665		goto done;
666	}
667
668	lp = sd->sc_dk.dk_label;
669
670	/*
671	 * The transfer must be a whole number of blocks, offset must not be
672	 * negative.
673	 */
674	if (lp->d_secsize == DEV_BSIZE) {
675		sector_aligned = (bp->b_bcount & (DEV_BSIZE - 1)) == 0;
676	} else {
677		sector_aligned = (bp->b_bcount % lp->d_secsize) == 0;
678	}
679	if (!sector_aligned || bp->b_blkno < 0) {
680		bp->b_error = EINVAL;
681		goto done;
682	}
	/*
	 * If it's a null transfer, return immediately.
	 */
686	if (bp->b_bcount == 0)
687		goto done;
688
	/*
	 * Do bounds checking and adjust the transfer; if there's an error,
	 * process it.  If it's past the end of the partition, just return.
	 */
693	if (SDPART(bp->b_dev) == RAW_PART) {
694		if (bounds_check_with_mediasize(bp, DEV_BSIZE,
695		    sd->params.disksize512) <= 0)
696			goto done;
697	} else {
698		if (bounds_check_with_label(&sd->sc_dk, bp,
699		    (sd->flags & (SDF_WLABEL|SDF_LABELLING)) != 0) <= 0)
700			goto done;
701	}
702
703	/*
704	 * Now convert the block number to absolute and put it in
705	 * terms of the device's logical block size.
706	 */
707	if (lp->d_secsize == DEV_BSIZE)
708		blkno = bp->b_blkno;
709	else if (lp->d_secsize > DEV_BSIZE)
710		blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
711	else
712		blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);
713
714	if (SDPART(bp->b_dev) != RAW_PART)
715		blkno += lp->d_partitions[SDPART(bp->b_dev)].p_offset;
716
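	/*
	 * b_rawblkno is in device logical blocks; it is what the buffer
	 * queue sorts on and what sdstart() uses to build the CDB.
	 */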
717	bp->b_rawblkno = blkno;
718
719	s = splbio();
720
721	/*
722	 * Place it in the queue of disk activities for this disk.
723	 *
724	 * XXX Only do disksort() if the current operating mode does not
725	 * XXX include tagged queueing.
726	 */
727	bufq_put(sd->buf_queue, bp);
728
729	/*
730	 * Tell the device to get going on the transfer if it's
731	 * not doing anything, otherwise just wait for completion
732	 */
733	sdstart(sd->sc_periph);
734
735	splx(s);
736	return;
737
738done:
739	/*
740	 * Correctly set the buf to indicate a completed xfer
741	 */
742	bp->b_resid = bp->b_bcount;
743	biodone(bp);
744}
745
746/*
747 * sdstart looks to see if there is a buf waiting for the device
748 * and that the device is not already busy. If both are true,
 * it dequeues the buf and creates a scsi command to perform the
750 * transfer in the buf. The transfer request will call scsipi_done
751 * on completion, which will in turn call this routine again
752 * so that the next queued transfer is performed.
753 * The bufs are queued by the strategy routine (sdstrategy)
754 *
755 * This routine is also called after other non-queued requests
756 * have been made of the scsi driver, to ensure that the queue
757 * continues to be drained.
758 *
759 * must be called at the correct (highish) spl level
760 * sdstart() is called at splbio from sdstrategy, sdrestart and scsipi_done
761 */
762static void
763sdstart(struct scsipi_periph *periph)
764{
765	struct sd_softc *sd = device_private(periph->periph_dev);
766	struct disklabel *lp = sd->sc_dk.dk_label;
767	struct buf *bp = 0;
768	struct scsipi_rw_16 cmd16;
769	struct scsipi_rw_10 cmd_big;
770	struct scsi_rw_6 cmd_small;
771	struct scsipi_generic *cmdp;
772	struct scsipi_xfer *xs;
773	int nblks, cmdlen, error, flags;
774
775	SC_DEBUG(periph, SCSIPI_DB2, ("sdstart "));
776	/*
777	 * Check if the device has room for another command
778	 */
779	while (periph->periph_active < periph->periph_openings) {
		/*
		 * There is excess capacity, but a special command is waiting.
		 * It'll need the adapter as soon as we clear out of the
		 * way and let it run (user-level wait).
		 */
785		if (periph->periph_flags & PERIPH_WAITING) {
786			periph->periph_flags &= ~PERIPH_WAITING;
787			wakeup((void *)periph);
788			return;
789		}
790
791		/*
792		 * If the device has become invalid, abort all the
793		 * reads and writes until all files have been closed and
794		 * re-opened
795		 */
796		if (__predict_false(
797		    (periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)) {
798			if ((bp = bufq_get(sd->buf_queue)) != NULL) {
799				bp->b_error = EIO;
800				bp->b_resid = bp->b_bcount;
801				biodone(bp);
802				continue;
803			} else {
804				return;
805			}
806		}
807
808		/*
809		 * See if there is a buf with work for us to do..
810		 */
811		if ((bp = bufq_peek(sd->buf_queue)) == NULL)
812			return;
813
814		/*
815		 * We have a buf, now we should make a command.
816		 */
817
818		if (lp->d_secsize == DEV_BSIZE)
819			nblks = bp->b_bcount >> DEV_BSHIFT;
820		else
821			nblks = howmany(bp->b_bcount, lp->d_secsize);
822
823		/*
824		 * Fill out the scsi command.  Use the smallest CDB possible
825		 * (6-byte, 10-byte, or 16-byte).
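		 * A 6-byte CDB can address a 21-bit LBA and up to 255 blocks;
		 * a 10-byte CDB covers 32-bit LBAs with 16-bit counts;
		 * anything beyond that needs the 16-byte form.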
826		 */
827		if (((bp->b_rawblkno & 0x1fffff) == bp->b_rawblkno) &&
828		    ((nblks & 0xff) == nblks) &&
829		    !(periph->periph_quirks & PQUIRK_ONLYBIG)) {
830			/* 6-byte CDB */
831			memset(&cmd_small, 0, sizeof(cmd_small));
832			cmd_small.opcode = (bp->b_flags & B_READ) ?
833			    SCSI_READ_6_COMMAND : SCSI_WRITE_6_COMMAND;
834			_lto3b(bp->b_rawblkno, cmd_small.addr);
835			cmd_small.length = nblks & 0xff;
836			cmdlen = sizeof(cmd_small);
837			cmdp = (struct scsipi_generic *)&cmd_small;
838		} else if ((bp->b_rawblkno & 0xffffffff) == bp->b_rawblkno) {
839			/* 10-byte CDB */
840			memset(&cmd_big, 0, sizeof(cmd_big));
841			cmd_big.opcode = (bp->b_flags & B_READ) ?
842			    READ_10 : WRITE_10;
843			_lto4b(bp->b_rawblkno, cmd_big.addr);
844			_lto2b(nblks, cmd_big.length);
845			cmdlen = sizeof(cmd_big);
846			cmdp = (struct scsipi_generic *)&cmd_big;
847		} else {
848			/* 16-byte CDB */
849			memset(&cmd16, 0, sizeof(cmd16));
850			cmd16.opcode = (bp->b_flags & B_READ) ?
851			    READ_16 : WRITE_16;
852			_lto8b(bp->b_rawblkno, cmd16.addr);
853			_lto4b(nblks, cmd16.length);
854			cmdlen = sizeof(cmd16);
855			cmdp = (struct scsipi_generic *)&cmd16;
856		}
857
858		/* Instrumentation. */
859		disk_busy(&sd->sc_dk);
860
861		/*
862		 * Mark the disk dirty so that the cache will be
863		 * flushed on close.
864		 */
865		if ((bp->b_flags & B_READ) == 0)
866			sd->flags |= SDF_DIRTY;
867
868		/*
869		 * Figure out what flags to use.
870		 */
871		flags = XS_CTL_NOSLEEP|XS_CTL_ASYNC|XS_CTL_SIMPLE_TAG;
872		if (bp->b_flags & B_READ)
873			flags |= XS_CTL_DATA_IN;
874		else
875			flags |= XS_CTL_DATA_OUT;
876
877		/*
878		 * Call the routine that chats with the adapter.
879		 * Note: we cannot sleep as we may be an interrupt
880		 */
881		xs = scsipi_make_xs(periph, cmdp, cmdlen,
882		    (u_char *)bp->b_data, bp->b_bcount,
883		    SDRETRIES, SD_IO_TIMEOUT, bp, flags);
884		if (__predict_false(xs == NULL)) {
885			/*
886			 * out of memory. Keep this buffer in the queue, and
887			 * retry later.
888			 */
889			callout_reset(&sd->sc_callout, hz / 2, sdrestart,
890			    periph);
891			return;
892		}
		/*
		 * We need to dequeue the buffer before queueing the command
		 * because sdstart() may be called recursively from the
		 * HBA driver.
		 */
898#ifdef DIAGNOSTIC
899		if (bufq_get(sd->buf_queue) != bp)
900			panic("sdstart(): dequeued wrong buf");
901#else
902		bufq_get(sd->buf_queue);
903#endif
904		error = scsipi_execute_xs(xs);
905		/* with a scsipi_xfer preallocated, scsipi_command can't fail */
906		KASSERT(error == 0);
907	}
908}
909
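/*
 * Callout handler: retry sdstart() once scsipi_xfer allocation has had a
 * chance to succeed again (scheduled from sdstart() when it runs out of
 * memory for an xfer).
 */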
910static void
911sdrestart(void *v)
912{
913	int s = splbio();
914	sdstart((struct scsipi_periph *)v);
915	splx(s);
916}
917
918static void
919sddone(struct scsipi_xfer *xs, int error)
920{
921	struct sd_softc *sd = device_private(xs->xs_periph->periph_dev);
922	struct buf *bp = xs->bp;
923
924	if (sd->flags & SDF_FLUSHING) {
925		/* Flush completed, no longer dirty. */
926		sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
927	}
928
929	if (bp) {
930		bp->b_error = error;
931		bp->b_resid = xs->resid;
932		if (error) {
933			/* on a read/write error bp->b_resid is zero, so fix */
934			bp->b_resid = bp->b_bcount;
935		}
936
937		disk_unbusy(&sd->sc_dk, bp->b_bcount - bp->b_resid,
938		    (bp->b_flags & B_READ));
939		rnd_add_uint32(&sd->rnd_source, bp->b_rawblkno);
940
941		biodone(bp);
942	}
943}
944
945static void
946sdminphys(struct buf *bp)
947{
948	struct sd_softc *sd = device_lookup_private(&sd_cd, SDUNIT(bp->b_dev));
949	long xmax;
950
951	/*
952	 * If the device is ancient, we want to make sure that
953	 * the transfer fits into a 6-byte cdb.
954	 *
955	 * XXX Note that the SCSI-I spec says that 256-block transfers
956	 * are allowed in a 6-byte read/write, and are specified
	 * by setting the "length" to 0.  However, we're conservative
958	 * here, allowing only 255-block transfers in case an
959	 * ancient device gets confused by length == 0.  A length of 0
960	 * in a 10-byte read/write actually means 0 blocks.
961	 */
962	if ((sd->flags & SDF_ANCIENT) &&
963	    ((sd->sc_periph->periph_flags &
964	    (PERIPH_REMOVABLE | PERIPH_MEDIA_LOADED)) != PERIPH_REMOVABLE)) {
965		xmax = sd->sc_dk.dk_label->d_secsize * 0xff;
966
967		if (bp->b_bcount > xmax)
968			bp->b_bcount = xmax;
969	}
970
971	scsipi_adapter_minphys(sd->sc_periph->periph_channel, bp);
972}
973
974static int
975sdread(dev_t dev, struct uio *uio, int ioflag)
976{
977
978	return (physio(sdstrategy, NULL, dev, B_READ, sdminphys, uio));
979}
980
981static int
982sdwrite(dev_t dev, struct uio *uio, int ioflag)
983{
984
985	return (physio(sdstrategy, NULL, dev, B_WRITE, sdminphys, uio));
986}
987
988/*
989 * Perform special action on behalf of the user
990 * Knows about the internals of this device
991 */
992static int
993sdioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
994{
995	struct sd_softc *sd = device_lookup_private(&sd_cd, SDUNIT(dev));
996	struct scsipi_periph *periph = sd->sc_periph;
997	int part = SDPART(dev);
998	int error = 0;
999	int s;
1000#ifdef __HAVE_OLD_DISKLABEL
1001	struct disklabel *newlabel = NULL;
1002#endif
1003
1004	SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdioctl 0x%lx ", cmd));
1005
1006	/*
1007	 * If the device is not valid, some IOCTLs can still be
1008	 * handled on the raw partition. Check this here.
1009	 */
1010	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
1011		switch (cmd) {
1012		case DIOCKLABEL:
1013		case DIOCWLABEL:
1014		case DIOCLOCK:
1015		case DIOCEJECT:
1016		case ODIOCEJECT:
1017		case DIOCGCACHE:
1018		case DIOCSCACHE:
1019		case DIOCGSTRATEGY:
1020		case DIOCSSTRATEGY:
1021		case SCIOCIDENTIFY:
1022		case OSCIOCIDENTIFY:
1023		case SCIOCCOMMAND:
1024		case SCIOCDEBUG:
1025			if (part == RAW_PART)
1026				break;
1027		/* FALLTHROUGH */
1028		default:
1029			if ((periph->periph_flags & PERIPH_OPEN) == 0)
1030				return (ENODEV);
1031			else
1032				return (EIO);
1033		}
1034	}
1035
1036	error = disk_ioctl(&sd->sc_dk, cmd, addr, flag, l);
1037	if (error != EPASSTHROUGH)
1038		return (error);
1039
1040	switch (cmd) {
1041	case DIOCGDINFO:
1042		*(struct disklabel *)addr = *(sd->sc_dk.dk_label);
1043		return (0);
1044
1045#ifdef __HAVE_OLD_DISKLABEL
1046	case ODIOCGDINFO:
1047		newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1048		if (newlabel == NULL)
1049			return EIO;
1050		memcpy(newlabel, sd->sc_dk.dk_label, sizeof (*newlabel));
1051		if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1052			memcpy(addr, newlabel, sizeof (struct olddisklabel));
1053		else
1054			error = ENOTTY;
1055		free(newlabel, M_TEMP);
1056		return error;
1057#endif
1058
1059	case DIOCGPART:
1060		((struct partinfo *)addr)->disklab = sd->sc_dk.dk_label;
1061		((struct partinfo *)addr)->part =
1062		    &sd->sc_dk.dk_label->d_partitions[part];
1063		return (0);
1064
1065	case DIOCWDINFO:
1066	case DIOCSDINFO:
1067#ifdef __HAVE_OLD_DISKLABEL
1068	case ODIOCWDINFO:
1069	case ODIOCSDINFO:
1070#endif
1071	{
1072		struct disklabel *lp;
1073
1074		if ((flag & FWRITE) == 0)
1075			return (EBADF);
1076
1077#ifdef __HAVE_OLD_DISKLABEL
1078 		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1079			newlabel = malloc(sizeof *newlabel, M_TEMP,
1080			    M_WAITOK | M_ZERO);
1081			if (newlabel == NULL)
1082				return EIO;
1083			memcpy(newlabel, addr, sizeof (struct olddisklabel));
1084			lp = newlabel;
1085		} else
1086#endif
1087		lp = (struct disklabel *)addr;
1088
1089		mutex_enter(&sd->sc_dk.dk_openlock);
1090		sd->flags |= SDF_LABELLING;
1091
1092		error = setdisklabel(sd->sc_dk.dk_label,
1093		    lp, /*sd->sc_dk.dk_openmask : */0,
1094		    sd->sc_dk.dk_cpulabel);
1095		if (error == 0) {
1096			if (cmd == DIOCWDINFO
1097#ifdef __HAVE_OLD_DISKLABEL
1098			    || cmd == ODIOCWDINFO
1099#endif
1100			   )
1101				error = writedisklabel(SDLABELDEV(dev),
1102				    sdstrategy, sd->sc_dk.dk_label,
1103				    sd->sc_dk.dk_cpulabel);
1104		}
1105
1106		sd->flags &= ~SDF_LABELLING;
1107		mutex_exit(&sd->sc_dk.dk_openlock);
1108#ifdef __HAVE_OLD_DISKLABEL
1109		if (newlabel != NULL)
1110			free(newlabel, M_TEMP);
1111#endif
1112		return (error);
1113	}
1114
1115	case DIOCKLABEL:
1116		if (*(int *)addr)
1117			periph->periph_flags |= PERIPH_KEEP_LABEL;
1118		else
1119			periph->periph_flags &= ~PERIPH_KEEP_LABEL;
1120		return (0);
1121
1122	case DIOCWLABEL:
1123		if ((flag & FWRITE) == 0)
1124			return (EBADF);
1125		if (*(int *)addr)
1126			sd->flags |= SDF_WLABEL;
1127		else
1128			sd->flags &= ~SDF_WLABEL;
1129		return (0);
1130
1131	case DIOCLOCK:
1132		if (periph->periph_flags & PERIPH_REMOVABLE)
1133			return (scsipi_prevent(periph,
1134			    (*(int *)addr) ?
1135			    SPAMR_PREVENT_DT : SPAMR_ALLOW, 0));
1136		else
1137			return (ENOTTY);
1138
1139	case DIOCEJECT:
1140		if ((periph->periph_flags & PERIPH_REMOVABLE) == 0)
1141			return (ENOTTY);
1142		if (*(int *)addr == 0) {
1143			/*
1144			 * Don't force eject: check that we are the only
1145			 * partition open. If so, unlock it.
1146			 */
1147			if ((sd->sc_dk.dk_openmask & ~(1 << part)) == 0 &&
1148			    sd->sc_dk.dk_bopenmask + sd->sc_dk.dk_copenmask ==
1149			    sd->sc_dk.dk_openmask) {
1150				error = scsipi_prevent(periph, SPAMR_ALLOW,
1151				    XS_CTL_IGNORE_NOT_READY);
1152				if (error)
1153					return (error);
1154			} else {
1155				return (EBUSY);
1156			}
1157		}
1158		/* FALLTHROUGH */
1159	case ODIOCEJECT:
1160		return ((periph->periph_flags & PERIPH_REMOVABLE) == 0 ?
1161		    ENOTTY : scsipi_start(periph, SSS_STOP|SSS_LOEJ, 0));
1162
1163	case DIOCGDEFLABEL:
1164		sdgetdefaultlabel(sd, (struct disklabel *)addr);
1165		return (0);
1166
1167#ifdef __HAVE_OLD_DISKLABEL
1168	case ODIOCGDEFLABEL:
1169		newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1170		if (newlabel == NULL)
1171			return EIO;
1172		sdgetdefaultlabel(sd, newlabel);
1173		if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1174			memcpy(addr, newlabel, sizeof (struct olddisklabel));
1175		else
1176			error = ENOTTY;
1177		free(newlabel, M_TEMP);
1178		return error;
1179#endif
1180
1181	case DIOCGCACHE:
1182		return (sd_getcache(sd, (int *) addr));
1183
1184	case DIOCSCACHE:
1185		if ((flag & FWRITE) == 0)
1186			return (EBADF);
1187		return (sd_setcache(sd, *(int *) addr));
1188
1189	case DIOCCACHESYNC:
1190		/*
1191		 * XXX Do we really need to care about having a writable
1192		 * file descriptor here?
1193		 */
1194		if ((flag & FWRITE) == 0)
1195			return (EBADF);
1196		if (((sd->flags & SDF_DIRTY) != 0 || *(int *)addr != 0)) {
1197			error = sd_flush(sd, 0);
1198			if (error)
1199				sd->flags &= ~SDF_FLUSHING;
1200			else
1201				sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1202		} else
1203			error = 0;
1204		return (error);
1205
1206	case DIOCAWEDGE:
1207	    {
1208	    	struct dkwedge_info *dkw = (void *) addr;
1209
1210		if ((flag & FWRITE) == 0)
1211			return (EBADF);
1212
1213		/* If the ioctl happens here, the parent is us. */
1214		strlcpy(dkw->dkw_parent, device_xname(sd->sc_dev),
1215			sizeof(dkw->dkw_parent));
1216		return (dkwedge_add(dkw));
1217	    }
1218
1219	case DIOCDWEDGE:
1220	    {
1221	    	struct dkwedge_info *dkw = (void *) addr;
1222
1223		if ((flag & FWRITE) == 0)
1224			return (EBADF);
1225
1226		/* If the ioctl happens here, the parent is us. */
1227		strlcpy(dkw->dkw_parent, device_xname(sd->sc_dev),
1228			sizeof(dkw->dkw_parent));
1229		return (dkwedge_del(dkw));
1230	    }
1231
1232	case DIOCLWEDGES:
1233	    {
1234	    	struct dkwedge_list *dkwl = (void *) addr;
1235
1236		return (dkwedge_list(&sd->sc_dk, dkwl, l));
1237	    }
1238
1239	case DIOCGSTRATEGY:
1240	    {
1241		struct disk_strategy *dks = addr;
1242
1243		s = splbio();
1244		strlcpy(dks->dks_name, bufq_getstrategyname(sd->buf_queue),
1245		    sizeof(dks->dks_name));
1246		splx(s);
1247		dks->dks_paramlen = 0;
1248
1249		return 0;
1250	    }
1251
1252	case DIOCSSTRATEGY:
1253	    {
1254		struct disk_strategy *dks = addr;
1255		struct bufq_state *new;
1256		struct bufq_state *old;
1257
1258		if ((flag & FWRITE) == 0) {
1259			return EBADF;
1260		}
1261
1262		if (dks->dks_param != NULL) {
1263			return EINVAL;
1264		}
1265		dks->dks_name[sizeof(dks->dks_name) - 1] = 0; /* ensure term */
1266		error = bufq_alloc(&new, dks->dks_name,
1267		    BUFQ_EXACT|BUFQ_SORT_RAWBLOCK);
1268		if (error) {
1269			return error;
1270		}
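		/*
		 * Swap in the new queue under splbio, moving any buffers
		 * still pending on the old queue onto it before freeing
		 * the old one.
		 */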
1271		s = splbio();
1272		old = sd->buf_queue;
1273		bufq_move(new, old);
1274		sd->buf_queue = new;
1275		splx(s);
1276		bufq_free(old);
1277
1278		return 0;
1279	    }
1280
1281	default:
1282		if (part != RAW_PART)
1283			return (ENOTTY);
1284		return (scsipi_do_ioctl(periph, dev, cmd, addr, flag, l));
1285	}
1286
1287#ifdef DIAGNOSTIC
1288	panic("sdioctl: impossible");
1289#endif
1290}
1291
1292static void
1293sdgetdefaultlabel(struct sd_softc *sd, struct disklabel *lp)
1294{
1295
1296	memset(lp, 0, sizeof(struct disklabel));
1297
1298	lp->d_secsize = sd->params.blksize;
1299	lp->d_ntracks = sd->params.heads;
1300	lp->d_nsectors = sd->params.sectors;
1301	lp->d_ncylinders = sd->params.cyls;
1302	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1303
1304	switch (SCSIPI_BUSTYPE_TYPE(scsipi_periph_bustype(sd->sc_periph))) {
1305	case SCSIPI_BUSTYPE_SCSI:
1306		lp->d_type = DTYPE_SCSI;
1307		break;
1308	case SCSIPI_BUSTYPE_ATAPI:
1309		lp->d_type = DTYPE_ATAPI;
1310		break;
1311	}
1312	/*
1313	 * XXX
1314	 * We could probe the mode pages to figure out what kind of disc it is.
1315	 * Is this worthwhile?
1316	 */
1317	strncpy(lp->d_typename, sd->name, 16);
1318	strncpy(lp->d_packname, "fictitious", 16);
1319	lp->d_secperunit = sd->params.disksize;
1320	lp->d_rpm = sd->params.rot_rate;
1321	lp->d_interleave = 1;
1322	lp->d_flags = sd->sc_periph->periph_flags & PERIPH_REMOVABLE ?
1323	    D_REMOVABLE : 0;
1324
1325	lp->d_partitions[RAW_PART].p_offset = 0;
1326	lp->d_partitions[RAW_PART].p_size = lp->d_secperunit;
1327	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
1328	lp->d_npartitions = RAW_PART + 1;
1329
1330	lp->d_magic = DISKMAGIC;
1331	lp->d_magic2 = DISKMAGIC;
1332	lp->d_checksum = dkcksum(lp);
1333}
1334
1335
1336/*
1337 * Load the label information on the named device
1338 */
1339static int
1340sdgetdisklabel(struct sd_softc *sd)
1341{
1342	struct disklabel *lp = sd->sc_dk.dk_label;
1343	const char *errstring;
1344
1345	memset(sd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));
1346
1347	sdgetdefaultlabel(sd, lp);
1348
1349	if (lp->d_secpercyl == 0) {
1350		lp->d_secpercyl = 100;
1351		/* as long as it's not 0 - readdisklabel divides by it (?) */
1352	}
1353
1354	/*
1355	 * Call the generic disklabel extraction routine
1356	 */
1357	errstring = readdisklabel(MAKESDDEV(0, device_unit(sd->sc_dev),
1358	    RAW_PART), sdstrategy, lp, sd->sc_dk.dk_cpulabel);
1359	if (errstring) {
1360		aprint_error_dev(sd->sc_dev, "%s\n", errstring);
1361		return EIO;
1362	}
1363	return 0;
1364}
1365
1366static bool
1367sd_shutdown(device_t self, int how)
1368{
1369	struct sd_softc *sd = device_private(self);
1370
1371	/*
1372	 * If the disk cache needs to be flushed, and the disk supports
1373	 * it, flush it.  We're cold at this point, so we poll for
1374	 * completion.
1375	 */
1376	if ((sd->flags & SDF_DIRTY) != 0) {
1377		if (sd_flush(sd, XS_CTL_NOSLEEP|XS_CTL_POLL)) {
1378			aprint_error_dev(sd->sc_dev,
1379				"cache synchronization failed\n");
1380			sd->flags &= ~SDF_FLUSHING;
1381		} else
1382			sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1383	}
1384	return true;
1385}
1386
1387static bool
1388sd_suspend(device_t dv, const pmf_qual_t *qual)
1389{
1390	return sd_shutdown(dv, boothowto); /* XXX no need to poll */
1391}
1392
1393/*
1394 * Check Errors
1395 */
1396static int
1397sd_interpret_sense(struct scsipi_xfer *xs)
1398{
1399	struct scsipi_periph *periph = xs->xs_periph;
1400	struct scsi_sense_data *sense = &xs->sense.scsi_sense;
1401	struct sd_softc *sd = device_private(periph->periph_dev);
1402	int s, error, retval = EJUSTRETURN;
1403
1404	/*
1405	 * If the periph is already recovering, just do the normal
1406	 * error processing.
1407	 */
1408	if (periph->periph_flags & PERIPH_RECOVERING)
1409		return (retval);
1410
1411	/*
1412	 * Ignore errors from accessing illegal fields (e.g. trying to
1413	 * lock the door of a digicam, which doesn't have a door that
1414	 * can be locked) for the SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL command.
1415	 */
1416	if (xs->cmd->opcode == SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL &&
1417	    SSD_SENSE_KEY(sense->flags) == SKEY_ILLEGAL_REQUEST &&
1418	    sense->asc == 0x24 &&
1419	    sense->ascq == 0x00) { /* Illegal field in CDB */
1420		if (!(xs->xs_control & XS_CTL_SILENT)) {
1421			scsipi_printaddr(periph);
1422			printf("no door lock\n");
1423		}
1424		xs->xs_control |= XS_CTL_IGNORE_ILLEGAL_REQUEST;
1425		return (retval);
1426	}
1427
1428
1429
1430	/*
1431	 * If the device is not open yet, let the generic code handle it.
1432	 */
1433	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1434		return (retval);
1435
1436	/*
1437	 * If it isn't a extended or extended/deferred error, let
1438	 * the generic code handle it.
1439	 */
1440	if (SSD_RCODE(sense->response_code) != SSD_RCODE_CURRENT &&
1441	    SSD_RCODE(sense->response_code) != SSD_RCODE_DEFERRED)
1442		return (retval);
1443
1444	if (SSD_SENSE_KEY(sense->flags) == SKEY_NOT_READY &&
1445	    sense->asc == 0x4) {
1446		if (sense->ascq == 0x01)	{
1447			/*
1448			 * Unit In The Process Of Becoming Ready.
1449			 */
1450			printf("%s: waiting for pack to spin up...\n",
1451			    device_xname(sd->sc_dev));
1452			if (!callout_pending(&periph->periph_callout))
1453				scsipi_periph_freeze(periph, 1);
1454			callout_reset(&periph->periph_callout,
1455			    5 * hz, scsipi_periph_timed_thaw, periph);
1456			retval = ERESTART;
1457		} else if (sense->ascq == 0x02) {
1458			printf("%s: pack is stopped, restarting...\n",
1459			    device_xname(sd->sc_dev));
1460			s = splbio();
1461			periph->periph_flags |= PERIPH_RECOVERING;
1462			splx(s);
1463			error = scsipi_start(periph, SSS_START,
1464			    XS_CTL_URGENT|XS_CTL_HEAD_TAG|
1465			    XS_CTL_THAW_PERIPH|XS_CTL_FREEZE_PERIPH);
1466			if (error) {
1467				aprint_error_dev(sd->sc_dev,
1468					"unable to restart pack\n");
1469				retval = error;
1470			} else
1471				retval = ERESTART;
1472			s = splbio();
1473			periph->periph_flags &= ~PERIPH_RECOVERING;
1474			splx(s);
1475		}
1476	}
1477	if (SSD_SENSE_KEY(sense->flags) == SKEY_MEDIUM_ERROR &&
1478	    sense->asc == 0x31 &&
1479	    sense->ascq == 0x00)	{ /* maybe for any asq ? */
1480		/* Medium Format Corrupted */
1481		retval = EFTYPE;
1482	}
1483	return (retval);
1484}
1485
1486
1487static int
1488sdsize(dev_t dev)
1489{
1490	struct sd_softc *sd;
1491	int part, unit, omask;
1492	int size;
1493
1494	unit = SDUNIT(dev);
1495	sd = device_lookup_private(&sd_cd, unit);
1496	if (sd == NULL)
1497		return (-1);
1498
1499	if (!device_is_active(sd->sc_dev))
1500		return (-1);
1501
1502	part = SDPART(dev);
1503	omask = sd->sc_dk.dk_openmask & (1 << part);
1504
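	/*
	 * If this partition isn't already open, open it temporarily so
	 * the label is loaded, and close it again before returning.
	 */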
1505	if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0)
1506		return (-1);
1507	if ((sd->sc_periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1508		size = -1;
1509	else if (sd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
1510		size = -1;
1511	else
1512		size = sd->sc_dk.dk_label->d_partitions[part].p_size *
1513		    (sd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
1514	if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
1515		return (-1);
1516	return (size);
1517}
1518
1519/* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
1520static struct scsipi_xfer sx;
1521static int sddoingadump;
1522
1523/*
1524 * dump all of physical memory into the partition specified, starting
1525 * at offset 'dumplo' into the partition.
1526 */
1527static int
1528sddump(dev_t dev, daddr_t blkno, void *va, size_t size)
1529{
1530	struct sd_softc *sd;	/* disk unit to do the I/O */
1531	struct disklabel *lp;	/* disk's disklabel */
1532	int	unit, part;
1533	int	sectorsize;	/* size of a disk sector */
1534	int	nsects;		/* number of sectors in partition */
1535	int	sectoff;	/* sector offset of partition */
1536	int	totwrt;		/* total number of sectors left to write */
1537	int	nwrt;		/* current number of sectors to write */
1538	struct scsipi_rw_10 cmd;	/* write command */
1539	struct scsipi_xfer *xs;	/* ... convenience */
1540	struct scsipi_periph *periph;
1541	struct scsipi_channel *chan;
1542
1543	/* Check if recursive dump; if so, punt. */
1544	if (sddoingadump)
1545		return (EFAULT);
1546
1547	/* Mark as active early. */
1548	sddoingadump = 1;
1549
1550	unit = SDUNIT(dev);	/* Decompose unit & partition. */
1551	part = SDPART(dev);
1552
1553	/* Check for acceptable drive number. */
1554	sd = device_lookup_private(&sd_cd, unit);
1555	if (sd == NULL)
1556		return (ENXIO);
1557
1558	if (!device_is_active(sd->sc_dev))
1559		return (ENODEV);
1560
1561	periph = sd->sc_periph;
1562	chan = periph->periph_channel;
1563
1564	/* Make sure it was initialized. */
1565	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1566		return (ENXIO);
1567
1568	/* Convert to disk sectors.  Request must be a multiple of size. */
1569	lp = sd->sc_dk.dk_label;
1570	sectorsize = lp->d_secsize;
1571	if ((size % sectorsize) != 0)
1572		return (EFAULT);
1573	totwrt = size / sectorsize;
	blkno = dbtob(blkno) / sectorsize;	/* convert from DEV_BSIZE units to device sectors */
1575
1576	nsects = lp->d_partitions[part].p_size;
1577	sectoff = lp->d_partitions[part].p_offset;
1578
1579	/* Check transfer bounds against partition size. */
1580	if ((blkno < 0) || ((blkno + totwrt) > nsects))
1581		return (EINVAL);
1582
1583	/* Offset block number to start of partition. */
1584	blkno += sectoff;
1585
1586	xs = &sx;
1587
1588	while (totwrt > 0) {
1589		nwrt = totwrt;		/* XXX */
1590#ifndef	SD_DUMP_NOT_TRUSTED
1591		/*
1592		 *  Fill out the scsi command
1593		 */
1594		memset(&cmd, 0, sizeof(cmd));
1595		cmd.opcode = WRITE_10;
1596		_lto4b(blkno, cmd.addr);
1597		_lto2b(nwrt, cmd.length);
		/*
		 * Fill out the scsipi_xfer structure.
		 * Note: we cannot sleep as we may be an interrupt, so don't
		 * use scsipi_command() as it may want to wait for an xs.
		 */
1604		memset(xs, 0, sizeof(sx));
1605		xs->xs_control |= XS_CTL_NOSLEEP | XS_CTL_POLL |
1606		    XS_CTL_DATA_OUT;
1607		xs->xs_status = 0;
1608		xs->xs_periph = periph;
1609		xs->xs_retries = SDRETRIES;
1610		xs->timeout = 10000;	/* 10000 millisecs for a disk ! */
1611		xs->cmd = (struct scsipi_generic *)&cmd;
1612		xs->cmdlen = sizeof(cmd);
1613		xs->resid = nwrt * sectorsize;
1614		xs->error = XS_NOERROR;
1615		xs->bp = 0;
1616		xs->data = va;
1617		xs->datalen = nwrt * sectorsize;
1618		callout_init(&xs->xs_callout, 0);
1619
1620		/*
1621		 * Pass all this info to the scsi driver.
1622		 */
1623		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1624		if ((xs->xs_status & XS_STS_DONE) == 0 ||
1625		    xs->error != XS_NOERROR)
1626			return (EIO);
1627#else	/* SD_DUMP_NOT_TRUSTED */
1628		/* Let's just talk about this first... */
1629		printf("sd%d: dump addr 0x%x, blk %d\n", unit, va, blkno);
1630		delay(500 * 1000);	/* half a second */
1631#endif	/* SD_DUMP_NOT_TRUSTED */
1632
1633		/* update block count */
1634		totwrt -= nwrt;
1635		blkno += nwrt;
1636		va = (char *)va + sectorsize * nwrt;
1637	}
1638	sddoingadump = 0;
1639	return (0);
1640}
1641
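/*
 * MODE SENSE/MODE SELECT helpers: sd_mode_sense() picks the 10-byte
 * ("big") form when the periph only accepts big commands and does not
 * object to big MODE SENSE, and reports its choice through *big so the
 * matching sd_mode_select() call can use the same header layout.
 */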
1642static int
1643sd_mode_sense(struct sd_softc *sd, u_int8_t byte2, void *sense, size_t size,
1644    int page, int flags, int *big)
1645{
1646
1647	if ((sd->sc_periph->periph_quirks & PQUIRK_ONLYBIG) &&
1648	    !(sd->sc_periph->periph_quirks & PQUIRK_NOBIGMODESENSE)) {
1649		*big = 1;
1650		return scsipi_mode_sense_big(sd->sc_periph, byte2, page, sense,
1651		    size + sizeof(struct scsi_mode_parameter_header_10),
1652		    flags, SDRETRIES, 6000);
1653	} else {
1654		*big = 0;
1655		return scsipi_mode_sense(sd->sc_periph, byte2, page, sense,
1656		    size + sizeof(struct scsi_mode_parameter_header_6),
1657		    flags, SDRETRIES, 6000);
1658	}
1659}
1660
1661static int
1662sd_mode_select(struct sd_softc *sd, u_int8_t byte2, void *sense, size_t size,
1663    int flags, int big)
1664{
1665
1666	if (big) {
1667		struct scsi_mode_parameter_header_10 *header = sense;
1668
1669		_lto2b(0, header->data_length);
1670		return scsipi_mode_select_big(sd->sc_periph, byte2, sense,
1671		    size + sizeof(struct scsi_mode_parameter_header_10),
1672		    flags, SDRETRIES, 6000);
1673	} else {
1674		struct scsi_mode_parameter_header_6 *header = sense;
1675
1676		header->data_length = 0;
1677		return scsipi_mode_select(sd->sc_periph, byte2, sense,
1678		    size + sizeof(struct scsi_mode_parameter_header_6),
1679		    flags, SDRETRIES, 6000);
1680	}
1681}
1682
1683/*
1684 * sd_validate_blksize:
1685 *
 *	Validate the block size.  Print an error if a periph is specified.
1687 */
1688static int
1689sd_validate_blksize(struct scsipi_periph *periph, int len)
1690{
1691
1692	switch (len) {
1693	case 256:
1694	case 512:
1695	case 1024:
1696	case 2048:
1697	case 4096:
1698		return 1;
1699	}
1700
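	/*
	 * Not an acceptable size.  The len ^ (1 << (ffs(len) - 1)) test
	 * below is non-zero when len is not a power of two, in which case
	 * the size is "preposterous" rather than merely "unsupported".
	 */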
1701	if (periph) {
1702		scsipi_printaddr(periph);
1703		printf("%s sector size: 0x%x.  Defaulting to %d bytes.\n",
1704		    (len ^ (1 << (ffs(len) - 1))) ?
1705		    "preposterous" : "unsupported",
1706		    len, SD_DEFAULT_BLKSIZE);
1707	}
1708
1709	return 0;
1710}
1711
1712/*
1713 * sd_read_capacity:
1714 *
1715 *	Find out from the device what its capacity is.
1716 */
1717static u_int64_t
1718sd_read_capacity(struct scsipi_periph *periph, int *blksize, int flags)
1719{
1720	union {
1721		struct scsipi_read_capacity_10 cmd;
1722		struct scsipi_read_capacity_16 cmd16;
1723	} cmd;
1724	union {
1725		struct scsipi_read_capacity_10_data data;
1726		struct scsipi_read_capacity_16_data data16;
1727	} *datap;
1728	uint64_t rv;
1729
1730	memset(&cmd, 0, sizeof(cmd));
1731	cmd.cmd.opcode = READ_CAPACITY_10;
1732
	/*
	 * Don't allocate the data buffer on the stack; the lower driver
	 * layer might use the same stack, and if it uses a region in the
	 * same cacheline, cache flush ops against the data buffer won't
	 * work properly.
	 */
1739	datap = malloc(sizeof(*datap), M_TEMP, M_WAITOK);
1740	if (datap == NULL)
1741		return 0;
1742
	/*
	 * If the command works, interpret the result as a 4-byte
	 * number of blocks.
	 */
1747	rv = 0;
1748	memset(datap, 0, sizeof(datap->data));
1749	if (scsipi_command(periph, (void *)&cmd.cmd, sizeof(cmd.cmd),
1750	    (void *)datap, sizeof(datap->data), SCSIPIRETRIES, 20000, NULL,
1751	    flags | XS_CTL_DATA_IN | XS_CTL_SILENT) != 0)
1752		goto out;
1753
1754	if (_4btol(datap->data.addr) != 0xffffffff) {
1755		*blksize = _4btol(datap->data.length);
1756		rv = _4btol(datap->data.addr) + 1;
1757		goto out;
1758	}
1759
1760	/*
1761	 * Device is larger than can be reflected by READ CAPACITY (10).
1762	 * Try READ CAPACITY (16).
1763	 */
1764
1765	memset(&cmd, 0, sizeof(cmd));
1766	cmd.cmd16.opcode = READ_CAPACITY_16;
1767	cmd.cmd16.byte2 = SRC16_SERVICE_ACTION;
1768	_lto4b(sizeof(datap->data16), cmd.cmd16.len);
1769
1770	memset(datap, 0, sizeof(datap->data16));
1771	if (scsipi_command(periph, (void *)&cmd.cmd16, sizeof(cmd.cmd16),
1772	    (void *)datap, sizeof(datap->data16), SCSIPIRETRIES, 20000, NULL,
1773	    flags | XS_CTL_DATA_IN | XS_CTL_SILENT) != 0)
1774		goto out;
1775
1776	*blksize = _4btol(datap->data16.length);
1777	rv = _8btol(datap->data16.addr) + 1;
1778
1779 out:
1780	free(datap, M_TEMP);
1781	return rv;
1782}
1783
1784static int
1785sd_get_simplifiedparms(struct sd_softc *sd, struct disk_parms *dp, int flags)
1786{
1787	struct {
1788		struct scsi_mode_parameter_header_6 header;
1789		/* no block descriptor */
1790		u_int8_t pg_code; /* page code (should be 6) */
1791		u_int8_t pg_length; /* page length (should be 11) */
1792		u_int8_t wcd; /* bit0: cache disable */
1793		u_int8_t lbs[2]; /* logical block size */
1794		u_int8_t size[5]; /* number of log. blocks */
1795		u_int8_t pp; /* power/performance */
1796		u_int8_t flags;
1797		u_int8_t resvd;
1798	} scsipi_sense;
1799	u_int64_t blocks;
1800	int error, blksize;
1801
1802	/*
	 * sd_read_capacity (i.e. "read capacity") and mode sense page 6
1804	 * give the same information. Do both for now, and check
1805	 * for consistency.
1806	 * XXX probably differs for removable media
1807	 */
1808	dp->blksize = SD_DEFAULT_BLKSIZE;
1809	if ((blocks = sd_read_capacity(sd->sc_periph, &blksize, flags)) == 0)
1810		return (SDGP_RESULT_OFFLINE);		/* XXX? */
1811
1812	error = scsipi_mode_sense(sd->sc_periph, SMS_DBD, 6,
1813	    &scsipi_sense.header, sizeof(scsipi_sense),
1814	    flags, SDRETRIES, 6000);
1815
1816	if (error != 0)
1817		return (SDGP_RESULT_OFFLINE);		/* XXX? */
1818
1819	dp->blksize = blksize;
1820	if (!sd_validate_blksize(NULL, dp->blksize))
1821		dp->blksize = _2btol(scsipi_sense.lbs);
1822	if (!sd_validate_blksize(sd->sc_periph, dp->blksize))
1823		dp->blksize = SD_DEFAULT_BLKSIZE;
1824
1825	/*
1826	 * Create a pseudo-geometry.
1827	 */
1828	dp->heads = 64;
1829	dp->sectors = 32;
1830	dp->cyls = blocks / (dp->heads * dp->sectors);
1831	dp->disksize = _5btol(scsipi_sense.size);
1832	if (dp->disksize <= UINT32_MAX && dp->disksize != blocks) {
1833		printf("RBC size: mode sense=%llu, get cap=%llu\n",
1834		       (unsigned long long)dp->disksize,
1835		       (unsigned long long)blocks);
1836		dp->disksize = blocks;
1837	}
1838	dp->disksize512 = (dp->disksize * dp->blksize) / DEV_BSIZE;
1839
1840	return (SDGP_RESULT_OK);
1841}
1842
/*
 * Get the scsi driver to send a full inquiry to the device and use the
 * results to fill out the disk parameter structure.
 */
1847static int
1848sd_get_capacity(struct sd_softc *sd, struct disk_parms *dp, int flags)
1849{
1850	u_int64_t blocks;
1851	int error, blksize;
1852#if 0
1853	int i;
1854	u_int8_t *p;
1855#endif
1856
1857	dp->disksize = blocks = sd_read_capacity(sd->sc_periph, &blksize,
1858	    flags);
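	/*
	 * If READ CAPACITY returned nothing useful, fall back to READ
	 * FORMAT CAPACITIES, which lets us distinguish unformatted media
	 * from a device that is simply offline.
	 */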
1859	if (blocks == 0) {
1860		struct scsipi_read_format_capacities cmd;
1861		struct {
1862			struct scsipi_capacity_list_header header;
1863			struct scsipi_capacity_descriptor desc;
1864		} __packed data;
1865
1866		memset(&cmd, 0, sizeof(cmd));
1867		memset(&data, 0, sizeof(data));
1868		cmd.opcode = READ_FORMAT_CAPACITIES;
1869		_lto2b(sizeof(data), cmd.length);
1870
1871		error = scsipi_command(sd->sc_periph,
1872		    (void *)&cmd, sizeof(cmd), (void *)&data, sizeof(data),
1873		    SDRETRIES, 20000, NULL,
1874		    flags | XS_CTL_DATA_IN);
1875		if (error == EFTYPE) {
1876			/* Medium Format Corrupted, handle as not formatted */
1877			return (SDGP_RESULT_UNFORMATTED);
1878		}
1879		if (error || data.header.length == 0)
1880			return (SDGP_RESULT_OFFLINE);
1881
1882#if 0
1883printf("rfc: length=%d\n", data.header.length);
1884printf("rfc result:"); for (i = sizeof(struct scsipi_capacity_list_header) + data.header.length, p = (void *)&data; i; i--, p++) printf(" %02x", *p); printf("\n");
1885#endif
1886		switch (data.desc.byte5 & SCSIPI_CAP_DESC_CODE_MASK) {
1887		case SCSIPI_CAP_DESC_CODE_RESERVED:
1888		case SCSIPI_CAP_DESC_CODE_FORMATTED:
1889			break;
1890
1891		case SCSIPI_CAP_DESC_CODE_UNFORMATTED:
1892			return (SDGP_RESULT_UNFORMATTED);
1893
1894		case SCSIPI_CAP_DESC_CODE_NONE:
1895			return (SDGP_RESULT_OFFLINE);
1896		}
1897
1898		dp->disksize = blocks = _4btol(data.desc.nblks);
1899		if (blocks == 0)
1900			return (SDGP_RESULT_OFFLINE);		/* XXX? */
1901
1902		blksize = _3btol(data.desc.blklen);
1903
1904	} else if (!sd_validate_blksize(NULL, blksize)) {
1905		struct sd_mode_sense_data scsipi_sense;
1906		int big, bsize;
1907		struct scsi_general_block_descriptor *bdesc;
1908
1909		memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1910		error = sd_mode_sense(sd, 0, &scsipi_sense,
1911		    sizeof(scsipi_sense.blk_desc), 0, flags | XS_CTL_SILENT, &big);
1912		if (!error) {
1913			if (big) {
1914				bdesc = (void *)(&scsipi_sense.header.big + 1);
1915				bsize = _2btol(scsipi_sense.header.big.blk_desc_len);
1916			} else {
1917				bdesc = (void *)(&scsipi_sense.header.small + 1);
1918				bsize = scsipi_sense.header.small.blk_desc_len;
1919			}
1920
#if 0
			printf("page 0 sense:");
			for (i = sizeof(scsipi_sense),
			    p = (void *)&scsipi_sense; i; i--, p++)
				printf(" %02x", *p);
			printf("\n");
			printf("page 0 bsize=%d\n", bsize);
			printf("page 0 ok\n");
#endif
1926
1927			if (bsize >= 8) {
1928				blksize = _3btol(bdesc->blklen);
1929			}
1930		}
1931	}
1932
1933	if (!sd_validate_blksize(sd->sc_periph, blksize))
1934		blksize = SD_DEFAULT_BLKSIZE;
1935
1936	dp->blksize = blksize;
1937	dp->disksize512 = (blocks * dp->blksize) / DEV_BSIZE;
1938	return (0);
1939}
1940
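/*
 * Read the rigid disk geometry page (mode page 4) and use it to fill
 * in the cylinder and head counts; the sectors-per-track value is
 * derived from the total capacity.
 */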
1941static int
1942sd_get_parms_page4(struct sd_softc *sd, struct disk_parms *dp, int flags)
1943{
1944	struct sd_mode_sense_data scsipi_sense;
1945	int error;
1946	int big, byte2;
1947	size_t poffset;
1948	union scsi_disk_pages *pages;
1949
1950	byte2 = SMS_DBD;
1951again:
1952	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1953	error = sd_mode_sense(sd, byte2, &scsipi_sense,
1954	    (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) +
1955	    sizeof(scsipi_sense.pages.rigid_geometry), 4,
1956	    flags | XS_CTL_SILENT, &big);
1957	if (error) {
1958		if (byte2 == SMS_DBD) {
1959			/* No result; try once more with DBD off */
1960			byte2 = 0;
1961			goto again;
1962		}
1963		return (error);
1964	}
1965
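	/*
	 * The mode page follows the mode parameter header (6- or
	 * 10-byte form) and any block descriptors; compute its offset.
	 */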
1966	if (big) {
1967		poffset = sizeof scsipi_sense.header.big;
1968		poffset += _2btol(scsipi_sense.header.big.blk_desc_len);
1969	} else {
1970		poffset = sizeof scsipi_sense.header.small;
1971		poffset += scsipi_sense.header.small.blk_desc_len;
1972	}
1973
1974	if (poffset > sizeof(scsipi_sense) - sizeof(pages->rigid_geometry))
		return (ERESTART);
1976
1977	pages = (void *)((u_long)&scsipi_sense + poffset);
1978#if 0
1979	{
1980		size_t i;
1981		u_int8_t *p;
1982
1983		printf("page 4 sense:");
1984		for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i;
1985		    i--, p++)
1986			printf(" %02x", *p);
1987		printf("\n");
1988		printf("page 4 pg_code=%d sense=%p/%p\n",
1989		    pages->rigid_geometry.pg_code, &scsipi_sense, pages);
1990	}
1991#endif
1992
1993	if ((pages->rigid_geometry.pg_code & PGCODE_MASK) != 4)
1994		return (ERESTART);
1995
1996	SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
1997	    ("%d cyls, %d heads, %d precomp, %d red_write, %d land_zone\n",
1998	    _3btol(pages->rigid_geometry.ncyl),
1999	    pages->rigid_geometry.nheads,
2000	    _2btol(pages->rigid_geometry.st_cyl_wp),
2001	    _2btol(pages->rigid_geometry.st_cyl_rwc),
2002	    _2btol(pages->rigid_geometry.land_zone)));
2003
	/*
	 * KLUDGE!! (for zone recorded disks)
	 * Give a number of sectors so that sec * trks * cyls
	 * is <= disk_size; this can lead to wasted space!
	 * THINK ABOUT THIS!
	 */
2010	dp->heads = pages->rigid_geometry.nheads;
2011	dp->cyls = _3btol(pages->rigid_geometry.ncyl);
2012	if (dp->heads == 0 || dp->cyls == 0)
2013		return (ERESTART);
2014	dp->sectors = dp->disksize / (dp->heads * dp->cyls);	/* XXX */
2015
2016	dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
2017	if (dp->rot_rate == 0)
2018		dp->rot_rate = 3600;
2019
#if 0
	printf("page 4 ok\n");
#endif
2023	return (0);
2024}
2025
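/*
 * Read the flexible disk geometry page (mode page 5), typically
 * provided by removable devices, and use it to fill in the cylinder,
 * head and sectors-per-track counts.
 */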
2026static int
2027sd_get_parms_page5(struct sd_softc *sd, struct disk_parms *dp, int flags)
2028{
2029	struct sd_mode_sense_data scsipi_sense;
2030	int error;
2031	int big, byte2;
2032	size_t poffset;
2033	union scsi_disk_pages *pages;
2034
2035	byte2 = SMS_DBD;
2036again:
2037	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
	error = sd_mode_sense(sd, byte2, &scsipi_sense,
2039	    (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) +
2040	    sizeof(scsipi_sense.pages.flex_geometry), 5,
2041	    flags | XS_CTL_SILENT, &big);
2042	if (error) {
2043		if (byte2 == SMS_DBD) {
2044			/* No result; try once more with DBD off */
2045			byte2 = 0;
2046			goto again;
2047		}
2048		return (error);
2049	}
2050
2051	if (big) {
2052		poffset = sizeof scsipi_sense.header.big;
2053		poffset += _2btol(scsipi_sense.header.big.blk_desc_len);
2054	} else {
2055		poffset = sizeof scsipi_sense.header.small;
2056		poffset += scsipi_sense.header.small.blk_desc_len;
2057	}
2058
2059	if (poffset > sizeof(scsipi_sense) - sizeof(pages->flex_geometry))
		return (ERESTART);
2061
2062	pages = (void *)((u_long)&scsipi_sense + poffset);
2063#if 0
2064	{
2065		size_t i;
2066		u_int8_t *p;
2067
2068		printf("page 5 sense:");
2069		for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i;
2070		    i--, p++)
2071			printf(" %02x", *p);
2072		printf("\n");
2073		printf("page 5 pg_code=%d sense=%p/%p\n",
2074		    pages->flex_geometry.pg_code, &scsipi_sense, pages);
2075	}
2076#endif
2077
2078	if ((pages->flex_geometry.pg_code & PGCODE_MASK) != 5)
2079		return (ERESTART);
2080
2081	SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
2082	    ("%d cyls, %d heads, %d sec, %d bytes/sec\n",
	    _2btol(pages->flex_geometry.ncyl),
2084	    pages->flex_geometry.nheads,
2085	    pages->flex_geometry.ph_sec_tr,
2086	    _2btol(pages->flex_geometry.bytes_s)));
2087
2088	dp->heads = pages->flex_geometry.nheads;
2089	dp->cyls = _2btol(pages->flex_geometry.ncyl);
2090	dp->sectors = pages->flex_geometry.ph_sec_tr;
2091	if (dp->heads == 0 || dp->cyls == 0 || dp->sectors == 0)
2092		return (ERESTART);
2093
	dp->rot_rate = _2btol(pages->flex_geometry.rpm);
2095	if (dp->rot_rate == 0)
2096		dp->rot_rate = 3600;
2097
#if 0
	printf("page 5 ok\n");
#endif
2101	return (0);
2102}
2103
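/*
 * Fill out the disk parameter structure.  Simplified direct-access
 * (RBC) devices take the simplified path; everyone else gets the
 * capacity from READ CAPACITY and the geometry from mode page 4 or 5,
 * with a fabricated geometry as the last resort.
 */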
2104static int
2105sd_get_parms(struct sd_softc *sd, struct disk_parms *dp, int flags)
2106{
2107	int error;
2108
2109	/*
2110	 * If offline, the SDEV_MEDIA_LOADED flag will be
2111	 * cleared by the caller if necessary.
2112	 */
2113	if (sd->type == T_SIMPLE_DIRECT) {
2114		error = sd_get_simplifiedparms(sd, dp, flags);
2115		if (!error)
2116			disk_blocksize(&sd->sc_dk, dp->blksize);
2117		return (error);
2118	}
2119
2120	error = sd_get_capacity(sd, dp, flags);
2121	if (error)
2122		return (error);
2123
2124	disk_blocksize(&sd->sc_dk, dp->blksize);
2125
2126	if (sd->type == T_OPTICAL)
2127		goto page0;
2128
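	/*
	 * For removable devices try the flexible geometry page first;
	 * for fixed disks try the rigid geometry page first.
	 */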
2129	if (sd->sc_periph->periph_flags & PERIPH_REMOVABLE) {
2130		if (!sd_get_parms_page5(sd, dp, flags) ||
2131		    !sd_get_parms_page4(sd, dp, flags))
2132			goto setprops;
2133	} else {
2134		if (!sd_get_parms_page4(sd, dp, flags) ||
2135		    !sd_get_parms_page5(sd, dp, flags))
2136			goto setprops;
2137	}
2138
2139page0:
2140	printf("%s: fabricating a geometry\n", device_xname(sd->sc_dev));
2141	/* Try calling driver's method for figuring out geometry. */
2142	if (!sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom ||
2143	    !(*sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom)
2144		(sd->sc_periph, dp, dp->disksize)) {
		/*
		 * Use the Adaptec standard fictitious geometry.
		 * This really depends on which controller is in use
		 * (e.g. the 1542C is different), but we have to put
		 * SOMETHING here.
		 */
2150		dp->heads = 64;
2151		dp->sectors = 32;
2152		dp->cyls = dp->disksize / (64 * 32);
2153	}
2154	dp->rot_rate = 3600;
2155
2156setprops:
2157	sd_set_properties(sd);
2158
2159	return (SDGP_RESULT_OK);
2160}
2161
2162static int
2163sd_flush(struct sd_softc *sd, int flags)
2164{
2165	struct scsipi_periph *periph = sd->sc_periph;
2166	struct scsi_synchronize_cache_10 cmd;
2167
2168	/*
2169	 * If the device is SCSI-2, issue a SYNCHRONIZE CACHE.
2170	 * We issue with address 0 length 0, which should be
2171	 * interpreted by the device as "all remaining blocks
2172	 * starting at address 0".  We ignore ILLEGAL REQUEST
2173	 * in the event that the command is not supported by
2174	 * the device, and poll for completion so that we know
2175	 * that the cache has actually been flushed.
2176	 *
2177	 * Unless, that is, the device can't handle the SYNCHRONIZE CACHE
2178	 * command, as indicated by our quirks flags.
2179	 *
2180	 * XXX What about older devices?
2181	 */
2182	if (periph->periph_version < 2 ||
2183	    (periph->periph_quirks & PQUIRK_NOSYNCCACHE))
2184		return (0);
2185
2186	sd->flags |= SDF_FLUSHING;
2187	memset(&cmd, 0, sizeof(cmd));
2188	cmd.opcode = SCSI_SYNCHRONIZE_CACHE_10;
2189
2190	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
2191	    SDRETRIES, 100000, NULL, flags | XS_CTL_IGNORE_ILLEGAL_REQUEST));
2192}
2193
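/*
 * Read the caching mode page (page 8) and report the current and
 * changeable cache settings as DKCACHE_* bits.
 */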
2194static int
2195sd_getcache(struct sd_softc *sd, int *bitsp)
2196{
2197	struct scsipi_periph *periph = sd->sc_periph;
2198	struct sd_mode_sense_data scsipi_sense;
2199	int error, bits = 0;
2200	int big;
2201	union scsi_disk_pages *pages;
2202
2203	if (periph->periph_version < 2)
2204		return (EOPNOTSUPP);
2205
2206	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2207	error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2208	    sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
2209	if (error)
2210		return (error);
2211
2212	if (big)
2213		pages = (void *)(&scsipi_sense.header.big + 1);
2214	else
2215		pages = (void *)(&scsipi_sense.header.small + 1);
2216
2217	if ((pages->caching_params.flags & CACHING_RCD) == 0)
2218		bits |= DKCACHE_READ;
2219	if (pages->caching_params.flags & CACHING_WCE)
2220		bits |= DKCACHE_WRITE;
2221	if (pages->caching_params.pg_code & PGCODE_PS)
2222		bits |= DKCACHE_SAVE;
2223
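	/*
	 * Fetch the changeable values of the page to find out which
	 * of the cache bits can actually be modified.
	 */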
2224	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2225	error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2226	    sizeof(scsipi_sense.pages.caching_params),
2227	    SMS_PCTRL_CHANGEABLE|8, 0, &big);
2228	if (error == 0) {
2229		if (big)
2230			pages = (void *)(&scsipi_sense.header.big + 1);
2231		else
2232			pages = (void *)(&scsipi_sense.header.small + 1);
2233
2234		if (pages->caching_params.flags & CACHING_RCD)
2235			bits |= DKCACHE_RCHANGE;
2236		if (pages->caching_params.flags & CACHING_WCE)
2237			bits |= DKCACHE_WCHANGE;
2238	}
2239
2240	*bitsp = bits;
2241
2242	return (0);
2243}
2244
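/*
 * Update the caching mode page (page 8) to reflect the requested
 * read/write cache settings, saving the page if DKCACHE_SAVE is set.
 */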
2245static int
2246sd_setcache(struct sd_softc *sd, int bits)
2247{
2248	struct scsipi_periph *periph = sd->sc_periph;
2249	struct sd_mode_sense_data scsipi_sense;
2250	int error;
2251	uint8_t oflags, byte2 = 0;
2252	int big;
2253	union scsi_disk_pages *pages;
2254
2255	if (periph->periph_version < 2)
2256		return (EOPNOTSUPP);
2257
2258	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2259	error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2260	    sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
2261	if (error)
2262		return (error);
2263
2264	if (big)
2265		pages = (void *)(&scsipi_sense.header.big + 1);
2266	else
2267		pages = (void *)(&scsipi_sense.header.small + 1);
2268
2269	oflags = pages->caching_params.flags;
2270
2271	if (bits & DKCACHE_READ)
2272		pages->caching_params.flags &= ~CACHING_RCD;
2273	else
2274		pages->caching_params.flags |= CACHING_RCD;
2275
2276	if (bits & DKCACHE_WRITE)
2277		pages->caching_params.flags |= CACHING_WCE;
2278	else
2279		pages->caching_params.flags &= ~CACHING_WCE;
2280
2281	if (oflags == pages->caching_params.flags)
2282		return (0);
2283
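	/* Clear the PS bit; it is reserved in MODE SELECT data. */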
2284	pages->caching_params.pg_code &= PGCODE_MASK;
2285
2286	if (bits & DKCACHE_SAVE)
2287		byte2 |= SMS_SP;
2288
2289	return (sd_mode_select(sd, byte2|SMS_PF, &scsipi_sense,
2290	    sizeof(struct scsi_mode_page_header) +
2291	    pages->caching_params.pg_length, 0, big));
2292}
2293
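/*
 * Publish the current geometry as a "disk-info" property dictionary
 * attached to the device.
 */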
2294static void
2295sd_set_properties(struct sd_softc *sd)
2296{
2297	prop_dictionary_t disk_info, odisk_info, geom;
2298
2299	disk_info = prop_dictionary_create();
2300
2301	geom = prop_dictionary_create();
2302
2303	prop_dictionary_set_uint64(geom, "sectors-per-unit",
2304	    sd->params.disksize);
2305
2306	prop_dictionary_set_uint32(geom, "sector-size",
2307	    sd->params.blksize);
2308
2309	prop_dictionary_set_uint16(geom, "sectors-per-track",
2310	    sd->params.sectors);
2311
2312	prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
2313	    sd->params.heads);
2314
2315	prop_dictionary_set_uint64(geom, "cylinders-per-unit",
2316	    sd->params.cyls);
2317
2318	prop_dictionary_set(disk_info, "geometry", geom);
2319	prop_object_release(geom);
2320
2321	prop_dictionary_set(device_properties(sd->sc_dev),
2322	    "disk-info", disk_info);
2323
2324	/*
2325	 * Don't release disk_info here; we keep a reference to it.
2326	 * disk_detach() will release it when we go away.
2327	 */
2328
2329	odisk_info = sd->sc_dk.dk_info;
2330	sd->sc_dk.dk_info = disk_info;
2331	if (odisk_info)
2332		prop_object_release(odisk_info);
2333}
2334