/*	$NetBSD: ld_iop.c,v 1.40 2023/05/31 20:00:50 ad Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * I2O front-end for ld(4) driver, supporting random block storage class
 * devices.  Currently, this doesn't handle anything more complex than
 * fixed direct-access devices.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_iop.c,v 1.40 2023/05/31 20:00:50 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/endian.h>
#include <sys/dkio.h>
#include <sys/disk.h>
#include <sys/proc.h>

#include <sys/bus.h>

#include <dev/ldvar.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopvar.h>
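/*
 * Per-command timeout, in milliseconds (30 seconds).  The DDM timeout
 * base parameters programmed at attach time are this value scaled by a
 * further factor of 1000.
 */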
#define	LD_IOP_TIMEOUT		30*1000

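/*
 * Private sc_flags bits: LD_IOP_CLAIMED is set once we hold a claim on
 * the target; LD_IOP_NEW_EVTMASK is set when the IOP acknowledges an
 * event mask update.
 */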
#define	LD_IOP_CLAIMED		0x01
#define	LD_IOP_NEW_EVTMASK	0x02

struct ld_iop_softc {
	struct	ld_softc sc_ld;
	struct	iop_initiator sc_ii;
	struct	iop_initiator sc_eventii;
	int	sc_flags;
};

static void	ld_iop_adjqparam(device_t, int);
static void	ld_iop_attach(device_t, device_t, void *);
static int	ld_iop_detach(device_t, int);
static int	ld_iop_dump(struct ld_softc *, void *, int, int);
static int	ld_iop_flush(struct ld_softc *, bool);
static int	ld_iop_ioctl(struct ld_softc *, u_long, void *, int32_t, bool);
static void	ld_iop_intr(device_t, struct iop_msg *, void *);
static void	ld_iop_intr_event(device_t, struct iop_msg *, void *);
static int	ld_iop_match(device_t, cfdata_t, void *);
static int	ld_iop_start(struct ld_softc *, struct buf *);
static void	ld_iop_unconfig(struct ld_iop_softc *, int);

CFATTACH_DECL_NEW(ld_iop, sizeof(struct ld_iop_softc),
    ld_iop_match, ld_iop_attach, ld_iop_detach, NULL);

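/*
 * Strings describing the I2O RBS detailed status codes, indexed by the
 * `detail' field of the reply.
 */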
static const char * const ld_iop_errors[] = {
	"success",
	"media error",
	"access error",
	"device failure",
	"device not ready",
	"media not present",
	"media locked",
	"media failure",
	"protocol failure",
	"bus failure",
	"access violation",
	"media write protected",
	"device reset",
	"volume changed, waiting for acknowledgement",
	"timeout",
};

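/*
 * Match any I2O device of the random block storage class.
 */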
static int
ld_iop_match(device_t parent, cfdata_t match, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	return (ia->ia_class == I2O_CLASS_RANDOM_BLOCK_STORAGE);
}

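/*
 * Attach: register as an initiator with the IOP, sign up for event
 * notifications, query the device's parameters and hand the result to
 * the ld(4) midlayer.
 */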
static void
ld_iop_attach(device_t parent, device_t self, void *aux)
{
	struct iop_attach_args *ia = aux;
	struct ld_iop_softc *sc = device_private(self);
	struct iop_softc *iop = device_private(parent);
	struct ld_softc *ld = &sc->sc_ld;
	int rv, evreg, enable;
	const char *typestr, *fixedstr;
	u_int cachesz;
	u_int32_t timeoutbase, rwvtimeoutbase, rwvtimeout;
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		union {
			struct	i2o_param_rbs_cache_control cc;
			struct	i2o_param_rbs_device_info bdi;
		} p;
	} __packed param;

	ld->sc_dv = self;
	evreg = 0;

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = ld_iop_intr;
	sc->sc_ii.ii_adjqparam = ld_iop_adjqparam;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_ii);

	/* Register another initiator to handle events from the device. */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = ld_iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_eventii);

	rv = iop_util_eventreg(iop, &sc->sc_eventii,
	    I2O_EVENT_GEN_EVENT_MASK_MODIFIED |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		aprint_error_dev(self, "unable to register for events\n");
		goto bad;
	}
	evreg = 1;

	/*
	 * Start out with one queued command.  The `iop' driver will adjust
	 * the queue parameters once we're up and running.
	 */
	ld->sc_maxqueuecnt = 1;

	ld->sc_maxxfer = IOP_MAX_XFER;
	ld->sc_dump = ld_iop_dump;
	ld->sc_ioctl = ld_iop_ioctl;
	ld->sc_start = ld_iop_start;
	ld->sc_flags = LDF_MPSAFE;

	/* Say what the device is. */
	printf(":");
	iop_print_ident(iop, ia->ia_tid);

	/*
	 * Claim the device so that we don't get any nasty surprises.  Allow
	 * failure.
	 */
	rv = iop_util_claim(iop, &sc->sc_ii, 0,
	    I2O_UTIL_CLAIM_CAPACITY_SENSITIVE |
	    I2O_UTIL_CLAIM_NO_PEER_SERVICE |
	    I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
	    I2O_UTIL_CLAIM_PRIMARY_USER);
	sc->sc_flags = rv ? 0 : LD_IOP_CLAIMED;

	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_DEVICE_INFO,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	ld->sc_secsize = le32toh(param.p.bdi.blocksize);
	ld->sc_secperunit = le64toh(param.p.bdi.capacity) / ld->sc_secsize;

	switch (param.p.bdi.type) {
	case I2O_RBS_TYPE_DIRECT:
		typestr = "direct access";
		enable = 1;
		break;
	case I2O_RBS_TYPE_WORM:
		typestr = "WORM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_CDROM:
		typestr = "CD-ROM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_OPTICAL:
		typestr = "optical";
		enable = 0;
		break;
	default:
		typestr = "unknown";
		enable = 0;
		break;
	}

	if ((le32toh(param.p.bdi.capabilities) & I2O_RBS_CAP_REMOVABLE_MEDIA)
	    != 0) {
		/* ld->sc_flags |= LDF_REMOVABLE; */
		fixedstr = "removable";
		enable = 0;
	} else
		fixedstr = "fixed";

	printf(" %s, %s", typestr, fixedstr);

	/*
	 * Determine if the device has a private cache.  If so, print the
	 * cache size.  Even if the device doesn't appear to have a cache,
	 * we perform a flush at shutdown.
	 */
	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_CACHE_CONTROL,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	if ((cachesz = le32toh(param.p.cc.totalcachesize)) != 0)
		printf(", %dkB cache", cachesz >> 10);

	printf("\n");

	/*
	 * Configure the DDM's timeout functions to time out all commands
	 * after 30 seconds.
	 */
	timeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeout = 0;

	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &timeoutbase, sizeof(timeoutbase),
	    I2O_PARAM_RBS_OPERATION_timeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeoutbase, sizeof(rwvtimeoutbase),
	    I2O_PARAM_RBS_OPERATION_rwvtimeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeout, sizeof(rwvtimeout),
	    I2O_PARAM_RBS_OPERATION_rwvtimeout);

	if (enable)
		ld->sc_flags |= LDF_ENABLED;
	else
		aprint_error_dev(self, "device not yet supported\n");

	ldattach(ld, BUFQ_DISK_DEFAULT_STRAT);
	return;

 bad:
	ld_iop_unconfig(sc, evreg);
}

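/*
 * Undo the work of attach: release our claim on the target, mask off
 * event notifications and unregister both initiators.
 */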
static void
ld_iop_unconfig(struct ld_iop_softc *sc, int evreg)
{
	struct iop_softc *iop;

	iop = device_private(device_parent(sc->sc_ld.sc_dv));

	if ((sc->sc_flags & LD_IOP_CLAIMED) != 0)
		iop_util_claim(iop, &sc->sc_ii, 1,
		    I2O_UTIL_CLAIM_PRIMARY_USER);

	if (evreg) {
		/*
		 * Mask off events, and wait up to 5 seconds for a reply.
		 * Note that some adapters won't reply to this (XXX We
		 * should check the event capabilities).
		 */
		mutex_spin_enter(&iop->sc_intrlock);
		sc->sc_flags &= ~LD_IOP_NEW_EVTMASK;
		mutex_spin_exit(&iop->sc_intrlock);

		iop_util_eventreg(iop, &sc->sc_eventii,
		    I2O_EVENT_GEN_EVENT_MASK_MODIFIED);

		mutex_spin_enter(&iop->sc_intrlock);
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			cv_timedwait(&sc->sc_eventii.ii_cv,
			    &iop->sc_intrlock, hz * 5);
		mutex_spin_exit(&iop->sc_intrlock);
	}

	iop_initiator_unregister(iop, &sc->sc_eventii);
	iop_initiator_unregister(iop, &sc->sc_ii);
}

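/*
 * Detach: drain the ld(4) midlayer, abort requests still queued with
 * the IOP and tear down our initiator state.
 */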
static int
ld_iop_detach(device_t self, int flags)
{
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv;

	sc = device_private(self);
	iop = device_private(device_parent(self));

	if ((rv = ldbegindetach(&sc->sc_ld, flags)) != 0)
		return (rv);

	/*
	 * Abort any requests queued with the IOP, but allow requests that
	 * are already in progress to complete.
	 */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		iop_util_abort(iop, &sc->sc_ii, 0, 0,
		    I2O_UTIL_ABORT_WILD | I2O_UTIL_ABORT_CLEAN);

	ldenddetach(&sc->sc_ld);

	/* Un-claim the target, and un-register our initiators. */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		ld_iop_unconfig(sc, 1);

	return (0);
}

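/*
 * Start a transfer: build an I2O RBS block read/write message frame
 * for the buf and post it to the IOP.
 */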
static int
ld_iop_start(struct ld_softc *ld, struct buf *bp)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_read *mf;
	u_int rv, flags, write;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = device_private(ld->sc_dv);
	iop = device_private(device_parent(ld->sc_dv));

	im = iop_msg_alloc(iop, 0);
	im->im_dvcontext = bp;

	write = ((bp->b_flags & B_READ) == 0);
	ba = (u_int64_t)bp->b_rawblkno * ld->sc_secsize;

	/*
	 * Write through the cache when performing synchronous writes.  When
	 * performing a read, we don't request that the DDM cache the data,
	 * as there's little advantage to it.
	 */
	if (write) {
		if ((bp->b_flags & B_ASYNC) == 0)
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WT;
		else
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WB;
	} else
		flags = 0;

	/*
	 * Fill the message frame.  We can use the block_read structure for
	 * both reads and writes, as it's almost identical to the
	 * block_write structure.
	 */
	mf = (struct i2o_rbs_block_read *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_read);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid,
	    write ? I2O_RBS_BLOCK_WRITE : I2O_RBS_BLOCK_READ);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = flags | (1 << 16);		/* flags & time multiplier */
	mf->datasize = bp->b_bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	/* Map the data transfer and enqueue the command. */
	rv = iop_msg_map_bio(iop, im, mb, bp->b_data, bp->b_bcount, write);
	if (rv == 0) {
		if ((rv = iop_post(iop, mb)) != 0) {
			iop_msg_unmap(iop, im);
			iop_msg_free(iop, im);
		}
	}
	return (rv);
}

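/*
 * Dump routine: write blocks to the device, polling for completion,
 * for use from crash dump context.
 */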
static int
ld_iop_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_write *mf;
	int rv, bcount;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = device_private(ld->sc_dv);
	iop = device_private(device_parent(ld->sc_dv));
	bcount = blkcnt * ld->sc_secsize;
	ba = (u_int64_t)blkno * ld->sc_secsize;
	im = iop_msg_alloc(iop, IM_POLL);

	mf = (struct i2o_rbs_block_write *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_write);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_BLOCK_WRITE);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = I2O_RBS_BLOCK_WRITE_CACHE_WT | (1 << 16);
	mf->datasize = bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	if ((rv = iop_msg_map(iop, im, mb, data, bcount, 1, NULL)) != 0) {
		iop_msg_free(iop, im);
		return (rv);
	}

	rv = iop_msg_post(iop, im, mb, LD_IOP_TIMEOUT * 2);
	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	return (rv);
}

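/*
 * Flush the DDM's cache, polling for completion if we can't sleep.
 */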
static int
ld_iop_flush(struct ld_softc *ld, bool poll)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_cache_flush mf;
	int rv;

	sc = device_private(ld->sc_dv);
	iop = device_private(device_parent(ld->sc_dv));
	im = iop_msg_alloc(iop, poll ? IM_POLL : IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_rbs_cache_flush);
	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_CACHE_FLUSH);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = 1 << 16;			/* time multiplier */

	/* Ancient disks will return an error here. */
	rv = iop_msg_post(iop, im, &mf, LD_IOP_TIMEOUT * 2);
	iop_msg_free(iop, im);
	return (rv);
}

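/*
 * Handle the disk ioctls that ld(4) passes down to us; currently only
 * DIOCCACHESYNC.
 */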
static int
ld_iop_ioctl(struct ld_softc *ld, u_long cmd, void *addr, int32_t flag, bool poll)
{
	int error;

	switch (cmd) {
	case DIOCCACHESYNC:
		error = ld_iop_flush(ld, poll);
		break;

	default:
		error = EPASSTHROUGH;
		break;
	}

	return error;
}

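/*
 * Completion interrupt for block read/write messages: decode the reply
 * status, finish the buf and hand it back to the midlayer.
 */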
static void
ld_iop_intr(device_t dv, struct iop_msg *im, void *reply)
{
	struct i2o_rbs_reply *rb;
	struct buf *bp;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int err, detail;
	const char *errstr;

	rb = reply;
	bp = im->im_dvcontext;
	sc = device_private(dv);
	iop = device_private(device_parent(dv));

	err = ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0);

	if (!err && rb->reqstatus != I2O_STATUS_SUCCESS) {
		detail = le16toh(rb->detail);
		if (detail >= __arraycount(ld_iop_errors))
			errstr = "<unknown>";
		else
			errstr = ld_iop_errors[detail];
		aprint_error_dev(dv, "error 0x%04x: %s\n", detail, errstr);
		err = 1;
	}

	if (err) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = bp->b_bcount - le32toh(rb->transfercount);

	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	lddone(&sc->sc_ld, bp);
}

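/*
 * Handle an event notification from the device.  An event mask
 * modification completes a pending ld_iop_unconfig(); anything else is
 * just logged.
 */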
static void
ld_iop_intr_event(device_t dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	sc = device_private(dv);

	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED) {
		iop = device_private(device_parent(dv));
		mutex_spin_enter(&iop->sc_intrlock);
		sc->sc_flags |= LD_IOP_NEW_EVTMASK;
		cv_broadcast(&sc->sc_eventii.ii_cv);
		mutex_spin_exit(&iop->sc_intrlock);
		return;
	}

	printf("%s: event 0x%08x received\n", device_xname(dv), event);
}

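/*
 * The IOP driver has adjusted our share of the inbound message queue;
 * pass the new maximum command count along to ld(4).
 */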
static void
ld_iop_adjqparam(device_t dv, int mpi)
{
	struct ld_iop_softc *sc = device_private(dv);
	struct iop_softc *iop = device_private(device_parent(dv));
	struct ld_softc *ld = &sc->sc_ld;

	/*
	 * AMI controllers seem to lose the plot if you hand off lots of
	 * queued commands.
	 */
	if (le16toh(I2O_ORG_AMI) == iop->sc_status.orgid && mpi > 64)
		mpi = 64;

	ldadjqparam(ld, mpi);
}