/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright 2013 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bus_dma.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/vmem.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_sim.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <powerpc/pseries/phyp-hvcall.h>

struct vscsi_softc;

/* VSCSI CRQ format from table 260 of PAPR spec 2.4 (page 760) */
struct vscsi_crq {
	uint8_t valid;
	uint8_t format;
	uint8_t reserved;
	uint8_t status;
	uint16_t timeout;
	uint16_t iu_length;
	uint64_t iu_data;
};
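
/*
 * Encodings used by this driver (see the CRQ handling below): "valid" is
 * 0x80 for a command/response entry and 0xc0 for a transport event such
 * as initialization; "format" is 0x01 for SRP IUs and 0x02 for management
 * datagrams (MADs).
 */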

struct vscsi_xfer {
	TAILQ_ENTRY(vscsi_xfer) queue;
	struct vscsi_softc *sc;
	union ccb *ccb;
	bus_dmamap_t dmamap;
	uint64_t tag;

	vmem_addr_t srp_iu_offset;
	vmem_size_t srp_iu_size;
};

TAILQ_HEAD(vscsi_xferq, vscsi_xfer);

struct vscsi_softc {
	device_t	dev;
	struct cam_devq *devq;
	struct cam_sim	*sim;
	struct cam_path	*path;
	struct mtx io_lock;

	cell_t		unit;
	int		bus_initialized;
	int		bus_logged_in;
	int		max_transactions;

	int		irqid;
	struct resource	*irq;
	void		*irq_cookie;

	bus_dma_tag_t	crq_tag;
	struct vscsi_crq *crq_queue;
	int		n_crqs, cur_crq;
	bus_dmamap_t	crq_map;
	bus_addr_t	crq_phys;

	vmem_t		*srp_iu_arena;
	void		*srp_iu_queue;
	bus_addr_t	srp_iu_phys;

	bus_dma_tag_t	data_tag;

	struct vscsi_xfer loginxp;
	struct vscsi_xfer *xfer;
	struct vscsi_xferq active_xferq;
	struct vscsi_xferq free_xferq;
};

struct srp_login {
	uint8_t type;
	uint8_t reserved[7];
	uint64_t tag;
	uint64_t max_cmd_length;
	uint32_t reserved2;
	uint16_t buffer_formats;
	uint8_t flags;
	uint8_t reserved3[5];
	uint8_t initiator_port_id[16];
	uint8_t target_port_id[16];
} __packed;

struct srp_login_rsp {
	uint8_t type;
	uint8_t reserved[3];
	uint32_t request_limit_delta;
	uint64_t tag;	/* 8 bytes per the SRP spec; a 1-byte tag would
			 * misalign the fields that follow */
	uint32_t max_i_to_t_len;
	uint32_t max_t_to_i_len;
	uint16_t buffer_formats;
	uint8_t flags;
	/* Some reserved bits follow */
} __packed;

struct srp_cmd {
	uint8_t type;
	uint8_t flags1;
	uint8_t reserved[3];
	uint8_t formats;
	uint8_t out_buffer_count;
	uint8_t in_buffer_count;
	uint64_t tag;
	uint32_t reserved2;
	uint64_t lun;
	uint8_t reserved3[3];
	uint8_t additional_cdb;
	uint8_t cdb[16];
	uint8_t data_payload[0];
} __packed;

struct srp_rsp {
	uint8_t type;
	uint8_t reserved[3];
	uint32_t request_limit_delta;
	uint64_t tag;
	uint16_t reserved2;
	uint8_t flags;
	uint8_t status;
	uint32_t data_out_resid;
	uint32_t data_in_resid;
	uint32_t sense_data_len;
	uint32_t response_data_len;
	uint8_t data_payload[0];
} __packed;

struct srp_tsk_mgmt {
	uint8_t type;
	uint8_t reserved[7];
	uint64_t tag;
	uint32_t reserved2;
	uint64_t lun;
	uint8_t reserved3[2];
	uint8_t function;
	uint8_t reserved4;
	uint64_t manage_tag;
	uint64_t reserved5;
} __packed;

/* Message code type */
#define SRP_LOGIN_REQ	0x00
#define SRP_TSK_MGMT	0x01
#define SRP_CMD		0x02
#define SRP_I_LOGOUT	0x03

#define SRP_LOGIN_RSP	0xC0
#define SRP_RSP		0xC1
#define SRP_LOGIN_REJ	0xC2

#define SRP_T_LOGOUT	0x80
#define SRP_CRED_REQ	0x81
#define SRP_AER_REQ	0x82

#define SRP_CRED_RSP	0x41
#define SRP_AER_RSP	0x42

/* Flags for srp_rsp flags field */
#define SRP_RSPVALID	0x01
#define SRP_SNSVALID	0x02
#define SRP_DOOVER	0x04
#define SRP_DOUNDER	0x08
#define SRP_DIOVER	0x10
#define SRP_DIUNDER	0x20

#define	MAD_SUCCESS			0x00
#define	MAD_NOT_SUPPORTED		0xf1
#define	MAD_FAILED			0xf7

#define	MAD_EMPTY_IU			0x01
#define	MAD_ERROR_LOGGING_REQUEST	0x02
#define	MAD_ADAPTER_INFO_REQUEST	0x03
#define	MAD_CAPABILITIES_EXCHANGE	0x05
#define	MAD_PHYS_ADAP_INFO_REQUEST	0x06
#define	MAD_TAPE_PASSTHROUGH_REQUEST	0x07
#define	MAD_ENABLE_FAST_FAIL		0x08

static int	vscsi_probe(device_t);
static int	vscsi_attach(device_t);
static int	vscsi_detach(device_t);
static void	vscsi_cam_action(struct cam_sim *, union ccb *);
static void	vscsi_cam_poll(struct cam_sim *);
static void	vscsi_intr(void *arg);
static void	vscsi_check_response_queue(struct vscsi_softc *sc);
static void	vscsi_setup_bus(struct vscsi_softc *sc);

static void	vscsi_srp_login(struct vscsi_softc *sc);
static void	vscsi_crq_load_cb(void *, bus_dma_segment_t *, int, int);
static void	vscsi_scsi_command(void *xxp, bus_dma_segment_t *segs,
		    int nsegs, int err);
static void	vscsi_task_management(struct vscsi_softc *sc, union ccb *ccb);
static void	vscsi_srp_response(struct vscsi_xfer *, struct vscsi_crq *);

static devclass_t	vscsi_devclass;
static device_method_t	vscsi_methods[] = {
	DEVMETHOD(device_probe,		vscsi_probe),
	DEVMETHOD(device_attach,	vscsi_attach),
	DEVMETHOD(device_detach,	vscsi_detach),

	DEVMETHOD_END
};
static driver_t vscsi_driver = {
	"vscsi",
	vscsi_methods,
	sizeof(struct vscsi_softc)
};
DRIVER_MODULE(vscsi, vdevice, vscsi_driver, vscsi_devclass, 0, 0);
MALLOC_DEFINE(M_VSCSI, "vscsi", "CAM device queue for VSCSI");

static int
vscsi_probe(device_t dev)
{

	if (!ofw_bus_is_compatible(dev, "IBM,v-scsi"))
		return (ENXIO);

	device_set_desc(dev, "POWER Hypervisor Virtual SCSI Bus");
	return (0);
}

static int
vscsi_attach(device_t dev)
{
	struct vscsi_softc *sc;
	struct vscsi_xfer *xp;
	int error, i;

	sc = device_get_softc(dev);
	if (sc == NULL)
		return (EINVAL);

	sc->dev = dev;
	mtx_init(&sc->io_lock, "vscsi", NULL, MTX_DEF);

	/* Get properties */
	OF_getencprop(ofw_bus_get_node(dev), "reg", &sc->unit,
	    sizeof(sc->unit));

	/* Set up the interrupt */
	sc->irqid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
	    RF_ACTIVE);

	if (!sc->irq) {
		device_printf(dev, "Could not allocate IRQ\n");
		mtx_destroy(&sc->io_lock);
		return (ENXIO);
	}

	bus_setup_intr(dev, sc->irq, INTR_TYPE_CAM | INTR_MPSAFE |
	    INTR_ENTROPY, NULL, vscsi_intr, sc, &sc->irq_cookie);

	/* Data DMA */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
	    256, BUS_SPACE_MAXSIZE_32BIT, 0, busdma_lock_mutex, &sc->io_lock,
	    &sc->data_tag);

	TAILQ_INIT(&sc->active_xferq);
	TAILQ_INIT(&sc->free_xferq);

	/* First XFER for login data */
	sc->loginxp.sc = sc;
	bus_dmamap_create(sc->data_tag, 0, &sc->loginxp.dmamap);
	TAILQ_INSERT_TAIL(&sc->free_xferq, &sc->loginxp, queue);

	/* CRQ area */
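	/*
	 * A single 8-page allocation backs both the CRQ ring (the first
	 * page) and the SRP IU arena carved out of the remaining pages in
	 * vscsi_crq_load_cb().
	 */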
	error = bus_dma_tag_create(bus_get_dma_tag(dev), PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 8*PAGE_SIZE,
	    1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->crq_tag);
	error = bus_dmamem_alloc(sc->crq_tag, (void **)&sc->crq_queue,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->crq_map);
	sc->crq_phys = 0;
	sc->n_crqs = 0;
	error = bus_dmamap_load(sc->crq_tag, sc->crq_map, sc->crq_queue,
	    8*PAGE_SIZE, vscsi_crq_load_cb, sc, 0);

	mtx_lock(&sc->io_lock);
	vscsi_setup_bus(sc);
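	/*
	 * vscsi_setup_bus() has completed the SRP login, so
	 * sc->max_transactions now holds the queue depth granted by the
	 * target (see vscsi_login_response()).
	 */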
	sc->xfer = malloc(sizeof(sc->xfer[0])*sc->max_transactions, M_VSCSI,
	    M_NOWAIT);
	for (i = 0; i < sc->max_transactions; i++) {
		xp = &sc->xfer[i];
		xp->sc = sc;

		error = bus_dmamap_create(sc->data_tag, 0, &xp->dmamap);
		if (error) {
			device_printf(dev, "Could not create DMA map (%d)\n",
			    error);
			break;
		}

		TAILQ_INSERT_TAIL(&sc->free_xferq, xp, queue);
	}
	mtx_unlock(&sc->io_lock);

	/* Allocate CAM bits */
	if ((sc->devq = cam_simq_alloc(sc->max_transactions)) == NULL)
		return (ENOMEM);

	sc->sim = cam_sim_alloc(vscsi_cam_action, vscsi_cam_poll, "vscsi", sc,
				device_get_unit(dev), &sc->io_lock,
				sc->max_transactions, sc->max_transactions,
				sc->devq);
	if (sc->sim == NULL) {
		cam_simq_free(sc->devq);
		sc->devq = NULL;
		device_printf(dev, "CAM SIM attach failed\n");
		return (EINVAL);
	}

	mtx_lock(&sc->io_lock);
	if (xpt_bus_register(sc->sim, dev, 0) != 0) {
		device_printf(dev, "XPT bus registration failed\n");
		cam_sim_free(sc->sim, FALSE);
		sc->sim = NULL;
		cam_simq_free(sc->devq);
		sc->devq = NULL;
		mtx_unlock(&sc->io_lock);
		return (EINVAL);
	}
	mtx_unlock(&sc->io_lock);

	return (0);
}

static int
vscsi_detach(device_t dev)
{
	struct vscsi_softc *sc;

	sc = device_get_softc(dev);
	if (sc == NULL)
		return (EINVAL);

	if (sc->sim != NULL) {
		mtx_lock(&sc->io_lock);
		xpt_bus_deregister(cam_sim_path(sc->sim));
		cam_sim_free(sc->sim, FALSE);
		sc->sim = NULL;
		mtx_unlock(&sc->io_lock);
	}

	if (sc->devq != NULL) {
		cam_simq_free(sc->devq);
		sc->devq = NULL;
	}

	mtx_destroy(&sc->io_lock);

	return (0);
}

static void
vscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct vscsi_softc *sc = cam_sim_softc(sim);

	mtx_assert(&sc->io_lock, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_TAG_ABLE;
		cpi->hba_misc = PIM_EXTLUNS;
		cpi->target_sprt = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		cpi->max_lun = 0;
		cpi->initiator_id = ~0;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "IBM", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SRP;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC4;
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_RESET_BUS:
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_RESET_DEV:
		ccb->ccb_h.status = CAM_REQ_INPROG;
		vscsi_task_management(sc, ccb);
		return;
	case XPT_GET_TRAN_SETTINGS:
		ccb->cts.protocol = PROTO_SCSI;
		ccb->cts.protocol_version = SCSI_REV_SPC4;
		ccb->cts.transport = XPORT_SRP;
		ccb->cts.transport_version = 0;
		ccb->cts.proto_specific.valid = 0;
		ccb->cts.xport_specific.valid = 0;
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	case XPT_SCSI_IO:
	{
		struct vscsi_xfer *xp;

		ccb->ccb_h.status = CAM_REQ_INPROG;

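		/*
		 * The SIM queue is sized to max_transactions (see
		 * vscsi_attach()), so a free transfer should always be
		 * available here; running dry indicates a logic error.
		 */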
		xp = TAILQ_FIRST(&sc->free_xferq);
		if (xp == NULL)
			panic("SCSI queue flooded");
		xp->ccb = ccb;
		TAILQ_REMOVE(&sc->free_xferq, xp, queue);
		TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
		bus_dmamap_load_ccb(sc->data_tag, xp->dmamap,
		    ccb, vscsi_scsi_command, xp, 0);

		return;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}

	xpt_done(ccb);
	return;
}

static void
vscsi_srp_login(struct vscsi_softc *sc)
{
	struct vscsi_xfer *xp;
	struct srp_login *login;
	struct vscsi_crq crq;
	int err;

	mtx_assert(&sc->io_lock, MA_OWNED);

	xp = TAILQ_FIRST(&sc->free_xferq);
	if (xp == NULL)
		panic("SCSI queue flooded");
	xp->ccb = NULL;
	TAILQ_REMOVE(&sc->free_xferq, xp, queue);
	TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);

	/* Set up command */
	xp->srp_iu_size = 64;
	crq.iu_length = htobe16(xp->srp_iu_size);
	err = vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
	    M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
	if (err)
		panic("Error during VMEM allocation (%d)", err);

	login = (struct srp_login *)((uint8_t *)xp->sc->srp_iu_queue +
	    (uintptr_t)xp->srp_iu_offset);
	bzero(login, xp->srp_iu_size);
	login->type = SRP_LOGIN_REQ;
	login->tag = (uint64_t)(xp);
	login->max_cmd_length = htobe64(256);
	login->buffer_formats = htobe16(0x1 | 0x2); /* Direct and indirect */
	login->flags = 0;

	/* Create CRQ entry */
	crq.valid = 0x80;
	crq.format = 0x01;
	crq.iu_data = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset);
	bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);

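	/*
	 * H_SEND_CRQ takes the 16-byte CRQ entry as two 8-byte register
	 * arguments; on a big-endian host be64toh() is a no-op and the
	 * casts simply reinterpret the in-memory entry as those two words.
	 */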
	err = phyp_hcall(H_SEND_CRQ, xp->sc->unit,
	    be64toh(((uint64_t *)(&crq))[0]),
	    be64toh(((uint64_t *)(&crq))[1]));
	if (err != 0)
		panic("CRQ send failure (%d)", err);
}

static void
vscsi_task_management(struct vscsi_softc *sc, union ccb *ccb)
{
	struct srp_tsk_mgmt *cmd;
	struct vscsi_xfer *xp;
	struct vscsi_crq crq;
	int err;

	mtx_assert(&sc->io_lock, MA_OWNED);

	xp = TAILQ_FIRST(&sc->free_xferq);
	if (xp == NULL)
		panic("SCSI queue flooded");
	xp->ccb = ccb;
	TAILQ_REMOVE(&sc->free_xferq, xp, queue);
	TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);

	xp->srp_iu_size = sizeof(*cmd);
	crq.iu_length = htobe16(xp->srp_iu_size);
	err = vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
	    M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
	if (err)
		panic("Error during VMEM allocation (%d)", err);

	cmd = (struct srp_tsk_mgmt *)((uint8_t *)xp->sc->srp_iu_queue +
	    (uintptr_t)xp->srp_iu_offset);
	bzero(cmd, xp->srp_iu_size);
	cmd->type = SRP_TSK_MGMT;
	cmd->tag = (uint64_t)xp;
	cmd->lun = htobe64(CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));

	switch (ccb->ccb_h.func_code) {
	case XPT_RESET_DEV:
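		/* 0x08 is LOGICAL UNIT RESET in the SRP task management IU */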
		cmd->function = 0x08;
		break;
	default:
		panic("Unimplemented code %d", ccb->ccb_h.func_code);
		break;
	}

	bus_dmamap_sync(xp->sc->crq_tag, xp->sc->crq_map, BUS_DMASYNC_PREWRITE);

	/* Create CRQ entry */
	crq.valid = 0x80;
	crq.format = 0x01;
	crq.iu_data = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset);

	err = phyp_hcall(H_SEND_CRQ, xp->sc->unit,
	    be64toh(((uint64_t *)(&crq))[0]),
	    be64toh(((uint64_t *)(&crq))[1]));
	if (err != 0)
		panic("CRQ send failure (%d)", err);
}

static void
vscsi_scsi_command(void *xxp, bus_dma_segment_t *segs, int nsegs, int err)
{
	struct vscsi_xfer *xp = xxp;
	uint8_t *cdb;
	union ccb *ccb = xp->ccb;
	struct srp_cmd *cmd;
	uint64_t chunk_addr;
	uint32_t chunk_size;
	int desc_start, i;
	struct vscsi_crq crq;

	KASSERT(err == 0, ("DMA error %d\n", err));

	mtx_assert(&xp->sc->io_lock, MA_OWNED);

	cdb = (ccb->ccb_h.flags & CAM_CDB_POINTER) ?
	    ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes;

	/* Command format from Table 20, page 37 of SRP spec */
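	/*
	 * The IU is the 48-byte fixed SRP_CMD header, any CDB bytes past
	 * the first 16, then either one 16-byte direct descriptor or a
	 * 20-byte indirect header followed by an nsegs-entry table.
	 */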
	xp->srp_iu_size = 48 + ((nsegs > 1) ? 20 : 16) +
	    ((ccb->csio.cdb_len > 16) ? (ccb->csio.cdb_len - 16) : 0);
	crq.iu_length = htobe16(xp->srp_iu_size);
	if (nsegs > 1)
		xp->srp_iu_size += nsegs*16;
	xp->srp_iu_size = roundup(xp->srp_iu_size, 16);
	err = vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
	    M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
	if (err)
		panic("Error during VMEM allocation (%d)", err);

	cmd = (struct srp_cmd *)((uint8_t *)xp->sc->srp_iu_queue +
	    (uintptr_t)xp->srp_iu_offset);
	bzero(cmd, xp->srp_iu_size);
	cmd->type = SRP_CMD;
	if (ccb->csio.cdb_len > 16)
		cmd->additional_cdb = (ccb->csio.cdb_len - 16) << 2;
	memcpy(cmd->cdb, cdb, ccb->csio.cdb_len);

	cmd->tag = (uint64_t)(xp); /* Let the responder find this again */
	cmd->lun = htobe64(CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));

	if (nsegs > 1) {
		/* Use indirect descriptors */
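		/*
		 * Indirect header layout, starting after any extra CDB
		 * bytes: an 8-byte table address, a 4-byte handle (left
		 * zero), the 4-byte table length, then the 4-byte total
		 * data length; the descriptor table itself follows at
		 * desc_start + 20.
		 */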
		switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
		case CAM_DIR_OUT:
			cmd->formats = (2 << 4);
			break;
		case CAM_DIR_IN:
			cmd->formats = 2;
			break;
		default:
			panic("Does not support bidirectional commands (%d)",
			    ccb->ccb_h.flags & CAM_DIR_MASK);
			break;
		}

		desc_start = ((ccb->csio.cdb_len > 16) ?
		    ccb->csio.cdb_len - 16 : 0);
		chunk_addr = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset +
		    20 + desc_start + sizeof(*cmd));
		chunk_size = htobe32(16*nsegs);
		memcpy(&cmd->data_payload[desc_start], &chunk_addr, 8);
		memcpy(&cmd->data_payload[desc_start+12], &chunk_size, 4);
		chunk_size = 0;
		for (i = 0; i < nsegs; i++)
			chunk_size += segs[i].ds_len;
		chunk_size = htobe32(chunk_size);
		memcpy(&cmd->data_payload[desc_start+16], &chunk_size, 4);
		desc_start += 20;
		for (i = 0; i < nsegs; i++) {
			chunk_addr = htobe64(segs[i].ds_addr);
			chunk_size = htobe32(segs[i].ds_len);

			memcpy(&cmd->data_payload[desc_start + 16*i],
			    &chunk_addr, 8);
			/* Handle (bytes 8-11) stays zero from the bzero() */
			memcpy(&cmd->data_payload[desc_start + 16*i + 12],
			    &chunk_size, 4);
		}
	} else if (nsegs == 1) {
		switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
		case CAM_DIR_OUT:
			cmd->formats = (1 << 4);
			break;
		case CAM_DIR_IN:
			cmd->formats = 1;
			break;
		default:
			panic("Does not support bidirectional commands (%d)",
			    ccb->ccb_h.flags & CAM_DIR_MASK);
			break;
		}

		/*
		 * Memory descriptor:
		 * 8 byte address
		 * 4 byte handle
		 * 4 byte length
		 */

		chunk_addr = htobe64(segs[0].ds_addr);
		chunk_size = htobe32(segs[0].ds_len);
		desc_start = ((ccb->csio.cdb_len > 16) ?
		    ccb->csio.cdb_len - 16 : 0);

		memcpy(&cmd->data_payload[desc_start], &chunk_addr, 8);
		/* Handle (bytes 8-11) stays zero from the bzero() */
		memcpy(&cmd->data_payload[desc_start+12], &chunk_size, 4);
		KASSERT(xp->srp_iu_size >= 48 + ((ccb->csio.cdb_len > 16) ?
		    ccb->csio.cdb_len : 16), ("SRP IU command length"));
	} else {
		cmd->formats = 0;
	}
	bus_dmamap_sync(xp->sc->crq_tag, xp->sc->crq_map, BUS_DMASYNC_PREWRITE);

	/* Create CRQ entry */
	crq.valid = 0x80;
	crq.format = 0x01;
	crq.iu_data = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset);

	err = phyp_hcall(H_SEND_CRQ, xp->sc->unit,
	    be64toh(((uint64_t *)(&crq))[0]),
	    be64toh(((uint64_t *)(&crq))[1]));
	if (err != 0)
		panic("CRQ send failure (%d)", err);
}

static void
vscsi_crq_load_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int err)
{
	struct vscsi_softc *sc = xsc;

	sc->crq_phys = segs[0].ds_addr;
	sc->n_crqs = PAGE_SIZE/sizeof(struct vscsi_crq);

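	/*
	 * The IU arena starts at offset PAGE_SIZE, just past the CRQ page.
	 * srp_iu_queue and srp_iu_phys point at the base of the whole
	 * mapping, so an arena offset can be added directly to either one.
	 */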
	sc->srp_iu_queue = (uint8_t *)(sc->crq_queue);
	sc->srp_iu_phys = segs[0].ds_addr;
	sc->srp_iu_arena = vmem_create("VSCSI SRP IU", PAGE_SIZE,
	    segs[0].ds_len - PAGE_SIZE, 16, 0, M_BESTFIT | M_NOWAIT);
}

static void
vscsi_setup_bus(struct vscsi_softc *sc)
{
	struct vscsi_crq crq;
	struct vscsi_xfer *xp;
	int error;

	struct {
		uint32_t type;
		uint16_t status;
		uint16_t length;
		uint64_t tag;
		uint64_t buffer;
		struct {
			char srp_version[8];
			char partition_name[96];
			uint32_t partition_number;
			uint32_t mad_version;
			uint32_t os_type;
			uint32_t port_max_txu[8];
		} payload;
	} mad_adapter_info;

	bzero(&crq, sizeof(crq));

	/* Init message */
	crq.valid = 0xc0;
	crq.format = 0x01;

	do {
		error = phyp_hcall(H_FREE_CRQ, sc->unit);
	} while (error == H_BUSY);

	/* See initialization sequence page 757 */
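	/*
	 * Handshake: free any stale CRQ, register ours, send the init
	 * message (0xc0/0x01), and poll until the partner's init-complete
	 * (0xc0/0x02) flips bus_initialized in vscsi_check_response_queue().
	 */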
	bzero(sc->crq_queue, sc->n_crqs*sizeof(sc->crq_queue[0]));
	sc->cur_crq = 0;
	sc->bus_initialized = 0;
	sc->bus_logged_in = 0;
	bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
	error = phyp_hcall(H_REG_CRQ, sc->unit, sc->crq_phys,
	    sc->n_crqs*sizeof(sc->crq_queue[0]));
	KASSERT(error == 0, ("CRQ registration failed (%d)", error));

	error = phyp_hcall(H_SEND_CRQ, sc->unit,
	    be64toh(((uint64_t *)(&crq))[0]),
	    be64toh(((uint64_t *)(&crq))[1]));
	if (error != 0)
		panic("CRQ setup failure (%d)", error);

	while (sc->bus_initialized == 0)
		vscsi_check_response_queue(sc);

	/* Send MAD adapter info */
	mad_adapter_info.type = htobe32(MAD_ADAPTER_INFO_REQUEST);
	mad_adapter_info.status = 0;
	mad_adapter_info.length = htobe16(sizeof(mad_adapter_info.payload));

	strcpy(mad_adapter_info.payload.srp_version, "16.a");
	strcpy(mad_adapter_info.payload.partition_name, "UNKNOWN");
	mad_adapter_info.payload.partition_number = -1;
	mad_adapter_info.payload.mad_version = htobe32(1);
	mad_adapter_info.payload.os_type = htobe32(2); /* Claim we are Linux */
	mad_adapter_info.payload.port_max_txu[0] = 0;
	/* If this fails, we get the defaults above */
	OF_getprop(OF_finddevice("/"), "ibm,partition-name",
	    mad_adapter_info.payload.partition_name,
	    sizeof(mad_adapter_info.payload.partition_name));
	OF_getprop(OF_finddevice("/"), "ibm,partition-no",
	    &mad_adapter_info.payload.partition_number,
	    sizeof(mad_adapter_info.payload.partition_number));

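	/*
	 * The adapter-info exchange travels as a MAD (CRQ format 0x02);
	 * vscsi_check_response_queue() ignores the reply's payload and
	 * just recycles the transfer.
	 */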
	xp = TAILQ_FIRST(&sc->free_xferq);
	xp->ccb = NULL;
	TAILQ_REMOVE(&sc->free_xferq, xp, queue);
	TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
	xp->srp_iu_size = sizeof(mad_adapter_info);
	crq.iu_length = htobe16(xp->srp_iu_size);
	vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
	    M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
	mad_adapter_info.buffer = htobe64(xp->sc->srp_iu_phys +
	    xp->srp_iu_offset + 24);
	mad_adapter_info.tag = (uint64_t)xp;
	memcpy((uint8_t *)xp->sc->srp_iu_queue + (uintptr_t)xp->srp_iu_offset,
		&mad_adapter_info, sizeof(mad_adapter_info));
	crq.valid = 0x80;
	crq.format = 0x02;
	crq.iu_data = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset);
	bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
	phyp_hcall(H_SEND_CRQ, xp->sc->unit,
	    be64toh(((uint64_t *)(&crq))[0]),
	    be64toh(((uint64_t *)(&crq))[1]));

	while (TAILQ_EMPTY(&sc->free_xferq))
		vscsi_check_response_queue(sc);

	/* Send SRP login */
	vscsi_srp_login(sc);
	while (sc->bus_logged_in == 0)
		vscsi_check_response_queue(sc);

	error = phyp_hcall(H_VIO_SIGNAL, sc->unit, 1); /* Enable interrupts */
}

static void
vscsi_intr(void *xsc)
{
	struct vscsi_softc *sc = xsc;

	mtx_lock(&sc->io_lock);
	vscsi_check_response_queue(sc);
	mtx_unlock(&sc->io_lock);
}

static void
vscsi_srp_response(struct vscsi_xfer *xp, struct vscsi_crq *crq)
{
	union ccb *ccb = xp->ccb;
	struct vscsi_softc *sc = xp->sc;
	struct srp_rsp *rsp;
	uint32_t sense_len;

	/* SRP response packet in original request */
	rsp = (struct srp_rsp *)((uint8_t *)sc->srp_iu_queue +
	    (uintptr_t)xp->srp_iu_offset);
	ccb->csio.scsi_status = rsp->status;
	if (ccb->csio.scsi_status == SCSI_STATUS_OK)
		ccb->ccb_h.status = CAM_REQ_CMP;
	else
		ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
#ifdef NOTYET
	/* Collect fast fail codes */
	if (crq->status != 0)
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
#endif

	if (ccb->ccb_h.status != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	if (!(rsp->flags & SRP_RSPVALID))
		rsp->response_data_len = 0;
	if (!(rsp->flags & SRP_SNSVALID))
		rsp->sense_data_len = 0;
	if (!(rsp->flags & (SRP_DOOVER | SRP_DOUNDER)))
		rsp->data_out_resid = 0;
	if (!(rsp->flags & (SRP_DIOVER | SRP_DIUNDER)))
		rsp->data_in_resid = 0;

	if (rsp->flags & SRP_SNSVALID) {
		bzero(&ccb->csio.sense_data, sizeof(struct scsi_sense_data));
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		sense_len = min(be32toh(rsp->sense_data_len),
		    ccb->csio.sense_len);
		memcpy(&ccb->csio.sense_data,
		    &rsp->data_payload[be32toh(rsp->response_data_len)],
		    sense_len);
		ccb->csio.sense_resid = ccb->csio.sense_len -
		    be32toh(rsp->sense_data_len);
	}

	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_OUT:
		ccb->csio.resid = be32toh(rsp->data_out_resid);
		break;
	case CAM_DIR_IN:
		ccb->csio.resid = be32toh(rsp->data_in_resid);
		break;
	}

	bus_dmamap_sync(sc->data_tag, xp->dmamap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->data_tag, xp->dmamap);
	xpt_done(ccb);
	xp->ccb = NULL;
}

static void
vscsi_login_response(struct vscsi_xfer *xp, struct vscsi_crq *crq)
{
	struct vscsi_softc *sc = xp->sc;
	struct srp_login_rsp *rsp;

	/* SRP response packet in original request */
	rsp = (struct srp_login_rsp *)((uint8_t *)sc->srp_iu_queue +
	    (uintptr_t)xp->srp_iu_offset);
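	/*
	 * Bit 0 = direct and bit 1 = indirect buffer support, matching the
	 * formats requested in vscsi_srp_login().
	 */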
	KASSERT(be16toh(rsp->buffer_formats) & 0x3, ("Neither direct nor "
	    "indirect buffers supported"));

	sc->max_transactions = be32toh(rsp->request_limit_delta);
	device_printf(sc->dev, "Queue depth %d commands\n",
	    sc->max_transactions);
	sc->bus_logged_in = 1;
}

static void
vscsi_cam_poll(struct cam_sim *sim)
{
	struct vscsi_softc *sc = cam_sim_softc(sim);

	vscsi_check_response_queue(sc);
}

static void
vscsi_check_response_queue(struct vscsi_softc *sc)
{
	struct vscsi_crq *crq;
	struct vscsi_xfer *xp;
	int code;

	mtx_assert(&sc->io_lock, MA_OWNED);

	while (sc->crq_queue[sc->cur_crq].valid != 0) {
		/* The hypercalls at both ends of this are not optimal */
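		/*
		 * Mask the virtual interrupt source while this entry is
		 * processed; it is unmasked again once the entry has been
		 * consumed at the bottom of the loop.
		 */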
		phyp_hcall(H_VIO_SIGNAL, sc->unit, 0);
		bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_POSTREAD);

		crq = &sc->crq_queue[sc->cur_crq];

		switch (crq->valid) {
		case 0xc0:
			if (crq->format == 0x02)
				sc->bus_initialized = 1;
			break;
		case 0x80:
			/* IU data is set to tag pointer (the XP) */
			xp = (struct vscsi_xfer *)crq->iu_data;

			switch (crq->format) {
			case 0x01:
				code = *((uint8_t *)sc->srp_iu_queue +
				    (uintptr_t)xp->srp_iu_offset);
				switch (code) {
				case SRP_RSP:
					vscsi_srp_response(xp, crq);
					break;
				case SRP_LOGIN_RSP:
					vscsi_login_response(xp, crq);
					break;
				default:
					device_printf(sc->dev, "Unknown SRP "
					    "response code %d\n", code);
					break;
				}
				break;
			case 0x02:
				/* Ignore management datagrams */
				break;
			default:
				panic("Unknown CRQ format %d\n", crq->format);
				break;
			}
			vmem_free(sc->srp_iu_arena, xp->srp_iu_offset,
			    xp->srp_iu_size);
			TAILQ_REMOVE(&sc->active_xferq, xp, queue);
			TAILQ_INSERT_TAIL(&sc->free_xferq, xp, queue);
			break;
		default:
			device_printf(sc->dev,
			    "Unknown CRQ message type %d\n", crq->valid);
			break;
		}

		crq->valid = 0;
		sc->cur_crq = (sc->cur_crq + 1) % sc->n_crqs;

		bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
		phyp_hcall(H_VIO_SIGNAL, sc->unit, 1);
	}
}