// SPDX-License-Identifier: GPL-2.0
/*
 * Finite state machine for vfio-ccw device handling
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/vfio.h>

#include <asm/isc.h>

#include "ioasm.h"
#include "vfio_ccw_private.h"

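/*
 * fsm_io_helper() - issue a Start Subchannel for the current channel
 * program, translating the condition code into an errno. On cc 0 the
 * device moves to VFIO_CCW_STATE_CP_PENDING; on cc 3 the path masks
 * are updated before deciding between -EACCES and -ENODEV.
 */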
static int fsm_io_helper(struct vfio_ccw_private *private)
{
	struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
	union orb *orb;
	int ccode;
	__u8 lpm;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&sch->lock, flags);

	orb = cp_get_orb(&private->cp, sch);
	if (!orb) {
		ret = -EIO;
		goto out;
	}

	VFIO_CCW_TRACE_EVENT(5, "stIO");
	VFIO_CCW_TRACE_EVENT(5, dev_name(&sch->dev));

	/* Issue "Start Subchannel" */
	ccode = ssch(sch->schid, orb);

	VFIO_CCW_HEX_EVENT(5, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		/*
		 * Initialize device status information
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
		ret = 0;
		private->state = VFIO_CCW_STATE_CP_PENDING;
		break;
	case 1:		/* Status pending */
	case 2:		/* Busy */
		ret = -EBUSY;
		break;
	case 3:		/* Device/path not operational */
	{
		lpm = orb->cmd.lpm;
		if (lpm != 0)
			sch->lpm &= ~lpm;
		else
			sch->lpm = 0;

		if (cio_update_schib(sch))
			ret = -ENODEV;
		else
			ret = sch->lpm ? -EACCES : -ENODEV;
		break;
	}
	default:
		ret = ccode;
	}
out:
	spin_unlock_irqrestore(&sch->lock, flags);
	return ret;
}

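/*
 * fsm_do_halt() - issue a Halt Subchannel for this device, mapping the
 * condition code to 0, -EBUSY, or -ENODEV.
 */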
static int fsm_do_halt(struct vfio_ccw_private *private)
{
	struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
	unsigned long flags;
	int ccode;
	int ret;

	spin_lock_irqsave(&sch->lock, flags);

	VFIO_CCW_TRACE_EVENT(2, "haltIO");
	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));

	/* Issue "Halt Subchannel" */
	ccode = hsch(sch->schid);

	VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		/*
		 * Initialize device status information
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
		ret = 0;
		break;
	case 1:		/* Status pending */
	case 2:		/* Busy */
		ret = -EBUSY;
		break;
	case 3:		/* Device not operational */
		ret = -ENODEV;
		break;
	default:
		ret = ccode;
	}
	spin_unlock_irqrestore(&sch->lock, flags);
	return ret;
}

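/*
 * fsm_do_clear() - issue a Clear Subchannel for this device. Unlike
 * ssch/hsch, csch only sets cc 0 or 3, so there is no -EBUSY case.
 */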
static int fsm_do_clear(struct vfio_ccw_private *private)
{
	struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
	unsigned long flags;
	int ccode;
	int ret;

	spin_lock_irqsave(&sch->lock, flags);

	VFIO_CCW_TRACE_EVENT(2, "clearIO");
	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));

	/* Issue "Clear Subchannel" */
	ccode = csch(sch->schid);

	VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		/*
		 * Initialize device status information
		 */
		sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
		/* TODO: check what else we might need to clear */
		ret = 0;
		break;
	case 3:		/* Device not operational */
		ret = -ENODEV;
		break;
	default:
		ret = ccode;
	}
	spin_unlock_irqrestore(&sch->lock, flags);
	return ret;
}

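/*
 * fsm_notoper() - the device has become not operational; schedule the
 * subchannel for unregistration and move to VFIO_CCW_STATE_NOT_OPER.
 */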
static void fsm_notoper(struct vfio_ccw_private *private,
			enum vfio_ccw_event event)
{
	struct subchannel *sch = to_subchannel(private->vdev.dev->parent);

	VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: notoper event %x state %x\n",
			   sch->schid.cssid,
			   sch->schid.ssid,
			   sch->schid.sch_no,
			   event,
			   private->state);

	/*
	 * TODO:
	 * Probably we should send the machine check to the guest.
	 */
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	private->state = VFIO_CCW_STATE_NOT_OPER;

	/* This is usually handled during CLOSE event */
	cp_free(&private->cp);
}

/*
 * No operation action.
 */
static void fsm_nop(struct vfio_ccw_private *private,
		    enum vfio_ccw_event event)
{
}

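/* An I/O request is not valid in the current state. */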
static void fsm_io_error(struct vfio_ccw_private *private,
			 enum vfio_ccw_event event)
{
	pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
	private->io_region->ret_code = -EIO;
}

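/* An I/O request arrived while the previous one is still pending. */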
static void fsm_io_busy(struct vfio_ccw_private *private,
			enum vfio_ccw_event event)
{
	private->io_region->ret_code = -EBUSY;
}

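/* An I/O request arrived while another is being processed: retry later. */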
static void fsm_io_retry(struct vfio_ccw_private *private,
			 enum vfio_ccw_event event)
{
	private->io_region->ret_code = -EAGAIN;
}

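/* A halt/clear request is not valid in the current state. */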
static void fsm_async_error(struct vfio_ccw_private *private,
			    enum vfio_ccw_event event)
{
	struct ccw_cmd_region *cmd_region = private->cmd_region;

	pr_err("vfio-ccw: FSM: %s request from state:%d\n",
	       cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH ? "halt" :
	       cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH ? "clear" :
	       "<unknown>", private->state);
	cmd_region->ret_code = -EIO;
}

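/* An async request arrived while a channel program is being processed. */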
static void fsm_async_retry(struct vfio_ccw_private *private,
			    enum vfio_ccw_event event)
{
	private->cmd_region->ret_code = -EAGAIN;
}

static void fsm_disabled_irq(struct vfio_ccw_private *private,
			     enum vfio_ccw_event event)
{
	struct subchannel *sch = to_subchannel(private->vdev.dev->parent);

	/*
	 * An interrupt in a disabled state means a previous disable was not
	 * successful - should not happen, but we try to disable again.
	 */
	cio_disable_subchannel(sch);
}

static inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
{
	struct subchannel *sch = to_subchannel(p->vdev.dev->parent);

	return sch->schid;
}

/*
 * Deal with a ccw command request from userspace.
 */
static void fsm_io_request(struct vfio_ccw_private *private,
			   enum vfio_ccw_event event)
{
	union orb *orb;
	union scsw *scsw = &private->scsw;
	struct ccw_io_region *io_region = private->io_region;
	char *errstr = "request";
	struct subchannel_id schid = get_schid(private);

	private->state = VFIO_CCW_STATE_CP_PROCESSING;
	memcpy(scsw, io_region->scsw_area, sizeof(*scsw));

	if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
		orb = (union orb *)io_region->orb_area;

		/* Don't try to build a cp if transport mode is specified. */
		if (orb->tm.b) {
			io_region->ret_code = -EOPNOTSUPP;
			VFIO_CCW_MSG_EVENT(2,
					   "sch %x.%x.%04x: transport mode\n",
					   schid.cssid,
					   schid.ssid, schid.sch_no);
			errstr = "transport mode";
			goto err_out;
		}
		io_region->ret_code = cp_init(&private->cp, orb);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "sch %x.%x.%04x: cp_init=%d\n",
					   schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp init";
			goto err_out;
		}

		io_region->ret_code = cp_prefetch(&private->cp);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "sch %x.%x.%04x: cp_prefetch=%d\n",
					   schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp prefetch";
			cp_free(&private->cp);
			goto err_out;
		}

		/* Start channel program and wait for I/O interrupt. */
		io_region->ret_code = fsm_io_helper(private);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "sch %x.%x.%04x: fsm_io_helper=%d\n",
					   schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp fsm_io_helper";
			cp_free(&private->cp);
			goto err_out;
		}
		return;
	} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		VFIO_CCW_MSG_EVENT(2,
				   "sch %x.%x.%04x: halt on io_region\n",
				   schid.cssid,
				   schid.ssid, schid.sch_no);
		/* halt is handled via the async cmd region */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		VFIO_CCW_MSG_EVENT(2,
				   "sch %x.%x.%04x: clear on io_region\n",
				   schid.cssid,
				   schid.ssid, schid.sch_no);
		/* clear is handled via the async cmd region */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	}

err_out:
	private->state = VFIO_CCW_STATE_IDLE;
	trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
				      io_region->ret_code, errstr);
}

/*
 * Deal with an async request from userspace.
 */
static void fsm_async_request(struct vfio_ccw_private *private,
			      enum vfio_ccw_event event)
{
	struct ccw_cmd_region *cmd_region = private->cmd_region;

	switch (cmd_region->command) {
	case VFIO_CCW_ASYNC_CMD_HSCH:
		cmd_region->ret_code = fsm_do_halt(private);
		break;
	case VFIO_CCW_ASYNC_CMD_CSCH:
		cmd_region->ret_code = fsm_do_clear(private);
		break;
	default:
		/* should not happen? */
		cmd_region->ret_code = -EINVAL;
	}

	trace_vfio_ccw_fsm_async_request(get_schid(private),
					 cmd_region->command,
					 cmd_region->ret_code);
}

/*
 * Got an interrupt for a normal I/O (state busy).
 */
static void fsm_irq(struct vfio_ccw_private *private,
		    enum vfio_ccw_event event)
{
	struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
	struct irb *irb = this_cpu_ptr(&cio_irb);

	VFIO_CCW_TRACE_EVENT(6, "IRQ");
	VFIO_CCW_TRACE_EVENT(6, dev_name(&sch->dev));

	memcpy(&private->irb, irb, sizeof(*irb));

	queue_work(vfio_ccw_work_q, &private->io_work);

	if (private->completion)
		complete(private->completion);
}

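/*
 * fsm_open() - enable the subchannel for I/O and move to the IDLE
 * state; on failure, raise a NOT_OPER event instead.
 */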
static void fsm_open(struct vfio_ccw_private *private,
		     enum vfio_ccw_event event)
{
	struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
	int ret;

	spin_lock_irq(&sch->lock);
	sch->isc = VFIO_CCW_ISC;
	ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch));
	if (ret)
		goto err_unlock;

	private->state = VFIO_CCW_STATE_IDLE;
	spin_unlock_irq(&sch->lock);
	return;

err_unlock:
	spin_unlock_irq(&sch->lock);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}

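/*
 * fsm_close() - disable the subchannel, quiescing it first if it is
 * still busy, free the channel program, and move to the STANDBY state;
 * on failure, raise a NOT_OPER event instead.
 */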
static void fsm_close(struct vfio_ccw_private *private,
		      enum vfio_ccw_event event)
{
	struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
	int ret;

	spin_lock_irq(&sch->lock);

	if (!sch->schib.pmcw.ena)
		goto err_unlock;

	ret = cio_disable_subchannel(sch);
	if (ret == -EBUSY)
		ret = vfio_ccw_sch_quiesce(sch);
	if (ret)
		goto err_unlock;

	private->state = VFIO_CCW_STATE_STANDBY;
	spin_unlock_irq(&sch->lock);
	cp_free(&private->cp);
	return;

err_unlock:
	spin_unlock_irq(&sch->lock);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}

/*
 * Device state machine
 */
fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
	[VFIO_CCW_STATE_NOT_OPER] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_nop,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_disabled_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_nop,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_nop,
	},
	[VFIO_CCW_STATE_STANDBY] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_disabled_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_open,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_notoper,
	},
	[VFIO_CCW_STATE_IDLE] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_request,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_request,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_notoper,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_close,
	},
	[VFIO_CCW_STATE_CP_PROCESSING] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_retry,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_retry,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_notoper,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_close,
	},
	[VFIO_CCW_STATE_CP_PENDING] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_request,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_notoper,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_close,
	},
};