1/*
2 * drivers/s390/cio/device_fsm.c
3 * finite state machine for device handling
4 *
5 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
6 *			 IBM Corporation
7 *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
8 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
9 */
10
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/jiffies.h>
14#include <linux/string.h>
15
16#include <asm/ccwdev.h>
17#include <asm/cio.h>
18#include <asm/chpid.h>
19
20#include "cio.h"
21#include "cio_debug.h"
22#include "css.h"
23#include "device.h"
24#include "chsc.h"
25#include "ioasm.h"
26#include "chp.h"
27
28int
29device_is_online(struct subchannel *sch)
30{
31	struct ccw_device *cdev;
32
33	if (!sch->dev.driver_data)
34		return 0;
35	cdev = sch->dev.driver_data;
36	return (cdev->private->state == DEV_STATE_ONLINE);
37}
38
39int
40device_is_disconnected(struct subchannel *sch)
41{
42	struct ccw_device *cdev;
43
44	if (!sch->dev.driver_data)
45		return 0;
46	cdev = sch->dev.driver_data;
47	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
48		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
49}
50
51void
52device_set_disconnected(struct subchannel *sch)
53{
54	struct ccw_device *cdev;
55
56	if (!sch->dev.driver_data)
57		return;
58	cdev = sch->dev.driver_data;
59	ccw_device_set_timeout(cdev, 0);
60	cdev->private->flags.fake_irb = 0;
61	cdev->private->state = DEV_STATE_DISCONNECTED;
62}
63
64void device_set_intretry(struct subchannel *sch)
65{
66	struct ccw_device *cdev;
67
68	cdev = sch->dev.driver_data;
69	if (!cdev)
70		return;
71	cdev->private->flags.intretry = 1;
72}
73
74int device_trigger_verify(struct subchannel *sch)
75{
76	struct ccw_device *cdev;
77
78	cdev = sch->dev.driver_data;
79	if (!cdev || !cdev->online)
80		return -EINVAL;
81	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
82	return 0;
83}
84
85/*
86 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
87 */
88static void
89ccw_device_timeout(unsigned long data)
90{
91	struct ccw_device *cdev;
92
93	cdev = (struct ccw_device *) data;
94	spin_lock_irq(cdev->ccwlock);
95	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
96	spin_unlock_irq(cdev->ccwlock);
97}
98
99/*
100 * Set timeout
101 */
/*
 * Set timeout: (re-)arm the per-device timer to fire in @expires jiffies,
 * or cancel it when @expires is 0.
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		/* expires == 0 means: cancel any pending timeout. */
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
		/*
		 * mod_timer() returns non-zero when the timer was still
		 * pending; it has been re-armed and we are done. If it
		 * returns 0 the timer just expired, so fall through and
		 * set it up from scratch below.
		 */
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}
118
119/* Kill any pending timers after machine check. */
120void
121device_kill_pending_timer(struct subchannel *sch)
122{
123	struct ccw_device *cdev;
124
125	if (!sch->dev.driver_data)
126		return;
127	cdev = sch->dev.driver_data;
128	ccw_device_set_timeout(cdev, 0);
129}
130
131/*
132 * Cancel running i/o. This is called repeatedly since halt/clear are
133 * asynchronous operations. We do one try with cio_cancel, two tries
134 * with cio_halt, 255 tries with cio_clear. If everythings fails panic.
135 * Returns 0 if device now idle, -ENODEV for device not operational and
136 * -EBUSY if an interrupt is expected (either from halt/clear or from a
137 * status pending).
138 */
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, two tries
 * with cio_halt, 255 tries with cio_clear. If everything fails panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	ret = stsch(sch->schid, &sch->schib);
	if (ret || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (!sch->schib.pmcw.ena)
		/* Not operational -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
	    !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		ret = cio_cancel(sch);
		/* Anything but -EINVAL is a final answer from cancel. */
		if (ret != -EINVAL)
			return ret;
		/* cancel io unsuccessful. From now on it is asynchronous. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			/* Halt accepted (0) -> an interrupt is expected. */
			if (ret != -EBUSY)
				return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear (sch);
		/* Clear accepted (0) -> an interrupt is expected. */
		return (ret == 0) ? -EBUSY : ret;
	}
	/* Even 255 clear attempts did not stop the i/o - give up. */
	panic("Can't stop i/o on subchannel.\n");
}
180
181static int
182ccw_device_handle_oper(struct ccw_device *cdev)
183{
184	struct subchannel *sch;
185
186	sch = to_subchannel(cdev->dev.parent);
187	cdev->private->flags.recog_done = 1;
188	/*
189	 * Check if cu type and device type still match. If
190	 * not, it is certainly another device and we have to
191	 * de- and re-register.
192	 */
193	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
194	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
195	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
196	    cdev->id.dev_model != cdev->private->senseid.dev_model) {
197		PREPARE_WORK(&cdev->private->kick_work,
198			     ccw_device_do_unreg_rereg);
199		queue_work(ccw_device_work, &cdev->private->kick_work);
200		return 0;
201	}
202	cdev->private->flags.donotify = 1;
203	return 1;
204}
205
206/*
207 * The machine won't give us any notification by machine check if a chpid has
208 * been varied online on the SE so we have to find out by magic (i. e. driving
209 * the channel subsystem to device selection and updating our path masks).
210 */
211static void
212__recover_lost_chpids(struct subchannel *sch, int old_lpm)
213{
214	int mask, i;
215	struct chp_id chpid;
216
217	chp_id_init(&chpid);
218	for (i = 0; i<8; i++) {
219		mask = 0x80 >> i;
220		if (!(sch->lpm & mask))
221			continue;
222		if (old_lpm & mask)
223			continue;
224		chpid.id = sch->schib.pmcw.chpid[i];
225		if (!chp_is_registered(chpid))
226			css_schedule_eval_all();
227	}
228}
229
230/*
231 * Stop device recognition.
232 */
/*
 * Stop device recognition: refresh path information, handle devices that
 * (re-)appeared or stayed gone, and finish with the resulting @state.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int notify, old_lpm, same_dev;

	sch = to_subchannel(cdev->dev.parent);

	/* Recognition is over - no more timeout, disable the subchannel. */
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;
	stsch(sch->schid, &sch->schib);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/* Check since device may again have become not operational. */
	if (!sch->schib.pmcw.dnv)
		state = DEV_STATE_NOT_OPER;
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
		if (state == DEV_STATE_NOT_OPER) {
			/* Still gone: stay disconnected, don't wake anyone. */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_DISCONNECTED;
			return;
		}
		/* Boxed devices don't need extra treatment. */
	}
	notify = 0;
	same_dev = 0; /* Keep the compiler quiet... */
	switch (state) {
	case DEV_STATE_NOT_OPER:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : unknown device %04x on subchannel "
			  "0.%x.%04x\n", cdev->private->dev_id.devno,
			  sch->schid.ssid, sch->schid.sch_no);
		break;
	case DEV_STATE_OFFLINE:
		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
			/* A disconnected device became operational again. */
			same_dev = ccw_device_handle_oper(cdev);
			notify = 1;
		}
		/* fill out sense information */
		memset(&cdev->id, 0, sizeof(cdev->id));
		cdev->id.cu_type   = cdev->private->senseid.cu_type;
		cdev->id.cu_model  = cdev->private->senseid.cu_model;
		cdev->id.dev_type  = cdev->private->senseid.dev_type;
		cdev->id.dev_model = cdev->private->senseid.dev_model;
		if (notify) {
			cdev->private->state = DEV_STATE_OFFLINE;
			if (same_dev) {
				/* Get device online again. */
				ccw_device_online(cdev);
				wake_up(&cdev->private->wait_q);
			}
			return;
		}
		/* Issue device info message. */
		CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: "
			  "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
			  "%04X/%02X\n",
			  cdev->private->dev_id.ssid,
			  cdev->private->dev_id.devno,
			  cdev->id.cu_type, cdev->id.cu_model,
			  cdev->id.dev_type, cdev->id.dev_model);
		break;
	case DEV_STATE_BOXED:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : boxed device %04x on subchannel "
			  "0.%x.%04x\n", cdev->private->dev_id.devno,
			  sch->schid.ssid, sch->schid.sch_no);
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	if (state != DEV_STATE_NOT_OPER)
		wake_up(&cdev->private->wait_q);
}
316
317/*
318 * Function called from device_id.c after sense id has completed.
319 */
320void
321ccw_device_sense_id_done(struct ccw_device *cdev, int err)
322{
323	switch (err) {
324	case 0:
325		ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
326		break;
327	case -ETIME:		/* Sense id stopped by timeout. */
328		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
329		break;
330	default:
331		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
332		break;
333	}
334}
335
/*
 * Worker: a device became operational again. Ask the subchannel driver
 * (CIO_OPER) whether it wants the device back; if yes, re-enable channel
 * measurement, otherwise de- and re-register the device.
 */
static void
ccw_device_oper_notify(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;
	unsigned long flags;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	spin_lock_irqsave(cdev->ccwlock, flags);
	sch = to_subchannel(cdev->dev.parent);
	if (sch->driver && sch->driver->notify) {
		/* Drop the ccw lock while calling out to the driver. */
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		ret = sch->driver->notify(&sch->dev, CIO_OPER);
		spin_lock_irqsave(cdev->ccwlock, flags);
	} else
		ret = 0;
	if (ret) {
		/* Reenable channel measurements, if needed. */
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		cmf_reenable(cdev);
		spin_lock_irqsave(cdev->ccwlock, flags);
		wake_up(&cdev->private->wait_q);
	}
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	if (!ret)
		/* Driver doesn't want device back. */
		ccw_device_do_unreg_rereg(work);
}
367
368/*
369 * Finished with online/offline processing.
370 */
/*
 * Finished with online/offline processing: enter @state, deliver any
 * deferred oper notification and wake up waiters.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	/* Stop any pending online/offline timeout. */
	ccw_device_set_timeout(cdev, 0);

	/* Keep the subchannel enabled only for the online state. */
	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;


	if (state == DEV_STATE_BOXED)
		CIO_DEBUG(KERN_WARNING, 2,
			  "Boxed device %04x on subchannel %04x\n",
			  cdev->private->dev_id.devno, sch->schid.sch_no);

	/* Deliver a deferred oper notification, if one was requested. */
	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);

	/* Drop the reference taken in ccw_device_online(). */
	if (css_init_done && state != DEV_STATE_ONLINE)
		put_device (&cdev->dev);
}
404
405static int cmp_pgid(struct pgid *p1, struct pgid *p2)
406{
407	char *c1;
408	char *c2;
409
410	c1 = (char *)p1;
411	c2 = (char *)p2;
412
413	return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
414}
415
/*
 * Reduce the eight per-path pgids to a single pgid in pgid[0]: use the
 * common non-reset pgid if all paths agree, fall back to the global pgid
 * when none is set, and disable path grouping on a mismatch.
 */
static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
{
	int i;
	int last;	/* index of the first non-reset pgid seen so far */

	last = 0;
	for (i = 0; i < 8; i++) {
		if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
			/* No PGID yet */
			continue;
		if (cdev->private->pgid[last].inf.ps.state1 ==
		    SNID_STATE1_RESET) {
			/* First non-zero PGID */
			last = i;
			continue;
		}
		if (cmp_pgid(&cdev->private->pgid[i],
			     &cdev->private->pgid[last]) == 0)
			/* Non-conflicting PGIDs */
			continue;

		/* PGID mismatch, can't pathgroup. */
		CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
			      "0.%x.%04x, can't pathgroup\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		cdev->private->options.pgroup = 0;
		return;
	}
	if (cdev->private->pgid[last].inf.ps.state1 ==
	    SNID_STATE1_RESET)
		/* No previous pgid found */
		memcpy(&cdev->private->pgid[0], &css[0]->global_pgid,
		       sizeof(struct pgid));
	else
		/* Use existing pgid */
		memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
		       sizeof(struct pgid));
}
455
456/*
457 * Function called from device_pgid.c after sense path ground has completed.
458 */
459void
460ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
461{
462	struct subchannel *sch;
463
464	sch = to_subchannel(cdev->dev.parent);
465	switch (err) {
466	case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
467		cdev->private->options.pgroup = 0;
468		break;
469	case 0: /* success */
470	case -EACCES: /* partial success, some paths not operational */
471		/* Check if all pgids are equal or 0. */
472		__ccw_device_get_common_pgid(cdev);
473		break;
474	case -ETIME:		/* Sense path group id stopped by timeout. */
475	case -EUSERS:		/* device is reserved for someone else. */
476		ccw_device_done(cdev, DEV_STATE_BOXED);
477		return;
478	default:
479		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
480		return;
481	}
482	/* Start Path Group verification. */
483	cdev->private->state = DEV_STATE_VERIFY;
484	cdev->private->flags.doverify = 0;
485	ccw_device_verify_start(cdev);
486}
487
488/*
489 * Start device recognition.
490 */
491int
492ccw_device_recognition(struct ccw_device *cdev)
493{
494	struct subchannel *sch;
495	int ret;
496
497	if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
498	    (cdev->private->state != DEV_STATE_BOXED))
499		return -EINVAL;
500	sch = to_subchannel(cdev->dev.parent);
501	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
502	if (ret != 0)
503		/* Couldn't enable the subchannel for i/o. Sick device. */
504		return ret;
505
506	/* After 60s the device recognition is considered to have failed. */
507	ccw_device_set_timeout(cdev, 60*HZ);
508
509	/*
510	 * We used to start here with a sense pgid to find out whether a device
511	 * is locked by someone else. Unfortunately, the sense pgid command
512	 * code has other meanings on devices predating the path grouping
513	 * algorithm, so we start with sense id and box the device after an
514	 * timeout (or if sense pgid during path verification detects the device
515	 * is locked, as may happen on newer devices).
516	 */
517	cdev->private->flags.recog_done = 0;
518	cdev->private->state = DEV_STATE_SENSE_ID;
519	ccw_device_sense_id_start(cdev);
520	return 0;
521}
522
523/*
524 * Handle timeout in device recognition.
525 */
526static void
527ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
528{
529	int ret;
530
531	ret = ccw_device_cancel_halt_clear(cdev);
532	switch (ret) {
533	case 0:
534		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
535		break;
536	case -ENODEV:
537		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
538		break;
539	default:
540		ccw_device_set_timeout(cdev, 3*HZ);
541	}
542}
543
544
/*
 * Worker: the device has no path left. Ask the subchannel driver
 * (CIO_NO_PATH) whether it wants to keep the device; unregister it if
 * not, otherwise put it into the disconnected state.
 */
static void
ccw_device_nopath_notify(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;
	unsigned long flags;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	spin_lock_irqsave(cdev->ccwlock, flags);
	sch = to_subchannel(cdev->dev.parent);
	/* Extra sanity. */
	if (sch->lpm)
		/* Paths came back in the meantime - nothing to do. */
		goto out_unlock;
	if (sch->driver && sch->driver->notify) {
		/* Drop the ccw lock while calling out to the driver. */
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		ret = sch->driver->notify(&sch->dev, CIO_NO_PATH);
		spin_lock_irqsave(cdev->ccwlock, flags);
	} else
		ret = 0;
	if (!ret) {
		if (get_device(&sch->dev)) {
			/* Driver doesn't want to keep device. */
			cio_disable_subchannel(sch);
			if (get_device(&cdev->dev)) {
				PREPARE_WORK(&cdev->private->kick_work,
					     ccw_device_call_sch_unregister);
				queue_work(ccw_device_work,
					   &cdev->private->kick_work);
			} else
				/* Drop the sch reference taken above. */
				put_device(&sch->dev);
		}
	} else {
		/* Driver keeps the device - mark it disconnected. */
		cio_disable_subchannel(sch);
		ccw_device_set_timeout(cdev, 0);
		cdev->private->flags.fake_irb = 0;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
	}
out_unlock:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
}
589
/*
 * Path verification finished with result @err: set the device online on
 * success, box it on timeout, and notify/disable it otherwise.
 */
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	stsch(sch->schid, &sch->schib);
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		cdev->private->flags.doverify = 0;
		ccw_device_verify_start(cdev);
		return;
	}
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		/* fallthrough */
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			/* Fake a pending-start interrupt for the request
			 * that was accepted while verification ran. */
			memset(&cdev->private->irb, 0, sizeof(struct irb));
			cdev->private->irb.scsw.cc = 1;
			cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC;
			cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND;
			cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND;
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		break;
	case -ETIME:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		if (cdev->online) {
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
643
644/*
645 * Get device online.
646 */
/*
 * Get device online: enable the subchannel and start either sense pgid
 * or, if path grouping is disabled, path verification directly.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	/* Hold a device reference while online (dropped in ccw_device_done). */
	if (css_init_done && !get_device(&cdev->dev))
		return -ENODEV;
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Do we want to do path grouping? */
	if (!cdev->private->options.pgroup) {
		/* Start initial path verification. */
		cdev->private->state = DEV_STATE_VERIFY;
		cdev->private->flags.doverify = 0;
		ccw_device_verify_start(cdev);
		return 0;
	}
	/* Do a SensePGID first. */
	cdev->private->state = DEV_STATE_SENSE_PGID;
	ccw_device_sense_pgid_start(cdev);
	return 0;
}
679
/*
 * Path group disband finished with result @err: go offline on success,
 * box the device on timeout, unregister it otherwise.
 */
void
ccw_device_disband_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* Device is gone - schedule unregistration. */
		cdev->private->flags.donotify = 0;
		if (get_device(&cdev->dev)) {
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_call_sch_unregister);
			queue_work(ccw_device_work, &cdev->private->kick_work);
		}
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
701
702/*
703 * Shutdown device.
704 */
/*
 * Shutdown device: take an online device offline, disbanding its path
 * group first if path grouping is in use.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev)) {
		/* Orphaned devices have no real subchannel - just finish. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE) {
		/* -EBUSY while i/o is still in flight, -EINVAL otherwise. */
		if (sch->schib.scsw.actl != 0)
			return -EBUSY;
		return -EINVAL;
	}
	if (sch->schib.scsw.actl != 0)
		return -EBUSY;
	/* Are we doing path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}
735
736/*
737 * Handle timeout in device online/offline process.
738 */
739static void
740ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
741{
742	int ret;
743
744	ret = ccw_device_cancel_halt_clear(cdev);
745	switch (ret) {
746	case 0:
747		ccw_device_done(cdev, DEV_STATE_BOXED);
748		break;
749	case -ENODEV:
750		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
751		break;
752	default:
753		ccw_device_set_timeout(cdev, 3*HZ);
754	}
755}
756
757/*
758 * Handle not oper event in device recognition.
759 */
/*
 * Handle not oper event in device recognition: finish recognition with
 * a "not operational" result.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
765
766/*
767 * Handle not operational event while offline.
768 */
769static void
770ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
771{
772	struct subchannel *sch;
773
774	cdev->private->state = DEV_STATE_NOT_OPER;
775	sch = to_subchannel(cdev->dev.parent);
776	if (get_device(&cdev->dev)) {
777		PREPARE_WORK(&cdev->private->kick_work,
778			     ccw_device_call_sch_unregister);
779		queue_work(ccw_device_work, &cdev->private->kick_work);
780	}
781	wake_up(&cdev->private->wait_q);
782}
783
784/*
785 * Handle not operational event while online.
786 */
787static void
788ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
789{
790	struct subchannel *sch;
791	int ret;
792
793	sch = to_subchannel(cdev->dev.parent);
794	if (sch->driver->notify) {
795		spin_unlock_irq(cdev->ccwlock);
796		ret = sch->driver->notify(&sch->dev,
797					  sch->lpm ? CIO_GONE : CIO_NO_PATH);
798		spin_lock_irq(cdev->ccwlock);
799	} else
800		ret = 0;
801	if (ret) {
802		ccw_device_set_timeout(cdev, 0);
803		cdev->private->flags.fake_irb = 0;
804		cdev->private->state = DEV_STATE_DISCONNECTED;
805		wake_up(&cdev->private->wait_q);
806		return;
807	}
808	cdev->private->state = DEV_STATE_NOT_OPER;
809	cio_disable_subchannel(sch);
810	if (sch->schib.scsw.actl != 0) {
811		ccw_device_call_handler(cdev);
812	}
813	if (get_device(&cdev->dev)) {
814		PREPARE_WORK(&cdev->private->kick_work,
815			     ccw_device_call_sch_unregister);
816		queue_work(ccw_device_work, &cdev->private->kick_work);
817	}
818	wake_up(&cdev->private->wait_q);
819}
820
821/*
822 * Handle path verification event.
823 */
/*
 * Handle path verification event: start verification now if the device
 * is idle, otherwise note that it has to be done later.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (cdev->private->state == DEV_STATE_W4SENSE) {
		/* Still waiting for basic sense - verify afterwards. */
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	stsch(sch->schid, &sch->schib);

	if (sch->schib.scsw.actl != 0 ||
	    (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) ||
	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	cdev->private->flags.doverify = 0;
	ccw_device_verify_start(cdev);
}
856
857/*
858 * Got an interrupt for a normal io (state online).
859 */
/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	/* The irb is delivered via the lowcore. */
	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if ((irb->scsw.stctl ==
	    		(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
	    && (!irb->scsw.cc)) {
		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				/* Couldn't start sense - deliver as-is. */
				goto call_handler_unsol;
			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		/* Hand the unsolicited interrupt to the device driver. */
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		if (cdev->private->flags.doverify)
			ccw_device_online_verify(cdev, 0);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
900
901/*
902 * Got an timeout in online state.
903 */
/*
 * Got a timeout in online state: try to terminate the running i/o and
 * report -ETIMEDOUT to the device driver once it is dead.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Halt/clear still pending - retry in 3 seconds. */
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			/* No path left - notify the subchannel driver. */
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	} else if (cdev->handler)
		/* I/o is dead now - tell the driver it timed out. */
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
931
932/*
933 * Got an interrupt for a basic sense.
934 */
/*
 * Got an interrupt for a basic sense.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if (irb->scsw.stctl ==
	    		(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (irb->scsw.cc == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			/* Unexpected while waiting for sense - log it and
			 * pass it to the device driver. */
			printk(KERN_INFO "Huh? %s(%s): unsolicited "
			       "interrupt...\n",
			       __FUNCTION__, cdev->dev.bus_id);
			if (cdev->handler)
				cdev->handler (cdev, 0, irb);
		}
		return;
	}
	/*
	 * Check if a halt or clear has been issued in the meanwhile. If yes,
	 * only deliver the halt/clear interrupt to the device driver as if it
	 * had killed the original request.
	 */
	if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
		/* Retry Basic Sense if requested. */
		if (cdev->private->flags.intretry) {
			cdev->private->flags.intretry = 0;
			ccw_device_do_sense(cdev, irb);
			return;
		}
		cdev->private->flags.dosense = 0;
		/* Drop accumulated status, keep only the halt/clear irb. */
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		ccw_device_accumulate_irb(cdev, irb);
		goto call_handler;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
call_handler:
	cdev->private->state = DEV_STATE_ONLINE;
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
987
988static void
989ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
990{
991	struct irb *irb;
992
993	irb = (struct irb *) __LC_IRB;
994	/* Accumulate status. We don't do basic sense. */
995	ccw_device_accumulate_irb(cdev, irb);
996	/* Remember to clear irb to avoid residuals. */
997	memset(&cdev->private->irb, 0, sizeof(struct irb));
998	/* Try to start delayed device verification. */
999	ccw_device_online_verify(cdev, 0);
1000	/* Note: Don't call handler for cio initiated clear! */
1001}
1002
1003static void
1004ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
1005{
1006	struct subchannel *sch;
1007
1008	sch = to_subchannel(cdev->dev.parent);
1009	ccw_device_set_timeout(cdev, 0);
1010	/* Start delayed path verification. */
1011	ccw_device_online_verify(cdev, 0);
1012	/* OK, i/o is dead now. Call interrupt handler. */
1013	if (cdev->handler)
1014		cdev->handler(cdev, cdev->private->intparm,
1015			      ERR_PTR(-EIO));
1016}
1017
/*
 * Timeout while killing i/o: keep retrying cancel/halt/clear until the
 * i/o is stopped, then report -EIO to the device driver.
 */
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Still busy - try again in 3 seconds. */
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}
1034
1035void device_kill_io(struct subchannel *sch)
1036{
1037	int ret;
1038	struct ccw_device *cdev;
1039
1040	cdev = sch->dev.driver_data;
1041	ret = ccw_device_cancel_halt_clear(cdev);
1042	if (ret == -EBUSY) {
1043		ccw_device_set_timeout(cdev, 3*HZ);
1044		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
1045		return;
1046	}
1047	/* Start delayed path verification. */
1048	ccw_device_online_verify(cdev, 0);
1049	if (cdev->handler)
1050		cdev->handler(cdev, cdev->private->intparm,
1051			      ERR_PTR(-EIO));
1052}
1053
/* Defer path verification until the current task has finished. */
static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Start verification after current task finished. */
	cdev->private->flags.doverify = 1;
}
1060
/*
 * Interrupt or timeout while a stlck operation is in progress: record
 * solicited status and wake up the waiter.
 */
static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	switch (dev_event) {
	case DEV_EVENT_INTERRUPT:
		irb = (struct irb *) __LC_IRB;
		/* Check for unsolicited interrupt. */
		if ((irb->scsw.stctl ==
		     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
		    (!irb->scsw.cc))
			/* Ignore it - just wake the waiter. */
			goto out_wakeup;

		ccw_device_accumulate_irb(cdev, irb);
		/* We don't care about basic sense etc. */
		break;
	default: /* timeout */
		break;
	}
out_wakeup:
	wake_up(&cdev->private->wait_q);
}
1084
1085static void
1086ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1087{
1088	struct subchannel *sch;
1089
1090	sch = to_subchannel(cdev->dev.parent);
1091	if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
1092		/* Couldn't enable the subchannel for i/o. Sick device. */
1093		return;
1094
1095	/* After 60s the device recognition is considered to have failed. */
1096	ccw_device_set_timeout(cdev, 60*HZ);
1097
1098	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
1099	ccw_device_sense_id_start(cdev);
1100}
1101
/*
 * A disconnected device may have reappeared (e.g. after a machine
 * check): refresh the subchannel data and restart device recognition.
 */
void
device_trigger_reprobe(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* No ccw device is registered for this subchannel. */
	if (!sch->dev.driver_data)
		return;
	cdev = sch->dev.driver_data;
	/* Only devices in the disconnected state are reprobed. */
	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	/* Update some values. */
	if (stsch(sch->schid, &sch->schib))
		return;
	if (!sch->schib.pmcw.dnv)
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/* Re-set some bits in the pmcw that were lost. */
	sch->schib.pmcw.isc = 3;
	sch->schib.pmcw.csense = 1;
	sch->schib.pmcw.ena = 0;
	/* More than one bit set in lpm: more than one usable path. */
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;
	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
	/* We should also update ssd info, but this has to wait. */
	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		/* Different devno: move the old cdev out of the way. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_move_to_orphanage);
		queue_work(ccw_device_work, &cdev->private->kick_work);
	} else
		ccw_device_start_id(cdev, 0);
}
1139
1140static void
1141ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
1142{
1143	struct subchannel *sch;
1144
1145	sch = to_subchannel(cdev->dev.parent);
1146	/*
1147	 * An interrupt in state offline means a previous disable was not
1148	 * successful. Try again.
1149	 */
1150	cio_disable_subchannel(sch);
1151}
1152
1153static void
1154ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
1155{
1156	retry_set_schib(cdev);
1157	cdev->private->state = DEV_STATE_ONLINE;
1158	dev_fsm_event(cdev, dev_event);
1159}
1160
1161static void ccw_device_update_cmfblock(struct ccw_device *cdev,
1162				       enum dev_event dev_event)
1163{
1164	cmf_retry_copy_block(cdev);
1165	cdev->private->state = DEV_STATE_ONLINE;
1166	dev_fsm_event(cdev, dev_event);
1167}
1168
1169static void
1170ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1171{
1172	ccw_device_set_timeout(cdev, 0);
1173	if (dev_event == DEV_EVENT_NOTOPER)
1174		cdev->private->state = DEV_STATE_NOT_OPER;
1175	else
1176		cdev->private->state = DEV_STATE_OFFLINE;
1177	wake_up(&cdev->private->wait_q);
1178}
1179
1180static void
1181ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1182{
1183	int ret;
1184
1185	ret = ccw_device_cancel_halt_clear(cdev);
1186	switch (ret) {
1187	case 0:
1188		cdev->private->state = DEV_STATE_OFFLINE;
1189		wake_up(&cdev->private->wait_q);
1190		break;
1191	case -ENODEV:
1192		cdev->private->state = DEV_STATE_NOT_OPER;
1193		wake_up(&cdev->private->wait_q);
1194		break;
1195	default:
1196		ccw_device_set_timeout(cdev, HZ/10);
1197	}
1198}
1199
1200/*
1201 * No operation action. This is used e.g. to ignore a timeout event in
1202 * state offline.
1203 */
1204static void
1205ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
1206{
1207}
1208
1209/*
1210 * Bug operation action.
1211 */
1212static void
1213ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
1214{
1215	printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
1216	       cdev->private->state, dev_event);
1217	BUG();
1218}
1219
1220/*
1221 * device statemachine
1222 */
1223fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1224	[DEV_STATE_NOT_OPER] = {
1225		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
1226		[DEV_EVENT_INTERRUPT]	= ccw_device_bug,
1227		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
1228		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1229	},
1230	[DEV_STATE_SENSE_PGID] = {
1231		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1232		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_pgid_irq,
1233		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
1234		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1235	},
1236	[DEV_STATE_SENSE_ID] = {
1237		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
1238		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
1239		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
1240		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1241	},
1242	[DEV_STATE_OFFLINE] = {
1243		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
1244		[DEV_EVENT_INTERRUPT]	= ccw_device_offline_irq,
1245		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
1246		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1247	},
1248	[DEV_STATE_VERIFY] = {
1249		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1250		[DEV_EVENT_INTERRUPT]	= ccw_device_verify_irq,
1251		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
1252		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
1253	},
1254	[DEV_STATE_ONLINE] = {
1255		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1256		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
1257		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
1258		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
1259	},
1260	[DEV_STATE_W4SENSE] = {
1261		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1262		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
1263		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
1264		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
1265	},
1266	[DEV_STATE_DISBAND_PGID] = {
1267		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1268		[DEV_EVENT_INTERRUPT]	= ccw_device_disband_irq,
1269		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
1270		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1271	},
1272	[DEV_STATE_BOXED] = {
1273		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
1274		[DEV_EVENT_INTERRUPT]	= ccw_device_stlck_done,
1275		[DEV_EVENT_TIMEOUT]	= ccw_device_stlck_done,
1276		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1277	},
1278	/* states to wait for i/o completion before doing something */
1279	[DEV_STATE_CLEAR_VERIFY] = {
1280		[DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1281		[DEV_EVENT_INTERRUPT]   = ccw_device_clear_verify,
1282		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
1283		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1284	},
1285	[DEV_STATE_TIMEOUT_KILL] = {
1286		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1287		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
1288		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
1289		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1290	},
1291	[DEV_STATE_QUIESCE] = {
1292		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
1293		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
1294		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
1295		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1296	},
1297	/* special states for devices gone not operational */
1298	[DEV_STATE_DISCONNECTED] = {
1299		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
1300		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
1301		[DEV_EVENT_TIMEOUT]	= ccw_device_bug,
1302		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
1303	},
1304	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
1305		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
1306		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
1307		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
1308		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1309	},
1310	[DEV_STATE_CMFCHANGE] = {
1311		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
1312		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
1313		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
1314		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
1315	},
1316	[DEV_STATE_CMFUPDATE] = {
1317		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
1318		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
1319		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
1320		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
1321	},
1322};
1323
1324/*
1325 * io_subchannel_irq is called for "real" interrupts or for status
1326 * pending conditions on msch.
1327 */
1328void
1329io_subchannel_irq (struct device *pdev)
1330{
1331	struct ccw_device *cdev;
1332
1333	cdev = to_subchannel(pdev)->dev.driver_data;
1334
1335	CIO_TRACE_EVENT (3, "IRQ");
1336	CIO_TRACE_EVENT (3, pdev->bus_id);
1337	if (cdev)
1338		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1339}
1340
1341EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
1342