// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for s390 eadm subchannels
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/io.h>

#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/isc.h>
#include <asm/cio.h>
#include <asm/scsw.h>
#include <asm/eadm.h>

#include "eadm_sch.h"
#include "ioasm.h"
#include "cio.h"
#include "css.h"
#include "orb.h"

MODULE_DESCRIPTION("driver for s390 eadm subchannels");
MODULE_LICENSE("GPL");

#define EADM_TIMEOUT (7 * HZ)
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(eadm_list);

static debug_info_t *eadm_debug;

#define EADM_LOG(imp, txt) do {					\
		debug_text_event(eadm_debug, imp, txt);		\
	} while (0)

static void EADM_LOG_HEX(int level, void *data, int length)
{
	debug_event(eadm_debug, level, data, length);
}

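/*
 * Initialize the ORB used for eadm requests: clear it and set the
 * compat1/compat2, fmt and x control bits.
 */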
static void orb_init(union orb *orb)
{
	memset(orb, 0, sizeof(union orb));
	orb->eadm.compat1 = 1;
	orb->eadm.compat2 = 1;
	orb->eadm.fmt = 1;
	orb->eadm.x = 1;
}

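/*
 * Set up the ORB for the given AOB and issue a start subchannel (ssch)
 * instruction, translating the condition code into an errno value.
 */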
static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
{
	union orb *orb = &get_eadm_private(sch)->orb;
	int cc;

	orb_init(orb);
	orb->eadm.aob = virt_to_dma32(aob);
	orb->eadm.intparm = (u32)virt_to_phys(sch);
	orb->eadm.key = PAGE_DEFAULT_KEY >> 4;

	EADM_LOG(6, "start");
	EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));

	cc = ssch(sch->schid, orb);
	switch (cc) {
	case 0:
		sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
		break;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	case 3:		/* not operational */
		return -ENODEV;
	}
	return 0;
}

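/* Issue a clear subchannel (csch) instruction to cancel the current operation. */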
static int eadm_subchannel_clear(struct subchannel *sch)
{
	int cc;

	cc = csch(sch->schid);
	if (cc)
		return -ENODEV;

	sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
	return 0;
}

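/*
 * Timer callback: the request did not complete within EADM_TIMEOUT,
 * so try to clear the subchannel.
 */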
static void eadm_subchannel_timeout(struct timer_list *t)
{
	struct eadm_private *private = from_timer(private, t, timer);
	struct subchannel *sch = private->sch;

	spin_lock_irq(&sch->lock);
	EADM_LOG(1, "timeout");
	EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
	if (eadm_subchannel_clear(sch))
		EADM_LOG(0, "clear failed");
	spin_unlock_irq(&sch->lock);
}

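/* Arm the per-subchannel timeout timer, or delete it if expires is 0. */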
static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
{
	struct eadm_private *private = get_eadm_private(sch);

	if (expires == 0)
		del_timer(&private->timer);
	else
		mod_timer(&private->timer, jiffies + expires);
}

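/*
 * Interrupt handler: map error and clear (timeout) indications to a block
 * layer status, stop the timeout timer and hand the completed AOB back to
 * the SCM layer. An unsolicited interrupt marks the subchannel as not
 * operational and schedules its re-evaluation.
 */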
static void eadm_subchannel_irq(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);
	struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
	struct irb *irb = this_cpu_ptr(&cio_irb);
	blk_status_t error = BLK_STS_OK;

	EADM_LOG(6, "irq");
	EADM_LOG_HEX(6, irb, sizeof(*irb));

	inc_irq_stat(IRQIO_ADM);

	if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
	    && scsw->eswf == 1 && irb->esw.eadm.erw.r)
		error = BLK_STS_IOERR;

	if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
		error = BLK_STS_TIMEOUT;

	eadm_subchannel_set_timeout(sch, 0);

	if (private->state != EADM_BUSY) {
		EADM_LOG(1, "irq unsol");
		EADM_LOG_HEX(1, irb, sizeof(*irb));
		private->state = EADM_NOT_OPER;
		css_sched_sch_todo(sch, SCH_TODO_EVAL);
		return;
	}
	scm_irq_handler(dma32_to_virt(scsw->aob), error);
	private->state = EADM_IDLE;

	if (private->completion)
		complete(private->completion);
}

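/*
 * Find an idle eadm subchannel, mark it busy and move it to the tail of
 * eadm_list so that subsequent requests are spread over all subchannels.
 */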
static struct subchannel *eadm_get_idle_sch(void)
{
	struct eadm_private *private;
	struct subchannel *sch;
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_for_each_entry(private, &eadm_list, head) {
		sch = private->sch;
		spin_lock(&sch->lock);
		if (private->state == EADM_IDLE) {
			private->state = EADM_BUSY;
			list_move_tail(&private->head, &eadm_list);
			spin_unlock(&sch->lock);
			spin_unlock_irqrestore(&list_lock, flags);

			return sch;
		}
		spin_unlock(&sch->lock);
	}
	spin_unlock_irqrestore(&list_lock, flags);

	return NULL;
}

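/*
 * Start the given AOB on an idle eadm subchannel, guarded by the timeout
 * timer. If the start fails, the subchannel is marked not operational and
 * scheduled for re-evaluation.
 */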
int eadm_start_aob(struct aob *aob)
{
	struct eadm_private *private;
	struct subchannel *sch;
	unsigned long flags;
	int ret;

	sch = eadm_get_idle_sch();
	if (!sch)
		return -EBUSY;

	spin_lock_irqsave(&sch->lock, flags);
	eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
	ret = eadm_subchannel_start(sch, aob);
	if (!ret)
		goto out_unlock;

	/* Handle start subchannel failure. */
	eadm_subchannel_set_timeout(sch, 0);
	private = get_eadm_private(sch);
	private->state = EADM_NOT_OPER;
	css_sched_sch_todo(sch, SCH_TODO_EVAL);

out_unlock:
	spin_unlock_irqrestore(&sch->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(eadm_start_aob);

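/*
 * Allocate the per-subchannel data, enable the subchannel for the eadm
 * ISC and add it to the list of usable eadm subchannels.
 */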
static int eadm_subchannel_probe(struct subchannel *sch)
{
	struct eadm_private *private;
	int ret;

	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
	if (!private)
		return -ENOMEM;

	INIT_LIST_HEAD(&private->head);
	timer_setup(&private->timer, eadm_subchannel_timeout, 0);

	spin_lock_irq(&sch->lock);
	set_eadm_private(sch, private);
	private->state = EADM_IDLE;
	private->sch = sch;
	sch->isc = EADM_SCH_ISC;
	ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch));
	if (ret) {
		set_eadm_private(sch, NULL);
		spin_unlock_irq(&sch->lock);
		kfree(private);
		goto out;
	}
	spin_unlock_irq(&sch->lock);

	spin_lock_irq(&list_lock);
	list_add(&private->head, &eadm_list);
	spin_unlock_irq(&list_lock);
out:
	return ret;
}

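/*
 * Wait for a running request to complete (clearing it if possible) and
 * disable the subchannel.
 */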
static void eadm_quiesce(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);
	DECLARE_COMPLETION_ONSTACK(completion);
	int ret;

	spin_lock_irq(&sch->lock);
	if (private->state != EADM_BUSY)
		goto disable;

	if (eadm_subchannel_clear(sch))
		goto disable;

	private->completion = &completion;
	spin_unlock_irq(&sch->lock);

	wait_for_completion_io(&completion);

	spin_lock_irq(&sch->lock);
	private->completion = NULL;

disable:
	eadm_subchannel_set_timeout(sch, 0);
	do {
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);

	spin_unlock_irq(&sch->lock);
}

static void eadm_subchannel_remove(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);

	spin_lock_irq(&list_lock);
	list_del(&private->head);
	spin_unlock_irq(&list_lock);

	eadm_quiesce(sch);

	spin_lock_irq(&sch->lock);
	set_eadm_private(sch, NULL);
	spin_unlock_irq(&sch->lock);

	kfree(private);
}

static void eadm_subchannel_shutdown(struct subchannel *sch)
{
	eadm_quiesce(sch);
}

/**
 * eadm_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
{
	struct eadm_private *private;
	unsigned long flags;

	spin_lock_irqsave(&sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;

	if (work_pending(&sch->todo_work))
		goto out_unlock;

	if (cio_update_schib(sch)) {
		css_sched_sch_todo(sch, SCH_TODO_UNREG);
		goto out_unlock;
	}
	private = get_eadm_private(sch);
	if (private->state == EADM_NOT_OPER)
		private->state = EADM_IDLE;

out_unlock:
	spin_unlock_irqrestore(&sch->lock, flags);

	return 0;
}

static struct css_device_id eadm_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, eadm_subchannel_ids);

static struct css_driver eadm_subchannel_driver = {
	.drv = {
		.name = "eadm_subchannel",
		.owner = THIS_MODULE,
	},
	.subchannel_type = eadm_subchannel_ids,
	.irq = eadm_subchannel_irq,
	.probe = eadm_subchannel_probe,
	.remove = eadm_subchannel_remove,
	.shutdown = eadm_subchannel_shutdown,
	.sch_event = eadm_subchannel_sch_event,
};

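/*
 * Module init: bail out if the machine lacks eadm support, then set up the
 * debug feature and register the eadm ISC and the css driver.
 */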
static int __init eadm_sch_init(void)
{
	int ret;

	if (!css_general_characteristics.eadm)
		return -ENXIO;

	eadm_debug = debug_register("eadm_log", 16, 1, 16);
	if (!eadm_debug)
		return -ENOMEM;

	debug_register_view(eadm_debug, &debug_hex_ascii_view);
	debug_set_level(eadm_debug, 2);

	isc_register(EADM_SCH_ISC);
	ret = css_driver_register(&eadm_subchannel_driver);
	if (ret)
		goto cleanup;

	return ret;

cleanup:
	isc_unregister(EADM_SCH_ISC);
	debug_unregister(eadm_debug);
	return ret;
}

static void __exit eadm_sch_exit(void)
{
	css_driver_unregister(&eadm_subchannel_driver);
	isc_unregister(EADM_SCH_ISC);
	debug_unregister(eadm_debug);
}
module_init(eadm_sch_init);
module_exit(eadm_sch_exit);