1/*
2 *  drivers/s390/cio/chsc.c
3 *   S/390 common I/O routines -- channel subsystem call
4 *
5 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
6 *			      IBM Corporation
7 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
8 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
9 *		 Arnd Bergmann (arndb@de.ibm.com)
10 */
11
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/init.h>
15#include <linux/device.h>
16
17#include <asm/cio.h>
18#include <asm/chpid.h>
19
20#include "css.h"
21#include "cio.h"
22#include "cio_debug.h"
23#include "ioasm.h"
24#include "chp.h"
25#include "chsc.h"
26
27static void *sei_page;
28
/*
 * Request/response block for the store-subchannel-description chsc
 * (operation code 0x0004).  Unnamed bit fields are reserved areas of
 * the hardware-defined layout.
 */
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;	  /* subchannel-set id */
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1; /* response contains subchannel data */
	u8 dev_valid : 1; /* response contains device data */
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8  unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;	  /* qualifies chpid[] entries */
	u8 fla_valid_mask; /* qualifies fla[] entries */
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));
52
53int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
54{
55	unsigned long page;
56	struct chsc_ssd_area *ssd_area;
57	int ccode;
58	int ret;
59	int i;
60	int mask;
61
62	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
63	if (!page)
64		return -ENOMEM;
65	ssd_area = (struct chsc_ssd_area *) page;
66	ssd_area->request.length = 0x0010;
67	ssd_area->request.code = 0x0004;
68	ssd_area->ssid = schid.ssid;
69	ssd_area->f_sch = schid.sch_no;
70	ssd_area->l_sch = schid.sch_no;
71
72	ccode = chsc(ssd_area);
73	/* Check response. */
74	if (ccode > 0) {
75		ret = (ccode == 3) ? -ENODEV : -EBUSY;
76		goto out_free;
77	}
78	if (ssd_area->response.code != 0x0001) {
79		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
80			      schid.ssid, schid.sch_no,
81			      ssd_area->response.code);
82		ret = -EIO;
83		goto out_free;
84	}
85	if (!ssd_area->sch_valid) {
86		ret = -ENODEV;
87		goto out_free;
88	}
89	/* Copy data */
90	ret = 0;
91	memset(ssd, 0, sizeof(struct chsc_ssd_info));
92	if ((ssd_area->st != 0) && (ssd_area->st != 2))
93		goto out_free;
94	ssd->path_mask = ssd_area->path_mask;
95	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
96	for (i = 0; i < 8; i++) {
97		mask = 0x80 >> i;
98		if (ssd_area->path_mask & mask) {
99			chp_id_init(&ssd->chpid[i]);
100			ssd->chpid[i].id = ssd_area->chpid[i];
101		}
102		if (ssd_area->fla_valid_mask & mask)
103			ssd->fla[i] = ssd_area->fla[i];
104	}
105out_free:
106	free_page(page);
107	return ret;
108}
109
110static int check_for_io_on_path(struct subchannel *sch, int mask)
111{
112	int cc;
113
114	cc = stsch(sch->schid, &sch->schib);
115	if (cc)
116		return 0;
117	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
118		return 1;
119	return 0;
120}
121
122static void terminate_internal_io(struct subchannel *sch)
123{
124	if (cio_clear(sch)) {
125		/* Recheck device in case clear failed. */
126		sch->lpm = 0;
127		if (device_trigger_verify(sch) != 0)
128			css_schedule_eval(sch->schid);
129		return;
130	}
131	/* Request retry of internal operation. */
132	device_set_intretry(sch);
133	/* Call handler. */
134	if (sch->driver && sch->driver->termination)
135		sch->driver->termination(&sch->dev);
136}
137
/*
 * bus_for_each_dev() callback for chsc_chp_offline(): remove the
 * channel path given by @data (a struct chp_id *) from one subchannel.
 * Always returns 0 so the bus iteration continues.
 */
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
	int j;
	int mask;
	struct subchannel *sch;
	struct chp_id *chpid;
	struct schib schib;

	sch = to_subchannel(dev);
	chpid = data;
	/* Locate the chpid in the subchannel's installed-path mask. */
	for (j = 0; j < 8; j++) {
		mask = 0x80 >> j;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[j] == chpid->id))
			break;
	}
	if (j >= 8)
		/* Subchannel does not use this channel path. */
		return 0;

	spin_lock_irq(sch->lock);

	stsch(sch->schid, &schib);
	if (!schib.pmcw.dnv)
		/* Device number no longer valid - reevaluate. */
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;

	if (check_for_io_on_path(sch, mask)) {
		if (device_is_online(sch))
			device_kill_io(sch);
		else {
			terminate_internal_io(sch);
			/* Re-start path verification. */
			if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		}
	} else {
		/* trigger path verification. */
		if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);
		else if (sch->lpm == mask)
			/* Last usable path is gone. */
			goto out_unreg;
	}

	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	/* No usable path left - hand over to the slow path. */
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}
194
195void chsc_chp_offline(struct chp_id chpid)
196{
197	char dbf_txt[15];
198
199	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
200	CIO_TRACE_EVENT(2, dbf_txt);
201
202	if (chp_get_status(chpid) <= 0)
203		return;
204	bus_for_each_dev(&css_bus_type, NULL, &chpid,
205			 s390_subchannel_remove_chpid);
206}
207
208static int
209s390_process_res_acc_new_sch(struct subchannel_id schid)
210{
211	struct schib schib;
212	/*
213	 * We don't know the device yet, but since a path
214	 * may be available now to the device we'll have
215	 * to do recognition again.
216	 * Since we don't have any idea about which chpid
217	 * that beast may be on we'll have to do a stsch
218	 * on all devices, grr...
219	 */
220	if (stsch_err(schid, &schib))
221		/* We're through */
222		return -ENXIO;
223
224	/* Put it on the slow path. */
225	css_schedule_eval(schid);
226	return 0;
227}
228
/* Parameters describing a resource-accessibility event. */
struct res_acc_data {
	struct chp_id chpid;	/* affected channel path */
	u32 fla_mask;	/* which bits of a full link address to compare */
	u16 fla;	/* (partial) link address to match against */
};
234
235static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
236			      struct res_acc_data *data)
237{
238	int i;
239	int mask;
240
241	for (i = 0; i < 8; i++) {
242		mask = 0x80 >> i;
243		if (!(ssd->path_mask & mask))
244			continue;
245		if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
246			continue;
247		if ((ssd->fla_valid_mask & mask) &&
248		    ((ssd->fla[i] & data->fla_mask) != data->fla))
249			continue;
250		return mask;
251	}
252	return 0;
253}
254
/*
 * for_each_subchannel() callback for s390_process_res_acc(): update the
 * path state of one subchannel after a resource-accessibility event.
 * Returns 0 to continue the iteration, or the result of
 * s390_process_res_acc_new_sch() for subchannels not yet registered.
 */
static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
	int chp_mask, old_lpm;
	struct res_acc_data *res_data;
	struct subchannel *sch;

	res_data = data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if a subchannel is newly available. */
		return s390_process_res_acc_new_sch(schid);

	spin_lock_irq(sch->lock);
	chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
	if (chp_mask == 0)
		/* Event does not concern any path of this subchannel. */
		goto out;
	if (stsch(sch->schid, &sch->schib))
		goto out;
	old_lpm = sch->lpm;
	/* Recompute the logical path mask including the reported path. */
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		/* Device was unreachable before - redo recognition. */
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
out:
	spin_unlock_irq(sch->lock);
	/* Drop the reference taken by get_subchannel_by_schid(). */
	put_device(&sch->dev);
	return 0;
}
288
289static void s390_process_res_acc (struct res_acc_data *res_data)
290{
291	char dbf_txt[15];
292
293	sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
294		res_data->chpid.id);
295	CIO_TRACE_EVENT( 2, dbf_txt);
296	if (res_data->fla != 0) {
297		sprintf(dbf_txt, "fla%x", res_data->fla);
298		CIO_TRACE_EVENT( 2, dbf_txt);
299	}
300
301	/*
302	 * I/O resources may have become accessible.
303	 * Scan through all subchannels that may be concerned and
304	 * do a validation on those.
305	 * The more information we have (info), the less scanning
306	 * will we have to do.
307	 */
308	for_each_subchannel(__s390_process_res_acc, res_data);
309}
310
311static int
312__get_chpid_from_lir(void *data)
313{
314	struct lir {
315		u8  iq;
316		u8  ic;
317		u16 sci;
318		/* incident-node descriptor */
319		u32 indesc[28];
320		/* attached-node descriptor */
321		u32 andesc[28];
322		/* incident-specific information */
323		u32 isinfo[28];
324	} __attribute__ ((packed)) *lir;
325
326	lir = data;
327	if (!(lir->iq&0x80))
328		/* NULL link incident record */
329		return -EINVAL;
330	if (!(lir->indesc[0]&0xc0000000))
331		/* node descriptor not valid */
332		return -EINVAL;
333	if (!(lir->indesc[0]&0x10000000))
334		return -EINVAL;
335	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
336
337	return (u16) (lir->indesc[0]&0x000000ff);
338}
339
/*
 * Request/response block for the store-event-information chsc
 * (operation code 0x000e).
 */
struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;	/* bit 0x80: more events pending; 0x40: overflow */
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));
358
359static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
360{
361	struct chp_id chpid;
362	int id;
363
364	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
365		      sei_area->rs, sei_area->rsid);
366	if (sei_area->rs != 4)
367		return;
368	id = __get_chpid_from_lir(sei_area->ccdf);
369	if (id < 0)
370		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
371	else {
372		chp_id_init(&chpid);
373		chpid.id = id;
374		chsc_chp_offline(chpid);
375	}
376}
377
378static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
379{
380	struct res_acc_data res_data;
381	struct chp_id chpid;
382	int status;
383
384	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
385		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
386	if (sei_area->rs != 4)
387		return;
388	chp_id_init(&chpid);
389	chpid.id = sei_area->rsid;
390	/* allocate a new channel path structure, if needed */
391	status = chp_get_status(chpid);
392	if (status < 0)
393		chp_new(chpid);
394	else if (!status)
395		return;
396	memset(&res_data, 0, sizeof(struct res_acc_data));
397	res_data.chpid = chpid;
398	if ((sei_area->vf & 0xc0) != 0) {
399		res_data.fla = sei_area->fla;
400		if ((sei_area->vf & 0xc0) == 0xc0)
401			/* full link address */
402			res_data.fla_mask = 0xffff;
403		else
404			/* link address */
405			res_data.fla_mask = 0xff00;
406	}
407	s390_process_res_acc(&res_data);
408}
409
/* Layout of the ccdf for a channel-path-configuration event (cc 8). */
struct chp_config_data {
	u8 map[32];	/* bitmap of affected chpids */
	u8 op;		/* requested configure operation */
	u8 pc;
};
415
416static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
417{
418	struct chp_config_data *data;
419	struct chp_id chpid;
420	int num;
421
422	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
423	if (sei_area->rs != 0)
424		return;
425	data = (struct chp_config_data *) &(sei_area->ccdf);
426	chp_id_init(&chpid);
427	for (num = 0; num <= __MAX_CHPID; num++) {
428		if (!chp_test_bit(data->map, num))
429			continue;
430		chpid.id = num;
431		printk(KERN_WARNING "cio: processing configure event %d for "
432		       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
433		switch (data->op) {
434		case 0:
435			chp_cfg_schedule(chpid, 1);
436			break;
437		case 1:
438			chp_cfg_schedule(chpid, 0);
439			break;
440		case 2:
441			chp_cfg_cancel_deconfigure(chpid);
442			break;
443		}
444	}
445}
446
447static void chsc_process_sei(struct chsc_sei_area *sei_area)
448{
449	/* Check if we might have lost some information. */
450	if (sei_area->flags & 0x40) {
451		CIO_CRW_EVENT(2, "chsc: event overflow\n");
452		css_schedule_eval_all();
453	}
454	/* which kind of information was stored? */
455	switch (sei_area->cc) {
456	case 1: /* link incident*/
457		chsc_process_sei_link_incident(sei_area);
458		break;
459	case 2: /* i/o resource accessibiliy */
460		chsc_process_sei_res_acc(sei_area);
461		break;
462	case 8: /* channel-path-configuration notification */
463		chsc_process_sei_chp_config(sei_area);
464		break;
465	default: /* other stuff */
466		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
467			      sei_area->cc);
468		break;
469	}
470}
471
472void chsc_process_crw(void)
473{
474	struct chsc_sei_area *sei_area;
475
476	if (!sei_page)
477		return;
478	/* Access to sei_page is serialized through machine check handler
479	 * thread, so no need for locking. */
480	sei_area = sei_page;
481
482	CIO_TRACE_EVENT( 2, "prcss");
483	do {
484		memset(sei_area, 0, sizeof(*sei_area));
485		sei_area->request.length = 0x0010;
486		sei_area->request.code = 0x000e;
487		if (chsc(sei_area))
488			break;
489
490		if (sei_area->response.code == 0x0001) {
491			CIO_CRW_EVENT(4, "chsc: sei successful\n");
492			chsc_process_sei(sei_area);
493		} else {
494			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
495				      sei_area->response.code);
496			break;
497		}
498	} while (sei_area->flags & 0x80);
499}
500
501static int
502__chp_add_new_sch(struct subchannel_id schid)
503{
504	struct schib schib;
505
506	if (stsch_err(schid, &schib))
507		/* We're through */
508		return -ENXIO;
509
510	/* Put it on the slow path. */
511	css_schedule_eval(schid);
512	return 0;
513}
514
515
516static int
517__chp_add(struct subchannel_id schid, void *data)
518{
519	int i, mask;
520	struct chp_id *chpid;
521	struct subchannel *sch;
522
523	chpid = data;
524	sch = get_subchannel_by_schid(schid);
525	if (!sch)
526		/* Check if the subchannel is now available. */
527		return __chp_add_new_sch(schid);
528	spin_lock_irq(sch->lock);
529	for (i=0; i<8; i++) {
530		mask = 0x80 >> i;
531		if ((sch->schib.pmcw.pim & mask) &&
532		    (sch->schib.pmcw.chpid[i] == chpid->id)) {
533			if (stsch(sch->schid, &sch->schib) != 0) {
534				/* Endgame. */
535				spin_unlock_irq(sch->lock);
536				return -ENXIO;
537			}
538			break;
539		}
540	}
541	if (i==8) {
542		spin_unlock_irq(sch->lock);
543		return 0;
544	}
545	sch->lpm = ((sch->schib.pmcw.pim &
546		     sch->schib.pmcw.pam &
547		     sch->schib.pmcw.pom)
548		    | mask) & sch->opm;
549
550	if (sch->driver && sch->driver->verify)
551		sch->driver->verify(&sch->dev);
552
553	spin_unlock_irq(sch->lock);
554	put_device(&sch->dev);
555	return 0;
556}
557
558void chsc_chp_online(struct chp_id chpid)
559{
560	char dbf_txt[15];
561
562	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
563	CIO_TRACE_EVENT(2, dbf_txt);
564
565	if (chp_get_status(chpid) != 0)
566		for_each_subchannel(__chp_add, &chpid);
567}
568
/*
 * Vary the channel path @chpid logically on (@on != 0) or off for one
 * subchannel: adjust its opm/lpm masks and trigger the appropriate
 * follow-up action (reprobe, path verification or slow-path
 * reevaluation).
 */
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	int chp, old_lpm;
	int mask;
	unsigned long flags;

	spin_lock_irqsave(sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (!(sch->ssd_info.path_mask & mask))
			continue;
		if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
			continue;

		if (on) {
			/* Vary on: add the path to both masks. */
			sch->opm |= mask;
			sch->lpm |= mask;
			if (!old_lpm)
				/* Device had no usable path before. */
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
			break;
		}
		/* Vary off: remove the path from both masks. */
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (check_for_io_on_path(sch, mask)) {
			if (device_is_online(sch))
				/* Path verification is done after killing. */
				device_kill_io(sch);
			else {
				/* Kill and retry internal I/O. */
				terminate_internal_io(sch);
				/* Re-start path verification. */
				if (sch->driver && sch->driver->verify)
					sch->driver->verify(&sch->dev);
			}
		} else if (!sch->lpm) {
			/* No usable path left. */
			if (device_trigger_verify(sch) != 0)
				css_schedule_eval(sch->schid);
		} else if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
}
616
617static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
618{
619	struct subchannel *sch;
620	struct chp_id *chpid;
621
622	sch = to_subchannel(dev);
623	chpid = data;
624
625	__s390_subchannel_vary_chpid(sch, *chpid, 0);
626	return 0;
627}
628
629static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
630{
631	struct subchannel *sch;
632	struct chp_id *chpid;
633
634	sch = to_subchannel(dev);
635	chpid = data;
636
637	__s390_subchannel_vary_chpid(sch, *chpid, 1);
638	return 0;
639}
640
641static int
642__s390_vary_chpid_on(struct subchannel_id schid, void *data)
643{
644	struct schib schib;
645	struct subchannel *sch;
646
647	sch = get_subchannel_by_schid(schid);
648	if (sch) {
649		put_device(&sch->dev);
650		return 0;
651	}
652	if (stsch_err(schid, &schib))
653		/* We're through */
654		return -ENXIO;
655	/* Put it on the slow path. */
656	css_schedule_eval(schid);
657	return 0;
658}
659
660/**
661 * chsc_chp_vary - propagate channel-path vary operation to subchannels
662 * @chpid: channl-path ID
663 * @on: non-zero for vary online, zero for vary offline
664 */
665int chsc_chp_vary(struct chp_id chpid, int on)
666{
667	/*
668	 * Redo PathVerification on the devices the chpid connects to
669	 */
670
671	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
672			 s390_subchannel_vary_chpid_on :
673			 s390_subchannel_vary_chpid_off);
674	if (on)
675		/* Scan for new devices on varied on path. */
676		for_each_subchannel(__s390_vary_chpid_on, NULL);
677	return 0;
678}
679
680static void
681chsc_remove_cmg_attr(struct channel_subsystem *css)
682{
683	int i;
684
685	for (i = 0; i <= __MAX_CHPID; i++) {
686		if (!css->chps[i])
687			continue;
688		chp_remove_cmg_attr(css->chps[i]);
689	}
690}
691
692static int
693chsc_add_cmg_attr(struct channel_subsystem *css)
694{
695	int i, ret;
696
697	ret = 0;
698	for (i = 0; i <= __MAX_CHPID; i++) {
699		if (!css->chps[i])
700			continue;
701		ret = chp_add_cmg_attr(css->chps[i]);
702		if (ret)
703			goto cleanup;
704	}
705	return ret;
706cleanup:
707	for (--i; i >= 0; i--) {
708		if (!css->chps[i])
709			continue;
710		chp_remove_cmg_attr(css->chps[i]);
711	}
712	return ret;
713}
714
/*
 * Issue a set-channel-monitor chsc (operation code 0x0016) to enable
 * (@enable != 0) or disable channel measurement for @css, using @page
 * as the 4K request/response area.  Returns 0 on success or a negative
 * errno mapped from the condition code / response code.
 */
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	/* Addresses of the channel-utilization blocks. */
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	/* 0 = enable, 1 = disable measurement. */
	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	/* Map the chsc response code to an errno. */
	switch (secm_area->response.code) {
	case 0x0001: /* Success. */
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide secm\n");
		ret = -EOPNOTSUPP;
		break;
	case 0x0102: /* cub addresses incorrect */
		CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
		ret = -EINVAL;
		break;
	case 0x0103: /* key error */
		CIO_CRW_EVENT(2, "Access key error in secm\n");
		ret = -EINVAL;
		break;
	case 0x0105: /* error while starting */
		CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
		ret = -EIO;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      secm_area->response.code);
		ret = -EIO;
	}
	return ret;
}
784
/*
 * Switch channel measurement for @css on (@enable != 0) or off.
 * Allocates the channel-utilization blocks on first enable, issues the
 * secm chsc and keeps the measurement sysfs attributes in sync.
 * Returns 0 on success, negative errno otherwise.
 */
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void  *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL |  GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	mutex_lock(&css->mutex);
	if (enable && !css->cm_enabled) {
		/* First-time enable: allocate the utilization blocks. */
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			/* free_page() of a NULL pointer is a no-op. */
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			mutex_unlock(&css->mutex);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				/* Attribute creation failed - switch
				 * measurement off again. */
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		/* NOTE(review): cub_addr1/2 are freed but not reset to
		 * NULL; a repeated disable would free them again - verify
		 * callers cannot reach that. */
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	mutex_unlock(&css->mutex);
	free_page((unsigned long)secm_area);
	return ret;
}
828
/*
 * Issue a store-channel-path-description chsc (operation code 0x0002)
 * for a single @chpid and copy the result into @desc.  Returns 0 on
 * success, negative errno otherwise.
 */
int chsc_determine_channel_path_description(struct chp_id chpid,
					    struct channel_path_desc *desc)
{
	int ccode, ret;

	/* Hardware-defined request/response layout for scpd. */
	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} __attribute__ ((packed)) *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	/* Query exactly one channel path. */
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	/* Map the chsc response code to an errno. */
	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scpd_area);
	return ret;
}
887
888static void
889chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
890			  struct cmg_chars *chars)
891{
892	switch (chp->cmg) {
893	case 2:
894	case 3:
895		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
896					 GFP_KERNEL);
897		if (chp->cmg_chars) {
898			int i, mask;
899			struct cmg_chars *cmg_chars;
900
901			cmg_chars = chp->cmg_chars;
902			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
903				mask = 0x80 >> (i + 3);
904				if (cmcv & mask)
905					cmg_chars->values[i] = chars->values[i];
906				else
907					cmg_chars->values[i] = 0;
908			}
909		}
910		break;
911	default:
912		/* No cmg-dependent data. */
913		break;
914	}
915}
916
/*
 * Issue a store-channel-path-measurement-characteristics chsc
 * (operation code 0x0022) for @chp and store cmg, shared state and the
 * cmg-dependent characteristics on the channel path.  If the hardware
 * flags the data as not valid, cmg and shared are set to -1.
 * Returns 0 on success, negative errno otherwise.
 */
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	/* Hardware-defined request/response layout for scmc. */
	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	/* Query exactly one channel path. */
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	/* Map the chsc response code to an errno. */
	switch (scmc_area->response.code) {
	case 0x0001: /* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			/* Measurement data not valid for this path. */
			chp->cmg = -1;
			chp->shared = -1;
		}
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Invalid bit combination. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided. */
		CIO_CRW_EVENT(2, "Model does not provide scmc\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scmc_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}
991
992static int __init
993chsc_alloc_sei_area(void)
994{
995	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
996	if (!sei_page)
997		printk(KERN_WARNING"Can't allocate page for processing of " \
998		       "chsc machine checks!\n");
999	return (sei_page ? 0 : -ENOMEM);
1000}
1001
/*
 * Enable a chsc facility by issuing an sda chsc (operation code
 * 0x0031) with the given @operation_code.  Returns 0 on success,
 * -ENOMEM, -ENODEV, -EBUSY, -EINVAL, -EOPNOTSUPP or -EIO otherwise.
 */
int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	/* Hardware-defined request/response layout for sda. */
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		/* Map the condition code to an errno. */
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	/* Map the chsc response code to an errno. */
	switch (sda_area->response.code) {
	case 0x0001: /* everything ok */
		ret = 0;
		break;
	case 0x0003: /* invalid request block */
	case 0x0007:
		ret = -EINVAL;
		break;
	case 0x0004: /* command not provided */
	case 0x0101: /* facility not provided */
		ret = -EOPNOTSUPP;
		break;
	default: /* something went wrong */
		ret = -EIO;
	}
 out:
	free_page((unsigned long)sda_area);
	return ret;
}
1052
1053subsys_initcall(chsc_alloc_sei_area);
1054
1055struct css_general_char css_general_characteristics;
1056struct css_chsc_char css_chsc_characteristics;
1057
1058int __init
1059chsc_determine_css_characteristics(void)
1060{
1061	int result;
1062	struct {
1063		struct chsc_header request;
1064		u32 reserved1;
1065		u32 reserved2;
1066		u32 reserved3;
1067		struct chsc_header response;
1068		u32 reserved4;
1069		u32 general_char[510];
1070		u32 chsc_char[518];
1071	} __attribute__ ((packed)) *scsc_area;
1072
1073	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1074	if (!scsc_area) {
1075	        printk(KERN_WARNING"cio: Was not able to determine available" \
1076		       "CHSCs due to no memory.\n");
1077		return -ENOMEM;
1078	}
1079
1080	scsc_area->request.length = 0x0010;
1081	scsc_area->request.code = 0x0010;
1082
1083	result = chsc(scsc_area);
1084	if (result) {
1085		printk(KERN_WARNING"cio: Was not able to determine " \
1086		       "available CHSCs, cc=%i.\n", result);
1087		result = -EIO;
1088		goto exit;
1089	}
1090
1091	if (scsc_area->response.code != 1) {
1092		printk(KERN_WARNING"cio: Was not able to determine " \
1093		       "available CHSCs.\n");
1094		result = -EIO;
1095		goto exit;
1096	}
1097	memcpy(&css_general_characteristics, scsc_area->general_char,
1098	       sizeof(css_general_characteristics));
1099	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
1100	       sizeof(css_chsc_characteristics));
1101exit:
1102	free_page ((unsigned long) scsc_area);
1103	return result;
1104}
1105
1106EXPORT_SYMBOL_GPL(css_general_characteristics);
1107EXPORT_SYMBOL_GPL(css_chsc_characteristics);
1108