// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd-fba"

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <asm/debug.h>

#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO			    */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>

#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/ccwdev.h>

#include "dasd_int.h"
#include "dasd_fba.h"

#define FBA_DEFAULT_RETRIES 32

#define DASD_FBA_CCW_WRITE 0x41
#define DASD_FBA_CCW_READ 0x42
#define DASD_FBA_CCW_LOCATE 0x43
#define DASD_FBA_CCW_DEFINE_EXTENT 0x63

MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_fba_discipline;
static void *dasd_fba_zero_page;

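/* Per-device private data: the cached Read Device Characteristics. */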
struct dasd_fba_private {
	struct dasd_fba_characteristics rdc_data;
};

static struct ccw_device_id dasd_fba_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x6310, 0, 0x9336, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3370, 0), .driver_info = 0x2},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_fba_ids);

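/* Set an FBA device online, attaching it to the FBA discipline. */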
static int
dasd_fba_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_fba_discipline);
}

static struct ccw_driver dasd_fba_driver = {
	.driver = {
		.name	= "dasd-fba",
		.owner	= THIS_MODULE,
		.dev_groups = dasd_dev_groups,
	},
	.ids         = dasd_fba_ids,
	.probe       = dasd_generic_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_fba_set_online,
	.notify      = dasd_generic_notify,
	.path_event  = dasd_generic_path_event,
	.int_class   = IRQIO_DAS,
};

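/*
 * Build a Define Extent CCW: set the extent permissions according to the
 * transfer direction, the block size and the extent range.
 */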
static void
define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
	      int blksize, int beg, int nr)
{
	ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = virt_to_dma32(data);
	memset(data, 0, sizeof (struct DE_fba_data));
	if (rw == WRITE)
		(data->mask).perm = 0x0;
	else if (rw == READ)
		(data->mask).perm = 0x1;
	else
		data->mask.perm = 0x2;
	data->blk_size = blksize;
	data->ext_loc = beg;
	data->ext_end = nr - 1;
}

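/*
 * Build a Locate Record CCW: select the operation code for the transfer
 * direction and address block_ct blocks starting at block_nr within the
 * previously defined extent.
 */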
static void
locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
	      int block_nr, int block_ct)
{
	ccw->cmd_code = DASD_FBA_CCW_LOCATE;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = virt_to_dma32(data);
	memset(data, 0, sizeof (struct LO_fba_data));
	if (rw == WRITE)
		data->operation.cmd = 0x5;
	else if (rw == READ)
		data->operation.cmd = 0x6;
	else
		data->operation.cmd = 0x8;
	data->blk_nr = block_nr;
	data->blk_ct = block_ct;
}

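/*
 * Set up a new FBA device: allocate the private data and the block
 * structure, read the device characteristics and announce the device.
 */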
static int
dasd_fba_check_characteristics(struct dasd_device *device)
{
	struct dasd_fba_private *private = device->private;
	struct ccw_device *cdev = device->cdev;
	struct dasd_block *block;
	int readonly, rc;

	if (!private) {
		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
		if (!private) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD "
				 "data failed\n");
			return -ENOMEM;
		}
		device->private = private;
	} else {
		memset(private, 0, sizeof(*private));
	}
	block = dasd_alloc_block();
	if (IS_ERR(block)) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate "
				"dasd block structure");
		device->private = NULL;
		kfree(private);
		return PTR_ERR(block);
	}
	device->block = block;
	block->base = device;

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
					 &private->rdc_data, 32);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device "
				"characteristics returned error %d", rc);
		device->block = NULL;
		dasd_free_block(block);
		device->private = NULL;
		kfree(private);
		return rc;
	}

	device->default_expires = DASD_EXPIRES;
	device->default_retries = FBA_DEFAULT_RETRIES;
	dasd_path_set_opm(device, LPM_ANYPATH);

	readonly = dasd_device_is_ro(device);
	if (readonly)
		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);

	/* FBA supports discard, set the corresponding feature bit */
	dasd_set_feature(cdev, DASD_FEATURE_DISCARD, 1);

	dev_info(&device->cdev->dev,
		 "New FBA DASD %04X/%02X (CU %04X/%02X) with %d MB "
		 "and %d B/blk%s\n",
		 cdev->id.dev_type,
		 cdev->id.dev_model,
		 cdev->id.cu_type,
		 cdev->id.cu_model,
		 ((private->rdc_data.blk_bdsa *
		   (private->rdc_data.blk_size >> 9)) >> 11),
		 private->rdc_data.blk_size,
		 readonly ? ", read-only device" : "");
	return 0;
}

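/*
 * Derive the block layer parameters from the device characteristics:
 * number of blocks, block size and the 512-byte sector to block shift.
 */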
static int dasd_fba_do_analysis(struct dasd_block *block)
{
	struct dasd_fba_private *private = block->base->private;
	int sb, rc;

	rc = dasd_check_blocksize(private->rdc_data.blk_size);
	if (rc) {
		DBF_DEV_EVENT(DBF_WARNING, block->base, "unknown blocksize %d",
			    private->rdc_data.blk_size);
		return rc;
	}
	block->blocks = private->rdc_data.blk_bdsa;
	block->bp_block = private->rdc_data.blk_size;
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1)
		block->s2b_shift++;
	return 0;
}

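/*
 * Report a synthetic geometry for HDIO_GETGEO; FBA devices have no real
 * cylinder/head/sector layout.
 */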
static int dasd_fba_fill_geometry(struct dasd_block *block,
				  struct hd_geometry *geo)
{
	if (dasd_check_blocksize(block->bp_block) != 0)
		return -EINVAL;
	geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
	geo->heads = 16;
	geo->sectors = 128 >> block->s2b_shift;
	return 0;
}

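/* FBA error recovery uses the default ERP action and postaction. */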
static dasd_erp_fn_t
dasd_fba_erp_action(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_action;
}

static dasd_erp_fn_t
dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
{
	if (cqr->function == dasd_default_erp_action)
		return dasd_default_erp_postaction;

	DBF_DEV_EVENT(DBF_WARNING, cqr->startdev, "unknown ERP action %p",
		    cqr->function);
	return NULL;
}

static void dasd_fba_check_for_device_change(struct dasd_device *device,
					     struct dasd_ccw_req *cqr,
					     struct irb *irb)
{
	char mask;

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((irb->scsw.cmd.dstat & mask) == mask)
		dasd_generic_handle_state_change(device);
};


/*
 * Builds a CCW with no data payload
 */
static void ccw_write_no_data(struct ccw1 *ccw)
{
	ccw->cmd_code = DASD_FBA_CCW_WRITE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 0;
}

/*
 * Builds a CCW that writes only zeroes.
 */
static void ccw_write_zero(struct ccw1 *ccw, int count)
{
	ccw->cmd_code = DASD_FBA_CCW_WRITE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = count;
	ccw->cda = virt_to_dma32(dasd_fba_zero_page);
}

/*
 * Helper function to count the number of CCWs needed for a given range,
 * taking 4k alignment and command chaining into account.
 */
static int count_ccws(sector_t first_rec, sector_t last_rec,
		      unsigned int blocks_per_page)
{
	sector_t wz_stop = 0, d_stop = 0;
	int cur_pos = 0;
	int count = 0;

	if (first_rec % blocks_per_page != 0) {
		wz_stop = first_rec + blocks_per_page -
			(first_rec % blocks_per_page) - 1;
		if (wz_stop > last_rec)
			wz_stop = last_rec;
		cur_pos = wz_stop - first_rec + 1;
		count++;
	}

	if (last_rec - (first_rec + cur_pos) + 1 >= blocks_per_page) {
		if ((last_rec - blocks_per_page + 1) % blocks_per_page != 0)
			d_stop = last_rec - ((last_rec - blocks_per_page + 1) %
					     blocks_per_page);
		else
			d_stop = last_rec;

		cur_pos += d_stop - (first_rec + cur_pos) + 1;
		count++;
	}

	if (cur_pos == 0 || first_rec + cur_pos - 1 < last_rec)
		count++;

	return count;
}

/*
 * This function builds a CCW request for block layer discard requests.
 * Each page in the z/VM hypervisor that represents certain records of an FBA
 * device will be padded with zeros. This is a special behaviour of the WRITE
 * command which is triggered when no data payload is added to the CCW.
 *
 * Note: Due to issues in some z/VM versions, we can't fully utilise this
 * special behaviour. We have to keep a 4k (or 8 block) alignment in mind to
 * work around those issues and write actual zeroes to the unaligned parts in
 * the request. This workaround might be removed in the future.
 */
static struct dasd_ccw_req *dasd_fba_build_cp_discard(
						struct dasd_device *memdev,
						struct dasd_block *block,
						struct request *req)
{
	struct LO_fba_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	sector_t wz_stop = 0, d_stop = 0;
	sector_t first_rec, last_rec;

	unsigned int blksize = block->bp_block;
	unsigned int blocks_per_page;
	int wz_count = 0;
	int d_count = 0;
	int cur_pos = 0; /* Current position within the extent */
	int count = 0;
	int cplength;
	int datasize;
	int nr_ccws;

	first_rec = blk_rq_pos(req) >> block->s2b_shift;
	last_rec =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	count = last_rec - first_rec + 1;

	blocks_per_page = BLOCKS_PER_PAGE(blksize);
	nr_ccws = count_ccws(first_rec, last_rec, blocks_per_page);

	/* define extent + nr_ccws * locate record + nr_ccws * single CCW */
	cplength = 1 + 2 * nr_ccws;
	datasize = sizeof(struct DE_fba_data) +
		nr_ccws * (sizeof(struct LO_fba_data) + sizeof(struct ccw1));

	cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
				   blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	ccw = cqr->cpaddr;

	define_extent(ccw++, cqr->data, WRITE, blksize, first_rec, count);
	LO_data = cqr->data + sizeof(struct DE_fba_data);

	/* First part is not aligned. Calculate range to write zeroes. */
	if (first_rec % blocks_per_page != 0) {
		wz_stop = first_rec + blocks_per_page -
			(first_rec % blocks_per_page) - 1;
		if (wz_stop > last_rec)
			wz_stop = last_rec;
		wz_count = wz_stop - first_rec + 1;

		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, WRITE, cur_pos, wz_count);

		ccw[-1].flags |= CCW_FLAG_CC;
		ccw_write_zero(ccw++, wz_count * blksize);

		cur_pos = wz_count;
	}

	/* We can do proper discard when we've got at least blocks_per_page blocks. */
	if (last_rec - (first_rec + cur_pos) + 1 >= blocks_per_page) {
		/* is last record at page boundary? */
		if ((last_rec - blocks_per_page + 1) % blocks_per_page != 0)
			d_stop = last_rec - ((last_rec - blocks_per_page + 1) %
					     blocks_per_page);
		else
			d_stop = last_rec;

		d_count = d_stop - (first_rec + cur_pos) + 1;

		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, WRITE, cur_pos, d_count);

		ccw[-1].flags |= CCW_FLAG_CC;
		ccw_write_no_data(ccw++);

		cur_pos += d_count;
	}

	/* We might still have some bits left which need to be zeroed. */
	if (cur_pos == 0 || first_rec + cur_pos - 1 < last_rec) {
		if (d_stop != 0)
			wz_count = last_rec - d_stop;
		else if (wz_stop != 0)
			wz_count = last_rec - wz_stop;
		else
			wz_count = count;

		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, WRITE, cur_pos, wz_count);

		ccw[-1].flags |= CCW_FLAG_CC;
		ccw_write_zero(ccw++, wz_count * blksize);
	}

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);

	cqr->startdev = memdev;
	cqr->memdev = memdev;
	cqr->block = block;
	cqr->expires = memdev->default_expires * HZ;	/* default 5 minutes */
	cqr->retries = memdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	return cqr;
}

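/*
 * Build the channel program for a regular read/write request: one Define
 * Extent CCW, Locate Record CCW(s) and one data transfer CCW per block,
 * using IDA words where a data buffer requires them.
 */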
static struct dasd_ccw_req *dasd_fba_build_cp_regular(
						struct dasd_device *memdev,
						struct dasd_block *block,
						struct request *req)
{
	struct dasd_fba_private *private = block->base->private;
	dma64_t *idaws;
	struct LO_fba_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst;
	int count, cidaw, cplength, datasize;
	sector_t recid, first_rec, last_rec;
	unsigned int blksize, off;
	unsigned char cmd;

	if (rq_data_dir(req) == READ) {
		cmd = DASD_FBA_CCW_READ;
	} else if (rq_data_dir(req) == WRITE) {
		cmd = DASD_FBA_CCW_WRITE;
	} else
		return ERR_PTR(-EINVAL);
	blksize = block->bp_block;
	/* Calculate record id of first and last block. */
	first_rec = blk_rq_pos(req) >> block->s2b_shift;
	last_rec =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	cidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv.bv_len & (blksize - 1))
			/* FBA can only do full blocks. */
			return ERR_PTR(-EINVAL);
		count += bv.bv_len >> (block->s2b_shift + 9);
		if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
			cidaw += bv.bv_len / blksize;
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);
	/* 1x define extent + 1x locate record + number of blocks */
	cplength = 2 + count;
	/* 1x define extent + 1x locate record */
	datasize = sizeof(struct DE_fba_data) + sizeof(struct LO_fba_data) +
		cidaw * sizeof(unsigned long);
	/*
	 * Find out number of additional locate record ccws if the device
	 * can't do data chaining.
	 */
	if (private->rdc_data.mode.bits.data_chain == 0) {
		cplength += count - 1;
		datasize += (count - 1)*sizeof(struct LO_fba_data);
	}
	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
				   blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* First ccw is define extent. */
	define_extent(ccw++, cqr->data, rq_data_dir(req),
		      block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
	/* Build locate_record + read/write ccws. */
	idaws = (dma64_t *)(cqr->data + sizeof(struct DE_fba_data));
	LO_data = (struct LO_fba_data *) (idaws + cidaw);
	/* Locate record for all blocks for smart devices. */
	if (private->rdc_data.mode.bits.data_chain != 0) {
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
	}
	recid = first_rec;
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		if (dasd_page_cache) {
			char *copy = kmem_cache_alloc(dasd_page_cache,
						      GFP_DMA | __GFP_NOWARN);
			if (copy && rq_data_dir(req) == WRITE)
				memcpy(copy + bv.bv_offset, dst, bv.bv_len);
			if (copy)
				dst = copy + bv.bv_offset;
		}
		for (off = 0; off < bv.bv_len; off += blksize) {
			/* Locate record for stupid devices. */
			if (private->rdc_data.mode.bits.data_chain == 0) {
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw, LO_data++,
					      rq_data_dir(req),
					      recid - first_rec, 1);
				ccw->flags = CCW_FLAG_CC;
				ccw++;
			} else {
				if (recid > first_rec)
					ccw[-1].flags |= CCW_FLAG_DC;
				else
					ccw[-1].flags |= CCW_FLAG_CC;
			}
			ccw->cmd_code = cmd;
			ccw->count = block->bp_block;
			if (idal_is_needed(dst, blksize)) {
				ccw->cda = virt_to_dma32(idaws);
				ccw->flags = CCW_FLAG_IDA;
				idaws = idal_create_words(idaws, dst, blksize);
			} else {
				ccw->cda = virt_to_dma32(dst);
				ccw->flags = 0;
			}
			ccw++;
			dst += blksize;
			recid++;
		}
	}
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = memdev;
	cqr->memdev = memdev;
	cqr->block = block;
	cqr->expires = memdev->default_expires * HZ;	/* default 5 minutes */
	cqr->retries = memdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

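/* Dispatch between the discard/write-zeroes and the regular CP builders. */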
static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device *memdev,
					      struct dasd_block *block,
					      struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_ZEROES)
		return dasd_fba_build_cp_discard(memdev, block, req);
	else
		return dasd_fba_build_cp_regular(memdev, block, req);
}

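/*
 * Free a channel program. If dasd_page_cache copies were used, copy read
 * data back into the request and release the cache pages.
 */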
static int
dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_fba_private *private = cqr->block->base->private;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst, *cda;
	unsigned int blksize, off;
	int status;

	if (!dasd_page_cache)
		goto out;
	blksize = cqr->block->bp_block;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record. */
	ccw++;
	if (private->rdc_data.mode.bits.data_chain != 0)
		ccw++;
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		for (off = 0; off < bv.bv_len; off += blksize) {
			/* Skip locate record. */
			if (private->rdc_data.mode.bits.data_chain == 0)
				ccw++;
			if (dst) {
				if (ccw->flags & CCW_FLAG_IDA)
					cda = *((char **)dma32_to_virt(ccw->cda));
				else
					cda = dma32_to_virt(ccw->cda);
				if (dst != cda) {
					if (rq_data_dir(req) == READ)
						memcpy(dst, cda, bv.bv_len);
					kmem_cache_free(dasd_page_cache,
					    (void *)((addr_t)cda & PAGE_MASK));
				}
				dst = NULL;
			}
			ccw++;
		}
	}
out:
	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->memdev);
	return status;
}

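/* Requeue a terminated request unless its retry counter is exhausted. */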
static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr)
{
	if (cqr->retries < 0)
		cqr->status = DASD_CQR_FAILED;
	else
		cqr->status = DASD_CQR_FILLED;
};

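/* Fill the dasd_information2_t structure for the DASD information ioctl. */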
static int
dasd_fba_fill_info(struct dasd_device * device,
		   struct dasd_information2_t * info)
{
	struct dasd_fba_private *private = device->private;

	info->label_block = 1;
	info->FBA_layout = 1;
	info->format = DASD_FORMAT_LDL;
	info->characteristics_size = sizeof(private->rdc_data);
	memcpy(info->characteristics, &private->rdc_data,
	       sizeof(private->rdc_data));
	info->confdata_size = 0;
	return 0;
}

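/* Write a condensed sense dump to the debug feature. */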
static void
dasd_fba_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
			char *reason)
{
	u64 *sense;

	sense = (u64 *) dasd_get_sense(irb);
	if (sense) {
		DBF_DEV_EVENT(DBF_EMERG, device,
			      "%s: %s %02x%02x%02x %016llx %016llx %016llx "
			      "%016llx", reason,
			      scsw_is_tm(&irb->scsw) ? "t" : "c",
			      scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
			      scsw_dstat(&irb->scsw), sense[0], sense[1],
			      sense[2], sense[3]);
	} else {
		DBF_DEV_EVENT(DBF_EMERG, device, "%s",
			      "SORRY - NO VALID SENSE AVAILABLE\n");
	}
}

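/*
 * Dump the I/O status, the sense data and the related channel program
 * to the kernel log.
 */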
static void
dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
		    struct irb *irb)
{
	struct ccw1 *act, *end, *last;
	int len, sl, sct, count;
	struct device *dev;
	char *page;

	dev = &device->cdev->dev;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "No memory to dump sense data");
		return;
	}
	len = sprintf(page, "I/O status report:\n");
	len += sprintf(page + len, "in req: %px CS: 0x%02X DS: 0x%02X\n",
		       req, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
	len += sprintf(page + len, "Failing CCW: %px\n",
		       (void *)(u64)dma32_to_u32(irb->scsw.cmd.cpa));
	if (irb->esw.esw0.erw.cons) {
		for (sl = 0; sl < 4; sl++) {
			len += sprintf(page + len, "Sense(hex) %2d-%2d:",
				       (8 * sl), ((8 * sl) + 7));

			for (sct = 0; sct < 8; sct++) {
				len += sprintf(page + len, " %02x",
					       irb->ecw[8 * sl + sct]);
			}
			len += sprintf(page + len, "\n");
		}
	} else {
		len += sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n");
	}
	dev_err(dev, "%s", page);

	/* dump the Channel Program */
	/* print first CCWs (maximum 8) */
	act = req->cpaddr;
	for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
	end = min(act + 8, last);
	len = sprintf(page, "Related CP in req: %px\n", req);
	while (act <= end) {
		len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
			       act, ((int *) act)[0], ((int *) act)[1]);
		for (count = 0; count < 32 && count < act->count;
		     count += sizeof(int))
			len += sprintf(page + len, " %08X",
				       ((int *)dma32_to_virt(act->cda))
				       [(count>>2)]);
		len += sprintf(page + len, "\n");
		act++;
	}
	dev_err(dev, "%s", page);

	/* print failing CCW area */
	len = 0;
	if (act < ((struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa)) - 2) {
		act = ((struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa)) - 2;
		len += sprintf(page + len, "......\n");
	}
	end = min((struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa) + 2, last);
	while (act <= end) {
		len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
			       act, ((int *) act)[0], ((int *) act)[1]);
		for (count = 0; count < 32 && count < act->count;
		     count += sizeof(int))
			len += sprintf(page + len, " %08X",
				       ((int *)dma32_to_virt(act->cda))
				       [(count>>2)]);
		len += sprintf(page + len, "\n");
		act++;
	}

	/* print last CCWs */
	if (act <  last - 2) {
		act = last - 2;
		len += sprintf(page + len, "......\n");
	}
	while (act <= last) {
		len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
			       act, ((int *) act)[0], ((int *) act)[1]);
		for (count = 0; count < 32 && count < act->count;
		     count += sizeof(int))
			len += sprintf(page + len, " %08X",
				       ((int *)dma32_to_virt(act->cda))
				       [(count>>2)]);
		len += sprintf(page + len, "\n");
		act++;
	}
	if (len > 0)
		dev_err(dev, "%s", page);
	free_page((unsigned long) page);
}

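/* Maximum request size in 512-byte sectors for a single channel program. */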
static unsigned int dasd_fba_max_sectors(struct dasd_block *block)
{
	return DASD_FBA_MAX_BLOCKS << block->s2b_shift;
}

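/* Path event handler: let the generic code verify the paths in tbvpm. */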
static int dasd_fba_pe_handler(struct dasd_device *device,
			       __u8 tbvpm, __u8 fcsecpm)
{
	return dasd_generic_verify_path(device, tbvpm);
}

static struct dasd_discipline dasd_fba_discipline = {
	.owner = THIS_MODULE,
	.name = "FBA ",
	.ebcname = "FBA ",
	.has_discard = true,
	.check_device = dasd_fba_check_characteristics,
	.do_analysis = dasd_fba_do_analysis,
	.pe_handler = dasd_fba_pe_handler,
	.max_sectors = dasd_fba_max_sectors,
	.fill_geometry = dasd_fba_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_fba_handle_terminated_request,
	.erp_action = dasd_fba_erp_action,
	.erp_postaction = dasd_fba_erp_postaction,
	.check_for_device_change = dasd_fba_check_for_device_change,
	.build_cp = dasd_fba_build_cp,
	.free_cp = dasd_fba_free_cp,
	.dump_sense = dasd_fba_dump_sense,
	.dump_sense_dbf = dasd_fba_dump_sense_dbf,
	.fill_info = dasd_fba_fill_info,
};

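/*
 * Module init: convert the discipline name to EBCDIC, allocate the shared
 * zero page and register the CCW driver.
 */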
static int __init
dasd_fba_init(void)
{
	int ret;

	ASCEBC(dasd_fba_discipline.ebcname, 4);

	dasd_fba_zero_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!dasd_fba_zero_page)
		return -ENOMEM;

	ret = ccw_driver_register(&dasd_fba_driver);
	if (!ret)
		wait_for_device_probe();

	return ret;
}

static void __exit
dasd_fba_cleanup(void)
{
	ccw_driver_unregister(&dasd_fba_driver);
	free_page((unsigned long)dasd_fba_zero_page);
}

module_init(dasd_fba_init);
module_exit(dasd_fba_cleanup);