/* sunvdc.c: Sun LDOM Virtual Disk Client.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#define DRV_MODULE_NAME		"sunvdc"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"June 25, 2007"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define VDC_TX_RING_SIZE	256

#define WAITING_FOR_LINK_UP	0x01
#define WAITING_FOR_TX_SPACE	0x02
#define WAITING_FOR_GEN_CMD	0x04
#define WAITING_FOR_ANY		-1

struct vdc_req_entry {
	struct request		*req;
};

struct vdc_port {
	struct vio_driver_state	vio;

	struct gendisk		*disk;

	struct vdc_completion	*cmp;

	u64			req_id;
	u64			seq;
	struct vdc_req_entry	rq_arr[VDC_TX_RING_SIZE];

	unsigned long		ring_cookies;

	u64			max_xfer_size;
	u32			vdisk_block_size;

	/* The server fills these in for us in the disk attribute
	 * ACK packet.
	 */
	u64			operations;
	u32			vdisk_size;
	u8			vdisk_type;

	char			disk_name[32];

	struct vio_disk_geom	geom;
	struct vio_disk_vtoc	label;
};

static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
	return container_of(vio, struct vdc_port, vio);
}

/* Ordered from largest major to lowest */
static struct vio_version vdc_versions[] = {
	{ .major = 1, .minor = 0 },
};

#define VDCBLK_NAME	"vdisk"
static int vdc_major;
#define PARTITION_SHIFT	3

static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}
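/* Report the geometry the server gave us in the attribute exchange;
 * this backs the HDIO_GETGEO ioctl via the getgeo block device op.
 */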
static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct gendisk *disk = bdev->bd_disk;
	struct vdc_port *port = disk->private_data;

	geo->heads = (u8) port->geom.num_hd;
	geo->sectors = (u8) port->geom.num_sec;
	geo->cylinders = port->geom.num_cyl;

	return 0;
}

static const struct block_device_operations vdc_fops = {
	.owner		= THIS_MODULE,
	.getgeo		= vdc_getgeo,
};

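/* Complete the pending vio completion, if any, when the event it was
 * waiting for (or any event, for WAITING_FOR_ANY) has occurred.
 */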
static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
{
	if (vio->cmp &&
	    (waiting_for == -1 ||
	     vio->cmp->waiting_for == waiting_for)) {
		vio->cmp->err = err;
		complete(&vio->cmp->com);
		vio->cmp = NULL;
	}
}

static void vdc_handshake_complete(struct vio_driver_state *vio)
{
	vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
}

static int vdc_handle_unknown(struct vdc_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	printk(KERN_ERR PFX "Resetting connection.\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}

static int vdc_send_attr(struct vio_driver_state *vio)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info pkt;

	memset(&pkt, 0, sizeof(pkt));

	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);

	pkt.xfer_mode = VIO_DRING_MODE;
	pkt.vdisk_block_size = port->vdisk_block_size;
	pkt.max_xfer_size = port->max_xfer_size;

	viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
	       pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);

	return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
}

static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info *pkt = arg;

	viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
	       "xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
	       pkt->tag.stype, pkt->operations,
	       pkt->vdisk_size, pkt->vdisk_type,
	       pkt->xfer_mode, pkt->vdisk_block_size,
	       pkt->max_xfer_size);

	if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
		switch (pkt->vdisk_type) {
		case VD_DISK_TYPE_DISK:
		case VD_DISK_TYPE_SLICE:
			break;

		default:
			printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n",
			       vio->name, pkt->vdisk_type);
			return -ECONNRESET;
		}

		if (pkt->vdisk_block_size > port->vdisk_block_size) {
			printk(KERN_ERR PFX "%s: BLOCK size increased "
			       "%u --> %u\n",
			       vio->name,
			       port->vdisk_block_size, pkt->vdisk_block_size);
			return -ECONNRESET;
		}

		port->operations = pkt->operations;
		port->vdisk_size = pkt->vdisk_size;
		port->vdisk_type = pkt->vdisk_type;
		if (pkt->max_xfer_size < port->max_xfer_size)
			port->max_xfer_size = pkt->max_xfer_size;
		port->vdisk_block_size = pkt->vdisk_block_size;
		return 0;
	} else {
		printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);

		return -ECONNRESET;
	}
}

static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
{
	int err = desc->status;

	vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}

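/* Retire one TX ring descriptor: unmap its transfer cookies, advance
 * the consumer index, and complete the block request (or the special
 * command, when no request is attached) that it carried.
 */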
static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
			unsigned int index)
{
	struct vio_disk_desc *desc = vio_dring_entry(dr, index);
	struct vdc_req_entry *rqe = &port->rq_arr[index];
	struct request *req;

	if (unlikely(desc->hdr.state != VIO_DESC_DONE))
		return;

	ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
	desc->hdr.state = VIO_DESC_FREE;
	dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1);

	req = rqe->req;
	if (req == NULL) {
		vdc_end_special(port, desc);
		return;
	}

	rqe->req = NULL;

	__blk_end_request(req, (desc->status ? -EIO : 0), desc->size);

	if (blk_queue_stopped(port->disk->queue))
		blk_start_queue(port->disk->queue);
}

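/* Handle a data ACK from the server.  Each ACK retires exactly one
 * descriptor, so silently drop anything with a stale ring ident or
 * an out-of-range index.
 */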
static int vdc_ack(struct vdc_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;

	if (unlikely(pkt->dring_ident != dr->ident ||
		     pkt->start_idx != pkt->end_idx ||
		     pkt->start_idx >= VDC_TX_RING_SIZE))
		return 0;

	vdc_end_one(port, dr, pkt->start_idx);

	return 0;
}

static int vdc_nack(struct vdc_port *port, void *msgbuf)
{
	return 0;
}

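/* LDC event callback: propagate link state changes, then drain and
 * dispatch incoming packets while holding the vio lock.
 */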
static void vdc_event(void *arg, int event)
{
	struct vdc_port *port = arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
				err = vdc_ack(port, &msgbuf);
			else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
				err = vdc_nack(port, &msgbuf);
			else
				err = vdc_handle_unknown(port, &msgbuf);
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			err = vio_control_pkt_engine(vio, &msgbuf);
		} else {
			err = vdc_handle_unknown(port, &msgbuf);
		}
		if (err < 0)
			break;
	}
	if (err < 0)
		vdc_finish(&port->vio, err, WAITING_FOR_ANY);
	spin_unlock_irqrestore(&vio->lock, flags);
}

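/* Tell the server a new descriptor is ready, retrying on -EAGAIN with
 * a doubling delay capped at 128 microseconds while the channel is full.
 */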
static int __vdc_tx_trigger(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_INFO,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident		= dr->ident,
		.start_idx		= dr->prod,
		.end_idx		= dr->prod,
	};
	int err, delay;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
	} while (err == -EAGAIN);

	return err;
}

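/* Map a block request into the current TX ring descriptor and kick
 * the server.  Called with the queue lock (port->vio.lock) held, via
 * do_vdc_request().
 */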
static int __send_request(struct request *req)
{
	struct vdc_port *port = req->rq_disk->private_data;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct scatterlist sg[port->ring_cookies];
	struct vdc_req_entry *rqe;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	int nsg, err, i;
	u64 len;
	u8 op;

	map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	if (rq_data_dir(req) == READ) {
		map_perm |= LDC_MAP_W;
		op = VD_OP_BREAD;
	} else {
		map_perm |= LDC_MAP_R;
		op = VD_OP_BWRITE;
	}

	sg_init_table(sg, port->ring_cookies);
	nsg = blk_rq_map_sg(req->q, req, sg);

	len = 0;
	for (i = 0; i < nsg; i++)
		len += sg[i].length;

	if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
		blk_stop_queue(port->disk->queue);
		err = -ENOMEM;
		goto out;
	}

	desc = vio_dring_cur(dr);

	err = ldc_map_sg(port->vio.lp, sg, nsg,
			 desc->cookies, port->ring_cookies,
			 map_perm);
	if (err < 0) {
		printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);
		return err;
	}

	rqe = &port->rq_arr[dr->prod];
	rqe->req = req;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	if (port->vdisk_type == VD_DISK_TYPE_DISK) {
		desc->slice = 0xff;
	} else {
		desc->slice = 0;
	}
	desc->status = ~0;
	desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
	desc->size = len;
	desc->ncookies = err;

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err < 0) {
		printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
	} else {
		port->req_id++;
		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
	}
out:
	return err;
}

static void do_vdc_request(struct request_queue *q)
{
	while (1) {
		struct request *req = blk_fetch_request(q);

		if (!req)
			break;

		if (__send_request(req) < 0)
			__blk_end_request_all(req, -EIO);
	}
}

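/* Issue a non-block-I/O command (VTOC, geometry, cache control, etc.)
 * through the same TX ring, then sleep until the server ACKs it.
 */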
static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
{
	struct vio_dring_state *dr;
	struct vio_completion comp;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	unsigned long flags;
	int op_len, err;
	void *req_buf;

	if (!(((u64)1 << ((u64)op - 1)) & port->operations))
		return -EOPNOTSUPP;

	switch (op) {
	case VD_OP_BREAD:
	case VD_OP_BWRITE:
	default:
		return -EINVAL;

	case VD_OP_FLUSH:
		op_len = 0;
		map_perm = 0;
		break;

	case VD_OP_GET_WCE:
		op_len = sizeof(u32);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_WCE:
		op_len = sizeof(u32);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_GET_VTOC:
		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_VTOC:
		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_GET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_SCSICMD:
		op_len = 16;
		map_perm = LDC_MAP_RW;
		break;

	case VD_OP_GET_DEVID:
		op_len = sizeof(struct vio_disk_devid);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_GET_EFI:
	case VD_OP_SET_EFI:
		return -EOPNOTSUPP;
	}

	map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	op_len = (op_len + 7) & ~7;
	req_buf = kzalloc(op_len, GFP_KERNEL);
	if (!req_buf)
		return -ENOMEM;

	if (len > op_len)
		len = op_len;

	if (map_perm & LDC_MAP_R)
		memcpy(req_buf, buf, len);

	spin_lock_irqsave(&port->vio.lock, flags);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	desc = vio_dring_cur(dr);

	err = ldc_map_single(port->vio.lp, req_buf, op_len,
			     desc->cookies, port->ring_cookies,
			     map_perm);
	if (err < 0) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		kfree(req_buf);
		return err;
	}

	init_completion(&comp.com);
	comp.waiting_for = WAITING_FOR_GEN_CMD;
	port->vio.cmp = &comp;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	desc->slice = 0;
	desc->status = ~0;
	desc->offset = 0;
	desc->size = op_len;
	desc->ncookies = err;

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err >= 0) {
		port->req_id++;
		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
		spin_unlock_irqrestore(&port->vio.lock, flags);

		wait_for_completion(&comp.com);
		err = comp.err;
	} else {
		port->vio.cmp = NULL;
		spin_unlock_irqrestore(&port->vio.lock, flags);
	}

	if (map_perm & LDC_MAP_W)
		memcpy(buf, req_buf, len);

	kfree(req_buf);

	return err;
}

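/* Allocate and export the TX descriptor ring; each entry carries room
 * for up to ring_cookies transfer cookies.
 */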
static int __devinit vdc_alloc_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	unsigned long len, entry_size;
	int ncookies;
	void *dring;

	entry_size = sizeof(struct vio_disk_desc) +
		(sizeof(struct ldc_trans_cookie) * port->ring_cookies);
	len = (VDC_TX_RING_SIZE * entry_size);

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring))
		return PTR_ERR(dring);

	dr->base = dring;
	dr->entry_size = entry_size;
	dr->num_entries = VDC_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	dr->pending = VDC_TX_RING_SIZE;
	dr->ncookies = ncookies;

	return 0;
}

static void vdc_free_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	if (dr->base) {
		ldc_free_exp_dring(port->vio.lp, dr->base,
				   (dr->entry_size * dr->num_entries),
				   dr->cookies, dr->ncookies);
		dr->base = NULL;
		dr->entry_size = 0;
		dr->num_entries = 0;
		dr->pending = 0;
		dr->ncookies = 0;
	}
}

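/* Bring the vio channel up, fetch the disk label and geometry from
 * the server, then allocate and register the gendisk and its queue.
 */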
static int probe_disk(struct vdc_port *port)
{
	struct vio_completion comp;
	struct request_queue *q;
	struct gendisk *g;
	int err;

	init_completion(&comp.com);
	comp.err = 0;
	comp.waiting_for = WAITING_FOR_LINK_UP;
	port->vio.cmp = &comp;

	vio_port_up(&port->vio);

	wait_for_completion(&comp.com);
	if (comp.err)
		return comp.err;

	err = generic_request(port, VD_OP_GET_VTOC,
			      &port->label, sizeof(port->label));
	if (err < 0) {
		printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
		return err;
	}

	err = generic_request(port, VD_OP_GET_DISKGEOM,
			      &port->geom, sizeof(port->geom));
	if (err < 0) {
		printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
		       "error %d\n", err);
		return err;
	}

	port->vdisk_size = ((u64)port->geom.num_cyl *
			    (u64)port->geom.num_hd *
			    (u64)port->geom.num_sec);

	q = blk_init_queue(do_vdc_request, &port->vio.lock);
	if (!q) {
		printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
		       port->vio.name);
		return -ENOMEM;
	}
	g = alloc_disk(1 << PARTITION_SHIFT);
	if (!g) {
		printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
		       port->vio.name);
		blk_cleanup_queue(q);
		return -ENOMEM;
	}

	port->disk = g;

	blk_queue_max_segments(q, port->ring_cookies);
	blk_queue_max_hw_sectors(q, port->max_xfer_size);
	g->major = vdc_major;
	g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
	strcpy(g->disk_name, port->disk_name);

	g->fops = &vdc_fops;
	g->queue = q;
	g->private_data = port;
	g->driverfs_dev = &port->vio.vdev->dev;

	set_capacity(g, port->vdisk_size);

	printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n",
	       g->disk_name,
	       port->vdisk_size, (port->vdisk_size >> (20 - 9)));

	add_disk(g);

	return 0;
}

static struct ldc_channel_config vdc_ldc_cfg = {
	.event		= vdc_event,
	.mtu		= 64,
	.mode		= LDC_MODE_UNRELIABLE,
};

static struct vio_driver_ops vdc_vio_ops = {
	.send_attr		= vdc_send_attr,
	.handle_attr		= vdc_handle_attr,
	.handshake_complete	= vdc_handshake_complete,
};

static void __devinit print_version(void)
{
	static int version_printed;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);
}

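/* Probe one vdc-port MD node: validate the minor range, name the disk
 * (vdiska, vdiskb, ..., two letters past dev_no 25), set default
 * transfer limits, then allocate the LDC channel and TX ring and scan
 * the disk.
 */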
static int __devinit vdc_port_probe(struct vio_dev *vdev,
				    const struct vio_device_id *id)
{
	struct mdesc_handle *hp;
	struct vdc_port *port;
	int err;

	print_version();

	hp = mdesc_grab();

	err = -ENODEV;
	if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
		printk(KERN_ERR PFX "Port id [%llu] too large.\n",
		       vdev->dev_no);
		goto err_out_release_mdesc;
	}

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	err = -ENOMEM;
	if (!port) {
		printk(KERN_ERR PFX "Cannot allocate vdc_port.\n");
		goto err_out_release_mdesc;
	}

	if (vdev->dev_no >= 26)
		snprintf(port->disk_name, sizeof(port->disk_name),
			 VDCBLK_NAME "%c%c",
			 'a' + ((int)vdev->dev_no / 26) - 1,
			 'a' + ((int)vdev->dev_no % 26));
	else
		snprintf(port->disk_name, sizeof(port->disk_name),
			 VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));

	err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
			      vdc_versions, ARRAY_SIZE(vdc_versions),
			      &vdc_vio_ops, port->disk_name);
	if (err)
		goto err_out_free_port;

	port->vdisk_block_size = 512;
	port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
	port->ring_cookies = ((port->max_xfer_size *
			       port->vdisk_block_size) / PAGE_SIZE) + 2;

	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
	if (err)
		goto err_out_free_port;

	err = vdc_alloc_tx_ring(port);
	if (err)
		goto err_out_free_ldc;

	err = probe_disk(port);
	if (err)
		goto err_out_free_tx_ring;

	dev_set_drvdata(&vdev->dev, port);

	mdesc_release(hp);

	return 0;

err_out_free_tx_ring:
	vdc_free_tx_ring(port);

err_out_free_ldc:
	vio_ldc_free(&port->vio);

err_out_free_port:
	kfree(port);

err_out_release_mdesc:
	mdesc_release(hp);
	return err;
}

static int vdc_port_remove(struct vio_dev *vdev)
{
	struct vdc_port *port = dev_get_drvdata(&vdev->dev);

	if (port) {
		del_timer_sync(&port->vio.timer);

		vdc_free_tx_ring(port);
		vio_ldc_free(&port->vio);

		dev_set_drvdata(&vdev->dev, NULL);

		kfree(port);
	}
	return 0;
}

static const struct vio_device_id vdc_port_match[] = {
	{
		.type = "vdc-port",
	},
	{},
};
MODULE_DEVICE_TABLE(vio, vdc_port_match);

static struct vio_driver vdc_port_driver = {
	.id_table	= vdc_port_match,
	.probe		= vdc_port_probe,
	.remove		= vdc_port_remove,
	.driver		= {
		.name	= "vdc_port",
		.owner	= THIS_MODULE,
	}
};

static int __init vdc_init(void)
{
	int err;

	err = register_blkdev(0, VDCBLK_NAME);
	if (err < 0)
		goto out_err;

	vdc_major = err;

	err = vio_register_driver(&vdc_port_driver);
	if (err)
		goto out_unregister_blkdev;

	return 0;

out_unregister_blkdev:
	unregister_blkdev(vdc_major, VDCBLK_NAME);
	vdc_major = 0;

out_err:
	return err;
}

static void __exit vdc_exit(void)
{
	vio_unregister_driver(&vdc_port_driver);
	unregister_blkdev(vdc_major, VDCBLK_NAME);
}

module_init(vdc_init);
module_exit(vdc_exit);