// SPDX-License-Identifier: GPL-2.0
/* viohs.c: LDOM Virtual I/O handshake helper layer.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>

#include <asm/ldc.h>
#include <asm/vio.h>

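/* Write a packet to the LDC channel, retrying for up to ~1ms
 * (1000 x udelay(1)) while the channel reports -EAGAIN.  Returns the
 * last ldc_write() result: a positive byte count on success or a
 * negative errno on failure.
 */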
int vio_ldc_send(struct vio_driver_state *vio, void *data, int len)
{
	int err, limit = 1000;

	err = -EINVAL;
	while (limit-- > 0) {
		err = ldc_write(vio->lp, data, len);
		if (!err || (err != -EAGAIN))
			break;
		udelay(1);
	}

	return err;
}
EXPORT_SYMBOL(vio_ldc_send);

static int send_ctrl(struct vio_driver_state *vio,
		     struct vio_msg_tag *tag, int len)
{
	tag->sid = vio_send_sid(vio);
	return vio_ldc_send(vio, tag, len);
}

static void init_tag(struct vio_msg_tag *tag, u8 type, u8 stype, u16 stype_env)
{
	tag->type = type;
	tag->stype = stype;
	tag->stype_env = stype_env;
}

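/* Send a VERSION INFO control packet proposing the given major/minor.
 * A fresh local session id is generated from sched_clock() each time
 * a new proposal is sent.
 */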
static int send_version(struct vio_driver_state *vio, u16 major, u16 minor)
{
	struct vio_ver_info pkt;

	vio->_local_sid = (u32) sched_clock();

	memset(&pkt, 0, sizeof(pkt));
	init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_VER_INFO);
	pkt.major = major;
	pkt.minor = minor;
	pkt.dev_class = vio->dev_class;

	viodbg(HS, "SEND VERSION INFO maj[%u] min[%u] devclass[%u]\n",
	       major, minor, vio->dev_class);

	return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}

static int start_handshake(struct vio_driver_state *vio)
{
	int err;

	viodbg(HS, "START HANDSHAKE\n");

	vio->hs_state = VIO_HS_INVALID;

	err = send_version(vio,
			   vio->ver_table[0].major,
			   vio->ver_table[0].minor);
	if (err < 0)
		return err;

	return 0;
}

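/* Forget everything we learned about the peer's receive descriptor
 * ring, keeping only the ring ident, and release the local descriptor
 * copy buffer.
 */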
static void flush_rx_dring(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;
	u64 ident;

	BUG_ON(!(vio->dr_state & VIO_DR_STATE_RXREG));

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	ident = dr->ident;

	BUG_ON(!vio->desc_buf);
	kfree(vio->desc_buf);
	vio->desc_buf = NULL;

	memset(dr, 0, sizeof(*dr));
	dr->ident = ident;
}

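/* Called by drivers from their LDC event handler.  On LDC_EVENT_UP we
 * note which descriptor rings this device class must register (TX,
 * RX, or both) and kick off the handshake.  On LDC_EVENT_RESET all
 * negotiated state is dropped and the channel is disconnected so the
 * handshake can start over.
 */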
void vio_link_state_change(struct vio_driver_state *vio, int event)
{
	if (event == LDC_EVENT_UP) {
		vio->hs_state = VIO_HS_INVALID;

		switch (vio->dev_class) {
		case VDEV_NETWORK:
		case VDEV_NETWORK_SWITCH:
			vio->dr_state = (VIO_DR_STATE_TXREQ |
					 VIO_DR_STATE_RXREQ);
			break;

		case VDEV_DISK:
			vio->dr_state = VIO_DR_STATE_TXREQ;
			break;
		case VDEV_DISK_SERVER:
			vio->dr_state = VIO_DR_STATE_RXREQ;
			break;
		}
		start_handshake(vio);
	} else if (event == LDC_EVENT_RESET) {
		vio->hs_state = VIO_HS_INVALID;

		if (vio->dr_state & VIO_DR_STATE_RXREG)
			flush_rx_dring(vio);

		vio->dr_state = 0x00;
		memset(&vio->ver, 0, sizeof(vio->ver));

		ldc_disconnect(vio->lp);
	}
}
EXPORT_SYMBOL(vio_link_state_change);

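/* Common failure path for the handshake state machine: drop any
 * registered ring state and descriptor buffer, mark the handshake
 * invalid, and tell the caller to reset the connection.
 */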
static int handshake_failure(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	/* XXX Put policy here...  Perhaps start a timer to fire
	 * XXX in 100 ms, which will bring the link up and retry
	 * XXX the handshake.
	 */

	viodbg(HS, "HANDSHAKE FAILURE\n");

	vio->dr_state &= ~(VIO_DR_STATE_TXREG |
			   VIO_DR_STATE_RXREG);

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	memset(dr, 0, sizeof(*dr));

	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;

	vio->hs_state = VIO_HS_INVALID;

	return -ECONNRESET;
}

static int process_unknown(struct vio_driver_state *vio, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	viodbg(HS, "UNKNOWN CONTROL [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);

	printk(KERN_ERR "vio: ID[%lu] Resetting connection.\n",
	       vio->vdev->channel_id);

	ldc_disconnect(vio->lp);

	return -ECONNRESET;
}

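/* Register our transmit descriptor ring with the peer.  The packet
 * carries a variable number of LDC transfer cookies, so it is built
 * in a buffer sized for VIO_MAX_RING_COOKIES and only the bytes
 * actually used are sent.
 */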
static int send_dreg(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_TX_RING];
	union {
		struct vio_dring_register pkt;
		char all[sizeof(struct vio_dring_register) +
			 (sizeof(struct ldc_trans_cookie) *
			  VIO_MAX_RING_COOKIES)];
	} u;
	size_t bytes = sizeof(struct vio_dring_register) +
		       (sizeof(struct ldc_trans_cookie) *
			dr->ncookies);
	int i;

	if (WARN_ON(bytes > sizeof(u)))
		return -EINVAL;

	memset(&u, 0, bytes);
	init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
	u.pkt.dring_ident = 0;
	u.pkt.num_descr = dr->num_entries;
	u.pkt.descr_size = dr->entry_size;
	u.pkt.options = VIO_TX_DRING;
	u.pkt.num_cookies = dr->ncookies;

	viodbg(HS, "SEND DRING_REG INFO ndesc[%u] dsz[%u] opt[0x%x] "
	       "ncookies[%u]\n",
	       u.pkt.num_descr, u.pkt.descr_size, u.pkt.options,
	       u.pkt.num_cookies);

	for (i = 0; i < dr->ncookies; i++) {
		u.pkt.cookies[i] = dr->cookies[i];

		viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
		       i,
		       (unsigned long long) u.pkt.cookies[i].cookie_addr,
		       (unsigned long long) u.pkt.cookies[i].cookie_size);
	}

	return send_ctrl(vio, &u.pkt.tag, bytes);
}

static int send_rdx(struct vio_driver_state *vio)
{
	struct vio_rdx pkt;

	memset(&pkt, 0, sizeof(pkt));

	init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX);

	viodbg(HS, "SEND RDX INFO\n");

	return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}

static int send_attr(struct vio_driver_state *vio)
{
	if (!vio->ops)
		return -EINVAL;

	return vio->ops->send_attr(vio);
}

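/* Find the best version entry for a peer-requested major number.
 * The driver's version table is assumed to be ordered from newest to
 * oldest, so the first entry whose major is <= the requested major is
 * the closest one we support.
 */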
static struct vio_version *find_by_major(struct vio_driver_state *vio,
					 u16 major)
{
	struct vio_version *ret = NULL;
	int i;

	for (i = 0; i < vio->ver_table_entries; i++) {
		struct vio_version *v = &vio->ver_table[i];
		if (v->major <= major) {
			ret = v;
			break;
		}
	}
	return ret;
}

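/* The peer proposed a version.  NACK with 0.0 if we support nothing
 * compatible, NACK with our closest major if the majors differ,
 * otherwise ACK with the smaller of the two minors and record the
 * negotiated version.
 */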
static int process_ver_info(struct vio_driver_state *vio,
			    struct vio_ver_info *pkt)
{
	struct vio_version *vap;
	int err;

	viodbg(HS, "GOT VERSION INFO maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (vio->hs_state != VIO_HS_INVALID) {
		/* XXX Perhaps invoke start_handshake? XXX */
		memset(&vio->ver, 0, sizeof(vio->ver));
		vio->hs_state = VIO_HS_INVALID;
	}

	vap = find_by_major(vio, pkt->major);

	vio->_peer_sid = pkt->tag.sid;

	if (!vap) {
		pkt->tag.stype = VIO_SUBTYPE_NACK;
		pkt->major = 0;
		pkt->minor = 0;
		viodbg(HS, "SEND VERSION NACK maj[0] min[0]\n");
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
	} else if (vap->major != pkt->major) {
		pkt->tag.stype = VIO_SUBTYPE_NACK;
		pkt->major = vap->major;
		pkt->minor = vap->minor;
		viodbg(HS, "SEND VERSION NACK maj[%u] min[%u]\n",
		       pkt->major, pkt->minor);
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
	} else {
		struct vio_version ver = {
			.major = pkt->major,
			.minor = pkt->minor,
		};
		if (ver.minor > vap->minor)
			ver.minor = vap->minor;
		pkt->minor = ver.minor;
		pkt->tag.stype = VIO_SUBTYPE_ACK;
		pkt->dev_class = vio->dev_class;
		viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n",
		       pkt->major, pkt->minor);
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
		if (err > 0) {
			vio->ver = ver;
			vio->hs_state = VIO_HS_GOTVERS;
		}
	}
	if (err < 0)
		return handshake_failure(vio);

	return 0;
}

static int process_ver_ack(struct vio_driver_state *vio,
			   struct vio_ver_info *pkt)
{
	viodbg(HS, "GOT VERSION ACK maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (vio->hs_state & VIO_HS_GOTVERS) {
		if (vio->ver.major != pkt->major ||
		    vio->ver.minor != pkt->minor) {
			pkt->tag.stype = VIO_SUBTYPE_NACK;
			(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
			return handshake_failure(vio);
		}
	} else {
		vio->ver.major = pkt->major;
		vio->ver.minor = pkt->minor;
		vio->hs_state = VIO_HS_GOTVERS;
	}

	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_DISK:
		if (send_attr(vio) < 0)
			return handshake_failure(vio);
		break;

	default:
		break;
	}

	return 0;
}

static int process_ver_nack(struct vio_driver_state *vio,
			    struct vio_ver_info *pkt)
{
	struct vio_version *nver;

	viodbg(HS, "GOT VERSION NACK maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (pkt->major == 0 && pkt->minor == 0)
		return handshake_failure(vio);
	nver = find_by_major(vio, pkt->major);
	if (!nver)
		return handshake_failure(vio);

	if (send_version(vio, nver->major, nver->minor) < 0)
		return handshake_failure(vio);

	return 0;
}

static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt)
{
	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return process_ver_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return process_ver_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return process_ver_nack(vio, pkt);

	default:
		return handshake_failure(vio);
	}
}

static int process_attr(struct vio_driver_state *vio, void *pkt)
{
	int err;

	if (!(vio->hs_state & VIO_HS_GOTVERS))
		return handshake_failure(vio);

	if (!vio->ops)
		return 0;

	err = vio->ops->handle_attr(vio, pkt);
	if (err < 0) {
		return handshake_failure(vio);
	} else {
		vio->hs_state |= VIO_HS_GOT_ATTR;

		if ((vio->dr_state & VIO_DR_STATE_TXREQ) &&
		    !(vio->hs_state & VIO_HS_SENT_DREG)) {
			if (send_dreg(vio) < 0)
				return handshake_failure(vio);

			vio->hs_state |= VIO_HS_SENT_DREG;
		}
	}

	return 0;
}

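/* True once every descriptor ring this device class requires (per the
 * TXREQ/RXREQ bits in dr_state) has completed registration.
 */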
static int all_drings_registered(struct vio_driver_state *vio)
{
	int need_rx, need_tx;

	need_rx = (vio->dr_state & VIO_DR_STATE_RXREQ);
	need_tx = (vio->dr_state & VIO_DR_STATE_TXREQ);

	if (need_rx &&
	    !(vio->dr_state & VIO_DR_STATE_RXREG))
		return 0;

	if (need_tx &&
	    !(vio->dr_state & VIO_DR_STATE_TXREG))
		return 0;

	return 1;
}

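/* The peer is registering its transmit ring, which becomes our
 * receive ring: record its geometry and cookies, allocate a buffer
 * for copying descriptors out of it, assign a ring ident and ACK.
 * Anything unexpected gets a NACK and fails the handshake.
 */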
static int process_dreg_info(struct vio_driver_state *vio,
			     struct vio_dring_register *pkt)
{
	struct vio_dring_state *dr;
	int i;

	viodbg(HS, "GOT DRING_REG INFO ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	if (!(vio->dr_state & VIO_DR_STATE_RXREQ))
		goto send_nack;

	if (vio->dr_state & VIO_DR_STATE_RXREG)
		goto send_nack;

	/* v1.6 and higher, ACK with desired, supported mode, or NACK */
	if (vio_version_after_eq(vio, 1, 6)) {
		if (!(pkt->options & VIO_TX_DRING))
			goto send_nack;
		pkt->options = VIO_TX_DRING;
	}

	BUG_ON(vio->desc_buf);

	vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC);
	if (!vio->desc_buf)
		goto send_nack;

	vio->desc_buf_len = pkt->descr_size;

	dr = &vio->drings[VIO_DRIVER_RX_RING];

	dr->num_entries = pkt->num_descr;
	dr->entry_size = pkt->descr_size;
	dr->ncookies = pkt->num_cookies;
	for (i = 0; i < dr->ncookies; i++) {
		dr->cookies[i] = pkt->cookies[i];

		viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
		       i,
		       (unsigned long long)
		       pkt->cookies[i].cookie_addr,
		       (unsigned long long)
		       pkt->cookies[i].cookie_size);
	}

	pkt->tag.stype = VIO_SUBTYPE_ACK;
	pkt->dring_ident = ++dr->ident;

	viodbg(HS, "SEND DRING_REG ACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	if (send_ctrl(vio, &pkt->tag, struct_size(pkt, cookies, dr->ncookies)) < 0)
		goto send_nack;

	vio->dr_state |= VIO_DR_STATE_RXREG;

	return 0;

send_nack:
	pkt->tag.stype = VIO_SUBTYPE_NACK;
	viodbg(HS, "SEND DRING_REG NACK\n");
	(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));

	return handshake_failure(vio);
}

static int process_dreg_ack(struct vio_driver_state *vio,
			    struct vio_dring_register *pkt)
{
	struct vio_dring_state *dr;

	viodbg(HS, "GOT DRING_REG ACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	dr = &vio->drings[VIO_DRIVER_TX_RING];

	if (!(vio->dr_state & VIO_DR_STATE_TXREQ))
		return handshake_failure(vio);

	dr->ident = pkt->dring_ident;
	vio->dr_state |= VIO_DR_STATE_TXREG;

	if (all_drings_registered(vio)) {
		if (send_rdx(vio) < 0)
			return handshake_failure(vio);
		vio->hs_state = VIO_HS_SENT_RDX;
	}
	return 0;
}

static int process_dreg_nack(struct vio_driver_state *vio,
			     struct vio_dring_register *pkt)
{
	viodbg(HS, "GOT DRING_REG NACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	return handshake_failure(vio);
}

static int process_dreg(struct vio_driver_state *vio,
			struct vio_dring_register *pkt)
{
	if (!(vio->hs_state & VIO_HS_GOTVERS))
		return handshake_failure(vio);

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return process_dreg_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return process_dreg_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return process_dreg_nack(vio, pkt);

	default:
		return handshake_failure(vio);
	}
}

static int process_dunreg(struct vio_driver_state *vio,
			  struct vio_dring_unregister *pkt)
{
	struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_RX_RING];

	viodbg(HS, "GOT DRING_UNREG\n");

	if (pkt->dring_ident != dr->ident)
		return 0;

	vio->dr_state &= ~VIO_DR_STATE_RXREG;

	memset(dr, 0, sizeof(*dr));

	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;

	return 0;
}

static int process_rdx_info(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	viodbg(HS, "GOT RDX INFO\n");

	pkt->tag.stype = VIO_SUBTYPE_ACK;
	viodbg(HS, "SEND RDX ACK\n");
	if (send_ctrl(vio, &pkt->tag, sizeof(*pkt)) < 0)
		return handshake_failure(vio);

	vio->hs_state |= VIO_HS_SENT_RDX_ACK;
	return 0;
}

static int process_rdx_ack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	viodbg(HS, "GOT RDX ACK\n");

	if (!(vio->hs_state & VIO_HS_SENT_RDX))
		return handshake_failure(vio);

	vio->hs_state |= VIO_HS_GOT_RDX_ACK;
	return 0;
}

static int process_rdx_nack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	viodbg(HS, "GOT RDX NACK\n");

	return handshake_failure(vio);
}

static int process_rdx(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	if (!all_drings_registered(vio))
		handshake_failure(vio);

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return process_rdx_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return process_rdx_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return process_rdx_nack(vio, pkt);

	default:
		return handshake_failure(vio);
	}
}

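/* Main dispatcher for incoming control packets, called by drivers for
 * each VIO_TYPE_CTRL message.  When a packet moves the handshake into
 * the VIO_HS_COMPLETE state, the driver's handshake_complete() method
 * is invoked.
 */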
int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
{
	struct vio_msg_tag *tag = pkt;
	u8 prev_state = vio->hs_state;
	int err;

	switch (tag->stype_env) {
	case VIO_VER_INFO:
		err = process_ver(vio, pkt);
		break;

	case VIO_ATTR_INFO:
		err = process_attr(vio, pkt);
		break;

	case VIO_DRING_REG:
		err = process_dreg(vio, pkt);
		break;

	case VIO_DRING_UNREG:
		err = process_dunreg(vio, pkt);
		break;

	case VIO_RDX:
		err = process_rdx(vio, pkt);
		break;

	default:
		err = process_unknown(vio, pkt);
		break;
	}

	if (!err &&
	    vio->hs_state != prev_state &&
	    (vio->hs_state & VIO_HS_COMPLETE)) {
		if (vio->ops)
			vio->ops->handshake_complete(vio);
	}

	return err;
}
EXPORT_SYMBOL(vio_control_pkt_engine);

void vio_conn_reset(struct vio_driver_state *vio)
{
}
EXPORT_SYMBOL(vio_conn_reset);

/* The issue is that the Solaris virtual disk server just mirrors the
 * SID values it gets from the client peer.  So we work around that
 * here in vio_{validate,send}_sid() so that the drivers don't need
 * to be aware of this crap.
 */
int vio_validate_sid(struct vio_driver_state *vio, struct vio_msg_tag *tp)
{
	u32 sid;

	/* Always let VERSION+INFO packets through unchecked, they
	 * define the new SID.
	 */
	if (tp->type == VIO_TYPE_CTRL &&
	    tp->stype == VIO_SUBTYPE_INFO &&
	    tp->stype_env == VIO_VER_INFO)
		return 0;

	/* Ok, now figure out which SID to use.  */
	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK_SERVER:
	default:
		sid = vio->_peer_sid;
		break;

	case VDEV_DISK:
		sid = vio->_local_sid;
		break;
	}

	if (sid == tp->sid)
		return 0;
	viodbg(DATA, "BAD SID tag->sid[%08x] peer_sid[%08x] local_sid[%08x]\n",
	       tp->sid, vio->_peer_sid, vio->_local_sid);
	return -EINVAL;
}
EXPORT_SYMBOL(vio_validate_sid);

u32 vio_send_sid(struct vio_driver_state *vio)
{
	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK:
	default:
		return vio->_local_sid;

	case VDEV_DISK_SERVER:
		return vio->_peer_sid;
	}
}
EXPORT_SYMBOL(vio_send_sid);

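/* Allocate the LDC channel for this device.  The caller's base config
 * is copied and the TX/RX interrupts from the vio_dev are filled in
 * before handing it to ldc_alloc().
 */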
int vio_ldc_alloc(struct vio_driver_state *vio,
			 struct ldc_channel_config *base_cfg,
			 void *event_arg)
{
	struct ldc_channel_config cfg = *base_cfg;
	struct ldc_channel *lp;

	cfg.tx_irq = vio->vdev->tx_irq;
	cfg.rx_irq = vio->vdev->rx_irq;

	lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg, vio->name);
	if (IS_ERR(lp))
		return PTR_ERR(lp);

	vio->lp = lp;

	return 0;
}
EXPORT_SYMBOL(vio_ldc_alloc);

void vio_ldc_free(struct vio_driver_state *vio)
{
	ldc_free(vio->lp);
	vio->lp = NULL;

	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;
}
EXPORT_SYMBOL(vio_ldc_free);

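/* Bind and connect the LDC channel.  RAW mode channels are simply
 * marked connected rather than going through ldc_connect().  On any
 * failure the port timer is re-armed so the attempt is retried in
 * roughly a second.
 */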
void vio_port_up(struct vio_driver_state *vio)
{
	unsigned long flags;
	int err, state;

	spin_lock_irqsave(&vio->lock, flags);

	state = ldc_state(vio->lp);

	err = 0;
	if (state == LDC_STATE_INIT) {
		err = ldc_bind(vio->lp);
		if (err)
			printk(KERN_WARNING "%s: Port %lu bind failed, "
			       "err=%d\n",
			       vio->name, vio->vdev->channel_id, err);
	}

	if (!err) {
		if (ldc_mode(vio->lp) == LDC_MODE_RAW)
			ldc_set_state(vio->lp, LDC_STATE_CONNECTED);
		else
			err = ldc_connect(vio->lp);

		if (err)
			printk(KERN_WARNING "%s: Port %lu connect failed, "
			       "err=%d\n",
			       vio->name, vio->vdev->channel_id, err);
	}
	if (err) {
		unsigned long expires = jiffies + HZ;

		expires = round_jiffies(expires);
		mod_timer(&vio->timer, expires);
	}

	spin_unlock_irqrestore(&vio->lock, flags);
}
EXPORT_SYMBOL(vio_port_up);

static void vio_port_timer(struct timer_list *t)
{
	struct vio_driver_state *vio = from_timer(vio, t, timer);

	vio_port_up(vio);
}

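/* Validate the device class, required ops and version table, then
 * initialize the common vio_driver_state and set up the port retry
 * timer.  A typical port driver (e.g. sunvnet or sunvdc) wires this
 * layer up roughly like the sketch below; names such as port, ldc_cfg
 * and my_vio_ops are illustrative only:
 *
 *	err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK,
 *			      ver_table, ARRAY_SIZE(ver_table),
 *			      &my_vio_ops, "myport");
 *	if (!err)
 *		err = vio_ldc_alloc(&port->vio, &ldc_cfg, port);
 *	if (!err)
 *		vio_port_up(&port->vio);
 *
 * The driver's LDC event handler then feeds LDC_EVENT_UP/RESET to
 * vio_link_state_change() and each incoming control packet to
 * vio_control_pkt_engine().
 */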
int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
		    u8 dev_class, struct vio_version *ver_table,
		    int ver_table_size, struct vio_driver_ops *ops,
		    char *name)
{
	switch (dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK:
	case VDEV_DISK_SERVER:
	case VDEV_CONSOLE_CON:
		break;

	default:
		return -EINVAL;
	}

	if (dev_class == VDEV_NETWORK ||
	    dev_class == VDEV_NETWORK_SWITCH ||
	    dev_class == VDEV_DISK ||
	    dev_class == VDEV_DISK_SERVER) {
		if (!ops || !ops->send_attr || !ops->handle_attr ||
		    !ops->handshake_complete)
			return -EINVAL;
	}

	if (!ver_table || ver_table_size < 0)
		return -EINVAL;

	if (!name)
		return -EINVAL;

	spin_lock_init(&vio->lock);

	vio->name = name;

	vio->dev_class = dev_class;
	vio->vdev = vdev;

	vio->ver_table = ver_table;
	vio->ver_table_entries = ver_table_size;

	vio->ops = ops;

	timer_setup(&vio->timer, vio_port_timer, 0);

	return 0;
}
EXPORT_SYMBOL(vio_driver_init);