1/*
2 ****************************************************************************
3 * (C) 2006 - Cambridge University
4 ****************************************************************************
5 *
6 *        File: xenbus.c
7 *      Author: Steven Smith (sos22@cam.ac.uk)
8 *     Changes: Grzegorz Milos (gm281@cam.ac.uk)
9 *     Changes: John D. Ramsdell
10 *
 *        Date: Jun 2006, changes Aug 2005
12 *
13 * Environment: Xen Minimal OS
14 * Description: Minimal implementation of xenbus
15 *
16 ****************************************************************************
17 **/
18#include <mini-os/os.h>
19#include <mini-os/mm.h>
20#include <mini-os/lib.h>
21#include <mini-os/xenbus.h>
22#include <mini-os/events.h>
23#include <mini-os/wait.h>
24#include <xen/io/xs_wire.h>
25#include <mini-os/spinlock.h>
26
27#define _BMK_PRINTF_VA
28#include <bmk-core/memalloc.h>
29#include <bmk-core/printf.h>
30#include <bmk-core/string.h>
31
/* Evaluate-once minimum: the statement-expression form avoids the
 * double-evaluation hazard of a naive MIN(a,b) macro. */
#define min(x,y) ({                       \
        typeof(x) tmpx = (x);                 \
        typeof(y) tmpy = (y);                 \
        tmpx < tmpy ? tmpx : tmpy;            \
        })
37
/* DEBUG() prints with file/line when built with -DXENBUS_DEBUG,
 * otherwise compiles away to nothing. */
#ifdef XENBUS_DEBUG
#define DEBUG(_f, _a...) \
    minios_printk("MINI_OS(file=xenbus.c, line=%d) " _f , __LINE__, ## _a)
#else
#define DEBUG(_f, _a...)    ((void)0)
#endif
44
/* Shared ring with xenstored, mapped from start_info in init_xenbus(). */
static struct xenstore_domain_interface *xenstore_buf;
/* Woken by the store event channel; sleepers are the xenbus thread and
 * writers waiting for request-ring space in xenbus_xb_write(). */
static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
static spinlock_t xb_lock = SPIN_LOCK_UNLOCKED; /* protects xenbus req ring */

/* Queue that receives watch events when no per-watch queue was given. */
struct xenbus_event_queue xenbus_default_watch_queue;
/* All registered watches; protected by xenbus_req_lock. */
static MINIOS_LIST_HEAD(, xenbus_watch) watches;
/* Per in-flight request bookkeeping, indexed by message req_id. */
struct xenbus_req_info
{
    struct xenbus_event_queue *reply_queue; /* non-0 iff in use */
    struct xenbus_event *for_queue;         /* event used to deliver the reply */
};


spinlock_t xenbus_req_lock = SPIN_LOCK_UNLOCKED;
/*
 * This lock protects:
 *    the xenbus request ring
 *    req_info[]
 *    all live struct xenbus_event_queue (including xenbus_default_watch_queue)
 *    nr_live_reqs
 *    req_wq
 *    watches
 */
68
/* Default wakeup hook for an event queue: rouse anyone sleeping in
 * await_event() on this queue's waitq. */
static void queue_wakeup(struct xenbus_event_queue *queue)
{
    minios_wake_up(&queue->waitq);
}
73
74void xenbus_event_queue_init(struct xenbus_event_queue *queue)
75{
76    MINIOS_STAILQ_INIT(&queue->events);
77    queue->wakeup = queue_wakeup;
78    minios_init_waitqueue_head(&queue->waitq);
79}
80
81static struct xenbus_event *remove_event(struct xenbus_event_queue *queue)
82{
83    /* Called with lock held */
84    struct xenbus_event *event;
85
86    event = MINIOS_STAILQ_FIRST(&queue->events);
87    if (!event)
88        goto out;
89    MINIOS_STAILQ_REMOVE_HEAD(&queue->events, entry);
90
91 out:
92    return event;
93}
94
/* Append an event to a queue and notify consumers via its wakeup hook. */
static void queue_event(struct xenbus_event_queue *queue,
                        struct xenbus_event *event)
{
    /* Called with lock held */
    MINIOS_STAILQ_INSERT_TAIL(&queue->events, event, entry);
    /* default hook (queue_wakeup) wakes the queue's waitq */
    queue->wakeup(queue);
}
102
/* Sleep until an event can be popped from queue.  Uses the classic
 * add-waiter / unlock / sleep pattern so a wakeup that fires between
 * the emptiness check and the sleep is not lost. */
static struct xenbus_event *await_event(struct xenbus_event_queue *queue)
{
    struct xenbus_event *event;
    DEFINE_WAIT(w);
    spin_lock(&xenbus_req_lock);
    while (!(event = remove_event(queue))) {
        /* register as waiter BEFORE dropping the lock, so a concurrent
         * queue_event() wakeup cannot be missed */
        minios_add_waiter(w, queue->waitq);
        spin_unlock(&xenbus_req_lock);
        minios_wait(w);
        spin_lock(&xenbus_req_lock);
    }
    minios_remove_waiter(w, queue->waitq);
    spin_unlock(&xenbus_req_lock);
    return event;
}
118
119
/* Maximum number of simultaneously outstanding xenstore requests. */
#define NR_REQS 32
static struct xenbus_req_info req_info[NR_REQS];
122
123static void memcpy_from_ring(const void *Ring,
124        void *Dest,
125        int off,
126        int len)
127{
128    int c1, c2;
129    const char *ring = Ring;
130    char *dest = Dest;
131    c1 = min(len, XENSTORE_RING_SIZE - off);
132    c2 = len - c1;
133    bmk_memcpy(dest, ring + off, c1);
134    bmk_memcpy(dest + c1, ring, c2);
135}
136
137char **xenbus_wait_for_watch_return(struct xenbus_event_queue *queue)
138{
139    struct xenbus_event *event;
140    if (!queue)
141        queue = &xenbus_default_watch_queue;
142    event = await_event(queue);
143    return &event->path;
144}
145
/* Wait for one watch event on queue and discard it.
 * NOTE(review): the char** returned by xenbus_wait_for_watch_return
 * points at the event's path member; freeing it here assumes path is
 * the first member of struct xenbus_event so the pointer aliases the
 * whole event allocation -- confirm against xenbus.h. */
void xenbus_wait_for_watch(struct xenbus_event_queue *queue)
{
    char **ret;
    if (!queue)
        queue = &xenbus_default_watch_queue;
    ret = xenbus_wait_for_watch_return(queue);
    if (ret)
        bmk_memfree(ret, BMK_MEMWHO_WIREDBMK);
    else
        minios_printk("unexpected path returned by watch\n");
}
157
158char* xenbus_wait_for_value(const char* path, const char* value, struct xenbus_event_queue *queue)
159{
160    if (!queue)
161        queue = &xenbus_default_watch_queue;
162    for(;;)
163    {
164        char *res, *msg;
165        int r;
166
167        msg = xenbus_read(XBT_NIL, path, &res);
168        if(msg) return msg;
169
170        r = bmk_strcmp(value,res);
171        bmk_memfree(res, BMK_MEMWHO_WIREDBMK);
172
173        if(r==0) break;
174        else xenbus_wait_for_watch(queue);
175    }
176    return NULL;
177}
178
179char *xenbus_switch_state(xenbus_transaction_t xbt, const char* path, XenbusState state)
180{
181    char *current_state;
182    char *msg = NULL;
183    char *msg2 = NULL;
184    char value[2];
185    XenbusState rs;
186    int xbt_flag = 0;
187    int retry = 0;
188
189    do {
190        if (xbt == XBT_NIL) {
191            msg = xenbus_transaction_start(&xbt);
192            if (msg) goto exit;
193            xbt_flag = 1;
194        }
195
196        msg = xenbus_read(xbt, path, &current_state);
197        if (msg) goto exit;
198
199        rs = (XenbusState) (current_state[0] - '0');
200        bmk_memfree(current_state, BMK_MEMWHO_WIREDBMK);
201        if (rs == state) {
202            msg = NULL;
203            goto exit;
204        }
205
206        bmk_snprintf(value, 2, "%d", state);
207        msg = xenbus_write(xbt, path, value);
208
209exit:
210        if (xbt_flag) {
211            msg2 = xenbus_transaction_end(xbt, 0, &retry);
212            xbt = XBT_NIL;
213        }
214        if (msg == NULL && msg2 != NULL)
215            msg = msg2;
216    } while (retry);
217
218    return msg;
219}
220
221char *xenbus_wait_for_state_change(const char* path, XenbusState *state, struct xenbus_event_queue *queue)
222{
223    if (!queue)
224        queue = &xenbus_default_watch_queue;
225    for(;;)
226    {
227        char *res, *msg;
228        XenbusState rs;
229
230        msg = xenbus_read(XBT_NIL, path, &res);
231        if(msg) return msg;
232
233        rs = (XenbusState) (res[0] - 48);
234        bmk_memfree(res, BMK_MEMWHO_WIREDBMK);
235
236        if (rs == *state)
237            xenbus_wait_for_watch(queue);
238        else {
239            *state = rs;
240            break;
241        }
242    }
243    return NULL;
244}
245
246
/* Xenbus demultiplexer thread: drains the response ring, turning
 * XS_WATCH_EVENT messages into events on the owning watch's queue and
 * every other message into a reply delivered via req_info[msg.req_id]. */
static void xenbus_thread_func(void *ign)
{
    struct xsd_sockmsg msg;
    unsigned prod = xenstore_buf->rsp_prod;

    for (;;)
    {
        /* sleep until the producer index moves */
        minios_wait_event(xb_waitq, prod != xenstore_buf->rsp_prod);
        while (1)
        {
            prod = xenstore_buf->rsp_prod;
            DEBUG("Rsp_cons %d, rsp_prod %d.\n", xenstore_buf->rsp_cons,
                    xenstore_buf->rsp_prod);
            /* not even a whole header available yet: go back to sleep */
            if (xenstore_buf->rsp_prod - xenstore_buf->rsp_cons < sizeof(msg)) {
                minios_notify_remote_via_evtchn(start_info.store_evtchn);
                break;
            }
            rmb(); /* read ring contents only after seeing rsp_prod */
            memcpy_from_ring(xenstore_buf->rsp,
                    &msg,
                    MASK_XENSTORE_IDX(xenstore_buf->rsp_cons),
                    sizeof(msg));
            DEBUG("Msg len %d, %d avail, id %d.\n",
                    msg.len + sizeof(msg),
                    xenstore_buf->rsp_prod - xenstore_buf->rsp_cons,
                    msg.req_id);
            /* header complete but body still partially written */
            if (xenstore_buf->rsp_prod - xenstore_buf->rsp_cons <
                    sizeof(msg) + msg.len) {
                minios_notify_remote_via_evtchn(start_info.store_evtchn);
                break;
            }

            DEBUG("Message is good.\n");

            if(msg.type == XS_WATCH_EVENT)
            {
		/* payload ("<path>\0<token>\0") is stored inline after the
		 * event header, in one allocation */
		struct xenbus_event *event
		    = bmk_xmalloc_bmk(sizeof(*event) + msg.len);
                struct xenbus_event_queue *events = NULL;
		char *data = (char*)event + sizeof(*event);
                struct xenbus_watch *watch;

                memcpy_from_ring(xenstore_buf->rsp,
		    data,
                    MASK_XENSTORE_IDX(xenstore_buf->rsp_cons + sizeof(msg)),
                    msg.len);

		event->path = data;
		event->token = event->path + bmk_strlen(event->path) + 1;

                /* finish reading the payload before releasing ring space */
                mb();
                xenstore_buf->rsp_cons += msg.len + sizeof(msg);

                spin_lock(&xenbus_req_lock);

                /* route the event to the watch whose token matches */
                MINIOS_LIST_FOREACH(watch, &watches, entry)
                    if (!bmk_strcmp(watch->token, event->token)) {
                        event->watch = watch;
                        events = watch->events;
                        break;
                    }

                if (events) {
                    queue_event(events, event);
                } else {
                    minios_printk("unexpected watch token %s\n", event->token);
                    bmk_memfree(event, BMK_MEMWHO_WIREDBMK);
                }

                spin_unlock(&xenbus_req_lock);
            }

            else
            {
                /* reply to an outstanding request: copy header+body and
                 * hand it to whoever allocated this req_id */
                req_info[msg.req_id].for_queue->reply =
                    bmk_xmalloc_bmk(sizeof(msg) + msg.len);
                memcpy_from_ring(xenstore_buf->rsp,
                    req_info[msg.req_id].for_queue->reply,
                    MASK_XENSTORE_IDX(xenstore_buf->rsp_cons),
                    msg.len + sizeof(msg));
                mb();
                xenstore_buf->rsp_cons += msg.len + sizeof(msg);
                spin_lock(&xenbus_req_lock);
                queue_event(req_info[msg.req_id].reply_queue,
                            req_info[msg.req_id].for_queue);
                spin_unlock(&xenbus_req_lock);
            }

            /* tell xenstored we consumed ring space */
            wmb();
            minios_notify_remote_via_evtchn(start_info.store_evtchn);
        }
    }
}
340
/* Store event-channel upcall: just kick the waitq; the real work is
 * done by xenbus_thread_func. */
static void xenbus_evtchn_handler(evtchn_port_t port, struct pt_regs *regs,
				  void *ign)
{
    minios_wake_up(&xb_waitq);
}
346
static int nr_live_reqs;                /* in-use entries in req_info[] */
static DECLARE_WAIT_QUEUE_HEAD(req_wq); /* waiters for a free request slot */
349
350/* Release a xenbus identifier */
351void xenbus_id_release(int id)
352{
353    BUG_ON(!req_info[id].reply_queue);
354    spin_lock(&xenbus_req_lock);
355    req_info[id].reply_queue = 0;
356    nr_live_reqs--;
357    if (nr_live_reqs == NR_REQS - 1)
358        minios_wake_up(&req_wq);
359    spin_unlock(&xenbus_req_lock);
360}
361
/* Reserve a req_info[] slot for an outgoing request, blocking while all
 * NR_REQS slots are in use.  The returned index doubles as the message
 * req_id, used by xenbus_thread_func to route the reply. */
int xenbus_id_allocate(struct xenbus_event_queue *reply_queue,
                       struct xenbus_event *for_queue)
{
    static int probe;   /* round-robin start point; guarded by xenbus_req_lock */
    int o_probe;

    while (1)
    {
        spin_lock(&xenbus_req_lock);
        if (nr_live_reqs < NR_REQS)
            break;      /* NB: exits the loop with the lock still held */
        spin_unlock(&xenbus_req_lock);
        minios_wait_event(req_wq, (nr_live_reqs < NR_REQS));
    }

    /* a free slot is guaranteed (nr_live_reqs < NR_REQS); scan for it */
    o_probe = probe;
    for (;;)
    {
        if (!req_info[o_probe].reply_queue)
            break;
        o_probe = (o_probe + 1) % NR_REQS;
        BUG_ON(o_probe == probe);
    }
    nr_live_reqs++;
    req_info[o_probe].reply_queue = reply_queue;
    req_info[o_probe].for_queue = for_queue;
    probe = (o_probe + 1) % NR_REQS;
    spin_unlock(&xenbus_req_lock);

    return o_probe;
}
393
/* Mark a watch as unregistered (no token); pair with
 * xenbus_watch_prepare()/xenbus_watch_release(). */
void xenbus_watch_init(struct xenbus_watch *watch)
{
    watch->token = 0;
}
398
399void xenbus_watch_prepare(struct xenbus_watch *watch)
400{
401    BUG_ON(!watch->events);
402    size_t size = sizeof(void*)*2 + 5;
403    watch->token = bmk_xmalloc_bmk(size);
404    int r = bmk_snprintf(watch->token,size,"*%p",(void*)watch);
405    BUG_ON(!(r > 0 && r < size));
406    spin_lock(&xenbus_req_lock);
407    MINIOS_LIST_INSERT_HEAD(&watches, watch, entry);
408    spin_unlock(&xenbus_req_lock);
409}
410
411void xenbus_watch_release(struct xenbus_watch *watch)
412{
413    if (!watch->token)
414        return;
415    spin_lock(&xenbus_req_lock);
416    MINIOS_LIST_REMOVE(watch, entry);
417    spin_unlock(&xenbus_req_lock);
418    bmk_memfree(watch->token, BMK_MEMWHO_WIREDBMK);
419    watch->token = 0;
420}
421
/* Initialise xenbus: map the shared ring, start the demux thread, and
 * bind/unmask the store event channel. */
void init_xenbus(void)
{
    int err;
    DEBUG("init_xenbus called.\n");
    xenbus_event_queue_init(&xenbus_default_watch_queue);
    /* ring page is provided by the domain builder via start_info */
    xenstore_buf = mfn_to_virt(start_info.store_mfn);
    bmk_sched_create("xenstore", NULL, 0, xenbus_thread_func, NULL,
      NULL, 0);
    DEBUG("buf at %p.\n", xenstore_buf);
    err = minios_bind_evtchn(start_info.store_evtchn,
		      xenbus_evtchn_handler,
              NULL);
    minios_unmask_evtchn(start_info.store_evtchn);
    minios_printk("xenbus initialised on irq %d mfn %#lx\n",
	   err, start_info.store_mfn);
}
439
/* Tear-down hook; nothing to do here — the xenbus thread and event
 * channel live for the lifetime of the guest. */
void fini_xenbus(void)
{
}
443
/* Marshal one request (header plus nr_reqs payload segments) onto the
 * request ring, blocking until there is room, then notify xenstored.
 * The whole message must fit in the ring (BUG otherwise). */
void xenbus_xb_write(int type, int req_id, xenbus_transaction_t trans_id,
		     const struct write_req *req, int nr_reqs)
{
    XENSTORE_RING_IDX prod;
    int r;
    int len = 0;
    const struct write_req *cur_req;
    int req_off;
    int total_off;
    int this_chunk;
    struct xsd_sockmsg m = {.type = type, .req_id = req_id,
        .tx_id = trans_id };
    struct write_req header_req = { &m, sizeof(m) };

    /* header's len field holds the payload size; the ring transfer
     * additionally includes the header itself */
    for (r = 0; r < nr_reqs; r++)
        len += req[r].len;
    m.len = len;
    len += sizeof(m);

    /* the header is written first, treated as a pseudo segment */
    cur_req = &header_req;

    BUG_ON(len > XENSTORE_RING_SIZE);

    spin_lock(&xb_lock);
    /* Wait for the ring to drain to the point where we can send the
       message. */
    prod = xenstore_buf->req_prod;
    if (prod + len - xenstore_buf->req_cons > XENSTORE_RING_SIZE)
    {
        /* Wait for there to be space on the ring */
        DEBUG("prod %d, len %d, cons %d, size %d; waiting.\n",
                prod, len, xenstore_buf->req_cons, XENSTORE_RING_SIZE);
        spin_unlock(&xb_lock);
        minios_wait_event(xb_waitq,
                xenstore_buf->req_prod + len - xenstore_buf->req_cons <=
                XENSTORE_RING_SIZE);
        spin_lock(&xb_lock);
        DEBUG("Back from wait.\n");
        prod = xenstore_buf->req_prod;
    }

    /* We're now guaranteed to be able to send the message without
       overflowing the ring.  Do so. */
    total_off = 0;
    req_off = 0;
    while (total_off < len)
    {
        /* largest contiguous copy: rest of the current segment, capped
         * at the distance to the ring's wrap point */
        this_chunk = min(cur_req->len - req_off,
                XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod));
        bmk_memcpy((char *)xenstore_buf->req + MASK_XENSTORE_IDX(prod),
                (char *)cur_req->data + req_off, this_chunk);
        prod += this_chunk;
        req_off += this_chunk;
        total_off += this_chunk;
        if (req_off == cur_req->len)
        {
            /* segment finished: step from the header to the first
             * payload segment, or on to the next payload segment */
            req_off = 0;
            if (cur_req == &header_req)
                cur_req = req;
            else
                cur_req++;
        }
    }

    DEBUG("Complete main loop of xb_write.\n");
    BUG_ON(req_off != 0);
    BUG_ON(total_off != len);
    BUG_ON(prod > xenstore_buf->req_cons + XENSTORE_RING_SIZE);

    /* Remote must see entire message before updating indexes */
    wmb();

    xenstore_buf->req_prod += len;
    spin_unlock(&xb_lock);

    /* Send evtchn to notify remote */
    minios_notify_remote_via_evtchn(start_info.store_evtchn);
}
522
/* Send a message to xenbus, in the same fashion as xb_write, and
   block waiting for a reply.  The reply is malloced and should be
   freed by the caller. */
struct xsd_sockmsg *
xenbus_msg_reply(int type,
		 xenbus_transaction_t trans,
		 struct write_req *io,
		 int nr_reqs)
{
    int id;
    struct xsd_sockmsg *rep;
    struct xenbus_event_queue queue;    /* private reply queue, on our stack */
    struct xenbus_event event_buf;      /* reply is delivered via this event */

    xenbus_event_queue_init(&queue);

    /* reserve a req_id whose reply is routed back to our private queue */
    id = xenbus_id_allocate(&queue,&event_buf);

    xenbus_xb_write(type, id, trans, io, nr_reqs);

    /* xenbus_thread_func queues event_buf here once the reply with our
     * req_id arrives */
    struct xenbus_event *event = await_event(&queue);
    BUG_ON(event != &event_buf);

    /* grab the malloced reply before releasing the slot; caller frees it */
    rep = req_info[id].for_queue->reply;
    BUG_ON(rep->req_id != id);
    xenbus_id_release(id);
    return rep;
}
551
/* Free a buffer handed out by this API (reply, value, or error message). */
void xenbus_free(void *p) { bmk_memfree(p, BMK_MEMWHO_WIREDBMK); }
553
554static char *errmsg(struct xsd_sockmsg *rep)
555{
556    char *res;
557    if (!rep) {
558	char msg[] = "No reply";
559	size_t len = bmk_strlen(msg) + 1;
560	return bmk_memcpy(bmk_xmalloc_bmk(len), msg, len);
561    }
562    if (rep->type != XS_ERROR)
563	return NULL;
564    res = bmk_xmalloc_bmk(rep->len + 1);
565    bmk_memcpy(res, rep + 1, rep->len);
566    res[rep->len] = 0;
567    bmk_memfree(rep, BMK_MEMWHO_WIREDBMK);
568    return res;
569}
570
571/* Send a debug message to xenbus.  Can block. */
572static void xenbus_debug_msg(const char *msg)
573{
574    int len = bmk_strlen(msg);
575    struct write_req req[] = {
576        { "print", sizeof("print") },
577        { msg, len },
578        { "", 1 }};
579    struct xsd_sockmsg *reply;
580
581    reply = xenbus_msg_reply(XS_DEBUG, 0, req, ARRAY_SIZE(req));
582    minios_printk("Got a reply, type %d, id %d, len %d.\n",
583            reply->type, reply->req_id, reply->len);
584}
585
/* List the contents of a directory.  Returns a malloc()ed array of
   pointers to malloc()ed strings.  The array is NULL terminated.  May
   block. */
char *xenbus_ls(xenbus_transaction_t xbt, const char *pre, char ***contents)
{
    struct xsd_sockmsg *reply, *repmsg;
    struct write_req req[] = { { pre, bmk_strlen(pre)+1 } };
    int nr_elems, x, i;
    char **res, *msg;

    repmsg = xenbus_msg_reply(XS_DIRECTORY, xbt, req, ARRAY_SIZE(req));
    msg = errmsg(repmsg);
    if (msg) {
	*contents = NULL;
	return msg;
    }
    /* payload begins right after the header: packed NUL-terminated names */
    reply = repmsg + 1;
    /* count entries by counting NUL bytes in the payload */
    for (x = nr_elems = 0; x < repmsg->len; x++)
        nr_elems += (((char *)reply)[x] == 0);
    res = bmk_memcalloc(nr_elems+1, sizeof(res[0]), BMK_MEMWHO_WIREDBMK);
    /* copy each name into its own allocation; x walks the packed payload */
    for (x = i = 0; i < nr_elems; i++) {
        int l = bmk_strlen((char *)reply + x);
        res[i] = bmk_xmalloc_bmk(l + 1);
        bmk_memcpy(res[i], (char *)reply + x, l + 1);
        x += l + 1;
    }
    res[i] = NULL;  /* terminate the array */
    bmk_memfree(repmsg, BMK_MEMWHO_WIREDBMK);
    *contents = res;
    return NULL;
}
617
618char *xenbus_read(xenbus_transaction_t xbt, const char *path, char **value)
619{
620    struct write_req req[] = { {path, bmk_strlen(path) + 1} };
621    struct xsd_sockmsg *rep;
622    char *res, *msg;
623    rep = xenbus_msg_reply(XS_READ, xbt, req, ARRAY_SIZE(req));
624    msg = errmsg(rep);
625    if (msg) {
626	*value = NULL;
627	return msg;
628    }
629    res = bmk_xmalloc_bmk(rep->len + 1);
630    bmk_memcpy(res, rep + 1, rep->len);
631    res[rep->len] = 0;
632    bmk_memfree(rep, BMK_MEMWHO_WIREDBMK);
633    *value = res;
634    return NULL;
635}
636
637char *xenbus_write(xenbus_transaction_t xbt, const char *path, const char *value)
638{
639    struct write_req req[] = {
640	{path, bmk_strlen(path) + 1},
641	{value, bmk_strlen(value)},
642    };
643    struct xsd_sockmsg *rep;
644    char *msg;
645    rep = xenbus_msg_reply(XS_WRITE, xbt, req, ARRAY_SIZE(req));
646    msg = errmsg(rep);
647    if (msg) return msg;
648    bmk_memfree(rep, BMK_MEMWHO_WIREDBMK);
649    return NULL;
650}
651
652char* xenbus_watch_path_token( xenbus_transaction_t xbt, const char *path, const char *token, struct xenbus_event_queue *events)
653{
654    struct xsd_sockmsg *rep;
655
656    struct write_req req[] = {
657        {path, bmk_strlen(path) + 1},
658	{token, bmk_strlen(token) + 1},
659    };
660
661    struct xenbus_watch *watch = bmk_xmalloc_bmk(sizeof(*watch));
662
663    char *msg;
664
665    if (!events)
666        events = &xenbus_default_watch_queue;
667
668    watch->token = bmk_xmalloc_bmk(bmk_strlen(token)+1);
669    bmk_strcpy(watch->token, token);
670    watch->events = events;
671
672    spin_lock(&xenbus_req_lock);
673    MINIOS_LIST_INSERT_HEAD(&watches, watch, entry);
674    spin_unlock(&xenbus_req_lock);
675
676    rep = xenbus_msg_reply(XS_WATCH, xbt, req, ARRAY_SIZE(req));
677
678    msg = errmsg(rep);
679    if (msg) return msg;
680    bmk_memfree(rep, BMK_MEMWHO_WIREDBMK);
681
682    return NULL;
683}
684
685char* xenbus_unwatch_path_token( xenbus_transaction_t xbt, const char *path, const char *token)
686{
687    struct xsd_sockmsg *rep;
688
689    struct write_req req[] = {
690        {path, bmk_strlen(path) + 1},
691	{token, bmk_strlen(token) + 1},
692    };
693
694    struct xenbus_watch *watch;
695
696    char *msg;
697
698    rep = xenbus_msg_reply(XS_UNWATCH, xbt, req, ARRAY_SIZE(req));
699
700    msg = errmsg(rep);
701    if (msg) return msg;
702    bmk_memfree(rep, BMK_MEMWHO_WIREDBMK);
703
704    spin_lock(&xenbus_req_lock);
705    MINIOS_LIST_FOREACH(watch, &watches, entry)
706        if (!bmk_strcmp(watch->token, token)) {
707            bmk_memfree(watch->token, BMK_MEMWHO_WIREDBMK);
708            MINIOS_LIST_REMOVE(watch, entry);
709            bmk_memfree(watch, BMK_MEMWHO_WIREDBMK);
710            break;
711        }
712    spin_unlock(&xenbus_req_lock);
713
714    return NULL;
715}
716
717char *xenbus_rm(xenbus_transaction_t xbt, const char *path)
718{
719    struct write_req req[] = { {path, bmk_strlen(path) + 1} };
720    struct xsd_sockmsg *rep;
721    char *msg;
722    rep = xenbus_msg_reply(XS_RM, xbt, req, ARRAY_SIZE(req));
723    msg = errmsg(rep);
724    if (msg)
725	return msg;
726    bmk_memfree(rep, BMK_MEMWHO_WIREDBMK);
727    return NULL;
728}
729
730char *xenbus_get_perms(xenbus_transaction_t xbt, const char *path, char **value)
731{
732    struct write_req req[] = { {path, bmk_strlen(path) + 1} };
733    struct xsd_sockmsg *rep;
734    char *res, *msg;
735    rep = xenbus_msg_reply(XS_GET_PERMS, xbt, req, ARRAY_SIZE(req));
736    msg = errmsg(rep);
737    if (msg) {
738	*value = NULL;
739	return msg;
740    }
741    res = bmk_xmalloc_bmk(rep->len + 1);
742    bmk_memcpy(res, rep + 1, rep->len);
743    res[rep->len] = 0;
744    bmk_memfree(rep, BMK_MEMWHO_WIREDBMK);
745    *value = res;
746    return NULL;
747}
748
749#define PERM_MAX_SIZE 32
750char *xenbus_set_perms(xenbus_transaction_t xbt, const char *path, domid_t dom, char perm)
751{
752    char value[PERM_MAX_SIZE];
753    struct write_req req[] = {
754	{path, bmk_strlen(path) + 1},
755	{value, 0},
756    };
757    struct xsd_sockmsg *rep;
758    char *msg;
759    bmk_snprintf(value, PERM_MAX_SIZE, "%c%hu", perm, dom);
760    req[1].len = bmk_strlen(value) + 1;
761    rep = xenbus_msg_reply(XS_SET_PERMS, xbt, req, ARRAY_SIZE(req));
762    msg = errmsg(rep);
763    if (msg)
764	return msg;
765    bmk_memfree(rep, BMK_MEMWHO_WIREDBMK);
766    return NULL;
767}
768
769char *xenbus_transaction_start(xenbus_transaction_t *xbt)
770{
771    /* xenstored becomes angry if you send a length 0 message, so just
772       shove a nul terminator on the end */
773    struct write_req req = { "", 1};
774    struct xsd_sockmsg *rep;
775    char *err;
776
777    rep = xenbus_msg_reply(XS_TRANSACTION_START, 0, &req, 1);
778    err = errmsg(rep);
779    if (err)
780	return err;
781
782    /* hint: typeof(*xbt) == unsigned long */
783    *xbt = bmk_strtoul((char *)(rep+1), NULL, 10);
784
785    bmk_memfree(rep, BMK_MEMWHO_WIREDBMK);
786    return NULL;
787}
788
789char *
790xenbus_transaction_end(xenbus_transaction_t t, int abort, int *retry)
791{
792    struct xsd_sockmsg *rep;
793    struct write_req req;
794    char *err;
795
796    *retry = 0;
797
798    req.data = abort ? "F" : "T";
799    req.len = 2;
800    rep = xenbus_msg_reply(XS_TRANSACTION_END, t, &req, 1);
801    err = errmsg(rep);
802    if (err) {
803	if (!bmk_strcmp(err, "EAGAIN")) {
804	    *retry = 1;
805	    bmk_memfree(err, BMK_MEMWHO_WIREDBMK);
806	    return NULL;
807	} else {
808	    return err;
809	}
810    }
811    bmk_memfree(rep, BMK_MEMWHO_WIREDBMK);
812    return NULL;
813}
814
815int xenbus_read_integer(const char *path)
816{
817    char *res, *buf;
818    int t;
819
820    res = xenbus_read(XBT_NIL, path, &buf);
821    if (res) {
822	minios_printk("Failed to read %s.\n", path);
823	bmk_memfree(res, BMK_MEMWHO_WIREDBMK);
824	return -1;
825    }
826    t = bmk_strtoul(buf, NULL, 10);
827    bmk_memfree(buf, BMK_MEMWHO_WIREDBMK);
828    return t;
829}
830
831char* xenbus_printf(xenbus_transaction_t xbt,
832                                  const char* node, const char* path,
833                                  const char* fmt, ...)
834{
835#define BUFFER_SIZE 256
836    char fullpath[BUFFER_SIZE];
837    char val[BUFFER_SIZE];
838    va_list args;
839    int rv;
840
841    rv = bmk_snprintf(fullpath,sizeof(fullpath),"%s/%s", node, path);
842    BUG_ON(rv >= BUFFER_SIZE);
843
844    va_start(args, fmt);
845    rv = bmk_vsnprintf(val, sizeof(val), fmt, args);
846    BUG_ON(rv >= BUFFER_SIZE);
847    va_end(args);
848    return xenbus_write(xbt,fullpath,val);
849}
850
851domid_t xenbus_get_self_id(void)
852{
853    char *dom_id;
854    domid_t ret;
855
856    BUG_ON(xenbus_read(XBT_NIL, "domid", &dom_id));
857    ret = bmk_strtoul(dom_id, NULL, 10);
858
859    return ret;
860}
861
862static void do_ls_test(const char *pre)
863{
864    char **dirs, *msg;
865    int x;
866
867    minios_printk("ls %s...\n", pre);
868    msg = xenbus_ls(XBT_NIL, pre, &dirs);
869    if (msg) {
870	minios_printk("Error in xenbus ls: %s\n", msg);
871	bmk_memfree(msg, BMK_MEMWHO_WIREDBMK);
872	return;
873    }
874    for (x = 0; dirs[x]; x++)
875    {
876        minios_printk("ls %s[%d] -> %s\n", pre, x, dirs[x]);
877        bmk_memfree(dirs[x], BMK_MEMWHO_WIREDBMK);
878    }
879    bmk_memfree(dirs, BMK_MEMWHO_WIREDBMK);
880}
881
882static void do_read_test(const char *path)
883{
884    char *res, *msg;
885    minios_printk("Read %s...\n", path);
886    msg = xenbus_read(XBT_NIL, path, &res);
887    if (msg) {
888	minios_printk("Error in xenbus read: %s\n", msg);
889	bmk_memfree(msg, BMK_MEMWHO_WIREDBMK);
890	return;
891    }
892    minios_printk("Read %s -> %s.\n", path, res);
893    bmk_memfree(res, BMK_MEMWHO_WIREDBMK);
894}
895
896static void do_write_test(const char *path, const char *val)
897{
898    char *msg;
899    minios_printk("Write %s to %s...\n", val, path);
900    msg = xenbus_write(XBT_NIL, path, val);
901    if (msg) {
902	minios_printk("Result %s\n", msg);
903	bmk_memfree(msg, BMK_MEMWHO_WIREDBMK);
904    } else {
905	minios_printk("Success.\n");
906    }
907}
908
909static void do_rm_test(const char *path)
910{
911    char *msg;
912    minios_printk("rm %s...\n", path);
913    msg = xenbus_rm(XBT_NIL, path);
914    if (msg) {
915	minios_printk("Result %s\n", msg);
916	bmk_memfree(msg, BMK_MEMWHO_WIREDBMK);
917    } else {
918	minios_printk("Success.\n");
919    }
920}
921
/* Simple testing thing */
void test_xenbus(void)
{
    /* Smoke-test ls/read/write/rm against the device/vif/0 subtree;
     * results go to the console for manual inspection. */
    minios_printk("Doing xenbus test.\n");
    xenbus_debug_msg("Testing xenbus...\n");

    minios_printk("Doing ls test.\n");
    do_ls_test("device");
    do_ls_test("device/vif");
    do_ls_test("device/vif/0");

    minios_printk("Doing read test.\n");
    do_read_test("device/vif/0/mac");
    do_read_test("device/vif/0/backend");

    minios_printk("Doing write test.\n");
    do_write_test("device/vif/0/flibble", "flobble");
    do_read_test("device/vif/0/flibble");
    do_write_test("device/vif/0/flibble", "widget");
    do_read_test("device/vif/0/flibble");

    minios_printk("Doing rm test.\n");
    do_rm_test("device/vif/0/flibble");
    do_read_test("device/vif/0/flibble");
    minios_printk("(Should have said ENOENT)\n");
}
948
949/*
950 * Local variables:
951 * mode: C
952 * c-basic-offset: 4
953 * End:
954 */
955