/*
 * Copyright 2006-2007, François Revol. All rights reserved.
 * Distributed under the terms of the MIT License.
 */

/*
 * nbd driver for Haiku
 *
 * Maps Network Block Devices as virtual partitions.
 */


#include <ByteOrder.h>
#include <KernelExport.h>
#include <Drivers.h>
#include <driver_settings.h>
#include <Errors.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <ksocket.h>
#include <netinet/in.h>

//#define DEBUG 1
/* On the first open(), hold the device open ourselves for a few
 * seconds, to avoid a reconnect attempt failing on a second open(),
 * as happens with the Python server.
 */
//#define MOUNT_KLUDGE


/* names, ohh names... */
#ifndef SHUT_RDWR
#define SHUT_RDWR SHUTDOWN_BOTH
#endif

/* locking support */
#ifdef __HAIKU__
#include <kernel/lock.h>
#else
/* wrappers for R5 */
#ifndef _IMPEXP_KERNEL
#define _IMPEXP_KERNEL
#endif
#include "lock.h"
#define mutex lock
#define mutex_init new_lock
#define mutex_destroy free_lock
#define mutex_lock LOCK
#define mutex_unlock UNLOCK
#endif

#include "nbd.h"

#define DRV "nbd"
#define DP "nbd:"
#define MAX_NBDS 4
#define DEVICE_PREFIX "disk/virtual/nbd/"
#define DEVICE_FMT DEVICE_PREFIX "%d/raw"
#define DEVICE_NAME_MAX 32
#define MAX_REQ_SIZE (32*1024*1024)
#define BLKSIZE 512

/* debugging */
#if DEBUG
#define PRINT(a) dprintf a
#define WHICH(dev) ((int)(dev - nbd_devices))
#else
#define PRINT(a)
#endif

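/* One in-flight request. The postoffice thread matches replies to
 * requests by handle, and the waiter blocks on 'sem'. For reads the
 * data buffer is allocated right after this struct; for writes
 * 'buffer' points at the caller's data. 'discard' marks a request
 * whose waiter gave up, so the postoffice frees it on reply. */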
struct nbd_request_entry {
	struct nbd_request_entry *next;
	struct nbd_request req; /* net byte order */
	struct nbd_reply reply; /* net byte order */
	sem_id sem;
	bool replied;
	bool discard;
	uint64 handle;
	uint32 type;
	uint64 from;
	size_t len;
	void *buffer; /* write: ptr to passed buffer; read: ptr to malloc()ed extra */
};

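/* Per-device state. 'ben' serializes access to the socket, the request
 * list and the request-ID counter; 'postoffice' is the reply-reader
 * thread spawned by nbd_connect(). */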
struct nbd_device {
	bool valid;
	bool readonly;
	struct sockaddr_in server;
	mutex ben;
	vint32 refcnt;
	uint64 req; /* next ID for requests */
	int sock;
	thread_id postoffice;
	uint64 size;
	struct nbd_request_entry *reqs;
#ifdef MOUNT_KLUDGE
	int kludge;
#endif
};

typedef struct cookie {
	struct nbd_device *dev;
} cookie_t;

/* data=NULL on read */
status_t nbd_alloc_request(struct nbd_device *dev, struct nbd_request_entry **req, uint32 type, off_t from, size_t len, const char *data);
status_t nbd_queue_request(struct nbd_device *dev, struct nbd_request_entry *req);
status_t nbd_dequeue_request(struct nbd_device *dev, uint64 handle, struct nbd_request_entry **req);
status_t nbd_free_request(struct nbd_device *dev, struct nbd_request_entry *req);

struct nbd_device *nbd_find_device(const char* name);

int32 nbd_postoffice(void *arg);
status_t nbd_connect(struct nbd_device *dev);
status_t nbd_teardown(struct nbd_device *dev);
status_t nbd_post_request(struct nbd_device *dev, struct nbd_request_entry *req);

status_t nbd_open(const char *name, uint32 flags, cookie_t **cookie);
status_t nbd_close(cookie_t *cookie);
status_t nbd_free(cookie_t *cookie);
status_t nbd_control(cookie_t *cookie, uint32 op, void *data, size_t len);
status_t nbd_read(cookie_t *cookie, off_t position, void *data, size_t *numbytes);
status_t nbd_write(cookie_t *cookie, off_t position, const void *data, size_t *numbytes);

KSOCKET_MODULE_DECL;
/* HACK:
 * In BONE at least, a failed connect() (EINTR or ETIMEDOUT)
 * keeps locked pages around (likely a bone_data,
 * until TCP gets the last ACK). If that happens, we snooze()
 * in uninit_driver() to let TCP time out before the kernel
 * tries to delete the image. */
bool gDelayUnload = false;
#define BONE_TEARDOWN_DELAY 60000000

#if 0
#pragma mark ==== support ====
#endif

// TODO: move this to ksocket as an inline
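/* Minimal inet_aton() replacement: parses a dotted-quad IPv4 string
 * into 'addr'. The shifting trick yields network byte order in memory
 * on little-endian (x86) hosts; strings that end early (e.g. "1.2")
 * are accepted as-is, with no shorthand expansion. Returns 0 on
 * success, -1 on a parse error. */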
static int kinet_aton(const char *in, struct in_addr *addr)
{
	int i;
	unsigned long a;
	uint32 inaddr = 0L;
	char *p = (char *)in;
	for (i = 0; i < 4; i++) {
		a = strtoul(p, &p, 10);
		if (!p)
			return -1;
		inaddr = (inaddr >> 8) | ((a & 0x0ff) << 24);
		*(uint32 *)addr = inaddr;
		if (!*p)
			return 0;
		p++;
	}
	return 0;
}

#if 0
#pragma mark ==== request manager ====
#endif

status_t nbd_alloc_request(struct nbd_device *dev, struct nbd_request_entry **req, uint32 type, off_t from, size_t len, const char *data)
{
	bool w = (type == NBD_CMD_WRITE);
	struct nbd_request_entry *r;
	status_t err = EINVAL;
	uint64 handle;
	PRINT((DP ">%s(%" B_PRIu32 ", %" B_PRIdOFF ", %ld)\n", __FUNCTION__, type,
		from, len));

	if (type != NBD_CMD_READ && type != NBD_CMD_WRITE && type != NBD_CMD_DISC)
		return err;
	if (!dev || !req || from < 0)
		return err;

	//LOCK
	err = mutex_lock(&dev->ben);
	if (err)
		return err;

	// must be atomic, hence the lock
	handle = dev->req++;

	//UNLOCK
	mutex_unlock(&dev->ben);

	err = ENOMEM;
	r = malloc(sizeof(struct nbd_request_entry) + (w ? 0 : len));
	if (r == NULL)
		goto err0;
	r->next = NULL;
	err = r->sem = create_sem(0, "nbd request sem");
	if (err < 0)
		goto err1;

	r->replied = false;
	r->discard = false;
	r->handle = handle;
	r->type = type;
	r->from = from;
	r->len = len;

	r->req.magic = B_HOST_TO_BENDIAN_INT32(NBD_REQUEST_MAGIC);
	r->req.type = B_HOST_TO_BENDIAN_INT32(type);
	r->req.handle = B_HOST_TO_BENDIAN_INT64(r->handle);
	r->req.from = B_HOST_TO_BENDIAN_INT64(r->from);
	r->req.len = B_HOST_TO_BENDIAN_INT32(len);

	/* writes use the caller's buffer; reads use the extra space
	 * allocated after the entry itself */
	r->buffer = (void *)(w ? data : (((char *)r) + sizeof(struct nbd_request_entry)));

	*req = r;
	return B_OK;

err1:
	free(r);
err0:
	dprintf(DP " %s: error 0x%08" B_PRIx32 "\n", __FUNCTION__, err);
	return err;
}


/* caller must hold dev->ben */
status_t nbd_queue_request(struct nbd_device *dev, struct nbd_request_entry *req)
{
	PRINT((DP ">%s(handle:%" B_PRIu64 ")\n", __FUNCTION__, req->handle));
	req->next = dev->reqs;
	dev->reqs = req;
	return B_OK;
}


/* caller must hold dev->ben */
status_t nbd_dequeue_request(struct nbd_device *dev, uint64 handle, struct nbd_request_entry **req)
{
	struct nbd_request_entry *r, *prev;
	PRINT((DP ">%s(handle:%" B_PRIu64 ")\n", __FUNCTION__, handle));
	r = dev->reqs;
	prev = NULL;
	while (r && r->handle != handle) {
		prev = r;
		r = r->next;
	}
	if (!r)
		return ENOENT;

	if (prev)
		prev->next = r->next;
	else
		dev->reqs = r->next;

	*req = r;
	return B_OK;
}


status_t nbd_free_request(struct nbd_device *dev, struct nbd_request_entry *req)
{
	PRINT((DP ">%s(handle:%" B_PRIu64 ")\n", __FUNCTION__, req->handle));
	delete_sem(req->sem);
	free(req);
	return B_OK;
}


#if 0
#pragma mark ==== nbd handler ====
#endif

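/* Reply-reader thread, one per connected device. Blocks in krecv()
 * until the server sends a reply header, looks up the matching request
 * by handle, pulls in the data payload for reads, then wakes the
 * waiter through its semaphore. Exits with an error once the socket is
 * shut down by nbd_teardown(). */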
int32 nbd_postoffice(void *arg)
{
	struct nbd_device *dev = (struct nbd_device *)arg;
	struct nbd_request_entry *req = NULL;
	struct nbd_reply reply;
	status_t err;
	const char *reason;
	PRINT((DP ">%s()\n", __FUNCTION__));

	for (;;) {
		reason = "recv";
		err = krecv(dev->sock, &reply, sizeof(reply), 0);
		if (err == -1 && errno < 0)
			err = errno;
		if (err < 0)
			goto err;
		reason = "recv:size";
		if (err < (status_t)sizeof(reply))
			err = EINVAL;
		if (err < 0)
			goto err;
		reason = "magic";
		err = EINVAL;
		if (B_BENDIAN_TO_HOST_INT32(reply.magic) != NBD_REPLY_MAGIC)
			goto err;

		reason = "lock";
		//LOCK
		err = mutex_lock(&dev->ben);
		if (err)
			goto err;

		reason = "dequeue_request";
		err = nbd_dequeue_request(dev, B_BENDIAN_TO_HOST_INT64(reply.handle), &req);

		//UNLOCK
		mutex_unlock(&dev->ben);

		if (!err && !req) {
			dprintf(DP "nbd_dequeue_request found NULL!\n");
			err = ENOENT;
		}

		if (err == B_OK) {
			memcpy(&req->reply, &reply, sizeof(reply));
			if (req->type == NBD_CMD_READ) {
				err = 0;
				reason = "recv(data)";
				if (reply.error == 0)
					err = krecv(dev->sock, req->buffer, req->len, 0);
				if (err < 0)
					goto err;
				/* report how much we actually got */
				req->len = err;
			} else {
				if (reply.error)
					req->len = 0;
			}

			reason = "lock";
			//LOCK
			err = mutex_lock(&dev->ben);
			if (err)
				goto err;

			// this also must be atomic!
			release_sem(req->sem);
			req->replied = true;
			if (req->discard)
				nbd_free_request(dev, req);

			//UNLOCK
			mutex_unlock(&dev->ben);
		}

	}

	PRINT((DP "<%s\n", __FUNCTION__));
	return 0;

err:
	dprintf(DP "%s: %s: error 0x%08" B_PRIx32 "\n", __FUNCTION__, reason, err);
	return err;
}


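/* Connects to the server and performs the old-style fixed NBD
 * handshake: the server sends NBD_INIT_PASSWD, a 64-bit magic and the
 * 64-bit device size, with no option negotiation. On success the
 * postoffice thread is spawned to read replies. */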
status_t nbd_connect(struct nbd_device *dev)
{
	struct nbd_init_packet initpkt;
	status_t err;
	PRINT((DP ">%s()\n", __FUNCTION__));

	PRINT((DP " %s: socket()\n", __FUNCTION__));
	err = dev->sock = ksocket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (err == -1 && errno < 0)
		err = errno;
	if (err < 0)
		goto err0;

	PRINT((DP " %s: connect()\n", __FUNCTION__));
	err = kconnect(dev->sock, (struct sockaddr *)&dev->server, sizeof(dev->server));
	//err = ENOSYS;
	if (err == -1 && errno < 0)
		err = errno;
	/* HACK: avoid the kernel unloading us with locked pages from TCP */
	if (err)
		gDelayUnload = true;
	if (err)
		goto err1;

	PRINT((DP " %s: recv(initpkt)\n", __FUNCTION__));
	err = krecv(dev->sock, &initpkt, sizeof(initpkt), 0);
	if (err == -1 && errno < 0)
		err = errno;
	if (err < (status_t)sizeof(initpkt))
		goto err2;
	err = EINVAL;//EPROTO;
	if (memcmp(initpkt.passwd, NBD_INIT_PASSWD, sizeof(initpkt.passwd)))
		goto err3;
	if (B_BENDIAN_TO_HOST_INT64(initpkt.magic) != NBD_INIT_MAGIC)
		goto err3;

	dev->size = B_BENDIAN_TO_HOST_INT64(initpkt.device_size);

	dprintf(DP " %s: connected, device size %" B_PRIu64 " bytes.\n",
		__FUNCTION__, dev->size);

	err = dev->postoffice = spawn_kernel_thread(nbd_postoffice, "nbd postoffice", B_REAL_TIME_PRIORITY, dev);
	if (err < B_OK)
		goto err4;
	resume_thread(dev->postoffice);

	PRINT((DP "<%s\n", __FUNCTION__));
	return B_OK;

err4:
	dev->postoffice = -1;
err3:
err2:
err1:
	kclosesocket(dev->sock);
	dev->sock = -1;
err0:
	dprintf(DP "<%s: error 0x%08" B_PRIx32 "\n", __FUNCTION__, err);
	return err;
}


status_t nbd_teardown(struct nbd_device *dev)
{
	status_t ret;
	PRINT((DP ">%s()\n", __FUNCTION__));
	kshutdown(dev->sock, SHUT_RDWR);
	kclosesocket(dev->sock);
	dev->sock = -1;
	wait_for_thread(dev->postoffice, &ret);
	return B_OK;
}

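/* Sends the request header (and, for writes, the data) then queues the
 * entry so the postoffice can match the reply. The caller must hold
 * dev->ben so that header and data go out back to back on the socket. */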
status_t nbd_post_request(struct nbd_device *dev, struct nbd_request_entry *req)
{
	status_t err;
	PRINT((DP ">%s(handle:%" B_PRIu64 ")\n", __FUNCTION__, req->handle));

	err = ksend(dev->sock, &req->req, sizeof(req->req), 0);
	if (err < 0)
		return err;

	if (req->type == NBD_CMD_WRITE) {
		err = ksend(dev->sock, req->buffer, req->len, 0);
		if (err < 0)
			return err;
		/* remember how much we actually sent; don't clobber the
		 * expected length of reads with the header size */
		req->len = err;
	}

	err = nbd_queue_request(dev, req);
	return err;
}


#if 0
#pragma mark ==== device hooks ====
#endif

static struct nbd_device nbd_devices[MAX_NBDS];

status_t nbd_open(const char *name, uint32 flags, cookie_t **cookie) {
	status_t err;
#ifdef MOUNT_KLUDGE
	int32 refcnt;
	int kfd;
#endif
	struct nbd_device *dev = NULL;
	PRINT((DP ">%s(%s, %" B_PRIx32 ", )\n", __FUNCTION__, name, flags));
	(void)flags;
	dev = nbd_find_device(name);
	if (!dev || !dev->valid)
		return ENOENT;
	err = ENOMEM;
	*cookie = (void*)malloc(sizeof(cookie_t));
	if (*cookie == NULL)
		goto err0;
	memset(*cookie, 0, sizeof(cookie_t));
	(*cookie)->dev = dev;
	err = mutex_lock(&dev->ben);
	if (err)
		goto err1;
	/* connect on first open */
	if (dev->sock < 0)
		err = nbd_connect(dev);
	if (err)
		goto err2;
#ifdef MOUNT_KLUDGE
	refcnt = dev->refcnt;
	kfd = dev->kludge;
	dev->kludge = -1;
#endif
	/* count the open; nbd_free() tears down on the last one */
	dev->refcnt++;
	mutex_unlock(&dev->ben);

#ifdef MOUNT_KLUDGE
	if (refcnt == 0) {
		char buf[DEVICE_NAME_MAX + 8];
		sprintf(buf, "/dev/%s", name);
		dev->kludge = open(buf, O_RDONLY);
	} else if (kfd > -1) {
		close(kfd);
	}
#endif

	return B_OK;

err2:
	mutex_unlock(&dev->ben);
err1:
	free(*cookie);
err0:
	dprintf(DP " %s: error 0x%08" B_PRIx32 "\n", __FUNCTION__, err);
	return err;
}


status_t nbd_close(cookie_t *cookie) {
	struct nbd_device *dev = cookie->dev;
	status_t err;
#ifdef MOUNT_KLUDGE
	int kfd = -1;
#endif
	PRINT((DP ">%s(%d)\n", __FUNCTION__, WHICH(cookie->dev)));

	err = mutex_lock(&dev->ben);
	if (err)
		return err;

	// XXX: do something ?
#ifdef MOUNT_KLUDGE
	kfd = dev->kludge;
	dev->kludge = -1;
#endif

	mutex_unlock(&dev->ben);

#ifdef MOUNT_KLUDGE
	if (kfd > -1) {
		close(kfd);
	}
#endif
	return B_OK;
}


status_t nbd_free(cookie_t *cookie) {
	struct nbd_device *dev = cookie->dev;
	status_t err;
	PRINT((DP ">%s(%d)\n", __FUNCTION__, WHICH(cookie->dev)));

	err = mutex_lock(&dev->ben);
	if (err)
		return err;

	/* tear down the connection on last close */
	if (--dev->refcnt == 0) {
		err = nbd_teardown(dev);
	}

	mutex_unlock(&dev->ben);

	free(cookie);
	return err;
}


status_t nbd_control(cookie_t *cookie, uint32 op, void *data, size_t len) {
	PRINT((DP ">%s(%d, %" B_PRIu32 ", , %ld)\n", __FUNCTION__,
		WHICH(cookie->dev), op, len));
	switch (op) {
	case B_GET_DEVICE_SIZE: /* this one is broken anyway... */
		if (data) {
			*(size_t *)data = (size_t)cookie->dev->size;
			return B_OK;
		}
		return EINVAL;
	case B_SET_DEVICE_SIZE: /* broken */
		return EINVAL;
	case B_SET_NONBLOCKING_IO:
		return EINVAL;
	case B_SET_BLOCKING_IO:
		return B_OK;
	case B_GET_READ_STATUS:
	case B_GET_WRITE_STATUS:
		if (data) {
			*(bool *)data = false;
			return B_OK;
		}
		return EINVAL;
	case B_GET_GEOMETRY:
	case B_GET_BIOS_GEOMETRY:
		if (data != NULL && len <= sizeof(device_geometry)) {
			device_geometry geometry;
			geometry.bytes_per_sector = BLKSIZE;
			geometry.sectors_per_track = 1;
			geometry.cylinder_count = cookie->dev->size / BLKSIZE;
			geometry.head_count = 1;
			geometry.device_type = B_DISK;
			geometry.removable = false;
			geometry.read_only = cookie->dev->readonly;
			geometry.write_once = false;
			geometry.bytes_per_physical_sector = BLKSIZE;
			return user_memcpy(data, &geometry, len);
		}
		return EINVAL;
	case B_GET_MEDIA_STATUS:
		if (data) {
			*(status_t *)data = B_OK;
			return B_OK;
		}
		return EINVAL;

	case B_EJECT_DEVICE:
	case B_LOAD_MEDIA:
		return B_BAD_VALUE;
	case B_FLUSH_DRIVE_CACHE: /* wait for request list to be empty ? */
		return B_OK;
	default:
		return B_BAD_VALUE;
	}
	return B_NOT_ALLOWED;
}

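/* Read path: allocate a request, post it while holding the lock, then
 * block on the request's semaphore until the postoffice thread has
 * seen the reply. If the wait fails (interruption, semaphore deleted)
 * the request is left queued and marked 'discard' so the postoffice
 * frees it later. */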
status_t nbd_read(cookie_t *cookie, off_t position, void *data, size_t *numbytes) {
	struct nbd_device *dev = cookie->dev;
	struct nbd_request_entry *req;
	status_t err, semerr;
	PRINT((DP ">%s(%d, %" B_PRIdOFF ", , )\n", __FUNCTION__,
		WHICH(cookie->dev), position));

	if (position < 0)
		return EINVAL;
	if (!data)
		return EINVAL;

	err = nbd_alloc_request(dev, &req, NBD_CMD_READ, position, *numbytes, NULL);
	if (err)
		goto err0;

	//LOCK
	err = mutex_lock(&dev->ben);
	if (err)
		goto err1;

	err = nbd_post_request(dev, req);

	//UNLOCK
	mutex_unlock(&dev->ben);

	if (err)
		goto err2;

	semerr = acquire_sem(req->sem);

	//LOCK
	err = mutex_lock(&dev->ben);
	if (err)
		goto err3;

	/* bad scenarios */
	if (!req->replied)
		req->discard = true;
	else if (semerr)
		nbd_free_request(dev, req);

	//UNLOCK
	mutex_unlock(&dev->ben);

	if (semerr == B_OK) {
		*numbytes = req->len;
		memcpy(data, req->buffer, req->len);
		err = B_OK;
		if (*numbytes == 0 && req->reply.error)
			err = EIO;
		nbd_free_request(dev, req);
		return err;
	}

	*numbytes = 0;
	return semerr;

err3:
err2:
err1:
	nbd_free_request(dev, req);
err0:
	*numbytes = 0;
	return err;
}


status_t nbd_write(cookie_t *cookie, off_t position, const void *data, size_t *numbytes) {
	struct nbd_device *dev = cookie->dev;
	struct nbd_request_entry *req;
	status_t err, semerr;
	PRINT((DP ">%s(%d, %" B_PRIdOFF ", %ld, )\n", __FUNCTION__,
		WHICH(cookie->dev), position, *numbytes));

	if (position < 0)
		return EINVAL;
	if (!data)
		return EINVAL;
	err = B_NOT_ALLOWED;
	if (dev->readonly)
		goto err0;

	err = nbd_alloc_request(dev, &req, NBD_CMD_WRITE, position, *numbytes, data);
	if (err)
		goto err0;

	//LOCK
	err = mutex_lock(&dev->ben);
	if (err)
		goto err1;

	/* sending request+data must be atomic */
	err = nbd_post_request(dev, req);

	//UNLOCK
	mutex_unlock(&dev->ben);

	if (err)
		goto err2;

	semerr = acquire_sem(req->sem);

	//LOCK
	err = mutex_lock(&dev->ben);
	if (err)
		goto err3;

	/* bad scenarios */
	if (!req->replied)
		req->discard = true;
	else if (semerr)
		nbd_free_request(dev, req);

	//UNLOCK
	mutex_unlock(&dev->ben);

	if (semerr == B_OK) {
		*numbytes = req->len;
		err = B_OK;
		if (*numbytes == 0 && req->reply.error)
			err = EIO;
		nbd_free_request(dev, req);
		return err;
	}

	*numbytes = 0;
	return semerr;

err3:
err2:
err1:
	nbd_free_request(dev, req);
err0:
	*numbytes = 0;
	return err;
}


device_hooks nbd_hooks = {
	(device_open_hook)nbd_open,
	(device_close_hook)nbd_close,
	(device_free_hook)nbd_free,
	(device_control_hook)nbd_control,
	(device_read_hook)nbd_read,
	(device_write_hook)nbd_write,
	NULL,
	NULL,
	NULL,
	NULL
};

#if 0
#pragma mark ==== driver hooks ====
#endif

int32 api_version = B_CUR_DRIVER_API_VERSION;

static char *nbd_name[MAX_NBDS+1] = {
	NULL
};


status_t
init_hardware (void)
{
	PRINT((DP ">%s()\n", __FUNCTION__));
	return B_OK;
}


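/* Devices are configured through the "nbd" driver settings file
 * (typically /boot/home/config/settings/kernel/drivers/nbd on Haiku).
 * Each device is a top-level section named after its index, holding a
 * "server <ip> <port>" parameter and an optional "readonly" flag.
 * A hypothetical example exporting one read-only device as
 * /dev/disk/virtual/nbd/0/raw:
 *
 *	0 {
 *		server 192.168.0.2 1234
 *		readonly
 *	}
 */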
status_t
init_driver (void)
{
	status_t err;
	int i, j;
	// XXX: load settings
	void *handle;
	char **names = nbd_name;
	PRINT((DP ">%s()\n", __FUNCTION__));

	handle = load_driver_settings(DRV);
	if (handle == NULL)
		return ENOENT;
	// XXX: test for boot args ?

	err = ksocket_init();
	if (err < B_OK)
		return err;

	for (i = 0; i < MAX_NBDS; i++) {
		nbd_devices[i].valid = false;
		nbd_devices[i].readonly = false;
		mutex_init(&nbd_devices[i].ben, "nbd lock");
		nbd_devices[i].refcnt = 0;
		nbd_devices[i].req = 0LL; /* next ID for requests */
		nbd_devices[i].sock = -1;
		nbd_devices[i].postoffice = -1;
		nbd_devices[i].size = 0LL;
		nbd_devices[i].reqs = NULL;
#ifdef MOUNT_KLUDGE
		nbd_devices[i].kludge = -1;
#endif
		nbd_name[i] = NULL;
	}

	for (i = 0; i < MAX_NBDS; i++) {
		const driver_settings *settings = get_driver_settings(handle);
		const driver_parameter *p = NULL;
		char keyname[10];
		sprintf(keyname, "%d", i);
		for (j = 0; j < settings->parameter_count; j++)
			if (!strcmp(settings->parameters[j].name, keyname))
				p = &settings->parameters[j];
		if (!p)
			continue;
		for (j = 0; j < p->parameter_count; j++) {
			if (!strcmp(p->parameters[j].name, "readonly"))
				nbd_devices[i].readonly = true;
			if (!strcmp(p->parameters[j].name, "server")) {
				if (p->parameters[j].value_count < 2)
					continue;
				nbd_devices[i].server.sin_len = sizeof(struct sockaddr_in);
				nbd_devices[i].server.sin_family = AF_INET;
				kinet_aton(p->parameters[j].values[0], &nbd_devices[i].server.sin_addr);
				nbd_devices[i].server.sin_port = htons(atoi(p->parameters[j].values[1]));
				dprintf(DP " configured [%d]\n", i);
				*(names) = malloc(DEVICE_NAME_MAX);
				if (*(names) == NULL)
					return ENOMEM;
				sprintf(*(names++), DEVICE_FMT, i);
				nbd_devices[i].valid = true;
			}
		}
	}
	*names = NULL;

	unload_driver_settings(handle);
	return B_OK;
}


void
uninit_driver (void)
{
	int i;
	PRINT((DP ">%s()\n", __FUNCTION__));
	for (i = 0; i < MAX_NBDS; i++) {
		free(nbd_name[i]);
		mutex_destroy(&nbd_devices[i].ben);
	}
	ksocket_cleanup();
	/* HACK: see the comment near gDelayUnload */
	if (gDelayUnload)
		snooze(BONE_TEARDOWN_DELAY);
}


const char**
publish_devices()
{
	PRINT((DP ">%s()\n", __FUNCTION__));
	return (const char **)nbd_name;
}


device_hooks*
find_device(const char* name)
{
	PRINT((DP ">%s(%s)\n", __FUNCTION__, name));
	return &nbd_hooks;
}


struct nbd_device*
nbd_find_device(const char* name)
{
	int i;
	PRINT((DP ">%s(%s)\n", __FUNCTION__, name));
	for (i = 0; i < MAX_NBDS; i++) {
		char buf[DEVICE_NAME_MAX];
		sprintf(buf, DEVICE_FMT, i);
		if (!strcmp(buf, name))
			return &nbd_devices[i];
	}
	return NULL;
}
