Diff view of FreeBSD stable/11 sys/fs/cuse/cuse.c between r359651 (old) and r359652 (new), full view:
old: cuse.c (359651)    new: cuse.c (359652)
1/* $FreeBSD: stable/11/sys/fs/cuse/cuse.c 359651 2020-04-06 07:09:04Z hselasky $ */
1/* $FreeBSD: stable/11/sys/fs/cuse/cuse.c 359652 2020-04-06 07:16:31Z hselasky $ */
2/*-
3 * Copyright (c) 2010-2020 Hans Petter Selasky. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include "opt_compat.h"
28
29#include <sys/stdint.h>
30#include <sys/stddef.h>
31#include <sys/param.h>
32#include <sys/types.h>
33#include <sys/systm.h>
34#include <sys/conf.h>
35#include <sys/kernel.h>
36#include <sys/bus.h>
37#include <sys/linker_set.h>
38#include <sys/module.h>
39#include <sys/lock.h>
40#include <sys/mutex.h>
41#include <sys/condvar.h>
42#include <sys/sysctl.h>
43#include <sys/unistd.h>
44#include <sys/malloc.h>
45#include <sys/priv.h>
46#include <sys/uio.h>
47#include <sys/poll.h>
48#include <sys/sx.h>
49#include <sys/rwlock.h>
50#include <sys/queue.h>
51#include <sys/fcntl.h>
52#include <sys/proc.h>
53#include <sys/vnode.h>
54#include <sys/selinfo.h>
55#include <sys/ptrace.h>
56#include <sys/sysent.h>
57
58#include <machine/bus.h>
59
60#include <vm/vm.h>
61#include <vm/pmap.h>
62#include <vm/vm_object.h>
63#include <vm/vm_page.h>
64#include <vm/vm_pager.h>
65
66#include <fs/cuse/cuse_defs.h>
67#include <fs/cuse/cuse_ioctl.h>
68
69MODULE_VERSION(cuse, 1);
70
71/*
72 * Prevent cuse4bsd.ko and cuse.ko from loading at the same time by
73 * declaring support for the cuse4bsd interface in cuse.ko:
74 */
75MODULE_VERSION(cuse4bsd, 1);
76
77#ifdef FEATURE
78FEATURE(cuse, "Userspace character devices");
79#endif
80
81struct cuse_command;
82struct cuse_server;
83struct cuse_client;
84
85struct cuse_client_command {
86 TAILQ_ENTRY(cuse_client_command) entry;
87 struct cuse_command sub;
88 struct sx sx;
89 struct cv cv;
90 struct thread *entered;
91 struct cuse_client *client;
92 struct proc *proc_curr;
93 int proc_refs;
94 int got_signal;
95 int error;
96 int command;
97};
98
99struct cuse_memory {
100 TAILQ_ENTRY(cuse_memory) entry;
101 vm_object_t object;
102 uint32_t page_count;
103 uint32_t alloc_nr;
104};
105
106struct cuse_server_dev {
107 TAILQ_ENTRY(cuse_server_dev) entry;
108 struct cuse_server *server;
109 struct cdev *kern_dev;
110 struct cuse_dev *user_dev;
111};
112
113struct cuse_server {
114 TAILQ_ENTRY(cuse_server) entry;
115 TAILQ_HEAD(, cuse_client_command) head;
116 TAILQ_HEAD(, cuse_server_dev) hdev;
117 TAILQ_HEAD(, cuse_client) hcli;
118 TAILQ_HEAD(, cuse_memory) hmem;
119 struct mtx mtx;
120 struct cv cv;
121 struct selinfo selinfo;
122 pid_t pid;
123 int is_closing;
124 int refs;
125};
126
127struct cuse_client {
128 TAILQ_ENTRY(cuse_client) entry;
129 TAILQ_ENTRY(cuse_client) entry_ref;
130 struct cuse_client_command cmds[CUSE_CMD_MAX];
131 struct cuse_server *server;
132 struct cuse_server_dev *server_dev;
133
134 uint8_t ioctl_buffer[CUSE_BUFFER_MAX] __aligned(4);
135
136 int fflags; /* file flags */
137 int cflags; /* client flags */
138#define CUSE_CLI_IS_CLOSING 0x01
139#define CUSE_CLI_KNOTE_NEED_READ 0x02
140#define CUSE_CLI_KNOTE_NEED_WRITE 0x04
141#define CUSE_CLI_KNOTE_HAS_READ 0x08
142#define CUSE_CLI_KNOTE_HAS_WRITE 0x10
143};
144
145#define CUSE_CLIENT_CLOSING(pcc) \
146 ((pcc)->cflags & CUSE_CLI_IS_CLOSING)
147
148static MALLOC_DEFINE(M_CUSE, "cuse", "CUSE memory");
149
150static TAILQ_HEAD(, cuse_server) cuse_server_head;
151static struct mtx cuse_global_mtx;
152static struct cdev *cuse_dev;
153static struct cuse_server *cuse_alloc_unit[CUSE_DEVICES_MAX];
154static int cuse_alloc_unit_id[CUSE_DEVICES_MAX];
155
156static void cuse_server_wakeup_all_client_locked(struct cuse_server *pcs);
157static void cuse_client_kqfilter_read_detach(struct knote *kn);
158static void cuse_client_kqfilter_write_detach(struct knote *kn);
159static int cuse_client_kqfilter_read_event(struct knote *kn, long hint);
160static int cuse_client_kqfilter_write_event(struct knote *kn, long hint);
161
162static struct filterops cuse_client_kqfilter_read_ops = {
163 .f_isfd = 1,
164 .f_detach = cuse_client_kqfilter_read_detach,
165 .f_event = cuse_client_kqfilter_read_event,
166};
167
168static struct filterops cuse_client_kqfilter_write_ops = {
169 .f_isfd = 1,
170 .f_detach = cuse_client_kqfilter_write_detach,
171 .f_event = cuse_client_kqfilter_write_event,
172};
173
174static d_open_t cuse_client_open;
175static d_close_t cuse_client_close;
176static d_ioctl_t cuse_client_ioctl;
177static d_read_t cuse_client_read;
178static d_write_t cuse_client_write;
179static d_poll_t cuse_client_poll;
180static d_mmap_single_t cuse_client_mmap_single;
181static d_kqfilter_t cuse_client_kqfilter;
182
183static struct cdevsw cuse_client_devsw = {
184 .d_version = D_VERSION,
185 .d_open = cuse_client_open,
186 .d_close = cuse_client_close,
187 .d_ioctl = cuse_client_ioctl,
188 .d_name = "cuse_client",
189 .d_flags = D_TRACKCLOSE,
190 .d_read = cuse_client_read,
191 .d_write = cuse_client_write,
192 .d_poll = cuse_client_poll,
193 .d_mmap_single = cuse_client_mmap_single,
194 .d_kqfilter = cuse_client_kqfilter,
195};
196
197static d_open_t cuse_server_open;
198static d_close_t cuse_server_close;
199static d_ioctl_t cuse_server_ioctl;
200static d_read_t cuse_server_read;
201static d_write_t cuse_server_write;
202static d_poll_t cuse_server_poll;
203static d_mmap_single_t cuse_server_mmap_single;
204
205static struct cdevsw cuse_server_devsw = {
206 .d_version = D_VERSION,
207 .d_open = cuse_server_open,
208 .d_close = cuse_server_close,
209 .d_ioctl = cuse_server_ioctl,
210 .d_name = "cuse_server",
211 .d_flags = D_TRACKCLOSE,
212 .d_read = cuse_server_read,
213 .d_write = cuse_server_write,
214 .d_poll = cuse_server_poll,
215 .d_mmap_single = cuse_server_mmap_single,
216};
217
218static void cuse_client_is_closing(struct cuse_client *);
219static int cuse_free_unit_by_id_locked(struct cuse_server *, int);
220
/*
 * Short locking wrappers. The global mutex protects the server list
 * and the unit allocation tables; each server has its own mutex.
 */
static void
cuse_global_lock(void)
{
	mtx_lock(&cuse_global_mtx);
}

static void
cuse_global_unlock(void)
{
	mtx_unlock(&cuse_global_mtx);
}

static void
cuse_server_lock(struct cuse_server *pcs)
{
	mtx_lock(&pcs->mtx);
}

static void
cuse_server_unlock(struct cuse_server *pcs)
{
	mtx_unlock(&pcs->mtx);
}

/* The per-command sx lock serializes callers of the same command slot. */
static void
cuse_cmd_lock(struct cuse_client_command *pccmd)
{
	sx_xlock(&pccmd->sx);
}

static void
cuse_cmd_unlock(struct cuse_client_command *pccmd)
{
	sx_xunlock(&pccmd->sx);
}
256
static void
cuse_kern_init(void *arg)
{
	/* Module load hook: set up global state and create /dev/cuse. */
	TAILQ_INIT(&cuse_server_head);

	mtx_init(&cuse_global_mtx, "cuse-global-mtx", NULL, MTX_DEF);

	/* the server control device, accessible by root/operator, mode 0600 */
	cuse_dev = make_dev(&cuse_server_devsw, 0,
	    UID_ROOT, GID_OPERATOR, 0600, "cuse");

	printf("Cuse v%d.%d.%d @ /dev/cuse\n",
	    (CUSE_VERSION >> 16) & 0xFF, (CUSE_VERSION >> 8) & 0xFF,
	    (CUSE_VERSION >> 0) & 0xFF);
}
2/*-
3 * Copyright (c) 2010-2020 Hans Petter Selasky. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include "opt_compat.h"
28
29#include <sys/stdint.h>
30#include <sys/stddef.h>
31#include <sys/param.h>
32#include <sys/types.h>
33#include <sys/systm.h>
34#include <sys/conf.h>
35#include <sys/kernel.h>
36#include <sys/bus.h>
37#include <sys/linker_set.h>
38#include <sys/module.h>
39#include <sys/lock.h>
40#include <sys/mutex.h>
41#include <sys/condvar.h>
42#include <sys/sysctl.h>
43#include <sys/unistd.h>
44#include <sys/malloc.h>
45#include <sys/priv.h>
46#include <sys/uio.h>
47#include <sys/poll.h>
48#include <sys/sx.h>
49#include <sys/rwlock.h>
50#include <sys/queue.h>
51#include <sys/fcntl.h>
52#include <sys/proc.h>
53#include <sys/vnode.h>
54#include <sys/selinfo.h>
55#include <sys/ptrace.h>
56#include <sys/sysent.h>
57
58#include <machine/bus.h>
59
60#include <vm/vm.h>
61#include <vm/pmap.h>
62#include <vm/vm_object.h>
63#include <vm/vm_page.h>
64#include <vm/vm_pager.h>
65
66#include <fs/cuse/cuse_defs.h>
67#include <fs/cuse/cuse_ioctl.h>
68
69MODULE_VERSION(cuse, 1);
70
71/*
72 * Prevent cuse4bsd.ko and cuse.ko from loading at the same time by
73 * declaring support for the cuse4bsd interface in cuse.ko:
74 */
75MODULE_VERSION(cuse4bsd, 1);
76
77#ifdef FEATURE
78FEATURE(cuse, "Userspace character devices");
79#endif
80
81struct cuse_command;
82struct cuse_server;
83struct cuse_client;
84
/* Per-client state for one outstanding command slot. */
struct cuse_client_command {
	TAILQ_ENTRY(cuse_client_command) entry;
	struct cuse_command sub;	/* request passed to the server process */
	struct sx sx;			/* serializes use of this command slot */
	struct cv cv;			/* wakeup for completion / proc_refs drain */
	struct thread *entered;		/* server thread servicing this command */
	struct cuse_client *client;	/* backpointer to owning client */
	struct proc *proc_curr;		/* client process valid for data copies */
	int proc_refs;			/* active copy operations on proc_curr */
	int got_signal;			/* set when the client caught a signal */
	int error;			/* CUSE_ERR_XXX result from the server */
	int command;			/* CUSE_CMD_XXX currently pending */
};

/* One anonymous memory region shared between server and clients. */
struct cuse_memory {
	TAILQ_ENTRY(cuse_memory) entry;
	vm_object_t object;		/* swap-backed VM object */
	uint32_t page_count;		/* region size in pages */
	uint32_t alloc_nr;		/* allocation number chosen by server */
};

/* Binding between a kernel character device and its user-space server. */
struct cuse_server_dev {
	TAILQ_ENTRY(cuse_server_dev) entry;
	struct cuse_server *server;
	struct cdev *kern_dev;		/* kernel device node */
	struct cuse_dev *user_dev;	/* opaque user-space device handle */
};

/* State of one /dev/cuse server instance. */
struct cuse_server {
	TAILQ_ENTRY(cuse_server) entry;
	TAILQ_HEAD(, cuse_client_command) head;	/* commands awaiting service */
	TAILQ_HEAD(, cuse_server_dev) hdev;	/* devices created by server */
	TAILQ_HEAD(, cuse_client) hcli;		/* connected clients */
	TAILQ_HEAD(, cuse_memory) hmem;		/* shared memory regions */
	struct mtx mtx;			/* protects the lists and flags above */
	struct cv cv;			/* wakeup for the server command loop */
	struct selinfo selinfo;
	pid_t pid;			/* process ID of the server process */
	int is_closing;
	int refs;
};

/* State of one client open of a cuse-backed character device. */
struct cuse_client {
	TAILQ_ENTRY(cuse_client) entry;
	TAILQ_ENTRY(cuse_client) entry_ref;
	struct cuse_client_command cmds[CUSE_CMD_MAX];	/* one slot per command */
	struct cuse_server *server;
	struct cuse_server_dev *server_dev;

	uint8_t ioctl_buffer[CUSE_BUFFER_MAX] __aligned(4);

	int fflags;			/* file flags */
	int cflags;			/* client flags */
#define CUSE_CLI_IS_CLOSING 0x01
#define CUSE_CLI_KNOTE_NEED_READ 0x02
#define CUSE_CLI_KNOTE_NEED_WRITE 0x04
#define CUSE_CLI_KNOTE_HAS_READ 0x08
#define CUSE_CLI_KNOTE_HAS_WRITE 0x10
};

#define CUSE_CLIENT_CLOSING(pcc) \
    ((pcc)->cflags & CUSE_CLI_IS_CLOSING)
147
148static MALLOC_DEFINE(M_CUSE, "cuse", "CUSE memory");
149
150static TAILQ_HEAD(, cuse_server) cuse_server_head;
151static struct mtx cuse_global_mtx;
152static struct cdev *cuse_dev;
153static struct cuse_server *cuse_alloc_unit[CUSE_DEVICES_MAX];
154static int cuse_alloc_unit_id[CUSE_DEVICES_MAX];
155
156static void cuse_server_wakeup_all_client_locked(struct cuse_server *pcs);
157static void cuse_client_kqfilter_read_detach(struct knote *kn);
158static void cuse_client_kqfilter_write_detach(struct knote *kn);
159static int cuse_client_kqfilter_read_event(struct knote *kn, long hint);
160static int cuse_client_kqfilter_write_event(struct knote *kn, long hint);
161
162static struct filterops cuse_client_kqfilter_read_ops = {
163 .f_isfd = 1,
164 .f_detach = cuse_client_kqfilter_read_detach,
165 .f_event = cuse_client_kqfilter_read_event,
166};
167
168static struct filterops cuse_client_kqfilter_write_ops = {
169 .f_isfd = 1,
170 .f_detach = cuse_client_kqfilter_write_detach,
171 .f_event = cuse_client_kqfilter_write_event,
172};
173
174static d_open_t cuse_client_open;
175static d_close_t cuse_client_close;
176static d_ioctl_t cuse_client_ioctl;
177static d_read_t cuse_client_read;
178static d_write_t cuse_client_write;
179static d_poll_t cuse_client_poll;
180static d_mmap_single_t cuse_client_mmap_single;
181static d_kqfilter_t cuse_client_kqfilter;
182
183static struct cdevsw cuse_client_devsw = {
184 .d_version = D_VERSION,
185 .d_open = cuse_client_open,
186 .d_close = cuse_client_close,
187 .d_ioctl = cuse_client_ioctl,
188 .d_name = "cuse_client",
189 .d_flags = D_TRACKCLOSE,
190 .d_read = cuse_client_read,
191 .d_write = cuse_client_write,
192 .d_poll = cuse_client_poll,
193 .d_mmap_single = cuse_client_mmap_single,
194 .d_kqfilter = cuse_client_kqfilter,
195};
196
197static d_open_t cuse_server_open;
198static d_close_t cuse_server_close;
199static d_ioctl_t cuse_server_ioctl;
200static d_read_t cuse_server_read;
201static d_write_t cuse_server_write;
202static d_poll_t cuse_server_poll;
203static d_mmap_single_t cuse_server_mmap_single;
204
205static struct cdevsw cuse_server_devsw = {
206 .d_version = D_VERSION,
207 .d_open = cuse_server_open,
208 .d_close = cuse_server_close,
209 .d_ioctl = cuse_server_ioctl,
210 .d_name = "cuse_server",
211 .d_flags = D_TRACKCLOSE,
212 .d_read = cuse_server_read,
213 .d_write = cuse_server_write,
214 .d_poll = cuse_server_poll,
215 .d_mmap_single = cuse_server_mmap_single,
216};
217
218static void cuse_client_is_closing(struct cuse_client *);
219static int cuse_free_unit_by_id_locked(struct cuse_server *, int);
220
/*
 * Short locking wrappers. The global mutex protects the server list
 * and the unit allocation tables; each server has its own mutex.
 */
static void
cuse_global_lock(void)
{
	mtx_lock(&cuse_global_mtx);
}

static void
cuse_global_unlock(void)
{
	mtx_unlock(&cuse_global_mtx);
}

static void
cuse_server_lock(struct cuse_server *pcs)
{
	mtx_lock(&pcs->mtx);
}

static void
cuse_server_unlock(struct cuse_server *pcs)
{
	mtx_unlock(&pcs->mtx);
}

/* The per-command sx lock serializes callers of the same command slot. */
static void
cuse_cmd_lock(struct cuse_client_command *pccmd)
{
	sx_xlock(&pccmd->sx);
}

static void
cuse_cmd_unlock(struct cuse_client_command *pccmd)
{
	sx_xunlock(&pccmd->sx);
}
256
static void
cuse_kern_init(void *arg)
{
	/* Module load hook: set up global state and create /dev/cuse. */
	TAILQ_INIT(&cuse_server_head);

	mtx_init(&cuse_global_mtx, "cuse-global-mtx", NULL, MTX_DEF);

	/* the server control device, accessible by root/operator, mode 0600 */
	cuse_dev = make_dev(&cuse_server_devsw, 0,
	    UID_ROOT, GID_OPERATOR, 0600, "cuse");

	printf("Cuse v%d.%d.%d @ /dev/cuse\n",
	    (CUSE_VERSION >> 16) & 0xFF, (CUSE_VERSION >> 8) & 0xFF,
	    (CUSE_VERSION >> 0) & 0xFF);
}
271SYSINIT(cuse_kern_init, SI_SUB_DEVFS, SI_ORDER_ANY, cuse_kern_init, 0);
271SYSINIT(cuse_kern_init, SI_SUB_DEVFS, SI_ORDER_ANY, cuse_kern_init, NULL);
272
static void
cuse_kern_uninit(void *arg)
{
	void *ptr;

	/*
	 * Module unload hook: poll until every server instance has
	 * gone away; the module cannot be removed while any /dev/cuse
	 * server is still referenced.
	 */
	while (1) {

		printf("Cuse: Please exit all /dev/cuse instances "
		    "and processes which have used this device.\n");

		pause("DRAIN", 2 * hz);

		cuse_global_lock();
		ptr = TAILQ_FIRST(&cuse_server_head);
		cuse_global_unlock();

		if (ptr == NULL)
			break;
	}

	if (cuse_dev != NULL)
		destroy_dev(cuse_dev);

	mtx_destroy(&cuse_global_mtx);
}
298SYSUNINIT(cuse_kern_uninit, SI_SUB_DEVFS, SI_ORDER_ANY, cuse_kern_uninit, 0);
299
300static int
301cuse_server_get(struct cuse_server **ppcs)
302{
303 struct cuse_server *pcs;
304 int error;
305
306 error = devfs_get_cdevpriv((void **)&pcs);
307 if (error != 0) {
308 *ppcs = NULL;
309 return (error);
310 }
311 if (pcs->is_closing) {
312 *ppcs = NULL;
313 return (EINVAL);
314 }
315 *ppcs = pcs;
316 return (0);
317}
318
static void
cuse_server_is_closing(struct cuse_server *pcs)
{
	struct cuse_client *pcc;

	/* idempotent: only run the shutdown marking once */
	if (pcs->is_closing)
		return;

	pcs->is_closing = 1;

	/* propagate the shutdown to every connected client */
	TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
		cuse_client_is_closing(pcc);
	}
}
333
/*
 * Find the command slot currently being serviced by thread "td",
 * or NULL if there is none (or the server is closing).
 */
static struct cuse_client_command *
cuse_server_find_command(struct cuse_server *pcs, struct thread *td)
{
	struct cuse_client *pcc;
	int n;

	if (pcs->is_closing)
		goto done;

	TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
		if (CUSE_CLIENT_CLOSING(pcc))
			continue;
		for (n = 0; n != CUSE_CMD_MAX; n++) {
			if (pcc->cmds[n].entered == td)
				return (&pcc->cmds[n]);
		}
	}
done:
	return (NULL);
}
354
/*
 * Sanitize a device name in place: every character outside the set
 * [a-zA-Z0-9._/] is replaced by an underscore.
 */
static void
cuse_str_filter(char *ptr)
{
	int c;

	for (; (c = *ptr) != 0; ptr++) {
		if ((c >= 'a' && c <= 'z') ||
		    (c >= 'A' && c <= 'Z') ||
		    (c >= '0' && c <= '9') ||
		    (c == '.') || (c == '_') || (c == '/'))
			continue;
		*ptr = '_';
	}
}
383
384static int
385cuse_convert_error(int error)
386{
387 ; /* indent fix */
388 switch (error) {
389 case CUSE_ERR_NONE:
390 return (0);
391 case CUSE_ERR_BUSY:
392 return (EBUSY);
393 case CUSE_ERR_WOULDBLOCK:
394 return (EWOULDBLOCK);
395 case CUSE_ERR_INVALID:
396 return (EINVAL);
397 case CUSE_ERR_NO_MEMORY:
398 return (ENOMEM);
399 case CUSE_ERR_FAULT:
400 return (EFAULT);
401 case CUSE_ERR_SIGNAL:
402 return (EINTR);
403 case CUSE_ERR_NO_DEVICE:
404 return (ENODEV);
405 default:
406 return (ENXIO);
407 }
408}
409
/* Release a shared memory region once its last user is gone. */
static void
cuse_vm_memory_free(struct cuse_memory *mem)
{
	/* last user is gone - free */
	vm_object_deallocate(mem->object);

	/* free CUSE memory */
	free(mem, M_CUSE);
}
419
420static int
421cuse_server_alloc_memory(struct cuse_server *pcs, uint32_t alloc_nr,
422 uint32_t page_count)
423{
424 struct cuse_memory *temp;
425 struct cuse_memory *mem;
426 vm_object_t object;
427 int error;
428
429 mem = malloc(sizeof(*mem), M_CUSE, M_WAITOK | M_ZERO);
430 if (mem == NULL)
431 return (ENOMEM);
432
433 object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * page_count,
434 VM_PROT_DEFAULT, 0, curthread->td_ucred);
435 if (object == NULL) {
436 error = ENOMEM;
437 goto error_0;
438 }
439
440 cuse_server_lock(pcs);
441 /* check if allocation number already exists */
442 TAILQ_FOREACH(temp, &pcs->hmem, entry) {
443 if (temp->alloc_nr == alloc_nr)
444 break;
445 }
446 if (temp != NULL) {
447 cuse_server_unlock(pcs);
448 error = EBUSY;
449 goto error_1;
450 }
451 mem->object = object;
452 mem->page_count = page_count;
453 mem->alloc_nr = alloc_nr;
454 TAILQ_INSERT_TAIL(&pcs->hmem, mem, entry);
455 cuse_server_unlock(pcs);
456
457 return (0);
458
459error_1:
460 vm_object_deallocate(object);
461error_0:
462 free(mem, M_CUSE);
463 return (error);
464}
465
/* Remove and free the shared memory region numbered "alloc_nr". */
static int
cuse_server_free_memory(struct cuse_server *pcs, uint32_t alloc_nr)
{
	struct cuse_memory *mem;

	cuse_server_lock(pcs);
	TAILQ_FOREACH(mem, &pcs->hmem, entry) {
		if (mem->alloc_nr == alloc_nr)
			break;
	}
	if (mem == NULL) {
		cuse_server_unlock(pcs);
		return (EINVAL);
	}
	TAILQ_REMOVE(&pcs->hmem, mem, entry);
	cuse_server_unlock(pcs);

	/* free the VM object outside the server lock */
	cuse_vm_memory_free(mem);

	return (0);
}
487
/*
 * Fetch the cuse client attached to the calling file handle.
 * Returns zero on success; clients whose device or server is going
 * away are rejected with EINVAL and *ppcc is set to NULL.
 */
static int
cuse_client_get(struct cuse_client **ppcc)
{
	struct cuse_client *pcc;
	int error;

	/* try to get private data */
	error = devfs_get_cdevpriv((void **)&pcc);
	if (error != 0) {
		*ppcc = NULL;
		return (error);
	}
	if (CUSE_CLIENT_CLOSING(pcc) || pcc->server->is_closing) {
		*ppcc = NULL;
		return (EINVAL);
	}
	*ppcc = pcc;
	return (0);
}
507
static void
cuse_client_is_closing(struct cuse_client *pcc)
{
	struct cuse_client_command *pccmd;
	uint32_t n;

	/* idempotent: only run once per client */
	if (CUSE_CLIENT_CLOSING(pcc))
		return;

	pcc->cflags |= CUSE_CLI_IS_CLOSING;
	pcc->server_dev = NULL;

	/*
	 * Dequeue any queued commands and wake up all threads
	 * sleeping on command completion.
	 */
	for (n = 0; n != CUSE_CMD_MAX; n++) {

		pccmd = &pcc->cmds[n];

		if (pccmd->entry.tqe_prev != NULL) {
			TAILQ_REMOVE(&pcc->server->head, pccmd, entry);
			pccmd->entry.tqe_prev = NULL;
		}
		cv_broadcast(&pccmd->cv);
	}
}
531
/*
 * Fill in a command slot and queue it for the user-space server.
 * Must be called with the server lock held.
 */
static void
cuse_client_send_command_locked(struct cuse_client_command *pccmd,
    uintptr_t data_ptr, unsigned long arg, int fflags, int ioflag)
{
	unsigned long cuse_fflags = 0;
	struct cuse_server *pcs;

	/* translate kernel file flags into CUSE_FFLAG_XXX bits */
	if (fflags & FREAD)
		cuse_fflags |= CUSE_FFLAG_READ;

	if (fflags & FWRITE)
		cuse_fflags |= CUSE_FFLAG_WRITE;

	if (ioflag & IO_NDELAY)
		cuse_fflags |= CUSE_FFLAG_NONBLOCK;
#if defined(__LP64__)
	if (SV_CURPROC_FLAG(SV_ILP32))
		cuse_fflags |= CUSE_FFLAG_COMPAT32;
#endif
	pccmd->sub.fflags = cuse_fflags;
	pccmd->sub.data_pointer = data_ptr;
	pccmd->sub.argument = arg;

	pcs = pccmd->client->server;

	/*
	 * Queue the command and wake up the server, unless it is
	 * already queued or the client/server is closing.
	 */
	if ((pccmd->entry.tqe_prev == NULL) &&
	    (CUSE_CLIENT_CLOSING(pccmd->client) == 0) &&
	    (pcs->is_closing == 0)) {
		TAILQ_INSERT_TAIL(&pcs->head, pccmd, entry);
		cv_signal(&pcs->cv);
	}
}
564
/*
 * Note that the client caught a signal and queue the dedicated
 * CUSE_CMD_SIGNAL command so the server is told about it.
 */
static void
cuse_client_got_signal(struct cuse_client_command *pccmd)
{
	struct cuse_server *pcs;

	pccmd->got_signal = 1;

	/* switch to the client's signal command slot */
	pccmd = &pccmd->client->cmds[CUSE_CMD_SIGNAL];

	pcs = pccmd->client->server;

	if ((pccmd->entry.tqe_prev == NULL) &&
	    (CUSE_CLIENT_CLOSING(pccmd->client) == 0) &&
	    (pcs->is_closing == 0)) {
		TAILQ_INSERT_TAIL(&pcs->head, pccmd, entry);
		cv_signal(&pcs->cv);
	}
}
583
/*
 * Sleep until the user-space server completes the queued command and
 * return its CUSE_ERR_XXX status. Must be called with the server
 * lock held. Returns CUSE_ERR_OTHER if the client or server is
 * closing while waiting.
 */
static int
cuse_client_receive_command_locked(struct cuse_client_command *pccmd,
    uint8_t *arg_ptr, uint32_t arg_len)
{
	struct cuse_server *pcs;
	int error;

	pcs = pccmd->client->server;
	error = 0;

	/* publish our process so the server may copy data to/from us */
	pccmd->proc_curr = curthread->td_proc;

	if (CUSE_CLIENT_CLOSING(pccmd->client) || pcs->is_closing) {
		error = CUSE_ERR_OTHER;
		goto done;
	}
	while (pccmd->command == CUSE_CMD_NONE) {
		if (error != 0) {
			/*
			 * Already interrupted once: wait for the final
			 * answer without being interruptible again.
			 */
			cv_wait(&pccmd->cv, &pcs->mtx);
		} else {
			error = cv_wait_sig(&pccmd->cv, &pcs->mtx);

			if (error != 0)
				cuse_client_got_signal(pccmd);
		}
		if (CUSE_CLIENT_CLOSING(pccmd->client) || pcs->is_closing) {
			error = CUSE_ERR_OTHER;
			goto done;
		}
	}

	error = pccmd->error;
	pccmd->command = CUSE_CMD_NONE;
	cv_signal(&pccmd->cv);

done:

	/* wait until all process references are gone */

	pccmd->proc_curr = NULL;

	while (pccmd->proc_refs != 0)
		cv_wait(&pccmd->cv, &pcs->mtx);

	return (error);
}
630
631/*------------------------------------------------------------------------*
632 * CUSE SERVER PART
633 *------------------------------------------------------------------------*/
634
/* Tear down one kernel device created by a cuse server. */
static void
cuse_server_free_dev(struct cuse_server_dev *pcsd)
{
	struct cuse_server *pcs;
	struct cuse_client *pcc;

	/* get server pointer */
	pcs = pcsd->server;

	/* prevent creation of more devices */
	cuse_server_lock(pcs);
	if (pcsd->kern_dev != NULL)
		pcsd->kern_dev->si_drv1 = NULL;

	/* mark every client of this device as closing */
	TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
		if (pcc->server_dev == pcsd)
			cuse_client_is_closing(pcc);
	}
	cuse_server_unlock(pcs);

	/* destroy device, if any */
	if (pcsd->kern_dev != NULL) {
		/* destroy device synchronously */
		destroy_dev(pcsd->kern_dev);
	}
	free(pcsd, M_CUSE);
}
662
/*
 * Drop one reference on the server; the last reference tears down
 * all devices, unit numbers and shared memory and frees the server.
 */
static void
cuse_server_unref(struct cuse_server *pcs)
{
	struct cuse_server_dev *pcsd;
	struct cuse_memory *mem;

	cuse_server_lock(pcs);
	if (--(pcs->refs) != 0) {
		cuse_server_unlock(pcs);
		return;
	}
	cuse_server_is_closing(pcs);
	/* final client wakeup, if any */
	cuse_server_wakeup_all_client_locked(pcs);

	cuse_global_lock();
	TAILQ_REMOVE(&cuse_server_head, pcs, entry);
	cuse_global_unlock();

	/*
	 * Free devices one by one; the server lock is dropped around
	 * cuse_server_free_dev() because it re-acquires the lock and
	 * calls destroy_dev().
	 */
	while ((pcsd = TAILQ_FIRST(&pcs->hdev)) != NULL) {
		TAILQ_REMOVE(&pcs->hdev, pcsd, entry);
		cuse_server_unlock(pcs);
		cuse_server_free_dev(pcsd);
		cuse_server_lock(pcs);
	}

	/* release all unit numbers owned by this server */
	cuse_free_unit_by_id_locked(pcs, -1);

	/* free shared memory; again with the lock dropped per region */
	while ((mem = TAILQ_FIRST(&pcs->hmem)) != NULL) {
		TAILQ_REMOVE(&pcs->hmem, mem, entry);
		cuse_server_unlock(pcs);
		cuse_vm_memory_free(mem);
		cuse_server_lock(pcs);
	}

	knlist_clear(&pcs->selinfo.si_note, 1);
	knlist_destroy(&pcs->selinfo.si_note);

	cuse_server_unlock(pcs);

	/* wait for pending select/poll users before freeing selinfo */
	seldrain(&pcs->selinfo);

	cv_destroy(&pcs->cv);

	mtx_destroy(&pcs->mtx);

	free(pcs, M_CUSE);
}
711
/*
 * Mark the server as closing and wake up everything waiting on it.
 * Returns the current reference count so the caller can wait for
 * the references to drain.
 */
static int
cuse_server_do_close(struct cuse_server *pcs)
{
	int retval;

	cuse_server_lock(pcs);
	cuse_server_is_closing(pcs);
	/* final client wakeup, if any */
	cuse_server_wakeup_all_client_locked(pcs);

	knlist_clear(&pcs->selinfo.si_note, 1);

	retval = pcs->refs;
	cuse_server_unlock(pcs);

	return (retval);
}

/* cdevpriv destructor for /dev/cuse server handles. */
static void
cuse_server_free(void *arg)
{
	struct cuse_server *pcs = arg;

	/*
	 * The final server unref should be done by the server thread
	 * to prevent deadlock in the client cdevpriv destructor,
	 * which cannot destroy itself.
	 */
	while (cuse_server_do_close(pcs) != 1)
		pause("W", hz);

	/* drop final refcount */
	cuse_server_unref(pcs);
}
746
747static int
748cuse_server_open(struct cdev *dev, int fflags, int devtype, struct thread *td)
749{
750 struct cuse_server *pcs;
751
752 pcs = malloc(sizeof(*pcs), M_CUSE, M_WAITOK | M_ZERO);
753 if (pcs == NULL)
754 return (ENOMEM);
755
756 if (devfs_set_cdevpriv(pcs, &cuse_server_free)) {
757 printf("Cuse: Cannot set cdevpriv.\n");
758 free(pcs, M_CUSE);
759 return (ENOMEM);
760 }
761 /* store current process ID */
762 pcs->pid = curproc->p_pid;
763
764 TAILQ_INIT(&pcs->head);
765 TAILQ_INIT(&pcs->hdev);
766 TAILQ_INIT(&pcs->hcli);
767 TAILQ_INIT(&pcs->hmem);
768
769 cv_init(&pcs->cv, "cuse-server-cv");
770
771 mtx_init(&pcs->mtx, "cuse-server-mtx", NULL, MTX_DEF);
772
773 knlist_init_mtx(&pcs->selinfo.si_note, &pcs->mtx);
774
775 cuse_global_lock();
776 pcs->refs++;
777 TAILQ_INSERT_TAIL(&cuse_server_head, pcs, entry);
778 cuse_global_unlock();
779
780 return (0);
781}
782
/*
 * Close handler for /dev/cuse: start the shutdown; the final
 * teardown happens in cuse_server_free() via cdevpriv destruction.
 */
static int
cuse_server_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct cuse_server *pcs;

	if (cuse_server_get(&pcs) == 0)
		cuse_server_do_close(pcs);

	return (0);
}
793
/* /dev/cuse is controlled via ioctl only; read/write are rejected. */
static int
cuse_server_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	return (ENXIO);
}

static int
cuse_server_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	return (ENXIO);
}
805
/*
 * Copy ioctl data between the server process and the client's
 * per-client ioctl buffer. "peer_ptr" encodes an offset into that
 * buffer relative to CUSE_BUF_MIN_PTR. Called with the server lock
 * held; the lock is dropped around the copy.
 */
static int
cuse_server_ioctl_copy_locked(struct cuse_server *pcs,
    struct cuse_client_command *pccmd,
    struct cuse_data_chunk *pchk, int isread)
{
	struct proc *p_proc;
	uint32_t offset;
	int error;

	offset = pchk->peer_ptr - CUSE_BUF_MIN_PTR;

	/* bounds-check the chunk against the ioctl buffer */
	if (pchk->length > CUSE_BUFFER_MAX)
		return (EFAULT);

	if (offset >= CUSE_BUFFER_MAX)
		return (EFAULT);

	if ((offset + pchk->length) > CUSE_BUFFER_MAX)
		return (EFAULT);

	p_proc = pccmd->proc_curr;
	if (p_proc == NULL)
		return (ENXIO);

	if (pccmd->proc_refs < 0)
		return (ENOMEM);

	/* keep the peer pinned while the server lock is dropped */
	pccmd->proc_refs++;

	cuse_server_unlock(pcs);

	/*
	 * isread == 0: copy from the caller's local_ptr into the
	 * client's ioctl buffer; otherwise copy out of the buffer.
	 */
	if (isread == 0) {
		error = copyin(
		    (void *)pchk->local_ptr,
		    pccmd->client->ioctl_buffer + offset,
		    pchk->length);
	} else {
		error = copyout(
		    pccmd->client->ioctl_buffer + offset,
		    (void *)pchk->local_ptr,
		    pchk->length);
	}

	cuse_server_lock(pcs);

	pccmd->proc_refs--;

	/* wake a client draining its process references, if any */
	if (pccmd->proc_curr == NULL)
		cv_signal(&pccmd->cv);

	return (error);
}
858
/*
 * Copy "len" bytes from (proc_s, data_s) to (proc_d, data_d).
 * The current process must be one of the two endpoints; the foreign
 * process is accessed through proc_rwmem(). Returns EINVAL when the
 * caller is neither source nor destination.
 */
static int
cuse_proc2proc_copy(struct proc *proc_s, vm_offset_t data_s,
    struct proc *proc_d, vm_offset_t data_d, size_t len)
{
	struct thread *td;
	struct proc *proc_cur;
	int error;

	td = curthread;
	proc_cur = td->td_proc;

	if (proc_cur == proc_d) {
		/* read from the foreign source into our destination */
		struct iovec iov = {
			.iov_base = (caddr_t)data_d,
			.iov_len = len,
		};
		struct uio uio = {
			.uio_iov = &iov,
			.uio_iovcnt = 1,
			.uio_offset = (off_t)data_s,
			.uio_resid = len,
			.uio_segflg = UIO_USERSPACE,
			.uio_rw = UIO_READ,
			.uio_td = td,
		};

		/* hold the foreign process during the copy */
		PHOLD(proc_s);
		error = proc_rwmem(proc_s, &uio);
		PRELE(proc_s);

	} else if (proc_cur == proc_s) {
		/* write from our source into the foreign destination */
		struct iovec iov = {
			.iov_base = (caddr_t)data_s,
			.iov_len = len,
		};
		struct uio uio = {
			.uio_iov = &iov,
			.uio_iovcnt = 1,
			.uio_offset = (off_t)data_d,
			.uio_resid = len,
			.uio_segflg = UIO_USERSPACE,
			.uio_rw = UIO_WRITE,
			.uio_td = td,
		};

		PHOLD(proc_d);
		error = proc_rwmem(proc_d, &uio);
		PRELE(proc_d);
	} else {
		error = EINVAL;
	}
	return (error);
}
912
/*
 * Copy a data chunk between the server process (the caller) and the
 * client process that issued the command. Called with the server
 * lock held; the lock is dropped around the copy.
 */
static int
cuse_server_data_copy_locked(struct cuse_server *pcs,
    struct cuse_client_command *pccmd,
    struct cuse_data_chunk *pchk, int isread)
{
	struct proc *p_proc;
	int error;

	p_proc = pccmd->proc_curr;
	if (p_proc == NULL)
		return (ENXIO);

	if (pccmd->proc_refs < 0)
		return (ENOMEM);

	/* keep the peer process pinned while the lock is dropped */
	pccmd->proc_refs++;

	cuse_server_unlock(pcs);

	/*
	 * isread == 0: copy from the caller's local_ptr to the peer's
	 * peer_ptr; otherwise copy in the opposite direction.
	 */
	if (isread == 0) {
		error = cuse_proc2proc_copy(
		    curthread->td_proc, pchk->local_ptr,
		    p_proc, pchk->peer_ptr,
		    pchk->length);
	} else {
		error = cuse_proc2proc_copy(
		    p_proc, pchk->peer_ptr,
		    curthread->td_proc, pchk->local_ptr,
		    pchk->length);
	}

	cuse_server_lock(pcs);

	pccmd->proc_refs--;

	/* wake a client draining its process references, if any */
	if (pccmd->proc_curr == NULL)
		cv_signal(&pccmd->cv);

	return (error);
}
953
/*
 * Allocate the lowest free unit number within the ID class given by
 * "id" (masked by CUSE_ID_MASK) and record the owning server.
 * Returns the unit number, or -1 on table overflow. Must be called
 * with the global lock held.
 */
static int
cuse_alloc_unit_by_id_locked(struct cuse_server *pcs, int id)
{
	int n;
	int x = 0;
	int match;

	/*
	 * Rescan from the start whenever candidate "x" turns out to
	 * be taken, until a full pass finds no collision.
	 */
	do {
		for (match = n = 0; n != CUSE_DEVICES_MAX; n++) {
			if (cuse_alloc_unit[n] != NULL) {
				if ((cuse_alloc_unit_id[n] ^ id) & CUSE_ID_MASK)
					continue;
				if ((cuse_alloc_unit_id[n] & ~CUSE_ID_MASK) == x) {
					x++;
					match = 1;
				}
			}
		}
	} while (match);

	/* unit numbers are limited to one byte */
	if (x < 256) {
		for (n = 0; n != CUSE_DEVICES_MAX; n++) {
			if (cuse_alloc_unit[n] == NULL) {
				cuse_alloc_unit[n] = pcs;
				cuse_alloc_unit_id[n] = id | x;
				return (x);
			}
		}
	}
	return (-1);
}
985
/* Wake up poll/select and kqueue consumers of the server. */
static void
cuse_server_wakeup_locked(struct cuse_server *pcs)
{
	selwakeup(&pcs->selinfo);
	KNOTE_LOCKED(&pcs->selinfo.si_note, 0);
}

/* Force all clients to re-poll, then wake up everybody. */
static void
cuse_server_wakeup_all_client_locked(struct cuse_server *pcs)
{
	struct cuse_client *pcc;

	TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
		pcc->cflags |= (CUSE_CLI_KNOTE_NEED_READ |
		    CUSE_CLI_KNOTE_NEED_WRITE);
	}
	cuse_server_wakeup_locked(pcs);
}
1004
1005static int
1006cuse_free_unit_by_id_locked(struct cuse_server *pcs, int id)
1007{
1008 int n;
1009 int found = 0;
1010
1011 for (n = 0; n != CUSE_DEVICES_MAX; n++) {
1012 if (cuse_alloc_unit[n] == pcs) {
1013 if (cuse_alloc_unit_id[n] == id || id == -1) {
1014 cuse_alloc_unit[n] = NULL;
1015 cuse_alloc_unit_id[n] = 0;
1016 found = 1;
1017 }
1018 }
1019 }
1020
1021 return (found ? 0 : EINVAL);
1022}
1023
1024static int
1025cuse_server_ioctl(struct cdev *dev, unsigned long cmd,
1026 caddr_t data, int fflag, struct thread *td)
1027{
1028 struct cuse_server *pcs;
1029 int error;
1030
1031 error = cuse_server_get(&pcs);
1032 if (error != 0)
1033 return (error);
1034
1035 switch (cmd) {
1036 struct cuse_client_command *pccmd;
1037 struct cuse_client *pcc;
1038 struct cuse_command *pcmd;
1039 struct cuse_alloc_info *pai;
1040 struct cuse_create_dev *pcd;
1041 struct cuse_server_dev *pcsd;
1042 struct cuse_data_chunk *pchk;
1043 int n;
1044
1045 case CUSE_IOCTL_GET_COMMAND:
1046 pcmd = (void *)data;
1047
1048 cuse_server_lock(pcs);
1049
1050 while ((pccmd = TAILQ_FIRST(&pcs->head)) == NULL) {
1051 error = cv_wait_sig(&pcs->cv, &pcs->mtx);
1052
1053 if (pcs->is_closing)
1054 error = ENXIO;
1055
1056 if (error) {
1057 cuse_server_unlock(pcs);
1058 return (error);
1059 }
1060 }
1061
1062 TAILQ_REMOVE(&pcs->head, pccmd, entry);
1063 pccmd->entry.tqe_prev = NULL;
1064
1065 pccmd->entered = curthread;
1066
1067 *pcmd = pccmd->sub;
1068
1069 cuse_server_unlock(pcs);
1070
1071 break;
1072
1073 case CUSE_IOCTL_SYNC_COMMAND:
1074
1075 cuse_server_lock(pcs);
1076 while ((pccmd = cuse_server_find_command(pcs, curthread)) != NULL) {
1077
1078 /* send sync command */
1079 pccmd->entered = NULL;
1080 pccmd->error = *(int *)data;
1081 pccmd->command = CUSE_CMD_SYNC;
1082
1083 /* signal peer, if any */
1084 cv_signal(&pccmd->cv);
1085 }
1086 cuse_server_unlock(pcs);
1087
1088 break;
1089
1090 case CUSE_IOCTL_ALLOC_UNIT:
1091
1092 cuse_server_lock(pcs);
1093 n = cuse_alloc_unit_by_id_locked(pcs,
1094 CUSE_ID_DEFAULT(0));
1095 cuse_server_unlock(pcs);
1096
1097 if (n < 0)
1098 error = ENOMEM;
1099 else
1100 *(int *)data = n;
1101 break;
1102
1103 case CUSE_IOCTL_ALLOC_UNIT_BY_ID:
1104
1105 n = *(int *)data;
1106
1107 n = (n & CUSE_ID_MASK);
1108
1109 cuse_server_lock(pcs);
1110 n = cuse_alloc_unit_by_id_locked(pcs, n);
1111 cuse_server_unlock(pcs);
1112
1113 if (n < 0)
1114 error = ENOMEM;
1115 else
1116 *(int *)data = n;
1117 break;
1118
1119 case CUSE_IOCTL_FREE_UNIT:
1120
1121 n = *(int *)data;
1122
1123 n = CUSE_ID_DEFAULT(n);
1124
1125 cuse_server_lock(pcs);
1126 error = cuse_free_unit_by_id_locked(pcs, n);
1127 cuse_server_unlock(pcs);
1128 break;
1129
1130 case CUSE_IOCTL_FREE_UNIT_BY_ID:
1131
1132 n = *(int *)data;
1133
1134 cuse_server_lock(pcs);
1135 error = cuse_free_unit_by_id_locked(pcs, n);
1136 cuse_server_unlock(pcs);
1137 break;
1138
1139 case CUSE_IOCTL_ALLOC_MEMORY:
1140
1141 pai = (void *)data;
1142
1143 if (pai->alloc_nr >= CUSE_ALLOC_UNIT_MAX) {
1144 error = ENOMEM;
1145 break;
1146 }
1147 if (pai->page_count >= CUSE_ALLOC_PAGES_MAX) {
1148 error = ENOMEM;
1149 break;
1150 }
1151 error = cuse_server_alloc_memory(pcs,
1152 pai->alloc_nr, pai->page_count);
1153 break;
1154
1155 case CUSE_IOCTL_FREE_MEMORY:
1156 pai = (void *)data;
1157
1158 if (pai->alloc_nr >= CUSE_ALLOC_UNIT_MAX) {
1159 error = ENOMEM;
1160 break;
1161 }
1162 error = cuse_server_free_memory(pcs, pai->alloc_nr);
1163 break;
1164
1165 case CUSE_IOCTL_GET_SIG:
1166
1167 cuse_server_lock(pcs);
1168 pccmd = cuse_server_find_command(pcs, curthread);
1169
1170 if (pccmd != NULL) {
1171 n = pccmd->got_signal;
1172 pccmd->got_signal = 0;
1173 } else {
1174 n = 0;
1175 }
1176 cuse_server_unlock(pcs);
1177
1178 *(int *)data = n;
1179
1180 break;
1181
1182 case CUSE_IOCTL_SET_PFH:
1183
1184 cuse_server_lock(pcs);
1185 pccmd = cuse_server_find_command(pcs, curthread);
1186
1187 if (pccmd != NULL) {
1188 pcc = pccmd->client;
1189 for (n = 0; n != CUSE_CMD_MAX; n++) {
1190 pcc->cmds[n].sub.per_file_handle = *(uintptr_t *)data;
1191 }
1192 } else {
1193 error = ENXIO;
1194 }
1195 cuse_server_unlock(pcs);
1196 break;
1197
1198 case CUSE_IOCTL_CREATE_DEV:
1199
1200 error = priv_check(curthread, PRIV_DRIVER);
1201 if (error)
1202 break;
1203
1204 pcd = (void *)data;
1205
1206 /* filter input */
1207
1208 pcd->devname[sizeof(pcd->devname) - 1] = 0;
1209
1210 if (pcd->devname[0] == 0) {
1211 error = EINVAL;
1212 break;
1213 }
1214 cuse_str_filter(pcd->devname);
1215
1216 pcd->permissions &= 0777;
1217
1218 /* try to allocate a character device */
1219
1220 pcsd = malloc(sizeof(*pcsd), M_CUSE, M_WAITOK | M_ZERO);
1221
1222 if (pcsd == NULL) {
1223 error = ENOMEM;
1224 break;
1225 }
1226 pcsd->server = pcs;
1227
1228 pcsd->user_dev = pcd->dev;
1229
1230 pcsd->kern_dev = make_dev_credf(MAKEDEV_CHECKNAME,
1231 &cuse_client_devsw, 0, NULL, pcd->user_id, pcd->group_id,
1232 pcd->permissions, "%s", pcd->devname);
1233
1234 if (pcsd->kern_dev == NULL) {
1235 free(pcsd, M_CUSE);
1236 error = ENOMEM;
1237 break;
1238 }
1239 pcsd->kern_dev->si_drv1 = pcsd;
1240
1241 cuse_server_lock(pcs);
1242 TAILQ_INSERT_TAIL(&pcs->hdev, pcsd, entry);
1243 cuse_server_unlock(pcs);
1244
1245 break;
1246
1247 case CUSE_IOCTL_DESTROY_DEV:
1248
1249 error = priv_check(curthread, PRIV_DRIVER);
1250 if (error)
1251 break;
1252
1253 cuse_server_lock(pcs);
1254
1255 error = EINVAL;
1256
1257 pcsd = TAILQ_FIRST(&pcs->hdev);
1258 while (pcsd != NULL) {
1259 if (pcsd->user_dev == *(struct cuse_dev **)data) {
1260 TAILQ_REMOVE(&pcs->hdev, pcsd, entry);
1261 cuse_server_unlock(pcs);
1262 cuse_server_free_dev(pcsd);
1263 cuse_server_lock(pcs);
1264 error = 0;
1265 pcsd = TAILQ_FIRST(&pcs->hdev);
1266 } else {
1267 pcsd = TAILQ_NEXT(pcsd, entry);
1268 }
1269 }
1270
1271 cuse_server_unlock(pcs);
1272 break;
1273
1274 case CUSE_IOCTL_WRITE_DATA:
1275 case CUSE_IOCTL_READ_DATA:
1276
1277 cuse_server_lock(pcs);
1278 pchk = (struct cuse_data_chunk *)data;
1279
1280 pccmd = cuse_server_find_command(pcs, curthread);
1281
1282 if (pccmd == NULL) {
1283 error = ENXIO; /* invalid request */
1284 } else if (pchk->peer_ptr < CUSE_BUF_MIN_PTR) {
1285 error = EFAULT; /* NULL pointer */
1286 } else if (pchk->peer_ptr < CUSE_BUF_MAX_PTR) {
1287 error = cuse_server_ioctl_copy_locked(pcs, pccmd,
1288 pchk, cmd == CUSE_IOCTL_READ_DATA);
1289 } else {
1290 error = cuse_server_data_copy_locked(pcs, pccmd,
1291 pchk, cmd == CUSE_IOCTL_READ_DATA);
1292 }
1293 cuse_server_unlock(pcs);
1294 break;
1295
1296 case CUSE_IOCTL_SELWAKEUP:
1297 cuse_server_lock(pcs);
1298 /*
1299 * We don't know which direction caused the event.
1300 * Wakeup both!
1301 */
1302 cuse_server_wakeup_all_client_locked(pcs);
1303 cuse_server_unlock(pcs);
1304 break;
1305
1306 default:
1307 error = ENXIO;
1308 break;
1309 }
1310 return (error);
1311}
1312
1313static int
1314cuse_server_poll(struct cdev *dev, int events, struct thread *td)
1315{
1316 return (events & (POLLHUP | POLLPRI | POLLIN |
1317 POLLRDNORM | POLLOUT | POLLWRNORM));
1318}
1319
/*
 * Handler for mmap(2) on the /dev/cuse server device.
 *
 * Maps one of the server's previously allocated shared memory areas.
 * The allocation number is encoded in the page offset; see the
 * matching logic in cuse_client_mmap_single().
 */
static int
cuse_server_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **object, int nprot)
{
	/* each allocation occupies CUSE_ALLOC_PAGES_MAX pages of offset space */
	uint32_t page_nr = *offset / PAGE_SIZE;
	uint32_t alloc_nr = page_nr / CUSE_ALLOC_PAGES_MAX;
	struct cuse_memory *mem;
	struct cuse_server *pcs;
	int error;

	error = cuse_server_get(&pcs);
	if (error != 0)
		return (error);

	cuse_server_lock(pcs);
	/* lookup memory structure */
	TAILQ_FOREACH(mem, &pcs->hmem, entry) {
		if (mem->alloc_nr == alloc_nr)
			break;
	}
	if (mem == NULL) {
		cuse_server_unlock(pcs);
		return (ENOMEM);
	}
	/* verify page offset */
	page_nr %= CUSE_ALLOC_PAGES_MAX;
	if (page_nr >= mem->page_count) {
		cuse_server_unlock(pcs);
		return (ENXIO);
	}
	/* verify mmap size */
	if ((size % PAGE_SIZE) != 0 || (size < PAGE_SIZE) ||
	    (size > ((mem->page_count - page_nr) * PAGE_SIZE))) {
		cuse_server_unlock(pcs);
		return (EINVAL);
	}
	/* hold an extra reference for the caller before unlocking */
	vm_object_reference(mem->object);
	*object = mem->object;
	cuse_server_unlock(pcs);

	/* set new VM object offset to use */
	*offset = page_nr * PAGE_SIZE;

	/* success */
	return (0);
}
1366
1367/*------------------------------------------------------------------------*
1368 * CUSE CLIENT PART
1369 *------------------------------------------------------------------------*/
/*
 * Per-file-handle destructor for CUSE clients, installed via
 * devfs_set_cdevpriv() in cuse_client_open(). Detaches the client
 * from its server, destroys the per-command locks and finally drops
 * the client's reference on the server.
 */
static void
cuse_client_free(void *arg)
{
	struct cuse_client *pcc = arg;
	struct cuse_client_command *pccmd;
	struct cuse_server *pcs;
	int n;

	pcs = pcc->server;

	cuse_server_lock(pcs);
	cuse_client_is_closing(pcc);
	TAILQ_REMOVE(&pcs->hcli, pcc, entry);
	cuse_server_unlock(pcs);

	for (n = 0; n != CUSE_CMD_MAX; n++) {

		pccmd = &pcc->cmds[n];

		sx_destroy(&pccmd->sx);
		cv_destroy(&pccmd->cv);
	}

	free(pcc, M_CUSE);

	/* drop reference on server */
	cuse_server_unref(pcs);
}
1398
1399static int
1400cuse_client_open(struct cdev *dev, int fflags, int devtype, struct thread *td)
1401{
1402 struct cuse_client_command *pccmd;
1403 struct cuse_server_dev *pcsd;
1404 struct cuse_client *pcc;
1405 struct cuse_server *pcs;
1406 struct cuse_dev *pcd;
1407 int error;
1408 int n;
1409
1410 pcsd = dev->si_drv1;
1411 if (pcsd != NULL) {
1412 pcs = pcsd->server;
1413 pcd = pcsd->user_dev;
1414
1415 cuse_server_lock(pcs);
1416 /*
1417 * Check that the refcount didn't wrap and that the
1418 * same process is not both client and server. This
1419 * can easily lead to deadlocks when destroying the
1420 * CUSE character device nodes:
1421 */
1422 pcs->refs++;
1423 if (pcs->refs < 0 || pcs->pid == curproc->p_pid) {
1424 /* overflow or wrong PID */
1425 pcs->refs--;
1426 cuse_server_unlock(pcs);
1427 return (EINVAL);
1428 }
1429 cuse_server_unlock(pcs);
1430 } else {
1431 return (EINVAL);
1432 }
1433
1434 pcc = malloc(sizeof(*pcc), M_CUSE, M_WAITOK | M_ZERO);
1435 if (pcc == NULL) {
1436 /* drop reference on server */
1437 cuse_server_unref(pcs);
1438 return (ENOMEM);
1439 }
1440 if (devfs_set_cdevpriv(pcc, &cuse_client_free)) {
1441 printf("Cuse: Cannot set cdevpriv.\n");
1442 /* drop reference on server */
1443 cuse_server_unref(pcs);
1444 free(pcc, M_CUSE);
1445 return (ENOMEM);
1446 }
1447 pcc->fflags = fflags;
1448 pcc->server_dev = pcsd;
1449 pcc->server = pcs;
1450
1451 for (n = 0; n != CUSE_CMD_MAX; n++) {
1452
1453 pccmd = &pcc->cmds[n];
1454
1455 pccmd->sub.dev = pcd;
1456 pccmd->sub.command = n;
1457 pccmd->client = pcc;
1458
1459 sx_init(&pccmd->sx, "cuse-client-sx");
1460 cv_init(&pccmd->cv, "cuse-client-cv");
1461 }
1462
1463 cuse_server_lock(pcs);
1464
1465 /* cuse_client_free() assumes that the client is listed somewhere! */
1466 /* always enqueue */
1467
1468 TAILQ_INSERT_TAIL(&pcs->hcli, pcc, entry);
1469
1470 /* check if server is closing */
1471 if ((pcs->is_closing != 0) || (dev->si_drv1 == NULL)) {
1472 error = EINVAL;
1473 } else {
1474 error = 0;
1475 }
1476 cuse_server_unlock(pcs);
1477
1478 if (error) {
1479 devfs_clear_cdevpriv(); /* XXX bugfix */
1480 return (error);
1481 }
1482 pccmd = &pcc->cmds[CUSE_CMD_OPEN];
1483
1484 cuse_cmd_lock(pccmd);
1485
1486 cuse_server_lock(pcs);
1487 cuse_client_send_command_locked(pccmd, 0, 0, pcc->fflags, 0);
1488
1489 error = cuse_client_receive_command_locked(pccmd, 0, 0);
1490 cuse_server_unlock(pcs);
1491
1492 if (error < 0) {
1493 error = cuse_convert_error(error);
1494 } else {
1495 error = 0;
1496 }
1497
1498 cuse_cmd_unlock(pccmd);
1499
1500 if (error)
1501 devfs_clear_cdevpriv(); /* XXX bugfix */
1502
1503 return (error);
1504}
1505
/*
 * Handler for close(2) on a CUSE managed character device.
 *
 * Executes CUSE_CMD_CLOSE in the server process and marks the client
 * as closing. Always returns zero - errors from the userspace close
 * handler are intentionally ignored.
 */
static int
cuse_client_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct cuse_client_command *pccmd;
	struct cuse_client *pcc;
	struct cuse_server *pcs;
	int error;

	error = cuse_client_get(&pcc);
	if (error != 0)
		return (0);

	pccmd = &pcc->cmds[CUSE_CMD_CLOSE];
	pcs = pcc->server;

	cuse_cmd_lock(pccmd);

	cuse_server_lock(pcs);
	cuse_client_send_command_locked(pccmd, 0, 0, pcc->fflags, 0);

	error = cuse_client_receive_command_locked(pccmd, 0, 0);
	cuse_cmd_unlock(pccmd);

	cuse_client_is_closing(pcc);
	cuse_server_unlock(pcs);

	return (0);
}
1534
/*
 * Re-evaluate the polling state for a client which has active kqueue
 * filters. Called after an operation returned EWOULDBLOCK so that a
 * later readiness change is not missed by attached knotes.
 */
static void
cuse_client_kqfilter_poll(struct cdev *dev, struct cuse_client *pcc)
{
	struct cuse_server *pcs = pcc->server;
	int temp;

	cuse_server_lock(pcs);
	/* check whether any kqueue filters are attached at all */
	temp = (pcc->cflags & (CUSE_CLI_KNOTE_HAS_READ |
	    CUSE_CLI_KNOTE_HAS_WRITE));
	pcc->cflags &= ~(CUSE_CLI_KNOTE_NEED_READ |
	    CUSE_CLI_KNOTE_NEED_WRITE);
	cuse_server_unlock(pcs);

	if (temp != 0) {
		/* get the latest polling state from the server */
		temp = cuse_client_poll(dev, POLLIN | POLLOUT, NULL);

		if (temp & (POLLIN | POLLOUT)) {
			cuse_server_lock(pcs);
			if (temp & POLLIN)
				pcc->cflags |= CUSE_CLI_KNOTE_NEED_READ;
			if (temp & POLLOUT)
				pcc->cflags |= CUSE_CLI_KNOTE_NEED_WRITE;

			/* make sure the "knote" gets woken up */
			cuse_server_wakeup_locked(pcc->server);
			cuse_server_unlock(pcs);
		}
	}
}
1565
/*
 * Handler for read(2) on a CUSE managed character device.
 *
 * The uio segment flag is switched to UIO_NOCOPY because the payload
 * is transferred directly between the client and server processes by
 * the server's ioctl path; the uiomove(NULL, ...) calls below only
 * advance the uio bookkeeping.
 */
static int
cuse_client_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cuse_client_command *pccmd;
	struct cuse_client *pcc;
	struct cuse_server *pcs;
	int error;
	int len;

	error = cuse_client_get(&pcc);
	if (error != 0)
		return (error);

	pccmd = &pcc->cmds[CUSE_CMD_READ];
	pcs = pcc->server;

	if (uio->uio_segflg != UIO_USERSPACE) {
		return (EINVAL);
	}
	uio->uio_segflg = UIO_NOCOPY;

	cuse_cmd_lock(pccmd);

	while (uio->uio_resid != 0) {

		if (uio->uio_iov->iov_len > CUSE_LENGTH_MAX) {
			error = ENOMEM;
			break;
		}
		len = uio->uio_iov->iov_len;

		cuse_server_lock(pcs);
		cuse_client_send_command_locked(pccmd,
		    (uintptr_t)uio->uio_iov->iov_base,
		    (unsigned long)(unsigned int)len, pcc->fflags, ioflag);

		/* negative return values are CUSE error codes */
		error = cuse_client_receive_command_locked(pccmd, 0, 0);
		cuse_server_unlock(pcs);

		if (error < 0) {
			error = cuse_convert_error(error);
			break;
		} else if (error == len) {
			/* full chunk transferred - advance and continue */
			error = uiomove(NULL, error, uio);
			if (error)
				break;
		} else {
			/* short transfer - advance and stop */
			error = uiomove(NULL, error, uio);
			break;
		}
	}
	cuse_cmd_unlock(pccmd);

	uio->uio_segflg = UIO_USERSPACE;/* restore segment flag */

	if (error == EWOULDBLOCK)
		cuse_client_kqfilter_poll(dev, pcc);

	return (error);
}
1626
/*
 * Handler for write(2) on a CUSE managed character device.
 *
 * Mirror image of cuse_client_read(): the payload is moved by the
 * server process via its ioctl path, so the segment flag is set to
 * UIO_NOCOPY and uiomove(NULL, ...) only advances the uio state.
 */
static int
cuse_client_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cuse_client_command *pccmd;
	struct cuse_client *pcc;
	struct cuse_server *pcs;
	int error;
	int len;

	error = cuse_client_get(&pcc);
	if (error != 0)
		return (error);

	pccmd = &pcc->cmds[CUSE_CMD_WRITE];
	pcs = pcc->server;

	if (uio->uio_segflg != UIO_USERSPACE) {
		return (EINVAL);
	}
	uio->uio_segflg = UIO_NOCOPY;

	cuse_cmd_lock(pccmd);

	while (uio->uio_resid != 0) {

		if (uio->uio_iov->iov_len > CUSE_LENGTH_MAX) {
			error = ENOMEM;
			break;
		}
		len = uio->uio_iov->iov_len;

		cuse_server_lock(pcs);
		cuse_client_send_command_locked(pccmd,
		    (uintptr_t)uio->uio_iov->iov_base,
		    (unsigned long)(unsigned int)len, pcc->fflags, ioflag);

		/* negative return values are CUSE error codes */
		error = cuse_client_receive_command_locked(pccmd, 0, 0);
		cuse_server_unlock(pcs);

		if (error < 0) {
			error = cuse_convert_error(error);
			break;
		} else if (error == len) {
			/* full chunk transferred - advance and continue */
			error = uiomove(NULL, error, uio);
			if (error)
				break;
		} else {
			/* short transfer - advance and stop */
			error = uiomove(NULL, error, uio);
			break;
		}
	}
	cuse_cmd_unlock(pccmd);

	uio->uio_segflg = UIO_USERSPACE;/* restore segment flag */

	if (error == EWOULDBLOCK)
		cuse_client_kqfilter_poll(dev, pcc);

	return (error);
}
1687
/*
 * Handler for ioctl(2) on a CUSE managed character device.
 *
 * The ioctl payload (bounded by CUSE_BUFFER_MAX) is staged in the
 * client's ioctl_buffer and exchanged with the server process.
 */
int
cuse_client_ioctl(struct cdev *dev, unsigned long cmd,
    caddr_t data, int fflag, struct thread *td)
{
	struct cuse_client_command *pccmd;
	struct cuse_client *pcc;
	struct cuse_server *pcs;
	int error;
	int len;

	error = cuse_client_get(&pcc);
	if (error != 0)
		return (error);

	len = IOCPARM_LEN(cmd);
	if (len > CUSE_BUFFER_MAX)
		return (ENOMEM);

	pccmd = &pcc->cmds[CUSE_CMD_IOCTL];
	pcs = pcc->server;

	cuse_cmd_lock(pccmd);

	/* stage input data for the server process */
	if (cmd & (IOC_IN | IOC_VOID))
		memcpy(pcc->ioctl_buffer, data, len);

	/*
	 * When the ioctl-length is zero drivers can pass information
	 * through the data pointer of the ioctl. Make sure this information
	 * is forwarded to the driver.
	 */

	cuse_server_lock(pcs);
	cuse_client_send_command_locked(pccmd,
	    (len == 0) ? *(long *)data : CUSE_BUF_MIN_PTR,
	    (unsigned long)cmd, pcc->fflags,
	    (fflag & O_NONBLOCK) ? IO_NDELAY : 0);

	error = cuse_client_receive_command_locked(pccmd, data, len);
	cuse_server_unlock(pcs);

	if (error < 0) {
		error = cuse_convert_error(error);
	} else {
		error = 0;
	}

	/* copy result back to the caller, if requested */
	if (cmd & IOC_OUT)
		memcpy(data, pcc->ioctl_buffer, len);

	cuse_cmd_unlock(pccmd);

	if (error == EWOULDBLOCK)
		cuse_client_kqfilter_poll(dev, pcc);

	return (error);
}
1745
/*
 * Handler for poll(2) on a CUSE managed character device.
 *
 * Translates the kernel poll events into CUSE_POLL_* bits, executes
 * CUSE_CMD_POLL in the server process and translates the answer back.
 * On any failure all events are reported as ready, because many
 * applications do not handle POLLNVAL gracefully.
 */
static int
cuse_client_poll(struct cdev *dev, int events, struct thread *td)
{
	struct cuse_client_command *pccmd;
	struct cuse_client *pcc;
	struct cuse_server *pcs;
	unsigned long temp;
	int error;
	int revents;

	error = cuse_client_get(&pcc);
	if (error != 0)
		goto pollnval;

	temp = 0;
	pcs = pcc->server;

	if (events & (POLLPRI | POLLIN | POLLRDNORM))
		temp |= CUSE_POLL_READ;

	if (events & (POLLOUT | POLLWRNORM))
		temp |= CUSE_POLL_WRITE;

	if (events & POLLHUP)
		temp |= CUSE_POLL_ERROR;

	pccmd = &pcc->cmds[CUSE_CMD_POLL];

	cuse_cmd_lock(pccmd);

	/* Need to selrecord() first to not lose any events. */
	if (temp != 0 && td != NULL)
		selrecord(td, &pcs->selinfo);

	cuse_server_lock(pcs);
	cuse_client_send_command_locked(pccmd,
	    0, temp, pcc->fflags, IO_NDELAY);

	error = cuse_client_receive_command_locked(pccmd, 0, 0);
	cuse_server_unlock(pcs);

	cuse_cmd_unlock(pccmd);

	if (error < 0) {
		goto pollnval;
	} else {
		/* non-negative return carries CUSE_POLL_* bits */
		revents = 0;
		if (error & CUSE_POLL_READ)
			revents |= (events & (POLLPRI | POLLIN | POLLRDNORM));
		if (error & CUSE_POLL_WRITE)
			revents |= (events & (POLLOUT | POLLWRNORM));
		if (error & CUSE_POLL_ERROR)
			revents |= (events & POLLHUP);
	}
	return (revents);

pollnval:
	/* XXX many clients don't understand POLLNVAL */
	return (events & (POLLHUP | POLLPRI | POLLIN |
	    POLLRDNORM | POLLOUT | POLLWRNORM));
}
1807
/*
 * Handler for mmap(2) on a CUSE managed character device.
 *
 * Maps one of the server's shared memory areas into the client.
 * The allocation number is encoded in the page offset; see the
 * identical validation in cuse_server_mmap_single().
 */
static int
cuse_client_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **object, int nprot)
{
	/* each allocation occupies CUSE_ALLOC_PAGES_MAX pages of offset space */
	uint32_t page_nr = *offset / PAGE_SIZE;
	uint32_t alloc_nr = page_nr / CUSE_ALLOC_PAGES_MAX;
	struct cuse_memory *mem;
	struct cuse_client *pcc;
	struct cuse_server *pcs;
	int error;

	error = cuse_client_get(&pcc);
	if (error != 0)
		return (error);

	pcs = pcc->server;

	cuse_server_lock(pcs);
	/* lookup memory structure */
	TAILQ_FOREACH(mem, &pcs->hmem, entry) {
		if (mem->alloc_nr == alloc_nr)
			break;
	}
	if (mem == NULL) {
		cuse_server_unlock(pcs);
		return (ENOMEM);
	}
	/* verify page offset */
	page_nr %= CUSE_ALLOC_PAGES_MAX;
	if (page_nr >= mem->page_count) {
		cuse_server_unlock(pcs);
		return (ENXIO);
	}
	/* verify mmap size */
	if ((size % PAGE_SIZE) != 0 || (size < PAGE_SIZE) ||
	    (size > ((mem->page_count - page_nr) * PAGE_SIZE))) {
		cuse_server_unlock(pcs);
		return (EINVAL);
	}
	/* hold an extra reference for the caller before unlocking */
	vm_object_reference(mem->object);
	*object = mem->object;
	cuse_server_unlock(pcs);

	/* set new VM object offset to use */
	*offset = page_nr * PAGE_SIZE;

	/* success */
	return (0);
}
1857
1858static void
1859cuse_client_kqfilter_read_detach(struct knote *kn)
1860{
1861 struct cuse_client *pcc;
1862 struct cuse_server *pcs;
1863
1864 pcc = kn->kn_hook;
1865 pcs = pcc->server;
1866
1867 cuse_server_lock(pcs);
1868 knlist_remove(&pcs->selinfo.si_note, kn, 1);
1869 cuse_server_unlock(pcs);
1870}
1871
1872static void
1873cuse_client_kqfilter_write_detach(struct knote *kn)
1874{
1875 struct cuse_client *pcc;
1876 struct cuse_server *pcs;
1877
1878 pcc = kn->kn_hook;
1879 pcs = pcc->server;
1880
1881 cuse_server_lock(pcs);
1882 knlist_remove(&pcs->selinfo.si_note, kn, 1);
1883 cuse_server_unlock(pcs);
1884}
1885
1886static int
1887cuse_client_kqfilter_read_event(struct knote *kn, long hint)
1888{
1889 struct cuse_client *pcc;
1890
1891 pcc = kn->kn_hook;
1892
1893 mtx_assert(&pcc->server->mtx, MA_OWNED);
1894
1895 return ((pcc->cflags & CUSE_CLI_KNOTE_NEED_READ) ? 1 : 0);
1896}
1897
1898static int
1899cuse_client_kqfilter_write_event(struct knote *kn, long hint)
1900{
1901 struct cuse_client *pcc;
1902
1903 pcc = kn->kn_hook;
1904
1905 mtx_assert(&pcc->server->mtx, MA_OWNED);
1906
1907 return ((pcc->cflags & CUSE_CLI_KNOTE_NEED_WRITE) ? 1 : 0);
1908}
1909
/*
 * Handler for kqueue(2) filter attachment on a CUSE managed character
 * device. Registers the knote on the server's note list and performs
 * an initial poll so no event is missed between attach and first use.
 */
static int
cuse_client_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct cuse_client *pcc;
	struct cuse_server *pcs;
	int error;

	error = cuse_client_get(&pcc);
	if (error != 0)
		return (error);

	pcs = pcc->server;

	cuse_server_lock(pcs);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		pcc->cflags |= CUSE_CLI_KNOTE_HAS_READ;
		kn->kn_hook = pcc;
		kn->kn_fop = &cuse_client_kqfilter_read_ops;
		knlist_add(&pcs->selinfo.si_note, kn, 1);
		break;
	case EVFILT_WRITE:
		pcc->cflags |= CUSE_CLI_KNOTE_HAS_WRITE;
		kn->kn_hook = pcc;
		kn->kn_fop = &cuse_client_kqfilter_write_ops;
		knlist_add(&pcs->selinfo.si_note, kn, 1);
		break;
	default:
		error = EINVAL;
		break;
	}
	cuse_server_unlock(pcs);

	/* prime the knote state with the current polling status */
	if (error == 0)
		cuse_client_kqfilter_poll(dev, pcc);
	return (error);
}
272
/*
 * Module unload handler. Blocks until all CUSE servers have exited,
 * then destroys the /dev/cuse node and the global mutex.
 */
static void
cuse_kern_uninit(void *arg)
{
	void *ptr;

	/* poll until the global server list drains */
	while (1) {

		printf("Cuse: Please exit all /dev/cuse instances "
		    "and processes which have used this device.\n");

		pause("DRAIN", 2 * hz);

		cuse_global_lock();
		ptr = TAILQ_FIRST(&cuse_server_head);
		cuse_global_unlock();

		if (ptr == NULL)
			break;
	}

	if (cuse_dev != NULL)
		destroy_dev(cuse_dev);

	mtx_destroy(&cuse_global_mtx);
}
SYSUNINIT(cuse_kern_uninit, SI_SUB_DEVFS, SI_ORDER_ANY, cuse_kern_uninit, 0);
299
300static int
301cuse_server_get(struct cuse_server **ppcs)
302{
303 struct cuse_server *pcs;
304 int error;
305
306 error = devfs_get_cdevpriv((void **)&pcs);
307 if (error != 0) {
308 *ppcs = NULL;
309 return (error);
310 }
311 if (pcs->is_closing) {
312 *ppcs = NULL;
313 return (EINVAL);
314 }
315 *ppcs = pcs;
316 return (0);
317}
318
319static void
320cuse_server_is_closing(struct cuse_server *pcs)
321{
322 struct cuse_client *pcc;
323
324 if (pcs->is_closing)
325 return;
326
327 pcs->is_closing = 1;
328
329 TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
330 cuse_client_is_closing(pcc);
331 }
332}
333
334static struct cuse_client_command *
335cuse_server_find_command(struct cuse_server *pcs, struct thread *td)
336{
337 struct cuse_client *pcc;
338 int n;
339
340 if (pcs->is_closing)
341 goto done;
342
343 TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
344 if (CUSE_CLIENT_CLOSING(pcc))
345 continue;
346 for (n = 0; n != CUSE_CMD_MAX; n++) {
347 if (pcc->cmds[n].entered == td)
348 return (&pcc->cmds[n]);
349 }
350 }
351done:
352 return (NULL);
353}
354
/*
 * Sanitize a device name in place: every character outside the set
 * [a-zA-Z0-9._/] is replaced by an underscore.
 */
static void
cuse_str_filter(char *ptr)
{
	char *pc;

	for (pc = ptr; *pc != 0; pc++) {
		const char ch = *pc;
		const int is_safe =
		    (ch >= 'a' && ch <= 'z') ||
		    (ch >= 'A' && ch <= 'Z') ||
		    (ch >= '0' && ch <= '9') ||
		    (ch == '.') || (ch == '_') || (ch == '/');

		if (!is_safe)
			*pc = '_';
	}
}
383
384static int
385cuse_convert_error(int error)
386{
387 ; /* indent fix */
388 switch (error) {
389 case CUSE_ERR_NONE:
390 return (0);
391 case CUSE_ERR_BUSY:
392 return (EBUSY);
393 case CUSE_ERR_WOULDBLOCK:
394 return (EWOULDBLOCK);
395 case CUSE_ERR_INVALID:
396 return (EINVAL);
397 case CUSE_ERR_NO_MEMORY:
398 return (ENOMEM);
399 case CUSE_ERR_FAULT:
400 return (EFAULT);
401 case CUSE_ERR_SIGNAL:
402 return (EINTR);
403 case CUSE_ERR_NO_DEVICE:
404 return (ENODEV);
405 default:
406 return (ENXIO);
407 }
408}
409
410static void
411cuse_vm_memory_free(struct cuse_memory *mem)
412{
413 /* last user is gone - free */
414 vm_object_deallocate(mem->object);
415
416 /* free CUSE memory */
417 free(mem, M_CUSE);
418}
419
420static int
421cuse_server_alloc_memory(struct cuse_server *pcs, uint32_t alloc_nr,
422 uint32_t page_count)
423{
424 struct cuse_memory *temp;
425 struct cuse_memory *mem;
426 vm_object_t object;
427 int error;
428
429 mem = malloc(sizeof(*mem), M_CUSE, M_WAITOK | M_ZERO);
430 if (mem == NULL)
431 return (ENOMEM);
432
433 object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * page_count,
434 VM_PROT_DEFAULT, 0, curthread->td_ucred);
435 if (object == NULL) {
436 error = ENOMEM;
437 goto error_0;
438 }
439
440 cuse_server_lock(pcs);
441 /* check if allocation number already exists */
442 TAILQ_FOREACH(temp, &pcs->hmem, entry) {
443 if (temp->alloc_nr == alloc_nr)
444 break;
445 }
446 if (temp != NULL) {
447 cuse_server_unlock(pcs);
448 error = EBUSY;
449 goto error_1;
450 }
451 mem->object = object;
452 mem->page_count = page_count;
453 mem->alloc_nr = alloc_nr;
454 TAILQ_INSERT_TAIL(&pcs->hmem, mem, entry);
455 cuse_server_unlock(pcs);
456
457 return (0);
458
459error_1:
460 vm_object_deallocate(object);
461error_0:
462 free(mem, M_CUSE);
463 return (error);
464}
465
/*
 * Unregister and free the shared memory area identified by
 * "alloc_nr". The VM object is released outside the server lock.
 *
 * Returns zero on success or EINVAL when no such area exists.
 */
static int
cuse_server_free_memory(struct cuse_server *pcs, uint32_t alloc_nr)
{
	struct cuse_memory *mem;

	cuse_server_lock(pcs);
	TAILQ_FOREACH(mem, &pcs->hmem, entry) {
		if (mem->alloc_nr == alloc_nr)
			break;
	}
	if (mem == NULL) {
		cuse_server_unlock(pcs);
		return (EINVAL);
	}
	TAILQ_REMOVE(&pcs->hmem, mem, entry);
	cuse_server_unlock(pcs);

	cuse_vm_memory_free(mem);

	return (0);
}
487
488static int
489cuse_client_get(struct cuse_client **ppcc)
490{
491 struct cuse_client *pcc;
492 int error;
493
494 /* try to get private data */
495 error = devfs_get_cdevpriv((void **)&pcc);
496 if (error != 0) {
497 *ppcc = NULL;
498 return (error);
499 }
500 if (CUSE_CLIENT_CLOSING(pcc) || pcc->server->is_closing) {
501 *ppcc = NULL;
502 return (EINVAL);
503 }
504 *ppcc = pcc;
505 return (0);
506}
507
/*
 * Mark a client as closing: remove all of its queued commands from
 * the server's command queue and wake up every sleeper. Idempotent;
 * the server lock must be held.
 */
static void
cuse_client_is_closing(struct cuse_client *pcc)
{
	struct cuse_client_command *pccmd;
	uint32_t n;

	if (CUSE_CLIENT_CLOSING(pcc))
		return;

	pcc->cflags |= CUSE_CLI_IS_CLOSING;
	pcc->server_dev = NULL;

	for (n = 0; n != CUSE_CMD_MAX; n++) {

		pccmd = &pcc->cmds[n];

		/* a non-NULL tqe_prev means the command is still queued */
		if (pccmd->entry.tqe_prev != NULL) {
			TAILQ_REMOVE(&pcc->server->head, pccmd, entry);
			pccmd->entry.tqe_prev = NULL;
		}
		cv_broadcast(&pccmd->cv);
	}
}
531
/*
 * Fill in a client command and enqueue it on the server's command
 * queue, waking one server thread. Commands are silently dropped
 * when already queued or when the client or server is closing.
 * The server lock must be held.
 */
static void
cuse_client_send_command_locked(struct cuse_client_command *pccmd,
    uintptr_t data_ptr, unsigned long arg, int fflags, int ioflag)
{
	unsigned long cuse_fflags = 0;
	struct cuse_server *pcs;

	/* translate kernel file flags into CUSE flags */
	if (fflags & FREAD)
		cuse_fflags |= CUSE_FFLAG_READ;

	if (fflags & FWRITE)
		cuse_fflags |= CUSE_FFLAG_WRITE;

	if (ioflag & IO_NDELAY)
		cuse_fflags |= CUSE_FFLAG_NONBLOCK;
#if defined(__LP64__)
	if (SV_CURPROC_FLAG(SV_ILP32))
		cuse_fflags |= CUSE_FFLAG_COMPAT32;
#endif
	pccmd->sub.fflags = cuse_fflags;
	pccmd->sub.data_pointer = data_ptr;
	pccmd->sub.argument = arg;

	pcs = pccmd->client->server;

	/* only enqueue when not queued yet and nobody is closing */
	if ((pccmd->entry.tqe_prev == NULL) &&
	    (CUSE_CLIENT_CLOSING(pccmd->client) == 0) &&
	    (pcs->is_closing == 0)) {
		TAILQ_INSERT_TAIL(&pcs->head, pccmd, entry);
		cv_signal(&pcs->cv);
	}
}
564
/*
 * Record that the client thread received a signal while waiting for
 * "pccmd" and enqueue the client's CUSE_CMD_SIGNAL command so the
 * server process gets notified. The server lock must be held.
 */
static void
cuse_client_got_signal(struct cuse_client_command *pccmd)
{
	struct cuse_server *pcs;

	pccmd->got_signal = 1;

	/* the notification is delivered via the SIGNAL command slot */
	pccmd = &pccmd->client->cmds[CUSE_CMD_SIGNAL];

	pcs = pccmd->client->server;

	/* only enqueue when not queued yet and nobody is closing */
	if ((pccmd->entry.tqe_prev == NULL) &&
	    (CUSE_CLIENT_CLOSING(pccmd->client) == 0) &&
	    (pcs->is_closing == 0)) {
		TAILQ_INSERT_TAIL(&pcs->head, pccmd, entry);
		cv_signal(&pcs->cv);
	}
}
583
/*
 * Wait for the server process to complete the command "pccmd" and
 * return its result. A negative return value is a CUSE error code
 * to be translated by cuse_convert_error(); a non-negative value is
 * command specific (e.g. a transfer length).
 *
 * On the first signal the server is notified via CUSE_CMD_SIGNAL and
 * the wait continues uninterruptibly. Before returning, the function
 * drains all process references taken by concurrent data copies.
 * The server lock must be held.
 */
static int
cuse_client_receive_command_locked(struct cuse_client_command *pccmd,
    uint8_t *arg_ptr, uint32_t arg_len)
{
	struct cuse_server *pcs;
	int error;

	pcs = pccmd->client->server;
	error = 0;

	/* expose our process so the server can copy data to/from us */
	pccmd->proc_curr = curthread->td_proc;

	if (CUSE_CLIENT_CLOSING(pccmd->client) || pcs->is_closing) {
		error = CUSE_ERR_OTHER;
		goto done;
	}
	while (pccmd->command == CUSE_CMD_NONE) {
		if (error != 0) {
			/* already signalled once - wait uninterruptibly */
			cv_wait(&pccmd->cv, &pcs->mtx);
		} else {
			error = cv_wait_sig(&pccmd->cv, &pcs->mtx);

			if (error != 0)
				cuse_client_got_signal(pccmd);
		}
		if (CUSE_CLIENT_CLOSING(pccmd->client) || pcs->is_closing) {
			error = CUSE_ERR_OTHER;
			goto done;
		}
	}

	error = pccmd->error;
	pccmd->command = CUSE_CMD_NONE;
	cv_signal(&pccmd->cv);

done:

	/* wait until all process references are gone */

	pccmd->proc_curr = NULL;

	while (pccmd->proc_refs != 0)
		cv_wait(&pccmd->cv, &pcs->mtx);

	return (error);
}
630
631/*------------------------------------------------------------------------*
632 * CUSE SERVER PART
633 *------------------------------------------------------------------------*/
634
/*
 * Tear down one CUSE created character device: detach it from its
 * server, close all clients using it, destroy the devfs node
 * synchronously and free the structure. Must be called without the
 * server lock held.
 */
static void
cuse_server_free_dev(struct cuse_server_dev *pcsd)
{
	struct cuse_server *pcs;
	struct cuse_client *pcc;

	/* get server pointer */
	pcs = pcsd->server;

	/* prevent creation of more devices */
	cuse_server_lock(pcs);
	if (pcsd->kern_dev != NULL)
		pcsd->kern_dev->si_drv1 = NULL;

	TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
		if (pcc->server_dev == pcsd)
			cuse_client_is_closing(pcc);
	}
	cuse_server_unlock(pcs);

	/* destroy device, if any */
	if (pcsd->kern_dev != NULL) {
		/* destroy device synchronously */
		destroy_dev(pcsd->kern_dev);
	}
	free(pcsd, M_CUSE);
}
662
/*
 * Drop one reference on the server. When the last reference goes
 * away the server is fully torn down: it is removed from the global
 * list, its devices, unit numbers and shared memory are released,
 * and finally the locks and the structure itself are destroyed.
 */
static void
cuse_server_unref(struct cuse_server *pcs)
{
	struct cuse_server_dev *pcsd;
	struct cuse_memory *mem;

	cuse_server_lock(pcs);
	if (--(pcs->refs) != 0) {
		cuse_server_unlock(pcs);
		return;
	}
	cuse_server_is_closing(pcs);
	/* final client wakeup, if any */
	cuse_server_wakeup_all_client_locked(pcs);

	cuse_global_lock();
	TAILQ_REMOVE(&cuse_server_head, pcs, entry);
	cuse_global_unlock();

	/* the server lock is dropped around each device teardown */
	while ((pcsd = TAILQ_FIRST(&pcs->hdev)) != NULL) {
		TAILQ_REMOVE(&pcs->hdev, pcsd, entry);
		cuse_server_unlock(pcs);
		cuse_server_free_dev(pcsd);
		cuse_server_lock(pcs);
	}

	/* release all unit numbers owned by this server */
	cuse_free_unit_by_id_locked(pcs, -1);

	while ((mem = TAILQ_FIRST(&pcs->hmem)) != NULL) {
		TAILQ_REMOVE(&pcs->hmem, mem, entry);
		cuse_server_unlock(pcs);
		cuse_vm_memory_free(mem);
		cuse_server_lock(pcs);
	}

	knlist_clear(&pcs->selinfo.si_note, 1);
	knlist_destroy(&pcs->selinfo.si_note);

	cuse_server_unlock(pcs);

	/* wait for any select/poll sleepers before destroying locks */
	seldrain(&pcs->selinfo);

	cv_destroy(&pcs->cv);

	mtx_destroy(&pcs->mtx);

	free(pcs, M_CUSE);
}
711
/*
 * Mark the server as closing and wake up all waiters.  Returns the
 * current reference count so the caller can tell when only its own
 * final reference remains.
 */
static int
cuse_server_do_close(struct cuse_server *pcs)
{
	int retval;

	cuse_server_lock(pcs);
	cuse_server_is_closing(pcs);
	/* final client wakeup, if any */
	cuse_server_wakeup_all_client_locked(pcs);

	/* detach all kqueue notes */
	knlist_clear(&pcs->selinfo.si_note, 1);

	retval = pcs->refs;
	cuse_server_unlock(pcs);

	return (retval);
}
729
/*
 * cdevpriv destructor for the server device handle.  Waits until the
 * server reference count drops to one before doing the final unref.
 */
static void
cuse_server_free(void *arg)
{
	struct cuse_server *pcs = arg;

	/*
	 * The final server unref should be done by the server thread
	 * to prevent deadlock in the client cdevpriv destructor,
	 * which cannot destroy itself.
	 */
	while (cuse_server_do_close(pcs) != 1)
		pause("W", hz);

	/* drop final refcount */
	cuse_server_unref(pcs);
}
746
747static int
748cuse_server_open(struct cdev *dev, int fflags, int devtype, struct thread *td)
749{
750 struct cuse_server *pcs;
751
752 pcs = malloc(sizeof(*pcs), M_CUSE, M_WAITOK | M_ZERO);
753 if (pcs == NULL)
754 return (ENOMEM);
755
756 if (devfs_set_cdevpriv(pcs, &cuse_server_free)) {
757 printf("Cuse: Cannot set cdevpriv.\n");
758 free(pcs, M_CUSE);
759 return (ENOMEM);
760 }
761 /* store current process ID */
762 pcs->pid = curproc->p_pid;
763
764 TAILQ_INIT(&pcs->head);
765 TAILQ_INIT(&pcs->hdev);
766 TAILQ_INIT(&pcs->hcli);
767 TAILQ_INIT(&pcs->hmem);
768
769 cv_init(&pcs->cv, "cuse-server-cv");
770
771 mtx_init(&pcs->mtx, "cuse-server-mtx", NULL, MTX_DEF);
772
773 knlist_init_mtx(&pcs->selinfo.si_note, &pcs->mtx);
774
775 cuse_global_lock();
776 pcs->refs++;
777 TAILQ_INSERT_TAIL(&cuse_server_head, pcs, entry);
778 cuse_global_unlock();
779
780 return (0);
781}
782
/*
 * Close entry point for the CUSE server control device.  Initiates
 * server shutdown when the cdevpriv lookup succeeds; always returns
 * success.
 */
static int
cuse_server_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct cuse_server *pcs;

	if (cuse_server_get(&pcs) != 0)
		return (0);

	cuse_server_do_close(pcs);
	return (0);
}
793
794static int
795cuse_server_read(struct cdev *dev, struct uio *uio, int ioflag)
796{
797 return (ENXIO);
798}
799
800static int
801cuse_server_write(struct cdev *dev, struct uio *uio, int ioflag)
802{
803 return (ENXIO);
804}
805
/*
 * Copy ioctl payload data between the current (server) process and a
 * client's ioctl buffer.  "isread" != 0 copies from the ioctl buffer
 * out to the server, otherwise from the server into the buffer.
 * Called with the server lock held; the lock is dropped around the
 * copyin()/copyout() while "proc_refs" keeps the peer pinned.
 */
static int
cuse_server_ioctl_copy_locked(struct cuse_server *pcs,
    struct cuse_client_command *pccmd,
    struct cuse_data_chunk *pchk, int isread)
{
	struct proc *p_proc;
	uint32_t offset;
	int error;

	offset = pchk->peer_ptr - CUSE_BUF_MIN_PTR;

	/* range-check the requested chunk against the ioctl buffer */
	if (pchk->length > CUSE_BUFFER_MAX)
		return (EFAULT);

	if (offset >= CUSE_BUFFER_MAX)
		return (EFAULT);

	if ((offset + pchk->length) > CUSE_BUFFER_MAX)
		return (EFAULT);

	p_proc = pccmd->proc_curr;
	if (p_proc == NULL)
		return (ENXIO);

	/* guard against reference count overflow */
	if (pccmd->proc_refs < 0)
		return (ENOMEM);

	/* keep the peer referenced while the server lock is dropped */
	pccmd->proc_refs++;

	cuse_server_unlock(pcs);

	if (isread == 0) {
		error = copyin(
		    (void *)pchk->local_ptr,
		    pccmd->client->ioctl_buffer + offset,
		    pchk->length);
	} else {
		error = copyout(
		    pccmd->client->ioctl_buffer + offset,
		    (void *)pchk->local_ptr,
		    pchk->length);
	}

	cuse_server_lock(pcs);

	pccmd->proc_refs--;

	/* wake up anyone waiting for the references to drain */
	if (pccmd->proc_curr == NULL)
		cv_signal(&pccmd->cv);

	return (error);
}
858
/*
 * Copy "len" bytes from (proc_s, data_s) to (proc_d, data_d).  One of
 * the two processes must be the current process; the other side is
 * accessed via proc_rwmem().  Returns 0 or an errno value; EINVAL
 * when neither side is the current process.
 */
static int
cuse_proc2proc_copy(struct proc *proc_s, vm_offset_t data_s,
    struct proc *proc_d, vm_offset_t data_d, size_t len)
{
	struct thread *td;
	struct proc *proc_cur;
	int error;

	td = curthread;
	proc_cur = td->td_proc;

	if (proc_cur == proc_d) {
		/* read from the source process into our address space */
		struct iovec iov = {
			.iov_base = (caddr_t)data_d,
			.iov_len = len,
		};
		struct uio uio = {
			.uio_iov = &iov,
			.uio_iovcnt = 1,
			.uio_offset = (off_t)data_s,
			.uio_resid = len,
			.uio_segflg = UIO_USERSPACE,
			.uio_rw = UIO_READ,
			.uio_td = td,
		};

		/* hold the foreign process while accessing its memory */
		PHOLD(proc_s);
		error = proc_rwmem(proc_s, &uio);
		PRELE(proc_s);

	} else if (proc_cur == proc_s) {
		/* write from our address space into the destination process */
		struct iovec iov = {
			.iov_base = (caddr_t)data_s,
			.iov_len = len,
		};
		struct uio uio = {
			.uio_iov = &iov,
			.uio_iovcnt = 1,
			.uio_offset = (off_t)data_d,
			.uio_resid = len,
			.uio_segflg = UIO_USERSPACE,
			.uio_rw = UIO_WRITE,
			.uio_td = td,
		};

		PHOLD(proc_d);
		error = proc_rwmem(proc_d, &uio);
		PRELE(proc_d);
	} else {
		error = EINVAL;
	}
	return (error);
}
912
/*
 * Copy bulk data between the server process and the client's peer
 * process using cuse_proc2proc_copy().  "isread" != 0 copies from
 * the peer to the server, otherwise the other way around.  Called
 * with the server lock held; the lock is dropped around the copy
 * while "proc_refs" keeps the peer pinned.
 */
static int
cuse_server_data_copy_locked(struct cuse_server *pcs,
    struct cuse_client_command *pccmd,
    struct cuse_data_chunk *pchk, int isread)
{
	struct proc *p_proc;
	int error;

	p_proc = pccmd->proc_curr;
	if (p_proc == NULL)
		return (ENXIO);

	/* guard against reference count overflow */
	if (pccmd->proc_refs < 0)
		return (ENOMEM);

	/* keep the peer referenced while the server lock is dropped */
	pccmd->proc_refs++;

	cuse_server_unlock(pcs);

	if (isread == 0) {
		error = cuse_proc2proc_copy(
		    curthread->td_proc, pchk->local_ptr,
		    p_proc, pchk->peer_ptr,
		    pchk->length);
	} else {
		error = cuse_proc2proc_copy(
		    p_proc, pchk->peer_ptr,
		    curthread->td_proc, pchk->local_ptr,
		    pchk->length);
	}

	cuse_server_lock(pcs);

	pccmd->proc_refs--;

	/* wake up anyone waiting for the references to drain */
	if (pccmd->proc_curr == NULL)
		cv_signal(&pccmd->cv);

	return (error);
}
953
/*
 * Allocate the lowest free unit number within the given ID class and
 * record "pcs" as the owner in the global allocation tables.  Called
 * with the server lock held.  Returns the unit number on success or
 * -1 when no unit or table slot is available.
 */
static int
cuse_alloc_unit_by_id_locked(struct cuse_server *pcs, int id)
{
	int n;
	int x = 0;
	int match;

	/*
	 * Phase one: find the lowest unit number "x" not yet used by
	 * any entry of the same ID class.  Restart the scan whenever
	 * "x" collides with an existing entry.
	 */
	do {
		for (match = n = 0; n != CUSE_DEVICES_MAX; n++) {
			if (cuse_alloc_unit[n] != NULL) {
				/* skip entries belonging to another ID class */
				if ((cuse_alloc_unit_id[n] ^ id) & CUSE_ID_MASK)
					continue;
				if ((cuse_alloc_unit_id[n] & ~CUSE_ID_MASK) == x) {
					x++;
					match = 1;
				}
			}
		}
	} while (match);

	/* phase two: claim a free table slot; unit numbers top out at 255 */
	if (x < 256) {
		for (n = 0; n != CUSE_DEVICES_MAX; n++) {
			if (cuse_alloc_unit[n] == NULL) {
				cuse_alloc_unit[n] = pcs;
				cuse_alloc_unit_id[n] = id | x;
				return (x);
			}
		}
	}
	return (-1);
}
985
/* Wake up select/poll and kqueue waiters.  Server lock must be held. */
static void
cuse_server_wakeup_locked(struct cuse_server *pcs)
{
	selwakeup(&pcs->selinfo);
	KNOTE_LOCKED(&pcs->selinfo.si_note, 0);
}
992
/*
 * Flag every client so its knotes re-poll for both read and write,
 * then wake up all waiters.  Server lock must be held.
 */
static void
cuse_server_wakeup_all_client_locked(struct cuse_server *pcs)
{
	struct cuse_client *pcc;

	TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
		pcc->cflags |= (CUSE_CLI_KNOTE_NEED_READ |
		    CUSE_CLI_KNOTE_NEED_WRITE);
	}
	cuse_server_wakeup_locked(pcs);
}
1004
1005static int
1006cuse_free_unit_by_id_locked(struct cuse_server *pcs, int id)
1007{
1008 int n;
1009 int found = 0;
1010
1011 for (n = 0; n != CUSE_DEVICES_MAX; n++) {
1012 if (cuse_alloc_unit[n] == pcs) {
1013 if (cuse_alloc_unit_id[n] == id || id == -1) {
1014 cuse_alloc_unit[n] = NULL;
1015 cuse_alloc_unit_id[n] = 0;
1016 found = 1;
1017 }
1018 }
1019 }
1020
1021 return (found ? 0 : EINVAL);
1022}
1023
/*
 * Server control ioctl dispatcher.  This is the command channel the
 * user-space CUSE server uses to fetch client commands, return their
 * results, manage unit numbers, shared memory and character devices,
 * and move data to/from client processes.
 */
static int
cuse_server_ioctl(struct cdev *dev, unsigned long cmd,
    caddr_t data, int fflag, struct thread *td)
{
	struct cuse_server *pcs;
	int error;

	error = cuse_server_get(&pcs);
	if (error != 0)
		return (error);

	switch (cmd) {
		/* declarations shared by the case labels below */
		struct cuse_client_command *pccmd;
		struct cuse_client *pcc;
		struct cuse_command *pcmd;
		struct cuse_alloc_info *pai;
		struct cuse_create_dev *pcd;
		struct cuse_server_dev *pcsd;
		struct cuse_data_chunk *pchk;
		int n;

	case CUSE_IOCTL_GET_COMMAND:
		/* dequeue the next client command, sleeping until one arrives */
		pcmd = (void *)data;

		cuse_server_lock(pcs);

		while ((pccmd = TAILQ_FIRST(&pcs->head)) == NULL) {
			error = cv_wait_sig(&pcs->cv, &pcs->mtx);

			/* server shutdown overrides any wait status */
			if (pcs->is_closing)
				error = ENXIO;

			if (error) {
				cuse_server_unlock(pcs);
				return (error);
			}
		}

		TAILQ_REMOVE(&pcs->head, pccmd, entry);
		pccmd->entry.tqe_prev = NULL;

		/* remember which server thread is processing this command */
		pccmd->entered = curthread;

		*pcmd = pccmd->sub;

		cuse_server_unlock(pcs);

		break;

	case CUSE_IOCTL_SYNC_COMMAND:
		/* complete the command(s) this thread dequeued earlier */
		cuse_server_lock(pcs);
		while ((pccmd = cuse_server_find_command(pcs, curthread)) != NULL) {

			/* send sync command */
			pccmd->entered = NULL;
			pccmd->error = *(int *)data;
			pccmd->command = CUSE_CMD_SYNC;

			/* signal peer, if any */
			cv_signal(&pccmd->cv);
		}
		cuse_server_unlock(pcs);

		break;

	case CUSE_IOCTL_ALLOC_UNIT:
		/* allocate a unit number in the default ID class */
		cuse_server_lock(pcs);
		n = cuse_alloc_unit_by_id_locked(pcs,
		    CUSE_ID_DEFAULT(0));
		cuse_server_unlock(pcs);

		if (n < 0)
			error = ENOMEM;
		else
			*(int *)data = n;
		break;

	case CUSE_IOCTL_ALLOC_UNIT_BY_ID:
		/* allocate a unit number in a caller-specified ID class */
		n = *(int *)data;

		n = (n & CUSE_ID_MASK);

		cuse_server_lock(pcs);
		n = cuse_alloc_unit_by_id_locked(pcs, n);
		cuse_server_unlock(pcs);

		if (n < 0)
			error = ENOMEM;
		else
			*(int *)data = n;
		break;

	case CUSE_IOCTL_FREE_UNIT:
		/* free a unit number in the default ID class */
		n = *(int *)data;

		n = CUSE_ID_DEFAULT(n);

		cuse_server_lock(pcs);
		error = cuse_free_unit_by_id_locked(pcs, n);
		cuse_server_unlock(pcs);
		break;

	case CUSE_IOCTL_FREE_UNIT_BY_ID:
		/* free a unit number given the full ID value */
		n = *(int *)data;

		cuse_server_lock(pcs);
		error = cuse_free_unit_by_id_locked(pcs, n);
		cuse_server_unlock(pcs);
		break;

	case CUSE_IOCTL_ALLOC_MEMORY:
		/* allocate a shared-memory region for mmap */
		pai = (void *)data;

		if (pai->alloc_nr >= CUSE_ALLOC_UNIT_MAX) {
			error = ENOMEM;
			break;
		}
		if (pai->page_count >= CUSE_ALLOC_PAGES_MAX) {
			error = ENOMEM;
			break;
		}
		error = cuse_server_alloc_memory(pcs,
		    pai->alloc_nr, pai->page_count);
		break;

	case CUSE_IOCTL_FREE_MEMORY:
		/* free a previously allocated shared-memory region */
		pai = (void *)data;

		if (pai->alloc_nr >= CUSE_ALLOC_UNIT_MAX) {
			error = ENOMEM;
			break;
		}
		error = cuse_server_free_memory(pcs, pai->alloc_nr);
		break;

	case CUSE_IOCTL_GET_SIG:
		/* fetch and clear the pending-signal flag of this command */
		cuse_server_lock(pcs);
		pccmd = cuse_server_find_command(pcs, curthread);

		if (pccmd != NULL) {
			n = pccmd->got_signal;
			pccmd->got_signal = 0;
		} else {
			n = 0;
		}
		cuse_server_unlock(pcs);

		*(int *)data = n;

		break;

	case CUSE_IOCTL_SET_PFH:
		/* set the per-file handle for all commands of the client */
		cuse_server_lock(pcs);
		pccmd = cuse_server_find_command(pcs, curthread);

		if (pccmd != NULL) {
			pcc = pccmd->client;
			for (n = 0; n != CUSE_CMD_MAX; n++) {
				pcc->cmds[n].sub.per_file_handle = *(uintptr_t *)data;
			}
		} else {
			error = ENXIO;
		}
		cuse_server_unlock(pcs);
		break;

	case CUSE_IOCTL_CREATE_DEV:
		/* create a new CUSE character device; privileged */
		error = priv_check(curthread, PRIV_DRIVER);
		if (error)
			break;

		pcd = (void *)data;

		/* filter input */

		pcd->devname[sizeof(pcd->devname) - 1] = 0;

		if (pcd->devname[0] == 0) {
			error = EINVAL;
			break;
		}
		cuse_str_filter(pcd->devname);

		pcd->permissions &= 0777;

		/* try to allocate a character device */

		pcsd = malloc(sizeof(*pcsd), M_CUSE, M_WAITOK | M_ZERO);

		if (pcsd == NULL) {
			error = ENOMEM;
			break;
		}
		pcsd->server = pcs;

		pcsd->user_dev = pcd->dev;

		pcsd->kern_dev = make_dev_credf(MAKEDEV_CHECKNAME,
		    &cuse_client_devsw, 0, NULL, pcd->user_id, pcd->group_id,
		    pcd->permissions, "%s", pcd->devname);

		if (pcsd->kern_dev == NULL) {
			free(pcsd, M_CUSE);
			error = ENOMEM;
			break;
		}
		pcsd->kern_dev->si_drv1 = pcsd;

		cuse_server_lock(pcs);
		TAILQ_INSERT_TAIL(&pcs->hdev, pcsd, entry);
		cuse_server_unlock(pcs);

		break;

	case CUSE_IOCTL_DESTROY_DEV:
		/* destroy a matching CUSE character device; privileged */
		error = priv_check(curthread, PRIV_DRIVER);
		if (error)
			break;

		cuse_server_lock(pcs);

		error = EINVAL;

		/*
		 * Restart the scan from the head after each free,
		 * because the server lock is dropped around
		 * cuse_server_free_dev().
		 */
		pcsd = TAILQ_FIRST(&pcs->hdev);
		while (pcsd != NULL) {
			if (pcsd->user_dev == *(struct cuse_dev **)data) {
				TAILQ_REMOVE(&pcs->hdev, pcsd, entry);
				cuse_server_unlock(pcs);
				cuse_server_free_dev(pcsd);
				cuse_server_lock(pcs);
				error = 0;
				pcsd = TAILQ_FIRST(&pcs->hdev);
			} else {
				pcsd = TAILQ_NEXT(pcsd, entry);
			}
		}

		cuse_server_unlock(pcs);
		break;

	case CUSE_IOCTL_WRITE_DATA:
	case CUSE_IOCTL_READ_DATA:
		/* move data between the server and the client's process */
		cuse_server_lock(pcs);
		pchk = (struct cuse_data_chunk *)data;

		pccmd = cuse_server_find_command(pcs, curthread);

		if (pccmd == NULL) {
			error = ENXIO;	/* invalid request */
		} else if (pchk->peer_ptr < CUSE_BUF_MIN_PTR) {
			error = EFAULT;	/* NULL pointer */
		} else if (pchk->peer_ptr < CUSE_BUF_MAX_PTR) {
			/* pseudo-pointer into the client's ioctl buffer */
			error = cuse_server_ioctl_copy_locked(pcs, pccmd,
			    pchk, cmd == CUSE_IOCTL_READ_DATA);
		} else {
			/* real user-space address in the peer process */
			error = cuse_server_data_copy_locked(pcs, pccmd,
			    pchk, cmd == CUSE_IOCTL_READ_DATA);
		}
		cuse_server_unlock(pcs);
		break;

	case CUSE_IOCTL_SELWAKEUP:
		cuse_server_lock(pcs);
		/*
		 * We don't know which direction caused the event.
		 * Wakeup both!
		 */
		cuse_server_wakeup_all_client_locked(pcs);
		cuse_server_unlock(pcs);
		break;

	default:
		error = ENXIO;
		break;
	}
	return (error);
}
1312
1313static int
1314cuse_server_poll(struct cdev *dev, int events, struct thread *td)
1315{
1316 return (events & (POLLHUP | POLLPRI | POLLIN |
1317 POLLRDNORM | POLLOUT | POLLWRNORM));
1318}
1319
/*
 * Map a server-allocated shared-memory region.  The mmap offset
 * encodes both the allocation number and the page offset within the
 * allocation.  NOTE(review): this is nearly identical to
 * cuse_client_mmap_single() below; keep the two in sync.
 */
static int
cuse_server_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **object, int nprot)
{
	uint32_t page_nr = *offset / PAGE_SIZE;
	uint32_t alloc_nr = page_nr / CUSE_ALLOC_PAGES_MAX;
	struct cuse_memory *mem;
	struct cuse_server *pcs;
	int error;

	error = cuse_server_get(&pcs);
	if (error != 0)
		return (error);

	cuse_server_lock(pcs);
	/* lookup memory structure */
	TAILQ_FOREACH(mem, &pcs->hmem, entry) {
		if (mem->alloc_nr == alloc_nr)
			break;
	}
	if (mem == NULL) {
		cuse_server_unlock(pcs);
		return (ENOMEM);
	}
	/* verify page offset */
	page_nr %= CUSE_ALLOC_PAGES_MAX;
	if (page_nr >= mem->page_count) {
		cuse_server_unlock(pcs);
		return (ENXIO);
	}
	/* verify mmap size */
	if ((size % PAGE_SIZE) != 0 || (size < PAGE_SIZE) ||
	    (size > ((mem->page_count - page_nr) * PAGE_SIZE))) {
		cuse_server_unlock(pcs);
		return (EINVAL);
	}
	/* hand out a new reference on the VM object */
	vm_object_reference(mem->object);
	*object = mem->object;
	cuse_server_unlock(pcs);

	/* set new VM object offset to use */
	*offset = page_nr * PAGE_SIZE;

	/* success */
	return (0);
}
1366
1367/*------------------------------------------------------------------------*
1368 * CUSE CLIENT PART
1369 *------------------------------------------------------------------------*/
/*
 * cdevpriv destructor for a CUSE client: unlink the client from the
 * server, destroy its per-command locks and drop the client's
 * reference on the server.
 */
static void
cuse_client_free(void *arg)
{
	struct cuse_client *pcc = arg;
	struct cuse_client_command *pccmd;
	struct cuse_server *pcs;
	int n;

	pcs = pcc->server;

	cuse_server_lock(pcs);
	cuse_client_is_closing(pcc);
	TAILQ_REMOVE(&pcs->hcli, pcc, entry);
	cuse_server_unlock(pcs);

	/* destroy per-command synchronisation objects */
	for (n = 0; n != CUSE_CMD_MAX; n++) {

		pccmd = &pcc->cmds[n];

		sx_destroy(&pccmd->sx);
		cv_destroy(&pccmd->cv);
	}

	free(pcc, M_CUSE);

	/* drop reference on server */
	cuse_server_unref(pcs);
}
1398
1399static int
1400cuse_client_open(struct cdev *dev, int fflags, int devtype, struct thread *td)
1401{
1402 struct cuse_client_command *pccmd;
1403 struct cuse_server_dev *pcsd;
1404 struct cuse_client *pcc;
1405 struct cuse_server *pcs;
1406 struct cuse_dev *pcd;
1407 int error;
1408 int n;
1409
1410 pcsd = dev->si_drv1;
1411 if (pcsd != NULL) {
1412 pcs = pcsd->server;
1413 pcd = pcsd->user_dev;
1414
1415 cuse_server_lock(pcs);
1416 /*
1417 * Check that the refcount didn't wrap and that the
1418 * same process is not both client and server. This
1419 * can easily lead to deadlocks when destroying the
1420 * CUSE character device nodes:
1421 */
1422 pcs->refs++;
1423 if (pcs->refs < 0 || pcs->pid == curproc->p_pid) {
1424 /* overflow or wrong PID */
1425 pcs->refs--;
1426 cuse_server_unlock(pcs);
1427 return (EINVAL);
1428 }
1429 cuse_server_unlock(pcs);
1430 } else {
1431 return (EINVAL);
1432 }
1433
1434 pcc = malloc(sizeof(*pcc), M_CUSE, M_WAITOK | M_ZERO);
1435 if (pcc == NULL) {
1436 /* drop reference on server */
1437 cuse_server_unref(pcs);
1438 return (ENOMEM);
1439 }
1440 if (devfs_set_cdevpriv(pcc, &cuse_client_free)) {
1441 printf("Cuse: Cannot set cdevpriv.\n");
1442 /* drop reference on server */
1443 cuse_server_unref(pcs);
1444 free(pcc, M_CUSE);
1445 return (ENOMEM);
1446 }
1447 pcc->fflags = fflags;
1448 pcc->server_dev = pcsd;
1449 pcc->server = pcs;
1450
1451 for (n = 0; n != CUSE_CMD_MAX; n++) {
1452
1453 pccmd = &pcc->cmds[n];
1454
1455 pccmd->sub.dev = pcd;
1456 pccmd->sub.command = n;
1457 pccmd->client = pcc;
1458
1459 sx_init(&pccmd->sx, "cuse-client-sx");
1460 cv_init(&pccmd->cv, "cuse-client-cv");
1461 }
1462
1463 cuse_server_lock(pcs);
1464
1465 /* cuse_client_free() assumes that the client is listed somewhere! */
1466 /* always enqueue */
1467
1468 TAILQ_INSERT_TAIL(&pcs->hcli, pcc, entry);
1469
1470 /* check if server is closing */
1471 if ((pcs->is_closing != 0) || (dev->si_drv1 == NULL)) {
1472 error = EINVAL;
1473 } else {
1474 error = 0;
1475 }
1476 cuse_server_unlock(pcs);
1477
1478 if (error) {
1479 devfs_clear_cdevpriv(); /* XXX bugfix */
1480 return (error);
1481 }
1482 pccmd = &pcc->cmds[CUSE_CMD_OPEN];
1483
1484 cuse_cmd_lock(pccmd);
1485
1486 cuse_server_lock(pcs);
1487 cuse_client_send_command_locked(pccmd, 0, 0, pcc->fflags, 0);
1488
1489 error = cuse_client_receive_command_locked(pccmd, 0, 0);
1490 cuse_server_unlock(pcs);
1491
1492 if (error < 0) {
1493 error = cuse_convert_error(error);
1494 } else {
1495 error = 0;
1496 }
1497
1498 cuse_cmd_unlock(pccmd);
1499
1500 if (error)
1501 devfs_clear_cdevpriv(); /* XXX bugfix */
1502
1503 return (error);
1504}
1505
/*
 * Close entry point for a CUSE client: forward a CUSE_CMD_CLOSE to
 * the user-space server, wait for the reply, then mark the client as
 * closing.  Always returns 0.
 */
static int
cuse_client_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct cuse_client_command *pccmd;
	struct cuse_client *pcc;
	struct cuse_server *pcs;
	int error;

	error = cuse_client_get(&pcc);
	if (error != 0)
		return (0);

	pccmd = &pcc->cmds[CUSE_CMD_CLOSE];
	pcs = pcc->server;

	cuse_cmd_lock(pccmd);

	cuse_server_lock(pcs);
	cuse_client_send_command_locked(pccmd, 0, 0, pcc->fflags, 0);

	/* the server's reply is ignored on close */
	error = cuse_client_receive_command_locked(pccmd, 0, 0);
	cuse_cmd_unlock(pccmd);

	cuse_client_is_closing(pcc);
	cuse_server_unlock(pcs);

	return (0);
}
1534
/*
 * Re-poll the user-space server on behalf of the client's attached
 * knotes and re-arm the NEED_READ/NEED_WRITE flags according to the
 * reported state.
 */
static void
cuse_client_kqfilter_poll(struct cdev *dev, struct cuse_client *pcc)
{
	struct cuse_server *pcs = pcc->server;
	int temp;

	/* clear the need flags and check if any knote is attached */
	cuse_server_lock(pcs);
	temp = (pcc->cflags & (CUSE_CLI_KNOTE_HAS_READ |
	    CUSE_CLI_KNOTE_HAS_WRITE));
	pcc->cflags &= ~(CUSE_CLI_KNOTE_NEED_READ |
	    CUSE_CLI_KNOTE_NEED_WRITE);
	cuse_server_unlock(pcs);

	if (temp != 0) {
		/* get the latest polling state from the server */
		temp = cuse_client_poll(dev, POLLIN | POLLOUT, NULL);

		if (temp & (POLLIN | POLLOUT)) {
			cuse_server_lock(pcs);
			if (temp & POLLIN)
				pcc->cflags |= CUSE_CLI_KNOTE_NEED_READ;
			if (temp & POLLOUT)
				pcc->cflags |= CUSE_CLI_KNOTE_NEED_WRITE;

			/* make sure the "knote" gets woken up */
			cuse_server_wakeup_locked(pcc->server);
			cuse_server_unlock(pcs);
		}
	}
}
1565
/*
 * Read entry point for a CUSE client.  Each iovec is forwarded to the
 * user-space server as a CUSE_CMD_READ; the actual data is moved by
 * the server (see CUSE_IOCTL_READ/WRITE_DATA), so the uio is walked
 * with UIO_NOCOPY and uiomove() only advances the offsets.
 */
static int
cuse_client_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cuse_client_command *pccmd;
	struct cuse_client *pcc;
	struct cuse_server *pcs;
	int error;
	int len;

	error = cuse_client_get(&pcc);
	if (error != 0)
		return (error);

	pccmd = &pcc->cmds[CUSE_CMD_READ];
	pcs = pcc->server;

	if (uio->uio_segflg != UIO_USERSPACE) {
		return (EINVAL);
	}
	uio->uio_segflg = UIO_NOCOPY;

	cuse_cmd_lock(pccmd);

	while (uio->uio_resid != 0) {

		if (uio->uio_iov->iov_len > CUSE_LENGTH_MAX) {
			error = ENOMEM;
			break;
		}
		len = uio->uio_iov->iov_len;

		cuse_server_lock(pcs);
		cuse_client_send_command_locked(pccmd,
		    (uintptr_t)uio->uio_iov->iov_base,
		    (unsigned long)(unsigned int)len, pcc->fflags, ioflag);

		/* a non-negative reply is the number of bytes transferred */
		error = cuse_client_receive_command_locked(pccmd, 0, 0);
		cuse_server_unlock(pcs);

		if (error < 0) {
			error = cuse_convert_error(error);
			break;
		} else if (error == len) {
			/* full chunk transferred: advance and continue */
			error = uiomove(NULL, error, uio);
			if (error)
				break;
		} else {
			/* short transfer: advance and stop */
			error = uiomove(NULL, error, uio);
			break;
		}
	}
	cuse_cmd_unlock(pccmd);

	uio->uio_segflg = UIO_USERSPACE;/* restore segment flag */

	if (error == EWOULDBLOCK)
		cuse_client_kqfilter_poll(dev, pcc);

	return (error);
}
1626
/*
 * Write entry point for a CUSE client.  Mirrors cuse_client_read():
 * each iovec is forwarded as a CUSE_CMD_WRITE and the data itself is
 * moved by the user-space server, so the uio is walked with
 * UIO_NOCOPY and uiomove() only advances the offsets.
 */
static int
cuse_client_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cuse_client_command *pccmd;
	struct cuse_client *pcc;
	struct cuse_server *pcs;
	int error;
	int len;

	error = cuse_client_get(&pcc);
	if (error != 0)
		return (error);

	pccmd = &pcc->cmds[CUSE_CMD_WRITE];
	pcs = pcc->server;

	if (uio->uio_segflg != UIO_USERSPACE) {
		return (EINVAL);
	}
	uio->uio_segflg = UIO_NOCOPY;

	cuse_cmd_lock(pccmd);

	while (uio->uio_resid != 0) {

		if (uio->uio_iov->iov_len > CUSE_LENGTH_MAX) {
			error = ENOMEM;
			break;
		}
		len = uio->uio_iov->iov_len;

		cuse_server_lock(pcs);
		cuse_client_send_command_locked(pccmd,
		    (uintptr_t)uio->uio_iov->iov_base,
		    (unsigned long)(unsigned int)len, pcc->fflags, ioflag);

		/* a non-negative reply is the number of bytes transferred */
		error = cuse_client_receive_command_locked(pccmd, 0, 0);
		cuse_server_unlock(pcs);

		if (error < 0) {
			error = cuse_convert_error(error);
			break;
		} else if (error == len) {
			/* full chunk transferred: advance and continue */
			error = uiomove(NULL, error, uio);
			if (error)
				break;
		} else {
			/* short transfer: advance and stop */
			error = uiomove(NULL, error, uio);
			break;
		}
	}
	cuse_cmd_unlock(pccmd);

	uio->uio_segflg = UIO_USERSPACE;/* restore segment flag */

	if (error == EWOULDBLOCK)
		cuse_client_kqfilter_poll(dev, pcc);

	return (error);
}
1687
/*
 * Ioctl entry point for a CUSE client.  IOC_IN data is staged into
 * the client's ioctl buffer before forwarding CUSE_CMD_IOCTL to the
 * user-space server, and IOC_OUT data is copied back afterwards.
 */
int
cuse_client_ioctl(struct cdev *dev, unsigned long cmd,
    caddr_t data, int fflag, struct thread *td)
{
	struct cuse_client_command *pccmd;
	struct cuse_client *pcc;
	struct cuse_server *pcs;
	int error;
	int len;

	error = cuse_client_get(&pcc);
	if (error != 0)
		return (error);

	/* the payload must fit into the client's ioctl buffer */
	len = IOCPARM_LEN(cmd);
	if (len > CUSE_BUFFER_MAX)
		return (ENOMEM);

	pccmd = &pcc->cmds[CUSE_CMD_IOCTL];
	pcs = pcc->server;

	cuse_cmd_lock(pccmd);

	if (cmd & (IOC_IN | IOC_VOID))
		memcpy(pcc->ioctl_buffer, data, len);

	/*
	 * When the ioctl-length is zero drivers can pass information
	 * through the data pointer of the ioctl. Make sure this information
	 * is forwarded to the driver.
	 */

	cuse_server_lock(pcs);
	cuse_client_send_command_locked(pccmd,
	    (len == 0) ? *(long *)data : CUSE_BUF_MIN_PTR,
	    (unsigned long)cmd, pcc->fflags,
	    (fflag & O_NONBLOCK) ? IO_NDELAY : 0);

	error = cuse_client_receive_command_locked(pccmd, data, len);
	cuse_server_unlock(pcs);

	/* negative replies carry a CUSE error code */
	if (error < 0) {
		error = cuse_convert_error(error);
	} else {
		error = 0;
	}

	if (cmd & IOC_OUT)
		memcpy(data, pcc->ioctl_buffer, len);

	cuse_cmd_unlock(pccmd);

	if (error == EWOULDBLOCK)
		cuse_client_kqfilter_poll(dev, pcc);

	return (error);
}
1745
/*
 * Poll entry point for a CUSE client.  Translates the poll events
 * into CUSE_POLL_* flags, forwards a CUSE_CMD_POLL to the user-space
 * server and translates its reply back into revents.
 */
static int
cuse_client_poll(struct cdev *dev, int events, struct thread *td)
{
	struct cuse_client_command *pccmd;
	struct cuse_client *pcc;
	struct cuse_server *pcs;
	unsigned long temp;
	int error;
	int revents;

	error = cuse_client_get(&pcc);
	if (error != 0)
		goto pollnval;

	temp = 0;
	pcs = pcc->server;

	if (events & (POLLPRI | POLLIN | POLLRDNORM))
		temp |= CUSE_POLL_READ;

	if (events & (POLLOUT | POLLWRNORM))
		temp |= CUSE_POLL_WRITE;

	if (events & POLLHUP)
		temp |= CUSE_POLL_ERROR;

	pccmd = &pcc->cmds[CUSE_CMD_POLL];

	cuse_cmd_lock(pccmd);

	/* Need to selrecord() first to not lose any events. */
	if (temp != 0 && td != NULL)
		selrecord(td, &pcs->selinfo);

	cuse_server_lock(pcs);
	cuse_client_send_command_locked(pccmd,
	    0, temp, pcc->fflags, IO_NDELAY);

	/* a non-negative reply is a CUSE_POLL_* bit mask */
	error = cuse_client_receive_command_locked(pccmd, 0, 0);
	cuse_server_unlock(pcs);

	cuse_cmd_unlock(pccmd);

	if (error < 0) {
		goto pollnval;
	} else {
		revents = 0;
		if (error & CUSE_POLL_READ)
			revents |= (events & (POLLPRI | POLLIN | POLLRDNORM));
		if (error & CUSE_POLL_WRITE)
			revents |= (events & (POLLOUT | POLLWRNORM));
		if (error & CUSE_POLL_ERROR)
			revents |= (events & POLLHUP);
	}
	return (revents);

pollnval:
	/* XXX many clients don't understand POLLNVAL */
	return (events & (POLLHUP | POLLPRI | POLLIN |
	    POLLRDNORM | POLLOUT | POLLWRNORM));
}
1807
/*
 * Map a server-allocated shared-memory region on behalf of a client.
 * The mmap offset encodes both the allocation number and the page
 * offset within the allocation.  NOTE(review): this is nearly
 * identical to cuse_server_mmap_single() above; keep the two in sync.
 */
static int
cuse_client_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **object, int nprot)
{
	uint32_t page_nr = *offset / PAGE_SIZE;
	uint32_t alloc_nr = page_nr / CUSE_ALLOC_PAGES_MAX;
	struct cuse_memory *mem;
	struct cuse_client *pcc;
	struct cuse_server *pcs;
	int error;

	error = cuse_client_get(&pcc);
	if (error != 0)
		return (error);

	pcs = pcc->server;

	cuse_server_lock(pcs);
	/* lookup memory structure */
	TAILQ_FOREACH(mem, &pcs->hmem, entry) {
		if (mem->alloc_nr == alloc_nr)
			break;
	}
	if (mem == NULL) {
		cuse_server_unlock(pcs);
		return (ENOMEM);
	}
	/* verify page offset */
	page_nr %= CUSE_ALLOC_PAGES_MAX;
	if (page_nr >= mem->page_count) {
		cuse_server_unlock(pcs);
		return (ENXIO);
	}
	/* verify mmap size */
	if ((size % PAGE_SIZE) != 0 || (size < PAGE_SIZE) ||
	    (size > ((mem->page_count - page_nr) * PAGE_SIZE))) {
		cuse_server_unlock(pcs);
		return (EINVAL);
	}
	/* hand out a new reference on the VM object */
	vm_object_reference(mem->object);
	*object = mem->object;
	cuse_server_unlock(pcs);

	/* set new VM object offset to use */
	*offset = page_nr * PAGE_SIZE;

	/* success */
	return (0);
}
1857
1858static void
1859cuse_client_kqfilter_read_detach(struct knote *kn)
1860{
1861 struct cuse_client *pcc;
1862 struct cuse_server *pcs;
1863
1864 pcc = kn->kn_hook;
1865 pcs = pcc->server;
1866
1867 cuse_server_lock(pcs);
1868 knlist_remove(&pcs->selinfo.si_note, kn, 1);
1869 cuse_server_unlock(pcs);
1870}
1871
1872static void
1873cuse_client_kqfilter_write_detach(struct knote *kn)
1874{
1875 struct cuse_client *pcc;
1876 struct cuse_server *pcs;
1877
1878 pcc = kn->kn_hook;
1879 pcs = pcc->server;
1880
1881 cuse_server_lock(pcs);
1882 knlist_remove(&pcs->selinfo.si_note, kn, 1);
1883 cuse_server_unlock(pcs);
1884}
1885
1886static int
1887cuse_client_kqfilter_read_event(struct knote *kn, long hint)
1888{
1889 struct cuse_client *pcc;
1890
1891 pcc = kn->kn_hook;
1892
1893 mtx_assert(&pcc->server->mtx, MA_OWNED);
1894
1895 return ((pcc->cflags & CUSE_CLI_KNOTE_NEED_READ) ? 1 : 0);
1896}
1897
1898static int
1899cuse_client_kqfilter_write_event(struct knote *kn, long hint)
1900{
1901 struct cuse_client *pcc;
1902
1903 pcc = kn->kn_hook;
1904
1905 mtx_assert(&pcc->server->mtx, MA_OWNED);
1906
1907 return ((pcc->cflags & CUSE_CLI_KNOTE_NEED_WRITE) ? 1 : 0);
1908}
1909
/*
 * Attach a kqueue knote to a CUSE client device and immediately
 * refresh the polling state so no pending event is missed.
 */
static int
cuse_client_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct cuse_client *pcc;
	struct cuse_server *pcs;
	int error;

	error = cuse_client_get(&pcc);
	if (error != 0)
		return (error);

	pcs = pcc->server;

	cuse_server_lock(pcs);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		pcc->cflags |= CUSE_CLI_KNOTE_HAS_READ;
		kn->kn_hook = pcc;
		kn->kn_fop = &cuse_client_kqfilter_read_ops;
		knlist_add(&pcs->selinfo.si_note, kn, 1);
		break;
	case EVFILT_WRITE:
		pcc->cflags |= CUSE_CLI_KNOTE_HAS_WRITE;
		kn->kn_hook = pcc;
		kn->kn_fop = &cuse_client_kqfilter_write_ops;
		knlist_add(&pcs->selinfo.si_note, kn, 1);
		break;
	default:
		error = EINVAL;
		break;
	}
	cuse_server_unlock(pcs);

	/* get the initial event state from the server */
	if (error == 0)
		cuse_client_kqfilter_poll(dev, pcc);
	return (error);
}