// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * User-space I/O driver support for HID subsystem
 * Copyright (c) 2012 David Herrmann
 */

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/uhid.h>
#include <linux/wait.h>

#define UHID_NAME	"uhid"
#define UHID_BUFSIZE	32

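/*
 * Rough user-space usage sketch (not part of the driver; see
 * Documentation/hid/uhid.rst and include/uapi/linux/uhid.h for the
 * authoritative protocol description). A client creates a device by
 * writing a UHID_CREATE2 event and then injects input reports with
 * UHID_INPUT2; rdesc/rdesc_size and report/report_len below are
 * placeholders for the client's own data:
 *
 *	int fd = open("/dev/uhid", O_RDWR | O_CLOEXEC);
 *	struct uhid_event ev = { .type = UHID_CREATE2 };
 *
 *	strcpy((char *)ev.u.create2.name, "example-device");
 *	ev.u.create2.bus = BUS_USB;
 *	ev.u.create2.rd_size = rdesc_size;
 *	memcpy(ev.u.create2.rd_data, rdesc, rdesc_size);
 *	write(fd, &ev, sizeof(ev));	// device is registered asynchronously
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.type = UHID_INPUT2;
 *	ev.u.input2.size = report_len;
 *	memcpy(ev.u.input2.data, report, report_len);
 *	write(fd, &ev, sizeof(ev));	// inject one input report
 *
 * Events generated by this driver (UHID_START, UHID_OPEN, UHID_OUTPUT,
 * UHID_GET_REPORT, ...) are delivered to the client via read() on the
 * same file descriptor, one event per call.
 */
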
struct uhid_device {
	struct mutex devlock;

	/* This flag tracks whether the HID device is usable for commands from
	 * userspace. The flag is already set before hid_add_device(), which
	 * runs in workqueue context, to allow hid_add_device() to communicate
	 * with userspace.
	 * However, if hid_add_device() fails, the flag is cleared without
	 * holding devlock.
	 * We guarantee that if @running changes from true to false while you're
	 * holding @devlock, it's still fine to access @hid.
	 */
	bool running;

	__u8 *rd_data;
	uint rd_size;

	/* When this is NULL, userspace may use UHID_CREATE/UHID_CREATE2. */
	struct hid_device *hid;
	struct uhid_event input_buf;

	wait_queue_head_t waitq;
	spinlock_t qlock;
	__u8 head;
	__u8 tail;
	struct uhid_event *outq[UHID_BUFSIZE];

	/* blocking GET_REPORT support; state changes protected by qlock */
	struct mutex report_lock;
	wait_queue_head_t report_wait;
	bool report_running;
	u32 report_id;
	u32 report_type;
	struct uhid_event report_buf;
	struct work_struct worker;
};

static struct miscdevice uhid_misc;

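/*
 * Deferred hid_add_device() call; see the comment in uhid_dev_create2()
 * for why device registration runs from a workqueue.
 */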
static void uhid_device_add_worker(struct work_struct *work)
{
	struct uhid_device *uhid = container_of(work, struct uhid_device, worker);
	int ret;

	ret = hid_add_device(uhid->hid);
	if (ret) {
		hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret);

		/* We used to call hid_destroy_device() here, but that's really
		 * messy to get right because we have to coordinate with
		 * concurrent writes from userspace that might be in the middle
		 * of using uhid->hid.
		 * Just leave uhid->hid as-is for now, and clean it up when
		 * userspace tries to close or reinitialize the uhid instance.
		 *
		 * However, we do have to clear the ->running flag and do a
		 * wakeup to make sure userspace knows that the device is gone.
		 */
		WRITE_ONCE(uhid->running, false);
		wake_up_interruptible(&uhid->report_wait);
	}
}

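/*
 * Hand @ev over to the outgoing ring buffer for delivery to userspace.
 * Must be called with qlock held; if the queue is full, the event is
 * dropped and freed.
 */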
static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
{
	__u8 newhead;

	newhead = (uhid->head + 1) % UHID_BUFSIZE;

	if (newhead != uhid->tail) {
		uhid->outq[uhid->head] = ev;
		uhid->head = newhead;
		wake_up_interruptible(&uhid->waitq);
	} else {
		hid_warn(uhid->hid, "Output queue is full\n");
		kfree(ev);
	}
}

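/* Allocate and queue an event that carries nothing but @event as its type. */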
static int uhid_queue_event(struct uhid_device *uhid, __u32 event)
{
	unsigned long flags;
	struct uhid_event *ev;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = event;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

static int uhid_hid_start(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_event *ev;
	unsigned long flags;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_START;

	if (hid->report_enum[HID_FEATURE_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_FEATURE_REPORTS;
	if (hid->report_enum[HID_OUTPUT_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_OUTPUT_REPORTS;
	if (hid->report_enum[HID_INPUT_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_INPUT_REPORTS;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

static void uhid_hid_stop(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	hid->claimed = 0;
	uhid_queue_event(uhid, UHID_STOP);
}

static int uhid_hid_open(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return uhid_queue_event(uhid, UHID_OPEN);
}

static void uhid_hid_close(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	uhid_queue_event(uhid, UHID_CLOSE);
}

static int uhid_hid_parse(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return hid_parse_report(hid, uhid->rd_data, uhid->rd_size);
}

/* Queue a {GET,SET}_REPORT request and wait up to 5s for the matching reply
 * from userspace. Must be called with report_lock held; always takes
 * ownership of @ev.
 */
static int __uhid_report_queue_and_wait(struct uhid_device *uhid,
					struct uhid_event *ev,
					__u32 *report_id)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&uhid->qlock, flags);
	*report_id = ++uhid->report_id;
	uhid->report_type = ev->type + 1;
	uhid->report_running = true;
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	ret = wait_event_interruptible_timeout(uhid->report_wait,
				!uhid->report_running || !READ_ONCE(uhid->running),
				5 * HZ);
	if (!ret || !READ_ONCE(uhid->running) || uhid->report_running)
		ret = -EIO;
	else if (ret < 0)
		ret = -ERESTARTSYS;
	else
		ret = 0;

	uhid->report_running = false;

	return ret;
}

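/*
 * Called when userspace delivers a *_REPORT_REPLY event: if it matches the
 * outstanding request, stash the reply and wake the waiter in
 * __uhid_report_queue_and_wait().
 */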
static void uhid_report_wake_up(struct uhid_device *uhid, u32 id,
				const struct uhid_event *ev)
{
	unsigned long flags;

	spin_lock_irqsave(&uhid->qlock, flags);

	/* id for old report; drop it silently */
	if (uhid->report_type != ev->type || uhid->report_id != id)
		goto unlock;
	if (!uhid->report_running)
		goto unlock;

	memcpy(&uhid->report_buf, ev, sizeof(*ev));
	uhid->report_running = false;
	wake_up_interruptible(&uhid->report_wait);

unlock:
	spin_unlock_irqrestore(&uhid->qlock, flags);
}

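/*
 * For reference, the user-space side of the GET_REPORT transaction handled
 * below looks roughly like this (a sketch only; error handling is omitted and
 * report_buf/report_len stand in for the client's own report data):
 *
 *	struct uhid_event ev, reply = { .type = UHID_GET_REPORT_REPLY };
 *
 *	read(fd, &ev, sizeof(ev));	// ev.type == UHID_GET_REPORT
 *	reply.u.get_report_reply.id = ev.u.get_report.id;
 *	reply.u.get_report_reply.err = 0;
 *	reply.u.get_report_reply.size = report_len;
 *	memcpy(reply.u.get_report_reply.data, report_buf, report_len);
 *	write(fd, &reply, sizeof(reply));
 *
 * If the reply does not arrive within the timeout above, the request fails
 * with -EIO.
 */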
static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum,
			       u8 *buf, size_t count, u8 rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_get_report_reply_req *req;
	struct uhid_event *ev;
	int ret;

	if (!READ_ONCE(uhid->running))
		return -EIO;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_GET_REPORT;
	ev->u.get_report.rnum = rnum;
	ev->u.get_report.rtype = rtype;

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret) {
		kfree(ev);
		return ret;
	}

	/* this _always_ takes ownership of @ev */
	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.get_report.id);
	if (ret)
		goto unlock;

	req = &uhid->report_buf.u.get_report_reply;
	if (req->err) {
		ret = -EIO;
	} else {
		ret = min3(count, (size_t)req->size, (size_t)UHID_DATA_MAX);
		memcpy(buf, req->data, ret);
	}

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret;
}

static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum,
			       const u8 *buf, size_t count, u8 rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_event *ev;
	int ret;

	if (!READ_ONCE(uhid->running) || count > UHID_DATA_MAX)
		return -EIO;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_SET_REPORT;
	ev->u.set_report.rnum = rnum;
	ev->u.set_report.rtype = rtype;
	ev->u.set_report.size = count;
	memcpy(ev->u.set_report.data, buf, count);

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret) {
		kfree(ev);
		return ret;
	}

	/* this _always_ takes ownership of @ev */
	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.set_report.id);
	if (ret)
		goto unlock;

	if (uhid->report_buf.u.set_report_reply.err)
		ret = -EIO;
	else
		ret = count;

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret;
}

static int uhid_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
				__u8 *buf, size_t len, unsigned char rtype,
				int reqtype)
{
	u8 u_rtype;

	switch (rtype) {
	case HID_FEATURE_REPORT:
		u_rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		u_rtype = UHID_OUTPUT_REPORT;
		break;
	case HID_INPUT_REPORT:
		u_rtype = UHID_INPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	switch (reqtype) {
	case HID_REQ_GET_REPORT:
		return uhid_hid_get_report(hid, reportnum, buf, len, u_rtype);
	case HID_REQ_SET_REPORT:
		return uhid_hid_set_report(hid, reportnum, buf, len, u_rtype);
	default:
		return -EIO;
	}
}

static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count,
			       unsigned char report_type)
{
	struct uhid_device *uhid = hid->driver_data;
	__u8 rtype;
	unsigned long flags;
	struct uhid_event *ev;

	switch (report_type) {
	case HID_FEATURE_REPORT:
		rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		rtype = UHID_OUTPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	if (count < 1 || count > UHID_DATA_MAX)
		return -EINVAL;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_OUTPUT;
	ev->u.output.size = count;
	ev->u.output.rtype = rtype;
	memcpy(ev->u.output.data, buf, count);

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return count;
}

static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf,
				  size_t count)
{
	return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT);
}

static const struct hid_ll_driver uhid_hid_driver = {
	.start = uhid_hid_start,
	.stop = uhid_hid_stop,
	.open = uhid_hid_open,
	.close = uhid_hid_close,
	.parse = uhid_hid_parse,
	.raw_request = uhid_hid_raw_request,
	.output_report = uhid_hid_output_report,
	.max_buffer_size = UHID_DATA_MAX,
};

#ifdef CONFIG_COMPAT

/*
 * Apparently we haven't stepped on these rakes enough times yet: the legacy
 * UHID_CREATE request embeds a raw user-space pointer, so its layout differs
 * between 32-bit and 64-bit ABIs and needs an explicit compat variant.
 */
struct uhid_create_req_compat {
	__u8 name[128];
	__u8 phys[64];
	__u8 uniq[64];

	compat_uptr_t rd_data;
	__u16 rd_size;

	__u16 bus;
	__u32 vendor;
	__u32 product;
	__u32 version;
	__u32 country;
} __attribute__((__packed__));

static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (in_compat_syscall()) {
		u32 type;

		if (get_user(type, buffer))
			return -EFAULT;

		if (type == UHID_CREATE) {
			/*
			 * This is our messed up request with compat pointer.
			 * It is largish (more than 256 bytes) so we better
			 * allocate it from the heap.
			 */
			struct uhid_create_req_compat *compat;

			compat = kzalloc(sizeof(*compat), GFP_KERNEL);
			if (!compat)
				return -ENOMEM;

			buffer += sizeof(type);
			len -= sizeof(type);
			if (copy_from_user(compat, buffer,
					   min(len, sizeof(*compat)))) {
				kfree(compat);
				return -EFAULT;
			}

			/* Shuffle the data over to proper structure */
			event->type = type;

			memcpy(event->u.create.name, compat->name,
				sizeof(compat->name));
			memcpy(event->u.create.phys, compat->phys,
				sizeof(compat->phys));
			memcpy(event->u.create.uniq, compat->uniq,
				sizeof(compat->uniq));

			event->u.create.rd_data = compat_ptr(compat->rd_data);
			event->u.create.rd_size = compat->rd_size;

			event->u.create.bus = compat->bus;
			event->u.create.vendor = compat->vendor;
			event->u.create.product = compat->product;
			event->u.create.version = compat->version;
			event->u.create.country = compat->country;

			kfree(compat);
			return 0;
		}
		/* All others can be copied directly */
	}

	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#else
static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#endif

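/*
 * Handle UHID_CREATE2: duplicate the report descriptor, allocate and set up
 * the hid_device, and defer the actual hid_add_device() call to the worker.
 */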
static int uhid_dev_create2(struct uhid_device *uhid,
			    const struct uhid_event *ev)
{
	struct hid_device *hid;
	size_t rd_size;
	void *rd_data;
	int ret;

	if (uhid->hid)
		return -EALREADY;

	rd_size = ev->u.create2.rd_size;
	if (rd_size <= 0 || rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;

	rd_data = kmemdup(ev->u.create2.rd_data, rd_size, GFP_KERNEL);
	if (!rd_data)
		return -ENOMEM;

	uhid->rd_size = rd_size;
	uhid->rd_data = rd_data;

	hid = hid_allocate_device();
	if (IS_ERR(hid)) {
		ret = PTR_ERR(hid);
		goto err_free;
	}

	BUILD_BUG_ON(sizeof(hid->name) != sizeof(ev->u.create2.name));
	strscpy(hid->name, ev->u.create2.name, sizeof(hid->name));
	BUILD_BUG_ON(sizeof(hid->phys) != sizeof(ev->u.create2.phys));
	strscpy(hid->phys, ev->u.create2.phys, sizeof(hid->phys));
	BUILD_BUG_ON(sizeof(hid->uniq) != sizeof(ev->u.create2.uniq));
	strscpy(hid->uniq, ev->u.create2.uniq, sizeof(hid->uniq));

	hid->ll_driver = &uhid_hid_driver;
	hid->bus = ev->u.create2.bus;
	hid->vendor = ev->u.create2.vendor;
	hid->product = ev->u.create2.product;
	hid->version = ev->u.create2.version;
	hid->country = ev->u.create2.country;
	hid->driver_data = uhid;
	hid->dev.parent = uhid_misc.this_device;

	uhid->hid = hid;
	uhid->running = true;

	/* Adding the HID device is done through a worker so that HID drivers
	 * which issue feature requests during .probe keep working; otherwise
	 * they would block on devlock, which is held by uhid_char_write().
	 */
	schedule_work(&uhid->worker);

	return 0;

err_free:
	kfree(uhid->rd_data);
	uhid->rd_data = NULL;
	uhid->rd_size = 0;
	return ret;
}

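/*
 * Handle the legacy UHID_CREATE request: pull the report descriptor in from
 * the user-space pointer, rewrite the event as UHID_CREATE2 in place and
 * hand it to uhid_dev_create2().
 */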
static int uhid_dev_create(struct uhid_device *uhid,
			   struct uhid_event *ev)
{
	struct uhid_create_req orig;

	orig = ev->u.create;

	if (orig.rd_size <= 0 || orig.rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;
	if (copy_from_user(&ev->u.create2.rd_data, orig.rd_data, orig.rd_size))
		return -EFAULT;

	memcpy(ev->u.create2.name, orig.name, sizeof(orig.name));
	memcpy(ev->u.create2.phys, orig.phys, sizeof(orig.phys));
	memcpy(ev->u.create2.uniq, orig.uniq, sizeof(orig.uniq));
	ev->u.create2.rd_size = orig.rd_size;
	ev->u.create2.bus = orig.bus;
	ev->u.create2.vendor = orig.vendor;
	ev->u.create2.product = orig.product;
	ev->u.create2.version = orig.version;
	ev->u.create2.country = orig.country;

	return uhid_dev_create2(uhid, ev);
}

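/*
 * Tear the HID device down again; called for UHID_DESTROY and when the
 * character device is finally closed.
 */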
static int uhid_dev_destroy(struct uhid_device *uhid)
{
	if (!uhid->hid)
		return -EINVAL;

	WRITE_ONCE(uhid->running, false);
	wake_up_interruptible(&uhid->report_wait);

	cancel_work_sync(&uhid->worker);

	hid_destroy_device(uhid->hid);
	uhid->hid = NULL;
	kfree(uhid->rd_data);

	return 0;
}

static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!READ_ONCE(uhid->running))
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data,
			 min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0);

	return 0;
}

static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!READ_ONCE(uhid->running))
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input2.data,
			 min_t(size_t, ev->u.input2.size, UHID_DATA_MAX), 0);

	return 0;
}

static int uhid_dev_get_report_reply(struct uhid_device *uhid,
				     struct uhid_event *ev)
{
	if (!READ_ONCE(uhid->running))
		return -EINVAL;

	uhid_report_wake_up(uhid, ev->u.get_report_reply.id, ev);
	return 0;
}

static int uhid_dev_set_report_reply(struct uhid_device *uhid,
				     struct uhid_event *ev)
{
	if (!READ_ONCE(uhid->running))
		return -EINVAL;

	uhid_report_wake_up(uhid, ev->u.set_report_reply.id, ev);
	return 0;
}

static int uhid_char_open(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid;

	uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
	if (!uhid)
		return -ENOMEM;

	mutex_init(&uhid->devlock);
	mutex_init(&uhid->report_lock);
	spin_lock_init(&uhid->qlock);
	init_waitqueue_head(&uhid->waitq);
	init_waitqueue_head(&uhid->report_wait);
	uhid->running = false;
	INIT_WORK(&uhid->worker, uhid_device_add_worker);

	file->private_data = uhid;
	stream_open(inode, file);

	return 0;
}

static int uhid_char_release(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid = file->private_data;
	unsigned int i;

	uhid_dev_destroy(uhid);

	for (i = 0; i < UHID_BUFSIZE; ++i)
		kfree(uhid->outq[i]);

	kfree(uhid);

	return 0;
}

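/*
 * Deliver one queued event per read(); the ring-buffer tail is only advanced
 * once the event has been copied to userspace successfully.
 */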
static ssize_t uhid_char_read(struct file *file, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	unsigned long flags;
	size_t len;

	/* they need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

try_again:
	if (file->f_flags & O_NONBLOCK) {
		if (uhid->head == uhid->tail)
			return -EAGAIN;
	} else {
		ret = wait_event_interruptible(uhid->waitq,
						uhid->head != uhid->tail);
		if (ret)
			return ret;
	}

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	if (uhid->head == uhid->tail) {
		mutex_unlock(&uhid->devlock);
		goto try_again;
	} else {
		len = min(count, sizeof(**uhid->outq));
		if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) {
			ret = -EFAULT;
		} else {
			kfree(uhid->outq[uhid->tail]);
			uhid->outq[uhid->tail] = NULL;

			spin_lock_irqsave(&uhid->qlock, flags);
			uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE;
			spin_unlock_irqrestore(&uhid->qlock, flags);
		}
	}

	mutex_unlock(&uhid->devlock);
	return ret ? ret : len;
}

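/*
 * Each write() carries exactly one uhid_event, which is dispatched according
 * to its type while devlock is held.
 */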
static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	size_t len;

	/* we need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
	len = min(count, sizeof(uhid->input_buf));

	ret = uhid_event_from_user(buffer, len, &uhid->input_buf);
	if (ret)
		goto unlock;

	switch (uhid->input_buf.type) {
	case UHID_CREATE:
		/*
		 * 'struct uhid_create_req' contains a __user pointer which is
		 * copied from, so it's unsafe to allow this with elevated
		 * privileges (e.g. from a setuid binary) or via kernel_write().
		 */
		if (file->f_cred != current_cred()) {
			pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n",
				    task_tgid_vnr(current), current->comm);
			ret = -EACCES;
			goto unlock;
		}
		ret = uhid_dev_create(uhid, &uhid->input_buf);
		break;
	case UHID_CREATE2:
		ret = uhid_dev_create2(uhid, &uhid->input_buf);
		break;
	case UHID_DESTROY:
		ret = uhid_dev_destroy(uhid);
		break;
	case UHID_INPUT:
		ret = uhid_dev_input(uhid, &uhid->input_buf);
		break;
	case UHID_INPUT2:
		ret = uhid_dev_input2(uhid, &uhid->input_buf);
		break;
	case UHID_GET_REPORT_REPLY:
		ret = uhid_dev_get_report_reply(uhid, &uhid->input_buf);
		break;
	case UHID_SET_REPORT_REPLY:
		ret = uhid_dev_set_report_reply(uhid, &uhid->input_buf);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

unlock:
	mutex_unlock(&uhid->devlock);

	/* return "count" not "len" to not confuse the caller */
	return ret ? ret : count;
}

static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
{
	struct uhid_device *uhid = file->private_data;
	__poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */

	poll_wait(file, &uhid->waitq, wait);

	if (uhid->head != uhid->tail)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

static const struct file_operations uhid_fops = {
	.owner		= THIS_MODULE,
	.open		= uhid_char_open,
	.release	= uhid_char_release,
	.read		= uhid_char_read,
	.write		= uhid_char_write,
	.poll		= uhid_char_poll,
	.llseek		= no_llseek,
};

static struct miscdevice uhid_misc = {
	.fops		= &uhid_fops,
	.minor		= UHID_MINOR,
	.name		= UHID_NAME,
};
module_misc_device(uhid_misc);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
MODULE_ALIAS_MISCDEV(UHID_MINOR);
MODULE_ALIAS("devname:" UHID_NAME);