// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 */

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/miscdevice.h>

#include "vchiq_core.h"
#include "vchiq_ioctl.h"
#include "vchiq_arm.h"
#include "vchiq_debugfs.h"

static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

static_assert(ARRAY_SIZE(ioctl_names) == (VCHIQ_IOC_MAX + 1));

static void
user_service_free(void *userdata)
{
	kfree(userdata);
}

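/*
 * Release the service reference held while a close was pending and wake
 * any thread blocked in the CLOSE_SERVICE/REMOVE_SERVICE ioctl.
 */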
static void close_delivered(struct user_service *user_service)
{
	dev_dbg(user_service->service->state->dev,
		"arm: (handle=%x)\n", user_service->service->handle);

	if (user_service->close_pending) {
		/* Allow the underlying service to be culled */
		vchiq_service_put(user_service->service);

		/* Wake the user-thread blocked in close_ or remove_service */
		complete(&user_service->close_event);

		user_service->close_pending = 0;
	}
}

struct vchiq_io_copy_callback_context {
	struct vchiq_element *element;
	size_t element_offset;
	unsigned long elements_to_go;
};

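/*
 * Copy callback handed to vchiq_queue_message(): walks the caller's element
 * array, copying up to maxsize bytes from user space into the message slot
 * and remembering its position between calls.
 */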
static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
					   size_t offset, size_t maxsize)
{
	struct vchiq_io_copy_callback_context *cc = context;
	size_t total_bytes_copied = 0;
	size_t bytes_this_round;

	while (total_bytes_copied < maxsize) {
		if (!cc->elements_to_go)
			return total_bytes_copied;

		if (!cc->element->size) {
			cc->elements_to_go--;
			cc->element++;
			cc->element_offset = 0;
			continue;
		}

		bytes_this_round = min(cc->element->size - cc->element_offset,
				       maxsize - total_bytes_copied);

		if (copy_from_user(dest + total_bytes_copied,
				   cc->element->data + cc->element_offset,
				   bytes_this_round))
			return -EFAULT;

		cc->element_offset += bytes_this_round;
		total_bytes_copied += bytes_this_round;

		if (cc->element_offset == cc->element->size) {
			cc->elements_to_go--;
			cc->element++;
			cc->element_offset = 0;
		}
	}

	return maxsize;
}

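/*
 * Validate the element array and queue it as a single message via the copy
 * callback above, translating core status codes to the errno values expected
 * by the ioctl caller.
 */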
static int
vchiq_ioc_queue_message(struct vchiq_instance *instance, unsigned int handle,
			struct vchiq_element *elements, unsigned long count)
{
	struct vchiq_io_copy_callback_context context;
	int status = 0;
	unsigned long i;
	size_t total_size = 0;

	context.element = elements;
	context.element_offset = 0;
	context.elements_to_go = count;

	for (i = 0; i < count; i++) {
		if (!elements[i].data && elements[i].size != 0)
			return -EFAULT;

		total_size += elements[i].size;
	}

	status = vchiq_queue_message(instance, handle, vchiq_ioc_copy_element_data,
				     &context, total_size);

	if (status == -EINVAL)
		return -EIO;
	else if (status == -EAGAIN)
		return -EINTR;
	return 0;
}

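/*
 * Create (or open) a service on behalf of user space, allocating the
 * per-service user_service bookkeeping used by the completion and
 * message-dequeue paths.
 */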
static int vchiq_ioc_create_service(struct vchiq_instance *instance,
				    struct vchiq_create_service *args)
{
	struct user_service *user_service = NULL;
	struct vchiq_service *service;
	int status = 0;
	struct vchiq_service_params_kernel params;
	int srvstate;

	if (args->is_open && !instance->connected)
		return -ENOTCONN;

	user_service = kmalloc(sizeof(*user_service), GFP_KERNEL);
	if (!user_service)
		return -ENOMEM;

	if (args->is_open) {
		srvstate = VCHIQ_SRVSTATE_OPENING;
	} else {
		srvstate = instance->connected ?
			 VCHIQ_SRVSTATE_LISTENING : VCHIQ_SRVSTATE_HIDDEN;
	}

	params = (struct vchiq_service_params_kernel) {
		.fourcc   = args->params.fourcc,
		.callback = service_callback,
		.userdata = user_service,
		.version  = args->params.version,
		.version_min = args->params.version_min,
	};
	service = vchiq_add_service_internal(instance->state, &params,
					     srvstate, instance,
					     user_service_free);
	if (!service) {
		kfree(user_service);
		return -EEXIST;
	}

	user_service->service = service;
	user_service->userdata = args->params.userdata;
	user_service->instance = instance;
	user_service->is_vchi = (args->is_vchi != 0);
	user_service->dequeue_pending = 0;
	user_service->close_pending = 0;
	user_service->message_available_pos = instance->completion_remove - 1;
	user_service->msg_insert = 0;
	user_service->msg_remove = 0;
	init_completion(&user_service->insert_event);
	init_completion(&user_service->remove_event);
	init_completion(&user_service->close_event);

	if (args->is_open) {
		status = vchiq_open_service_internal(service, instance->pid);
		if (status) {
			vchiq_remove_service(instance, service->handle);
			return (status == -EAGAIN) ?
				-EINTR : -EIO;
		}
	}
	args->handle = service->handle;

	return 0;
}

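/*
 * DEQUEUE_MESSAGE ioctl: remove the next message from a VCHI-style service's
 * queue, optionally blocking until one arrives, and copy its payload to the
 * user buffer.
 */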
static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
				     struct vchiq_dequeue_message *args)
{
	struct user_service *user_service;
	struct vchiq_service *service;
	struct vchiq_header *header;
	int ret;

	DEBUG_INITIALISE(g_state.local);
	DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
	service = find_service_for_instance(instance, args->handle);
	if (!service)
		return -EINVAL;

	user_service = (struct user_service *)service->base.userdata;
	if (user_service->is_vchi == 0) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock(&msg_queue_spinlock);
	if (user_service->msg_remove == user_service->msg_insert) {
		if (!args->blocking) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
			ret = -EWOULDBLOCK;
			goto out;
		}
		user_service->dequeue_pending = 1;
		ret = 0;
		do {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
			if (wait_for_completion_interruptible(&user_service->insert_event)) {
				dev_dbg(service->state->dev, "arm: DEQUEUE_MESSAGE interrupted\n");
				ret = -EINTR;
				break;
			}
			spin_lock(&msg_queue_spinlock);
		} while (user_service->msg_remove == user_service->msg_insert);

		if (ret)
			goto out;
	}

	if (WARN_ON_ONCE((int)(user_service->msg_insert -
			 user_service->msg_remove) < 0)) {
		spin_unlock(&msg_queue_spinlock);
		ret = -EINVAL;
		goto out;
	}

	header = user_service->msg_queue[user_service->msg_remove &
		(MSG_QUEUE_SIZE - 1)];
	user_service->msg_remove++;
	spin_unlock(&msg_queue_spinlock);

	complete(&user_service->remove_event);
	if (!header) {
		ret = -ENOTCONN;
	} else if (header->size <= args->bufsize) {
		/* Copy to user space if msgbuf is not NULL */
		if (!args->buf || (copy_to_user(args->buf, header->data, header->size) == 0)) {
			ret = header->size;
			vchiq_release_message(instance, service->handle, header);
		} else {
			ret = -EFAULT;
		}
	} else {
		dev_err(service->state->dev,
			"arm: header %pK: bufsize %x < size %x\n",
			header, args->bufsize, header->size);
		WARN(1, "invalid size\n");
		ret = -EMSGSIZE;
	}
	DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
out:
	vchiq_service_put(service);
	return ret;
}

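/*
 * QUEUE_BULK_TRANSMIT/RECEIVE ioctls: start a bulk transfer. In BLOCKING mode
 * a bulk_waiter is allocated (or, in WAITING mode, looked up by pid) so that
 * an interrupted wait can be resumed by a later call from the same thread.
 */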
static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
				      struct vchiq_queue_bulk_transfer *args,
				      enum vchiq_bulk_dir dir,
				      enum vchiq_bulk_mode __user *mode)
{
	struct vchiq_service *service;
	struct bulk_waiter_node *waiter = NULL, *iter;
	void *userdata;
	int status = 0;
	int ret;

	service = find_service_for_instance(instance, args->handle);
	if (!service)
		return -EINVAL;

	if (args->mode == VCHIQ_BULK_MODE_BLOCKING) {
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter) {
			ret = -ENOMEM;
			goto out;
		}

		userdata = &waiter->bulk_waiter;
	} else if (args->mode == VCHIQ_BULK_MODE_WAITING) {
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_for_each_entry(iter, &instance->bulk_waiter_list,
				    list) {
			if (iter->pid == current->pid) {
				list_del(&iter->list);
				waiter = iter;
				break;
			}
		}
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		if (!waiter) {
			dev_err(service->state->dev,
				"arm: no bulk_waiter found for pid %d\n", current->pid);
			ret = -ESRCH;
			goto out;
		}
		dev_dbg(service->state->dev, "arm: found bulk_waiter %pK for pid %d\n",
			waiter, current->pid);
		userdata = &waiter->bulk_waiter;
	} else {
		userdata = args->userdata;
	}

	status = vchiq_bulk_transfer(instance, args->handle, NULL, args->data, args->size,
				     userdata, args->mode, dir);

	if (!waiter) {
		ret = 0;
		goto out;
	}

	if ((status != -EAGAIN) || fatal_signal_pending(current) ||
	    !waiter->bulk_waiter.bulk) {
		if (waiter->bulk_waiter.bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&bulk_waiter_spinlock);
			waiter->bulk_waiter.bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
		ret = 0;
	} else {
		const enum vchiq_bulk_mode mode_waiting =
			VCHIQ_BULK_MODE_WAITING;
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		dev_dbg(service->state->dev, "arm: saved bulk_waiter %pK for pid %d\n",
			waiter, current->pid);

		ret = put_user(mode_waiting, mode);
	}
out:
	vchiq_service_put(service);
	if (ret)
		return ret;
	else if (status == -EINVAL)
		return -EIO;
	else if (status == -EAGAIN)
		return -EINTR;
	return 0;
}

/* read a user pointer value from an array of pointers in user space */
static inline int vchiq_get_user_ptr(void __user **buf, void __user *ubuf, int index)
{
	int ret;

	if (in_compat_syscall()) {
		compat_uptr_t ptr32;
		compat_uptr_t __user *uptr = ubuf;

		ret = get_user(ptr32, uptr + index);
		if (ret)
			return ret;

		*buf = compat_ptr(ptr32);
	} else {
		uintptr_t ptr, __user *uptr = ubuf;

		ret = get_user(ptr, uptr + index);

		if (ret)
			return ret;

		*buf = (void __user *)ptr;
	}

	return 0;
}

struct vchiq_completion_data32 {
	enum vchiq_reason reason;
	compat_uptr_t header;
	compat_uptr_t service_userdata;
	compat_uptr_t bulk_userdata;
};

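/*
 * Write one completion record to the user-supplied array, using the 32-bit
 * layout when called from a compat syscall.
 */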
static int vchiq_put_completion(struct vchiq_completion_data __user *buf,
				struct vchiq_completion_data *completion,
				int index)
{
	struct vchiq_completion_data32 __user *buf32 = (void __user *)buf;

	if (in_compat_syscall()) {
		struct vchiq_completion_data32 tmp = {
			.reason		  = completion->reason,
			.header		  = ptr_to_compat(completion->header),
			.service_userdata = ptr_to_compat(completion->service_userdata),
			.bulk_userdata	  = ptr_to_compat(completion->bulk_userdata),
		};
		if (copy_to_user(&buf32[index], &tmp, sizeof(tmp)))
			return -EFAULT;
	} else {
		if (copy_to_user(&buf[index], completion, sizeof(*completion)))
			return -EFAULT;
	}

	return 0;
}

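/*
 * AWAIT_COMPLETION ioctl: wait for service callbacks, then drain as many
 * completion records (and their message payloads) to user space as the
 * caller's buffers allow.
 */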
static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
				      struct vchiq_await_completion *args,
				      int __user *msgbufcountp)
{
	int msgbufcount;
	int remove;
	int ret;

	DEBUG_INITIALISE(g_state.local);

	DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	if (!instance->connected)
		return -ENOTCONN;

	mutex_lock(&instance->completion_mutex);

	DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	while ((instance->completion_remove == instance->completion_insert) && !instance->closing) {
		int rc;

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		mutex_unlock(&instance->completion_mutex);
		rc = wait_for_completion_interruptible(&instance->insert_event);
		mutex_lock(&instance->completion_mutex);
		if (rc) {
			DEBUG_TRACE(AWAIT_COMPLETION_LINE);
			dev_dbg(instance->state->dev, "arm: AWAIT_COMPLETION interrupted\n");
			ret = -EINTR;
			goto out;
		}
	}
	DEBUG_TRACE(AWAIT_COMPLETION_LINE);

	msgbufcount = args->msgbufcount;
	remove = instance->completion_remove;

	for (ret = 0; ret < args->count; ret++) {
		struct vchiq_completion_data_kernel *completion;
		struct vchiq_completion_data user_completion;
		struct vchiq_service *service;
		struct user_service *user_service;
		struct vchiq_header *header;

		if (remove == instance->completion_insert)
			break;

		completion = &instance->completions[remove & (MAX_COMPLETIONS - 1)];

		/*
		 * A read memory barrier is needed to stop
		 * prefetch of a stale completion record
		 */
		rmb();

		service = completion->service_userdata;
		user_service = service->base.userdata;

		memset(&user_completion, 0, sizeof(user_completion));
		user_completion = (struct vchiq_completion_data) {
			.reason = completion->reason,
			.service_userdata = user_service->userdata,
		};

		header = completion->header;
		if (header) {
			void __user *msgbuf;
			int msglen;

			msglen = header->size + sizeof(struct vchiq_header);
			/* This must be a VCHIQ-style service */
			if (args->msgbufsize < msglen) {
				dev_err(service->state->dev,
					"arm: header %pK: msgbufsize %x < msglen %x\n",
					header, args->msgbufsize, msglen);
				WARN(1, "invalid message size\n");
				if (ret == 0)
					ret = -EMSGSIZE;
				break;
			}
			if (msgbufcount <= 0)
				/* Stall here for lack of a buffer for the message. */
				break;
			/* Get the pointer from user space */
			msgbufcount--;
			if (vchiq_get_user_ptr(&msgbuf, args->msgbufs,
					       msgbufcount)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}

			/* Copy the message to user space */
			if (copy_to_user(msgbuf, header, msglen)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}

			/* Now it has been copied, the message can be released. */
			vchiq_release_message(instance, service->handle, header);

			/* The completion must point to the msgbuf. */
			user_completion.header = msgbuf;
		}

		if ((completion->reason == VCHIQ_SERVICE_CLOSED) &&
		    !instance->use_close_delivered)
			vchiq_service_put(service);

		/*
		 * FIXME: address space mismatch, does bulk_userdata
		 * actually point to user or kernel memory?
		 */
		user_completion.bulk_userdata = completion->bulk_userdata;

		if (vchiq_put_completion(args->buf, &user_completion, ret)) {
			if (ret == 0)
				ret = -EFAULT;
			break;
		}

		/*
		 * Ensure that the above copy has completed
		 * before advancing the remove pointer.
		 */
		mb();
		remove++;
		instance->completion_remove = remove;
	}

	if (msgbufcount != args->msgbufcount) {
		if (put_user(msgbufcount, msgbufcountp))
			ret = -EFAULT;
	}
out:
	if (ret)
		complete(&instance->remove_event);
	mutex_unlock(&instance->completion_mutex);
	DEBUG_TRACE(AWAIT_COMPLETION_LINE);

	return ret;
}

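/*
 * Main ioctl entry point for the native ABI. Common status-to-errno
 * translation for the simple commands happens at the end of this function.
 */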
static long
vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct vchiq_instance *instance = file->private_data;
	int status = 0;
	struct vchiq_service *service = NULL;
	long ret = 0;
	int i, rc;

	dev_dbg(instance->state->dev, "arm: instance %pK, cmd %s, arg %lx\n", instance,
		((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
		ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);

	switch (cmd) {
	case VCHIQ_IOC_SHUTDOWN:
		if (!instance->connected)
			break;

		/* Remove all services */
		i = 0;
		while ((service = next_service_by_instance(instance->state,
							   instance, &i))) {
			status = vchiq_remove_service(instance, service->handle);
			vchiq_service_put(service);
			if (status)
				break;
		}
		service = NULL;

		if (!status) {
			/* Wake the completion thread and ask it to exit */
			instance->closing = 1;
			complete(&instance->insert_event);
		}

		break;

	case VCHIQ_IOC_CONNECT:
		if (instance->connected) {
			ret = -EINVAL;
			break;
		}
		rc = mutex_lock_killable(&instance->state->mutex);
		if (rc) {
			dev_err(instance->state->dev,
				"arm: vchiq: connect: could not lock mutex for state %d: %d\n",
				instance->state->id, rc);
			ret = -EINTR;
			break;
		}
		status = vchiq_connect_internal(instance->state, instance);
		mutex_unlock(&instance->state->mutex);

		if (!status)
			instance->connected = 1;
		else
			dev_err(instance->state->dev,
				"arm: vchiq: could not connect: %d\n", status);
		break;

	case VCHIQ_IOC_CREATE_SERVICE: {
		struct vchiq_create_service __user *argp;
		struct vchiq_create_service args;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_create_service(instance, &args);
		if (ret < 0)
			break;

		if (put_user(args.handle, &argp->handle)) {
			vchiq_remove_service(instance, args.handle);
			ret = -EFAULT;
		}
	} break;

	case VCHIQ_IOC_CLOSE_SERVICE:
	case VCHIQ_IOC_REMOVE_SERVICE: {
		unsigned int handle = (unsigned int)arg;
		struct user_service *user_service;

		service = find_service_for_instance(instance, handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		user_service = service->base.userdata;

		/*
		 * close_pending is false on first entry, and when the
		 * wait in vchiq_close_service has been interrupted.
		 */
		if (!user_service->close_pending) {
			status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ?
				 vchiq_close_service(instance, service->handle) :
				 vchiq_remove_service(instance, service->handle);
			if (status)
				break;
		}

		/*
		 * close_pending is true once the underlying service
		 * has been closed until the client library calls the
		 * CLOSE_DELIVERED ioctl, signalling close_event.
		 */
		if (user_service->close_pending &&
		    wait_for_completion_interruptible(&user_service->close_event))
			status = -EAGAIN;
		break;
	}

	case VCHIQ_IOC_USE_SERVICE:
	case VCHIQ_IOC_RELEASE_SERVICE:	{
		unsigned int handle = (unsigned int)arg;

		service = find_service_for_instance(instance, handle);
		if (service) {
			ret = (cmd == VCHIQ_IOC_USE_SERVICE) ?
				vchiq_use_service_internal(service) :
				vchiq_release_service_internal(service);
			if (ret) {
				dev_err(instance->state->dev,
					"suspend: cmd %s returned error %ld for service %p4cc:%03d\n",
					(cmd == VCHIQ_IOC_USE_SERVICE) ?
					"VCHIQ_IOC_USE_SERVICE" :
					"VCHIQ_IOC_RELEASE_SERVICE",
					ret, &service->base.fourcc,
					service->client_id);
			}
		} else {
			ret = -EINVAL;
		}
	} break;

	case VCHIQ_IOC_QUEUE_MESSAGE: {
		struct vchiq_queue_message args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);

		if (service && (args.count <= MAX_ELEMENTS)) {
			/* Copy elements into kernel space */
			struct vchiq_element elements[MAX_ELEMENTS];

			if (copy_from_user(elements, args.elements,
					   args.count * sizeof(struct vchiq_element)) == 0)
				ret = vchiq_ioc_queue_message(instance, args.handle, elements,
							      args.count);
			else
				ret = -EFAULT;
		} else {
			ret = -EINVAL;
		}
	} break;

	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
		struct vchiq_queue_bulk_transfer args;
		struct vchiq_queue_bulk_transfer __user *argp;

		enum vchiq_bulk_dir dir =
			(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
			VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_irq_queue_bulk_tx_rx(instance, &args,
						 dir, &argp->mode);
	} break;

	case VCHIQ_IOC_AWAIT_COMPLETION: {
		struct vchiq_await_completion args;
		struct vchiq_await_completion __user *argp;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_await_completion(instance, &args,
						 &argp->msgbufcount);
	} break;

	case VCHIQ_IOC_DEQUEUE_MESSAGE: {
		struct vchiq_dequeue_message args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_dequeue_message(instance, &args);
	} break;

	case VCHIQ_IOC_GET_CLIENT_ID: {
		unsigned int handle = (unsigned int)arg;

		ret = vchiq_get_client_id(instance, handle);
	} break;

	case VCHIQ_IOC_GET_CONFIG: {
		struct vchiq_get_config args;
		struct vchiq_config config;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}
		if (args.config_size > sizeof(config)) {
			ret = -EINVAL;
			break;
		}

		vchiq_get_config(&config);
		if (copy_to_user(args.pconfig, &config, args.config_size)) {
			ret = -EFAULT;
			break;
		}
	} break;

	case VCHIQ_IOC_SET_SERVICE_OPTION: {
		struct vchiq_set_service_option args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		ret = vchiq_set_service_option(instance, args.handle, args.option,
					       args.value);
	} break;

	case VCHIQ_IOC_LIB_VERSION: {
		unsigned int lib_version = (unsigned int)arg;

		if (lib_version < VCHIQ_VERSION_MIN)
			ret = -EINVAL;
		else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
			instance->use_close_delivered = 1;
	} break;

	case VCHIQ_IOC_CLOSE_DELIVERED: {
		unsigned int handle = (unsigned int)arg;

		service = find_closed_service_for_instance(instance, handle);
		if (service) {
			struct user_service *user_service =
				(struct user_service *)service->base.userdata;
			close_delivered(user_service);
		} else {
			ret = -EINVAL;
		}
	} break;

	default:
		ret = -ENOTTY;
		break;
	}

	if (service)
		vchiq_service_put(service);

	if (ret == 0) {
		if (status == -EINVAL)
			ret = -EIO;
		else if (status == -EAGAIN)
			ret = -EINTR;
	}

	if (!status && (ret < 0) && (ret != -EINTR) && (ret != -EWOULDBLOCK)) {
		dev_dbg(instance->state->dev,
			"arm: ioctl instance %pK, cmd %s -> status %d, %ld\n",
			instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
			ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
	} else {
		dev_dbg(instance->state->dev,
			"arm: ioctl instance %pK, cmd %s -> status %d, %ld\n",
			instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
			ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
	}

	return ret;
}

#if defined(CONFIG_COMPAT)

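/*
 * 32-bit compat layer: each *32 structure mirrors its native counterpart,
 * with user pointers carried as compat_uptr_t.
 */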
struct vchiq_service_params32 {
	int fourcc;
	compat_uptr_t callback;
	compat_uptr_t userdata;
	short version; /* Increment for non-trivial changes */
	short version_min; /* Update for incompatible changes */
};

struct vchiq_create_service32 {
	struct vchiq_service_params32 params;
	int is_open;
	int is_vchi;
	unsigned int handle; /* OUT */
};

#define VCHIQ_IOC_CREATE_SERVICE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)

static long
vchiq_compat_ioctl_create_service(struct file *file, unsigned int cmd,
				  struct vchiq_create_service32 __user *ptrargs32)
{
	struct vchiq_create_service args;
	struct vchiq_create_service32 args32;
	struct vchiq_instance *instance = file->private_data;
	long ret;

	if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
		return -EFAULT;

	args = (struct vchiq_create_service) {
		.params = {
			.fourcc	     = args32.params.fourcc,
			.callback    = compat_ptr(args32.params.callback),
			.userdata    = compat_ptr(args32.params.userdata),
			.version     = args32.params.version,
			.version_min = args32.params.version_min,
		},
		.is_open = args32.is_open,
		.is_vchi = args32.is_vchi,
		.handle  = args32.handle,
	};

	ret = vchiq_ioc_create_service(instance, &args);
	if (ret < 0)
		return ret;

	if (put_user(args.handle, &ptrargs32->handle)) {
		vchiq_remove_service(instance, args.handle);
		return -EFAULT;
	}

	return 0;
}

struct vchiq_element32 {
	compat_uptr_t data;
	unsigned int size;
};

struct vchiq_queue_message32 {
	unsigned int handle;
	unsigned int count;
	compat_uptr_t elements;
};

#define VCHIQ_IOC_QUEUE_MESSAGE32 \
	_IOW(VCHIQ_IOC_MAGIC,  4, struct vchiq_queue_message32)

static long
vchiq_compat_ioctl_queue_message(struct file *file,
				 unsigned int cmd,
				 struct vchiq_queue_message32 __user *arg)
{
	struct vchiq_queue_message args;
	struct vchiq_queue_message32 args32;
	struct vchiq_service *service;
	struct vchiq_instance *instance = file->private_data;
	int ret;

	if (copy_from_user(&args32, arg, sizeof(args32)))
		return -EFAULT;

	args = (struct vchiq_queue_message) {
		.handle   = args32.handle,
		.count    = args32.count,
		.elements = compat_ptr(args32.elements),
	};

	if (args32.count > MAX_ELEMENTS)
		return -EINVAL;

	service = find_service_for_instance(instance, args.handle);
	if (!service)
		return -EINVAL;

	if (args32.elements && args32.count) {
		struct vchiq_element32 element32[MAX_ELEMENTS];
		struct vchiq_element elements[MAX_ELEMENTS];
		unsigned int count;

		if (copy_from_user(&element32, args.elements,
				   sizeof(element32))) {
			vchiq_service_put(service);
			return -EFAULT;
		}

		for (count = 0; count < args32.count; count++) {
			elements[count].data =
				compat_ptr(element32[count].data);
			elements[count].size = element32[count].size;
		}
		ret = vchiq_ioc_queue_message(instance, args.handle, elements,
					      args.count);
	} else {
		ret = -EINVAL;
	}
	vchiq_service_put(service);

	return ret;
}

struct vchiq_queue_bulk_transfer32 {
	unsigned int handle;
	compat_uptr_t data;
	unsigned int size;
	compat_uptr_t userdata;
	enum vchiq_bulk_mode mode;
};

#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
	_IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
#define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)

static long
vchiq_compat_ioctl_queue_bulk(struct file *file,
			      unsigned int cmd,
			      struct vchiq_queue_bulk_transfer32 __user *argp)
{
	struct vchiq_queue_bulk_transfer32 args32;
	struct vchiq_queue_bulk_transfer args;
	enum vchiq_bulk_dir dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32) ?
				  VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;

	if (copy_from_user(&args32, argp, sizeof(args32)))
		return -EFAULT;

	args = (struct vchiq_queue_bulk_transfer) {
		.handle   = args32.handle,
		.data	  = compat_ptr(args32.data),
		.size	  = args32.size,
		.userdata = compat_ptr(args32.userdata),
		.mode	  = args32.mode,
	};

	return vchiq_irq_queue_bulk_tx_rx(file->private_data, &args,
					  dir, &argp->mode);
}

struct vchiq_await_completion32 {
	unsigned int count;
	compat_uptr_t buf;
	unsigned int msgbufsize;
	unsigned int msgbufcount; /* IN/OUT */
	compat_uptr_t msgbufs;
};

#define VCHIQ_IOC_AWAIT_COMPLETION32 \
	_IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)

static long
vchiq_compat_ioctl_await_completion(struct file *file,
				    unsigned int cmd,
				    struct vchiq_await_completion32 __user *argp)
{
	struct vchiq_await_completion args;
	struct vchiq_await_completion32 args32;

	if (copy_from_user(&args32, argp, sizeof(args32)))
		return -EFAULT;

	args = (struct vchiq_await_completion) {
		.count		= args32.count,
		.buf		= compat_ptr(args32.buf),
		.msgbufsize	= args32.msgbufsize,
		.msgbufcount	= args32.msgbufcount,
		.msgbufs	= compat_ptr(args32.msgbufs),
	};

	return vchiq_ioc_await_completion(file->private_data, &args,
					  &argp->msgbufcount);
}

struct vchiq_dequeue_message32 {
	unsigned int handle;
	int blocking;
	unsigned int bufsize;
	compat_uptr_t buf;
};

#define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)

static long
vchiq_compat_ioctl_dequeue_message(struct file *file,
				   unsigned int cmd,
				   struct vchiq_dequeue_message32 __user *arg)
{
	struct vchiq_dequeue_message32 args32;
	struct vchiq_dequeue_message args;

	if (copy_from_user(&args32, arg, sizeof(args32)))
		return -EFAULT;

	args = (struct vchiq_dequeue_message) {
		.handle		= args32.handle,
		.blocking	= args32.blocking,
		.bufsize	= args32.bufsize,
		.buf		= compat_ptr(args32.buf),
	};

	return vchiq_ioc_dequeue_message(file->private_data, &args);
}

struct vchiq_get_config32 {
	unsigned int config_size;
	compat_uptr_t pconfig;
};

#define VCHIQ_IOC_GET_CONFIG32 \
	_IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)

static long
vchiq_compat_ioctl_get_config(struct file *file,
			      unsigned int cmd,
			      struct vchiq_get_config32 __user *arg)
{
	struct vchiq_get_config32 args32;
	struct vchiq_config config;
	void __user *ptr;

	if (copy_from_user(&args32, arg, sizeof(args32)))
		return -EFAULT;
	if (args32.config_size > sizeof(config))
		return -EINVAL;

	vchiq_get_config(&config);
	ptr = compat_ptr(args32.pconfig);
	if (copy_to_user(ptr, &config, args32.config_size))
		return -EFAULT;

	return 0;
}

static long
vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = compat_ptr(arg);

	switch (cmd) {
	case VCHIQ_IOC_CREATE_SERVICE32:
		return vchiq_compat_ioctl_create_service(file, cmd, argp);
	case VCHIQ_IOC_QUEUE_MESSAGE32:
		return vchiq_compat_ioctl_queue_message(file, cmd, argp);
	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
		return vchiq_compat_ioctl_queue_bulk(file, cmd, argp);
	case VCHIQ_IOC_AWAIT_COMPLETION32:
		return vchiq_compat_ioctl_await_completion(file, cmd, argp);
	case VCHIQ_IOC_DEQUEUE_MESSAGE32:
		return vchiq_compat_ioctl_dequeue_message(file, cmd, argp);
	case VCHIQ_IOC_GET_CONFIG32:
		return vchiq_compat_ioctl_get_config(file, cmd, argp);
	default:
		return vchiq_ioctl(file, cmd, (unsigned long)argp);
	}
}

#endif

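/*
 * Allocate a vchiq_instance for the opening process; all services and
 * completions created through this file handle hang off it.
 */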
static int vchiq_open(struct inode *inode, struct file *file)
{
	struct vchiq_state *state = vchiq_get_state();
	struct vchiq_instance *instance;

	/* Check before dereferencing: vchiq_get_state() may return NULL */
	if (!state) {
		pr_err("vchiq: no connection to VideoCore\n");
		return -ENOTCONN;
	}

	dev_dbg(state->dev, "arm: vchiq open\n");

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance)
		return -ENOMEM;

	instance->state = state;
	instance->pid = current->tgid;

	vchiq_debugfs_add_instance(instance);

	init_completion(&instance->insert_event);
	init_completion(&instance->remove_event);
	mutex_init(&instance->completion_mutex);
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	file->private_data = instance;

	return 0;
}

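/*
 * Tear down an instance on final close: terminate its services, drain any
 * queued messages and completions, then free the instance.
 */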
static int vchiq_release(struct inode *inode, struct file *file)
{
	struct vchiq_instance *instance = file->private_data;
	struct vchiq_state *state = vchiq_get_state();
	struct vchiq_service *service;
	int ret = 0;
	int i;

	if (!state) {
		ret = -EPERM;
		goto out;
	}

	dev_dbg(state->dev, "arm: instance=%p\n", instance);

	/* Ensure videocore is awake to allow termination. */
	vchiq_use_internal(instance->state, NULL, USE_TYPE_VCHIQ);

	mutex_lock(&instance->completion_mutex);

	/* Wake the completion thread and ask it to exit */
	instance->closing = 1;
	complete(&instance->insert_event);

	mutex_unlock(&instance->completion_mutex);

	/* Wake the slot handler if the completion queue is full. */
	complete(&instance->remove_event);

	/* Mark all services for termination... */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i))) {
		struct user_service *user_service = service->base.userdata;

		/* Wake the slot handler if the msg queue is full. */
		complete(&user_service->remove_event);

		vchiq_terminate_service_internal(service);
		vchiq_service_put(service);
	}

	/* ...and wait for them to die */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i))) {
		struct user_service *user_service = service->base.userdata;

		wait_for_completion(&service->remove_event);

		if (WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE)) {
			vchiq_service_put(service);
			break;
		}

		spin_lock(&msg_queue_spinlock);

		while (user_service->msg_remove != user_service->msg_insert) {
			struct vchiq_header *header;
			int m = user_service->msg_remove & (MSG_QUEUE_SIZE - 1);

			header = user_service->msg_queue[m];
			user_service->msg_remove++;
			spin_unlock(&msg_queue_spinlock);

			if (header)
				vchiq_release_message(instance, service->handle, header);
			spin_lock(&msg_queue_spinlock);
		}

		spin_unlock(&msg_queue_spinlock);

		vchiq_service_put(service);
	}

	/* Release any closed services */
	while (instance->completion_remove != instance->completion_insert) {
		struct vchiq_completion_data_kernel *completion;
		struct vchiq_service *service;

		completion = &instance->completions[instance->completion_remove
						    & (MAX_COMPLETIONS - 1)];
		service = completion->service_userdata;
		if (completion->reason == VCHIQ_SERVICE_CLOSED) {
			struct user_service *user_service =
							service->base.userdata;

			/* Wake any blocked user-thread */
			if (instance->use_close_delivered)
				complete(&user_service->close_event);
			vchiq_service_put(service);
		}
		instance->completion_remove++;
	}

	/* Release the PEER service count. */
	vchiq_release_internal(instance->state, NULL);

	free_bulk_waiter(instance);

	vchiq_debugfs_remove_instance(instance);

	kfree(instance);
	file->private_data = NULL;

out:
	return ret;
}

static const struct file_operations
vchiq_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vchiq_ioctl,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vchiq_compat_ioctl,
#endif
	.open = vchiq_open,
	.release = vchiq_release,
};

static struct miscdevice vchiq_miscdev = {
	.fops = &vchiq_fops,
	.minor = MISC_DYNAMIC_MINOR,
	.name = "vchiq",
};

/**
 *	vchiq_register_chrdev - Register the char driver for vchiq
 *				and create the necessary class and
 *				device files in userspace.
 *	@parent:	The parent of the char device.
 *
 *	Returns 0 on success else returns the error code.
 */
int vchiq_register_chrdev(struct device *parent)
{
	vchiq_miscdev.parent = parent;

	return misc_register(&vchiq_miscdev);
}

/**
 *	vchiq_deregister_chrdev	- Deregister and cleanup the vchiq char
 *				  driver and device files
 */
void vchiq_deregister_chrdev(void)
{
	misc_deregister(&vchiq_miscdev);
}