1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Broadcom BCM2835 V4L2 driver
4 *
 * Copyright © 2013 Raspberry Pi (Trading) Ltd.
6 *
7 * Authors: Vincent Sanders @ Collabora
8 *          Dave Stevenson @ Broadcom
9 *		(now dave.stevenson@raspberrypi.org)
10 *          Simon Mellor @ Broadcom
11 *          Luke Diamand @ Broadcom
12 *
13 * V4L2 driver MMAL vchiq interface code
14 */
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18#include <linux/errno.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/mutex.h>
22#include <linux/mm.h>
23#include <linux/slab.h>
24#include <linux/completion.h>
25#include <linux/vmalloc.h>
26#include <media/videobuf2-vmalloc.h>
27
28#include "../include/linux/raspberrypi/vchiq.h"
29#include "mmal-common.h"
30#include "mmal-vchiq.h"
31#include "mmal-msg.h"
32
33/*
34 * maximum number of components supported.
35 * This matches the maximum permitted by default on the VPU
36 */
37#define VCHIQ_MMAL_MAX_COMPONENTS 64
38
39/*
40 * Timeout for synchronous msg responses in seconds.
41 * Helpful to increase this if stopping in the VPU debugger.
42 */
43#define SYNC_MSG_TIMEOUT       3
44
45/*#define FULL_MSG_DUMP 1*/
46
#ifdef DEBUG
/*
 * Human-readable names for the MMAL message types, indexed by the
 * numeric value of msg->h.type.  Used only by DBG_DUMP_MSG below.
 */
static const char *const msg_type_names[] = {
	"UNKNOWN",
	"QUIT",
	"SERVICE_CLOSED",
	"GET_VERSION",
	"COMPONENT_CREATE",
	"COMPONENT_DESTROY",
	"COMPONENT_ENABLE",
	"COMPONENT_DISABLE",
	"PORT_INFO_GET",
	"PORT_INFO_SET",
	"PORT_ACTION",
	"BUFFER_FROM_HOST",
	"BUFFER_TO_HOST",
	"GET_STATS",
	"PORT_PARAMETER_SET",
	"PORT_PARAMETER_GET",
	"EVENT_TO_HOST",
	"GET_CORE_STATS_FOR_PORT",
	"OPAQUE_ALLOCATOR",
	"CONSUME_MEM",
	"LMK",
	"OPAQUE_ALLOCATOR_DESC",
	"DRM_GET_LHS32",
	"DRM_GET_TIME",
	"BUFFER_FROM_HOST_ZEROLEN",
	"PORT_FLUSH",
	"HOST_LOG",
};
#endif
78
/*
 * Human-readable names for MMAL port actions, indexed by the
 * port action type value (debug/trace aid).
 */
static const char *const port_action_type_names[] = {
	"UNKNOWN",
	"ENABLE",
	"DISABLE",
	"FLUSH",
	"CONNECT",
	"DISCONNECT",
	"SET_REQUIREMENTS",
};
88
#if defined(DEBUG)
#if defined(FULL_MSG_DUMP)
/*
 * Dump an MMAL message for debugging.  MSG is a struct mmal_msg pointer,
 * MSG_LEN the total message length in bytes and TITLE a string-literal
 * prefix.  The FULL_MSG_DUMP variant also hex-dumps the header and
 * payload.
 */
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
		print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET,	\
			       16, 4, (MSG),				\
			       sizeof(struct mmal_msg_header), 1);	\
		print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET,	\
			       16, 4,					\
			       ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
			       (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
	} while (0)
#else
/*
 * Summary-only variant: log just the message type and length.
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and stays safe inside unbraced if/else bodies
 * (the previous bare-brace form broke "if (x) DBG_DUMP_MSG(...); else").
 */
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
	} while (0)
#endif
#else
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
#endif
115
116struct vchiq_mmal_instance;
117
/*
 * Per-message state tracked while a request is outstanding on the VPU.
 * The union reflects the two reply disciplines used by this driver:
 * "bulk" for data buffer transfers completed via workqueue callbacks,
 * and "sync" for blocking request/reply exchanges completed via a
 * completion.
 */
struct mmal_msg_context {
	struct vchiq_mmal_instance *instance;

	/* Index in the context_map idr so that we can find the
	 * mmal_msg_context again when servicing the VCHI reply.
	 */
	int handle;

	union {
		struct {
			/* work struct for buffer_cb callback */
			struct work_struct work;
			/* work struct for deferred callback */
			struct work_struct buffer_to_host_work;
			/* mmal instance */
			struct vchiq_mmal_instance *instance;
			/* mmal port */
			struct vchiq_mmal_port *port;
			/* actual buffer used to store bulk reply */
			struct mmal_buffer *buffer;
			/* amount of buffer used */
			unsigned long buffer_used;
			/* MMAL buffer flags */
			u32 mmal_flags;
			/* Presentation and Decode timestamps */
			s64 pts;
			s64 dts;

			int status;	/* context status */

		} bulk;		/* bulk data */

		struct {
			/* message handle to release */
			struct vchiq_header *msg_handle;
			/* pointer to received message */
			struct mmal_msg *msg;
			/* received message length */
			u32 msg_len;
			/* completion upon reply */
			struct completion cmplt;
		} sync;		/* synchronous response */
	} u;

};
164
/*
 * State for one open MMAL-over-VCHIQ connection: the vchiq service
 * handle, the idr mapping message handles back to contexts, and the
 * fixed pool of components this instance may create.
 */
struct vchiq_mmal_instance {
	unsigned int service_handle;

	/* ensure serialised access to service */
	struct mutex vchiq_mutex;

	/* maps an integer handle to an in-flight mmal_msg_context */
	struct idr context_map;
	/* protect accesses to context_map */
	struct mutex context_map_lock;

	struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];

	/* ordered workqueue to process all bulk operations */
	struct workqueue_struct *bulk_wq;

	/* handle for a vchiq instance */
	struct vchiq_instance *vchiq_instance;
};
183
184static struct mmal_msg_context *
185get_msg_context(struct vchiq_mmal_instance *instance)
186{
187	struct mmal_msg_context *msg_context;
188	int handle;
189
190	/* todo: should this be allocated from a pool to avoid kzalloc */
191	msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
192
193	if (!msg_context)
194		return ERR_PTR(-ENOMEM);
195
196	/* Create an ID that will be passed along with our message so
197	 * that when we service the VCHI reply, we can look up what
198	 * message is being replied to.
199	 */
200	mutex_lock(&instance->context_map_lock);
201	handle = idr_alloc(&instance->context_map, msg_context,
202			   0, 0, GFP_KERNEL);
203	mutex_unlock(&instance->context_map_lock);
204
205	if (handle < 0) {
206		kfree(msg_context);
207		return ERR_PTR(handle);
208	}
209
210	msg_context->instance = instance;
211	msg_context->handle = handle;
212
213	return msg_context;
214}
215
216static struct mmal_msg_context *
217lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
218{
219	return idr_find(&instance->context_map, handle);
220}
221
222static void
223release_msg_context(struct mmal_msg_context *msg_context)
224{
225	struct vchiq_mmal_instance *instance = msg_context->instance;
226
227	mutex_lock(&instance->context_map_lock);
228	idr_remove(&instance->context_map, msg_context->handle);
229	mutex_unlock(&instance->context_map_lock);
230	kfree(msg_context);
231}
232
/* deals with receipt of event to host message
 *
 * Events from the VPU are currently not acted upon; the message
 * contents are logged for debugging and then dropped (the caller
 * releases the vchiq header).
 */
static void event_to_host_cb(struct vchiq_mmal_instance *instance,
			     struct mmal_msg *msg, u32 msg_len)
{
	pr_debug("unhandled event\n");
	pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
		 msg->u.event_to_host.client_component,
		 msg->u.event_to_host.port_type,
		 msg->u.event_to_host.port_num,
		 msg->u.event_to_host.cmd, msg->u.event_to_host.length);
}
244
/* workqueue scheduled callback
 *
 * we do this because it is important we do not call any other vchiq
 * sync calls from within the message delivery thread
 *
 * Copies the bulk-transfer results (length, flags, timestamps) from the
 * message context into the mmal_buffer and hands the buffer back to the
 * port's registered buffer_cb.
 */
static void buffer_work_cb(struct work_struct *work)
{
	struct mmal_msg_context *msg_context =
		container_of(work, struct mmal_msg_context, u.bulk.work);
	struct mmal_buffer *buffer = msg_context->u.bulk.buffer;

	if (!buffer) {
		pr_err("%s: ctx: %p, No mmal buffer to pass details\n",
		       __func__, msg_context);
		return;
	}

	buffer->length = msg_context->u.bulk.buffer_used;
	buffer->mmal_flags = msg_context->u.bulk.mmal_flags;
	buffer->dts = msg_context->u.bulk.dts;
	buffer->pts = msg_context->u.bulk.pts;

	/* the VPU no longer owns this buffer */
	atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);

	msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
					    msg_context->u.bulk.port,
					    msg_context->u.bulk.status,
					    msg_context->u.bulk.buffer);
}
274
/* workqueue scheduled callback to handle receiving buffers
 *
 * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
 * If we block in the service_callback context then we can't process the
 * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
 * vchiq_bulk_receive() call to complete.
 */
static void buffer_to_host_work_cb(struct work_struct *work)
{
	struct mmal_msg_context *msg_context =
		container_of(work, struct mmal_msg_context,
			     u.bulk.buffer_to_host_work);
	struct vchiq_mmal_instance *instance = msg_context->instance;
	unsigned long len = msg_context->u.bulk.buffer_used;
	int ret;

	if (!len)
		/* Dummy receive to ensure the buffers remain in order */
		len = 8;
	/* queue the bulk submission */
	vchiq_use_service(instance->vchiq_instance, instance->service_handle);
	ret = vchiq_bulk_receive(instance->vchiq_instance, instance->service_handle,
				 msg_context->u.bulk.buffer->buffer,
				 /* Actual receive needs to be a multiple
				  * of 4 bytes
				  */
				(len + 3) & ~3,
				msg_context,
				VCHIQ_BULK_MODE_CALLBACK);

	vchiq_release_service(instance->vchiq_instance, instance->service_handle);

	/* completion (or abort) will arrive via service_callback; a
	 * failure here means no callback will fire for this buffer
	 */
	if (ret != 0)
		pr_err("%s: ctx: %p, vchiq_bulk_receive failed %d\n",
		       __func__, msg_context, ret);
}
311
/* enqueue a bulk receive for a given message context
 *
 * Records the transfer parameters (length clamped to the client buffer,
 * plus DTS/PTS) in the context and queues buffer_to_host_work_cb on the
 * ordered bulk workqueue to perform the actual vchiq_bulk_receive().
 * Returns 0 on successful queuing, -EINVAL if no buffer is attached.
 */
static int bulk_receive(struct vchiq_mmal_instance *instance,
			struct mmal_msg *msg,
			struct mmal_msg_context *msg_context)
{
	unsigned long rd_len;

	rd_len = msg->u.buffer_from_host.buffer_header.length;

	if (!msg_context->u.bulk.buffer) {
		pr_err("bulk.buffer not configured - error in buffer_from_host\n");

		/* todo: this is a serious error, we should never have
		 * committed a buffer_to_host operation to the mmal
		 * port without the buffer to back it up (underflow
		 * handling) and there is no obvious way to deal with
		 * this - how is the mmal servie going to react when
		 * we fail to do the xfer and reschedule a buffer when
		 * it arrives? perhaps a starved flag to indicate a
		 * waiting bulk receive?
		 */

		return -EINVAL;
	}

	/* ensure we do not overrun the available buffer */
	if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
		rd_len = msg_context->u.bulk.buffer->buffer_size;
		pr_warn("short read as not enough receive buffer space\n");
		/* todo: is this the correct response, what happens to
		 * the rest of the message data?
		 */
	}

	/* store length */
	msg_context->u.bulk.buffer_used = rd_len;
	msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
	msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;

	queue_work(msg_context->instance->bulk_wq,
		   &msg_context->u.bulk.buffer_to_host_work);

	return 0;
}
356
357/* data in message, memcpy from packet into output buffer */
358static int inline_receive(struct vchiq_mmal_instance *instance,
359			  struct mmal_msg *msg,
360			  struct mmal_msg_context *msg_context)
361{
362	memcpy(msg_context->u.bulk.buffer->buffer,
363	       msg->u.buffer_from_host.short_data,
364	       msg->u.buffer_from_host.payload_in_message);
365
366	msg_context->u.bulk.buffer_used =
367	    msg->u.buffer_from_host.payload_in_message;
368
369	return 0;
370}
371
/* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST
 *
 * Hands an empty client buffer to the VPU.  The buffer's msg_context is
 * primed so the eventual BUFFER_TO_HOST reply (and any bulk transfer)
 * can be routed back to the right port callback.  Returns 0 on success
 * or a negative errno.
 */
static int
buffer_from_host(struct vchiq_mmal_instance *instance,
		 struct vchiq_mmal_port *port, struct mmal_buffer *buf)
{
	struct mmal_msg_context *msg_context;
	struct mmal_msg m;
	int ret;

	if (!port->enabled)
		return -EINVAL;

	pr_debug("instance:%u buffer:%p\n", instance->service_handle, buf);

	/* get context */
	if (!buf->msg_context) {
		pr_err("%s: msg_context not allocated, buf %p\n", __func__,
		       buf);
		return -EINVAL;
	}
	msg_context = buf->msg_context;

	/* store bulk message context for when data arrives */
	msg_context->u.bulk.instance = instance;
	msg_context->u.bulk.port = port;
	msg_context->u.bulk.buffer = buf;
	msg_context->u.bulk.buffer_used = 0;

	/* initialise work structure ready to schedule callback */
	INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
	INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
		  buffer_to_host_work_cb);

	/* count the buffer as owned by the VPU before queuing; undone
	 * below if the queue fails
	 */
	atomic_inc(&port->buffers_with_vpu);

	/* prep the buffer from host message */
	memset(&m, 0xbc, sizeof(m));	/* just to make debug clearer */

	m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
	m.h.magic = MMAL_MAGIC;
	m.h.context = msg_context->handle;
	m.h.status = 0;

	/* drvbuf is our private data passed back */
	m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
	m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
	m.u.buffer_from_host.drvbuf.port_handle = port->handle;
	m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;

	/* buffer header */
	m.u.buffer_from_host.buffer_header.cmd = 0;
	m.u.buffer_from_host.buffer_header.data =
		(u32)(unsigned long)buf->buffer;
	m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
	m.u.buffer_from_host.buffer_header.length = 0;	/* nothing used yet */
	m.u.buffer_from_host.buffer_header.offset = 0;	/* no offset */
	m.u.buffer_from_host.buffer_header.flags = 0;	/* no flags */
	m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
	m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;

	/* clear buffer type specific data */
	memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
	       sizeof(m.u.buffer_from_host.buffer_header_type_specific));

	/* no payload in message */
	m.u.buffer_from_host.payload_in_message = 0;

	vchiq_use_service(instance->vchiq_instance, instance->service_handle);

	ret = vchiq_queue_kernel_message(instance->vchiq_instance, instance->service_handle, &m,
					 sizeof(struct mmal_msg_header) +
					 sizeof(m.u.buffer_from_host));
	if (ret)
		atomic_dec(&port->buffers_with_vpu);

	vchiq_release_service(instance->vchiq_instance, instance->service_handle);

	return ret;
}
451
/* deals with receipt of buffer to host message
 *
 * Looks up the message context from the drvbuf client_context echoed
 * back by the VPU, then dispatches on how the data arrives: bulk
 * transfer (queued, callback fires later), inline short payload, or an
 * empty/error buffer.  In every non-bulk path the port callback is
 * scheduled via the bulk work item at the end.
 */
static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
			      struct mmal_msg *msg, u32 msg_len)
{
	struct mmal_msg_context *msg_context;
	u32 handle;

	pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
		 __func__, instance, msg, msg_len);

	if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
		handle = msg->u.buffer_from_host.drvbuf.client_context;
		msg_context = lookup_msg_context(instance, handle);

		if (!msg_context) {
			pr_err("drvbuf.client_context(%u) is invalid\n",
			       handle);
			return;
		}
	} else {
		pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
		return;
	}

	msg_context->u.bulk.mmal_flags =
				msg->u.buffer_from_host.buffer_header.flags;

	if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
		/* message reception had an error */
		pr_warn("error %d in reply\n", msg->h.status);

		msg_context->u.bulk.status = msg->h.status;

	} else if (msg->u.buffer_from_host.buffer_header.length == 0) {
		/* empty buffer */
		if (msg->u.buffer_from_host.buffer_header.flags &
		    MMAL_BUFFER_HEADER_FLAG_EOS) {
			/* EOS still needs a (zero length) bulk receive
			 * so buffers stay in order
			 */
			msg_context->u.bulk.status =
			    bulk_receive(instance, msg, msg_context);
			if (msg_context->u.bulk.status == 0)
				return;	/* successful bulk submission, bulk
					 * completion will trigger callback
					 */
		} else {
			/* do callback with empty buffer - not EOS though */
			msg_context->u.bulk.status = 0;
			msg_context->u.bulk.buffer_used = 0;
		}
	} else if (msg->u.buffer_from_host.payload_in_message == 0) {
		/* data is not in message, queue a bulk receive */
		msg_context->u.bulk.status =
		    bulk_receive(instance, msg, msg_context);
		if (msg_context->u.bulk.status == 0)
			return;	/* successful bulk submission, bulk
				 * completion will trigger callback
				 */

		/* failed to submit buffer, this will end badly */
		pr_err("error %d on bulk submission\n",
		       msg_context->u.bulk.status);

	} else if (msg->u.buffer_from_host.payload_in_message <=
		   MMAL_VC_SHORT_DATA) {
		/* data payload within message */
		msg_context->u.bulk.status = inline_receive(instance, msg,
							    msg_context);
	} else {
		pr_err("message with invalid short payload\n");

		/* signal error */
		msg_context->u.bulk.status = -EINVAL;
		msg_context->u.bulk.buffer_used =
		    msg->u.buffer_from_host.payload_in_message;
	}

	/* schedule the port callback */
	schedule_work(&msg_context->u.bulk.work);
}
530
/* bulk transfer completed successfully - hand the buffer to the port
 * callback via the work item
 */
static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
			    struct mmal_msg_context *msg_context)
{
	msg_context->u.bulk.status = 0;

	/* schedule the port callback */
	schedule_work(&msg_context->u.bulk.work);
}
539
/* bulk transfer was aborted - report -EINTR to the port callback so the
 * buffer is still returned to its owner
 */
static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
			  struct mmal_msg_context *msg_context)
{
	pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);

	msg_context->u.bulk.status = -EINTR;

	schedule_work(&msg_context->u.bulk.work);
}
549
/* incoming event service callback
 *
 * Entry point for all vchiq events on the MMAL service.  Runs in the
 * vchiq message-delivery context, so it must not block: buffer and
 * bulk handling is deferred to workqueues, while synchronous replies
 * simply complete the waiter.  Always returns 0.
 */
static int service_callback(struct vchiq_instance *vchiq_instance,
			    enum vchiq_reason reason, struct vchiq_header *header,
			    unsigned int handle, void *bulk_ctx)
{
	struct vchiq_mmal_instance *instance = vchiq_get_service_userdata(vchiq_instance, handle);
	u32 msg_len;
	struct mmal_msg *msg;
	struct mmal_msg_context *msg_context;

	if (!instance) {
		pr_err("Message callback passed NULL instance\n");
		return 0;
	}

	switch (reason) {
	case VCHIQ_MESSAGE_AVAILABLE:
		msg = (void *)header->data;
		msg_len = header->size;

		DBG_DUMP_MSG(msg, msg_len, "<<< reply message");

		/* handling is different for buffer messages */
		switch (msg->h.type) {
		case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
			/* ack of our own buffer submission - nothing to do */
			vchiq_release_message(vchiq_instance, handle, header);
			break;

		case MMAL_MSG_TYPE_EVENT_TO_HOST:
			event_to_host_cb(instance, msg, msg_len);
			vchiq_release_message(vchiq_instance, handle, header);

			break;

		case MMAL_MSG_TYPE_BUFFER_TO_HOST:
			buffer_to_host_cb(instance, msg, msg_len);
			vchiq_release_message(vchiq_instance, handle, header);
			break;

		default:
			/* messages dependent on header context to complete */
			if (!msg->h.context) {
				pr_err("received message context was null!\n");
				vchiq_release_message(vchiq_instance, handle, header);
				break;
			}

			msg_context = lookup_msg_context(instance,
							 msg->h.context);
			if (!msg_context) {
				pr_err("received invalid message context %u!\n",
				       msg->h.context);
				vchiq_release_message(vchiq_instance, handle, header);
				break;
			}

			/* fill in context values; the header is released
			 * later by the synchronous caller
			 */
			msg_context->u.sync.msg_handle = header;
			msg_context->u.sync.msg = msg;
			msg_context->u.sync.msg_len = msg_len;

			/* todo: should this check (completion_done()
			 * == 1) for no one waiting? or do we need a
			 * flag to tell us the completion has been
			 * interrupted so we can free the message and
			 * its context. This probably also solves the
			 * message arriving after interruption todo
			 * below
			 */

			/* complete message so caller knows it happened */
			complete(&msg_context->u.sync.cmplt);
			break;
		}

		break;

	case VCHIQ_BULK_RECEIVE_DONE:
		bulk_receive_cb(instance, bulk_ctx);
		break;

	case VCHIQ_BULK_RECEIVE_ABORTED:
		bulk_abort_cb(instance, bulk_ctx);
		break;

	case VCHIQ_SERVICE_CLOSED:
		/* TODO: consider if this requires action if received when
		 * driver is not explicitly closing the service
		 */
		break;

	default:
		pr_err("Received unhandled message reason %d\n", reason);
		break;
	}

	return 0;
}
648
/* Send an MMAL message and block until the matching reply arrives (or
 * SYNC_MSG_TIMEOUT expires).
 *
 * On success *msg_out points at the reply and *msg_handle at the vchiq
 * header; the CALLER must release the header with
 * vchiq_release_message() when done with the reply.  Returns 0 or a
 * negative errno (-EINVAL, queueing error, or -ETIME on timeout).
 */
static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
				     struct mmal_msg *msg,
				     unsigned int payload_len,
				     struct mmal_msg **msg_out,
				     struct vchiq_header **msg_handle)
{
	struct mmal_msg_context *msg_context;
	int ret;
	unsigned long timeout;

	/* payload size must not cause message to exceed max size */
	if (payload_len >
	    (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
		pr_err("payload length %d exceeds max:%d\n", payload_len,
		       (int)(MMAL_MSG_MAX_SIZE -
			    sizeof(struct mmal_msg_header)));
		return -EINVAL;
	}

	msg_context = get_msg_context(instance);
	if (IS_ERR(msg_context))
		return PTR_ERR(msg_context);

	init_completion(&msg_context->u.sync.cmplt);

	msg->h.magic = MMAL_MAGIC;
	msg->h.context = msg_context->handle;
	msg->h.status = 0;

	DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
		     ">>> sync message");

	vchiq_use_service(instance->vchiq_instance, instance->service_handle);

	ret = vchiq_queue_kernel_message(instance->vchiq_instance, instance->service_handle, msg,
					 sizeof(struct mmal_msg_header) +
					 payload_len);

	vchiq_release_service(instance->vchiq_instance, instance->service_handle);

	if (ret) {
		pr_err("error %d queuing message\n", ret);
		release_msg_context(msg_context);
		return ret;
	}

	/* service_callback() completes this when the reply arrives */
	timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
					      SYNC_MSG_TIMEOUT * HZ);
	if (timeout == 0) {
		pr_err("timed out waiting for sync completion\n");
		ret = -ETIME;
		/* todo: what happens if the message arrives after aborting */
		release_msg_context(msg_context);
		return ret;
	}

	*msg_out = msg_context->u.sync.msg;
	*msg_handle = msg_context->u.sync.msg_handle;
	release_msg_context(msg_context);

	return 0;
}
711
/* Debug helper: log the buffer requirements and stream format of a
 * port.  No side effects beyond pr_debug output.
 */
static void dump_port_info(struct vchiq_mmal_port *port)
{
	pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);

	pr_debug("buffer minimum num:%d size:%d align:%d\n",
		 port->minimum_buffer.num,
		 port->minimum_buffer.size, port->minimum_buffer.alignment);

	pr_debug("buffer recommended num:%d size:%d align:%d\n",
		 port->recommended_buffer.num,
		 port->recommended_buffer.size,
		 port->recommended_buffer.alignment);

	pr_debug("buffer current values num:%d size:%d align:%d\n",
		 port->current_buffer.num,
		 port->current_buffer.size, port->current_buffer.alignment);

	pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
		 port->format.type,
		 port->format.encoding, port->format.encoding_variant);

	pr_debug("		    bitrate:%d flags:0x%x\n",
		 port->format.bitrate, port->format.flags);

	if (port->format.type == MMAL_ES_TYPE_VIDEO) {
		pr_debug
		    ("es video format: width:%d height:%d colourspace:0x%x\n",
		     port->es.video.width, port->es.video.height,
		     port->es.video.color_space);

		pr_debug("		 : crop xywh %d,%d,%d,%d\n",
			 port->es.video.crop.x,
			 port->es.video.crop.y,
			 port->es.video.crop.width, port->es.video.crop.height);
		pr_debug("		 : framerate %d/%d  aspect %d/%d\n",
			 port->es.video.frame_rate.numerator,
			 port->es.video.frame_rate.denominator,
			 port->es.video.par.numerator, port->es.video.par.denominator);
	}
}
752
/* Serialise the kernel-side port state into the wire-format
 * struct mmal_port used in PORT_INFO_SET messages.
 */
static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
{
	/* todo do readonly fields need setting at all? */
	p->type = port->type;
	p->index = port->index;
	p->index_all = 0;
	p->is_enabled = port->enabled;
	p->buffer_num_min = port->minimum_buffer.num;
	p->buffer_size_min = port->minimum_buffer.size;
	p->buffer_alignment_min = port->minimum_buffer.alignment;
	p->buffer_num_recommended = port->recommended_buffer.num;
	p->buffer_size_recommended = port->recommended_buffer.size;

	/* only three writable fields in a port */
	p->buffer_num = port->current_buffer.num;
	p->buffer_size = port->current_buffer.size;
	/* userdata is echoed back by the VPU; store the kernel pointer
	 * so replies can be associated with this port
	 */
	p->userdata = (u32)(unsigned long)port;
}
771
772static int port_info_set(struct vchiq_mmal_instance *instance,
773			 struct vchiq_mmal_port *port)
774{
775	int ret;
776	struct mmal_msg m;
777	struct mmal_msg *rmsg;
778	struct vchiq_header *rmsg_handle;
779
780	pr_debug("setting port info port %p\n", port);
781	if (!port)
782		return -1;
783	dump_port_info(port);
784
785	m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
786
787	m.u.port_info_set.component_handle = port->component->handle;
788	m.u.port_info_set.port_type = port->type;
789	m.u.port_info_set.port_index = port->index;
790
791	port_to_mmal_msg(port, &m.u.port_info_set.port);
792
793	/* elementary stream format setup */
794	m.u.port_info_set.format.type = port->format.type;
795	m.u.port_info_set.format.encoding = port->format.encoding;
796	m.u.port_info_set.format.encoding_variant =
797	    port->format.encoding_variant;
798	m.u.port_info_set.format.bitrate = port->format.bitrate;
799	m.u.port_info_set.format.flags = port->format.flags;
800
801	memcpy(&m.u.port_info_set.es, &port->es,
802	       sizeof(union mmal_es_specific_format));
803
804	m.u.port_info_set.format.extradata_size = port->format.extradata_size;
805	memcpy(&m.u.port_info_set.extradata, port->format.extradata,
806	       port->format.extradata_size);
807
808	ret = send_synchronous_mmal_msg(instance, &m,
809					sizeof(m.u.port_info_set),
810					&rmsg, &rmsg_handle);
811	if (ret)
812		return ret;
813
814	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
815		/* got an unexpected message type in reply */
816		ret = -EINVAL;
817		goto release_msg;
818	}
819
820	/* return operation status */
821	ret = -rmsg->u.port_info_get_reply.status;
822
823	pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
824		 port->component->handle, port->handle);
825
826release_msg:
827	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
828
829	return ret;
830}
831
/* use port info get message to retrieve port information
 *
 * Populates the kernel-side port structure (handle, buffer
 * requirements, stream format, extradata) from the VPU's reply.
 * Returns 0 on success, -EINVAL on an unexpected reply type, else the
 * negated MMAL status.
 */
static int port_info_get(struct vchiq_mmal_instance *instance,
			 struct vchiq_mmal_port *port)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	/* port info time */
	m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
	m.u.port_info_get.component_handle = port->component->handle;
	m.u.port_info_get.port_type = port->type;
	m.u.port_info_get.index = port->index;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_info_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	/* return operation status
	 * NOTE(review): ret is the negated status here, so this compare
	 * only works because MMAL_MSG_STATUS_SUCCESS is presumably 0 -
	 * confirm against mmal-msg.h
	 */
	ret = -rmsg->u.port_info_get_reply.status;
	if (ret != MMAL_MSG_STATUS_SUCCESS)
		goto release_msg;

	if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
		port->enabled = false;
	else
		port->enabled = true;

	/* copy the values out of the message */
	port->handle = rmsg->u.port_info_get_reply.port_handle;

	/* port type and index cached to use on port info set because
	 * it does not use a port handle
	 */
	port->type = rmsg->u.port_info_get_reply.port_type;
	port->index = rmsg->u.port_info_get_reply.port_index;

	port->minimum_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_min;
	port->minimum_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size_min;
	port->minimum_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;

	/* NOTE(review): recommended_buffer.size is never filled in from
	 * the reply (only num and alignment) - verify whether that is
	 * intentional
	 */
	port->recommended_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;
	port->recommended_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_recommended;

	port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
	port->current_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size;

	/* stream format */
	port->format.type = rmsg->u.port_info_get_reply.format.type;
	port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
	port->format.encoding_variant =
	    rmsg->u.port_info_get_reply.format.encoding_variant;
	port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
	port->format.flags = rmsg->u.port_info_get_reply.format.flags;

	/* elementary stream format */
	memcpy(&port->es,
	       &rmsg->u.port_info_get_reply.es,
	       sizeof(union mmal_es_specific_format));
	port->format.es = &port->es;

	port->format.extradata_size =
	    rmsg->u.port_info_get_reply.format.extradata_size;
	memcpy(port->format.extradata,
	       rmsg->u.port_info_get_reply.extradata,
	       port->format.extradata_size);

	pr_debug("received port info\n");
	dump_port_info(port);

release_msg:

	pr_debug("%s:result:%d component:0x%x port:%d\n",
		 __func__, ret, port->component->handle, port->handle);

	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);

	return ret;
}
926
/* create component on vc
 *
 * Asks the VPU to instantiate the named component and records the
 * returned handle and port counts in the kernel-side structure.
 * Returns 0 on success, -EINVAL on an unexpected reply type, else the
 * negated MMAL status.
 */
static int create_component(struct vchiq_mmal_instance *instance,
			    struct vchiq_mmal_component *component,
			    const char *name)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	/* build component create message */
	m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
	m.u.component_create.client_component = component->client_component;
	strscpy_pad(m.u.component_create.name, name,
		    sizeof(m.u.component_create.name));
	m.u.component_create.pid = 0;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.component_create),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != m.h.type) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.component_create_reply.status;
	if (ret != MMAL_MSG_STATUS_SUCCESS)
		goto release_msg;

	/* a valid component response received */
	component->handle = rmsg->u.component_create_reply.component_handle;
	component->inputs = rmsg->u.component_create_reply.input_num;
	component->outputs = rmsg->u.component_create_reply.output_num;
	component->clocks = rmsg->u.component_create_reply.clock_num;

	pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
		 component->handle,
		 component->inputs, component->outputs, component->clocks);

release_msg:
	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);

	return ret;
}
975
976/* destroys a component on vc */
977static int destroy_component(struct vchiq_mmal_instance *instance,
978			     struct vchiq_mmal_component *component)
979{
980	int ret;
981	struct mmal_msg m;
982	struct mmal_msg *rmsg;
983	struct vchiq_header *rmsg_handle;
984
985	m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
986	m.u.component_destroy.component_handle = component->handle;
987
988	ret = send_synchronous_mmal_msg(instance, &m,
989					sizeof(m.u.component_destroy),
990					&rmsg, &rmsg_handle);
991	if (ret)
992		return ret;
993
994	if (rmsg->h.type != m.h.type) {
995		/* got an unexpected message type in reply */
996		ret = -EINVAL;
997		goto release_msg;
998	}
999
1000	ret = -rmsg->u.component_destroy_reply.status;
1001
1002release_msg:
1003
1004	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1005
1006	return ret;
1007}
1008
1009/* enable a component on vc */
1010static int enable_component(struct vchiq_mmal_instance *instance,
1011			    struct vchiq_mmal_component *component)
1012{
1013	int ret;
1014	struct mmal_msg m;
1015	struct mmal_msg *rmsg;
1016	struct vchiq_header *rmsg_handle;
1017
1018	m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
1019	m.u.component_enable.component_handle = component->handle;
1020
1021	ret = send_synchronous_mmal_msg(instance, &m,
1022					sizeof(m.u.component_enable),
1023					&rmsg, &rmsg_handle);
1024	if (ret)
1025		return ret;
1026
1027	if (rmsg->h.type != m.h.type) {
1028		/* got an unexpected message type in reply */
1029		ret = -EINVAL;
1030		goto release_msg;
1031	}
1032
1033	ret = -rmsg->u.component_enable_reply.status;
1034
1035release_msg:
1036	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1037
1038	return ret;
1039}
1040
1041/* disable a component on vc */
1042static int disable_component(struct vchiq_mmal_instance *instance,
1043			     struct vchiq_mmal_component *component)
1044{
1045	int ret;
1046	struct mmal_msg m;
1047	struct mmal_msg *rmsg;
1048	struct vchiq_header *rmsg_handle;
1049
1050	m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1051	m.u.component_disable.component_handle = component->handle;
1052
1053	ret = send_synchronous_mmal_msg(instance, &m,
1054					sizeof(m.u.component_disable),
1055					&rmsg, &rmsg_handle);
1056	if (ret)
1057		return ret;
1058
1059	if (rmsg->h.type != m.h.type) {
1060		/* got an unexpected message type in reply */
1061		ret = -EINVAL;
1062		goto release_msg;
1063	}
1064
1065	ret = -rmsg->u.component_disable_reply.status;
1066
1067release_msg:
1068
1069	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1070
1071	return ret;
1072}
1073
1074/* get version of mmal implementation */
1075static int get_version(struct vchiq_mmal_instance *instance,
1076		       u32 *major_out, u32 *minor_out)
1077{
1078	int ret;
1079	struct mmal_msg m;
1080	struct mmal_msg *rmsg;
1081	struct vchiq_header *rmsg_handle;
1082
1083	m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1084
1085	ret = send_synchronous_mmal_msg(instance, &m,
1086					sizeof(m.u.version),
1087					&rmsg, &rmsg_handle);
1088	if (ret)
1089		return ret;
1090
1091	if (rmsg->h.type != m.h.type) {
1092		/* got an unexpected message type in reply */
1093		ret = -EINVAL;
1094		goto release_msg;
1095	}
1096
1097	*major_out = rmsg->u.version.major;
1098	*minor_out = rmsg->u.version.minor;
1099
1100release_msg:
1101	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1102
1103	return ret;
1104}
1105
1106/* do a port action with a port as a parameter */
1107static int port_action_port(struct vchiq_mmal_instance *instance,
1108			    struct vchiq_mmal_port *port,
1109			    enum mmal_msg_port_action_type action_type)
1110{
1111	int ret;
1112	struct mmal_msg m;
1113	struct mmal_msg *rmsg;
1114	struct vchiq_header *rmsg_handle;
1115
1116	m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1117	m.u.port_action_port.component_handle = port->component->handle;
1118	m.u.port_action_port.port_handle = port->handle;
1119	m.u.port_action_port.action = action_type;
1120
1121	port_to_mmal_msg(port, &m.u.port_action_port.port);
1122
1123	ret = send_synchronous_mmal_msg(instance, &m,
1124					sizeof(m.u.port_action_port),
1125					&rmsg, &rmsg_handle);
1126	if (ret)
1127		return ret;
1128
1129	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1130		/* got an unexpected message type in reply */
1131		ret = -EINVAL;
1132		goto release_msg;
1133	}
1134
1135	ret = -rmsg->u.port_action_reply.status;
1136
1137	pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1138		 __func__,
1139		 ret, port->component->handle, port->handle,
1140		 port_action_type_names[action_type], action_type);
1141
1142release_msg:
1143	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1144
1145	return ret;
1146}
1147
1148/* do a port action with handles as parameters */
1149static int port_action_handle(struct vchiq_mmal_instance *instance,
1150			      struct vchiq_mmal_port *port,
1151			      enum mmal_msg_port_action_type action_type,
1152			      u32 connect_component_handle,
1153			      u32 connect_port_handle)
1154{
1155	int ret;
1156	struct mmal_msg m;
1157	struct mmal_msg *rmsg;
1158	struct vchiq_header *rmsg_handle;
1159
1160	m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1161
1162	m.u.port_action_handle.component_handle = port->component->handle;
1163	m.u.port_action_handle.port_handle = port->handle;
1164	m.u.port_action_handle.action = action_type;
1165
1166	m.u.port_action_handle.connect_component_handle =
1167	    connect_component_handle;
1168	m.u.port_action_handle.connect_port_handle = connect_port_handle;
1169
1170	ret = send_synchronous_mmal_msg(instance, &m,
1171					sizeof(m.u.port_action_handle),
1172					&rmsg, &rmsg_handle);
1173	if (ret)
1174		return ret;
1175
1176	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1177		/* got an unexpected message type in reply */
1178		ret = -EINVAL;
1179		goto release_msg;
1180	}
1181
1182	ret = -rmsg->u.port_action_reply.status;
1183
1184	pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
1185		 __func__,
1186		 ret, port->component->handle, port->handle,
1187		 port_action_type_names[action_type],
1188		 action_type, connect_component_handle, connect_port_handle);
1189
1190release_msg:
1191	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1192
1193	return ret;
1194}
1195
1196static int port_parameter_set(struct vchiq_mmal_instance *instance,
1197			      struct vchiq_mmal_port *port,
1198			      u32 parameter_id, void *value, u32 value_size)
1199{
1200	int ret;
1201	struct mmal_msg m;
1202	struct mmal_msg *rmsg;
1203	struct vchiq_header *rmsg_handle;
1204
1205	m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;
1206
1207	m.u.port_parameter_set.component_handle = port->component->handle;
1208	m.u.port_parameter_set.port_handle = port->handle;
1209	m.u.port_parameter_set.id = parameter_id;
1210	m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
1211	memcpy(&m.u.port_parameter_set.value, value, value_size);
1212
1213	ret = send_synchronous_mmal_msg(instance, &m,
1214					(4 * sizeof(u32)) + value_size,
1215					&rmsg, &rmsg_handle);
1216	if (ret)
1217		return ret;
1218
1219	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
1220		/* got an unexpected message type in reply */
1221		ret = -EINVAL;
1222		goto release_msg;
1223	}
1224
1225	ret = -rmsg->u.port_parameter_set_reply.status;
1226
1227	pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
1228		 __func__,
1229		 ret, port->component->handle, port->handle, parameter_id);
1230
1231release_msg:
1232	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1233
1234	return ret;
1235}
1236
/* Get a parameter from a port.
 *
 * On entry *value_size is the caller's buffer capacity; on exit it is
 * always set to the true size of the parameter as reported by the VPU,
 * which may exceed what was copied into value (truncated copy).
 *
 * NOTE(review): unlike the other reply handlers in this file, the VPU
 * status is returned un-negated here, so a failure propagates as a
 * positive value — confirm callers expect that before changing it.
 */
static int port_parameter_get(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      u32 parameter_id, void *value, u32 *value_size)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;

	m.u.port_parameter_get.component_handle = port->component->handle;
	m.u.port_parameter_get.port_handle = port->handle;
	m.u.port_parameter_get.id = parameter_id;
	/* advertised size includes the id+size header words */
	m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(struct
					       mmal_msg_port_parameter_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
		/* got an unexpected message type in reply */
		pr_err("Incorrect reply type %d\n", rmsg->h.type);
		ret = -EINVAL;
		goto release_msg;
	}

	ret = rmsg->u.port_parameter_get_reply.status;

	/* port_parameter_get_reply.size includes the header,
	 * whilst *value_size doesn't.
	 */
	rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));

	if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
		/* Copy only as much as we have space for
		 * but report true size of parameter
		 */
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       *value_size);
	} else {
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       rmsg->u.port_parameter_get_reply.size);
	}
	/* Always report the size of the returned parameter to the caller */
	*value_size = rmsg->u.port_parameter_get_reply.size;

	pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
		 ret, port->component->handle, port->handle, parameter_id);

release_msg:
	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);

	return ret;
}
1295
1296/* disables a port and drains buffers from it */
/* disables a port and drains buffers from it
 *
 * Must be called with instance->vchiq_mutex held by the caller (all
 * callers in this file do so). Clears port->enabled *before* issuing
 * the VPU action so concurrent submitters see the port as disabled.
 */
static int port_disable(struct vchiq_mmal_instance *instance,
			struct vchiq_mmal_port *port)
{
	int ret;
	struct list_head *q, *buf_head;
	unsigned long flags = 0;

	if (!port->enabled)
		return 0;

	port->enabled = false;

	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
	if (ret == 0) {
		/*
		 * Drain all queued buffers on port. This should only
		 * apply to buffers that have been queued before the port
		 * has been enabled. If the port has been enabled and buffers
		 * passed, then the buffers should have been removed from this
		 * list, and we should get the relevant callbacks via VCHIQ
		 * to release the buffers.
		 */
		spin_lock_irqsave(&port->slock, flags);

		list_for_each_safe(buf_head, q, &port->buffers) {
			struct mmal_buffer *mmalbuf;

			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			list_del(buf_head);
			if (port->buffer_cb) {
				/* return the buffer empty with no timestamps */
				mmalbuf->length = 0;
				mmalbuf->mmal_flags = 0;
				mmalbuf->dts = MMAL_TIME_UNKNOWN;
				mmalbuf->pts = MMAL_TIME_UNKNOWN;
				/* NOTE(review): buffer_cb invoked under
				 * port->slock with IRQs off - callback must
				 * not sleep.
				 */
				port->buffer_cb(instance,
						port, 0, mmalbuf);
			}
		}

		spin_unlock_irqrestore(&port->slock, flags);

		/* refresh cached port info now the port is disabled */
		ret = port_info_get(instance, port);
	}

	return ret;
}
1345
1346/* enable a port */
/* enable a port
 *
 * Issues the ENABLE action, then (if a buffer callback is registered)
 * flushes any buffers parked on port->buffers down to the VPU, up to
 * the port's current buffer count. Caller holds instance->vchiq_mutex.
 */
static int port_enable(struct vchiq_mmal_instance *instance,
		       struct vchiq_mmal_port *port)
{
	unsigned int hdr_count;
	struct list_head *q, *buf_head;
	int ret;

	if (port->enabled)
		return 0;

	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
	if (ret)
		goto done;

	port->enabled = true;

	if (port->buffer_cb) {
		/* send buffer headers to videocore */
		hdr_count = 1;
		list_for_each_safe(buf_head, q, &port->buffers) {
			struct mmal_buffer *mmalbuf;

			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			/* NOTE(review): on failure the buffer stays queued
			 * on port->buffers (list_del only happens below).
			 */
			ret = buffer_from_host(instance, port, mmalbuf);
			if (ret)
				goto done;

			list_del(buf_head);
			/* counts from 1, so this submits at most
			 * current_buffer.num buffers
			 */
			hdr_count++;
			if (hdr_count > port->current_buffer.num)
				break;
		}
	}

	/* refresh cached port info now the port is enabled */
	ret = port_info_get(instance, port);

done:
	return ret;
}
1388
1389/* ------------------------------------------------------------------
1390 * Exported API
1391 *------------------------------------------------------------------
1392 */
1393
1394int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1395			       struct vchiq_mmal_port *port)
1396{
1397	int ret;
1398
1399	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1400		return -EINTR;
1401
1402	ret = port_info_set(instance, port);
1403	if (ret)
1404		goto release_unlock;
1405
1406	/* read what has actually been set */
1407	ret = port_info_get(instance, port);
1408
1409release_unlock:
1410	mutex_unlock(&instance->vchiq_mutex);
1411
1412	return ret;
1413}
1414EXPORT_SYMBOL_GPL(vchiq_mmal_port_set_format);
1415
1416int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
1417				  struct vchiq_mmal_port *port,
1418				  u32 parameter, void *value, u32 value_size)
1419{
1420	int ret;
1421
1422	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1423		return -EINTR;
1424
1425	ret = port_parameter_set(instance, port, parameter, value, value_size);
1426
1427	mutex_unlock(&instance->vchiq_mutex);
1428
1429	return ret;
1430}
1431EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_set);
1432
1433int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
1434				  struct vchiq_mmal_port *port,
1435				  u32 parameter, void *value, u32 *value_size)
1436{
1437	int ret;
1438
1439	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1440		return -EINTR;
1441
1442	ret = port_parameter_get(instance, port, parameter, value, value_size);
1443
1444	mutex_unlock(&instance->vchiq_mutex);
1445
1446	return ret;
1447}
1448EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_get);
1449
1450/* enable a port
1451 *
1452 * enables a port and queues buffers for satisfying callbacks if we
1453 * provide a callback handler
1454 */
1455int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1456			   struct vchiq_mmal_port *port,
1457			   vchiq_mmal_buffer_cb buffer_cb)
1458{
1459	int ret;
1460
1461	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1462		return -EINTR;
1463
1464	/* already enabled - noop */
1465	if (port->enabled) {
1466		ret = 0;
1467		goto unlock;
1468	}
1469
1470	port->buffer_cb = buffer_cb;
1471
1472	ret = port_enable(instance, port);
1473
1474unlock:
1475	mutex_unlock(&instance->vchiq_mutex);
1476
1477	return ret;
1478}
1479EXPORT_SYMBOL_GPL(vchiq_mmal_port_enable);
1480
1481int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1482			    struct vchiq_mmal_port *port)
1483{
1484	int ret;
1485
1486	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1487		return -EINTR;
1488
1489	if (!port->enabled) {
1490		mutex_unlock(&instance->vchiq_mutex);
1491		return 0;
1492	}
1493
1494	ret = port_disable(instance, port);
1495
1496	mutex_unlock(&instance->vchiq_mutex);
1497
1498	return ret;
1499}
1500EXPORT_SYMBOL_GPL(vchiq_mmal_port_disable);
1501
1502/* ports will be connected in a tunneled manner so data buffers
1503 * are not handled by client.
1504 */
/* Connect src to dst as a tunnel, first tearing down any existing
 * connection on src. Passing dst == NULL only disconnects.
 *
 * Sequence: disable+disconnect old peer, copy src's video format to
 * dst, program dst, then issue CONNECT from src's side.
 */
int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
				   struct vchiq_mmal_port *src,
				   struct vchiq_mmal_port *dst)
{
	int ret;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* disconnect ports if connected */
	if (src->connected) {
		ret = port_disable(instance, src);
		if (ret) {
			pr_err("failed disabling src port(%d)\n", ret);
			goto release_unlock;
		}

		/* do not need to disable the destination port as they
		 * are connected and it is done automatically
		 */

		ret = port_action_handle(instance, src,
					 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
					 src->connected->component->handle,
					 src->connected->handle);
		if (ret < 0) {
			pr_err("failed disconnecting src port\n");
			goto release_unlock;
		}
		/* mirror the VPU-side auto-disable in our local state */
		src->connected->enabled = false;
		src->connected = NULL;
	}

	if (!dst) {
		/* do not make new connection */
		ret = 0;
		pr_debug("not making new connection\n");
		goto release_unlock;
	}

	/* copy src port format to dst */
	dst->format.encoding = src->format.encoding;
	dst->es.video.width = src->es.video.width;
	dst->es.video.height = src->es.video.height;
	dst->es.video.crop.x = src->es.video.crop.x;
	dst->es.video.crop.y = src->es.video.crop.y;
	dst->es.video.crop.width = src->es.video.crop.width;
	dst->es.video.crop.height = src->es.video.crop.height;
	dst->es.video.frame_rate.numerator = src->es.video.frame_rate.numerator;
	dst->es.video.frame_rate.denominator = src->es.video.frame_rate.denominator;

	/* set new format */
	ret = port_info_set(instance, dst);
	if (ret) {
		pr_debug("setting port info failed\n");
		goto release_unlock;
	}

	/* read what has actually been set */
	ret = port_info_get(instance, dst);
	if (ret) {
		pr_debug("read back port info failed\n");
		goto release_unlock;
	}

	/* connect two ports together */
	ret = port_action_handle(instance, src,
				 MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
				 dst->component->handle, dst->handle);
	if (ret < 0) {
		pr_debug("connecting port %d:%d to %d:%d failed\n",
			 src->component->handle, src->handle,
			 dst->component->handle, dst->handle);
		goto release_unlock;
	}
	src->connected = dst;

release_unlock:

	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
1588EXPORT_SYMBOL_GPL(vchiq_mmal_port_connect_tunnel);
1589
1590int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
1591			     struct vchiq_mmal_port *port,
1592			     struct mmal_buffer *buffer)
1593{
1594	unsigned long flags = 0;
1595	int ret;
1596
1597	ret = buffer_from_host(instance, port, buffer);
1598	if (ret == -EINVAL) {
1599		/* Port is disabled. Queue for when it is enabled. */
1600		spin_lock_irqsave(&port->slock, flags);
1601		list_add_tail(&buffer->list, &port->buffers);
1602		spin_unlock_irqrestore(&port->slock, flags);
1603	}
1604
1605	return 0;
1606}
1607EXPORT_SYMBOL_GPL(vchiq_mmal_submit_buffer);
1608
1609int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
1610			  struct mmal_buffer *buf)
1611{
1612	struct mmal_msg_context *msg_context = get_msg_context(instance);
1613
1614	if (IS_ERR(msg_context))
1615		return (PTR_ERR(msg_context));
1616
1617	buf->msg_context = msg_context;
1618	return 0;
1619}
1620EXPORT_SYMBOL_GPL(mmal_vchi_buffer_init);
1621
1622int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
1623{
1624	struct mmal_msg_context *msg_context = buf->msg_context;
1625
1626	if (msg_context)
1627		release_msg_context(msg_context);
1628	buf->msg_context = NULL;
1629
1630	return 0;
1631}
1632EXPORT_SYMBOL_GPL(mmal_vchi_buffer_cleanup);
1633
1634/* Initialise a mmal component and its ports
1635 *
1636 */
/* Initialise a mmal component and its ports
 *
 * Claims a free slot in instance->component[], creates the component on
 * the VPU, then gathers port info for the control, input, output and
 * clock ports. On any failure the VPU component is destroyed (if it was
 * created) and the slot's in_use flag is cleared.
 */
int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
			      const char *name,
			      struct vchiq_mmal_component **component_out)
{
	int ret;
	int idx;		/* port index */
	struct vchiq_mmal_component *component = NULL;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* find a free component slot; protected by vchiq_mutex */
	for (idx = 0; idx < VCHIQ_MMAL_MAX_COMPONENTS; idx++) {
		if (!instance->component[idx].in_use) {
			component = &instance->component[idx];
			component->in_use = true;
			break;
		}
	}

	if (!component) {
		ret = -EINVAL;	/* todo is this correct error? */
		goto unlock;
	}

	/* We need a handle to reference back to our component structure.
	 * Use the array index in instance->component rather than rolling
	 * another IDR.
	 */
	component->client_component = idx;

	ret = create_component(instance, component, name);
	if (ret < 0) {
		pr_err("%s: failed to create component %d (Not enough GPU mem?)\n",
		       __func__, ret);
		goto unlock;
	}

	/* ports info needs gathering */
	component->control.type = MMAL_PORT_TYPE_CONTROL;
	component->control.index = 0;
	component->control.component = component;
	spin_lock_init(&component->control.slock);
	INIT_LIST_HEAD(&component->control.buffers);
	ret = port_info_get(instance, &component->control);
	if (ret < 0)
		goto release_component;

	/* component->inputs/outputs/clocks were filled in by
	 * create_component() from the VPU's reply
	 */
	for (idx = 0; idx < component->inputs; idx++) {
		component->input[idx].type = MMAL_PORT_TYPE_INPUT;
		component->input[idx].index = idx;
		component->input[idx].component = component;
		spin_lock_init(&component->input[idx].slock);
		INIT_LIST_HEAD(&component->input[idx].buffers);
		ret = port_info_get(instance, &component->input[idx]);
		if (ret < 0)
			goto release_component;
	}

	for (idx = 0; idx < component->outputs; idx++) {
		component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
		component->output[idx].index = idx;
		component->output[idx].component = component;
		spin_lock_init(&component->output[idx].slock);
		INIT_LIST_HEAD(&component->output[idx].buffers);
		ret = port_info_get(instance, &component->output[idx]);
		if (ret < 0)
			goto release_component;
	}

	for (idx = 0; idx < component->clocks; idx++) {
		component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
		component->clock[idx].index = idx;
		component->clock[idx].component = component;
		spin_lock_init(&component->clock[idx].slock);
		INIT_LIST_HEAD(&component->clock[idx].buffers);
		ret = port_info_get(instance, &component->clock[idx]);
		if (ret < 0)
			goto release_component;
	}

	*component_out = component;

	mutex_unlock(&instance->vchiq_mutex);

	return 0;

release_component:
	/* destroy result deliberately ignored; ret keeps the original error */
	destroy_component(instance, component);
unlock:
	if (component)
		component->in_use = false;
	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
1732EXPORT_SYMBOL_GPL(vchiq_mmal_component_init);
1733
1734/*
1735 * cause a mmal component to be destroyed
1736 */
1737int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
1738				  struct vchiq_mmal_component *component)
1739{
1740	int ret;
1741
1742	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1743		return -EINTR;
1744
1745	if (component->enabled)
1746		ret = disable_component(instance, component);
1747
1748	ret = destroy_component(instance, component);
1749
1750	component->in_use = false;
1751
1752	mutex_unlock(&instance->vchiq_mutex);
1753
1754	return ret;
1755}
1756EXPORT_SYMBOL_GPL(vchiq_mmal_component_finalise);
1757
1758/*
1759 * cause a mmal component to be enabled
1760 */
1761int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
1762				struct vchiq_mmal_component *component)
1763{
1764	int ret;
1765
1766	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1767		return -EINTR;
1768
1769	if (component->enabled) {
1770		mutex_unlock(&instance->vchiq_mutex);
1771		return 0;
1772	}
1773
1774	ret = enable_component(instance, component);
1775	if (ret == 0)
1776		component->enabled = true;
1777
1778	mutex_unlock(&instance->vchiq_mutex);
1779
1780	return ret;
1781}
1782EXPORT_SYMBOL_GPL(vchiq_mmal_component_enable);
1783
1784/*
1785 * cause a mmal component to be enabled
1786 */
1787int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
1788				 struct vchiq_mmal_component *component)
1789{
1790	int ret;
1791
1792	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1793		return -EINTR;
1794
1795	if (!component->enabled) {
1796		mutex_unlock(&instance->vchiq_mutex);
1797		return 0;
1798	}
1799
1800	ret = disable_component(instance, component);
1801	if (ret == 0)
1802		component->enabled = false;
1803
1804	mutex_unlock(&instance->vchiq_mutex);
1805
1806	return ret;
1807}
1808EXPORT_SYMBOL_GPL(vchiq_mmal_component_disable);
1809
1810int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
1811		       u32 *major_out, u32 *minor_out)
1812{
1813	int ret;
1814
1815	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1816		return -EINTR;
1817
1818	ret = get_version(instance, major_out, minor_out);
1819
1820	mutex_unlock(&instance->vchiq_mutex);
1821
1822	return ret;
1823}
1824EXPORT_SYMBOL_GPL(vchiq_mmal_version);
1825
/* Tear down an instance created by vchiq_mmal_init(): close the VCHIQ
 * service, shut down the VCHIQ connection, and free all instance
 * resources. Returns the close status (teardown continues regardless).
 */
int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
{
	int status = 0;

	if (!instance)
		return -EINVAL;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* take a use count so the service is active for the close */
	vchiq_use_service(instance->vchiq_instance, instance->service_handle);

	status = vchiq_close_service(instance->vchiq_instance, instance->service_handle);
	if (status != 0)
		pr_err("mmal-vchiq: VCHIQ close failed\n");

	mutex_unlock(&instance->vchiq_mutex);

	/* service is closed: safe to shut down vchiq and free resources */
	vchiq_shutdown(instance->vchiq_instance);
	destroy_workqueue(instance->bulk_wq);

	idr_destroy(&instance->context_map);

	kfree(instance);

	return status;
}
1853EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
1854
1855int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
1856{
1857	int status;
1858	int err = -ENODEV;
1859	struct vchiq_mmal_instance *instance;
1860	struct vchiq_instance *vchiq_instance;
1861	struct vchiq_service_params_kernel params = {
1862		.version		= VC_MMAL_VER,
1863		.version_min		= VC_MMAL_MIN_VER,
1864		.fourcc			= VCHIQ_MAKE_FOURCC('m', 'm', 'a', 'l'),
1865		.callback		= service_callback,
1866		.userdata		= NULL,
1867	};
1868
1869	/* compile time checks to ensure structure size as they are
1870	 * directly (de)serialised from memory.
1871	 */
1872
1873	/* ensure the header structure has packed to the correct size */
1874	BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
1875
1876	/* ensure message structure does not exceed maximum length */
1877	BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
1878
1879	/* mmal port struct is correct size */
1880	BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
1881
1882	/* create a vchi instance */
1883	status = vchiq_initialise(&vchiq_instance);
1884	if (status) {
1885		pr_err("Failed to initialise VCHI instance (status=%d)\n",
1886		       status);
1887		return -EIO;
1888	}
1889
1890	status = vchiq_connect(vchiq_instance);
1891	if (status) {
1892		pr_err("Failed to connect VCHI instance (status=%d)\n", status);
1893		err = -EIO;
1894		goto err_shutdown_vchiq;
1895	}
1896
1897	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1898
1899	if (!instance) {
1900		err = -ENOMEM;
1901		goto err_shutdown_vchiq;
1902	}
1903
1904	mutex_init(&instance->vchiq_mutex);
1905
1906	instance->vchiq_instance = vchiq_instance;
1907
1908	mutex_init(&instance->context_map_lock);
1909	idr_init_base(&instance->context_map, 1);
1910
1911	params.userdata = instance;
1912
1913	instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
1914						    WQ_MEM_RECLAIM);
1915	if (!instance->bulk_wq)
1916		goto err_free;
1917
1918	status = vchiq_open_service(vchiq_instance, &params,
1919				    &instance->service_handle);
1920	if (status) {
1921		pr_err("Failed to open VCHI service connection (status=%d)\n",
1922		       status);
1923		goto err_close_services;
1924	}
1925
1926	vchiq_release_service(instance->vchiq_instance, instance->service_handle);
1927
1928	*out_instance = instance;
1929
1930	return 0;
1931
1932err_close_services:
1933	vchiq_close_service(instance->vchiq_instance, instance->service_handle);
1934	destroy_workqueue(instance->bulk_wq);
1935err_free:
1936	kfree(instance);
1937err_shutdown_vchiq:
1938	vchiq_shutdown(vchiq_instance);
1939	return err;
1940}
1941EXPORT_SYMBOL_GPL(vchiq_mmal_init);
1942
1943MODULE_DESCRIPTION("BCM2835 MMAL VCHIQ interface");
1944MODULE_AUTHOR("Dave Stevenson, <dave.stevenson@raspberrypi.org>");
1945MODULE_LICENSE("GPL");
1946