/*
 * Copyright 2002-2014 Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Christopher ML Zumwalt May (zummy@users.sf.net)
 */


/*	A MediaKit producer node which mixes sounds from the GameKit
	and sends them to the audio mixer.
*/


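/*	Rough usage sketch (an assumption for illustration, not part of this
	file): an instance is typically registered with the media roster and then
	wired to the system mixer, roughly like this -- see the BMediaRoster
	documentation for the exact signatures:

		GameProducer* producer = new GameProducer(soundBuffer, &format);
		BMediaRoster* roster = BMediaRoster::Roster();
		roster->RegisterNode(producer);

		media_node mixer;
		roster->GetAudioMixer(&mixer);
		// ...then find a free output on the producer and a free input on the
		// mixer, and call roster->Connect() on that pair.
*/

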
#include "GameProducer.h"

#include <stdio.h>
#include <string.h>

#include <Buffer.h>
#include <BufferGroup.h>
#include <ByteOrder.h>
#include <List.h>
#include <MediaDefs.h>
#include <TimeSource.h>

#include "GameSoundBuffer.h"
#include "GameSoundDevice.h"
#include "GSUtility.h"


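// Record kept for each playing sound. The next/previous pointers chain these
// records into a doubly-linked list; the list itself is presumably maintained
// by the GameKit sound device code, as it is not referenced in this file.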
struct _gs_play {
	gs_id		sound;
	bool*		hook;

	_gs_play*	next;
	_gs_play*	previous;
};


GameProducer::GameProducer(GameSoundBuffer* object,
	const gs_audio_format* format)
	:
	BMediaNode("GameProducer.h"),
	BBufferProducer(B_MEDIA_RAW_AUDIO),
	BMediaEventLooper(),
	fBufferGroup(NULL),
	fLatency(0),
	fInternalLatency(0),
	fOutputEnabled(true)
{
	// initialize our preferred format object
	fPreferredFormat.type = B_MEDIA_RAW_AUDIO;
	fPreferredFormat.u.raw_audio.format = format->format;
	fPreferredFormat.u.raw_audio.channel_count = format->channel_count;
	fPreferredFormat.u.raw_audio.frame_rate = format->frame_rate; // Hertz
	fPreferredFormat.u.raw_audio.byte_order = format->byte_order;
//	fPreferredFormat.u.raw_audio.channel_mask
//		= B_CHANNEL_LEFT | B_CHANNEL_RIGHT;
//	fPreferredFormat.u.raw_audio.valid_bits = 32;
//	fPreferredFormat.u.raw_audio.matrix_mask = B_MATRIX_AMBISONIC_WXYZ;

	// we'll use the consumer's preferred buffer size, if any
	fPreferredFormat.u.raw_audio.buffer_size
		= media_raw_audio_format::wildcard.buffer_size;

	// we're not connected yet
	fOutput.destination = media_destination::null;
	fOutput.format = fPreferredFormat;

	fFrameSize = get_sample_size(format->format) * format->channel_count;
	fObject = object;
}


GameProducer::~GameProducer()
{
	// Stop the BMediaEventLooper thread
	Quit();
}


// BMediaNode methods
BMediaAddOn*
GameProducer::AddOn(int32* internal_id) const
{
	return NULL;
}


// BBufferProducer methods
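// Outputs are enumerated cookie-style: a caller starts with a cookie of zero
// and keeps calling GetNextOutput() until it returns B_BAD_INDEX. This node
// only ever reports its single output.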
status_t
GameProducer::GetNextOutput(int32* cookie, media_output* _output)
{
	// we currently support only one output
	if (*cookie != 0)
		return B_BAD_INDEX;

	*_output = fOutput;
	*cookie += 1;
	return B_OK;
}


status_t
GameProducer::DisposeOutputCookie(int32 cookie)
{
	// do nothing, because our cookie is just an integer
	return B_OK;
}


void
GameProducer::EnableOutput(const media_source& what, bool enabled,
	int32* _deprecated_)
{
	// If we had more than one output, we would have to walk our list of output
	// records to see which one matches the given source, and then enable or
	// disable that one. But this node only has one output, so we just make
	// sure the given source matches, then set the enable state accordingly.
	if (what == fOutput.source)
		fOutputEnabled = enabled;
}


status_t
GameProducer::FormatSuggestionRequested(media_type type, int32 /*quality*/,
	media_format* format)
{
	// ensure that we received a format
	if (!format)
		return B_BAD_VALUE;

	// return our preferred format
	*format = fPreferredFormat;

	// a wildcard type is acceptable
	if (type == B_MEDIA_UNKNOWN_TYPE)
		return B_OK;

	// we only support raw audio
	return (type != B_MEDIA_RAW_AUDIO) ? B_MEDIA_BAD_FORMAT : B_OK;
}


status_t
GameProducer::FormatProposal(const media_source& output, media_format* format)
{
	// does the proposed output match our output?
	if (output != fOutput.source)
		return B_MEDIA_BAD_SOURCE;

	// return our preferred format
	*format = fPreferredFormat;

	// we will reject the proposal if the format is not audio
	media_type requestedType = format->type;
	if ((requestedType != B_MEDIA_UNKNOWN_TYPE)
		&& (requestedType != B_MEDIA_RAW_AUDIO)) {
		return B_MEDIA_BAD_FORMAT;
	}

	return B_OK;		// raw audio or wildcard type, either is okay by us
}


status_t
GameProducer::PrepareToConnect(const media_source& what,
	const media_destination& where, media_format* format,
	media_source* _source, char* out_name)
{
	// The format has been processed by the consumer at this point. We need to
	// ensure the format is still acceptable and that any wildcards are filled
	// in.

	// trying to connect something that isn't our source?
	if (what != fOutput.source)
		return B_MEDIA_BAD_SOURCE;

	// are we already connected?
	if (fOutput.destination != media_destination::null)
		return B_MEDIA_ALREADY_CONNECTED;

	// the format may not yet be fully specialized (the consumer might have
	// passed back some wildcards).  Finish specializing it now, and return an
	// error if we don't support the requested format.
	if (format->type != B_MEDIA_RAW_AUDIO)
		return B_MEDIA_BAD_FORMAT;

	if (format->u.raw_audio.format != fPreferredFormat.u.raw_audio.format)
		return B_MEDIA_BAD_FORMAT;

	// check the buffer size, which may still be wildcarded
	if (format->u.raw_audio.buffer_size
		== media_raw_audio_format::wildcard.buffer_size) {
		format->u.raw_audio.buffer_size = 4096;
			// pick something comfortable to suggest
	}

	// Now reserve the connection, and return information about it
	fOutput.destination = where;
	fOutput.format = *format;
	*_source = fOutput.source;
	strlcpy(out_name, fOutput.name, B_MEDIA_NAME_LENGTH);
	return B_OK;
}


void
GameProducer::Connect(status_t error, const media_source& source,
	const media_destination& destination, const media_format& format,
	char* ioName)
{
	// If something earlier failed, Connect() might still be called, but with a
	// non-zero error code.  When that happens we simply unreserve the
	// connection and do nothing else.
	if (error) {
		fOutput.destination = media_destination::null;
		fOutput.format = fPreferredFormat;
		return;
	}

	// Okay, the connection has been confirmed.  Record the destination and
	// format that we agreed on, and report our connection name again.
	fOutput.destination = destination;
	fOutput.format = format;
	strlcpy(ioName, fOutput.name, B_MEDIA_NAME_LENGTH);

	// Now that we're connected, we can determine our downstream latency.
	// Do so, then make sure we get our events early enough.
	media_node_id id;
	FindLatencyFor(fOutput.destination, &fLatency, &id);

	if (!fBufferGroup)
		fBufferSize = fOutput.format.u.raw_audio.buffer_size;
			// this has to be set before calculating the latency below

	// Use a dry run to see how long it takes to fill a buffer of data.

	// First, set up a scratch buffer
	bigtime_t start, produceLatency;
	int32 frames = int32(fBufferSize / fFrameSize);
	float* data = new float[frames * 2];

	// Second, fill the buffer
	start = ::system_time();
	for (int32 i = 0; i < frames; i++) {
		data[i * 2] = 0.8f * (float(i) / float(frames));
		data[i * 2 + 1] = 0.8f * (float(i) / float(frames));
	}
	produceLatency = ::system_time();

	// Third, calculate the latency
	fInternalLatency = produceLatency - start;
	SetEventLatency(fLatency + fInternalLatency);

	// Finally, clean up
	delete[] data;

	// set our buffer duration now, to avoid recalculating it later
	bigtime_t duration = bigtime_t(1000000) * frames
		/ bigtime_t(fOutput.format.u.raw_audio.frame_rate);
	SetBufferDuration(duration);
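	// For example (illustrative numbers only): a 4096-byte buffer of stereo
	// float samples is 8 bytes per frame, so frames = 512; at 44100 Hz that
	// works out to a duration of roughly 11609 microseconds per buffer.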

	// Set up the buffer group for our connection, as long as nobody handed us a
	// buffer group (via SetBufferGroup()) prior to this.
	if (!fBufferGroup) {
		int32 count = int32(fLatency / BufferDuration() + 2);
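		// Illustrative example (assumed numbers): with ~50 ms of downstream
		// latency and ~11.6 ms buffers, this comes to 50000 / 11609 + 2 = 6
		// buffers in the group.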
		fBufferGroup = new BBufferGroup(fBufferSize, count);
	}
}


void
GameProducer::Disconnect(const media_source& what,
	const media_destination& where)
{
	// Make sure that our connection is the one being disconnected
	if ((where == fOutput.destination) && (what == fOutput.source)) {
		fOutput.destination = media_destination::null;
		fOutput.format = fPreferredFormat;
		delete fBufferGroup;
		fBufferGroup = NULL;
	}
}


status_t
GameProducer::FormatChangeRequested(const media_source& source,
	const media_destination& destination, media_format* io_format,
	int32* _deprecated_)
{
	// we don't support any other formats, so we just reject any format changes.
	return B_ERROR;
}


status_t
GameProducer::SetBufferGroup(const media_source& forSource,
	BBufferGroup* newGroup)
{
	// verify that we didn't get bogus arguments before we proceed
	if (forSource != fOutput.source)
		return B_MEDIA_BAD_SOURCE;

	// Are we being passed the buffer group we're already using?
	if (newGroup == fBufferGroup)
		return B_OK;

	// Ahh, someone wants us to use a different buffer group.  At this point we
	// delete the one we are using and use the specified one instead. If the
	// specified group is NULL, we need to recreate one ourselves, and use
	// *that*. Note that if we're caching a BBuffer that we requested earlier,
	// we have to Recycle() that buffer *before* deleting the buffer group,
	// otherwise we'll deadlock waiting for that buffer to be recycled!
	delete fBufferGroup;		// waits for all buffers to recycle
	if (newGroup != NULL) {
		// we were given a valid group; just use that one from now on
		fBufferGroup = newGroup;

		// get buffer length from the first buffer
		BBuffer* buffers[1];
		if (newGroup->GetBufferList(1, buffers) != B_OK)
			return B_BAD_VALUE;
		fBufferSize = buffers[0]->SizeAvailable();
	} else {
		// we were passed a NULL group pointer; that means we construct
		// our own buffer group to use from now on
		fBufferSize = fOutput.format.u.raw_audio.buffer_size;
		int32 count = int32(fLatency / BufferDuration() + 2);
		fBufferGroup = new BBufferGroup(fBufferSize, count);
	}

	return B_OK;
}


status_t
GameProducer::GetLatency(bigtime_t* _latency)
{
	// report our *total* latency:  internal plus downstream plus scheduling
	*_latency = EventLatency() + SchedulingLatency();
	return B_OK;
}


void
GameProducer::LateNoticeReceived(const media_source& what, bigtime_t howMuch,
	bigtime_t performanceDuration)
{
	// If we're late, we need to catch up.  Respond in a manner appropriate to
	// our current run mode.
	if (what == fOutput.source) {
		if (RunMode() == B_RECORDING) {
			// A hardware capture node can't adjust; it simply emits buffers at
			// appropriate points.  We (partially) simulate this by not
			// adjusting our behavior upon receiving late notices -- after all,
			// the hardware can't choose to capture "sooner"...
		} else if (RunMode() == B_INCREASE_LATENCY) {
			// We're late, and our run mode dictates that we try to produce
			// buffers earlier in order to catch up. This argues that the
			// downstream nodes are not properly reporting their latency, but
			// there's not much we can do about that at the moment, so we try
			// to start producing buffers earlier to compensate.
			fInternalLatency += howMuch;
			SetEventLatency(fLatency + fInternalLatency);
		} else {
			// The other run modes dictate various strategies for sacrificing
			// data quality in the interests of timely data delivery. The way we
			// do this is to skip a buffer, which catches us up in time by one
			// buffer duration.
			size_t nFrames = fBufferSize / fFrameSize;
			fFramesSent += nFrames;
		}
	}
}


void
GameProducer::LatencyChanged(const media_source& source,
	const media_destination& destination, bigtime_t new_latency, uint32 flags)
{
	// something downstream changed latency, so we need to start producing
	// buffers earlier (or later) than we were previously.  Make sure that the
	// connection that changed is ours, and adjust to the new downstream
	// latency if so.
	if ((source == fOutput.source) && (destination == fOutput.destination)) {
		fLatency = new_latency;
		SetEventLatency(fLatency + fInternalLatency);
	}
}


status_t
GameProducer::SetPlayRate(int32 numerator, int32 denominator)
{
	// Play rates are weird.  We don't support them
	return B_ERROR;
}


status_t
GameProducer::HandleMessage(int32 message, const void* data, size_t size)
{
	// We currently do not handle private messages
	return B_ERROR;
}


void
GameProducer::AdditionalBufferRequested(const media_source& source,
	media_buffer_id prev_buffer, bigtime_t prev_time,
	const media_seek_tag* prev_tag)
{
	// we don't support offline mode (yet...)
	return;
}


// BMediaEventLooper methods
void
GameProducer::NodeRegistered()
{
	// set up as much information about our output as we can
	fOutput.source.port = ControlPort();
	fOutput.source.id = 0;
	fOutput.node = Node();
	strlcpy(fOutput.name, "GameProducer Output", B_MEDIA_NAME_LENGTH);

	// Start the BMediaEventLooper thread
	SetPriority(B_REAL_TIME_PRIORITY);
	Run();
}


void
GameProducer::SetRunMode(run_mode mode)
{
	// We don't support offline run mode, so broadcast an error if we're set to
	// B_OFFLINE.  Unfortunately, we can't actually reject the mode change...
	if (mode == B_OFFLINE) {
		ReportError(B_NODE_FAILED_SET_RUN_MODE);
	}
}


void
GameProducer::HandleEvent(const media_timed_event* event, bigtime_t lateness,
	bool realTimeEvent)
{
//	FPRINTF(stderr, "GameProducer::HandleEvent\n");
	switch (event->type) {
	case BTimedEventQueue::B_START:
		// don't do anything if we're already running
		if (RunState() != B_STARTED) {
			// We're going to start sending buffers, so set up the needed
			// bookkeeping
			fFramesSent = 0;
			fStartTime = event->event_time;
			media_timed_event firstBufferEvent(fStartTime,
				BTimedEventQueue::B_HANDLE_BUFFER);

			// Alternatively, we could call HandleEvent() directly with this
			// event, to avoid a trip through the event queue like this:
			//		this->HandleEvent(&firstBufferEvent, 0, false);
			EventQueue()->AddEvent(firstBufferEvent);
		}
		break;

	case BTimedEventQueue::B_STOP:
		// When we handle a stop, we must ensure that downstream consumers don't
		// get any more buffers from us.  This means we have to flush any
		// pending buffer-producing events from the queue.
		EventQueue()->FlushEvents(0, BTimedEventQueue::B_ALWAYS, true,
			BTimedEventQueue::B_HANDLE_BUFFER);
		break;

	case BTimedEventQueue::B_HANDLE_BUFFER:
		{
			// Ensure we're both started and connected before delivering a
			// buffer
			if ((RunState() == BMediaEventLooper::B_STARTED)
				&& (fOutput.destination != media_destination::null)) {
				// Get the next buffer of data
				BBuffer* buffer = FillNextBuffer(event->event_time);
				if (buffer) {
					// Send the buffer downstream if output is enabled
					status_t err = B_ERROR;
					if (fOutputEnabled) {
						err = SendBuffer(buffer, fOutput.source,
							fOutput.destination);
					}
					if (err != B_OK) {
						// we need to recycle the buffer ourselves if output is
						// disabled or if the call to SendBuffer() fails
						buffer->Recycle();
					}
				}

				// track how much media we've delivered so far
				size_t nFrames = fBufferSize / fFrameSize;
				fFramesSent += nFrames;

				// The buffer is on its way; now schedule the next one to go
				bigtime_t nextEvent = fStartTime + bigtime_t(double(fFramesSent)
					/ double(fOutput.format.u.raw_audio.frame_rate)
					* 1000000.0);
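				// For example (assumed numbers): at 44100 Hz with 512 frames
				// per buffer, each buffer pushes nextEvent about 11609
				// microseconds further past fStartTime.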
				media_timed_event nextBufferEvent(nextEvent,
					BTimedEventQueue::B_HANDLE_BUFFER);
				EventQueue()->AddEvent(nextBufferEvent);
			}
		}
		break;

	default:
		break;
	}
}


BBuffer*
GameProducer::FillNextBuffer(bigtime_t event_time)
{
	// get a buffer from our buffer group
	BBuffer* buf = fBufferGroup->RequestBuffer(fBufferSize, BufferDuration());

	// if we fail to get a buffer (for example, if the request times out), we
	// skip this buffer and go on to the next, to avoid locking up the control
	// thread.
	if (!buf)
		return NULL;

	// work out how many frames fit in the buffer, and clear it
	int64 frames = int64(fBufferSize / fFrameSize);
	memset(buf->Data(), 0, fBufferSize);

	// now fill the buffer with data, continuing where the last buffer left off
	fObject->Play(buf->Data(), frames);
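	// (fObject is the GameSoundBuffer handed to the constructor; Play() is
	// expected to write 'frames' frames of audio in the connection's format
	// into the buffer.)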

	// fill in the buffer header
	media_header* hdr = buf->Header();
	hdr->type = B_MEDIA_RAW_AUDIO;
	hdr->size_used = fBufferSize;
	hdr->time_source = TimeSource()->ID();

	bigtime_t stamp;
	if (RunMode() == B_RECORDING) {
		// In B_RECORDING mode, we stamp with the capture time.  We're not
		// really a hardware capture node, but we simulate it by using the
		// (precalculated) time at which this buffer "should" have been created.
		stamp = event_time;
	} else {
		// Okay, we're in one of the "live" performance run modes.  In these
		// modes, we stamp the buffer with the time at which the buffer should
		// be rendered to the output, not with the capture time. fStartTime is
		// the cached value of the first buffer's performance time; we calculate
		// this buffer's performance time as an offset from that time, based on
		// the amount of media we've created so far.
		// Recalculating every buffer like this avoids accumulation of error.
		stamp = fStartTime + bigtime_t(double(fFramesSent)
			/ double(fOutput.format.u.raw_audio.frame_rate) * 1000000.0);
	}
	hdr->start_time = stamp;

	return buf;
}