/*
 * Copyright (c) 1999-2000, Eric Moon.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions, and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


// AudioFilterNode.h
// * PURPOSE
//   A framework class designed to make it easy to develop simple
//   audio filters in the BeOS Media Kit.  The actual DSP work
//   is done externally by an IAudioOp object.
//
// * NOT SUPPORTED YET...
//   - multiple inputs or outputs
//   - chaining multiple operations (yum)
//
// * HISTORY
//   e.moon		7sep99		Begun: abstracting from FlangerNode and
//                      implementing IAudioOp.

#ifndef __AudioFilterNode_H__
#define __AudioFilterNode_H__

#include <BufferProducer.h>
#include <BufferConsumer.h>
#include <Controllable.h>
#include <Debug.h>
#include <MediaEventLooper.h>

#include "IAudioOpHost.h"

// forwards
class BBufferGroup;
class BMediaAddOn;

class AudioBuffer;
class IAudioOpFactory;
class IAudioOp;
class IParameterSet;

class AudioFilterNode :
	public		BBufferConsumer,
	public		BBufferProducer,
	public		BControllable,
	public		BMediaEventLooper,
	public		IAudioOpHost {

public:													// *** HOOKS

	// *** FORMAT NEGOTIATION

	// +++++ 8sep99: there's currently no differentiation made between input
	//               and output formats.  there probably should be, though for
	//               now you can do extra format restriction in your factory's
	//               createOp() implementation... which is too late.
	// +++++
	// +++++ okay, time to split out into getRequiredInputFormat(), ... etc.
	//       Subclasses allowing format conversion will need to restrict buffer
	//       sizes based on the current connection (input or output.)

	// requests the required format for the given type (ioFormat.type must be
	// filled in!)
	// upon returning, any fields left as wildcards are negotiable.
	// The default implementations hand off to getPreferredInputFormat() and
	// getPreferredOutputFormat(), yielding a rather strict node.

	virtual status_t getRequiredInputFormat(
		media_format&								ioFormat) { return getPreferredInputFormat(ioFormat); }

	virtual status_t getRequiredOutputFormat(
		media_format&								ioFormat) { return getPreferredOutputFormat(ioFormat); }

	// requests the preferred format for the given type (ioFormat.type must be
	// filled in!)
	// upon returning, all fields must be filled in.
	//
	// Default raw audio format:
	//   44100 Hz
	//   host-endian
	//   1 channel
	//   float
	//   1k buffers
	//
	virtual status_t getPreferredInputFormat(
		media_format&								ioFormat);
	virtual status_t getPreferredOutputFormat(
		media_format&								ioFormat);
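
	// For illustration only: a minimal sketch of how a subclass might
	// override this hook.  The subclass name 'MyFilterNode' is hypothetical,
	// not part of this framework.  Here the node prefers stereo input but
	// otherwise keeps the defaults, leaving every field fully specified as
	// this hook requires:
	//
	//   status_t MyFilterNode::getPreferredInputFormat(
	//       media_format& ioFormat) {
	//       status_t err = AudioFilterNode::getPreferredInputFormat(ioFormat);
	//       if (err < B_OK)
	//           return err;
	//       ioFormat.u.raw_audio.channel_count = 2;   // stereo instead of mono
	//       return B_OK;
	//   }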

	// test the given template format against a proposed format.
	// specialize wildcards for fields where the template contains
	// non-wildcard data; write required fields into proposed format
	// if they mismatch.
	// Returns B_OK if the proposed format doesn't conflict with the
	// template, or B_MEDIA_BAD_FORMAT otherwise.

	virtual status_t validateProposedInputFormat(
		const media_format&					preferredFormat,
		media_format&								ioProposedFormat);

	virtual status_t validateProposedOutputFormat(
		const media_format&					preferredFormat,
		media_format&								ioProposedFormat);
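
	// Illustration only (a hedged reading of the contract above, not a
	// verbatim trace of the implementation): assuming 'preferred' holds the
	// template format (the node's default calls for 44100 Hz float), a
	// proposal asking for a different frame rate is corrected and rejected:
	//
	//   media_format proposed;
	//   proposed.type = B_MEDIA_RAW_AUDIO;
	//   proposed.u.raw_audio = media_raw_audio_format::wildcard;
	//   proposed.u.raw_audio.frame_rate = 22050.0;
	//   status_t err = validateProposedInputFormat(preferred, proposed);
	//   // err == B_MEDIA_BAD_FORMAT; proposed.u.raw_audio.frame_rate has
	//   // been overwritten with the required 44100.0.  Per the contract
	//   // above, wildcard fields are simply specialized and don't cause a
	//   // rejection.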

	// fill in wildcards in the given format.
	// (assumes the format passes validateProposedInputFormat() /
	//  validateProposedOutputFormat().)
	// Default implementation: specializes all wildcards to format returned by
	//                         getPreferredXXXFormat()

	virtual void specializeInputFormat(
		media_format&								ioFormat) {

		media_format preferred;
		preferred.type = B_MEDIA_RAW_AUDIO;
#ifdef DEBUG
		status_t err =
#endif
		getPreferredInputFormat(preferred);
		ASSERT(err == B_OK);
		_specialize_raw_audio_format(preferred, ioFormat);
	}

	virtual void specializeOutputFormat(
		media_format&								ioFormat) {

		char fmt_buffer[256];
		string_for_format(ioFormat, fmt_buffer, 255);
		PRINT((
			"### specializeOutputFormat:\n"
			"    given '%s'\n", fmt_buffer));

		media_format preferred;
		preferred.type = B_MEDIA_RAW_AUDIO;
#ifdef DEBUG
		status_t err =
#endif
		getPreferredOutputFormat(preferred);
		ASSERT(err == B_OK);

		string_for_format(preferred, fmt_buffer, 255);
		PRINT((
			"    pref '%s'\n", fmt_buffer));

//		ioFormat.SpecializeTo(&preferred);
		_specialize_raw_audio_format(preferred, ioFormat);

		string_for_format(ioFormat, fmt_buffer, 255);
		PRINT((
			"    writing '%s'\n", fmt_buffer));
	}

public:													// *** ctor/dtor

	virtual ~AudioFilterNode();

	// the node acquires ownership of opFactory
	AudioFilterNode(
		const char*									name,
		IAudioOpFactory*						opFactory,
		BMediaAddOn*								addOn=0);
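
	// Usage sketch (the factory class named here is hypothetical): an add-on
	// or application hands the node a factory for the DSP operation it should
	// host; the node takes ownership of that factory.
	//
	//   AudioFilterNode* node = new AudioFilterNode(
	//       "Flanger",                   // node name
	//       new FlangerOpFactory(),      // hypothetical IAudioOpFactory
	//       addOn);                      // hosting BMediaAddOn, or 0 for none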

public:													// *** accessors
	const media_input& input() const { return m_input; }
	const media_output& output() const { return m_output; }

public:													// *** BMediaNode

	virtual status_t HandleMessage(
		int32												code,
		const void*									data,
		size_t											size);

	virtual BMediaAddOn* AddOn(
		int32*											outID) const;

	virtual void SetRunMode(
		run_mode										mode);

protected:											// *** BMediaEventLooper

	virtual void HandleEvent(
		const media_timed_event*		event,
		bigtime_t										howLate,
		bool												realTimeEvent=false);

	// "The Media Server calls this hook function after the node has
	//  been registered.  This is derived from BMediaNode; BMediaEventLooper
	//  implements it to call Run() automatically when the node is registered;
	//  if you implement NodeRegistered() you should call through to
	//  BMediaEventLooper::NodeRegistered() after you've done your custom
	//  operations."

	virtual void NodeRegistered();

	// "Augment OfflineTime() to compute the node's current time; it's called
	//  by the Media Kit when it's in offline mode. Update any appropriate
	//  internal information as well, then call through to the BMediaEventLooper
	//  implementation."

	virtual bigtime_t OfflineTime(); //nyi

public:													// *** BBufferConsumer

	virtual status_t AcceptFormat(
		const media_destination&		destination,
		media_format*								ioFormat);

	// "If you're writing a node, and receive a buffer with the B_SMALL_BUFFER
	//  flag set, you must recycle the buffer before returning."

	virtual void BufferReceived(
		BBuffer*										buffer);

	// * make sure to fill in outInput->format with the contents of
	//   format; as of R4.5 the Media Kit passes outInput->format to
	//   the producer in BBufferProducer::Connect().

	virtual status_t Connected(
		const media_source&					source,
		const media_destination&		destination,
		const media_format&					format,
		media_input*								outInput);

	virtual void Disconnected(
		const media_source&					source,
		const media_destination&		destination);

	virtual void DisposeInputCookie(
		int32												cookie);

	// "You should implement this function so your node will know that the data
	//  format is going to change. Note that this may be called in response to
	//  your AcceptFormat() call, if your AcceptFormat() call alters any wildcard
	//  fields in the specified format.
	//
	//  Because FormatChanged() is called by the producer, you don't need to (and
	//  shouldn't) ask it if the new format is acceptable.
	//
	//  If the format change isn't possible, return an appropriate error from
	//  FormatChanged(); this error will be passed back to the producer that
	//  initiated the new format negotiation in the first place."

	virtual status_t FormatChanged(
		const media_source&					source,
		const media_destination&		destination,
		int32												changeTag,
		const media_format&					newFormat);

	virtual status_t GetLatencyFor(
		const media_destination&		destination,
		bigtime_t*									outLatency,
		media_node_id*							outTimeSource);

	virtual status_t GetNextInput(
		int32*											ioCookie,
		media_input*								outInput);

	virtual void ProducerDataStatus(
		const media_destination&		destination,
		int32												status,
		bigtime_t										tpWhen);

	// "This function is provided to aid in supporting media formats in which the
	//  outer encapsulation layer doesn't supply timing information. Producers will
	//  tag the buffers they generate with seek tags; these tags can be used to
	//  locate key frames in the media data."

	virtual status_t SeekTagRequested(
		const media_destination&		destination,
		bigtime_t										targetTime,
		uint32											flags,
		media_seek_tag*							outSeekTag,
		bigtime_t*									outTaggedTime,
		uint32*											outFlags);

public:													// *** BBufferProducer

	// "When a consumer calls BBufferConsumer::RequestAdditionalBuffer(), this
	//  function is called as a result. Its job is to call SendBuffer() to
	//  immediately send the next buffer to the consumer. The previousBufferID,
	//  previousTime, and previousTag arguments identify the last buffer the
	//  consumer received. Your node should respond by sending the next buffer
	//  after the one described.
	//
	//  The previousTag may be NULL.
	//  Return B_OK if all is well; otherwise return an appropriate error code."
	virtual void AdditionalBufferRequested(
		const media_source&					source,
		media_buffer_id							previousBufferID,
		bigtime_t										previousTime,
		const media_seek_tag*				previousTag); //nyi

	virtual void Connect(
		status_t										status,
		const media_source&					source,
		const media_destination&		destination,
		const media_format&					format,
		char*												ioName);

	virtual void Disconnect(
		const media_source&					source,
		const media_destination&		destination);

	virtual status_t DisposeOutputCookie(
		int32												cookie);

	virtual void EnableOutput(
		const media_source&					source,
		bool												enabled,
		int32* _deprecated_);

	virtual status_t FormatChangeRequested(
		const media_source&					source,
		const media_destination&		destination,
		media_format*								ioFormat,
		int32* _deprecated_); //nyi

	virtual status_t FormatProposal(
		const media_source&					source,
		media_format*								ioFormat);

	virtual status_t FormatSuggestionRequested(
		media_type									type,
		int32												quality,
		media_format*								outFormat);

	virtual status_t GetLatency(
		bigtime_t*									outLatency);

	virtual status_t GetNextOutput(
		int32*											ioCookie,
		media_output*								outOutput);

	// "This hook function is called when a BBufferConsumer that's receiving data
	//  from you determines that its latency has changed. It will call its
	//  BBufferConsumer::SendLatencyChange() function, and in response, the Media
	//  Server will call your LatencyChanged() function.  The source argument
	//  indicates your output that's involved in the connection, and destination
	//  specifies the input on the consumer to which the connection is linked.
	//  newLatency is the consumer's new latency. The flags are currently unused."
	virtual void LatencyChanged(
		const media_source&					source,
		const media_destination&		destination,
		bigtime_t										newLatency,
		uint32											flags);

	virtual void LateNoticeReceived(
		const media_source&					source,
		bigtime_t										howLate,
		bigtime_t										tpWhen);

	// PrepareToConnect() is the second stage of format negotiations that happens
	// inside BMediaRoster::Connect().  At this point, the consumer's AcceptFormat()
	// method has been called, and that node has potentially changed the proposed
	// format.  It may also have left wildcards in the format.  PrepareToConnect()
	// *must* fully specialize the format before returning!

	virtual status_t PrepareToConnect(
		const media_source&					source,
		const media_destination&		destination,
		media_format*								ioFormat,
		media_source*								outSource,
		char*												outName);

	virtual status_t SetBufferGroup(
		const media_source&					source,
		BBufferGroup*								group);

	virtual status_t SetPlayRate(
		int32												numerator,
		int32												denominator);

	virtual status_t VideoClippingChanged(
		const media_source&					source,
		int16												numShorts,
		int16*											clipData,
		const media_video_display_info& display,
		int32*											outFromChangeTag);

public:													// *** BControllable

	virtual status_t GetParameterValue(
		int32												id,
		bigtime_t*									outLastChangeTime,
		void*												outValue,
		size_t*											ioSize);

	virtual void SetParameterValue(
		int32												id,
		bigtime_t										changeTime,
		const void*									value,
		size_t											size);

public:													// *** IAudioOpHost
	virtual IParameterSet* parameterSet() const;

protected:											// HandleEvent() impl.
	void handleParameterEvent(
		const media_timed_event*		event);

	void handleStartEvent(
		const media_timed_event*		event);

	void handleStopEvent(
		const media_timed_event*		event);

	void ignoreEvent(
		const media_timed_event*		event);

protected:											// *** internal operations

	status_t prepareFormatChange(
		const media_format&         newFormat);
	void doFormatChange(
		const media_format&         newFormat);

	// create and register a parameter web
	// +++++ 7sep99: hand off to IParameterSet::makeGroup()
	virtual void initParameterWeb();

	// [re-]initialize operation if necessary
	virtual void updateOperation();

	// create or discard buffer group if necessary
	virtual void updateBufferGroup();

//	// construct delay line if necessary, reset filter state
//	virtual void initFilter();
//
//	virtual void startFilter();
//	virtual void stopFilter();

	// figure processing latency by doing 'dry runs' of processBuffer()
	virtual bigtime_t calcProcessingLatency();
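
	// The gist (an assumption about the implementation, not a spec): time a
	// 'dry run' of processBuffer() over a scratch buffer of the negotiated
	// size and use the elapsed real time as the per-buffer processing latency.
	//
	//   bigtime_t start = system_time();
	//   processBuffer(scratchBuffer, scratchBuffer);   // in-place dry run
	//   bigtime_t processingLatency = system_time() - start;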

	// filter buffer data; inputBuffer and outputBuffer may be the same
	virtual void processBuffer(
		BBuffer*										inputBuffer,
		BBuffer*										outputBuffer);
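
	// Rough sketch of the expected path of one buffer through the node (an
	// assumption based on the hooks above, not a verbatim trace of
	// HandleEvent()):
	//
	//   BBuffer* inBuffer = ...;   // buffer delivered via BufferReceived()
	//   BBuffer* outBuffer = m_bufferGroup
	//       ? m_bufferGroup->RequestBuffer(
	//           m_output.format.u.raw_audio.buffer_size)
	//       : inBuffer;                               // filter in place if possible
	//   processBuffer(inBuffer, outBuffer);           // run the current IAudioOp
	//   SendBuffer(outBuffer, m_output.destination);  // hand the result downstream
	//   if (outBuffer != inBuffer)
	//       inBuffer->Recycle();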


	status_t _validate_raw_audio_format(
		const media_format&					preferredFormat,
		media_format&								ioProposedFormat);

	void _specialize_raw_audio_format(
		const media_format&					templateFormat,
		media_format&								ioFormat);

private:												// *** connection/format members

	// +++++ expose input & output to subclasses [8sep99]

	// Connections & associated state variables
	media_input				m_input;

	media_output			m_output;
	bool							m_outputEnabled;

	// Time required by downstream consumer(s) to properly deliver a buffer
	bigtime_t					m_downstreamLatency;

	// Worst-case time needed to fill a buffer
	bigtime_t					m_processingLatency;

	// buffer group for on-the-fly conversion [8sep99]
	// (only created when necessary, i.e. more data going out than coming in)
	BBufferGroup*			m_bufferGroup;

private:												// *** parameters
	IParameterSet*								m_parameterSet;

private:												// *** operations
	IAudioOpFactory*	m_opFactory;

	// the current operation
	IAudioOp*					m_op;

private:												// *** add-on stuff

	// host add-on
	BMediaAddOn*	m_addOn;
};

#endif /*__AudioFilterNode_H__*/