/*
 * Copyright (C) 2011, 2012 Igalia S.L
 * Copyright (C) 2011 Zan Dobersek <zandobersek@gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"

#if ENABLE(WEB_AUDIO)

#include "AudioFileReader.h"

#include "AudioBus.h"

#include <gio/gio.h>
#include <gst/app/gstappsink.h>
#include <gst/audio/audio.h>
#include <gst/gst.h>
#include <gst/pbutils/pbutils.h>
#include <wtf/Noncopyable.h>
#include <wtf/gobject/GMainLoopSource.h>
#include <wtf/gobject/GRefPtr.h>
#include <wtf/gobject/GUniquePtr.h>

namespace WebCore {

class AudioFileReader {
    WTF_MAKE_NONCOPYABLE(AudioFileReader);
public:
    AudioFileReader(const char* filePath);
    AudioFileReader(const void* data, size_t dataSize);
    ~AudioFileReader();

    PassRefPtr<AudioBus> createBus(float sampleRate, bool mixToMono);

    GstFlowReturn handleSample(GstAppSink*);
    gboolean handleMessage(GstMessage*);
    void handleNewDeinterleavePad(GstPad*);
    void deinterleavePadsConfigured();
    void plugDeinterleave(GstPad*);
    void decodeAudioForBusCreation();

private:
    const void* m_data;
    size_t m_dataSize;
    const char* m_filePath;

    float m_sampleRate;
    GstBufferList* m_frontLeftBuffers;
    GstBufferList* m_frontRightBuffers;

    GstElement* m_pipeline;
    unsigned m_channelSize;
    GRefPtr<GstElement> m_decodebin;
    GRefPtr<GstElement> m_deInterleave;
    GRefPtr<GMainLoop> m_loop;
    bool m_errorOccurred;
};

static void copyGstreamerBuffersToAudioChannel(GstBufferList* buffers, AudioChannel* audioChannel)
{
    float* destination = audioChannel->mutableData();
    unsigned bufferCount = gst_buffer_list_length(buffers);
    for (unsigned i = 0; i < bufferCount; ++i) {
        GstBuffer* buffer = gst_buffer_list_get(buffers, i);
        ASSERT(buffer);
        gsize bufferSize = gst_buffer_get_size(buffer);
        gst_buffer_extract(buffer, 0, destination, bufferSize);
        destination += bufferSize / sizeof(float);
    }
}

static GstFlowReturn onAppsinkPullRequiredCallback(GstAppSink* sink, gpointer userData)
{
    return static_cast<AudioFileReader*>(userData)->handleSample(sink);
}

gboolean messageCallback(GstBus*, GstMessage* message, AudioFileReader* reader)
{
    return reader->handleMessage(message);
}

static void onGStreamerDeinterleavePadAddedCallback(GstElement*, GstPad* pad, AudioFileReader* reader)
{
    reader->handleNewDeinterleavePad(pad);
}

static void onGStreamerDeinterleaveReadyCallback(GstElement*, AudioFileReader* reader)
{
    reader->deinterleavePadsConfigured();
}

static void onGStreamerDecodebinPadAddedCallback(GstElement*, GstPad* pad, AudioFileReader* reader)
{
    reader->plugDeinterleave(pad);
}

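// The callbacks above are plain C trampolines that forward GStreamer signals
// to the AudioFileReader instance passed as user data. Once they have all
// fired, the dynamically assembled pipeline looks roughly like this, with one
// queue ! appsink branch per deinterleaved channel:
//
//   (filesrc | giostreamsrc) ! decodebin ! audioconvert ! audioresample
//       ! capsfilter ! deinterleave ! queue ! appsink  (front left)
//                                   ! queue ! appsink  (front right)
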
AudioFileReader::AudioFileReader(const char* filePath)
    : m_data(0)
    , m_dataSize(0)
    , m_filePath(filePath)
    , m_frontLeftBuffers(0)
    , m_frontRightBuffers(0)
    , m_pipeline(0)
    , m_channelSize(0)
    , m_errorOccurred(false)
{
}

AudioFileReader::AudioFileReader(const void* data, size_t dataSize)
    : m_data(data)
    , m_dataSize(dataSize)
    , m_filePath(0)
    , m_frontLeftBuffers(0)
    , m_frontRightBuffers(0)
    , m_pipeline(0)
    , m_channelSize(0)
    , m_errorOccurred(false)
{
}

AudioFileReader::~AudioFileReader()
{
    if (m_pipeline) {
        GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline)));
        ASSERT(bus);
        g_signal_handlers_disconnect_by_func(bus.get(), reinterpret_cast<gpointer>(messageCallback), this);
        gst_bus_remove_signal_watch(bus.get());

        gst_element_set_state(m_pipeline, GST_STATE_NULL);
        gst_object_unref(GST_OBJECT(m_pipeline));
    }

    if (m_decodebin) {
        g_signal_handlers_disconnect_by_func(m_decodebin.get(), reinterpret_cast<gpointer>(onGStreamerDecodebinPadAddedCallback), this);
        m_decodebin.clear();
    }

    if (m_deInterleave) {
        g_signal_handlers_disconnect_by_func(m_deInterleave.get(), reinterpret_cast<gpointer>(onGStreamerDeinterleavePadAddedCallback), this);
        g_signal_handlers_disconnect_by_func(m_deInterleave.get(), reinterpret_cast<gpointer>(onGStreamerDeinterleaveReadyCallback), this);
        m_deInterleave.clear();
    }

    if (m_frontLeftBuffers)
        gst_buffer_list_unref(m_frontLeftBuffers);
    if (m_frontRightBuffers)
        gst_buffer_list_unref(m_frontRightBuffers);
}

GstFlowReturn AudioFileReader::handleSample(GstAppSink* sink)
{
    GstSample* sample = gst_app_sink_pull_sample(sink);
    if (!sample)
        return GST_FLOW_ERROR;

    GstBuffer* buffer = gst_sample_get_buffer(sample);
    if (!buffer) {
        gst_sample_unref(sample);
        return GST_FLOW_ERROR;
    }

    GstCaps* caps = gst_sample_get_caps(sample);
    if (!caps) {
        gst_sample_unref(sample);
        return GST_FLOW_ERROR;
    }

    GstAudioInfo info;
    gst_audio_info_from_caps(&info, caps);
    int frames = GST_CLOCK_TIME_TO_FRAMES(GST_BUFFER_DURATION(buffer), GST_AUDIO_INFO_RATE(&info));

    // Check the first audio channel. The buffer is supposed to store
    // data of a single channel anyway.
    switch (GST_AUDIO_INFO_POSITION(&info, 0)) {
    case GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT:
        gst_buffer_list_add(m_frontLeftBuffers, gst_buffer_ref(buffer));
        m_channelSize += frames;
        break;
    case GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT:
        gst_buffer_list_add(m_frontRightBuffers, gst_buffer_ref(buffer));
        break;
    default:
        break;
    }

    gst_sample_unref(sample);
    return GST_FLOW_OK;
}

gboolean AudioFileReader::handleMessage(GstMessage* message)
{
    GUniqueOutPtr<GError> error;
    GUniqueOutPtr<gchar> debug;

    switch (GST_MESSAGE_TYPE(message)) {
    case GST_MESSAGE_EOS:
        g_main_loop_quit(m_loop.get());
        break;
    case GST_MESSAGE_WARNING:
        gst_message_parse_warning(message, &error.outPtr(), &debug.outPtr());
        g_warning("Warning: %d, %s. Debug output: %s", error->code, error->message, debug.get());
        break;
    case GST_MESSAGE_ERROR:
        gst_message_parse_error(message, &error.outPtr(), &debug.outPtr());
        g_warning("Error: %d, %s. Debug output: %s", error->code, error->message, debug.get());
        m_errorOccurred = true;
        g_main_loop_quit(m_loop.get());
        break;
    default:
        break;
    }
    return TRUE;
}

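// Each source pad exposed by deinterleave carries a single planar channel.
// The appsink plugged below pulls its buffers through
// onAppsinkPullRequiredCallback() into handleSample(), which files them into
// m_frontLeftBuffers or m_frontRightBuffers depending on the channel position.
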
void AudioFileReader::handleNewDeinterleavePad(GstPad* pad)
{
    // A new pad for a planar channel was added in deinterleave. Plug
    // in an appsink so we can pull the data from each channel.
    // Pipeline looks like:
    // ... deinterleave ! queue ! appsink.
    GstElement* queue = gst_element_factory_make("queue", 0);
    GstElement* sink = gst_element_factory_make("appsink", 0);

    GstAppSinkCallbacks callbacks;
    callbacks.eos = 0;
    callbacks.new_preroll = 0;
    callbacks.new_sample = onAppsinkPullRequiredCallback;
    gst_app_sink_set_callbacks(GST_APP_SINK(sink), &callbacks, this, 0);

    g_object_set(sink, "sync", FALSE, NULL);

    gst_bin_add_many(GST_BIN(m_pipeline), queue, sink, NULL);

    GstPad* sinkPad = gst_element_get_static_pad(queue, "sink");
    gst_pad_link_full(pad, sinkPad, GST_PAD_LINK_CHECK_NOTHING);
    gst_object_unref(GST_OBJECT(sinkPad));

    gst_element_link_pads_full(queue, "src", sink, "sink", GST_PAD_LINK_CHECK_NOTHING);

    gst_element_set_state(queue, GST_STATE_READY);
    gst_element_set_state(sink, GST_STATE_READY);
}

void AudioFileReader::deinterleavePadsConfigured()
{
    // All deinterleave src pads are now available, let's roll to
    // PLAYING so data flows towards the sinks and it can be retrieved.
    gst_element_set_state(m_pipeline, GST_STATE_PLAYING);
}

void AudioFileReader::plugDeinterleave(GstPad* pad)
{
    // A decodebin pad was added, plug in a deinterleave element to
    // separate each planar channel. Sub pipeline looks like:
    // ... decodebin ! audioconvert ! audioresample ! capsfilter ! deinterleave.
    GstElement* audioConvert = gst_element_factory_make("audioconvert", 0);
    GstElement* audioResample = gst_element_factory_make("audioresample", 0);
    GstElement* capsFilter = gst_element_factory_make("capsfilter", 0);
    m_deInterleave = gst_element_factory_make("deinterleave", "deinterleave");

    g_object_set(m_deInterleave.get(), "keep-positions", TRUE, NULL);
    g_signal_connect(m_deInterleave.get(), "pad-added", G_CALLBACK(onGStreamerDeinterleavePadAddedCallback), this);
    g_signal_connect(m_deInterleave.get(), "no-more-pads", G_CALLBACK(onGStreamerDeinterleaveReadyCallback), this);

    GstCaps* caps = gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, static_cast<int>(m_sampleRate),
        "channels", G_TYPE_INT, 2,
        "format", G_TYPE_STRING, gst_audio_format_to_string(GST_AUDIO_FORMAT_F32),
        "layout", G_TYPE_STRING, "interleaved", nullptr);
    g_object_set(capsFilter, "caps", caps, NULL);
    gst_caps_unref(caps);

    gst_bin_add_many(GST_BIN(m_pipeline), audioConvert, audioResample, capsFilter, m_deInterleave.get(), NULL);

    GstPad* sinkPad = gst_element_get_static_pad(audioConvert, "sink");
    gst_pad_link_full(pad, sinkPad, GST_PAD_LINK_CHECK_NOTHING);
    gst_object_unref(GST_OBJECT(sinkPad));

    gst_element_link_pads_full(audioConvert, "src", audioResample, "sink", GST_PAD_LINK_CHECK_NOTHING);
    gst_element_link_pads_full(audioResample, "src", capsFilter, "sink", GST_PAD_LINK_CHECK_NOTHING);
    gst_element_link_pads_full(capsFilter, "src", m_deInterleave.get(), "sink", GST_PAD_LINK_CHECK_NOTHING);

    gst_element_sync_state_with_parent(audioConvert);
    gst_element_sync_state_with_parent(audioResample);
    gst_element_sync_state_with_parent(capsFilter);
    gst_element_sync_state_with_parent(m_deInterleave.get());
}

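// decodeAudioForBusCreation() is scheduled from createBus() on the reader's
// own GMainContext. It only builds the static source ! decodebin part of the
// pipeline and moves it to PAUSED; the remaining elements are plugged in
// dynamically by the pad-added handlers above once decodebin exposes its pads.
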
void AudioFileReader::decodeAudioForBusCreation()
{
    // Build the pipeline: (giostreamsrc | filesrc) ! decodebin.
    // A deinterleave element is added once a src pad becomes available in decodebin.
    m_pipeline = gst_pipeline_new(0);

    GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline)));
    ASSERT(bus);
    gst_bus_add_signal_watch(bus.get());
    g_signal_connect(bus.get(), "message", G_CALLBACK(messageCallback), this);

    GstElement* source;
    if (m_data) {
        ASSERT(m_dataSize);
        source = gst_element_factory_make("giostreamsrc", 0);
        GRefPtr<GInputStream> memoryStream = adoptGRef(g_memory_input_stream_new_from_data(m_data, m_dataSize, 0));
        g_object_set(source, "stream", memoryStream.get(), NULL);
    } else {
        source = gst_element_factory_make("filesrc", 0);
        g_object_set(source, "location", m_filePath, NULL);
    }

    m_decodebin = gst_element_factory_make("decodebin", "decodebin");
    g_signal_connect(m_decodebin.get(), "pad-added", G_CALLBACK(onGStreamerDecodebinPadAddedCallback), this);

    gst_bin_add_many(GST_BIN(m_pipeline), source, m_decodebin.get(), NULL);
    gst_element_link_pads_full(source, "src", m_decodebin.get(), "sink", GST_PAD_LINK_CHECK_NOTHING);
    gst_element_set_state(m_pipeline, GST_STATE_PAUSED);
}

PassRefPtr<AudioBus> AudioFileReader::createBus(float sampleRate, bool mixToMono)
{
    m_sampleRate = sampleRate;

    m_frontLeftBuffers = gst_buffer_list_new();
    m_frontRightBuffers = gst_buffer_list_new();

    GRefPtr<GMainContext> context = adoptGRef(g_main_context_new());
    g_main_context_push_thread_default(context.get());
    m_loop = adoptGRef(g_main_loop_new(context.get(), FALSE));

    // Start the pipeline processing just after the loop is started.
    GMainLoopSource source;
    source.schedule("[WebKit] AudioFileReader::decodeAudioForBusCreation", std::function<void()>(std::bind(&AudioFileReader::decodeAudioForBusCreation, this)), G_PRIORITY_DEFAULT, nullptr, context.get());

    g_main_loop_run(m_loop.get());
    g_main_context_pop_thread_default(context.get());

    if (m_errorOccurred)
        return 0;

    unsigned channels = mixToMono ? 1 : 2;
    RefPtr<AudioBus> audioBus = AudioBus::create(channels, m_channelSize, true);
    audioBus->setSampleRate(m_sampleRate);

    copyGstreamerBuffersToAudioChannel(m_frontLeftBuffers, audioBus->channel(0));
    if (!mixToMono)
        copyGstreamerBuffersToAudioChannel(m_frontRightBuffers, audioBus->channel(1));

    return audioBus;
}

PassRefPtr<AudioBus> createBusFromAudioFile(const char* filePath, bool mixToMono, float sampleRate)
{
    return AudioFileReader(filePath).createBus(sampleRate, mixToMono);
}

PassRefPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, float sampleRate)
{
    return AudioFileReader(data, dataSize).createBus(sampleRate, mixToMono);
}

} // namespace WebCore

#endif // ENABLE(WEB_AUDIO)