/*
 * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

//#define USE_ERROR
//#define USE_TRACE
//#define USE_VERBOSE_TRACE

#include <AudioUnit/AudioUnit.h>
#include <AudioToolbox/AudioConverter.h>
#include <pthread.h>
#include <math.h>
/*
#if !defined(__COREAUDIO_USE_FLAT_INCLUDES__)
#include <CoreAudio/CoreAudioTypes.h>
#else
#include <CoreAudioTypes.h>
#endif
*/

#include "PLATFORM_API_MacOSX_Utils.h"

extern "C" {
#include "Utilities.h"
#include "DirectAudio.h"
}

#if USE_DAUDIO == TRUE


#ifdef USE_TRACE
static void PrintStreamDesc(const AudioStreamBasicDescription *inDesc) {
    TRACE4("ID='%c%c%c%c'", (char)(inDesc->mFormatID >> 24), (char)(inDesc->mFormatID >> 16), (char)(inDesc->mFormatID >> 8), (char)(inDesc->mFormatID));
    TRACE2(", %f Hz, flags=0x%lX", (float)inDesc->mSampleRate, (long unsigned)inDesc->mFormatFlags);
    TRACE2(", %ld channels, %ld bits", (long)inDesc->mChannelsPerFrame, (long)inDesc->mBitsPerChannel);
    TRACE1(", %ld bytes per frame\n", (long)inDesc->mBytesPerFrame);
}
#else
static inline void PrintStreamDesc(const AudioStreamBasicDescription *inDesc) { }
#endif


#define MAX(x, y)   ((x) >= (y) ? (x) : (y))
#define MIN(x, y)   ((x) <= (y) ? (x) : (y))


// =======================================
// MixerProvider functions implementation

static DeviceList deviceCache;

INT32 DAUDIO_GetDirectAudioDeviceCount() {
    deviceCache.Refresh();
    int count = deviceCache.GetCount();
    if (count > 0) {
        // add "default" device
        count++;
        TRACE1("DAUDIO_GetDirectAudioDeviceCount: returns %d devices\n", count);
    } else {
        TRACE0("DAUDIO_GetDirectAudioDeviceCount: no devices found\n");
    }
    return count;
}

INT32 DAUDIO_GetDirectAudioDeviceDescription(INT32 mixerIndex, DirectAudioDeviceDescription *desc) {
    bool result = true;
    desc->deviceID = 0;
    if (mixerIndex == 0) {
        // default device
        strncpy(desc->name, "Default Audio Device", DAUDIO_STRING_LENGTH);
        strncpy(desc->description, "Default Audio Device", DAUDIO_STRING_LENGTH);
        desc->maxSimulLines = -1;
    } else {
        AudioDeviceID deviceID;
        result = deviceCache.GetDeviceInfo(mixerIndex-1, &deviceID, DAUDIO_STRING_LENGTH,
            desc->name, desc->vendor, desc->description, desc->version);
        if (result) {
            desc->deviceID = (INT32)deviceID;
            desc->maxSimulLines = -1;
        }
    }
    return result ? TRUE : FALSE;
}


void DAUDIO_GetFormats(INT32 mixerIndex, INT32 deviceID, int isSource, void* creator) {
    TRACE3(">>DAUDIO_GetFormats mixerIndex=%d deviceID=0x%x isSource=%d\n", (int)mixerIndex, (int)deviceID, isSource);

    AudioDeviceID audioDeviceID = deviceID == 0 ? GetDefaultDevice(isSource) : (AudioDeviceID)deviceID;

    if (audioDeviceID == 0) {
        return;
    }

    int totalChannels = GetChannelCount(audioDeviceID, isSource);

    if (totalChannels == 0) {
        TRACE0("<<DAUDIO_GetFormats, no streams!\n");
        return;
    }

    if (isSource && totalChannels < 2) {
        // report 2 channels even if only mono is supported
        totalChannels = 2;
    }

    int channels[] = {1, 2, totalChannels};
    int channelsCount = MIN(totalChannels, 3);

    float hardwareSampleRate = GetSampleRate(audioDeviceID, isSource);
    TRACE2("  DAUDIO_GetFormats: got %d channels, sampleRate == %f\n", totalChannels, hardwareSampleRate);

    // any sample rate is supported
    float sampleRate = -1;

    static int sampleBits[] = {8, 16, 24};
    static int sampleBitsCount = sizeof(sampleBits)/sizeof(sampleBits[0]);

    // the last audio format is the default one (used by DataLine.open() if no format is specified);
    // the default is 16-bit PCM, stereo (mono if stereo is not supported), at the current hardware sample rate
    int defBits = 16;
    int defChannels = MIN(2, channelsCount);
    float defSampleRate = hardwareSampleRate;
    // don't add the default format if the hardware sample rate is not known
    bool addDefault = defSampleRate > 0;

    // TODO: CoreAudio can handle signed/unsigned, little-endian/big-endian
    // TODO: register the formats (to prevent DirectAudio software conversion) - need to fix DirectAudioDevice.createDataLineInfo
    // to avoid software conversions if both signed/unsigned or big-/little-endian are supported
    for (int channelIndex = 0; channelIndex < channelsCount; channelIndex++) {
        for (int bitIndex = 0; bitIndex < sampleBitsCount; bitIndex++) {
            int bits = sampleBits[bitIndex];
            if (addDefault && bits == defBits && channels[channelIndex] == defChannels && sampleRate == defSampleRate) {
                // this is the default format, don't add it now (it is added last, below)
                continue;
            }
            DAUDIO_AddAudioFormat(creator,
                bits,                       // sample size in bits
                -1,                         // frame size (auto)
                channels[channelIndex],     // channels
                sampleRate,                 // sample rate
                DAUDIO_PCM,                 // only accept PCM
                bits == 8 ? FALSE : TRUE,   // signed
                bits == 8 ? FALSE           // little-endian for 8-bit
                    : UTIL_IsBigEndianPlatform());
        }
    }
    // add the default format
    if (addDefault) {
        DAUDIO_AddAudioFormat(creator,
            defBits,                        // 16 bits
            -1,                             // automatically calculate frame size
            defChannels,                    // channels
            defSampleRate,                  // sample rate
            DAUDIO_PCM,                     // PCM
            TRUE,                           // signed
            UTIL_IsBigEndianPlatform());    // native endianness
    }

    TRACE0("<<DAUDIO_GetFormats\n");
}


// =======================================
// Source/Target DataLine functions implementation

// ====
/* 1writer-1reader ring buffer class with flush() support */
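//
// Illustrative usage sketch (the variable names below are placeholders, not
// taken from this file; the real producer/consumer pairs are
// DAUDIO_Write()/OutputCallback() for source lines and
// InputCallback()/DAUDIO_Read() for target lines):
//
//     RingBuffer rb;
//     rb.Allocate(bufferSizeInBytes, extraBytes); // storage is rounded up to a power of two
//     rb.Write(src, len, true);                   // producer side; 'true' prevents overflow
//     int n = rb.Read(dst, len);                  // consumer side; returns bytes actually read
//     rb.Flush();                                 // discard unread data; the next Read() observes it
//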
class RingBuffer {
public:
    RingBuffer() : pBuffer(NULL), nBufferSize(0) {
        pthread_mutex_init(&lockMutex, NULL);
    }
    ~RingBuffer() {
        Deallocate();
        pthread_mutex_destroy(&lockMutex);
    }

    // extraBytes: number of additionally allocated bytes to prevent data
    // overlapping when the buffer is almost full
    // (required only if Write() can overwrite the buffer)
    bool Allocate(int requestedBufferSize, int extraBytes) {
        int fullBufferSize = requestedBufferSize + extraBytes;
        int powerOfTwo = 1;
        while (powerOfTwo < fullBufferSize) {
            powerOfTwo <<= 1;
        }
        pBuffer = (Byte*)malloc(powerOfTwo);
        if (pBuffer == NULL) {
            ERROR0("RingBuffer::Allocate: OUT OF MEMORY\n");
            return false;
        }

        nBufferSize = requestedBufferSize;
        nAllocatedBytes = powerOfTwo;
        nPosMask = powerOfTwo - 1;
        nWritePos = 0;
        nReadPos = 0;
        nFlushPos = -1;

        TRACE2("RingBuffer::Allocate: OK, bufferSize=%d, allocated:%d\n", nBufferSize, nAllocatedBytes);
        return true;
    }

    void Deallocate() {
        if (pBuffer) {
            free(pBuffer);
            pBuffer = NULL;
            nBufferSize = 0;
        }
    }

    inline int GetBufferSize() {
        return nBufferSize;
    }

    inline int GetAllocatedSize() {
        return nAllocatedBytes;
    }

    // gets number of bytes available for reading
    int GetValidByteCount() {
        lock();
        INT64 result = nWritePos - (nFlushPos >= 0 ? nFlushPos : nReadPos);
        unlock();
        return result > (INT64)nBufferSize ? nBufferSize : (int)result;
    }

    int Write(void *srcBuffer, int len, bool preventOverflow) {
        lock();
        TRACE2("RingBuffer::Write (%d bytes, preventOverflow=%d)\n", len, preventOverflow ? 1 : 0);
        TRACE2("  writePos = %lld (%d)", (long long)nWritePos, Pos2Offset(nWritePos));
        TRACE2("  readPos=%lld (%d)", (long long)nReadPos, Pos2Offset(nReadPos));
        TRACE2("  flushPos=%lld (%d)\n", (long long)nFlushPos, Pos2Offset(nFlushPos));

        INT64 writePos = nWritePos;
        if (preventOverflow) {
            INT64 avail_read = writePos - (nFlushPos >= 0 ? nFlushPos : nReadPos);
            if (avail_read >= (INT64)nBufferSize) {
                // no space
                TRACE0("  preventOverflow: OVERFLOW => len = 0;\n");
                len = 0;
            } else {
                int avail_write = nBufferSize - (int)avail_read;
                if (len > avail_write) {
                    TRACE2("  preventOverflow: decrease len: %d => %d\n", len, avail_write);
                    len = avail_write;
                }
            }
        }
        unlock();

        if (len > 0) {

            write((Byte *)srcBuffer, Pos2Offset(writePos), len);

            lock();
            TRACE4("--RingBuffer::Write writePos: %lld (%d) => %lld, (%d)\n",
                (long long)nWritePos, Pos2Offset(nWritePos), (long long)nWritePos + len, Pos2Offset(nWritePos + len));
            nWritePos += len;
            unlock();
        }
        return len;
    }

    int Read(void *dstBuffer, int len) {
        lock();
        TRACE1("RingBuffer::Read (%d bytes)\n", len);
        TRACE2("  writePos = %lld (%d)", (long long)nWritePos, Pos2Offset(nWritePos));
        TRACE2("  readPos=%lld (%d)", (long long)nReadPos, Pos2Offset(nReadPos));
        TRACE2("  flushPos=%lld (%d)\n", (long long)nFlushPos, Pos2Offset(nFlushPos));

        applyFlush();
        INT64 avail_read = nWritePos - nReadPos;
        // check for overflow
        if (avail_read > (INT64)nBufferSize) {
            nReadPos = nWritePos - nBufferSize;
            avail_read = nBufferSize;
            TRACE0("  OVERFLOW\n");
        }
        INT64 readPos = nReadPos;
        unlock();

        if (len > (int)avail_read) {
            TRACE2("  RingBuffer::Read - don't have enough data, len: %d => %d\n", len, (int)avail_read);
            len = (int)avail_read;
        }

        if (len > 0) {

            read((Byte *)dstBuffer, Pos2Offset(readPos), len);

            lock();
            if (applyFlush()) {
                // just got flush(), results became obsolete
                TRACE0("--RingBuffer::Read, got Flush, return 0\n");
                len = 0;
            } else {
                TRACE4("--RingBuffer::Read readPos: %lld (%d) => %lld (%d)\n",
                    (long long)nReadPos, Pos2Offset(nReadPos), (long long)nReadPos + len, Pos2Offset(nReadPos + len));
                nReadPos += len;
            }
            unlock();
        } else {
            // underrun!
        }
        return len;
    }

    // returns the number of flushed bytes
    int Flush() {
        lock();
        INT64 flushedBytes = nWritePos - (nFlushPos >= 0 ? nFlushPos : nReadPos);
        nFlushPos = nWritePos;
        unlock();
        return flushedBytes > (INT64)nBufferSize ? nBufferSize : (int)flushedBytes;
    }

private:
    Byte *pBuffer;
    int nBufferSize;
    int nAllocatedBytes;
    INT64 nPosMask;

    pthread_mutex_t lockMutex;

    volatile INT64 nWritePos;
    volatile INT64 nReadPos;
    // Flush() sets nFlushPos to nWritePos;
    // the next Read() sets nReadPos to nFlushPos and resets nFlushPos to -1
    volatile INT64 nFlushPos;

    inline void lock() {
        pthread_mutex_lock(&lockMutex);
    }
    inline void unlock() {
        pthread_mutex_unlock(&lockMutex);
    }

    inline bool applyFlush() {
        if (nFlushPos >= 0) {
            nReadPos = nFlushPos;
            nFlushPos = -1;
            return true;
        }
        return false;
    }

    inline int Pos2Offset(INT64 pos) {
        return (int)(pos & nPosMask);
    }

    void write(Byte *srcBuffer, int dstOffset, int len) {
        int dstEndOffset = dstOffset + len;

        int lenAfterWrap = dstEndOffset - nAllocatedBytes;
        if (lenAfterWrap > 0) {
            // dest.buffer does wrap
            len = nAllocatedBytes - dstOffset;
            memcpy(pBuffer+dstOffset, srcBuffer, len);
            memcpy(pBuffer, srcBuffer+len, lenAfterWrap);
        } else {
            // dest.buffer does not wrap
            memcpy(pBuffer+dstOffset, srcBuffer, len);
        }
    }

    void read(Byte *dstBuffer, int srcOffset, int len) {
        int srcEndOffset = srcOffset + len;

        int lenAfterWrap = srcEndOffset - nAllocatedBytes;
        if (lenAfterWrap > 0) {
            // need to unwrap data
            len = nAllocatedBytes - srcOffset;
            memcpy(dstBuffer, pBuffer+srcOffset, len);
            memcpy(dstBuffer+len, pBuffer, lenAfterWrap);
        } else {
            // source buffer is not wrapped
            memcpy(dstBuffer, pBuffer+srcOffset, len);
        }
    }
};


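// Sample-rate converter built on the AudioToolbox AudioConverter API.
// Process() pushes one captured block through AudioConverterFillComplexBuffer(),
// which pulls its input via the ConverterInputProc() callback; the callback
// signals the end of the current input block by returning the private
// kResamplerEndOfInputData status (see below).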
class Resampler {
private:
    enum {
        kResamplerEndOfInputData = 1 // error to interrupt conversion (end of input data)
    };
public:
    Resampler() : converter(NULL), outBuffer(NULL) { }
    ~Resampler() {
        if (converter != NULL) {
            AudioConverterDispose(converter);
        }
        if (outBuffer != NULL) {
            free(outBuffer);
        }
    }

    // inFormat & outFormat must be interleaved!
    bool Init(const AudioStreamBasicDescription *inFormat, const AudioStreamBasicDescription *outFormat,
            int inputBufferSizeInBytes)
    {
        TRACE0(">>Resampler::Init\n");
        TRACE0("  inFormat: ");
        PrintStreamDesc(inFormat);
        TRACE0("  outFormat: ");
        PrintStreamDesc(outFormat);
        TRACE1("  inputBufferSize: %d bytes\n", inputBufferSizeInBytes);
        OSStatus err;

        if ((outFormat->mFormatFlags & kAudioFormatFlagIsNonInterleaved) != 0 && outFormat->mChannelsPerFrame != 1) {
            ERROR0("Resampler::Init ERROR: outFormat is non-interleaved\n");
            return false;
        }
        if ((inFormat->mFormatFlags & kAudioFormatFlagIsNonInterleaved) != 0 && inFormat->mChannelsPerFrame != 1) {
            ERROR0("Resampler::Init ERROR: inFormat is non-interleaved\n");
            return false;
        }

        memcpy(&asbdIn, inFormat, sizeof(AudioStreamBasicDescription));
        memcpy(&asbdOut, outFormat, sizeof(AudioStreamBasicDescription));

        err = AudioConverterNew(inFormat, outFormat, &converter);

        if (err || converter == NULL) {
            OS_ERROR1(err, "Resampler::Init (AudioConverterNew), converter=%p", converter);
            return false;
        }

        // allocate buffer for output data
        int maximumInFrames = inputBufferSizeInBytes / inFormat->mBytesPerFrame;
        // take into account trailingFrames
        AudioConverterPrimeInfo primeInfo = {0, 0};
        UInt32 sizePrime = sizeof(primeInfo);
        err = AudioConverterGetProperty(converter, kAudioConverterPrimeInfo, &sizePrime, &primeInfo);
        if (err) {
            OS_ERROR0(err, "Resampler::Init (get kAudioConverterPrimeInfo)");
            // ignore the error
        } else {
            // the default primeMethod is kConverterPrimeMethod_Normal, so we need only trailingFrames
            maximumInFrames += primeInfo.trailingFrames;
        }
        float outBufferSizeInFrames = (outFormat->mSampleRate / inFormat->mSampleRate) * ((float)maximumInFrames);
        // to avoid complex calculations just set outBufferSize to double the calculated value
        outBufferSize = (int)outBufferSizeInFrames * outFormat->mBytesPerFrame * 2;
        // safety check - consider 256 frames as the minimum input buffer
        int minOutSize = 256 * outFormat->mBytesPerFrame;
        if (outBufferSize < minOutSize) {
            outBufferSize = minOutSize;
        }

        outBuffer = malloc(outBufferSize);

        if (outBuffer == NULL) {
            ERROR1("Resampler::Init ERROR: malloc failed (%d bytes)\n", outBufferSize);
            AudioConverterDispose(converter);
            converter = NULL;
            return false;
        }

        TRACE1("  allocated: %d bytes for output buffer\n", outBufferSize);

        TRACE0("<<Resampler::Init: OK\n");
        return true;
    }

    // returns size of the internal output buffer
    int GetOutBufferSize() {
        return outBufferSize;
    }

    // process next part of data (writes resampled data to the ringBuffer without overflow check)
    int Process(void *srcBuffer, int len, RingBuffer *ringBuffer) {
        int bytesWritten = 0;
        TRACE2(">>Resampler::Process: %d bytes, converter = %p\n", len, converter);
        if (converter == NULL) {    // sanity check
            bytesWritten = ringBuffer->Write(srcBuffer, len, false);
        } else {
            InputProcData data;
            data.pThis = this;
            data.data = (Byte *)srcBuffer;
            data.dataSize = len;

            OSStatus err;
            do {
                AudioBufferList abl;    // by default it contains 1 AudioBuffer
                abl.mNumberBuffers = 1;
                abl.mBuffers[0].mNumberChannels = asbdOut.mChannelsPerFrame;
                abl.mBuffers[0].mDataByteSize   = outBufferSize;
                abl.mBuffers[0].mData           = outBuffer;

                UInt32 packets = (UInt32)outBufferSize / asbdOut.mBytesPerPacket;

                TRACE2(">>AudioConverterFillComplexBuffer: request %d packets, provide %d bytes buffer\n",
                    (int)packets, (int)abl.mBuffers[0].mDataByteSize);

                err = AudioConverterFillComplexBuffer(converter, ConverterInputProc, &data, &packets, &abl, NULL);

                TRACE2("<<AudioConverterFillComplexBuffer: got %d packets (%d bytes)\n",
                    (int)packets, (int)abl.mBuffers[0].mDataByteSize);
                if (packets > 0) {
                    int bytesToWrite = (int)(packets * asbdOut.mBytesPerPacket);
                    bytesWritten += ringBuffer->Write(abl.mBuffers[0].mData, bytesToWrite, false);
                }

                // if the output buffer is too small to store all available frames,
                // we get noErr here; in that case just continue the conversion
            } while (err == noErr);

            if (err != kResamplerEndOfInputData) {
                // unexpected error
                OS_ERROR0(err, "Resampler::Process (AudioConverterFillComplexBuffer)");
            }
        }
        TRACE2("<<Resampler::Process: written %d bytes (converted from %d bytes)\n", bytesWritten, len);

        return bytesWritten;
    }

    // resets internal buffers
    void Discontinue() {
        TRACE0(">>Resampler::Discontinue\n");
        if (converter != NULL) {
            AudioConverterReset(converter);
        }
        TRACE0("<<Resampler::Discontinue\n");
    }

private:
    AudioConverterRef converter;

    // buffer for output data
    // note that there is no problem if the buffer is not big enough to store
    // all converted data - it's only a performance issue
    void *outBuffer;
    int outBufferSize;

    AudioStreamBasicDescription asbdIn;
    AudioStreamBasicDescription asbdOut;

    struct InputProcData {
        Resampler *pThis;
        Byte *data;     // data == NULL means we handle Discontinue(false)
        int dataSize;   // == 0 if all data was already provided to the converter or we handle Discontinue(false)
    };

    static OSStatus ConverterInputProc(AudioConverterRef inAudioConverter, UInt32 *ioNumberDataPackets,
            AudioBufferList *ioData, AudioStreamPacketDescription **outDataPacketDescription, void *inUserData)
    {
        InputProcData *data = (InputProcData *)inUserData;

        TRACE3("  >>ConverterInputProc: requested %d packets, data contains %d bytes (%d packets)\n",
            (int)*ioNumberDataPackets, (int)data->dataSize, (int)(data->dataSize / data->pThis->asbdIn.mBytesPerPacket));
        if (data->dataSize == 0) {
            // already called & provided all input data
            // interrupt conversion by returning an error
            *ioNumberDataPackets = 0;
            TRACE0("  <<ConverterInputProc: returns kResamplerEndOfInputData\n");
            return kResamplerEndOfInputData;
        }

        ioData->mNumberBuffers = 1;
        ioData->mBuffers[0].mNumberChannels = data->pThis->asbdIn.mChannelsPerFrame;
        ioData->mBuffers[0].mDataByteSize   = data->dataSize;
        ioData->mBuffers[0].mData           = data->data;

        *ioNumberDataPackets = data->dataSize / data->pThis->asbdIn.mBytesPerPacket;

        // all data has been provided to the converter
        data->dataSize = 0;

        TRACE1("  <<ConverterInputProc: returns %d packets\n", (int)(*ioNumberDataPackets));
        return noErr;
    }

};


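// Data flow summary: for source lines (playback) DAUDIO_Write() fills the
// ringBuffer and OutputCallback() drains it; for target lines (capture)
// InputCallback() fills the ringBuffer (through the Resampler when the device
// rate differs from the requested line rate) and DAUDIO_Read() drains it.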
struct OSX_DirectAudioDevice {
    AudioUnit   audioUnit;
    RingBuffer  ringBuffer;
    AudioStreamBasicDescription asbd;

    // only for target lines
    UInt32      inputBufferSizeInBytes;
    Resampler   *resampler;
    // to detect discontinuity (to reset resampler)
    SInt64      lastWrittenSampleTime;


    OSX_DirectAudioDevice() : audioUnit(NULL), asbd(), resampler(NULL), lastWrittenSampleTime(0) {
    }

    ~OSX_DirectAudioDevice() {
        if (audioUnit) {
            AudioComponentInstanceDispose(audioUnit);
        }
        if (resampler) {
            delete resampler;
        }
    }
};

static AudioUnit CreateOutputUnit(AudioDeviceID deviceID, int isSource)
{
    OSStatus err;
    AudioUnit unit;

    AudioComponentDescription desc;
    desc.componentType         = kAudioUnitType_Output;
    desc.componentSubType      = (deviceID == 0 && isSource) ? kAudioUnitSubType_DefaultOutput : kAudioUnitSubType_HALOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags        = 0;
    desc.componentFlagsMask    = 0;

    AudioComponent comp = AudioComponentFindNext(NULL, &desc);
    err = AudioComponentInstanceNew(comp, &unit);

    if (err) {
        OS_ERROR0(err, "CreateOutputUnit:AudioComponentInstanceNew");
        return NULL;
    }

    if (!isSource) {
        int enableIO = 0;
        err = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output,
                                    0, &enableIO, sizeof(enableIO));
        if (err) {
            OS_ERROR0(err, "SetProperty (output EnableIO)");
        }
        enableIO = 1;
        err = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input,
                                    1, &enableIO, sizeof(enableIO));
        if (err) {
            OS_ERROR0(err, "SetProperty (input EnableIO)");
        }

        if (!deviceID) {
            // get the real AudioDeviceID for the default input device (current Mac OS X input device)
            deviceID = GetDefaultDevice(isSource);
            if (!deviceID) {
                AudioComponentInstanceDispose(unit);
                return NULL;
            }
        }
    }

    if (deviceID) {
        err = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global,
                                    0, &deviceID, sizeof(deviceID));
        if (err) {
            OS_ERROR0(err, "SetProperty (CurrentDevice)");
            AudioComponentInstanceDispose(unit);
            return NULL;
        }
    }

    return unit;
}

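// Render callback for playback (source data lines): called by the output
// AudioUnit on its I/O thread to request inNumberFrames of audio; the frames
// are pulled from the ring buffer and any shortfall is zero-filled (silence).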
static OSStatus OutputCallback(void                         *inRefCon,
                               AudioUnitRenderActionFlags   *ioActionFlags,
                               const AudioTimeStamp         *inTimeStamp,
                               UInt32                       inBusNumber,
                               UInt32                       inNumberFrames,
                               AudioBufferList              *ioData)
{
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)inRefCon;

    int nchannels = ioData->mNumberBuffers; // should always be == 1 (interleaved channels)
    AudioBuffer *audioBuffer = ioData->mBuffers;

    TRACE3(">>OutputCallback: busNum=%d, requested %d frames (%d bytes)\n",
        (int)inBusNumber, (int)inNumberFrames, (int)(inNumberFrames * device->asbd.mBytesPerFrame));
    TRACE3("  abl: %d buffers, buffer[0].channels=%d, buffer.size=%d\n",
        nchannels, (int)audioBuffer->mNumberChannels, (int)audioBuffer->mDataByteSize);

    int bytesToRead = inNumberFrames * device->asbd.mBytesPerFrame;
    if (bytesToRead > (int)audioBuffer->mDataByteSize) {
        TRACE0("--OutputCallback: !!! audioBuffer IS TOO SMALL!!!\n");
        bytesToRead = audioBuffer->mDataByteSize / device->asbd.mBytesPerFrame * device->asbd.mBytesPerFrame;
    }
    int bytesRead = device->ringBuffer.Read(audioBuffer->mData, bytesToRead);
    if (bytesRead < bytesToRead) {
        // not enough data (underrun)
        TRACE2("--OutputCallback: !!! UNDERRUN (read %d bytes of %d)!!!\n", bytesRead, bytesToRead);
        // silence the rest
        memset((Byte*)audioBuffer->mData + bytesRead, 0, bytesToRead-bytesRead);
        bytesRead = bytesToRead;
    }

    audioBuffer->mDataByteSize = (UInt32)bytesRead;
    // SAFETY: set mDataByteSize to zero for all other AudioBuffers in the AudioBufferList
    while (--nchannels > 0) {
        audioBuffer++;
        audioBuffer->mDataByteSize = 0;
    }
    TRACE1("<<OutputCallback (returns %d)\n", bytesRead);

    return noErr;
}

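// Input callback for capture (target data lines): called by the AUHAL when
// inNumberFrames have been captured; the frames are rendered into the unit's
// own buffer and pushed into the ring buffer, going through the resampler
// first when the hardware rate differs from the requested line rate.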
static OSStatus InputCallback(void                          *inRefCon,
                              AudioUnitRenderActionFlags    *ioActionFlags,
                              const AudioTimeStamp          *inTimeStamp,
                              UInt32                        inBusNumber,
                              UInt32                        inNumberFrames,
                              AudioBufferList               *ioData)
{
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)inRefCon;

    TRACE4(">>InputCallback: busNum=%d, timeStamp=%lld, %d frames (%d bytes)\n",
        (int)inBusNumber, (long long)inTimeStamp->mSampleTime, (int)inNumberFrames, (int)(inNumberFrames * device->asbd.mBytesPerFrame));

    AudioBufferList abl;    // by default it contains 1 AudioBuffer
    abl.mNumberBuffers = 1;
    abl.mBuffers[0].mNumberChannels = device->asbd.mChannelsPerFrame;
    abl.mBuffers[0].mDataByteSize   = device->inputBufferSizeInBytes;   // assume this is == (inNumberFrames * device->asbd.mBytesPerFrame)
    abl.mBuffers[0].mData           = NULL;     // request the audioUnit's buffer

    OSStatus err = AudioUnitRender(device->audioUnit, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, &abl);
    if (err) {
        OS_ERROR0(err, "<<InputCallback: AudioUnitRender");
    } else {
        if (device->resampler != NULL) {
            // test for discontinuity
            // AUHAL starts timestamps at zero, so test if the current timestamp is less than the last written one
            SInt64 sampleTime = inTimeStamp->mSampleTime;
            if (sampleTime < device->lastWrittenSampleTime) {
                // discontinuity, reset the resampler
                TRACE2("  InputCallback (RESAMPLED), DISCONTINUITY (%f -> %f)\n",
                    (float)device->lastWrittenSampleTime, (float)sampleTime);

                device->resampler->Discontinue();
            } else {
                TRACE2("  InputCallback (RESAMPLED), continuous: lastWrittenSampleTime = %f, sampleTime=%f\n",
                    (float)device->lastWrittenSampleTime, (float)sampleTime);
            }
            device->lastWrittenSampleTime = sampleTime + inNumberFrames;

            int bytesWritten = device->resampler->Process(abl.mBuffers[0].mData, (int)abl.mBuffers[0].mDataByteSize, &device->ringBuffer);
            TRACE2("<<InputCallback (RESAMPLED, saved %d bytes of %d)\n", bytesWritten, (int)abl.mBuffers[0].mDataByteSize);
        } else {
            int bytesWritten = device->ringBuffer.Write(abl.mBuffers[0].mData, (int)abl.mBuffers[0].mDataByteSize, false);
            TRACE2("<<InputCallback (saved %d bytes of %d)\n", bytesWritten, (int)abl.mBuffers[0].mDataByteSize);
        }
    }

    return noErr;
}


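// Fills an AudioStreamBasicDescription for packed linear PCM with one frame
// per packet. Worked example (illustrative): sampleRate=44100, channels=2,
// sampleSizeInBits=16, signed, little-endian gives mBytesPerPacket =
// mBytesPerFrame = 2 * 2 = 4, mFramesPerPacket = 1, mBitsPerChannel = 16.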
static void FillASBDForNonInterleavedPCM(AudioStreamBasicDescription& asbd,
    float sampleRate, int channels, int sampleSizeInBits, bool isFloat, int isSigned, bool isBigEndian)
{
    // FillOutASBDForLPCM cannot produce unsigned integer format
    asbd.mSampleRate = sampleRate;
    asbd.mFormatID = kAudioFormatLinearPCM;
    asbd.mFormatFlags = (isFloat ? kAudioFormatFlagIsFloat : (isSigned ? kAudioFormatFlagIsSignedInteger : 0))
        | (isBigEndian ? (kAudioFormatFlagIsBigEndian) : 0)
        | kAudioFormatFlagIsPacked;
    asbd.mBytesPerPacket = channels * ((sampleSizeInBits + 7) / 8);
    asbd.mFramesPerPacket = 1;
    asbd.mBytesPerFrame = asbd.mBytesPerPacket;
    asbd.mChannelsPerFrame = channels;
    asbd.mBitsPerChannel = sampleSizeInBits;
}

void* DAUDIO_Open(INT32 mixerIndex, INT32 deviceID, int isSource,
                  int encoding, float sampleRate, int sampleSizeInBits,
                  int frameSize, int channels,
                  int isSigned, int isBigEndian, int bufferSizeInBytes)
{
    TRACE3(">>DAUDIO_Open: mixerIndex=%d deviceID=0x%x isSource=%d\n", (int)mixerIndex, (unsigned int)deviceID, isSource);
    TRACE3("  sampleRate=%d sampleSizeInBits=%d channels=%d\n", (int)sampleRate, sampleSizeInBits, channels);
#ifdef USE_TRACE
    {
        AudioDeviceID audioDeviceID = deviceID;
        if (audioDeviceID == 0) {
            // default device
            audioDeviceID = GetDefaultDevice(isSource);
        }
        char name[256];
        OSStatus err = GetAudioObjectProperty(audioDeviceID, kAudioUnitScope_Global, kAudioDevicePropertyDeviceName, 256, &name, 0);
        if (err != noErr) {
            OS_ERROR1(err, "  audioDeviceID=0x%x, name is N/A:", (int)audioDeviceID);
        } else {
            TRACE2("  audioDeviceID=0x%x, name=%s\n", (int)audioDeviceID, name);
        }
    }
#endif

    if (encoding != DAUDIO_PCM) {
        ERROR1("<<DAUDIO_Open: ERROR: unsupported encoding (%d)\n", encoding);
        return NULL;
    }
    if (channels <= 0) {
        ERROR1("<<DAUDIO_Open: ERROR: Invalid number of channels=%d!\n", channels);
        return NULL;
    }

    OSX_DirectAudioDevice *device = new OSX_DirectAudioDevice();

    AudioUnitScope scope = isSource ? kAudioUnitScope_Input : kAudioUnitScope_Output;
    int element = isSource ? 0 : 1;
    OSStatus err = noErr;
    int extraBufferBytes = 0;

    device->audioUnit = CreateOutputUnit(deviceID, isSource);

    if (!device->audioUnit) {
        delete device;
        return NULL;
    }

    if (!isSource) {
        AudioDeviceID actualDeviceID = deviceID != 0 ? deviceID : GetDefaultDevice(isSource);
        float hardwareSampleRate = GetSampleRate(actualDeviceID, isSource);
        TRACE2("--DAUDIO_Open: sampleRate = %f, hardwareSampleRate=%f\n", sampleRate, hardwareSampleRate);

        if (fabs(sampleRate - hardwareSampleRate) > 1) {
            device->resampler = new Resampler();

            // request the HAL for Float32 with native endianness
            FillASBDForNonInterleavedPCM(device->asbd, hardwareSampleRate, channels, 32, true, false, kAudioFormatFlagsNativeEndian != 0);
        } else {
            sampleRate = hardwareSampleRate;    // in case sample rates are not exactly equal
        }
    }

    if (device->resampler == NULL) {
        // no resampling, request the HAL for the requested format
        FillASBDForNonInterleavedPCM(device->asbd, sampleRate, channels, sampleSizeInBits, false, isSigned, isBigEndian);
    }

    err = AudioUnitSetProperty(device->audioUnit, kAudioUnitProperty_StreamFormat, scope, element, &device->asbd, sizeof(device->asbd));
    if (err) {
        OS_ERROR0(err, "<<DAUDIO_Open set StreamFormat");
        delete device;
        return NULL;
    }

    AURenderCallbackStruct output;
    output.inputProc       = isSource ? OutputCallback : InputCallback;
    output.inputProcRefCon = device;

    err = AudioUnitSetProperty(device->audioUnit,
                                isSource
                                    ? (AudioUnitPropertyID)kAudioUnitProperty_SetRenderCallback
                                    : (AudioUnitPropertyID)kAudioOutputUnitProperty_SetInputCallback,
                                kAudioUnitScope_Global, 0, &output, sizeof(output));
    if (err) {
        OS_ERROR0(err, "<<DAUDIO_Open set RenderCallback");
        delete device;
        return NULL;
    }

    err = AudioUnitInitialize(device->audioUnit);
    if (err) {
        OS_ERROR0(err, "<<DAUDIO_Open UnitInitialize");
        delete device;
        return NULL;
    }

    if (!isSource) {
        // for target lines we need extra bytes in the ringBuffer
        // to prevent collisions when InputCallback overwrites data on overflow
        UInt32 size;
        OSStatus err;

        size = sizeof(device->inputBufferSizeInBytes);
        err  = AudioUnitGetProperty(device->audioUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global,
                                    0, &device->inputBufferSizeInBytes, &size);
        if (err) {
            OS_ERROR0(err, "<<DAUDIO_Open (TargetDataLine)GetBufferSize\n");
            delete device;
            return NULL;
        }
        device->inputBufferSizeInBytes *= device->asbd.mBytesPerFrame;  // convert frames to bytes
        extraBufferBytes = (int)device->inputBufferSizeInBytes;
    }

    if (device->resampler != NULL) {
        // the resampler output format is the user-requested format (== ringBuffer format)
        AudioStreamBasicDescription asbdOut; // ringBuffer format
        FillASBDForNonInterleavedPCM(asbdOut, sampleRate, channels, sampleSizeInBits, false, isSigned, isBigEndian);

        // set the resampler input buffer size to the HAL buffer size
        if (!device->resampler->Init(&device->asbd, &asbdOut, (int)device->inputBufferSizeInBytes)) {
            ERROR0("<<DAUDIO_Open: resampler.Init() FAILED.\n");
            delete device;
            return NULL;
        }
        // the extra bytes in the ringBuffer (extraBufferBytes) should equal the resampler output buffer size
        extraBufferBytes = device->resampler->GetOutBufferSize();
    }

    if (!device->ringBuffer.Allocate(bufferSizeInBytes, extraBufferBytes)) {
        ERROR0("<<DAUDIO_Open: Ring buffer allocation error\n");
        delete device;
        return NULL;
    }

    TRACE0("<<DAUDIO_Open: OK\n");
    return device;
}

int DAUDIO_Start(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    TRACE0("DAUDIO_Start\n");

    OSStatus err = AudioOutputUnitStart(device->audioUnit);

    if (err != noErr) {
        OS_ERROR0(err, "DAUDIO_Start");
    }

    return err == noErr ? TRUE : FALSE;
}

int DAUDIO_Stop(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    TRACE0("DAUDIO_Stop\n");

    OSStatus err = AudioOutputUnitStop(device->audioUnit);

    return err == noErr ? TRUE : FALSE;
}

void DAUDIO_Close(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    TRACE0("DAUDIO_Close\n");

    delete device;
}

int DAUDIO_Write(void* id, char* data, int byteSize) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    TRACE1(">>DAUDIO_Write: %d bytes to write\n", byteSize);

    int result = device->ringBuffer.Write(data, byteSize, true);

    TRACE1("<<DAUDIO_Write: %d bytes written\n", result);
    return result;
}

int DAUDIO_Read(void* id, char* data, int byteSize) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    TRACE1(">>DAUDIO_Read: %d bytes to read\n", byteSize);

    int result = device->ringBuffer.Read(data, byteSize);

    TRACE1("<<DAUDIO_Read: %d bytes have been read\n", result);
    return result;
}

int DAUDIO_GetBufferSize(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;

    int bufferSizeInBytes = device->ringBuffer.GetBufferSize();

    TRACE1("DAUDIO_GetBufferSize returns %d\n", bufferSizeInBytes);
    return bufferSizeInBytes;
}

int DAUDIO_StillDraining(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;

    int draining = device->ringBuffer.GetValidByteCount() > 0 ? TRUE : FALSE;

    TRACE1("DAUDIO_StillDraining returns %d\n", draining);
    return draining;
}

int DAUDIO_Flush(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    TRACE0("DAUDIO_Flush\n");

    device->ringBuffer.Flush();

    return TRUE;
}

int DAUDIO_GetAvailable(void* id, int isSource) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;

    int bytesInBuffer = device->ringBuffer.GetValidByteCount();
    if (isSource) {
        return device->ringBuffer.GetBufferSize() - bytesInBuffer;
    } else {
        return bytesInBuffer;
    }
}

INT64 DAUDIO_GetBytePosition(void* id, int isSource, INT64 javaBytePos) {
    OSX_DirectAudioDevice *device = (OSX_DirectAudioDevice*)id;
    INT64 position;

    if (isSource) {
        position = javaBytePos - device->ringBuffer.GetValidByteCount();
    } else {
        position = javaBytePos + device->ringBuffer.GetValidByteCount();
    }

    TRACE2("DAUDIO_GetBytePosition returns %lld (javaBytePos = %lld)\n", (long long)position, (long long)javaBytePos);
    return position;
}

void DAUDIO_SetBytePosition(void* id, int isSource, INT64 javaBytePos) {
    // javaBytePos is not needed here (it's available in DAUDIO_GetBytePosition)
}

int DAUDIO_RequiresServicing(void* id, int isSource) {
    return FALSE;
}

void DAUDIO_Service(void* id, int isSource) {
    // unreachable
}

#endif  // USE_DAUDIO == TRUE