1/*
2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * Copyright (C) 2013-2014 Apple Inc. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met:
8 *
9 *     * Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 *     * Redistributions in binary form must reproduce the above
12 * copyright notice, this list of conditions and the following disclaimer
13 * in the documentation and/or other materials provided with the
14 * distribution.
15 *     * Neither the name of Google Inc. nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32#include "config.h"
33#include "SourceBuffer.h"
34
35#if ENABLE(MEDIA_SOURCE)
36
37#include "AudioTrackList.h"
38#include "Event.h"
39#include "ExceptionCodePlaceholder.h"
40#include "GenericEventQueue.h"
41#include "HTMLMediaElement.h"
42#include "InbandTextTrack.h"
43#include "Logging.h"
44#include "MediaDescription.h"
45#include "MediaSample.h"
46#include "MediaSource.h"
47#include "SampleMap.h"
48#include "SourceBufferPrivate.h"
49#include "TextTrackList.h"
50#include "TimeRanges.h"
51#include "VideoTrackList.h"
52#include <limits>
53#include <map>
54#include <runtime/JSCInlines.h>
55#include <runtime/JSLock.h>
56#include <runtime/VM.h>
57#include <wtf/CurrentTime.h>
58#include <wtf/NeverDestroyed.h>
59#if !LOG_DISABLED
60#include <wtf/text/StringBuilder.h>
61#endif
62
63namespace WebCore {
64
65static double ExponentialMovingAverageCoefficient = 0.1;
66
67// Allow hasCurrentTime() to be off by as much as the length of a 24fps video frame
68static const MediaTime& currentTimeFudgeFactor()
69{
70    static NeverDestroyed<MediaTime> fudgeFactor(1, 24);
71    return fudgeFactor;
72}
73
74struct SourceBuffer::TrackBuffer {
75    MediaTime lastDecodeTimestamp;
76    MediaTime lastFrameDuration;
77    MediaTime highestPresentationTimestamp;
78    MediaTime lastEnqueuedPresentationTime;
79    MediaTime lastEnqueuedDecodeEndTime;
80    bool needRandomAccessFlag;
81    bool enabled;
82    bool needsReenqueueing;
83    SampleMap samples;
84    DecodeOrderSampleMap::MapType decodeQueue;
85    RefPtr<MediaDescription> description;
86
87    TrackBuffer()
88        : lastDecodeTimestamp(MediaTime::invalidTime())
89        , lastFrameDuration(MediaTime::invalidTime())
90        , highestPresentationTimestamp(MediaTime::invalidTime())
91        , lastEnqueuedPresentationTime(MediaTime::invalidTime())
92        , lastEnqueuedDecodeEndTime(MediaTime::invalidTime())
93        , needRandomAccessFlag(true)
94        , enabled(false)
95        , needsReenqueueing(false)
96    {
97    }
98};
99
100PassRef<SourceBuffer> SourceBuffer::create(PassRef<SourceBufferPrivate> sourceBufferPrivate, MediaSource* source)
101{
102    RefPtr<SourceBuffer> sourceBuffer(adoptRef(new SourceBuffer(WTF::move(sourceBufferPrivate), source)));
103    sourceBuffer->suspendIfNeeded();
104    return sourceBuffer.releaseNonNull();
105}
106
107SourceBuffer::SourceBuffer(PassRef<SourceBufferPrivate> sourceBufferPrivate, MediaSource* source)
108    : ActiveDOMObject(source->scriptExecutionContext())
109    , m_private(WTF::move(sourceBufferPrivate))
110    , m_source(source)
111    , m_asyncEventQueue(*this)
112    , m_appendBufferTimer(this, &SourceBuffer::appendBufferTimerFired)
113    , m_highestPresentationEndTimestamp(MediaTime::invalidTime())
114    , m_buffered(TimeRanges::create())
115    , m_appendState(WaitingForSegment)
116    , m_timeOfBufferingMonitor(monotonicallyIncreasingTime())
117    , m_bufferedSinceLastMonitor(0)
118    , m_averageBufferRate(0)
119    , m_reportedExtraMemoryCost(0)
120    , m_pendingRemoveStart(MediaTime::invalidTime())
121    , m_pendingRemoveEnd(MediaTime::invalidTime())
122    , m_removeTimer(this, &SourceBuffer::removeTimerFired)
123    , m_updating(false)
124    , m_receivedFirstInitializationSegment(false)
125    , m_active(false)
126    , m_bufferFull(false)
127{
128    ASSERT(m_source);
129
130    m_private->setClient(this);
131}
132
133SourceBuffer::~SourceBuffer()
134{
135    ASSERT(isRemoved());
136
137    m_private->setClient(0);
138}
139
140PassRefPtr<TimeRanges> SourceBuffer::buffered(ExceptionCode& ec) const
141{
142    // Section 3.1 buffered attribute steps.
143    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#attributes-1
144    // 1. If this object has been removed from the sourceBuffers attribute of the parent media source then throw an
145    //    INVALID_STATE_ERR exception and abort these steps.
146    if (isRemoved()) {
147        ec = INVALID_STATE_ERR;
148        return nullptr;
149    }
150
151    // 2. Return a new static normalized TimeRanges object for the media segments buffered.
152    return m_buffered->copy();
153}
154
155const RefPtr<TimeRanges>& SourceBuffer::buffered() const
156{
157    return m_buffered;
158}
159
160double SourceBuffer::timestampOffset() const
161{
162    return m_timestampOffset.toDouble();
163}
164
165void SourceBuffer::setTimestampOffset(double offset, ExceptionCode& ec)
166{
167    // Section 3.1 timestampOffset attribute setter steps.
168    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#attributes-1
169    // 1. Let new timestamp offset equal the new value being assigned to this attribute.
170    // 2. If this object has been removed from the sourceBuffers attribute of the parent media source, then throw an
171    //    INVALID_STATE_ERR exception and abort these steps.
172    // 3. If the updating attribute equals true, then throw an INVALID_STATE_ERR exception and abort these steps.
173    if (isRemoved() || m_updating) {
174        ec = INVALID_STATE_ERR;
175        return;
176    }
177
178    // 4. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
179    // 4.1 Set the readyState attribute of the parent media source to "open"
180    // 4.2 Queue a task to fire a simple event named sourceopen at the parent media source.
181    m_source->openIfInEndedState();
182
183    // 5. If the append state equals PARSING_MEDIA_SEGMENT, then throw an INVALID_STATE_ERR and abort these steps.
184    if (m_appendState == ParsingMediaSegment) {
185        ec = INVALID_STATE_ERR;
186        return;
187    }
188
189    // FIXME: Add step 6 text when mode attribute is implemented.
190    // 7. Update the attribute to the new value.
191    m_timestampOffset = MediaTime::createWithDouble(offset);
192}
193
194void SourceBuffer::appendBuffer(PassRefPtr<ArrayBuffer> data, ExceptionCode& ec)
195{
196    // Section 3.2 appendBuffer()
197    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-appendBuffer-void-ArrayBufferView-data
198    // 1. If data is null then throw an INVALID_ACCESS_ERR exception and abort these steps.
199    if (!data) {
200        ec = INVALID_ACCESS_ERR;
201        return;
202    }
203
204    appendBufferInternal(static_cast<unsigned char*>(data->data()), data->byteLength(), ec);
205}
206
207void SourceBuffer::appendBuffer(PassRefPtr<ArrayBufferView> data, ExceptionCode& ec)
208{
209    // Section 3.2 appendBuffer()
210    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-appendBuffer-void-ArrayBufferView-data
211    // 1. If data is null then throw an INVALID_ACCESS_ERR exception and abort these steps.
212    if (!data) {
213        ec = INVALID_ACCESS_ERR;
214        return;
215    }
216
217    appendBufferInternal(static_cast<unsigned char*>(data->baseAddress()), data->byteLength(), ec);
218}
219
220void SourceBuffer::abort(ExceptionCode& ec)
221{
222    // Section 3.2 abort() method steps.
223    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-abort-void
224    // 1. If this object has been removed from the sourceBuffers attribute of the parent media source
225    //    then throw an INVALID_STATE_ERR exception and abort these steps.
226    // 2. If the readyState attribute of the parent media source is not in the "open" state
227    //    then throw an INVALID_STATE_ERR exception and abort these steps.
228    if (isRemoved() || !m_source->isOpen()) {
229        ec = INVALID_STATE_ERR;
230        return;
231    }
232
233    // 3. If the sourceBuffer.updating attribute equals true, then run the following steps: ...
234    abortIfUpdating();
235
236    // 4. Run the reset parser state algorithm.
237    m_private->abort();
238
239    // FIXME(229408) Add steps 5-6 update appendWindowStart & appendWindowEnd.
240}
241
242void SourceBuffer::remove(double start, double end, ExceptionCode& ec)
243{
244    LOG(MediaSource, "SourceBuffer::remove(%p) - start(%lf), end(%lf)", this, start, end);
245
246    // Section 3.2 remove() method steps.
247    // 1. If start is negative or greater than duration, then throw an InvalidAccessError exception and abort these steps.
248    // 2. If end is less than or equal to start, then throw an InvalidAccessError exception and abort these steps.
249    if (start < 0 || (m_source && (std::isnan(m_source->duration()) || start > m_source->duration())) || end <= start) {
250        ec = INVALID_ACCESS_ERR;
251        return;
252    }
253
254    // 3. If this object has been removed from the sourceBuffers attribute of the parent media source then throw an
255    //    InvalidStateError exception and abort these steps.
256    // 4. If the updating attribute equals true, then throw an InvalidStateError exception and abort these steps.
257    if (isRemoved() || m_updating) {
258        ec = INVALID_STATE_ERR;
259        return;
260    }
261
262    // 5. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
263    // 5.1. Set the readyState attribute of the parent media source to "open"
264    // 5.2. Queue a task to fire a simple event named sourceopen at the parent media source .
265    m_source->openIfInEndedState();
266
267    // 6. Set the updating attribute to true.
268    m_updating = true;
269
270    // 7. Queue a task to fire a simple event named updatestart at this SourceBuffer object.
271    scheduleEvent(eventNames().updatestartEvent);
272
273    // 8. Return control to the caller and run the rest of the steps asynchronously.
274    m_pendingRemoveStart = MediaTime::createWithDouble(start);
275    m_pendingRemoveEnd = MediaTime::createWithDouble(end);
276    m_removeTimer.startOneShot(0);
277}
278
279void SourceBuffer::abortIfUpdating()
280{
281    // Section 3.2 abort() method step 3 substeps.
282    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-abort-void
283
284    if (!m_updating)
285        return;
286
287    // 3.1. Abort the buffer append and stream append loop algorithms if they are running.
288    m_appendBufferTimer.stop();
289    m_pendingAppendData.clear();
290
291    m_removeTimer.stop();
292    m_pendingRemoveStart = MediaTime::invalidTime();
293    m_pendingRemoveEnd = MediaTime::invalidTime();
294
295    // 3.2. Set the updating attribute to false.
296    m_updating = false;
297
298    // 3.3. Queue a task to fire a simple event named abort at this SourceBuffer object.
299    scheduleEvent(eventNames().abortEvent);
300
301    // 3.4. Queue a task to fire a simple event named updateend at this SourceBuffer object.
302    scheduleEvent(eventNames().updateendEvent);
303}
304
305void SourceBuffer::removedFromMediaSource()
306{
307    if (isRemoved())
308        return;
309
310    abortIfUpdating();
311
312    for (auto& trackBufferPair : m_trackBufferMap.values()) {
313        trackBufferPair.samples.clear();
314        trackBufferPair.decodeQueue.clear();
315    }
316
317    m_private->removedFromMediaSource();
318    m_source = 0;
319}
320
321void SourceBuffer::seekToTime(const MediaTime& time)
322{
323    LOG(MediaSource, "SourceBuffer::seekToTime(%p) - time(%s)", this, toString(time).utf8().data());
324
325    for (auto& trackBufferPair : m_trackBufferMap) {
326        TrackBuffer& trackBuffer = trackBufferPair.value;
327        const AtomicString& trackID = trackBufferPair.key;
328
329        trackBuffer.needsReenqueueing = true;
330        reenqueueMediaForTime(trackBuffer, trackID, time);
331    }
332}
333
334MediaTime SourceBuffer::sourceBufferPrivateFastSeekTimeForMediaTime(SourceBufferPrivate*, const MediaTime& targetTime, const MediaTime& negativeThreshold, const MediaTime& positiveThreshold)
335{
336    MediaTime seekTime = targetTime;
337    MediaTime lowerBoundTime = targetTime - negativeThreshold;
338    MediaTime upperBoundTime = targetTime + positiveThreshold;
339
340    for (auto& trackBuffer : m_trackBufferMap.values()) {
341        // Find the sample which contains the target time time.
342        auto futureSyncSampleIterator = trackBuffer.samples.decodeOrder().findSyncSampleAfterPresentationTime(targetTime, positiveThreshold);
343        auto pastSyncSampleIterator = trackBuffer.samples.decodeOrder().findSyncSamplePriorToPresentationTime(targetTime, negativeThreshold);
344        auto upperBound = trackBuffer.samples.decodeOrder().end();
345        auto lowerBound = trackBuffer.samples.decodeOrder().rend();
346
347        if (futureSyncSampleIterator == upperBound && pastSyncSampleIterator == lowerBound)
348            continue;
349
350        MediaTime futureSeekTime = MediaTime::positiveInfiniteTime();
351        if (futureSyncSampleIterator != upperBound) {
352            RefPtr<MediaSample>& sample = futureSyncSampleIterator->second;
353            futureSeekTime = sample->presentationTime();
354        }
355
356        MediaTime pastSeekTime = MediaTime::negativeInfiniteTime();
357        if (pastSyncSampleIterator != lowerBound) {
358            RefPtr<MediaSample>& sample = pastSyncSampleIterator->second;
359            pastSeekTime = sample->presentationTime();
360        }
361
362        MediaTime trackSeekTime = abs(targetTime - futureSeekTime) < abs(targetTime - pastSeekTime) ? futureSeekTime : pastSeekTime;
363        if (abs(targetTime - trackSeekTime) > abs(targetTime - seekTime))
364            seekTime = trackSeekTime;
365    }
366
367    return seekTime;
368}
369
370bool SourceBuffer::hasPendingActivity() const
371{
372    return m_source || m_asyncEventQueue.hasPendingEvents();
373}
374
375void SourceBuffer::stop()
376{
377    m_appendBufferTimer.stop();
378    m_removeTimer.stop();
379}
380
381bool SourceBuffer::isRemoved() const
382{
383    return !m_source;
384}
385
386void SourceBuffer::scheduleEvent(const AtomicString& eventName)
387{
388    RefPtr<Event> event = Event::create(eventName, false, false);
389    event->setTarget(this);
390
391    m_asyncEventQueue.enqueueEvent(event.release());
392}
393
394void SourceBuffer::appendBufferInternal(unsigned char* data, unsigned size, ExceptionCode& ec)
395{
396    // Section 3.2 appendBuffer()
397    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-appendBuffer-void-ArrayBufferView-data
398
399    // Step 1 is enforced by the caller.
400    // 2. Run the prepare append algorithm.
401    // Section 3.5.4 Prepare AppendAlgorithm
402
403    // 1. If the SourceBuffer has been removed from the sourceBuffers attribute of the parent media source
404    // then throw an INVALID_STATE_ERR exception and abort these steps.
405    // 2. If the updating attribute equals true, then throw an INVALID_STATE_ERR exception and abort these steps.
406    if (isRemoved() || m_updating) {
407        ec = INVALID_STATE_ERR;
408        return;
409    }
410
411    // 3. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
412    // 3.1. Set the readyState attribute of the parent media source to "open"
413    // 3.2. Queue a task to fire a simple event named sourceopen at the parent media source .
414    m_source->openIfInEndedState();
415
416    // 4. Run the coded frame eviction algorithm.
417    evictCodedFrames(size);
418
419    // FIXME: enable this code when MSE libraries have been updated to support it.
420#if 0
421    // 5. If the buffer full flag equals true, then throw a QUOTA_EXCEEDED_ERR exception and abort these step.
422    if (m_bufferFull) {
423        LOG(MediaSource, "SourceBuffer::appendBufferInternal(%p) -  buffer full, failing with QUOTA_EXCEEDED_ERR error", this);
424        ec = QUOTA_EXCEEDED_ERR;
425        return;
426    }
427#endif
428
429    // NOTE: Return to 3.2 appendBuffer()
430    // 3. Add data to the end of the input buffer.
431    m_pendingAppendData.append(data, size);
432
433    // 4. Set the updating attribute to true.
434    m_updating = true;
435
436    // 5. Queue a task to fire a simple event named updatestart at this SourceBuffer object.
437    scheduleEvent(eventNames().updatestartEvent);
438
439    // 6. Asynchronously run the buffer append algorithm.
440    m_appendBufferTimer.startOneShot(0);
441
442    reportExtraMemoryCost();
443}
444
445void SourceBuffer::appendBufferTimerFired(Timer<SourceBuffer>&)
446{
447    if (isRemoved())
448        return;
449
450    ASSERT(m_updating);
451
452    // Section 3.5.5 Buffer Append Algorithm
453    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-buffer-append
454
455    // 1. Run the segment parser loop algorithm.
456    size_t appendSize = m_pendingAppendData.size();
457    if (!appendSize) {
458        // Resize buffer for 0 byte appends so we always have a valid pointer.
459        // We need to convey all appends, even 0 byte ones to |m_private| so
460        // that it can clear its end of stream state if necessary.
461        m_pendingAppendData.resize(1);
462    }
463
464    // Section 3.5.1 Segment Parser Loop
465    // https://dvcs.w3.org/hg/html-media/raw-file/tip/media-source/media-source.html#sourcebuffer-segment-parser-loop
466    // When the segment parser loop algorithm is invoked, run the following steps:
467
468    // 1. Loop Top: If the input buffer is empty, then jump to the need more data step below.
469    if (!m_pendingAppendData.size()) {
470        sourceBufferPrivateAppendComplete(&m_private.get(), AppendSucceeded);
471        return;
472    }
473
474    m_private->append(m_pendingAppendData.data(), appendSize);
475    m_pendingAppendData.clear();
476}
477
478void SourceBuffer::sourceBufferPrivateAppendComplete(SourceBufferPrivate*, AppendResult result)
479{
480    if (isRemoved())
481        return;
482
483    // Section 3.5.5 Buffer Append Algorithm, ctd.
484    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-buffer-append
485
486    // 2. If the input buffer contains bytes that violate the SourceBuffer byte stream format specification,
487    // then run the end of stream algorithm with the error parameter set to "decode" and abort this algorithm.
488    if (result == ParsingFailed) {
489        m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode());
490        return;
491    }
492
493    // NOTE: Steps 3 - 6 enforced by sourceBufferPrivateDidReceiveInitializationSegment() and
494    // sourceBufferPrivateDidReceiveSample below.
495
496    // 7. Need more data: Return control to the calling algorithm.
497
498    // NOTE: return to Section 3.5.5
499    // 2.If the segment parser loop algorithm in the previous step was aborted, then abort this algorithm.
500    if (result != AppendSucceeded)
501        return;
502
503    // 3. Set the updating attribute to false.
504    m_updating = false;
505
506    // 4. Queue a task to fire a simple event named update at this SourceBuffer object.
507    scheduleEvent(eventNames().updateEvent);
508
509    // 5. Queue a task to fire a simple event named updateend at this SourceBuffer object.
510    scheduleEvent(eventNames().updateendEvent);
511
512    if (m_source)
513        m_source->monitorSourceBuffers();
514
515    MediaTime currentMediaTime = MediaTime::createWithDouble(m_source->currentTime());
516    for (auto& trackBufferPair : m_trackBufferMap) {
517        TrackBuffer& trackBuffer = trackBufferPair.value;
518        const AtomicString& trackID = trackBufferPair.key;
519
520        if (trackBuffer.needsReenqueueing) {
521            LOG(MediaSource, "SourceBuffer::sourceBufferPrivateAppendComplete(%p) - reenqueuing at time (%s)", this, toString(currentMediaTime).utf8().data());
522            reenqueueMediaForTime(trackBuffer, trackID, currentMediaTime);
523        } else
524            provideMediaData(trackBuffer, trackID);
525    }
526
527    reportExtraMemoryCost();
528    if (extraMemoryCost() > this->maximumBufferSize())
529        m_bufferFull = true;
530
531    LOG(Media, "SourceBuffer::sourceBufferPrivateAppendComplete(%p) - buffered = %s", this, toString(m_buffered->ranges()).utf8().data());
532}
533
534void SourceBuffer::sourceBufferPrivateDidReceiveRenderingError(SourceBufferPrivate*, int)
535{
536    if (!isRemoved())
537        m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode());
538}
539
540static bool decodeTimeComparator(const PresentationOrderSampleMap::MapType::value_type& a, const PresentationOrderSampleMap::MapType::value_type& b)
541{
542    return a.second->decodeTime() < b.second->decodeTime();
543}
544
545static PassRefPtr<TimeRanges> removeSamplesFromTrackBuffer(const DecodeOrderSampleMap::MapType& samples, SourceBuffer::TrackBuffer& trackBuffer, const SourceBuffer* buffer, const char* logPrefix)
546{
547#if !LOG_DISABLED
548    double earliestSample = std::numeric_limits<double>::infinity();
549    double latestSample = 0;
550    size_t bytesRemoved = 0;
551#else
552    UNUSED_PARAM(logPrefix);
553    UNUSED_PARAM(buffer);
554#endif
555
556    RefPtr<TimeRanges> erasedRanges = TimeRanges::create();
557    MediaTime microsecond(1, 1000000);
558    for (auto sampleIt : samples) {
559        const DecodeOrderSampleMap::KeyType& decodeKey = sampleIt.first;
560#if !LOG_DISABLED
561        size_t startBufferSize = trackBuffer.samples.sizeInBytes();
562#endif
563
564        RefPtr<MediaSample>& sample = sampleIt.second;
565        LOG(MediaSource, "SourceBuffer::%s(%p) - removing sample(%s)", logPrefix, buffer, toString(*sampleIt.second).utf8().data());
566
567        // Remove the erased samples from the TrackBuffer sample map.
568        trackBuffer.samples.removeSample(sample.get());
569
570        // Also remove the erased samples from the TrackBuffer decodeQueue.
571        trackBuffer.decodeQueue.erase(decodeKey);
572
573        double startTime = sample->presentationTime().toDouble();
574        double endTime = startTime + (sample->duration() + microsecond).toDouble();
575        erasedRanges->add(startTime, endTime);
576
577#if !LOG_DISABLED
578        bytesRemoved += startBufferSize - trackBuffer.samples.sizeInBytes();
579        if (startTime < earliestSample)
580            earliestSample = startTime;
581        if (endTime > latestSample)
582            latestSample = endTime;
583#endif
584    }
585
586#if !LOG_DISABLED
587    if (bytesRemoved)
588        LOG(MediaSource, "SourceBuffer::%s(%p) removed %zu bytes, start(%lf), end(%lf)", logPrefix, buffer, bytesRemoved, earliestSample, latestSample);
589#endif
590
591    return erasedRanges.release();
592}
593
594void SourceBuffer::removeCodedFrames(const MediaTime& start, const MediaTime& end)
595{
596    LOG(MediaSource, "SourceBuffer::removeCodedFrames(%p) - start(%s), end(%s)", this, toString(start).utf8().data(), toString(end).utf8().data());
597
598    // 3.5.9 Coded Frame Removal Algorithm
599    // https://dvcs.w3.org/hg/html-media/raw-file/tip/media-source/media-source.html#sourcebuffer-coded-frame-removal
600
601    // 1. Let start be the starting presentation timestamp for the removal range.
602    MediaTime durationMediaTime = MediaTime::createWithDouble(m_source->duration());
603    MediaTime currentMediaTime = MediaTime::createWithDouble(m_source->currentTime());
604
605    // 2. Let end be the end presentation timestamp for the removal range.
606    // 3. For each track buffer in this source buffer, run the following steps:
607    for (auto& iter : m_trackBufferMap) {
608        TrackBuffer& trackBuffer = iter.value;
609
610        // 3.1. Let remove end timestamp be the current value of duration
611        // 3.2 If this track buffer has a random access point timestamp that is greater than or equal to end, then update
612        // remove end timestamp to that random access point timestamp.
613        // NOTE: findSyncSampleAfterPresentationTime will return the next sync sample on or after the presentation time
614        // or decodeOrder().end() if no sync sample exists after that presentation time.
615        DecodeOrderSampleMap::iterator removeDecodeEnd = trackBuffer.samples.decodeOrder().findSyncSampleAfterPresentationTime(end);
616        PresentationOrderSampleMap::iterator removePresentationEnd;
617        if (removeDecodeEnd == trackBuffer.samples.decodeOrder().end())
618            removePresentationEnd = trackBuffer.samples.presentationOrder().end();
619        else
620            removePresentationEnd = trackBuffer.samples.presentationOrder().findSampleWithPresentationTime(removeDecodeEnd->second->presentationTime());
621
622        PresentationOrderSampleMap::iterator removePresentationStart = trackBuffer.samples.presentationOrder().findSampleOnOrAfterPresentationTime(start);
623        if (removePresentationStart == removePresentationEnd)
624            continue;
625
626        // 3.3 Remove all media data, from this track buffer, that contain starting timestamps greater than or equal to
627        // start and less than the remove end timestamp.
628        // NOTE: frames must be removed in decode order, so that all dependant frames between the frame to be removed
629        // and the next sync sample frame are removed. But we must start from the first sample in decode order, not
630        // presentation order.
631        PresentationOrderSampleMap::iterator minDecodeTimeIter = std::min_element(removePresentationStart, removePresentationEnd, decodeTimeComparator);
632        DecodeOrderSampleMap::KeyType decodeKey(minDecodeTimeIter->second->decodeTime(), minDecodeTimeIter->second->presentationTime());
633        DecodeOrderSampleMap::iterator removeDecodeStart = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(decodeKey);
634
635        DecodeOrderSampleMap::MapType erasedSamples(removeDecodeStart, removeDecodeEnd);
636        RefPtr<TimeRanges> erasedRanges = removeSamplesFromTrackBuffer(erasedSamples, trackBuffer, this, "removeCodedFrames");
637
638        // Only force the TrackBuffer to re-enqueue if the removed ranges overlap with enqueued and possibly
639        // not yet displayed samples.
640        if (currentMediaTime < trackBuffer.lastEnqueuedPresentationTime) {
641            PlatformTimeRanges possiblyEnqueuedRanges(currentMediaTime, trackBuffer.lastEnqueuedPresentationTime);
642            possiblyEnqueuedRanges.intersectWith(erasedRanges->ranges());
643            if (possiblyEnqueuedRanges.length())
644                trackBuffer.needsReenqueueing = true;
645        }
646
647        erasedRanges->invert();
648        m_buffered->intersectWith(*erasedRanges);
649
650        // 3.4 If this object is in activeSourceBuffers, the current playback position is greater than or equal to start
651        // and less than the remove end timestamp, and HTMLMediaElement.readyState is greater than HAVE_METADATA, then set
652        // the HTMLMediaElement.readyState attribute to HAVE_METADATA and stall playback.
653        if (m_active && currentMediaTime >= start && currentMediaTime < end && m_private->readyState() > MediaPlayer::HaveMetadata)
654            m_private->setReadyState(MediaPlayer::HaveMetadata);
655    }
656
657    // 4. If buffer full flag equals true and this object is ready to accept more bytes, then set the buffer full flag to false.
658    // No-op
659
660    LOG(Media, "SourceBuffer::removeCodedFrames(%p) - buffered = %s", this, toString(m_buffered->ranges()).utf8().data());
661}
662
663void SourceBuffer::removeTimerFired(Timer<SourceBuffer>*)
664{
665    ASSERT(m_updating);
666    ASSERT(m_pendingRemoveStart.isValid());
667    ASSERT(m_pendingRemoveStart < m_pendingRemoveEnd);
668
669    // Section 3.2 remove() method steps
670    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-remove-void-double-start-double-end
671
672    // 9. Run the coded frame removal algorithm with start and end as the start and end of the removal range.
673    removeCodedFrames(m_pendingRemoveStart, m_pendingRemoveEnd);
674
675    // 10. Set the updating attribute to false.
676    m_updating = false;
677    m_pendingRemoveStart = MediaTime::invalidTime();
678    m_pendingRemoveEnd = MediaTime::invalidTime();
679
680    // 11. Queue a task to fire a simple event named update at this SourceBuffer object.
681    scheduleEvent(eventNames().updateEvent);
682
683    // 12. Queue a task to fire a simple event named updateend at this SourceBuffer object.
684    scheduleEvent(eventNames().updateendEvent);
685}
686
687void SourceBuffer::evictCodedFrames(size_t newDataSize)
688{
689    // 3.5.13 Coded Frame Eviction Algorithm
690    // http://www.w3.org/TR/media-source/#sourcebuffer-coded-frame-eviction
691
692    if (isRemoved())
693        return;
694
695    // This algorithm is run to free up space in this source buffer when new data is appended.
696    // 1. Let new data equal the data that is about to be appended to this SourceBuffer.
697    // 2. If the buffer full flag equals false, then abort these steps.
698    if (!m_bufferFull)
699        return;
700
701    size_t maximumBufferSize = this->maximumBufferSize();
702
703    // 3. Let removal ranges equal a list of presentation time ranges that can be evicted from
704    // the presentation to make room for the new data.
705
706    // NOTE: begin by removing data from the beginning of the buffered ranges, 30 seconds at
707    // a time, up to 30 seconds before currentTime.
708    MediaTime thirtySeconds = MediaTime(30, 1);
709    MediaTime currentTime = MediaTime::createWithDouble(m_source->currentTime());
710    MediaTime maximumRangeEnd = currentTime - thirtySeconds;
711
712#if !LOG_DISABLED
713    LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - currentTime = %lf, require %zu bytes, maximum buffer size is %zu", this, m_source->currentTime(), extraMemoryCost() + newDataSize, maximumBufferSize);
714    size_t initialBufferedSize = extraMemoryCost();
715#endif
716
717    MediaTime rangeStart = MediaTime::zeroTime();
718    MediaTime rangeEnd = rangeStart + thirtySeconds;
719    while (rangeStart < maximumRangeEnd) {
720        // 4. For each range in removal ranges, run the coded frame removal algorithm with start and
721        // end equal to the removal range start and end timestamp respectively.
722        removeCodedFrames(rangeStart, std::min(rangeEnd, maximumRangeEnd));
723        if (extraMemoryCost() + newDataSize < maximumBufferSize) {
724            m_bufferFull = false;
725            break;
726        }
727
728        rangeStart += thirtySeconds;
729        rangeEnd += thirtySeconds;
730    }
731
732    if (!m_bufferFull) {
733        LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - evicted %zu bytes", this, initialBufferedSize - extraMemoryCost());
734        return;
735    }
736
737    // If there still isn't enough free space and there buffers in time ranges after the current range (ie. there is a gap after
738    // the current buffered range), delete 30 seconds at a time from duration back to the current time range or 30 seconds after
739    // currenTime whichever we hit first.
740    auto buffered = m_buffered->ranges();
741    size_t currentTimeRange = buffered.find(currentTime);
742    if (currentTimeRange == notFound || currentTimeRange == buffered.length() - 1) {
743        LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - evicted %zu bytes but FAILED to free enough", this, initialBufferedSize - extraMemoryCost());
744        return;
745    }
746
747    MediaTime minimumRangeStart = currentTime + thirtySeconds;
748
749    rangeEnd = MediaTime::createWithDouble(m_source->duration());
750    rangeStart = rangeEnd - thirtySeconds;
751    while (rangeStart > minimumRangeStart) {
752
753        // Do not evict data from the time range that contains currentTime.
754        size_t startTimeRange = buffered.find(rangeStart);
755        if (startTimeRange == currentTimeRange) {
756            size_t endTimeRange = buffered.find(rangeEnd);
757            if (endTimeRange == currentTimeRange)
758                break;
759
760            rangeEnd = buffered.start(endTimeRange);
761        }
762
763        // 4. For each range in removal ranges, run the coded frame removal algorithm with start and
764        // end equal to the removal range start and end timestamp respectively.
765        removeCodedFrames(std::max(minimumRangeStart, rangeStart), rangeEnd);
766        if (extraMemoryCost() + newDataSize < maximumBufferSize) {
767            m_bufferFull = false;
768            break;
769        }
770
771        rangeStart -= thirtySeconds;
772        rangeEnd -= thirtySeconds;
773    }
774
775    LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - evicted %zu bytes%s", this, initialBufferedSize - extraMemoryCost(), m_bufferFull ? "" : " but FAILED to free enough");
776}
777
778size_t SourceBuffer::maximumBufferSize() const
779{
780    if (isRemoved())
781        return 0;
782
783    HTMLMediaElement* element = m_source->mediaElement();
784    if (!element)
785        return 0;
786
787    return element->maximumSourceBufferSize(*this);
788}
789
790const AtomicString& SourceBuffer::decodeError()
791{
792    static NeverDestroyed<AtomicString> decode("decode", AtomicString::ConstructFromLiteral);
793    return decode;
794}
795
796const AtomicString& SourceBuffer::networkError()
797{
798    static NeverDestroyed<AtomicString> network("network", AtomicString::ConstructFromLiteral);
799    return network;
800}
801
802VideoTrackList* SourceBuffer::videoTracks()
803{
804    if (!m_source || !m_source->mediaElement())
805        return nullptr;
806
807    if (!m_videoTracks)
808        m_videoTracks = VideoTrackList::create(m_source->mediaElement(), ActiveDOMObject::scriptExecutionContext());
809
810    return m_videoTracks.get();
811}
812
813AudioTrackList* SourceBuffer::audioTracks()
814{
815    if (!m_source || !m_source->mediaElement())
816        return nullptr;
817
818    if (!m_audioTracks)
819        m_audioTracks = AudioTrackList::create(m_source->mediaElement(), ActiveDOMObject::scriptExecutionContext());
820
821    return m_audioTracks.get();
822}
823
824TextTrackList* SourceBuffer::textTracks()
825{
826    if (!m_source || !m_source->mediaElement())
827        return nullptr;
828
829    if (!m_textTracks)
830        m_textTracks = TextTrackList::create(m_source->mediaElement(), ActiveDOMObject::scriptExecutionContext());
831
832    return m_textTracks.get();
833}
834
835void SourceBuffer::setActive(bool active)
836{
837    if (m_active == active)
838        return;
839
840    m_active = active;
841    m_private->setActive(active);
842    if (!isRemoved())
843        m_source->sourceBufferDidChangeAcitveState(this, active);
844}
845
846void SourceBuffer::sourceBufferPrivateDidEndStream(SourceBufferPrivate*, const WTF::AtomicString& error)
847{
848    if (!isRemoved())
849        m_source->streamEndedWithError(error, IgnorableExceptionCode());
850}
851
852void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(SourceBufferPrivate*, const InitializationSegment& segment)
853{
854    if (isRemoved())
855        return;
856
857    // 3.5.7 Initialization Segment Received
858    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-init-segment-received
859    // 1. Update the duration attribute if it currently equals NaN:
860    if (std::isnan(m_source->duration())) {
861        // ↳ If the initialization segment contains a duration:
862        //   Run the duration change algorithm with new duration set to the duration in the initialization segment.
863        // ↳ Otherwise:
864        //   Run the duration change algorithm with new duration set to positive Infinity.
865        MediaTime newDuration = segment.duration.isValid() ? segment.duration : MediaTime::positiveInfiniteTime();
866        m_source->setDurationInternal(newDuration.toDouble());
867    }
868
869    // 2. If the initialization segment has no audio, video, or text tracks, then run the end of stream
870    // algorithm with the error parameter set to "decode" and abort these steps.
871    if (!segment.audioTracks.size() && !segment.videoTracks.size() && !segment.textTracks.size())
872        m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode());
873
874
875    // 3. If the first initialization segment flag is true, then run the following steps:
876    if (m_receivedFirstInitializationSegment) {
877        if (!validateInitializationSegment(segment)) {
878            m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode());
879            return;
880        }
881        // 3.2 Add the appropriate track descriptions from this initialization segment to each of the track buffers.
882        ASSERT(segment.audioTracks.size() == audioTracks()->length());
883        for (auto& audioTrackInfo : segment.audioTracks) {
884            if (audioTracks()->length() == 1) {
885                audioTracks()->item(0)->setPrivate(audioTrackInfo.track);
886                break;
887            }
888
889            auto audioTrack = audioTracks()->getTrackById(audioTrackInfo.track->id());
890            ASSERT(audioTrack);
891            audioTrack->setPrivate(audioTrackInfo.track);
892        }
893
894        ASSERT(segment.videoTracks.size() == videoTracks()->length());
895        for (auto& videoTrackInfo : segment.videoTracks) {
896            if (videoTracks()->length() == 1) {
897                videoTracks()->item(0)->setPrivate(videoTrackInfo.track);
898                break;
899            }
900
901            auto videoTrack = videoTracks()->getTrackById(videoTrackInfo.track->id());
902            ASSERT(videoTrack);
903            videoTrack->setPrivate(videoTrackInfo.track);
904        }
905
906        ASSERT(segment.textTracks.size() == textTracks()->length());
907        for (auto& textTrackInfo : segment.textTracks) {
908            if (textTracks()->length() == 1) {
909                toInbandTextTrack(textTracks()->item(0))->setPrivate(textTrackInfo.track);
910                break;
911            }
912
913            auto textTrack = textTracks()->getTrackById(textTrackInfo.track->id());
914            ASSERT(textTrack);
915            toInbandTextTrack(textTrack)->setPrivate(textTrackInfo.track);
916        }
917
918        for (auto& trackBuffer : m_trackBufferMap.values())
919            trackBuffer.needRandomAccessFlag = true;
920    }
921
922    // 4. Let active track flag equal false.
923    bool activeTrackFlag = false;
924
925    // 5. If the first initialization segment flag is false, then run the following steps:
926    if (!m_receivedFirstInitializationSegment) {
927        // 5.1 If the initialization segment contains tracks with codecs the user agent does not support,
928        // then run the end of stream algorithm with the error parameter set to "decode" and abort these steps.
929        // NOTE: This check is the responsibility of the SourceBufferPrivate.
930
931        // 5.2 For each audio track in the initialization segment, run following steps:
932        for (auto& audioTrackInfo : segment.audioTracks) {
933            AudioTrackPrivate* audioTrackPrivate = audioTrackInfo.track.get();
934
935            // 5.2.1 Let new audio track be a new AudioTrack object.
936            // 5.2.2 Generate a unique ID and assign it to the id property on new video track.
937            RefPtr<AudioTrack> newAudioTrack = AudioTrack::create(this, audioTrackPrivate);
938            newAudioTrack->setSourceBuffer(this);
939
940            // 5.2.3 If audioTracks.length equals 0, then run the following steps:
941            if (!audioTracks()->length()) {
942                // 5.2.3.1 Set the enabled property on new audio track to true.
943                newAudioTrack->setEnabled(true);
944
945                // 5.2.3.2 Set active track flag to true.
946                activeTrackFlag = true;
947            }
948
949            // 5.2.4 Add new audio track to the audioTracks attribute on this SourceBuffer object.
950            // 5.2.5 Queue a task to fire a trusted event named addtrack, that does not bubble and is
951            // not cancelable, and that uses the TrackEvent interface, at the AudioTrackList object
952            // referenced by the audioTracks attribute on this SourceBuffer object.
953            audioTracks()->append(newAudioTrack);
954
955            // 5.2.6 Add new audio track to the audioTracks attribute on the HTMLMediaElement.
956            // 5.2.7 Queue a task to fire a trusted event named addtrack, that does not bubble and is
957            // not cancelable, and that uses the TrackEvent interface, at the AudioTrackList object
958            // referenced by the audioTracks attribute on the HTMLMediaElement.
959            m_source->mediaElement()->audioTracks()->append(newAudioTrack);
960
961            // 5.2.8 Create a new track buffer to store coded frames for this track.
962            ASSERT(!m_trackBufferMap.contains(newAudioTrack->id()));
963            TrackBuffer& trackBuffer = m_trackBufferMap.add(newAudioTrack->id(), TrackBuffer()).iterator->value;
964
965            // 5.2.9 Add the track description for this track to the track buffer.
966            trackBuffer.description = audioTrackInfo.description;
967
968            m_audioCodecs.append(trackBuffer.description->codec());
969        }
970
971        // 5.3 For each video track in the initialization segment, run following steps:
972        for (auto& videoTrackInfo : segment.videoTracks) {
973            VideoTrackPrivate* videoTrackPrivate = videoTrackInfo.track.get();
974
975            // 5.3.1 Let new video track be a new VideoTrack object.
976            // 5.3.2 Generate a unique ID and assign it to the id property on new video track.
977            RefPtr<VideoTrack> newVideoTrack = VideoTrack::create(this, videoTrackPrivate);
978            newVideoTrack->setSourceBuffer(this);
979
980            // 5.3.3 If videoTracks.length equals 0, then run the following steps:
981            if (!videoTracks()->length()) {
982                // 5.3.3.1 Set the selected property on new video track to true.
983                newVideoTrack->setSelected(true);
984
985                // 5.3.3.2 Set active track flag to true.
986                activeTrackFlag = true;
987            }
988
989            // 5.3.4 Add new video track to the videoTracks attribute on this SourceBuffer object.
990            // 5.3.5 Queue a task to fire a trusted event named addtrack, that does not bubble and is
991            // not cancelable, and that uses the TrackEvent interface, at the VideoTrackList object
992            // referenced by the videoTracks attribute on this SourceBuffer object.
993            videoTracks()->append(newVideoTrack);
994
995            // 5.3.6 Add new video track to the videoTracks attribute on the HTMLMediaElement.
996            // 5.3.7 Queue a task to fire a trusted event named addtrack, that does not bubble and is
997            // not cancelable, and that uses the TrackEvent interface, at the VideoTrackList object
998            // referenced by the videoTracks attribute on the HTMLMediaElement.
999            m_source->mediaElement()->videoTracks()->append(newVideoTrack);
1000
1001            // 5.3.8 Create a new track buffer to store coded frames for this track.
1002            ASSERT(!m_trackBufferMap.contains(newVideoTrack->id()));
1003            TrackBuffer& trackBuffer = m_trackBufferMap.add(newVideoTrack->id(), TrackBuffer()).iterator->value;
1004
1005            // 5.3.9 Add the track description for this track to the track buffer.
1006            trackBuffer.description = videoTrackInfo.description;
1007
1008            m_videoCodecs.append(trackBuffer.description->codec());
1009        }
1010
1011        // 5.4 For each text track in the initialization segment, run following steps:
1012        for (auto& textTrackInfo : segment.textTracks) {
1013            InbandTextTrackPrivate* textTrackPrivate = textTrackInfo.track.get();
1014
1015            // 5.4.1 Let new text track be a new TextTrack object with its properties populated with the
1016            // appropriate information from the initialization segment.
1017            RefPtr<InbandTextTrack> newTextTrack = InbandTextTrack::create(scriptExecutionContext(), this, textTrackPrivate);
1018
1019            // 5.4.2 If the mode property on new text track equals "showing" or "hidden", then set active
1020            // track flag to true.
1021            if (textTrackPrivate->mode() != InbandTextTrackPrivate::Disabled)
1022                activeTrackFlag = true;
1023
1024            // 5.4.3 Add new text track to the textTracks attribute on this SourceBuffer object.
1025            // 5.4.4 Queue a task to fire a trusted event named addtrack, that does not bubble and is
1026            // not cancelable, and that uses the TrackEvent interface, at textTracks attribute on this
1027            // SourceBuffer object.
1028            textTracks()->append(newTextTrack);
1029
1030            // 5.4.5 Add new text track to the textTracks attribute on the HTMLMediaElement.
1031            // 5.4.6 Queue a task to fire a trusted event named addtrack, that does not bubble and is
1032            // not cancelable, and that uses the TrackEvent interface, at the TextTrackList object
1033            // referenced by the textTracks attribute on the HTMLMediaElement.
1034            m_source->mediaElement()->textTracks()->append(newTextTrack);
1035
1036            // 5.4.7 Create a new track buffer to store coded frames for this track.
1037            ASSERT(!m_trackBufferMap.contains(textTrackPrivate->id()));
1038            TrackBuffer& trackBuffer = m_trackBufferMap.add(textTrackPrivate->id(), TrackBuffer()).iterator->value;
1039
1040            // 5.4.8 Add the track description for this track to the track buffer.
1041            trackBuffer.description = textTrackInfo.description;
1042
1043            m_textCodecs.append(trackBuffer.description->codec());
1044        }
1045
1046        // 5.5 If active track flag equals true, then run the following steps:
1047        if (activeTrackFlag) {
1048            // 5.5.1 Add this SourceBuffer to activeSourceBuffers.
1049            setActive(true);
1050        }
1051
1052        // 5.6 Set first initialization segment flag to true.
1053        m_receivedFirstInitializationSegment = true;
1054    }
1055
1056    // 6. If the HTMLMediaElement.readyState attribute is HAVE_NOTHING, then run the following steps:
1057    if (m_private->readyState() == MediaPlayer::HaveNothing) {
1058        // 6.1 If one or more objects in sourceBuffers have first initialization segment flag set to false, then abort these steps.
1059        for (auto& sourceBuffer : *m_source->sourceBuffers()) {
1060            if (!sourceBuffer->m_receivedFirstInitializationSegment)
1061                return;
1062        }
1063
1064        // 6.2 Set the HTMLMediaElement.readyState attribute to HAVE_METADATA.
1065        // 6.3 Queue a task to fire a simple event named loadedmetadata at the media element.
1066        m_private->setReadyState(MediaPlayer::HaveMetadata);
1067    }
1068
1069    // 7. If the active track flag equals true and the HTMLMediaElement.readyState
1070    // attribute is greater than HAVE_CURRENT_DATA, then set the HTMLMediaElement.readyState
1071    // attribute to HAVE_METADATA.
1072    if (activeTrackFlag && m_private->readyState() > MediaPlayer::HaveCurrentData)
1073        m_private->setReadyState(MediaPlayer::HaveMetadata);
1074}
1075
1076bool SourceBuffer::validateInitializationSegment(const InitializationSegment& segment)
1077{
1078    // 3.5.7 Initialization Segment Received (ctd)
1079    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-init-segment-received
1080
1081    // 3.1. Verify the following properties. If any of the checks fail then run the end of stream
1082    // algorithm with the error parameter set to "decode" and abort these steps.
1083    //   * The number of audio, video, and text tracks match what was in the first initialization segment.
1084    if (segment.audioTracks.size() != audioTracks()->length()
1085        || segment.videoTracks.size() != videoTracks()->length()
1086        || segment.textTracks.size() != textTracks()->length())
1087        return false;
1088
1089    //   * The codecs for each track, match what was specified in the first initialization segment.
1090    for (auto& audioTrackInfo : segment.audioTracks) {
1091        if (!m_audioCodecs.contains(audioTrackInfo.description->codec()))
1092            return false;
1093    }
1094
1095    for (auto& videoTrackInfo : segment.videoTracks) {
1096        if (!m_videoCodecs.contains(videoTrackInfo.description->codec()))
1097            return false;
1098    }
1099
1100    for (auto& textTrackInfo : segment.textTracks) {
1101        if (!m_textCodecs.contains(textTrackInfo.description->codec()))
1102            return false;
1103    }
1104
1105    //   * If more than one track for a single type are present (ie 2 audio tracks), then the Track
1106    //   IDs match the ones in the first initialization segment.
1107    if (segment.audioTracks.size() >= 2) {
1108        for (auto& audioTrackInfo : segment.audioTracks) {
1109            if (!m_trackBufferMap.contains(audioTrackInfo.track->id()))
1110                return false;
1111        }
1112    }
1113
1114    if (segment.videoTracks.size() >= 2) {
1115        for (auto& videoTrackInfo : segment.videoTracks) {
1116            if (!m_trackBufferMap.contains(videoTrackInfo.track->id()))
1117                return false;
1118        }
1119    }
1120
1121    if (segment.textTracks.size() >= 2) {
1122        for (auto& textTrackInfo : segment.videoTracks) {
1123            if (!m_trackBufferMap.contains(textTrackInfo.track->id()))
1124                return false;
1125        }
1126    }
1127
1128    return true;
1129}
1130
1131class SampleLessThanComparator {
1132public:
1133    bool operator()(std::pair<MediaTime, RefPtr<MediaSample>> value1, std::pair<MediaTime, RefPtr<MediaSample>> value2)
1134    {
1135        return value1.first < value2.first;
1136    }
1137
1138    bool operator()(MediaTime value1, std::pair<MediaTime, RefPtr<MediaSample>> value2)
1139    {
1140        return value1 < value2.first;
1141    }
1142
1143    bool operator()(std::pair<MediaTime, RefPtr<MediaSample>> value1, MediaTime value2)
1144    {
1145        return value1.first < value2;
1146    }
1147};
1148
1149void SourceBuffer::sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, PassRefPtr<MediaSample> prpSample)
1150{
1151    if (isRemoved())
1152        return;
1153
1154    RefPtr<MediaSample> sample = prpSample;
1155
1156    // 3.5.8 Coded Frame Processing
1157    // When complete coded frames have been parsed by the segment parser loop then the following steps
1158    // are run:
1159    // 1. For each coded frame in the media segment run the following steps:
1160    // 1.1. Loop Top
1161    do {
1162        // 1.1 (ctd) Let presentation timestamp be a double precision floating point representation of
1163        // the coded frame's presentation timestamp in seconds.
1164        MediaTime presentationTimestamp = sample->presentationTime();
1165
1166        // 1.2 Let decode timestamp be a double precision floating point representation of the coded frame's
1167        // decode timestamp in seconds.
1168        MediaTime decodeTimestamp = sample->decodeTime();
1169
1170        // 1.3 Let frame duration be a double precision floating point representation of the coded frame's
1171        // duration in seconds.
1172        MediaTime frameDuration = sample->duration();
1173
1174        // 1.4 If mode equals "sequence" and group start timestamp is set, then run the following steps:
1175        // FIXME: add support for "sequence" mode
1176
1177        // 1.5 If timestampOffset is not 0, then run the following steps:
1178        if (m_timestampOffset != MediaTime::zeroTime()) {
1179            // 1.5.1 Add timestampOffset to the presentation timestamp.
1180            presentationTimestamp += m_timestampOffset;
1181
1182            // 1.5.2 Add timestampOffset to the decode timestamp.
1183            decodeTimestamp += m_timestampOffset;
1184
1185            // 1.5.3 If the presentation timestamp or decode timestamp is less than the presentation start
1186            // time, then run the end of stream algorithm with the error parameter set to "decode", and
1187            // abort these steps.
1188            MediaTime presentationStartTime = MediaTime::zeroTime();
1189            if (presentationTimestamp < presentationStartTime || decodeTimestamp < presentationStartTime) {
1190                m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode());
1191                return;
1192            }
1193        }
1194
1195        // 1.6 Let track buffer equal the track buffer that the coded frame will be added to.
1196        AtomicString trackID = sample->trackID();
1197        auto it = m_trackBufferMap.find(trackID);
1198        if (it == m_trackBufferMap.end())
1199            it = m_trackBufferMap.add(trackID, TrackBuffer()).iterator;
1200        TrackBuffer& trackBuffer = it->value;
1201
1202        // 1.7 If last decode timestamp for track buffer is set and decode timestamp is less than last
1203        // decode timestamp:
1204        // OR
1205        // If last decode timestamp for track buffer is set and the difference between decode timestamp and
1206        // last decode timestamp is greater than 2 times last frame duration:
1207        if (trackBuffer.lastDecodeTimestamp.isValid() && (decodeTimestamp < trackBuffer.lastDecodeTimestamp
1208            || abs(decodeTimestamp - trackBuffer.lastDecodeTimestamp) > (trackBuffer.lastFrameDuration * 2))) {
1209            // 1.7.1 If mode equals "segments":
1210            // Set highest presentation end timestamp to presentation timestamp.
1211            m_highestPresentationEndTimestamp = presentationTimestamp;
1212
1213            // If mode equals "sequence":
1214            // Set group start timestamp equal to the highest presentation end timestamp.
1215            // FIXME: Add support for "sequence" mode.
1216
1217            for (auto& trackBuffer : m_trackBufferMap.values()) {
1218                // 1.7.2 Unset the last decode timestamp on all track buffers.
1219                trackBuffer.lastDecodeTimestamp = MediaTime::invalidTime();
1220                // 1.7.3 Unset the last frame duration on all track buffers.
1221                trackBuffer.lastFrameDuration = MediaTime::invalidTime();
1222                // 1.7.4 Unset the highest presentation timestamp on all track buffers.
1223                trackBuffer.highestPresentationTimestamp = MediaTime::invalidTime();
1224                // 1.7.5 Set the need random access point flag on all track buffers to true.
1225                trackBuffer.needRandomAccessFlag = true;
1226            }
1227
1228            // 1.7.6 Jump to the Loop Top step above to restart processing of the current coded frame.
1229            continue;
1230        }
1231
1232        // 1.8 Let frame end timestamp equal the sum of presentation timestamp and frame duration.
1233        MediaTime frameEndTimestamp = presentationTimestamp + frameDuration;
1234
1235        // 1.9 If presentation timestamp is less than appendWindowStart, then set the need random access
1236        // point flag to true, drop the coded frame, and jump to the top of the loop to start processing
1237        // the next coded frame.
1238        // 1.10 If frame end timestamp is greater than appendWindowEnd, then set the need random access
1239        // point flag to true, drop the coded frame, and jump to the top of the loop to start processing
1240        // the next coded frame.
1241        // FIXME: implement append windows
1242
1243        // 1.11 If the need random access point flag on track buffer equals true, then run the following steps:
1244        if (trackBuffer.needRandomAccessFlag) {
1245            // 1.11.1 If the coded frame is not a random access point, then drop the coded frame and jump
1246            // to the top of the loop to start processing the next coded frame.
1247            if (!sample->isSync()) {
1248                didDropSample();
1249                return;
1250            }
1251
1252            // 1.11.2 Set the need random access point flag on track buffer to false.
1253            trackBuffer.needRandomAccessFlag = false;
1254        }
1255
1256        // 1.12 Let spliced audio frame be an unset variable for holding audio splice information
1257        // 1.13 Let spliced timed text frame be an unset variable for holding timed text splice information
1258        // FIXME: Add support for sample splicing.
1259
1260        SampleMap erasedSamples;
1261        MediaTime microsecond(1, 1000000);
1262
1263        // 1.14 If last decode timestamp for track buffer is unset and presentation timestamp falls
1264        // falls within the presentation interval of a coded frame in track buffer, then run the
1265        // following steps:
1266        if (trackBuffer.lastDecodeTimestamp.isInvalid()) {
1267            auto iter = trackBuffer.samples.presentationOrder().findSampleContainingPresentationTime(presentationTimestamp);
1268            if (iter != trackBuffer.samples.presentationOrder().end()) {
1269                // 1.14.1 Let overlapped frame be the coded frame in track buffer that matches the condition above.
1270                RefPtr<MediaSample> overlappedFrame = iter->second;
1271
1272                // 1.14.2 If track buffer contains audio coded frames:
1273                // Run the audio splice frame algorithm and if a splice frame is returned, assign it to
1274                // spliced audio frame.
1275                // FIXME: Add support for sample splicing.
1276
1277                // If track buffer contains video coded frames:
1278                if (trackBuffer.description->isVideo()) {
1279                    // 1.14.2.1 Let overlapped frame presentation timestamp equal the presentation timestamp
1280                    // of overlapped frame.
1281                    MediaTime overlappedFramePresentationTimestamp = overlappedFrame->presentationTime();
1282
1283                    // 1.14.2.2 Let remove window timestamp equal overlapped frame presentation timestamp
1284                    // plus 1 microsecond.
1285                    MediaTime removeWindowTimestamp = overlappedFramePresentationTimestamp + microsecond;
1286
1287                    // 1.14.2.3 If the presentation timestamp is less than the remove window timestamp,
1288                    // then remove overlapped frame and any coded frames that depend on it from track buffer.
1289                    if (presentationTimestamp < removeWindowTimestamp)
1290                        erasedSamples.addSample(iter->second);
1291                }
1292
1293                // If track buffer contains timed text coded frames:
1294                // Run the text splice frame algorithm and if a splice frame is returned, assign it to spliced timed text frame.
1295                // FIXME: Add support for sample splicing.
1296            }
1297        }
1298
1299        // 1.15 Remove existing coded frames in track buffer:
1300        // If highest presentation timestamp for track buffer is not set:
1301        if (trackBuffer.highestPresentationTimestamp.isInvalid()) {
1302            // Remove all coded frames from track buffer that have a presentation timestamp greater than or
1303            // equal to presentation timestamp and less than frame end timestamp.
1304            auto iter_pair = trackBuffer.samples.presentationOrder().findSamplesBetweenPresentationTimes(presentationTimestamp, frameEndTimestamp);
1305            if (iter_pair.first != trackBuffer.samples.presentationOrder().end())
1306                erasedSamples.addRange(iter_pair.first, iter_pair.second);
1307        }
1308
1309        // If highest presentation timestamp for track buffer is set and less than presentation timestamp
1310        if (trackBuffer.highestPresentationTimestamp.isValid() && trackBuffer.highestPresentationTimestamp <= presentationTimestamp) {
1311            // Remove all coded frames from track buffer that have a presentation timestamp greater than highest
1312            // presentation timestamp and less than or equal to frame end timestamp.
1313            do {
1314                // NOTE: Searching from the end of the trackBuffer will be vastly more efficient if the search range is
1315                // near the end of the buffered range. Use a linear-backwards search if the search range is within one
1316                // frame duration of the end:
1317                if (!m_buffered)
1318                    break;
1319
1320                unsigned bufferedLength = m_buffered->ranges().length();
1321                if (!bufferedLength)
1322                    break;
1323
1324                bool ignoreValid;
1325                MediaTime highestBufferedTime = m_buffered->ranges().end(bufferedLength - 1, ignoreValid);
1326
1327                PresentationOrderSampleMap::iterator_range range;
1328                if (highestBufferedTime - trackBuffer.highestPresentationTimestamp < trackBuffer.lastFrameDuration)
1329                    range = trackBuffer.samples.presentationOrder().findSamplesWithinPresentationRangeFromEnd(trackBuffer.highestPresentationTimestamp, frameEndTimestamp);
1330                else
1331                    range = trackBuffer.samples.presentationOrder().findSamplesWithinPresentationRange(trackBuffer.highestPresentationTimestamp, frameEndTimestamp);
1332
1333                if (range.first != trackBuffer.samples.presentationOrder().end())
1334                    erasedSamples.addRange(range.first, range.second);
1335            } while(false);
1336        }
1337
1338        // 1.16 Remove decoding dependencies of the coded frames removed in the previous step:
1339        DecodeOrderSampleMap::MapType dependentSamples;
1340        if (!erasedSamples.empty()) {
1341            // If detailed information about decoding dependencies is available:
1342            // FIXME: Add support for detailed dependency information
1343
1344            // Otherwise: Remove all coded frames between the coded frames removed in the previous step
1345            // and the next random access point after those removed frames.
1346            auto firstDecodeIter = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(erasedSamples.decodeOrder().begin()->first);
1347            auto lastDecodeIter = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(erasedSamples.decodeOrder().rbegin()->first);
1348            auto nextSyncIter = trackBuffer.samples.decodeOrder().findSyncSampleAfterDecodeIterator(lastDecodeIter);
1349            dependentSamples.insert(firstDecodeIter, nextSyncIter);
1350
1351            RefPtr<TimeRanges> erasedRanges = removeSamplesFromTrackBuffer(dependentSamples, trackBuffer, this, "sourceBufferPrivateDidReceiveSample");
1352
1353            // Only force the TrackBuffer to re-enqueue if the removed ranges overlap with enqueued and possibly
1354            // not yet displayed samples.
1355            MediaTime currentMediaTime = MediaTime::createWithDouble(m_source->currentTime());
1356            if (currentMediaTime < trackBuffer.lastEnqueuedPresentationTime) {
1357                PlatformTimeRanges possiblyEnqueuedRanges(currentMediaTime, trackBuffer.lastEnqueuedPresentationTime);
1358                possiblyEnqueuedRanges.intersectWith(erasedRanges->ranges());
1359                if (possiblyEnqueuedRanges.length())
1360                    trackBuffer.needsReenqueueing = true;
1361            }
1362
1363            erasedRanges->invert();
1364            m_buffered->intersectWith(*erasedRanges.get());
1365        }
1366
1367        // 1.17 If spliced audio frame is set:
1368        // Add spliced audio frame to the track buffer.
1369        // If spliced timed text frame is set:
1370        // Add spliced timed text frame to the track buffer.
1371        // FIXME: Add support for sample splicing.
1372
1373        // Otherwise:
1374        // Add the coded frame with the presentation timestamp, decode timestamp, and frame duration to the track buffer.
1375        trackBuffer.samples.addSample(sample);
1376
1377        if (trackBuffer.lastEnqueuedDecodeEndTime.isInvalid() || decodeTimestamp >= trackBuffer.lastEnqueuedDecodeEndTime) {
1378            DecodeOrderSampleMap::KeyType decodeKey(decodeTimestamp, presentationTimestamp);
1379            trackBuffer.decodeQueue.insert(DecodeOrderSampleMap::MapType::value_type(decodeKey, sample));
1380        }
1381
1382        // 1.18 Set last decode timestamp for track buffer to decode timestamp.
1383        trackBuffer.lastDecodeTimestamp = decodeTimestamp;
1384
1385        // 1.19 Set last frame duration for track buffer to frame duration.
1386        trackBuffer.lastFrameDuration = frameDuration;
1387
1388        // 1.20 If highest presentation timestamp for track buffer is unset or frame end timestamp is greater
1389        // than highest presentation timestamp, then set highest presentation timestamp for track buffer
1390        // to frame end timestamp.
1391        if (trackBuffer.highestPresentationTimestamp.isInvalid() || frameEndTimestamp > trackBuffer.highestPresentationTimestamp)
1392            trackBuffer.highestPresentationTimestamp = frameEndTimestamp;
1393
1394        // 1.21 If highest presentation end timestamp is unset or frame end timestamp is greater than highest
1395        // presentation end timestamp, then set highest presentation end timestamp equal to frame end timestamp.
1396        if (m_highestPresentationEndTimestamp.isInvalid() || frameEndTimestamp > m_highestPresentationEndTimestamp)
1397            m_highestPresentationEndTimestamp = frameEndTimestamp;
1398
1399        m_buffered->add(presentationTimestamp.toDouble(), (presentationTimestamp + frameDuration + microsecond).toDouble());
1400        m_bufferedSinceLastMonitor += frameDuration.toDouble();
1401
1402        break;
1403    } while (1);
1404
1405    // Steps 2-4 will be handled by MediaSource::monitorSourceBuffers()
1406
1407    // 5. If the media segment contains data beyond the current duration, then run the duration change algorithm with new
1408    // duration set to the maximum of the current duration and the highest end timestamp reported by HTMLMediaElement.buffered.
1409    if (highestPresentationEndTimestamp().toDouble() > m_source->duration())
1410        m_source->setDurationInternal(highestPresentationEndTimestamp().toDouble());
1411}
1412
1413bool SourceBuffer::hasAudio() const
1414{
1415    return m_audioTracks && m_audioTracks->length();
1416}
1417
1418bool SourceBuffer::hasVideo() const
1419{
1420    return m_videoTracks && m_videoTracks->length();
1421}
1422
1423bool SourceBuffer::sourceBufferPrivateHasAudio(const SourceBufferPrivate*) const
1424{
1425    return hasAudio();
1426}
1427
1428bool SourceBuffer::sourceBufferPrivateHasVideo(const SourceBufferPrivate*) const
1429{
1430    return hasVideo();
1431}
1432
1433void SourceBuffer::videoTrackSelectedChanged(VideoTrack* track)
1434{
1435    // 2.4.5 Changes to selected/enabled track state
1436    // If the selected video track changes, then run the following steps:
1437    // 1. If the SourceBuffer associated with the previously selected video track is not associated with
1438    // any other enabled tracks, run the following steps:
1439    if (track->selected()
1440        && (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled())
1441        && (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled())
1442        && (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) {
1443        // 1.1 Remove the SourceBuffer from activeSourceBuffers.
1444        // 1.2 Queue a task to fire a simple event named removesourcebuffer at activeSourceBuffers
1445        setActive(false);
1446    } else if (!track->selected()) {
1447        // 2. If the SourceBuffer associated with the newly selected video track is not already in activeSourceBuffers,
1448        // run the following steps:
1449        // 2.1 Add the SourceBuffer to activeSourceBuffers.
1450        // 2.2 Queue a task to fire a simple event named addsourcebuffer at activeSourceBuffers
1451        setActive(true);
1452    }
1453
1454    if (!isRemoved())
1455        m_source->mediaElement()->videoTrackSelectedChanged(track);
1456}
1457
1458void SourceBuffer::audioTrackEnabledChanged(AudioTrack* track)
1459{
1460    // 2.4.5 Changes to selected/enabled track state
1461    // If an audio track becomes disabled and the SourceBuffer associated with this track is not
1462    // associated with any other enabled or selected track, then run the following steps:
1463    if (track->enabled()
1464        && (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled())
1465        && (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled())
1466        && (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) {
1467        // 1. Remove the SourceBuffer associated with the audio track from activeSourceBuffers
1468        // 2. Queue a task to fire a simple event named removesourcebuffer at activeSourceBuffers
1469        setActive(false);
1470    } else if (!track->enabled()) {
1471        // If an audio track becomes enabled and the SourceBuffer associated with this track is
1472        // not already in activeSourceBuffers, then run the following steps:
1473        // 1. Add the SourceBuffer associated with the audio track to activeSourceBuffers
1474        // 2. Queue a task to fire a simple event named addsourcebuffer at activeSourceBuffers
1475        setActive(true);
1476    }
1477
1478    if (!isRemoved())
1479        m_source->mediaElement()->audioTrackEnabledChanged(track);
1480}
1481
1482void SourceBuffer::textTrackModeChanged(TextTrack* track)
1483{
1484    // 2.4.5 Changes to selected/enabled track state
1485    // If a text track mode becomes "disabled" and the SourceBuffer associated with this track is not
1486    // associated with any other enabled or selected track, then run the following steps:
1487    if (track->mode() == TextTrack::disabledKeyword()
1488        && (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled())
1489        && (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled())
1490        && (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) {
1491        // 1. Remove the SourceBuffer associated with the audio track from activeSourceBuffers
1492        // 2. Queue a task to fire a simple event named removesourcebuffer at activeSourceBuffers
1493        setActive(false);
1494    } else {
1495        // If a text track mode becomes "showing" or "hidden" and the SourceBuffer associated with this
1496        // track is not already in activeSourceBuffers, then run the following steps:
1497        // 1. Add the SourceBuffer associated with the text track to activeSourceBuffers
1498        // 2. Queue a task to fire a simple event named addsourcebuffer at activeSourceBuffers
1499        setActive(true);
1500    }
1501
1502    if (!isRemoved())
1503        m_source->mediaElement()->textTrackModeChanged(track);
1504}
1505
1506void SourceBuffer::textTrackAddCue(TextTrack* track, WTF::PassRefPtr<TextTrackCue> cue)
1507{
1508    if (!isRemoved())
1509        m_source->mediaElement()->textTrackAddCue(track, cue);
1510}
1511
1512void SourceBuffer::textTrackAddCues(TextTrack* track, TextTrackCueList const* cueList)
1513{
1514    if (!isRemoved())
1515        m_source->mediaElement()->textTrackAddCues(track, cueList);
1516}
1517
1518void SourceBuffer::textTrackRemoveCue(TextTrack* track, WTF::PassRefPtr<TextTrackCue> cue)
1519{
1520    if (!isRemoved())
1521        m_source->mediaElement()->textTrackRemoveCue(track, cue);
1522}
1523
1524void SourceBuffer::textTrackRemoveCues(TextTrack* track, TextTrackCueList const* cueList)
1525{
1526    if (!isRemoved())
1527        m_source->mediaElement()->textTrackRemoveCues(track, cueList);
1528}
1529
1530void SourceBuffer::textTrackKindChanged(TextTrack* track)
1531{
1532    if (!isRemoved())
1533        m_source->mediaElement()->textTrackKindChanged(track);
1534}
1535
1536void SourceBuffer::sourceBufferPrivateDidBecomeReadyForMoreSamples(SourceBufferPrivate*, AtomicString trackID)
1537{
1538    LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidBecomeReadyForMoreSamples(%p)", this);
1539    auto it = m_trackBufferMap.find(trackID);
1540    if (it == m_trackBufferMap.end())
1541        return;
1542
1543    TrackBuffer& trackBuffer = it->value;
1544    if (!trackBuffer.needsReenqueueing && !m_source->isSeeking())
1545        provideMediaData(trackBuffer, trackID);
1546}
1547
1548void SourceBuffer::provideMediaData(TrackBuffer& trackBuffer, AtomicString trackID)
1549{
1550#if !LOG_DISABLED
1551    unsigned enqueuedSamples = 0;
1552#endif
1553
1554    auto sampleIt = trackBuffer.decodeQueue.begin();
1555    for (auto sampleEnd = trackBuffer.decodeQueue.end(); sampleIt != sampleEnd; ++sampleIt) {
1556        if (!m_private->isReadyForMoreSamples(trackID)) {
1557            m_private->notifyClientWhenReadyForMoreSamples(trackID);
1558            break;
1559        }
1560
1561        RefPtr<MediaSample> sample = sampleIt->second;
1562        // Do not enqueue samples spanning a significant unbuffered gap.
1563        // NOTE: one second is somewhat arbitrary. MediaSource::monitorSourceBuffers() is run
1564        // on the playbackTimer, which is effectively every 350ms. Allowing > 350ms gap between
1565        // enqueued samples allows for situations where we overrun the end of a buffered range
1566        // but don't notice for 350s of playback time, and the client can enqueue data for the
1567        // new current time without triggering this early return.
1568        // FIXME(135867): Make this gap detection logic less arbitrary.
1569        MediaTime oneSecond(1, 1);
1570        if (trackBuffer.lastEnqueuedDecodeEndTime.isValid() && sample->decodeTime() - trackBuffer.lastEnqueuedDecodeEndTime > oneSecond)
1571            break;
1572
1573        trackBuffer.lastEnqueuedPresentationTime = sample->presentationTime();
1574        trackBuffer.lastEnqueuedDecodeEndTime = sample->decodeTime() + sample->duration();
1575        m_private->enqueueSample(sample.release(), trackID);
1576#if !LOG_DISABLED
1577        ++enqueuedSamples;
1578#endif
1579
1580    }
1581    trackBuffer.decodeQueue.erase(trackBuffer.decodeQueue.begin(), sampleIt);
1582
1583    LOG(MediaSource, "SourceBuffer::provideMediaData(%p) - Enqueued %u samples", this, enqueuedSamples);
1584}
1585
1586void SourceBuffer::reenqueueMediaForTime(TrackBuffer& trackBuffer, AtomicString trackID, const MediaTime& time)
1587{
1588    // Find the sample which contains the current presentation time.
1589    auto currentSamplePTSIterator = trackBuffer.samples.presentationOrder().findSampleContainingPresentationTime(time);
1590
1591    if (currentSamplePTSIterator == trackBuffer.samples.presentationOrder().end()) {
1592        trackBuffer.decodeQueue.clear();
1593        m_private->flushAndEnqueueNonDisplayingSamples(Vector<RefPtr<MediaSample>>(), trackID);
1594        return;
1595    }
1596
1597    // Seach backward for the previous sync sample.
1598    DecodeOrderSampleMap::KeyType decodeKey(currentSamplePTSIterator->second->decodeTime(), currentSamplePTSIterator->second->presentationTime());
1599    auto currentSampleDTSIterator = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(decodeKey);
1600    ASSERT(currentSampleDTSIterator != trackBuffer.samples.decodeOrder().end());
1601
1602    auto reverseCurrentSampleIter = --DecodeOrderSampleMap::reverse_iterator(currentSampleDTSIterator);
1603    auto reverseLastSyncSampleIter = trackBuffer.samples.decodeOrder().findSyncSamplePriorToDecodeIterator(reverseCurrentSampleIter);
1604    if (reverseLastSyncSampleIter == trackBuffer.samples.decodeOrder().rend()) {
1605        trackBuffer.decodeQueue.clear();
1606        m_private->flushAndEnqueueNonDisplayingSamples(Vector<RefPtr<MediaSample>>(), trackID);
1607        return;
1608    }
1609
1610    Vector<RefPtr<MediaSample>> nonDisplayingSamples;
1611    for (auto iter = reverseLastSyncSampleIter; iter != reverseCurrentSampleIter; --iter)
1612        nonDisplayingSamples.append(iter->second);
1613
1614    m_private->flushAndEnqueueNonDisplayingSamples(nonDisplayingSamples, trackID);
1615
1616    if (!nonDisplayingSamples.isEmpty()) {
1617        trackBuffer.lastEnqueuedPresentationTime = nonDisplayingSamples.last()->presentationTime();
1618        trackBuffer.lastEnqueuedDecodeEndTime = nonDisplayingSamples.last()->decodeTime();
1619    } else {
1620        trackBuffer.lastEnqueuedPresentationTime = MediaTime::invalidTime();
1621        trackBuffer.lastEnqueuedDecodeEndTime = MediaTime::invalidTime();
1622    }
1623
1624    // Fill the decode queue with the remaining samples.
1625    trackBuffer.decodeQueue.clear();
1626    for (auto iter = currentSampleDTSIterator; iter != trackBuffer.samples.decodeOrder().end(); ++iter)
1627        trackBuffer.decodeQueue.insert(*iter);
1628    provideMediaData(trackBuffer, trackID);
1629
1630    trackBuffer.needsReenqueueing = false;
1631}
1632
1633
1634void SourceBuffer::didDropSample()
1635{
1636    if (!isRemoved())
1637        m_source->mediaElement()->incrementDroppedFrameCount();
1638}
1639
1640void SourceBuffer::monitorBufferingRate()
1641{
1642    if (!m_bufferedSinceLastMonitor)
1643        return;
1644
1645    double now = monotonicallyIncreasingTime();
1646    double interval = now - m_timeOfBufferingMonitor;
1647    double rateSinceLastMonitor = m_bufferedSinceLastMonitor / interval;
1648
1649    m_timeOfBufferingMonitor = now;
1650    m_bufferedSinceLastMonitor = 0;
1651
1652    m_averageBufferRate = m_averageBufferRate * (1 - ExponentialMovingAverageCoefficient) + rateSinceLastMonitor * ExponentialMovingAverageCoefficient;
1653
1654    LOG(MediaSource, "SourceBuffer::monitorBufferingRate(%p) - m_avegareBufferRate: %lf", this, m_averageBufferRate);
1655}
1656
1657std::unique_ptr<PlatformTimeRanges> SourceBuffer::bufferedAccountingForEndOfStream() const
1658{
1659    // FIXME: Revisit this method once the spec bug <https://www.w3.org/Bugs/Public/show_bug.cgi?id=26436> is resolved.
1660    std::unique_ptr<PlatformTimeRanges> virtualRanges = PlatformTimeRanges::create(m_buffered->ranges());
1661    if (m_source->isEnded()) {
1662        MediaTime start = virtualRanges->maximumBufferedTime();
1663        MediaTime end = MediaTime::createWithDouble(m_source->duration());
1664        if (start <= end)
1665            virtualRanges->add(start, end);
1666    }
1667    return virtualRanges;
1668}
1669
1670bool SourceBuffer::hasCurrentTime() const
1671{
1672    if (isRemoved() || !m_buffered->length())
1673        return false;
1674
1675    MediaTime currentTime = MediaTime::createWithDouble(m_source->currentTime());
1676    MediaTime duration = MediaTime::createWithDouble(m_source->duration());
1677    if (currentTime >= duration)
1678        return true;
1679
1680    std::unique_ptr<PlatformTimeRanges> ranges = bufferedAccountingForEndOfStream();
1681    return abs(ranges->nearest(currentTime) - currentTime) <= currentTimeFudgeFactor();
1682}
1683
1684bool SourceBuffer::hasFutureTime() const
1685{
1686    if (isRemoved())
1687        return false;
1688
1689    std::unique_ptr<PlatformTimeRanges> ranges = bufferedAccountingForEndOfStream();
1690    if (!ranges->length())
1691        return false;
1692
1693    MediaTime currentTime = MediaTime::createWithDouble(m_source->currentTime());
1694    MediaTime duration = MediaTime::createWithDouble(m_source->duration());
1695    if (currentTime >= duration)
1696        return true;
1697
1698    MediaTime nearest = ranges->nearest(currentTime);
1699    if (abs(nearest - currentTime) > currentTimeFudgeFactor())
1700        return false;
1701
1702    size_t found = ranges->find(nearest);
1703    if (found == notFound)
1704        return false;
1705
1706    MediaTime localEnd = ranges->end(found);
1707    if (localEnd == duration)
1708        return true;
1709
1710    return localEnd - currentTime > currentTimeFudgeFactor();
1711}
1712
1713bool SourceBuffer::canPlayThrough()
1714{
1715    if (isRemoved())
1716        return false;
1717
1718    monitorBufferingRate();
1719
1720    // Assuming no fluctuations in the buffering rate, loading 1 second per second or greater
1721    // means indefinite playback. This could be improved by taking jitter into account.
1722    if (m_averageBufferRate > 1)
1723        return true;
1724
1725    // Add up all the time yet to be buffered.
1726    MediaTime currentTime = MediaTime::createWithDouble(m_source->currentTime());
1727    MediaTime duration = MediaTime::createWithDouble(m_source->duration());
1728
1729    std::unique_ptr<PlatformTimeRanges> unbufferedRanges = bufferedAccountingForEndOfStream();
1730    unbufferedRanges->invert();
1731    unbufferedRanges->intersectWith(PlatformTimeRanges(currentTime, std::max(currentTime, duration)));
1732    MediaTime unbufferedTime = unbufferedRanges->totalDuration();
1733    if (!unbufferedTime.isValid())
1734        return true;
1735
1736    MediaTime timeRemaining = duration - currentTime;
1737    return unbufferedTime.toDouble() / m_averageBufferRate < timeRemaining.toDouble();
1738}
1739
1740size_t SourceBuffer::extraMemoryCost() const
1741{
1742    size_t extraMemoryCost = m_pendingAppendData.capacity();
1743    for (auto& trackBuffer : m_trackBufferMap.values())
1744        extraMemoryCost += trackBuffer.samples.sizeInBytes();
1745
1746    return extraMemoryCost;
1747}
1748
1749void SourceBuffer::reportExtraMemoryCost()
1750{
1751    size_t extraMemoryCost = this->extraMemoryCost();
1752    if (extraMemoryCost < m_reportedExtraMemoryCost)
1753        return;
1754
1755    size_t extraMemoryCostDelta = extraMemoryCost - m_reportedExtraMemoryCost;
1756    m_reportedExtraMemoryCost = extraMemoryCost;
1757
1758    JSC::JSLockHolder lock(scriptExecutionContext()->vm());
1759    if (extraMemoryCostDelta > 0)
1760        scriptExecutionContext()->vm().heap.reportExtraMemoryCost(extraMemoryCostDelta);
1761}
1762
1763Vector<String> SourceBuffer::bufferedSamplesForTrackID(const AtomicString& trackID)
1764{
1765    auto it = m_trackBufferMap.find(trackID);
1766    if (it == m_trackBufferMap.end())
1767        return Vector<String>();
1768
1769    TrackBuffer& trackBuffer = it->value;
1770    Vector<String> sampleDescriptions;
1771    for (auto& pair : trackBuffer.samples.decodeOrder())
1772        sampleDescriptions.append(toString(*pair.second));
1773
1774    return sampleDescriptions;
1775}
1776
1777Document& SourceBuffer::document() const
1778{
1779    ASSERT(scriptExecutionContext()->isDocument());
1780    return *static_cast<Document*>(scriptExecutionContext());
1781}
1782
1783} // namespace WebCore
1784
1785#endif
1786