#include "config.h"
#include "SourceBuffer.h"
#if ENABLE(MEDIA_SOURCE)
#include "AudioTrackList.h"
#include "Event.h"
#include "ExceptionCodePlaceholder.h"
#include "GenericEventQueue.h"
#include "HTMLMediaElement.h"
#include "InbandTextTrack.h"
#include "Logging.h"
#include "MediaDescription.h"
#include "MediaSample.h"
#include "MediaSource.h"
#include "SampleMap.h"
#include "SourceBufferPrivate.h"
#include "TextTrackList.h"
#include "TimeRanges.h"
#include "VideoTrackList.h"
#include <map>
#include <runtime/JSCInlines.h>
#include <runtime/JSLock.h>
#include <runtime/VM.h>
#include <wtf/CurrentTime.h>
#include <wtf/NeverDestroyed.h>
namespace WebCore {
// Smoothing factor for the exponential moving average of the buffering rate
// maintained by monitorBufferingRate(). Never mutated, so declared const.
static const double ExponentialMovingAverageCoefficient = 0.1;
// Tolerance used when deciding whether the current playback position lies inside a
// buffered range: 1/24th of a second (one frame period at 24fps).
static const MediaTime& currentTimeFudgeFactor()
{
    // NeverDestroyed avoids running a static destructor at exit.
    static NeverDestroyed<MediaTime> fudgeFactor(1, 24);
    return fudgeFactor;
}
// Per-track bookkeeping for the coded frame processing algorithm: the most recent
// decode/presentation timestamps observed, the samples stored for the track, and
// the queue of samples awaiting delivery to the platform in decode order.
struct SourceBuffer::TrackBuffer {
    // Timestamps start out invalid and are populated as samples arrive.
    MediaTime lastDecodeTimestamp { MediaTime::invalidTime() };
    MediaTime lastFrameDuration { MediaTime::invalidTime() };
    MediaTime highestPresentationTimestamp { MediaTime::invalidTime() };
    MediaTime lastEnqueuedPresentationTime { MediaTime::invalidTime() };
    // A sync (random access) sample is required before any frames are accepted.
    bool needRandomAccessFlag { true };
    bool enabled { false };
    bool needsReenqueueing { false };
    SampleMap samples;
    DecodeOrderSampleMap::MapType decodeQueue;
    RefPtr<MediaDescription> description;
};
// Factory: adopts a freshly allocated SourceBuffer and lets ActiveDOMObject
// suspend it if its script execution context is currently suspended.
PassRef<SourceBuffer> SourceBuffer::create(PassRef<SourceBufferPrivate> sourceBufferPrivate, MediaSource* source)
{
    RefPtr<SourceBuffer> buffer = adoptRef(new SourceBuffer(WTF::move(sourceBufferPrivate), source));
    buffer->suspendIfNeeded();
    return buffer.releaseNonNull();
}
// Constructs a SourceBuffer attached to |source|; |source| must be non-null since
// its script execution context seeds the ActiveDOMObject base.
SourceBuffer::SourceBuffer(PassRef<SourceBufferPrivate> sourceBufferPrivate, MediaSource* source)
    : ActiveDOMObject(source->scriptExecutionContext())
    , m_private(WTF::move(sourceBufferPrivate))
    , m_source(source)
    , m_asyncEventQueue(*this)
    , m_updating(false)
    , m_appendBufferTimer(this, &SourceBuffer::appendBufferTimerFired)
    , m_highestPresentationEndTimestamp(MediaTime::invalidTime())
    , m_receivedFirstInitializationSegment(false)
    , m_buffered(TimeRanges::create())
    , m_active(false)
    , m_appendState(WaitingForSegment)
    , m_timeOfBufferingMonitor(monotonicallyIncreasingTime())
    , m_bufferedSinceLastMonitor(0)
    , m_averageBufferRate(0)
    , m_reportedExtraMemoryCost(0)
    , m_pendingRemoveStart(MediaTime::invalidTime())
    , m_pendingRemoveEnd(MediaTime::invalidTime())
    , m_removeTimer(this, &SourceBuffer::removeTimerFired)
{
    ASSERT(m_source);

    // Register as the platform backend's client so its callbacks (append complete,
    // samples received, errors, ...) are delivered to this object.
    m_private->setClient(this);
}
// The MediaSource must have detached us (removedFromMediaSource()) before
// destruction; unregister from the platform backend so it cannot call back into a
// dead object.
SourceBuffer::~SourceBuffer()
{
    ASSERT(isRemoved());

    // nullptr instead of 0 for consistency with the rest of the file.
    m_private->setClient(nullptr);
}
// Script-visible buffered attribute (Section 3.1): throws INVALID_STATE_ERR once
// detached from the MediaSource, otherwise hands back a copy so callers cannot
// mutate our cached ranges.
PassRefPtr<TimeRanges> SourceBuffer::buffered(ExceptionCode& ec) const
{
    if (!m_source) {
        ec = INVALID_STATE_ERR;
        return nullptr;
    }

    return m_buffered->copy();
}
// Internal accessor: the live buffered ranges (no copy, unlike the script-visible
// overload above).
const RefPtr<TimeRanges>& SourceBuffer::buffered() const
{
    return m_buffered;
}
// Section 3.1 timestampOffset attribute getter, reported in seconds.
double SourceBuffer::timestampOffset() const
{
    return m_timestampOffset.toDouble();
}
// Section 3.1 timestampOffset attribute setter.
void SourceBuffer::setTimestampOffset(double offset, ExceptionCode& ec)
{
    // 1-2. Throw if detached from the MediaSource or an append/remove is running.
    if (!m_source || m_updating) {
        ec = INVALID_STATE_ERR;
        return;
    }

    // 4. Reopen the parent media source if it was in the "ended" state.
    m_source->openIfInEndedState();

    // 5. Changing the offset in the middle of a media segment is not allowed.
    if (m_appendState == ParsingMediaSegment) {
        ec = INVALID_STATE_ERR;
        return;
    }

    // 6. Record the new offset.
    m_timestampOffset = MediaTime::createWithDouble(offset);
}
// Section 3.2 appendBuffer(ArrayBuffer): a null buffer is an INVALID_ACCESS_ERR.
void SourceBuffer::appendBuffer(PassRefPtr<ArrayBuffer> data, ExceptionCode& ec)
{
    if (!data) {
        ec = INVALID_ACCESS_ERR;
        return;
    }

    unsigned char* bytes = static_cast<unsigned char*>(data->data());
    appendBufferInternal(bytes, data->byteLength(), ec);
}
// Section 3.2 appendBuffer(ArrayBufferView): a null view is an INVALID_ACCESS_ERR.
void SourceBuffer::appendBuffer(PassRefPtr<ArrayBufferView> data, ExceptionCode& ec)
{
    if (!data) {
        ec = INVALID_ACCESS_ERR;
        return;
    }

    unsigned char* bytes = static_cast<unsigned char*>(data->baseAddress());
    appendBufferInternal(bytes, data->byteLength(), ec);
}
// Section 3.2 abort() method steps.
void SourceBuffer::abort(ExceptionCode& ec)
{
    // 1-2. Throw unless this buffer is still attached to an open MediaSource.
    // (The isRemoved() check also keeps the isOpen() call off a null m_source.)
    bool attachedAndOpen = !isRemoved() && m_source->isOpen();
    if (!attachedAndOpen) {
        ec = INVALID_STATE_ERR;
        return;
    }

    // 3. Cancel any in-flight append or remove, firing abort/updateend as needed.
    abortIfUpdating();

    // 4. Reset the parser state in the platform backend.
    m_private->abort();
}
// Section 3.2 remove(): validates the range, then defers the actual coded frame
// removal to a zero-delay timer so it runs asynchronously.
void SourceBuffer::remove(double start, double end, ExceptionCode& ec)
{
    LOG(MediaSource, "SourceBuffer::remove(%p) - start(%s), end(%s)", this, toString(start).utf8().data(), toString(end).utf8().data());

    // 1. start must lie within [0, duration] and precede end.
    bool startBeyondDuration = m_source && (std::isnan(m_source->duration()) || start > m_source->duration());
    if (start < 0 || startBeyondDuration || end <= start) {
        ec = INVALID_ACCESS_ERR;
        return;
    }

    // 2-3. Throw if detached from the MediaSource or another update is in flight.
    if (isRemoved() || m_updating) {
        ec = INVALID_STATE_ERR;
        return;
    }

    // 4. Reopen the parent media source if it had ended.
    m_source->openIfInEndedState();

    // 5-6. Enter the updating state and announce it.
    m_updating = true;
    scheduleEvent(eventNames().updatestartEvent);

    // 7. Stash the range and schedule the asynchronous removal.
    m_pendingRemoveStart = MediaTime::createWithDouble(start);
    m_pendingRemoveEnd = MediaTime::createWithDouble(end);
    m_removeTimer.startOneShot(0);
}
// Section 3.5.2 Reset Parser State / abort: cancels any pending asynchronous
// append or remove and fires abort followed by updateend. No-op when idle.
void SourceBuffer::abortIfUpdating()
{
    if (!m_updating)
        return;

    // Cancel a pending asynchronous append...
    m_appendBufferTimer.stop();
    m_pendingAppendData.clear();

    // ...and a pending asynchronous remove.
    m_removeTimer.stop();
    m_pendingRemoveStart = MediaTime::invalidTime();
    m_pendingRemoveEnd = MediaTime::invalidTime();

    // Leave the updating state, then fire abort and updateend in that order.
    m_updating = false;
    scheduleEvent(eventNames().abortEvent);
    scheduleEvent(eventNames().updateendEvent);
}
void SourceBuffer::removedFromMediaSource()
{
if (isRemoved())
return;
abortIfUpdating();
for (auto& trackBufferPair : m_trackBufferMap.values()) {
trackBufferPair.samples.clear();
trackBufferPair.decodeQueue.clear();
}
m_private->removedFromMediaSource();
m_source = 0;
}
void SourceBuffer::seekToTime(const MediaTime& time)
{
LOG(MediaSource, "SourceBuffer::seekToTime(%p) - time(%s)", this, toString(time).utf8().data());
for (auto& trackBufferPair : m_trackBufferMap) {
TrackBuffer& trackBuffer = trackBufferPair.value;
const AtomicString& trackID = trackBufferPair.key;
reenqueueMediaForTime(trackBuffer, trackID, time);
}
}
// Picks the best fast-seek time reachable via sync samples: for each track, find
// the sync sample nearest targetTime within [targetTime - negativeThreshold,
// targetTime + positiveThreshold]; across tracks, report the candidate farthest
// from the target, since every track must be able to start decoding at the chosen
// time. Returns targetTime itself when no track has a usable sync sample.
// (Removed two unused locals that precomputed the window bounds.)
MediaTime SourceBuffer::sourceBufferPrivateFastSeekTimeForMediaTime(SourceBufferPrivate*, const MediaTime& targetTime, const MediaTime& negativeThreshold, const MediaTime& positiveThreshold)
{
    MediaTime seekTime = targetTime;

    for (auto& trackBuffer : m_trackBufferMap.values()) {
        auto upperBound = trackBuffer.samples.decodeOrder().end();
        auto lowerBound = trackBuffer.samples.decodeOrder().rend();

        auto futureSyncSampleIterator = trackBuffer.samples.decodeOrder().findSyncSampleAfterPresentationTime(targetTime, positiveThreshold);
        auto pastSyncSampleIterator = trackBuffer.samples.decodeOrder().findSyncSamplePriorToPresentationTime(targetTime, negativeThreshold);
        if (futureSyncSampleIterator == upperBound && pastSyncSampleIterator == lowerBound)
            continue;

        MediaTime futureSeekTime = MediaTime::positiveInfiniteTime();
        if (futureSyncSampleIterator != upperBound) {
            RefPtr<MediaSample>& sample = futureSyncSampleIterator->second;
            futureSeekTime = sample->presentationTime();
        }

        MediaTime pastSeekTime = MediaTime::negativeInfiniteTime();
        if (pastSyncSampleIterator != lowerBound) {
            RefPtr<MediaSample>& sample = pastSyncSampleIterator->second;
            pastSeekTime = sample->presentationTime();
        }

        // Keep whichever sync sample is closer to the target for this track...
        MediaTime trackSeekTime = abs(targetTime - futureSeekTime) < abs(targetTime - pastSeekTime) ? futureSeekTime : pastSeekTime;
        // ...and across tracks keep the candidate farthest from the target.
        if (abs(targetTime - trackSeekTime) > abs(targetTime - seekTime))
            seekTime = trackSeekTime;
    }

    return seekTime;
}
// ActiveDOMObject: keep this wrapper alive while attached to a MediaSource or
// while events are still queued for dispatch.
bool SourceBuffer::hasPendingActivity() const
{
    if (m_source)
        return true;
    return m_asyncEventQueue.hasPendingEvents();
}
// ActiveDOMObject: the script execution context is stopping; cancel pending
// asynchronous append/remove work. (No abort events — the page is going away.)
void SourceBuffer::stop()
{
    m_appendBufferTimer.stop();
    m_removeTimer.stop();
}
// True once removedFromMediaSource() has cleared the back-pointer to the source.
bool SourceBuffer::isRemoved() const
{
    return m_source == nullptr;
}
void SourceBuffer::scheduleEvent(const AtomicString& eventName)
{
RefPtr<Event> event = Event::create(eventName, false, false);
event->setTarget(this);
m_asyncEventQueue.enqueueEvent(event.release());
}
// Shared implementation of both appendBuffer() overloads — Section 3.2
// appendBuffer() / 3.5.4 Prepare Append algorithm. The actual parse happens
// asynchronously in appendBufferTimerFired().
void SourceBuffer::appendBufferInternal(unsigned char* data, unsigned size, ExceptionCode& ec)
{
    // Prepare Append steps 1-2: throw if detached or already updating.
    if (isRemoved() || m_updating) {
        ec = INVALID_STATE_ERR;
        return;
    }

    // Step 3: reopen the parent MediaSource if it had ended.
    m_source->openIfInEndedState();

    // Steps 4-5: run coded frame eviction; if the buffer is still full, throw
    // QUOTA_EXCEEDED_ERR.
    m_private->evictCodedFrames();
    if (m_private->isFull()) {
        ec = QUOTA_EXCEEDED_ERR;
        return;
    }

    // 2. Add data to the end of the input buffer.
    m_pendingAppendData.append(data, size);

    // 3-5. Enter the updating state, fire updatestart, and schedule the
    // asynchronous buffer append algorithm.
    m_updating = true;
    scheduleEvent(eventNames().updatestartEvent);
    m_appendBufferTimer.startOneShot(0);

    reportExtraMemoryCost();
}
// Asynchronous half of the buffer append algorithm (Section 3.5.4): hands the
// pending bytes to the platform parser, then clears the input buffer.
void SourceBuffer::appendBufferTimerFired(Timer<SourceBuffer>&)
{
    if (isRemoved())
        return;

    ASSERT(m_updating);

    // 1. Run the segment parser loop algorithm.
    // Capture the size before the resize below so an empty append is still reported
    // to the private as zero bytes.
    size_t appendSize = m_pendingAppendData.size();
    if (!appendSize) {
        // Resize to 1 so the Vector's data() pointer is non-null even for a
        // zero-length append.
        m_pendingAppendData.resize(1);
    }
    // BUGFIX: removed an unreachable branch here that re-tested
    // m_pendingAppendData.size() — after the resize above the vector can never be
    // empty, so that early-complete path was dead code.
    m_private->append(m_pendingAppendData.data(), appendSize);

    // 3. The platform consumed the bytes handed to it; drop them from the input buffer.
    m_pendingAppendData.clear();
}
// Platform callback fired when an asynchronous append finishes — Section 3.5.4
// Buffer Append Algorithm, concluding steps, plus the append error path.
void SourceBuffer::sourceBufferPrivateAppendComplete(SourceBufferPrivate*, AppendResult result)
{
    if (isRemoved())
        return;

    // A parse failure runs the end of stream algorithm with the "decode" error.
    if (result == ParsingFailed) {
        m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode());
        return;
    }

    // Other non-success results (e.g. an aborted append) need no further handling.
    if (result != AppendSucceeded)
        return;

    // 6. Set the updating attribute to false.
    m_updating = false;

    // 7-8. Queue update and updateend events.
    scheduleEvent(eventNames().updateEvent);
    scheduleEvent(eventNames().updateendEvent);

    // BUGFIX: m_source was previously checked here ("if (m_source)") but then
    // dereferenced unconditionally two lines later — an inconsistent guard hiding a
    // potential null dereference. isRemoved() at function entry guarantees m_source
    // is non-null and nothing above can have detached us, so dereference directly.
    m_source->monitorSourceBuffers();

    MediaTime currentMediaTime = MediaTime::createWithDouble(m_source->currentTime());
    for (auto& trackBufferPair : m_trackBufferMap) {
        TrackBuffer& trackBuffer = trackBufferPair.value;
        const AtomicString& trackID = trackBufferPair.key;

        // Tracks whose enqueued samples were invalidated by the append must be
        // re-primed at the current time; otherwise just feed newly queued samples.
        if (trackBuffer.needsReenqueueing) {
            LOG(MediaSource, "SourceBuffer::sourceBufferPrivateAppendComplete(%p) - reenqueuing at time (%s)", this, toString(currentMediaTime).utf8().data());
            reenqueueMediaForTime(trackBuffer, trackID, currentMediaTime);
        } else
            provideMediaData(trackBuffer, trackID);
    }

    reportExtraMemoryCost();
}
// Platform callback: a rendering error is fatal to the stream, so run the end of
// stream algorithm with the "decode" error unless already detached.
void SourceBuffer::sourceBufferPrivateDidReceiveRenderingError(SourceBufferPrivate*, int)
{
    if (isRemoved())
        return;
    m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode());
}
// Orders presentation-keyed map entries by their samples' decode timestamps; used
// by removeCodedFrames() to locate the earliest decode time in a removal range.
static bool decodeTimeComparator(const PresentationOrderSampleMap::MapType::value_type& a, const PresentationOrderSampleMap::MapType::value_type& b)
{
    return a.second->decodeTime() < b.second->decodeTime();
}
void SourceBuffer::removeCodedFrames(const MediaTime& start, const MediaTime& end)
{
LOG(MediaSource, "SourceBuffer::removeCodedFrames(%p) - start(%s), end(%s)", this, toString(start).utf8().data(), toString(end).utf8().data());
MediaTime durationMediaTime = MediaTime::createWithDouble(m_source->duration());
MediaTime currentMediaTime = MediaTime::createWithDouble(m_source->currentTime());
for (auto& iter : m_trackBufferMap) {
TrackBuffer& trackBuffer = iter.value;
DecodeOrderSampleMap::iterator removeDecodeEnd = trackBuffer.samples.decodeOrder().findSyncSampleAfterPresentationTime(end);
PresentationOrderSampleMap::iterator removePresentationEnd;
if (removeDecodeEnd == trackBuffer.samples.decodeOrder().end())
removePresentationEnd = trackBuffer.samples.presentationOrder().end();
else
removePresentationEnd = trackBuffer.samples.presentationOrder().findSampleWithPresentationTime(removeDecodeEnd->second->presentationTime());
PresentationOrderSampleMap::iterator removePresentationStart = trackBuffer.samples.presentationOrder().findSampleOnOrAfterPresentationTime(start);
if (removePresentationStart == removePresentationEnd)
continue;
PresentationOrderSampleMap::iterator minDecodeTimeIter = std::min_element(removePresentationStart, removePresentationEnd, decodeTimeComparator);
DecodeOrderSampleMap::KeyType decodeKey(minDecodeTimeIter->second->decodeTime(), minDecodeTimeIter->second->presentationTime());
DecodeOrderSampleMap::iterator removeDecodeStart = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(decodeKey);
DecodeOrderSampleMap::MapType erasedSamples(removeDecodeStart, removeDecodeEnd);
RefPtr<TimeRanges> erasedRanges = TimeRanges::create();
MediaTime microsecond(1, 1000000);
for (auto erasedIt : erasedSamples) {
RefPtr<MediaSample>& sample = erasedIt.second;
trackBuffer.samples.removeSample(sample.get());
double startTime = sample->presentationTime().toDouble();
double endTime = startTime + (sample->duration() + microsecond).toDouble();
erasedRanges->add(startTime, endTime);
}
PlatformTimeRanges possiblyEnqueuedRanges(currentMediaTime, trackBuffer.lastEnqueuedPresentationTime);
possiblyEnqueuedRanges.intersectWith(erasedRanges->ranges());
if (possiblyEnqueuedRanges.length())
trackBuffer.needsReenqueueing = true;
erasedRanges->invert();
m_buffered->intersectWith(*erasedRanges);
if (m_active && currentMediaTime >= start && currentMediaTime < end && m_private->readyState() > MediaPlayer::HaveMetadata)
m_private->setReadyState(MediaPlayer::HaveMetadata);
}
}
// Asynchronous half of remove() — Section 3.2 remove() steps, continued: run the
// coded frame removal algorithm over the pending range, then leave the updating
// state and fire update/updateend.
void SourceBuffer::removeTimerFired(Timer<SourceBuffer>*)
{
    ASSERT(m_updating);
    ASSERT(m_pendingRemoveStart.isValid());
    ASSERT(m_pendingRemoveStart < m_pendingRemoveEnd);

    // 9. Run the coded frame removal algorithm over the range stashed by remove().
    removeCodedFrames(m_pendingRemoveStart, m_pendingRemoveEnd);

    // 10-12. Reset the pending range, clear updating, and announce completion.
    m_updating = false;
    m_pendingRemoveStart = MediaTime::invalidTime();
    m_pendingRemoveEnd = MediaTime::invalidTime();

    scheduleEvent(eventNames().updateEvent);
    scheduleEvent(eventNames().updateendEvent);
}
// Error string passed to MediaSource::streamEndedWithError() for decode failures.
const AtomicString& SourceBuffer::decodeError()
{
    static NeverDestroyed<AtomicString> decode("decode", AtomicString::ConstructFromLiteral);
    return decode;
}
// Error string passed to MediaSource::streamEndedWithError() for network failures.
const AtomicString& SourceBuffer::networkError()
{
    static NeverDestroyed<AtomicString> network("network", AtomicString::ConstructFromLiteral);
    return network;
}
// Lazily creates the VideoTrackList; a media element is required since the list
// dispatches its events through it. Returns null once detached.
VideoTrackList* SourceBuffer::videoTracks()
{
    HTMLMediaElement* element = m_source ? m_source->mediaElement() : nullptr;
    if (!element)
        return nullptr;

    if (!m_videoTracks)
        m_videoTracks = VideoTrackList::create(element, ActiveDOMObject::scriptExecutionContext());

    return m_videoTracks.get();
}
// Lazily creates the AudioTrackList; a media element is required since the list
// dispatches its events through it. Returns null once detached.
AudioTrackList* SourceBuffer::audioTracks()
{
    HTMLMediaElement* element = m_source ? m_source->mediaElement() : nullptr;
    if (!element)
        return nullptr;

    if (!m_audioTracks)
        m_audioTracks = AudioTrackList::create(element, ActiveDOMObject::scriptExecutionContext());

    return m_audioTracks.get();
}
// Lazily creates the TextTrackList; a media element is required since the list
// dispatches its events through it. Returns null once detached.
TextTrackList* SourceBuffer::textTracks()
{
    HTMLMediaElement* element = m_source ? m_source->mediaElement() : nullptr;
    if (!element)
        return nullptr;

    if (!m_textTracks)
        m_textTracks = TextTrackList::create(element, ActiveDOMObject::scriptExecutionContext());

    return m_textTracks.get();
}
// Toggles whether this buffer is in the MediaSource's activeSourceBuffers list,
// propagating the state to the platform backend and the MediaSource.
void SourceBuffer::setActive(bool active)
{
    // No-op when the active state is unchanged.
    if (m_active == active)
        return;

    m_active = active;
    m_private->setActive(active);
    // NOTE(review): "Acitve" is misspelled in the MediaSource API; kept as-is to
    // match the declaration elsewhere in the project.
    if (!isRemoved())
        m_source->sourceBufferDidChangeAcitveState(this, active);
}
// Platform callback: forward an end-of-stream signal (with its error string) to
// the MediaSource unless already detached.
void SourceBuffer::sourceBufferPrivateDidEndStream(SourceBufferPrivate*, const WTF::AtomicString& error)
{
    if (isRemoved())
        return;
    m_source->streamEndedWithError(error, IgnorableExceptionCode());
}
// 3.5.7 Initialization Segment Received algorithm: updates the duration,
// validates subsequent init segments against the first, creates Track objects and
// track buffers on the first segment, and advances the media ready state.
void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(SourceBufferPrivate*, const InitializationSegment& segment)
{
    if (isRemoved())
        return;

    // 1. Update the duration attribute if it currently equals NaN; an unknown
    //    segment duration maps to +infinity.
    if (std::isnan(m_source->duration())) {
        MediaTime newDuration = segment.duration.isValid() ? segment.duration : MediaTime::positiveInfiniteTime();
        m_source->setDurationInternal(newDuration.toDouble());
    }

    // 2. A segment with no audio, video, or text tracks is a decode error.
    // NOTE(review): the spec says "and abort these steps" here but this code falls
    // through — confirm whether streamEndedWithError() makes continuing harmless.
    if (!segment.audioTracks.size() && !segment.videoTracks.size() && !segment.textTracks.size())
        m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode());

    // 3. After the first segment, new segments must be compatible with it; then
    //    each track's platform backing is swapped for the new segment's.
    if (m_receivedFirstInitializationSegment) {
        if (!validateInitializationSegment(segment)) {
            m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode());
            return;
        }
        // 3.2 With a single track of a type, IDs may differ between segments, so
        //     match by position; otherwise match by track ID.
        ASSERT(segment.audioTracks.size() == audioTracks()->length());
        for (auto& audioTrackInfo : segment.audioTracks) {
            if (audioTracks()->length() == 1) {
                audioTracks()->item(0)->setPrivate(audioTrackInfo.track);
                break;
            }

            auto audioTrack = audioTracks()->getTrackById(audioTrackInfo.track->id());
            ASSERT(audioTrack);
            audioTrack->setPrivate(audioTrackInfo.track);
        }

        ASSERT(segment.videoTracks.size() == videoTracks()->length());
        for (auto& videoTrackInfo : segment.videoTracks) {
            if (videoTracks()->length() == 1) {
                videoTracks()->item(0)->setPrivate(videoTrackInfo.track);
                break;
            }

            auto videoTrack = videoTracks()->getTrackById(videoTrackInfo.track->id());
            ASSERT(videoTrack);
            videoTrack->setPrivate(videoTrackInfo.track);
        }

        ASSERT(segment.textTracks.size() == textTracks()->length());
        for (auto& textTrackInfo : segment.textTracks) {
            if (textTracks()->length() == 1) {
                toInbandTextTrack(textTracks()->item(0))->setPrivate(textTrackInfo.track);
                break;
            }

            auto textTrack = textTracks()->getTrackById(textTrackInfo.track->id());
            ASSERT(textTrack);
            toInbandTextTrack(textTrack)->setPrivate(textTrackInfo.track);
        }

        // 3.3 Every track buffer must wait for a new random access point.
        for (auto& trackBuffer : m_trackBufferMap.values())
            trackBuffer.needRandomAccessFlag = true;
    }

    // 4. Let active track flag equal false.
    bool activeTrackFlag = false;

    // 5. On the first initialization segment, create Track objects and track
    //    buffers for every track described.
    if (!m_receivedFirstInitializationSegment) {
        // 5.2 Audio tracks: the first one is enabled by default and activates us.
        for (auto& audioTrackInfo : segment.audioTracks) {
            AudioTrackPrivate* audioTrackPrivate = audioTrackInfo.track.get();

            RefPtr<AudioTrack> newAudioTrack = AudioTrack::create(this, audioTrackPrivate);
            newAudioTrack->setSourceBuffer(this);
            if (!audioTracks()->length()) {
                newAudioTrack->setEnabled(true);
                activeTrackFlag = true;
            }
            audioTracks()->append(newAudioTrack);
            m_source->mediaElement()->audioTracks()->append(newAudioTrack);

            // Create the matching track buffer and remember the codec for later
            // segment validation.
            ASSERT(!m_trackBufferMap.contains(newAudioTrack->id()));
            TrackBuffer& trackBuffer = m_trackBufferMap.add(newAudioTrack->id(), TrackBuffer()).iterator->value;
            trackBuffer.description = audioTrackInfo.description;
            m_audioCodecs.append(trackBuffer.description->codec());
        }

        // 5.3 Video tracks: the first one is selected by default and activates us.
        for (auto& videoTrackInfo : segment.videoTracks) {
            VideoTrackPrivate* videoTrackPrivate = videoTrackInfo.track.get();

            RefPtr<VideoTrack> newVideoTrack = VideoTrack::create(this, videoTrackPrivate);
            newVideoTrack->setSourceBuffer(this);
            if (!videoTracks()->length()) {
                newVideoTrack->setSelected(true);
                activeTrackFlag = true;
            }
            videoTracks()->append(newVideoTrack);
            m_source->mediaElement()->videoTracks()->append(newVideoTrack);

            ASSERT(!m_trackBufferMap.contains(newVideoTrack->id()));
            TrackBuffer& trackBuffer = m_trackBufferMap.add(newVideoTrack->id(), TrackBuffer()).iterator->value;
            trackBuffer.description = videoTrackInfo.description;
            m_videoCodecs.append(trackBuffer.description->codec());
        }

        // 5.4 Text tracks: any non-disabled track activates us.
        for (auto& textTrackInfo : segment.textTracks) {
            InbandTextTrackPrivate* textTrackPrivate = textTrackInfo.track.get();

            RefPtr<InbandTextTrack> newTextTrack = InbandTextTrack::create(scriptExecutionContext(), this, textTrackPrivate);
            if (textTrackPrivate->mode() != InbandTextTrackPrivate::Disabled)
                activeTrackFlag = true;
            textTracks()->append(newTextTrack);
            m_source->mediaElement()->textTracks()->append(newTextTrack);

            ASSERT(!m_trackBufferMap.contains(textTrackPrivate->id()));
            TrackBuffer& trackBuffer = m_trackBufferMap.add(textTrackPrivate->id(), TrackBuffer()).iterator->value;
            trackBuffer.description = textTrackInfo.description;
            m_textCodecs.append(trackBuffer.description->codec());
        }

        // 5.5 If any track is active, add this SourceBuffer to activeSourceBuffers.
        if (activeTrackFlag) {
            setActive(true);
        }

        // 5.6 Set the first initialization segment flag to true.
        m_receivedFirstInitializationSegment = true;
    }

    // 6. At HAVE_NOTHING, wait until every SourceBuffer has received an init
    //    segment before advancing to HAVE_METADATA.
    if (m_private->readyState() == MediaPlayer::HaveNothing) {
        for (auto& sourceBuffer : *m_source->sourceBuffers()) {
            if (!sourceBuffer->m_receivedFirstInitializationSegment)
                return;
        }

        m_private->setReadyState(MediaPlayer::HaveMetadata);
    }

    // 7. New active tracks need data before playback can continue: drop the ready
    //    state back to HAVE_METADATA if it had advanced beyond HAVE_CURRENT_DATA.
    if (activeTrackFlag && m_private->readyState() > MediaPlayer::HaveCurrentData)
        m_private->setReadyState(MediaPlayer::HaveMetadata);
}
// Section 3.5.7 Initialization Segment Received, step 3.1: a subsequent
// initialization segment is only acceptable when it describes the same number of
// tracks of each type, uses codecs already seen, and (for multi-track types)
// reuses the track IDs from the first segment.
bool SourceBuffer::validateInitializationSegment(const InitializationSegment& segment)
{
    // Track counts per type must match the first segment.
    if (segment.audioTracks.size() != audioTracks()->length()
        || segment.videoTracks.size() != videoTracks()->length()
        || segment.textTracks.size() != textTracks()->length())
        return false;

    // Every codec must already be known for its track type.
    for (auto& audioTrackInfo : segment.audioTracks) {
        if (!m_audioCodecs.contains(audioTrackInfo.description->codec()))
            return false;
    }

    for (auto& videoTrackInfo : segment.videoTracks) {
        if (!m_videoCodecs.contains(videoTrackInfo.description->codec()))
            return false;
    }

    for (auto& textTrackInfo : segment.textTracks) {
        if (!m_textCodecs.contains(textTrackInfo.description->codec()))
            return false;
    }

    // With more than one track of a type, IDs must match the first segment's
    // (single-track segments are allowed to change IDs).
    if (segment.audioTracks.size() >= 2) {
        for (auto& audioTrackInfo : segment.audioTracks) {
            if (!m_trackBufferMap.contains(audioTrackInfo.track->id()))
                return false;
        }
    }

    if (segment.videoTracks.size() >= 2) {
        for (auto& videoTrackInfo : segment.videoTracks) {
            if (!m_trackBufferMap.contains(videoTrackInfo.track->id()))
                return false;
        }
    }

    if (segment.textTracks.size() >= 2) {
        // BUGFIX: this loop previously iterated segment.videoTracks, so text track
        // IDs were validated against the video track list.
        for (auto& textTrackInfo : segment.textTracks) {
            if (!m_trackBufferMap.contains(textTrackInfo.track->id()))
                return false;
        }
    }

    return true;
}
class SampleLessThanComparator {
public:
bool operator()(std::pair<MediaTime, RefPtr<MediaSample>> value1, std::pair<MediaTime, RefPtr<MediaSample>> value2)
{
return value1.first < value2.first;
}
bool operator()(MediaTime value1, std::pair<MediaTime, RefPtr<MediaSample>> value2)
{
return value1 < value2.first;
}
bool operator()(std::pair<MediaTime, RefPtr<MediaSample>> value1, MediaTime value2)
{
return value1.first < value2;
}
};
// 3.5.8 Coded Frame Processing algorithm: applies the timestamp offset, enforces
// the discontinuity and random-access rules, removes overlapped/stale frames,
// then stores the sample and extends the buffered ranges. The do/while(1) with
// continue/break mirrors the spec's "jump to the Loop Top step" control flow.
void SourceBuffer::sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, PassRefPtr<MediaSample> prpSample)
{
    if (isRemoved())
        return;

    RefPtr<MediaSample> sample = prpSample;

    do {
        // 1.1-1.3: the sample's presentation/decode timestamps and duration.
        MediaTime presentationTimestamp = sample->presentationTime();
        MediaTime decodeTimestamp = sample->decodeTime();
        MediaTime frameDuration = sample->duration();

        // 1.4 Apply a non-zero timestampOffset; a frame shifted before the
        //     presentation start time is a decode error.
        if (m_timestampOffset != MediaTime::zeroTime()) {
            presentationTimestamp += m_timestampOffset;
            decodeTimestamp += m_timestampOffset;
            MediaTime presentationStartTime = MediaTime::zeroTime();
            if (presentationTimestamp < presentationStartTime || decodeTimestamp < presentationStartTime) {
                m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode());
                return;
            }
        }

        // Find (or lazily create) the track buffer for this sample's track.
        AtomicString trackID = sample->trackID();
        auto it = m_trackBufferMap.find(trackID);
        if (it == m_trackBufferMap.end())
            it = m_trackBufferMap.add(trackID, TrackBuffer()).iterator;
        TrackBuffer& trackBuffer = it->value;

        // 1.6 A decode-timestamp discontinuity (going backwards, or jumping forward
        //     by more than twice the last frame duration) resets every track buffer
        //     and re-runs the loop on the same sample.
        if (trackBuffer.lastDecodeTimestamp.isValid() && (decodeTimestamp < trackBuffer.lastDecodeTimestamp
            || abs(decodeTimestamp - trackBuffer.lastDecodeTimestamp) > (trackBuffer.lastFrameDuration * 2))) {
            // NOTE(review): the spec's discontinuity step updates the group end
            // timestamp; confirm m_highestPresentationEndTimestamp is the intended
            // target here.
            m_highestPresentationEndTimestamp = presentationTimestamp;

            // (Intentionally shadows the outer trackBuffer: all tracks are reset.)
            for (auto& trackBuffer : m_trackBufferMap.values()) {
                trackBuffer.lastDecodeTimestamp = MediaTime::invalidTime();
                trackBuffer.lastFrameDuration = MediaTime::invalidTime();
                trackBuffer.highestPresentationTimestamp = MediaTime::invalidTime();
                trackBuffer.needRandomAccessFlag = true;
            }

            continue;
        }

        // 1.7 Frame end timestamp = presentation timestamp + frame duration.
        MediaTime frameEndTimestamp = presentationTimestamp + frameDuration;

        // 1.10 While the need-random-access flag is set, drop non-sync frames.
        if (trackBuffer.needRandomAccessFlag) {
            if (!sample->isSync()) {
                didDropSample();
                return;
            }
            trackBuffer.needRandomAccessFlag = false;
        }

        // 1.13-1.15: collect frames the new sample overlaps or supersedes.
        SampleMap erasedSamples;
        MediaTime microsecond(1, 1000000);

        // After a reset, a video frame whose presentation window contains the new
        // sample's start (beyond a microsecond of tolerance) counts as overlapped.
        if (trackBuffer.lastDecodeTimestamp.isInvalid()) {
            auto iter = trackBuffer.samples.presentationOrder().findSampleContainingPresentationTime(presentationTimestamp);
            if (iter != trackBuffer.samples.presentationOrder().end()) {
                RefPtr<MediaSample> overlappedFrame = iter->second;
                if (trackBuffer.description->isVideo()) {
                    MediaTime overlappedFramePresentationTimestamp = overlappedFrame->presentationTime();
                    MediaTime removeWindowTimestamp = overlappedFramePresentationTimestamp + microsecond;
                    if (presentationTimestamp < removeWindowTimestamp)
                        erasedSamples.addSample(iter->second);
                }
            }
        }

        // With no highest-presentation history, erase whatever the new frame spans.
        if (trackBuffer.highestPresentationTimestamp.isInvalid()) {
            auto iter_pair = trackBuffer.samples.presentationOrder().findSamplesBetweenPresentationTimes(presentationTimestamp, frameEndTimestamp);
            if (iter_pair.first != trackBuffer.samples.presentationOrder().end())
                erasedSamples.addRange(iter_pair.first, iter_pair.second);
        }

        // Otherwise erase previously-buffered frames between the track's highest
        // presentation timestamp and the new frame's end; search from the end of
        // the map when the target lies near the buffered tail (cheaper for the
        // common append-at-end case).
        if (trackBuffer.highestPresentationTimestamp.isValid() && trackBuffer.highestPresentationTimestamp <= presentationTimestamp) {
            do {
                if (!m_buffered)
                    break;

                unsigned bufferedLength = m_buffered->ranges().length();
                if (!bufferedLength)
                    break;

                bool ignoreValid;
                MediaTime highestBufferedTime = m_buffered->ranges().end(bufferedLength - 1, ignoreValid);

                PresentationOrderSampleMap::iterator_range range;
                if (highestBufferedTime - trackBuffer.highestPresentationTimestamp < trackBuffer.lastFrameDuration)
                    range = trackBuffer.samples.presentationOrder().findSamplesWithinPresentationRangeFromEnd(trackBuffer.highestPresentationTimestamp, frameEndTimestamp);
                else
                    range = trackBuffer.samples.presentationOrder().findSamplesWithinPresentationRange(trackBuffer.highestPresentationTimestamp, frameEndTimestamp);

                if (range.first != trackBuffer.samples.presentationOrder().end())
                    erasedSamples.addRange(range.first, range.second);
            } while(false);
        }

        // 1.16 Remove decoding dependencies: extend the erased set through every
        //      frame up to (not including) the next sync sample, since those frames
        //      can no longer be decoded.
        DecodeOrderSampleMap::MapType dependentSamples;
        if (!erasedSamples.empty()) {
            auto firstDecodeIter = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(erasedSamples.decodeOrder().begin()->first);
            auto lastDecodeIter = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(erasedSamples.decodeOrder().rbegin()->first);
            auto nextSyncIter = trackBuffer.samples.decodeOrder().findSyncSampleAfterDecodeIterator(lastDecodeIter);
            dependentSamples.insert(firstDecodeIter, nextSyncIter);

            RefPtr<TimeRanges> erasedRanges = TimeRanges::create();
            for (auto& samplePair : dependentSamples) {
                // The microsecond fudge keeps adjacent ranges coalesced.
                MediaTime startTime = samplePair.second->presentationTime();
                MediaTime endTime = startTime + samplePair.second->duration() + microsecond;
                erasedRanges->add(startTime.toDouble(), endTime.toDouble());
                LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidReceiveSample(%p) - removing sample(%s)", this, toString(*samplePair.second).utf8().data());
                trackBuffer.samples.removeSample(samplePair.second.get());
            }

            // Subtract the erased ranges from buffered, and force a re-enqueue since
            // frames already queued for display may have been removed.
            erasedRanges->invert();
            m_buffered->intersectWith(*erasedRanges.get());
            trackBuffer.needsReenqueueing = true;
        }

        // 1.17 Store the frame in both orderings and queue it for the platform.
        trackBuffer.samples.addSample(sample);
        DecodeOrderSampleMap::KeyType decodeKey(decodeTimestamp, presentationTimestamp);
        trackBuffer.decodeQueue.insert(DecodeOrderSampleMap::MapType::value_type(decodeKey, sample));

        // 1.18-1.19: update per-track and per-buffer bookkeeping timestamps.
        trackBuffer.lastDecodeTimestamp = decodeTimestamp;
        trackBuffer.lastFrameDuration = frameDuration;
        if (trackBuffer.highestPresentationTimestamp.isInvalid() || frameEndTimestamp > trackBuffer.highestPresentationTimestamp)
            trackBuffer.highestPresentationTimestamp = frameEndTimestamp;
        if (m_highestPresentationEndTimestamp.isInvalid() || frameEndTimestamp > m_highestPresentationEndTimestamp)
            m_highestPresentationEndTimestamp = frameEndTimestamp;

        // Extend buffered (microsecond fudge again) and feed the rate monitor.
        m_buffered->add(presentationTimestamp.toDouble(), (presentationTimestamp + frameDuration + microsecond).toDouble());
        m_bufferedSinceLastMonitor += frameDuration.toDouble();

        break;
    } while (1);

    // 5. If the media segment extends past the current duration, grow the duration
    //    to the highest end timestamp seen.
    if (highestPresentationEndTimestamp().toDouble() > m_source->duration())
        m_source->setDurationInternal(highestPresentationEndTimestamp().toDouble());
}
// True when at least one audio track has been created from an init segment.
bool SourceBuffer::sourceBufferPrivateHasAudio(const SourceBufferPrivate*) const
{
    if (!m_audioTracks)
        return false;
    return m_audioTracks->length() > 0;
}
// True when at least one video track has been created from an init segment.
bool SourceBuffer::sourceBufferPrivateHasVideo(const SourceBufferPrivate*) const
{
    if (!m_videoTracks)
        return false;
    return m_videoTracks->length() > 0;
}
// 2.4.5 Changes to selected/enabled track state, for video track selection.
// BUGFIX: the two conditions were inverted — the buffer was deactivated when a
// track became *selected* and activated when it was *deselected*. Per the spec,
// deselecting the last enabled track removes the buffer from activeSourceBuffers,
// and selecting a track adds it.
void SourceBuffer::videoTrackSelectedChanged(VideoTrack* track)
{
    // 1. If the SourceBuffer associated with the previously selected video track is
    //    not associated with any other enabled tracks, remove it from
    //    activeSourceBuffers (firing removesourcebuffer via setActive).
    if (!track->selected()
        && (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled())
        && (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled())
        && (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) {
        setActive(false);
    } else if (track->selected()) {
        // 2. Add the SourceBuffer associated with the newly selected video track to
        //    activeSourceBuffers (firing addsourcebuffer via setActive).
        setActive(true);
    }

    if (!isRemoved())
        m_source->mediaElement()->videoTrackSelectedChanged(track);
}
// 2.4.5 Changes to selected/enabled track state, for audio track enabling.
// BUGFIX: the two conditions were inverted — the buffer was deactivated when a
// track became *enabled* and activated when it was *disabled*. Per the spec,
// disabling the last enabled track removes the buffer from activeSourceBuffers,
// and enabling a track adds it.
void SourceBuffer::audioTrackEnabledChanged(AudioTrack* track)
{
    // 1. If the SourceBuffer associated with the disabled audio track is not
    //    associated with any other enabled tracks, remove it from
    //    activeSourceBuffers (firing removesourcebuffer via setActive).
    if (!track->enabled()
        && (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled())
        && (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled())
        && (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) {
        setActive(false);
    } else if (track->enabled()) {
        // 2. Add the SourceBuffer associated with the newly enabled audio track to
        //    activeSourceBuffers (firing addsourcebuffer via setActive).
        setActive(true);
    }

    if (!isRemoved())
        m_source->mediaElement()->audioTrackEnabledChanged(track);
}
// 2.4.5 Changes to selected/enabled track state, for text track mode changes.
// BUGFIX: the bare "else" activated the buffer even when the change was a track
// being *disabled* (but other tracks remained enabled); per the spec, only a mode
// change to "showing" or "hidden" adds the buffer to activeSourceBuffers. This
// also matches the else-if structure of the sibling audio/video handlers.
void SourceBuffer::textTrackModeChanged(TextTrack* track)
{
    // 1. If a text track mode becomes "disabled" and this SourceBuffer has no other
    //    enabled tracks, remove it from activeSourceBuffers.
    if (track->mode() == TextTrack::disabledKeyword()
        && (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled())
        && (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled())
        && (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) {
        setActive(false);
    } else if (track->mode() != TextTrack::disabledKeyword()) {
        // 2. A mode change to "showing" or "hidden" adds this SourceBuffer to
        //    activeSourceBuffers.
        setActive(true);
    }

    if (!isRemoved())
        m_source->mediaElement()->textTrackModeChanged(track);
}
// Forwards a cue addition to the media element owning our MediaSource, if attached.
void SourceBuffer::textTrackAddCue(TextTrack* track, WTF::PassRefPtr<TextTrackCue> cue)
{
    if (isRemoved())
        return;
    m_source->mediaElement()->textTrackAddCue(track, cue);
}
// Forwards a batch cue addition to the media element, if attached.
void SourceBuffer::textTrackAddCues(TextTrack* track, TextTrackCueList const* cueList)
{
    if (isRemoved())
        return;
    m_source->mediaElement()->textTrackAddCues(track, cueList);
}
// Forwards a cue removal to the media element, if attached.
void SourceBuffer::textTrackRemoveCue(TextTrack* track, WTF::PassRefPtr<TextTrackCue> cue)
{
    if (isRemoved())
        return;
    m_source->mediaElement()->textTrackRemoveCue(track, cue);
}
// Forwards a batch cue removal to the media element, if attached.
void SourceBuffer::textTrackRemoveCues(TextTrack* track, TextTrackCueList const* cueList)
{
    if (isRemoved())
        return;
    m_source->mediaElement()->textTrackRemoveCues(track, cueList);
}
// Forwards a text track kind change to the media element, if attached.
void SourceBuffer::textTrackKindChanged(TextTrack* track)
{
    if (isRemoved())
        return;
    m_source->mediaElement()->textTrackKindChanged(track);
}
// Platform callback: the backend can accept more samples for |trackID| again, so
// resume draining that track's decode queue.
void SourceBuffer::sourceBufferPrivateDidBecomeReadyForMoreSamples(SourceBufferPrivate*, AtomicString trackID)
{
    LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidBecomeReadyForMoreSamples(%p)", this);
    auto trackBufferIterator = m_trackBufferMap.find(trackID);
    if (trackBufferIterator == m_trackBufferMap.end())
        return;
    provideMediaData(trackBufferIterator->value, trackID);
}
// Pushes queued samples for |trackID| to the platform until it reports it cannot
// accept more; samples handed off are removed from the decode queue.
void SourceBuffer::provideMediaData(TrackBuffer& trackBuffer, AtomicString trackID)
{
#if !LOG_DISABLED
    unsigned enqueuedSamples = 0;
#endif

    auto sampleIt = trackBuffer.decodeQueue.begin();
    for (auto sampleEnd = trackBuffer.decodeQueue.end(); sampleIt != sampleEnd; ++sampleIt) {
        // Stop once the platform is saturated; it will call back via
        // sourceBufferPrivateDidBecomeReadyForMoreSamples().
        if (!m_private->isReadyForMoreSamples(trackID)) {
            m_private->notifyClientWhenReadyForMoreSamples(trackID);
            break;
        }

        RefPtr<MediaSample> sample = sampleIt->second;
        // Remember the latest presentation time handed to the platform so
        // removeCodedFrames() can tell whether a re-enqueue is needed.
        trackBuffer.lastEnqueuedPresentationTime = sample->presentationTime();
        m_private->enqueueSample(sample.release(), trackID);
#if !LOG_DISABLED
        ++enqueuedSamples;
#endif
    }

    // Drop everything successfully enqueued; sampleIt stopped at the first sample
    // NOT handed off.
    trackBuffer.decodeQueue.erase(trackBuffer.decodeQueue.begin(), sampleIt);
    LOG(MediaSource, "SourceBuffer::provideMediaData(%p) - Enqueued %u samples", this, enqueuedSamples);
}
// Rebuilds the decode queue so playback can resume at |time|: flushes the
// platform's queued samples, hands it the non-displaying samples from the
// preceding sync sample up to |time|, then refills the decode queue from |time|
// onward and resumes feeding the platform.
void SourceBuffer::reenqueueMediaForTime(TrackBuffer& trackBuffer, AtomicString trackID, const MediaTime& time)
{
    // Find the buffered sample containing the requested time; with nothing
    // buffered there, just flush the platform queue and bail.
    auto currentSamplePTSIterator = trackBuffer.samples.presentationOrder().findSampleContainingPresentationTime(time);

    if (currentSamplePTSIterator == trackBuffer.samples.presentationOrder().end()) {
        trackBuffer.decodeQueue.clear();
        m_private->flushAndEnqueueNonDisplayingSamples(Vector<RefPtr<MediaSample>>(), trackID);
        return;
    }

    // Locate that sample in decode order.
    DecodeOrderSampleMap::KeyType decodeKey(currentSamplePTSIterator->second->decodeTime(), currentSamplePTSIterator->second->presentationTime());
    auto currentSampleDTSIterator = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(decodeKey);
    ASSERT(currentSampleDTSIterator != trackBuffer.samples.decodeOrder().end());

    // Walk backwards in decode order to the closest preceding sync sample;
    // decoding must start there for the target sample to be decodable.
    auto reverseCurrentSampleIter = --DecodeOrderSampleMap::reverse_iterator(currentSampleDTSIterator);
    auto reverseLastSyncSampleIter = trackBuffer.samples.decodeOrder().findSyncSamplePriorToDecodeIterator(reverseCurrentSampleIter);
    if (reverseLastSyncSampleIter == trackBuffer.samples.decodeOrder().rend()) {
        trackBuffer.decodeQueue.clear();
        m_private->flushAndEnqueueNonDisplayingSamples(Vector<RefPtr<MediaSample>>(), trackID);
        return;
    }

    // Samples between the sync sample and the target are decoded but not displayed.
    Vector<RefPtr<MediaSample>> nonDisplayingSamples;
    for (auto iter = reverseLastSyncSampleIter; iter != reverseCurrentSampleIter; --iter)
        nonDisplayingSamples.append(iter->second);

    m_private->flushAndEnqueueNonDisplayingSamples(nonDisplayingSamples, trackID);

    // Refill the decode queue with everything from the target sample onward and
    // resume feeding the platform.
    trackBuffer.decodeQueue.clear();
    for (auto iter = currentSampleDTSIterator; iter != trackBuffer.samples.decodeOrder().end(); ++iter)
        trackBuffer.decodeQueue.insert(*iter);

    provideMediaData(trackBuffer, trackID);
    trackBuffer.needsReenqueueing = false;
}
// Records a dropped (non-sync) frame in the media element's video playback
// quality statistics, if still attached.
void SourceBuffer::didDropSample()
{
    if (isRemoved())
        return;
    m_source->mediaElement()->incrementDroppedFrameCount();
}
// Folds the amount of media buffered since the last call into an exponential
// moving average of the buffering rate (media seconds buffered per wall-clock
// second), used by canPlayThrough().
void SourceBuffer::monitorBufferingRate()
{
    // Nothing new buffered since the last sample; leave the average untouched.
    if (!m_bufferedSinceLastMonitor)
        return;

    double now = monotonicallyIncreasingTime();
    double interval = now - m_timeOfBufferingMonitor;
    double rateSinceLastMonitor = m_bufferedSinceLastMonitor / interval;

    m_timeOfBufferingMonitor = now;
    m_bufferedSinceLastMonitor = 0;

    m_averageBufferRate = m_averageBufferRate * (1 - ExponentialMovingAverageCoefficient) + rateSinceLastMonitor * ExponentialMovingAverageCoefficient;

    // BUGFIX: corrected the misspelled member name in the log message
    // ("m_avegareBufferRate").
    LOG(MediaSource, "SourceBuffer::monitorBufferingRate(%p) - m_averageBufferRate: %lf", this, m_averageBufferRate);
}
// Returns the buffered ranges, extended to the duration once the stream has
// ended: no further data will ever arrive, so the tail counts as buffered.
std::unique_ptr<PlatformTimeRanges> SourceBuffer::bufferedAccountingForEndOfStream() const
{
    std::unique_ptr<PlatformTimeRanges> ranges = PlatformTimeRanges::create(m_buffered->ranges());
    if (!m_source->isEnded())
        return ranges;

    MediaTime start = ranges->maximumBufferedTime();
    MediaTime end = MediaTime::createWithDouble(m_source->duration());
    if (start <= end)
        ranges->add(start, end);

    return ranges;
}
bool SourceBuffer::hasCurrentTime() const
{
if (isRemoved() || !m_buffered->length())
return false;
MediaTime currentTime = MediaTime::createWithDouble(m_source->currentTime());
std::unique_ptr<PlatformTimeRanges> ranges = bufferedAccountingForEndOfStream();
return abs(ranges->nearest(currentTime) - currentTime) <= currentTimeFudgeFactor();
}
// True when the range nearest the current time both contains the current position
// (within the fudge factor) and extends beyond it by more than the fudge factor —
// i.e. there is decodable media ahead of the playhead.
bool SourceBuffer::hasFutureTime() const
{
    if (isRemoved())
        return false;

    std::unique_ptr<PlatformTimeRanges> ranges = bufferedAccountingForEndOfStream();
    if (!ranges->length())
        return false;

    // The nearest buffered range must come within the fudge factor of now...
    MediaTime currentTime = MediaTime::createWithDouble(m_source->currentTime());
    MediaTime nearest = ranges->nearest(currentTime);
    if (abs(nearest - currentTime) > currentTimeFudgeFactor())
        return false;

    size_t found = ranges->find(nearest);
    if (found == notFound)
        return false;

    // ...and must extend past the current time by more than the fudge factor.
    bool ignoredValid = false;
    return ranges->end(found, ignoredValid) - currentTime > currentTimeFudgeFactor();
}
// Estimates whether buffering will keep up with playback: trivially true when the
// average buffering rate exceeds real time; otherwise compares the projected time
// to fetch the remaining unbuffered media against the playback time remaining.
bool SourceBuffer::canPlayThrough()
{
    if (isRemoved())
        return false;

    monitorBufferingRate();

    // Buffering faster than real time guarantees play-through.
    if (m_averageBufferRate > 1)
        return true;

    // Compute the total unbuffered time between now and the duration by inverting
    // the buffered ranges and clipping to [currentTime, duration].
    MediaTime currentTime = MediaTime::createWithDouble(m_source->currentTime());
    MediaTime duration = MediaTime::createWithDouble(m_source->duration());

    std::unique_ptr<PlatformTimeRanges> unbufferedRanges = bufferedAccountingForEndOfStream();
    unbufferedRanges->invert();
    unbufferedRanges->intersectWith(PlatformTimeRanges(currentTime, std::max(currentTime, duration)));

    MediaTime unbufferedTime = unbufferedRanges->totalDuration();
    if (!unbufferedTime.isValid())
        return true;

    MediaTime timeRemaining = duration - currentTime;
    return unbufferedTime.toDouble() / m_averageBufferRate < timeRemaining.toDouble();
}
// Tells the JS garbage collector how much memory this SourceBuffer holds outside
// the JS heap (pending append bytes plus all buffered samples) so GC pressure
// reflects media memory. Only growth past the last report is forwarded.
void SourceBuffer::reportExtraMemoryCost()
{
    size_t extraMemoryCost = m_pendingAppendData.capacity();
    for (auto& trackBuffer : m_trackBufferMap.values())
        extraMemoryCost += trackBuffer.samples.sizeInBytes();

    // Nothing new to report if the total has not grown past the last report.
    if (extraMemoryCost < m_reportedExtraMemoryCost)
        return;

    size_t extraMemoryCostDelta = extraMemoryCost - m_reportedExtraMemoryCost;
    m_reportedExtraMemoryCost = extraMemoryCost;

    // Touching the VM heap requires holding the JS lock.
    JSC::JSLockHolder lock(scriptExecutionContext()->vm());
    if (extraMemoryCostDelta > 0)
        scriptExecutionContext()->vm().heap.reportExtraMemoryCost(extraMemoryCostDelta);
}
// Debug/testing helper: textual descriptions of every buffered sample for the
// given track, in decode order. Empty when the track is unknown.
Vector<String> SourceBuffer::bufferedSamplesForTrackID(const AtomicString& trackID)
{
    auto trackBufferIterator = m_trackBufferMap.find(trackID);
    if (trackBufferIterator == m_trackBufferMap.end())
        return Vector<String>();

    Vector<String> sampleDescriptions;
    for (auto& sampleEntry : trackBufferIterator->value.samples.decodeOrder())
        sampleDescriptions.append(toString(*sampleEntry.second));

    return sampleDescriptions;
}
}
#endif