#include "config.h"
#if ENABLE(WEB_AUDIO)
#include "AudioContext.h"
#include "AsyncAudioDecoder.h"
#include "AudioBuffer.h"
#include "AudioBufferCallback.h"
#include "AudioBufferSourceNode.h"
#include "AudioChannelMerger.h"
#include "AudioChannelSplitter.h"
#include "AudioGainNode.h"
#include "AudioListener.h"
#include "AudioNodeInput.h"
#include "AudioNodeOutput.h"
#include "AudioPannerNode.h"
#include "BiquadFilterNode.h"
#include "ConvolverNode.h"
#include "DefaultAudioDestinationNode.h"
#include "DelayNode.h"
#include "Document.h"
#include "DynamicsCompressorNode.h"
#include "ExceptionCode.h"
#include "FFTFrame.h"
#include "HRTFDatabaseLoader.h"
#include "HRTFPanner.h"
#include "JavaScriptAudioNode.h"
#include "OfflineAudioCompletionEvent.h"
#include "OfflineAudioDestinationNode.h"
#include "Oscillator.h"
#include "PlatformString.h"
#include "RealtimeAnalyserNode.h"
#include "ScriptCallStack.h"
#include "WaveShaperNode.h"
#include "WaveTable.h"
#if ENABLE(VIDEO)
#include "HTMLMediaElement.h"
#include "MediaElementAudioSourceNode.h"
#endif
#if DEBUG_AUDIONODE_REFERENCES
#include <stdio.h>
#endif
#if USE(GSTREAMER)
#include "GStreamerUtilities.h"
#endif
#include <wtf/ArrayBuffer.h>
#include <wtf/Atomics.h>
#include <wtf/MainThread.h>
#include <wtf/OwnPtr.h>
#include <wtf/PassOwnPtr.h>
#include <wtf/RefCounted.h>
// Sentinel thread id meaning "no thread currently owns the context graph lock";
// compared against by isGraphOwner() and reset in unlock().
const int UndefinedThreadIdentifier = 0xffffffff;

// Upper bound on node deletions per render quantum. NOTE(review): not referenced
// in this file — presumably consumed elsewhere; confirm before removing.
const unsigned MaxNodesToDeletePerQuantum = 10;
namespace WebCore {
namespace {

// Returns true when the requested context sample rate lies in the supported
// inclusive range [44100, 96000] Hz.
bool isSampleRateRangeGood(float sampleRate)
{
    const float minimumAllowedRate = 44100;
    const float maximumAllowedRate = 96000;
    return sampleRate >= minimumAllowedRate && sampleRate <= maximumAllowedRate;
}

} // anonymous namespace
// Cap on concurrently initialized realtime (hardware-backed) contexts;
// enforced in create() and maintained by lazyInitialize()/uninitialize().
const unsigned MaxHardwareContexts = 4;
unsigned AudioContext::s_hardwareContextCount = 0;
// Creates a realtime AudioContext for the given document.
// Raises SYNTAX_ERR (and returns 0) once MaxHardwareContexts realtime
// contexts already exist; previously this path returned 0 while leaving
// |ec| untouched, so script received null with no exception.
PassRefPtr<AudioContext> AudioContext::create(Document* document, ExceptionCode& ec)
{
    ASSERT(document);
    ASSERT(isMainThread());
    if (s_hardwareContextCount >= MaxHardwareContexts) {
        ec = SYNTAX_ERR;
        return 0;
    }

    RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(document)));
    audioContext->suspendIfNeeded();
    return audioContext.release();
}
// Creates an offline (render-to-buffer) AudioContext.
// Fails with SYNTAX_ERR for an unsupported channel count or sample rate, or
// when an HRTF database already loaded at a different rate would mismatch.
PassRefPtr<AudioContext> AudioContext::createOfflineContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec)
{
    ASSERT(document);

    HRTFDatabaseLoader* loader = HRTFDatabaseLoader::loader();
    if (numberOfChannels > 10 || !isSampleRateRangeGood(sampleRate) || (loader && loader->databaseSampleRate() != sampleRate)) {
        ec = SYNTAX_ERR;
        return 0;
    }

    // adoptRef() is required when wrapping a freshly new'ed RefCounted object;
    // constructing the RefPtr without it leaks one reference (and fires the
    // WTF adoption assertion in debug builds). Matches create() above.
    RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(document, numberOfChannels, numberOfFrames, sampleRate)));
    audioContext->suspendIfNeeded();
    return audioContext.release();
}
// Constructs a realtime AudioContext driven by the default (hardware)
// destination node.
AudioContext::AudioContext(Document* document)
    : ActiveDOMObject(document, this)
    , m_isInitialized(false)
    , m_isAudioThreadFinished(false)
    , m_document(document)
    , m_destinationNode(0)
    , m_isDeletionScheduled(false)
    , m_automaticPullNodesNeedUpdating(false)
    , m_connectionCount(0)
    , m_audioThread(0)
    , m_graphOwnerThread(UndefinedThreadIdentifier)
    , m_isOfflineContext(false)
    , m_activeSourceCount(0)
{
    constructCommon();

    m_destinationNode = DefaultAudioDestinationNode::create(this);

    // Kick off asynchronous loading of the HRTF database (if necessary) at
    // this context's sample rate.
    m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate());
}
// Constructs an offline AudioContext that renders into an in-memory target
// buffer instead of real audio hardware.
AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
    : ActiveDOMObject(document, this)
    , m_isInitialized(false)
    , m_isAudioThreadFinished(false)
    , m_document(document)
    , m_destinationNode(0)
    , m_isDeletionScheduled(false) // was missing: left uninitialized, later read by scheduleNodeDeletion()/deleteMarkedNodes()
    , m_automaticPullNodesNeedUpdating(false)
    , m_connectionCount(0)
    , m_audioThread(0)
    , m_graphOwnerThread(UndefinedThreadIdentifier)
    , m_isOfflineContext(true)
    , m_activeSourceCount(0)
{
    constructCommon();

    // Load the HRTF database (if necessary) at the requested offline rate.
    m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate);

    // Create the render target and the destination node that fills it.
    m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
}
// Initialization shared by the realtime and offline constructors.
void AudioContext::constructCommon()
{
#if USE(GSTREAMER)
    initializeGStreamer();
#endif

    // One-time FFT setup before any node can process.
    FFTFrame::initialize();

    m_listener = AudioListener::create();
}
AudioContext::~AudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
    printf("%p: AudioContext::~AudioContext()\n", this);
#endif
    // uninitialize() must have run (and drained all node bookkeeping) before
    // destruction; these asserts verify nothing is left dangling.
    ASSERT(!m_nodesToDelete.size());
    ASSERT(!m_referencedNodes.size());
    ASSERT(!m_finishedNodes.size());
    ASSERT(!m_automaticPullNodes.size());
    ASSERT(!m_renderingAutomaticPullNodes.size());
}
// Performs first-use initialization: starts the destination node (and, for
// realtime contexts, hardware rendering). Safe to call repeatedly; does
// nothing once initialized or after the audio thread has finished.
void AudioContext::lazyInitialize()
{
    if (m_isInitialized)
        return;

    // Don't allow the context to initialize after it has been uninitialized.
    ASSERT(!m_isAudioThreadFinished);
    if (m_isAudioThreadFinished)
        return;

    if (m_destinationNode.get()) {
        m_destinationNode->initialize();

        if (!isOfflineContext()) {
            // Realtime contexts start rendering immediately and count against
            // the hardware-context limit.
            m_destinationNode->startRendering();
            ++s_hardwareContextCount;
        }
    }
    m_isInitialized = true;
}
// Tears the context down on the main thread: stops the destination node,
// releases source-node references, and deletes any marked nodes.
void AudioContext::uninitialize()
{
    ASSERT(isMainThread());

    if (!m_isInitialized)
        return;

    // Keep ourselves alive for the duration of the teardown below.
    RefPtr<AudioContext> protect(this);

    m_destinationNode->uninitialize();

    // Prevent any later lazyInitialize() from re-initializing the context.
    m_isAudioThreadFinished = true;

    m_destinationNode.clear();

    if (!isOfflineContext()) {
        ASSERT(s_hardwareContextCount);
        --s_hardwareContextCount;
    }

    // Drop references to sources that never finished, then flush deletions.
    derefUnfinishedSourceNodes();
    deleteMarkedNodes();

    m_isInitialized = false;
}
// True once lazyInitialize() has run and uninitialize() has not.
bool AudioContext::isInitialized() const
{
    return m_isInitialized;
}
// The context can render only after initialization AND once the HRTF
// spatialization database has finished loading.
bool AudioContext::isRunnable() const
{
    return isInitialized() && m_hrtfDatabaseLoader->isLoaded();
}
// Main-thread trampoline for callOnMainThread(); |userData| is the
// AudioContext that requested teardown in stop().
void AudioContext::uninitializeDispatch(void* userData)
{
    // static_cast is sufficient (and preferred) for void* -> object pointer.
    AudioContext* context = static_cast<AudioContext*>(userData);
    ASSERT(context);
    if (context)
        context->uninitialize();
}
// ActiveDOMObject override: the document is going away. Drop the document
// pointer now and finish tearing down asynchronously on the main thread
// (uninitialize() asserts it runs there).
void AudioContext::stop()
{
    m_document = 0;
    callOnMainThread(uninitializeDispatch, this);
}
// Returns the owning document; asserts it is still alive (stop() nulls it).
Document* AudioContext::document() const
{
    ASSERT(m_document);
    return m_document;
}
// Non-asserting check for callers that may run after stop() (e.g. the
// offline completion event).
bool AudioContext::hasDocument()
{
    return m_document;
}
// Creates an empty AudioBuffer; raises SYNTAX_ERR when AudioBuffer::create()
// rejects the channel count, frame count, or sample rate.
PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec)
{
    RefPtr<AudioBuffer> buffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    if (buffer)
        return buffer.release();

    ec = SYNTAX_ERR;
    return 0;
}
// Synchronously decodes in-memory audio file data into an AudioBuffer at
// this context's sample rate, optionally mixing down to mono.
// Raises SYNTAX_ERR for a null buffer or undecodable data.
PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionCode& ec)
{
    ASSERT(arrayBuffer);
    if (!arrayBuffer) {
        ec = SYNTAX_ERR;
        return 0;
    }

    RefPtr<AudioBuffer> decoded = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
    if (!decoded) {
        ec = SYNTAX_ERR;
        return 0;
    }

    return decoded.release();
}
// Asynchronously decodes |audioData| at this context's sample rate; exactly
// one of the callbacks fires when decoding finishes. Raises SYNTAX_ERR only
// for null input.
void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionCode& ec)
{
    if (!audioData) {
        ec = SYNTAX_ERR;
        return;
    }
    m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
}
// Creates an AudioBufferSourceNode at the destination's sample rate.
PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
{
    ASSERT(isMainThread());
    lazyInitialize();

    RefPtr<AudioBufferSourceNode> sourceNode = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());

    // The context takes a connection reference to keep the source alive;
    // it is released later via derefNode()/derefFinishedSourceNodes().
    refNode(sourceNode.get());

    return sourceNode.release();
}
#if ENABLE(VIDEO)
// Wraps an HTMLMediaElement as an audio source node. Raises INVALID_STATE_ERR
// for a null element or one that already feeds a source node.
PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionCode& ec)
{
    ASSERT(mediaElement);
    if (!mediaElement) {
        ec = INVALID_STATE_ERR;
        return 0;
    }

    ASSERT(isMainThread());
    lazyInitialize();

    // A media element may be connected to at most one source node at a time.
    if (mediaElement->audioSourceNode()) {
        ec = INVALID_STATE_ERR;
        return 0;
    }

    RefPtr<MediaElementAudioSourceNode> sourceNode = MediaElementAudioSourceNode::create(this, mediaElement);

    mediaElement->setAudioSourceNode(sourceNode.get());

    // The context keeps a connection reference to the new source node.
    refNode(sourceNode.get());
    return sourceNode.release();
}
#endif
// Convenience overload: defaults to 2 input and 2 output channels.
PassRefPtr<JavaScriptAudioNode> AudioContext::createJavaScriptNode(size_t bufferSize, ExceptionCode& ec)
{
    return createJavaScriptNode(bufferSize, 2, 2, ec);
}
// Convenience overload: defaults to 2 output channels.
PassRefPtr<JavaScriptAudioNode> AudioContext::createJavaScriptNode(size_t bufferSize, size_t numberOfInputChannels, ExceptionCode& ec)
{
    return createJavaScriptNode(bufferSize, numberOfInputChannels, 2, ec);
}
// Creates a script-processing node. Raises SYNTAX_ERR when the factory
// rejects the buffer size or channel counts (returns null).
PassRefPtr<JavaScriptAudioNode> AudioContext::createJavaScriptNode(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionCode& ec)
{
    ASSERT(isMainThread());
    lazyInitialize();

    RefPtr<JavaScriptAudioNode> scriptNode = JavaScriptAudioNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);

    if (!scriptNode) {
        ec = SYNTAX_ERR;
        return 0;
    }

    // The context takes a connection reference to keep the node alive.
    refNode(scriptNode.get());
    return scriptNode.release();
}
// Creates a BiquadFilterNode at the destination's sample rate. Unlike source
// nodes, no refNode() reference is taken.
PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
}
// Creates a WaveShaperNode (no sample rate needed by its factory).
PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return WaveShaperNode::create(this);
}
// Creates an AudioPannerNode at the destination's sample rate.
PassRefPtr<AudioPannerNode> AudioContext::createPanner()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return AudioPannerNode::create(this, m_destinationNode->sampleRate());
}
// Creates a ConvolverNode at the destination's sample rate.
PassRefPtr<ConvolverNode> AudioContext::createConvolver()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return ConvolverNode::create(this, m_destinationNode->sampleRate());
}
// Creates a DynamicsCompressorNode at the destination's sample rate.
PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
}
// Creates a RealtimeAnalyserNode at the destination's sample rate.
PassRefPtr<RealtimeAnalyserNode> AudioContext::createAnalyser()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return RealtimeAnalyserNode::create(this, m_destinationNode->sampleRate());
}
// Creates an AudioGainNode at the destination's sample rate.
PassRefPtr<AudioGainNode> AudioContext::createGainNode()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return AudioGainNode::create(this, m_destinationNode->sampleRate());
}
// Convenience overload: creates a delay node with a 1-second maximum delay.
PassRefPtr<DelayNode> AudioContext::createDelayNode()
{
    const double defaultMaxDelayTime = 1;
    return createDelayNode(defaultMaxDelayTime);
}
// Creates a DelayNode with the given maximum delay, at the destination's
// sample rate.
PassRefPtr<DelayNode> AudioContext::createDelayNode(double maxDelayTime)
{
    ASSERT(isMainThread());
    lazyInitialize();
    return DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime);
}
// Convenience overload: splitter with the default 6 outputs.
PassRefPtr<AudioChannelSplitter> AudioContext::createChannelSplitter(ExceptionCode& ec)
{
    const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
    return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, ec);
}
// Creates a channel splitter. Raises SYNTAX_ERR when the factory rejects
// |numberOfOutputs| (returns null).
PassRefPtr<AudioChannelSplitter> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionCode& ec)
{
    ASSERT(isMainThread());
    lazyInitialize();

    RefPtr<AudioChannelSplitter> splitter = AudioChannelSplitter::create(this, m_destinationNode->sampleRate(), numberOfOutputs);

    if (!splitter) {
        ec = SYNTAX_ERR;
        return 0;
    }

    return splitter.release();
}
// Convenience overload: merger with the default 6 inputs.
PassRefPtr<AudioChannelMerger> AudioContext::createChannelMerger(ExceptionCode& ec)
{
    const unsigned ChannelMergerDefaultNumberOfInputs = 6;
    return createChannelMerger(ChannelMergerDefaultNumberOfInputs, ec);
}
// Creates a channel merger. Raises SYNTAX_ERR when the factory rejects
// |numberOfInputs| (returns null).
PassRefPtr<AudioChannelMerger> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionCode& ec)
{
    ASSERT(isMainThread());
    lazyInitialize();

    RefPtr<AudioChannelMerger> merger = AudioChannelMerger::create(this, m_destinationNode->sampleRate(), numberOfInputs);

    if (!merger) {
        ec = SYNTAX_ERR;
        return 0;
    }

    return merger.release();
}
// Creates an Oscillator source node at the destination's sample rate.
PassRefPtr<Oscillator> AudioContext::createOscillator()
{
    ASSERT(isMainThread());
    lazyInitialize();

    RefPtr<Oscillator> oscillator = Oscillator::create(this, m_destinationNode->sampleRate());

    // Source nodes get a connection reference from the context (released
    // later via derefNode()).
    refNode(oscillator.get());

    return oscillator.release();
}
// Creates a WaveTable from real/imaginary coefficient arrays.
// Raises SYNTAX_ERR when either array is null or their lengths differ.
PassRefPtr<WaveTable> AudioContext::createWaveTable(Float32Array* real, Float32Array* imag, ExceptionCode& ec)
{
    ASSERT(isMainThread());

    bool argumentsAreValid = real && imag && real->length() == imag->length();
    if (!argumentsAreValid) {
        ec = SYNTAX_ERR;
        return 0;
    }

    lazyInitialize();
    return WaveTable::create(sampleRate(), real, imag);
}
// Called on the audio thread when a node reports completion; queued nodes are
// dereffed later in derefFinishedSourceNodes().
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_finishedNodes.append(node);
}
void AudioContext::derefFinishedSourceNodes()
{
ASSERT(isGraphOwner());
ASSERT(isAudioThread() || isAudioThreadFinished());
for (unsigned i = 0; i < m_finishedNodes.size(); i++)
derefNode(m_finishedNodes[i]);
m_finishedNodes.clear();
}
// Takes a connection reference to |node| and tracks it so the context can
// release it later (derefNode / derefUnfinishedSourceNodes). Holds the graph
// lock while mutating the list.
void AudioContext::refNode(AudioNode* node)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);

    node->ref(AudioNode::RefTypeConnection);
    m_referencedNodes.append(node);
}
// Releases the connection reference taken in refNode() and removes the node
// from the tracking list. Caller must hold the graph lock.
void AudioContext::derefNode(AudioNode* node)
{
    ASSERT(isGraphOwner());

    node->deref(AudioNode::RefTypeConnection);

    for (size_t index = 0; index < m_referencedNodes.size(); ++index) {
        if (m_referencedNodes[index] == node) {
            m_referencedNodes.remove(index);
            break;
        }
    }
}
void AudioContext::derefUnfinishedSourceNodes()
{
ASSERT(isMainThread() && isAudioThreadFinished());
for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);
m_referencedNodes.clear();
}
// Acquires the graph mutex on the main thread. Re-entrant via the
// owner-thread check: if this thread already owns the lock, we don't lock
// again and tell the caller not to release (|mustReleaseLock| = false).
void AudioContext::lock(bool& mustReleaseLock)
{
    ASSERT(isMainThread());

    ThreadIdentifier thisThread = currentThread();

    if (thisThread == m_graphOwnerThread) {
        // We already have the lock.
        mustReleaseLock = false;
    } else {
        // Acquire the lock and record ownership.
        m_contextGraphMutex.lock();
        m_graphOwnerThread = thisThread;
        mustReleaseLock = true;
    }
}
// Non-blocking graph lock attempt for the audio thread. Returns true (and
// sets |mustReleaseLock|) when the lock is held on return.
bool AudioContext::tryLock(bool& mustReleaseLock)
{
    ThreadIdentifier thisThread = currentThread();
    bool isAudioThread = thisThread == audioThread();

    // Catch cases of calling tryLock() from a non-audio thread - such callers
    // should use regular lock() instead.
    ASSERT(isAudioThread || isAudioThreadFinished());

    if (!isAudioThread) {
        // In a release build (where the ASSERT above is compiled out), fall
        // back to a blocking lock() - the best we can do here.
        lock(mustReleaseLock);
        return true;
    }

    bool hasLock;

    if (thisThread == m_graphOwnerThread) {
        // This thread already owns the lock; don't release it on exit.
        hasLock = true;
        mustReleaseLock = false;
    } else {
        // Attempt the lock without blocking; record ownership on success.
        hasLock = m_contextGraphMutex.tryLock();
        if (hasLock)
            m_graphOwnerThread = thisThread;
        mustReleaseLock = hasLock;
    }

    return hasLock;
}
// Releases the graph lock. The owner id is reset BEFORE unlocking so no other
// thread can observe itself as owner while the mutex is still held.
void AudioContext::unlock()
{
    ASSERT(currentThread() == m_graphOwnerThread);

    m_graphOwnerThread = UndefinedThreadIdentifier;
    m_contextGraphMutex.unlock();
}
// True when the caller is running on the rendering (audio) thread.
bool AudioContext::isAudioThread() const
{
    return currentThread() == m_audioThread;
}
// True when the calling thread currently holds the graph lock.
bool AudioContext::isGraphOwner() const
{
    return currentThread() == m_graphOwnerThread;
}
// Queues a finishDeref() from the audio thread; the queue is drained in
// handleDeferredFinishDerefs() while the graph lock is held.
void AudioContext::addDeferredFinishDeref(AudioNode* node, AudioNode::RefType refType)
{
    ASSERT(isAudioThread());
    m_deferredFinishDerefList.append(AudioContext::RefInfo(node, refType));
}
// Audio-thread housekeeping before rendering a quantum. Uses tryLock() so
// rendering never blocks on the main thread; if the lock isn't available the
// work is simply skipped this quantum.
void AudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());

    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Fixup the state of any dirty AudioNodeInputs/Outputs and the
        // rendering copy of the automatic pull node list.
        handleDirtyAudioNodeInputs();
        handleDirtyAudioNodeOutputs();
        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
// Audio-thread housekeeping after rendering a quantum: drains deferred
// derefs, releases finished sources, schedules node deletion, and refreshes
// dirty state. Same non-blocking tryLock() strategy as handlePreRenderTasks().
void AudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());

    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        handleDeferredFinishDerefs();
        derefFinishedSourceNodes();

        // Hand marked nodes over to the main thread for actual deletion.
        scheduleNodeDeletion();

        handleDirtyAudioNodeInputs();
        handleDirtyAudioNodeOutputs();
        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
void AudioContext::handleDeferredFinishDerefs()
{
ASSERT(isAudioThread() && isGraphOwner());
for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) {
AudioNode* node = m_deferredFinishDerefList[i].m_node;
AudioNode::RefType refType = m_deferredFinishDerefList[i].m_refType;
node->finishDeref(refType);
}
m_deferredFinishDerefList.clear();
}
// Queues |node| for deletion on the main thread and drops it from the
// automatic pull set so rendering never touches a dying node.
void AudioContext::markForDeletion(AudioNode* node)
{
    ASSERT(isGraphOwner());
    m_nodesToDelete.append(node);

    // This is probably the best time for us to remove the node from automatic pull list,
    // since all connections are gone and we hold the graph lock.
    removeAutomaticPullNode(node);
}
// Asks the main thread to run deleteMarkedNodes(). Called with the graph
// lock held (typically from handlePostRenderTasks()).
void AudioContext::scheduleNodeDeletion()
{
    bool isGood = m_isInitialized && isGraphOwner();
    ASSERT(isGood);
    if (!isGood)
        return;

    // Make sure at most one deletion dispatch is in flight at a time.
    if (m_nodesToDelete.size() && !m_isDeletionScheduled) {
        m_isDeletionScheduled = true;

        // Keep |this| alive until the dispatch runs; deleteMarkedNodesDispatch()
        // balances this with deref().
        ref();

        callOnMainThread(deleteMarkedNodesDispatch, this);
    }
}
// Main-thread trampoline: deletes marked nodes, then drops the keep-alive
// reference taken in scheduleNodeDeletion().
void AudioContext::deleteMarkedNodesDispatch(void* userData)
{
    // static_cast is sufficient for void* -> object pointer conversion.
    AudioContext* context = static_cast<AudioContext*>(userData);
    ASSERT(context);
    if (!context)
        return;

    context->deleteMarkedNodes();
    context->deref();
}
// Deletes every node queued by markForDeletion(). Runs on the main thread
// holding the graph lock so the audio thread cannot observe dying nodes.
void AudioContext::deleteMarkedNodes()
{
    ASSERT(isMainThread());
    AutoLocker locker(this);

    // Re-reads size() each iteration, so entries appended while deleting are
    // also processed.
    while (size_t n = m_nodesToDelete.size()) {
        AudioNode* node = m_nodesToDelete[n - 1];
        m_nodesToDelete.removeLast();

        // Before deleting the node, purge its inputs/outputs from the dirty
        // sets so later dirty-handling never dereferences freed memory.
        unsigned numberOfInputs = node->numberOfInputs();
        for (unsigned i = 0; i < numberOfInputs; ++i)
            m_dirtyAudioNodeInputs.remove(node->input(i));

        unsigned numberOfOutputs = node->numberOfOutputs();
        for (unsigned i = 0; i < numberOfOutputs; ++i)
            m_dirtyAudioNodeOutputs.remove(node->output(i));

        delete node;
    }

    m_isDeletionScheduled = false;
}
// Records that |input|'s connections changed; processed later by
// handleDirtyAudioNodeInputs() on the audio thread.
void AudioContext::markAudioNodeInputDirty(AudioNodeInput* input)
{
    ASSERT(isGraphOwner());
    m_dirtyAudioNodeInputs.add(input);
}
// Records that |output|'s connections changed; processed later by
// handleDirtyAudioNodeOutputs() on the audio thread.
void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
    ASSERT(isGraphOwner());
    m_dirtyAudioNodeOutputs.add(output);
}
void AudioContext::handleDirtyAudioNodeInputs()
{
ASSERT(isGraphOwner());
for (HashSet<AudioNodeInput*>::iterator i = m_dirtyAudioNodeInputs.begin(); i != m_dirtyAudioNodeInputs.end(); ++i)
(*i)->updateRenderingState();
m_dirtyAudioNodeInputs.clear();
}
void AudioContext::handleDirtyAudioNodeOutputs()
{
ASSERT(isGraphOwner());
for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin(); i != m_dirtyAudioNodeOutputs.end(); ++i)
(*i)->updateRenderingState();
m_dirtyAudioNodeOutputs.clear();
}
// Registers a node that must be pulled every quantum even without a
// connection to the destination. Idempotent; flags the rendering copy stale.
void AudioContext::addAutomaticPullNode(AudioNode* node)
{
    ASSERT(isGraphOwner());

    if (m_automaticPullNodes.contains(node))
        return;

    m_automaticPullNodes.add(node);
    m_automaticPullNodesNeedUpdating = true;
}
// Unregisters a node from the automatic pull set. Idempotent; flags the
// rendering copy stale.
void AudioContext::removeAutomaticPullNode(AudioNode* node)
{
    ASSERT(isGraphOwner());

    if (!m_automaticPullNodes.contains(node))
        return;

    m_automaticPullNodes.remove(node);
    m_automaticPullNodesNeedUpdating = true;
}
void AudioContext::updateAutomaticPullNodes()
{
ASSERT(isGraphOwner());
if (m_automaticPullNodesNeedUpdating) {
m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
unsigned j = 0;
for (HashSet<AudioNode*>::iterator i = m_automaticPullNodes.begin(); i != m_automaticPullNodes.end(); ++i, ++j) {
AudioNode* output = *i;
m_renderingAutomaticPullNodes[j] = output;
}
m_automaticPullNodesNeedUpdating = false;
}
}
// Pulls each registered automatic pull node for this render quantum. Iterates
// the rendering copy, so it needs no graph lock on the audio thread.
void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
{
    ASSERT(isAudioThread());

    for (unsigned i = 0; i < m_renderingAutomaticPullNodes.size(); ++i)
        m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess);
}
// EventTarget override: the DOM interface name for this object.
const AtomicString& AudioContext::interfaceName() const
{
    return eventNames().interfaceForAudioContext;
}
// EventTarget override: events are dispatched in the document's context.
ScriptExecutionContext* AudioContext::scriptExecutionContext() const
{
    return document();
}
// Begins rendering by starting the destination node (used for offline
// contexts; realtime contexts start in lazyInitialize()).
void AudioContext::startRendering()
{
    destination()->startRendering();
}
// Dispatches the OfflineAudioCompletionEvent carrying the rendered buffer.
// Must run on the main thread; silently bails if the render target is gone.
void AudioContext::fireCompletionEvent()
{
    ASSERT(isMainThread());
    if (!isMainThread())
        return;

    AudioBuffer* renderedBuffer = m_renderTarget.get();

    ASSERT(renderedBuffer);
    if (!renderedBuffer)
        return;

    // Don't fire if the document has already gone away (stop() nulls it).
    if (hasDocument()) {
        dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
    }
}
// Atomically bumps the count of currently-active source nodes (may be called
// from multiple threads, hence the atomic op).
void AudioContext::incrementActiveSourceCount()
{
    atomicIncrement(&m_activeSourceCount);
}
// Atomically decrements the count of currently-active source nodes.
void AudioContext::decrementActiveSourceCount()
{
    atomicDecrement(&m_activeSourceCount);
}
}
#endif // ENABLE(WEB_AUDIO)