#ifndef AudioContext_h
#define AudioContext_h
#include "ActiveDOMObject.h"
#include "AudioBus.h"
#include "AudioDestinationNode.h"
#include "EventListener.h"
#include "EventTarget.h"
#include "HRTFDatabaseLoader.h"
#include <wtf/HashSet.h>
#include <wtf/OwnPtr.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/RefPtr.h>
#include <wtf/Threading.h>
#include <wtf/Vector.h>
#include <wtf/text/AtomicStringHash.h>
namespace WebCore {
class ArrayBuffer;
class AudioBuffer;
class AudioBufferSourceNode;
class AudioChannelMerger;
class AudioChannelSplitter;
class AudioGainNode;
class AudioPannerNode;
class AudioListener;
class DelayNode;
class Document;
class LowPass2FilterNode;
class HighPass2FilterNode;
class ConvolverNode;
class DynamicsCompressorNode;
class RealtimeAnalyserNode;
class JavaScriptAudioNode;
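// AudioContext owns the audio rendering graph: it creates the various AudioNode types,
// manages their lifetime, and coordinates access to the graph between the main thread
// and the audio rendering thread via the locking scheme declared below.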
class AudioContext : public ActiveDOMObject, public RefCounted<AudioContext>, public EventTarget {
public:
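// create() returns a normal (realtime) context; createOfflineContext() returns a context
// that renders numberOfFrames of audio at the given sample rate into an in-memory buffer
// (see m_renderTarget) instead of to the audio hardware.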
static PassRefPtr<AudioContext> create(Document*);
static PassRefPtr<AudioContext> createOfflineContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, double sampleRate);
virtual ~AudioContext();
bool isInitialized() const;
bool isOfflineContext() { return m_isOfflineContext; }
bool isRunnable() const;
virtual void stop();
Document* document() const;
bool hasDocument();
AudioDestinationNode* destination() { return m_destinationNode.get(); }
double currentTime() { return m_destinationNode->currentTime(); }
double sampleRate() { return m_destinationNode->sampleRate(); }
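// Creates an empty buffer, or one initialized from the contents of an ArrayBuffer
// (optionally mixed down to mono).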
PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate);
PassRefPtr<AudioBuffer> createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono);
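// Keeps a reference to the buffer for the lifetime of the context (see m_allocatedBuffers).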
void refBuffer(PassRefPtr<AudioBuffer> buffer);
AudioListener* listener() { return m_listener.get(); }
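// Factory methods for the AudioNode subtypes; these are called on the main thread (from script).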
PassRefPtr<AudioBufferSourceNode> createBufferSource();
PassRefPtr<AudioGainNode> createGainNode();
PassRefPtr<DelayNode> createDelayNode();
PassRefPtr<LowPass2FilterNode> createLowPass2Filter();
PassRefPtr<HighPass2FilterNode> createHighPass2Filter();
PassRefPtr<AudioPannerNode> createPanner();
PassRefPtr<ConvolverNode> createConvolver();
PassRefPtr<DynamicsCompressorNode> createDynamicsCompressor();
PassRefPtr<RealtimeAnalyserNode> createAnalyser();
PassRefPtr<JavaScriptAudioNode> createJavaScriptNode(size_t bufferSize);
PassRefPtr<AudioChannelSplitter> createChannelSplitter();
PassRefPtr<AudioChannelMerger> createChannelMerger();
AudioBus* temporaryMonoBus() { return m_temporaryMonoBus.get(); }
AudioBus* temporaryStereoBus() { return m_temporaryStereoBus.get(); }
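// Called by a source node when it has finished playing, so the context can drop its
// reference to it (see derefFinishedSourceNodes()).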
void notifyNodeFinishedProcessing(AudioNode*);
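// Housekeeping performed by the rendering thread at the start and end of each render quantum.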
void handlePreRenderTasks();
void handlePostRenderTasks();
void derefFinishedSourceNodes();
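// Deferred deletion: nodes are first marked, then all marked nodes are deleted together
// later at a safe point rather than immediately.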
void markForDeletion(AudioNode*);
void deleteMarkedNodes();
void incrementConnectionCount()
{
ASSERT(isMainThread());
m_connectionCount++;
}
unsigned connectionCount() const { return m_connectionCount; }
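// Graph locking and thread identity. The rendering graph is shared between the main
// thread and the audio thread, so mutations must be made with the context lock held.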
void setAudioThread(ThreadIdentifier thread) { m_audioThread = thread; }
ThreadIdentifier audioThread() const { return m_audioThread; }
bool isAudioThread() const;
bool isAudioThreadFinished() { return m_isAudioThreadFinished; }
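// lock() acquires the graph lock, blocking if necessary; tryLock() returns true if the
// lock is held on return. In both cases mustReleaseLock reports whether this call actually
// took the lock (so the caller must call unlock()) or whether the calling thread already owned it.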
void lock(bool& mustReleaseLock);
bool tryLock(bool& mustReleaseLock);
void unlock();
bool isGraphOwner() const;
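// RAII helper which holds the graph lock for the lifetime of a scope.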
class AutoLocker {
public:
AutoLocker(AudioContext* context)
: m_context(context)
{
ASSERT(context);
context->lock(m_mustReleaseLock);
}
~AutoLocker()
{
if (m_mustReleaseLock)
m_context->unlock();
}
private:
AudioContext* m_context;
bool m_mustReleaseLock;
};
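// Typical usage (sketch):
//     AudioContext::AutoLocker locker(context);
//     ... mutate the rendering graph while the lock is held ...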
void addDeferredFinishDeref(AudioNode*, AudioNode::RefType);
void handleDeferredFinishDerefs();
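// Records inputs/outputs whose connections have changed so they can be brought up to date
// later (see handleDirtyAudioNodeInputs() / handleDirtyAudioNodeOutputs()).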
void markAudioNodeInputDirty(AudioNodeInput*);
void markAudioNodeOutputDirty(AudioNodeOutput*);
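// EventTarget interface.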
virtual ScriptExecutionContext* scriptExecutionContext() const;
virtual AudioContext* toAudioContext();
virtual EventTargetData* eventTargetData() { return &m_eventTargetData; }
virtual EventTargetData* ensureEventTargetData() { return &m_eventTargetData; }
DEFINE_ATTRIBUTE_EVENT_LISTENER(complete);
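// Reconcile ref()/deref(), which are defined in both RefCounted and EventTarget.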
using RefCounted<AudioContext>::ref;
using RefCounted<AudioContext>::deref;
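// Offline rendering: startRendering() begins rendering into m_renderTarget and
// fireCompletionEvent() dispatches the 'complete' event when rendering has finished.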
void startRendering();
void fireCompletionEvent();
private:
AudioContext(Document*);
AudioContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, double sampleRate);
void constructCommon();
void lazyInitialize();
void uninitialize();
bool m_isInitialized;
bool m_isAudioThreadFinished;
bool m_isAudioThreadShutdown;
Document* m_document;
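// Strong references the context holds to nodes that must be kept alive, e.g. source nodes
// that are still playing (see m_referencedNodes).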
void refNode(AudioNode*);
void derefNode(AudioNode*);
void derefUnfinishedSourceNodes();
RefPtr<AudioDestinationNode> m_destinationNode;
RefPtr<AudioListener> m_listener;
Vector<RefPtr<AudioBuffer> > m_allocatedBuffers;
Vector<AudioNode*> m_finishedNodes;
Vector<AudioNode*> m_referencedNodes;
Vector<AudioNode*> m_nodesToDelete;
HashSet<AudioNodeInput*> m_dirtyAudioNodeInputs;
HashSet<AudioNodeOutput*> m_dirtyAudioNodeOutputs;
void handleDirtyAudioNodeInputs();
void handleDirtyAudioNodeOutputs();
OwnPtr<AudioBus> m_temporaryMonoBus;
OwnPtr<AudioBus> m_temporaryStereoBus;
unsigned m_connectionCount;
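// Protects the rendering graph; m_graphOwnerThread records which thread currently holds
// the lock (see isGraphOwner()).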
Mutex m_contextGraphMutex;
volatile ThreadIdentifier m_audioThread;
volatile ThreadIdentifier m_graphOwnerThread;
struct RefInfo {
RefInfo(AudioNode* node, AudioNode::RefType refType)
: m_node(node)
, m_refType(refType)
{
}
AudioNode* m_node;
AudioNode::RefType m_refType;
};
Vector<RefInfo> m_deferredFinishDerefList;
RefPtr<HRTFDatabaseLoader> m_hrtfDatabaseLoader;
virtual void refEventTarget() { ref(); }
virtual void derefEventTarget() { deref(); }
EventTargetData m_eventTargetData;
RefPtr<AudioBuffer> m_renderTarget;
bool m_isOfflineContext;
};
} // namespace WebCore
#endif // AudioContext_h