LibWeb: Align AudioContext constructor with current spec steps

Tim Ledbetter 2025-01-09 15:17:45 +00:00 committed by Tim Ledbetter
parent c33d72bb41
commit f01ccf5424
Notes: github-actions[bot] 2025-01-16 13:57:27 +00:00
6 changed files with 341 additions and 53 deletions


@@ -8,6 +8,8 @@
#include <LibWeb/Bindings/Intrinsics.h>
#include <LibWeb/DOM/Event.h>
#include <LibWeb/HTML/HTMLMediaElement.h>
#include <LibWeb/HTML/MessageChannel.h>
#include <LibWeb/HTML/MessagePort.h>
#include <LibWeb/HTML/Scripting/TemporaryExecutionContext.h>
#include <LibWeb/HTML/Window.h>
#include <LibWeb/WebAudio/AudioContext.h>
@@ -19,29 +21,45 @@ namespace Web::WebAudio {
GC_DEFINE_ALLOCATOR(AudioContext);
// https://webaudio.github.io/web-audio-api/#dom-audiocontext-audiocontext
WebIDL::ExceptionOr<GC::Ref<AudioContext>> AudioContext::construct_impl(JS::Realm& realm, AudioContextOptions const& context_options)
WebIDL::ExceptionOr<GC::Ref<AudioContext>> AudioContext::construct_impl(JS::Realm& realm, Optional<AudioContextOptions> const& context_options)
{
auto context = realm.create<AudioContext>(realm, context_options);
context->m_destination = TRY(AudioDestinationNode::construct_impl(realm, context));
return context;
// If the current settings object's responsible document is NOT fully active, throw an InvalidStateError and abort these steps.
auto& settings = HTML::current_principal_settings_object();
// FIXME: Not all settings objects currently return a responsible document.
// Therefore we only fail this check if responsible document is not null.
if (!settings.responsible_document() || !settings.responsible_document()->is_fully_active()) {
return WebIDL::InvalidStateError::create(realm, "Document is not fully active"_string);
}
AudioContext::AudioContext(JS::Realm& realm, AudioContextOptions const& context_options)
: BaseAudioContext(realm)
{
// FIXME: If the current settings object's responsible document is NOT fully active, throw an InvalidStateError and abort these steps.
// AD-HOC: The spec doesn't currently require the sample rate to be validated here,
// but other browsers do perform a check and there is a WPT test that expects this.
if (context_options.has_value() && context_options->sample_rate.has_value())
TRY(verify_audio_options_inside_nominal_range(realm, *context_options->sample_rate));
// 1: Set a [[control thread state]] to suspended on the AudioContext.
BaseAudioContext::set_control_state(Bindings::AudioContextState::Suspended);
// 1. Let context be a new AudioContext object.
auto context = realm.create<AudioContext>(realm);
context->m_destination = TRY(AudioDestinationNode::construct_impl(realm, context));
// 2: Set a [[rendering thread state]] to suspended on the AudioContext.
BaseAudioContext::set_rendering_state(Bindings::AudioContextState::Suspended);
// 2. Set a [[control thread state]] to suspended on context.
context->set_control_state(Bindings::AudioContextState::Suspended);
// 3: Let [[pending resume promises]] be a slot on this AudioContext, that is an initially empty ordered list of promises.
// 3. Set a [[rendering thread state]] to suspended on context.
context->set_rendering_state(Bindings::AudioContextState::Suspended);
// 4: If contextOptions is given, apply the options:
// 4.1: Set the internal latency of this AudioContext according to contextOptions.latencyHint, as described in latencyHint.
switch (context_options.latency_hint) {
// FIXME: 4. Let messageChannel be a new MessageChannel.
// FIXME: 5. Let controlSidePort be the value of messageChannel's port1 attribute.
// FIXME: 6. Let renderingSidePort be the value of messageChannel's port2 attribute.
// FIXME: 7. Let serializedRenderingSidePort be the result of StructuredSerializeWithTransfer(renderingSidePort, « renderingSidePort »).
// FIXME: 8. Set this audioWorklet's port to controlSidePort.
// FIXME: 9. Queue a control message to set the MessagePort on the AudioContextGlobalScope, with serializedRenderingSidePort.
// 10. If contextOptions is given, apply the options:
if (context_options.has_value()) {
// 1. If sinkId is specified, let sinkId be the value of contextOptions.sinkId and run the following substeps:
// 2. Set the internal latency of context according to contextOptions.latencyHint, as described in latencyHint.
switch (context_options->latency_hint) {
case Bindings::AudioContextLatencyCategory::Balanced:
// FIXME: Determine optimal settings for balanced.
break;
@@ -55,34 +73,40 @@ AudioContext::AudioContext(JS::Realm& realm, AudioContextOptions const& context_
VERIFY_NOT_REACHED();
}
// 4.2: If contextOptions.sampleRate is specified, set the sampleRate of this AudioContext to this value. Otherwise,
// use the sample rate of the default output device. If the selected sample rate differs from the sample rate of the output device,
// this AudioContext MUST resample the audio output to match the sample rate of the output device.
if (context_options.sample_rate.has_value()) {
BaseAudioContext::set_sample_rate(context_options.sample_rate.value());
} else {
// FIXME: This would ideally be coming from the default output device, but we can only get this on Serenity
// For now we'll just have to resample
BaseAudioContext::set_sample_rate(44100);
// 3: If contextOptions.sampleRate is specified, set the sampleRate of context to this value.
if (context_options->sample_rate.has_value()) {
context->set_sample_rate(context_options->sample_rate.value());
}
// Otherwise, follow these substeps:
else {
// FIXME: 1. If sinkId is the empty string or a type of AudioSinkOptions, use the sample rate of the default output device. Abort these substeps.
// FIXME: 2. If sinkId is a DOMString, use the sample rate of the output device identified by sinkId. Abort these substeps.
// If contextOptions.sampleRate differs from the sample rate of the output device, the user agent MUST resample the audio output to match the sample rate of the output device.
context->set_sample_rate(44100);
}
}
// FIXME: 5: If the context is allowed to start, send a control message to start processing.
// FIXME: 11. If context is allowed to start, send a control message to start processing.
// FIXME: Implement control message queue to run following steps on the rendering thread
if (m_allowed_to_start) {
// FIXME: 5.1: Attempt to acquire system resources. In case of failure, abort the following steps.
if (context->m_allowed_to_start) {
// FIXME: 1. Let document be the current settings object's relevant global object's associated Document.
// FIXME: 2. Attempt to acquire system resources to use a following audio output device based on [[sink ID]] for rendering
// 5.2: Set the [[rendering thread state]] to "running" on the AudioContext.
BaseAudioContext::set_rendering_state(Bindings::AudioContextState::Running);
// 2. Set this [[rendering thread state]] to running on the AudioContext.
context->set_rendering_state(Bindings::AudioContextState::Running);
// 5.3: queue a media element task to execute the following steps:
queue_a_media_element_task(GC::create_function(heap(), [&realm, this]() {
// 5.3.1: Set the state attribute of the AudioContext to "running".
BaseAudioContext::set_control_state(Bindings::AudioContextState::Running);
// 3. Queue a media element task to execute the following steps:
context->queue_a_media_element_task(GC::create_function(context->heap(), [&realm, context]() {
// 1. Set the state attribute of the AudioContext to "running".
context->set_control_state(Bindings::AudioContextState::Running);
// 5.3.2: queue a media element task to fire an event named statechange at the AudioContext.
this->dispatch_event(DOM::Event::create(realm, HTML::EventNames::statechange));
// 2. Fire an event named statechange at the AudioContext.
context->dispatch_event(DOM::Event::create(realm, HTML::EventNames::statechange));
}));
}
// 12. Return context.
return context;
}
AudioContext::~AudioContext() = default;
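
For reference, a minimal usage sketch (hypothetical page script, not part of this commit) of what the rewritten constructor steps mean from the JavaScript side, assuming the document is fully active and the context is allowed to start:

// Illustrative only: shows the observable effect of the spec steps above.
const ctx = new AudioContext({ latencyHint: "interactive" });

// Steps 2-3 set both internal thread states to "suspended", so the state
// attribute reads "suspended" until the queued media element task runs.
console.log(ctx.state); // "suspended" immediately after construction

ctx.addEventListener("statechange", () => {
  // The queued media element task (step 11.3) sets state to "running"
  // and fires "statechange" at the context.
  console.log(ctx.state); // "running"
});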


@@ -28,7 +28,7 @@ class AudioContext final : public BaseAudioContext {
GC_DECLARE_ALLOCATOR(AudioContext);
public:
static WebIDL::ExceptionOr<GC::Ref<AudioContext>> construct_impl(JS::Realm&, AudioContextOptions const& context_options = {});
static WebIDL::ExceptionOr<GC::Ref<AudioContext>> construct_impl(JS::Realm&, Optional<AudioContextOptions> const& context_options = {});
virtual ~AudioContext() override;
@@ -40,7 +40,10 @@ public:
WebIDL::ExceptionOr<GC::Ref<WebIDL::Promise>> close();
private:
explicit AudioContext(JS::Realm&, AudioContextOptions const& context_options);
explicit AudioContext(JS::Realm& realm)
: BaseAudioContext(realm)
{
}
virtual void initialize(JS::Realm&) override;
virtual void visit_edges(Cell::Visitor&) override;


@@ -153,6 +153,14 @@ WebIDL::ExceptionOr<GC::Ref<PeriodicWave>> BaseAudioContext::create_periodic_wav
return PeriodicWave::construct_impl(realm(), *this, options);
}
WebIDL::ExceptionOr<void> BaseAudioContext::verify_audio_options_inside_nominal_range(JS::Realm& realm, float sample_rate)
{
if (sample_rate < MIN_SAMPLE_RATE || sample_rate > MAX_SAMPLE_RATE)
return WebIDL::NotSupportedError::create(realm, "Sample rate is outside of allowed range"_string);
return {};
}
// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createbuffer
WebIDL::ExceptionOr<void> BaseAudioContext::verify_audio_options_inside_nominal_range(JS::Realm& realm, WebIDL::UnsignedLong number_of_channels, WebIDL::UnsignedLong length, float sample_rate)
{
@@ -167,8 +175,7 @@ WebIDL::ExceptionOr<void> BaseAudioContext::verify_audio_options_inside_nominal_
if (length == 0)
return WebIDL::NotSupportedError::create(realm, "Length of buffer must be at least 1"_string);
if (sample_rate < MIN_SAMPLE_RATE || sample_rate > MAX_SAMPLE_RATE)
return WebIDL::NotSupportedError::create(realm, "Sample rate is outside of allowed range"_string);
TRY(verify_audio_options_inside_nominal_range(realm, sample_rate));
return {};
}
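
As a quick illustration of the new single-argument verify_audio_options_inside_nominal_range() overload, this is what the constructor now rejects and accepts from script (rates taken from the WPT results below; sketch only):

// Out-of-range rates are rejected up front with NotSupportedError.
try {
  new AudioContext({ sampleRate: 1 });
} catch (e) {
  console.log(e.name); // "NotSupportedError"
}

// A rate inside the nominal range constructs normally.
const ctx = new AudioContext({ sampleRate: 24000 });
console.log(ctx.sampleRate); // 24000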


@@ -57,6 +57,7 @@ public:
void set_control_state(Bindings::AudioContextState state) { m_control_thread_state = state; }
void set_rendering_state(Bindings::AudioContextState state) { m_rendering_thread_state = state; }
static WebIDL::ExceptionOr<void> verify_audio_options_inside_nominal_range(JS::Realm&, float sample_rate);
static WebIDL::ExceptionOr<void> verify_audio_options_inside_nominal_range(JS::Realm&, WebIDL::UnsignedLong number_of_channels, WebIDL::UnsignedLong length, float sample_rate);
WebIDL::ExceptionOr<GC::Ref<BiquadFilterNode>> create_biquad_filter();


@@ -0,0 +1,38 @@
Harness status: Error
Found 32 tests
27 Pass
5 Fail
Pass # AUDIT TASK RUNNER STARTED.
Pass Executing "test-audiocontextoptions-latencyHint-basic"
Fail Executing "test-audiocontextoptions-latencyHint-double"
Pass Executing "test-audiocontextoptions-sampleRate"
Pass Audit report
Pass > [test-audiocontextoptions-latencyHint-basic] Test creating contexts with basic latencyHint types.
Pass context = new AudioContext() did not throw an exception.
Pass context.sampleRate (44100 Hz) is greater than 0.
Pass default baseLatency is greater than or equal to 0.
Pass context = new AudioContext({'latencyHint': 'interactive'}) did not throw an exception.
Pass interactive baseLatency is equal to 0.
Pass context = new AudioContext({'latencyHint': 'balanced'}) did not throw an exception.
Pass balanced baseLatency is greater than or equal to 0.
Pass context = new AudioContext({'latencyHint': 'playback'}) did not throw an exception.
Pass playback baseLatency is greater than or equal to 0.
Pass < [test-audiocontextoptions-latencyHint-basic] All assertions passed. (total 9 assertions)
Pass > [test-audiocontextoptions-latencyHint-double] Test creating contexts with explicit latencyHint values.
Fail X context = new AudioContext({'latencyHint': interactiveLatency/2}) incorrectly threw TypeError: "Invalid value '0' for enumeration type 'AudioContextLatencyCategory'".
Pass double-constructor baseLatency small is less than or equal to 0.
Fail X context = new AudioContext({'latencyHint': validLatency}) incorrectly threw TypeError: "Invalid value '0' for enumeration type 'AudioContextLatencyCategory'".
Pass double-constructor baseLatency inrange 1 is greater than or equal to 0.
Pass double-constructor baseLatency inrange 2 is less than or equal to 0.
Fail X creating two high latency contexts incorrectly threw TypeError: "Invalid value '0' for enumeration type 'AudioContextLatencyCategory'".
Pass > [test-audiocontextoptions-sampleRate] Test creating contexts with non-default sampleRate values.
Pass context = new AudioContext({sampleRate: 1}) threw NotSupportedError: "Sample rate is outside of allowed range".
Pass context = new AudioContext({sampleRate: 1000000}) threw NotSupportedError: "Sample rate is outside of allowed range".
Pass context = new AudioContext({sampleRate: -1}) threw NotSupportedError: "Sample rate is outside of allowed range".
Pass context = new AudioContext({sampleRate: 0}) threw NotSupportedError: "Sample rate is outside of allowed range".
Pass context = new AudioContext({sampleRate: 24000}) did not throw an exception.
Pass sampleRate inrange is equal to 24000.
Pass < [test-audiocontextoptions-sampleRate] All assertions passed. (total 6 assertions)
Fail # AUDIT TASK RUNNER FINISHED: 1 out of 3 tasks were failed.
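
The remaining failures all involve passing a double for latencyHint. The Web Audio spec types AudioContextOptions.latencyHint as (AudioContextLatencyCategory or double), but judging by the TypeError messages above, the generated binding here still coerces the value to the AudioContextLatencyCategory enum, so numeric hints are rejected before the constructor steps run. A minimal reproduction (illustrative values):

// Throws TypeError in this build: the binding only accepts the enum.
new AudioContext({ latencyHint: 0.02 });

// Accepted: enum values pass through the binding.
new AudioContext({ latencyHint: "playback" });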


@@ -0,0 +1,215 @@
<!DOCTYPE html>
<html>
<head>
<title>
Test AudioContextOptions
</title>
<script src="../../../resources/testharness.js"></script>
<script src="../../../resources/testharnessreport.js"></script>
<script src="../../../webaudio/resources/audit.js"></script>
</head>
<body>
<script id="layout-test-code">
let context;
let defaultLatency;
let interactiveLatency;
let balancedLatency;
let playbackLatency;
let audit = Audit.createTaskRunner();
audit.define(
{
label: 'test-audiocontextoptions-latencyHint-basic',
description: 'Test creating contexts with basic latencyHint types.'
},
function(task, should) {
let closingPromises = [];
// Verify that an AudioContext can be created with default options.
should(function() {
context = new AudioContext()
}, 'context = new AudioContext()').notThrow();
should(context.sampleRate,
`context.sampleRate (${context.sampleRate} Hz)`).beGreaterThan(0);
defaultLatency = context.baseLatency;
should(defaultLatency, 'default baseLatency').beGreaterThanOrEqualTo(0);
// Verify that an AudioContext can be created with the expected
// latency types.
should(
function() {
context = new AudioContext({'latencyHint': 'interactive'})
},
'context = new AudioContext({\'latencyHint\': \'interactive\'})')
.notThrow();
interactiveLatency = context.baseLatency;
should(interactiveLatency, 'interactive baseLatency')
.beEqualTo(defaultLatency);
closingPromises.push(context.close());
should(
function() {
context = new AudioContext({'latencyHint': 'balanced'})
},
'context = new AudioContext({\'latencyHint\': \'balanced\'})')
.notThrow();
balancedLatency = context.baseLatency;
should(balancedLatency, 'balanced baseLatency')
.beGreaterThanOrEqualTo(interactiveLatency);
closingPromises.push(context.close());
should(
function() {
context = new AudioContext({'latencyHint': 'playback'})
},
'context = new AudioContext({\'latencyHint\': \'playback\'})')
.notThrow();
playbackLatency = context.baseLatency;
should(playbackLatency, 'playback baseLatency')
.beGreaterThanOrEqualTo(balancedLatency);
closingPromises.push(context.close());
Promise.all(closingPromises).then(function() {
task.done();
});
});
audit.define(
{
label: 'test-audiocontextoptions-latencyHint-double',
description:
'Test creating contexts with explicit latencyHint values.'
},
function(task, should) {
let closingPromises = [];
// Verify too small exact latency clamped to 'interactive'
should(
function() {
context =
new AudioContext({'latencyHint': interactiveLatency / 2})
},
'context = new AudioContext({\'latencyHint\': ' +
'interactiveLatency/2})')
.notThrow();
should(context.baseLatency, 'double-constructor baseLatency small')
.beLessThanOrEqualTo(interactiveLatency);
closingPromises.push(context.close());
// Verify that exact latency in range works as expected
let validLatency = (interactiveLatency + playbackLatency) / 2;
should(
function() {
context = new AudioContext({'latencyHint': validLatency})
},
'context = new AudioContext({\'latencyHint\': validLatency})')
.notThrow();
should(
context.baseLatency, 'double-constructor baseLatency inrange 1')
.beGreaterThanOrEqualTo(interactiveLatency);
should(
context.baseLatency, 'double-constructor baseLatency inrange 2')
.beLessThanOrEqualTo(playbackLatency);
closingPromises.push(context.close());
// Verify too big exact latency clamped to some value
let context1;
let context2;
should(function() {
context1 =
new AudioContext({'latencyHint': playbackLatency * 10});
context2 =
new AudioContext({'latencyHint': playbackLatency * 20});
}, 'creating two high latency contexts').notThrow();
should(context1.baseLatency, 'high latency context baseLatency')
.beEqualTo(context2.baseLatency);
should(context1.baseLatency, 'high latency context baseLatency')
.beGreaterThanOrEqualTo(interactiveLatency);
closingPromises.push(context1.close());
closingPromises.push(context2.close());
// Verify that invalid latencyHint values are rejected.
should(
function() {
context = new AudioContext({'latencyHint': 'foo'})
},
'context = new AudioContext({\'latencyHint\': \'foo\'})')
.throw(TypeError);
// Verify that no extra options can be passed into the
// AudioContextOptions.
should(
function() {
context = new AudioContext('latencyHint')
},
'context = new AudioContext(\'latencyHint\')')
.throw(TypeError);
Promise.all(closingPromises).then(function() {
task.done();
});
});
audit.define(
{
label: 'test-audiocontextoptions-sampleRate',
description:
'Test creating contexts with non-default sampleRate values.'
},
function(task, should) {
// A sampleRate of 1 is unlikely to be supported on any browser,
// test that this rate is rejected.
should(
() => {
context = new AudioContext({sampleRate: 1})
},
'context = new AudioContext({sampleRate: 1})')
.throw(DOMException, 'NotSupportedError');
// A sampleRate of 1,000,000 is unlikely to be supported on any
// browser, test that this rate is also rejected.
should(
() => {
context = new AudioContext({sampleRate: 1000000})
},
'context = new AudioContext({sampleRate: 1000000})')
.throw(DOMException, 'NotSupportedError');
// A negative sample rate should not be accepted
should(
() => {
context = new AudioContext({sampleRate: -1})
},
'context = new AudioContext({sampleRate: -1})')
.throw(DOMException, 'NotSupportedError');
// A null sample rate should not be accepted
should(
() => {
context = new AudioContext({sampleRate: 0})
},
'context = new AudioContext({sampleRate: 0})')
.throw(DOMException, 'NotSupportedError');
should(
() => {
context = new AudioContext({sampleRate: 24000})
},
'context = new AudioContext({sampleRate: 24000})')
.notThrow();
should(
context.sampleRate, 'sampleRate inrange')
.beEqualTo(24000);
context.close();
task.done();
});
audit.run();
</script>
</body>
</html>