LibWeb/WebAudio: Define and partially implement AnalyserNode
https://webaudio.github.io/web-audio-api/#AnalyserNode

Most of the interface is naively implemented. Container types probably need to be adjusted (Vector<f32> is used for all the processing).

A Fourier transform is needed, but that's waiting on either a third-party library or a complex number type. There are also lots of simple miscellaneous filters that need to be applied. It could be reasonable to implement them from scratch, supposing they can be parallelized; it might be hard to find one library with everything. Not my call though.

Some additional scaffolding around blocks and render quanta is probably needed before this is developed much further, and that probably belongs at the level of AudioNode.

Co-authored-by: Tim Ledbetter <tim.ledbetter@ladybird.org>
This commit is contained in:
parent 50445dc9ef
commit 6c6bf322ea

Notes:
github-actions[bot] 2025-01-17 09:15:57 +00:00
Author: https://github.com/tcl3
Commit: https://github.com/LadybirdBrowser/ladybird/commit/6c6bf322eac
Pull-request: https://github.com/LadybirdBrowser/ladybird/pull/3265
16 changed files with 911 additions and 5 deletions
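The commit message above notes that a Fourier transform is still missing and is blocked on either a third-party library or a complex number type. Purely as an illustration (not part of this commit, using std::complex and a made-up helper name dft_magnitudes), a naive O(N^2) DFT that would yield the per-bin magnitudes smoothing_over_time() needs could look like this:

#include <cmath>
#include <complex>
#include <cstddef>
#include <vector>

// Hypothetical sketch: naive DFT returning |X[k]| for each bin. A real FFT
// (or an AK complex type) would replace this; it only illustrates the math.
static std::vector<float> dft_magnitudes(std::vector<float> const& samples)
{
    size_t const N = samples.size();
    double const pi = 3.14159265358979323846;
    std::vector<float> magnitudes(N);
    for (size_t k = 0; k < N; ++k) {
        std::complex<double> sum { 0.0, 0.0 };
        for (size_t n = 0; n < N; ++n) {
            double const angle = -2.0 * pi * double(k) * double(n) / double(N);
            sum += double(samples[n]) * std::complex<double> { std::cos(angle), std::sin(angle) };
        }
        magnitudes[k] = float(std::abs(sum));
    }
    return magnitudes;
}

The outer loop over k is embarrassingly parallel, which is the property the commit message alludes to when it suggests a from-scratch, parallelizable implementation.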
@@ -786,6 +786,7 @@ set(SOURCES
     WebAssembly/Module.cpp
     WebAssembly/Table.cpp
     WebAssembly/WebAssembly.cpp
+    WebAudio/AnalyserNode.cpp
     WebAudio/AudioBuffer.cpp
     WebAudio/AudioBufferSourceNode.cpp
     WebAudio/AudioContext.cpp
340 Libraries/LibWeb/WebAudio/AnalyserNode.cpp Normal file
@@ -0,0 +1,340 @@
/*
 * Copyright (c) 2024, Noah Bright <noah.bright.1@gmail.com>
 * Copyright (c) 2025, Tim Ledbetter <tim.ledbetter@ladybird.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/ByteBuffer.h>
#include <AK/Math.h>
#include <AK/Vector.h>
#include <LibJS/Runtime/ArrayBuffer.h>
#include <LibJS/Runtime/TypedArray.h>
#include <LibWeb/Bindings/AnalyserNodePrototype.h>
#include <LibWeb/Bindings/Intrinsics.h>
#include <LibWeb/WebAudio/AnalyserNode.h>
#include <LibWeb/WebIDL/Buffers.h>
#include <LibWeb/WebIDL/DOMException.h>

namespace Web::WebAudio {

GC_DEFINE_ALLOCATOR(AnalyserNode);

AnalyserNode::AnalyserNode(JS::Realm& realm, GC::Ref<BaseAudioContext> context, AnalyserOptions const& options)
    : AudioNode(realm, context)
    , m_fft_size(options.fft_size)
    , m_max_decibels(options.max_decibels)
    , m_min_decibels(options.min_decibels)
    , m_smoothing_time_constant(options.smoothing_time_constant)
{
}

AnalyserNode::~AnalyserNode() = default;

WebIDL::ExceptionOr<GC::Ref<AnalyserNode>> AnalyserNode::create(JS::Realm& realm, GC::Ref<BaseAudioContext> context, AnalyserOptions const& options)
{
    return construct_impl(realm, context, options);
}

// https://webaudio.github.io/web-audio-api/#current-time-domain-data
Vector<f32> AnalyserNode::current_time_domain_data()
{
    dbgln("FIXME: Analyser node: implement current time domain data");
    // The input signal must be down-mixed to mono as if channelCount is 1, channelCountMode is "max" and channelInterpretation is "speakers".
    // This is independent of the settings for the AnalyserNode itself.
    // The most recent fftSize frames are used for the down-mixing operation.

    // FIXME: definition of "input signal" above unclear
    // need to implement up/down mixing somewhere
    // https://webaudio.github.io/web-audio-api/#channel-up-mixing-and-down-mixing
    Vector<f32> result;
    result.resize(m_fft_size);
    return result;
}

// https://webaudio.github.io/web-audio-api/#blackman-window
Vector<f32> AnalyserNode::apply_a_blackman_window(Vector<f32> const& x) const
{
    f32 const a = 0.16;
    f32 const a0 = 0.5f * (1 - a);
    f32 const a1 = 0.5;
    f32 const a2 = a * 0.5f;
    unsigned long const N = m_fft_size;

    auto w = [&](unsigned long n) {
        return a0 - a1 * cos(2 * AK::Pi<f32> * (f32)n / (f32)N) + a2 * cos(4 * AK::Pi<f32> * (f32)n / (f32)N);
    };

    Vector<f32> x_hat;
    x_hat.resize(m_fft_size);

    // FIXME: Naive
    for (unsigned long i = 0; i < m_fft_size; i++) {
        x_hat[i] = x[i] * w(i);
    }

    return x_hat;
}

// https://webaudio.github.io/web-audio-api/#fourier-transform
static Vector<f32> apply_a_fourier_transform(Vector<f32> const& input)
{
    dbgln("FIXME: Analyser node: implement apply a fourier transform");
    auto result = Vector<f32>();
    result.resize(input.size());
    return result;
}

// https://webaudio.github.io/web-audio-api/#smoothing-over-time
Vector<f32> AnalyserNode::smoothing_over_time(Vector<f32> const& current_block)
{
    auto X = apply_a_fourier_transform(current_block);

    // FIXME: Naive
    Vector<f32> result;
    result.ensure_capacity(m_fft_size);
    for (unsigned long i = 0; i < m_fft_size; i++) {
        // FIXME: Complex modulus on X[i]
        result.unchecked_append(m_smoothing_time_constant * m_previous_block[i] + (1.f - m_smoothing_time_constant) * abs(X[i]));
    }

    m_previous_block = result;

    return result;
}

// https://webaudio.github.io/web-audio-api/#conversion-to-db
Vector<f32> AnalyserNode::conversion_to_dB(Vector<f32> const& X_hat) const
{
    Vector<f32> result;
    result.ensure_capacity(X_hat.size());
    // FIXME: Naive
    for (auto x : X_hat)
        result.unchecked_append(20.0f * AK::log(x));

    return result;
}

// https://webaudio.github.io/web-audio-api/#current-frequency-data
Vector<f32> AnalyserNode::current_frequency_data()
{
    // 1. Compute the current time-domain data.
    auto current_time_domain_dat = current_time_domain_data();

    // 2. Apply a Blackman window to the time domain input data.
    auto blackman_windowed_input = apply_a_blackman_window(current_time_domain_dat);

    // 3. Apply a Fourier transform to the windowed time domain input data to get real and imaginary frequency data.
    auto frequency_domain_dat = apply_a_fourier_transform(blackman_windowed_input);

    // 4. Smooth over time the frequency domain data.
    auto smoothed_data = smoothing_over_time(frequency_domain_dat);

    // 5. Convert to dB.
    return conversion_to_dB(smoothed_data);
}

// https://webaudio.github.io/web-audio-api/#dom-analysernode-getfloatfrequencydata
WebIDL::ExceptionOr<void> AnalyserNode::get_float_frequency_data(GC::Root<WebIDL::BufferSource> const& array)
{

    // Write the current frequency data into array. If array has fewer elements than the frequencyBinCount,
    // the excess elements will be dropped. If array has more elements than the frequencyBinCount, the
    // excess elements will be ignored. The most recent fftSize frames are used in computing the frequency data.
    auto const frequency_data = current_frequency_data();

    // FIXME: If another call to getFloatFrequencyData() or getByteFrequencyData() occurs within the same render
    // quantum as a previous call, the current frequency data is not updated with the same data. Instead, the
    // previously computed data is returned.

    auto& vm = this->vm();

    if (!is<JS::Float32Array>(*array->raw_object()))
        return vm.throw_completion<JS::TypeError>(JS::ErrorType::NotAnObjectOfType, "Float32Array");
    auto& output_array = static_cast<JS::Float32Array&>(*array->raw_object());

    size_t floats_to_write = min(output_array.data().size(), frequency_bin_count());
    for (size_t i = 0; i < floats_to_write; i++) {
        output_array.data()[i] = frequency_data[i];
    }

    return {};
}

// https://webaudio.github.io/web-audio-api/#dom-analysernode-getbytefrequencydata
WebIDL::ExceptionOr<void> AnalyserNode::get_byte_frequency_data(GC::Root<WebIDL::BufferSource> const& array)
{
    // FIXME: If another call to getByteFrequencyData() or getFloatFrequencyData() occurs within the same render
    // quantum as a previous call, the current frequency data is not updated with the same data. Instead,
    // the previously computed data is returned.
    // Need to implement some kind of blocking mechanism, I guess
    // Might be more obvious how to handle this when render quanta have some
    // more scaffolding
    //

    // current_frequency_data returns a vector of size m_fft_size
    // FIXME: Ensure sizes are correct after the fourier transform is implemented
    // Spec says to write frequencyBinCount bytes, not fftSize
    Vector<f32> dB_data = current_frequency_data();
    Vector<u8> byte_data;
    byte_data.ensure_capacity(dB_data.size());

    // For getByteFrequencyData(), the 𝑌[𝑘] is clipped to lie between minDecibels and maxDecibels
    // and then scaled to fit in an unsigned byte such that minDecibels is represented by the
    // value 0 and maxDecibels is represented by the value 255.
    // FIXME: Naive
    f32 delta_dB = m_max_decibels - m_min_decibels;
    for (auto x : dB_data) {
        x = max(x, m_min_decibels);
        x = min(x, m_max_decibels);

        byte_data.unchecked_append(static_cast<u8>(255 * (x - m_min_decibels) / delta_dB));
    }

    // Write the current frequency data into array. If array's byte length is less than frequencyBinCount,
    // the excess elements will be dropped. If array's byte length is greater than frequencyBinCount,
    // the excess elements will be ignored. The most recent fftSize frames are used in computing the frequency data.
    auto& output_buffer = array->viewed_array_buffer()->buffer();
    size_t bytes_to_write = min(array->byte_length(), frequency_bin_count());

    for (size_t i = 0; i < bytes_to_write; i++)
        output_buffer[i] = byte_data[i];

    return {};
}

// https://webaudio.github.io/web-audio-api/#dom-analysernode-getfloattimedomaindata
WebIDL::ExceptionOr<void> AnalyserNode::get_float_time_domain_data(GC::Root<WebIDL::BufferSource> const& array)
{
    // Write the current time-domain data (waveform data) into array. If array has fewer elements than the
    // value of fftSize, the excess elements will be dropped. If array has more elements than the value of
    // fftSize, the excess elements will be ignored. The most recent fftSize frames are written (after downmixing).

    Vector<f32> time_domain_data = current_time_domain_data();

    auto& vm = this->vm();

    if (!is<JS::Float32Array>(*array->raw_object()))
        return vm.throw_completion<JS::TypeError>(JS::ErrorType::NotAnObjectOfType, "Float32Array");
    auto& output_array = static_cast<JS::Float32Array&>(*array->raw_object());

    size_t floats_to_write = min(output_array.data().size(), frequency_bin_count());
    for (size_t i = 0; i < floats_to_write; i++) {
        output_array.data()[i] = time_domain_data[i];
    }

    return {};
}

// https://webaudio.github.io/web-audio-api/#dom-analysernode-getbytetimedomaindata
WebIDL::ExceptionOr<void> AnalyserNode::get_byte_time_domain_data(GC::Root<WebIDL::BufferSource> const& array)
{
    // Write the current time-domain data (waveform data) into array. If array's byte length is less than
    // fftSize, the excess elements will be dropped. If array's byte length is greater than the fftSize,
    // the excess elements will be ignored. The most recent fftSize frames are used in computing the byte data.

    Vector<f32> time_domain_data = current_time_domain_data();
    VERIFY(time_domain_data.size() == m_fft_size);

    Vector<u8> byte_data;
    byte_data.ensure_capacity(m_fft_size);

    // FIXME: Naive
    for (size_t i = 0; i < m_fft_size; i++) {
        auto x = 128 * (1 + time_domain_data[i]);
        x = max(x, 0);
        x = min(x, 255);
        byte_data.unchecked_append(static_cast<u8>(x));
    }

    auto& output_buffer = array->viewed_array_buffer()->buffer();
    size_t bytes_to_write = min(array->byte_length(), fft_size());

    for (size_t i = 0; i < bytes_to_write; i++)
        output_buffer[i] = byte_data[i];

    return {};
}

// https://webaudio.github.io/web-audio-api/#dom-analysernode-fftsize
WebIDL::ExceptionOr<void> AnalyserNode::set_fft_size(unsigned long fft_size)
{
    if (fft_size < 32 || fft_size > 32768 || (fft_size & (fft_size - 1)) != 0)
        return WebIDL::IndexSizeError::create(realm(), "Analyser node fftSize not a power of 2 between 32 and 32768"_string);

    // Reset the previous block to zeroes.
    m_previous_block = Vector<f32>();
    m_previous_block.resize(fft_size);

    m_fft_size = fft_size;

    // FIXME: Check this:
    // Note that increasing fftSize does mean that the current time-domain data must be expanded
    // to include past frames that it previously did not. This means that the AnalyserNode
    // effectively MUST keep around the last 32768 sample-frames and the current time-domain
    // data is the most recent fftSize sample-frames out of that.
    return {};
}

WebIDL::ExceptionOr<void> AnalyserNode::set_max_decibels(double max_decibels)
{
    if (m_min_decibels >= max_decibels)
        return WebIDL::IndexSizeError::create(realm(), "Analyser node minDecibels greater than maxDecibels"_string);
    m_max_decibels = max_decibels;
    return {};
}

WebIDL::ExceptionOr<void> AnalyserNode::set_min_decibels(double min_decibels)
{
    if (min_decibels >= m_max_decibels)
        return WebIDL::IndexSizeError::create(realm(), "Analyser node minDecibels greater than maxDecibels"_string);

    m_min_decibels = min_decibels;
    return {};
}

WebIDL::ExceptionOr<void> AnalyserNode::set_smoothing_time_constant(double smoothing_time_constant)
{
    if (smoothing_time_constant > 1.0 || smoothing_time_constant < 0.0)
        return WebIDL::IndexSizeError::create(realm(), "Analyser node smoothingTimeConstant not between 0.0 and 1.0"_string);

    m_smoothing_time_constant = smoothing_time_constant;
    return {};
}

WebIDL::ExceptionOr<GC::Ref<AnalyserNode>> AnalyserNode::construct_impl(JS::Realm& realm, GC::Ref<BaseAudioContext> context, AnalyserOptions const& options)
{
    if (options.fft_size < 32 || options.fft_size > 32768 || !is_power_of_two(options.fft_size))
        return WebIDL::IndexSizeError::create(realm, "Analyser node fftSize not a power of 2 between 32 and 32768"_string);

    if (options.min_decibels >= options.max_decibels)
        return WebIDL::IndexSizeError::create(realm, "Analyser node minDecibels greater than maxDecibels"_string);

    if (options.smoothing_time_constant > 1.0 || options.smoothing_time_constant < 0.0)
        return WebIDL::IndexSizeError::create(realm, "Analyser node smoothingTimeConstant not between 0.0 and 1.0"_string);

    // When the constructor is called with a BaseAudioContext c and an option object option, the user agent
    // MUST initialize the AudioNode this, with context and options as arguments.

    auto node = realm.create<AnalyserNode>(realm, context, options);

    // Default options for channel count and interpretation
    // https://webaudio.github.io/web-audio-api/#AnalyserNode
    AudioNodeDefaultOptions default_options;
    default_options.channel_count_mode = Bindings::ChannelCountMode::Max;
    default_options.channel_interpretation = Bindings::ChannelInterpretation::Speakers;
    default_options.channel_count = 2;
    // FIXME: Set tail-time to no

    TRY(node->initialize_audio_node_options(options, default_options));

    return node;
}

void AnalyserNode::initialize(JS::Realm& realm)
{
    Base::initialize(realm);
    WEB_SET_PROTOTYPE_FOR_INTERFACE(AnalyserNode);
}

}
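For reference, the spec sections cited in the comments above define the pipeline that current_frequency_data() walks through. In the spec's notation (with N the fftSize and τ the smoothingTimeConstant; this is a summary of the referenced spec text, not code from the commit), the steps the remaining FIXMEs have to fill in are:

\hat{x}[n] = x[n]\left(\frac{1-\alpha}{2} - \frac{1}{2}\cos\frac{2\pi n}{N} + \frac{\alpha}{2}\cos\frac{4\pi n}{N}\right), \quad \alpha = 0.16 \quad \text{(Blackman window)}

\hat{X}[k] = \tau\,\hat{X}_{-1}[k] + (1-\tau)\,\lvert X[k]\rvert \quad \text{(smoothing over time, where } \hat{X}_{-1} \text{ is the previous block)}

Y[k] = 20\log_{10}\hat{X}[k] \quad \text{(conversion to dB)}

b[k] = \left\lfloor \frac{255}{dB_{max}-dB_{min}}\left(Y[k]-dB_{min}\right) \right\rfloor \text{, clipped to } [0,255] \quad \text{(byte scaling used by getByteFrequencyData())}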
84 Libraries/LibWeb/WebAudio/AnalyserNode.h Normal file

@@ -0,0 +1,84 @@
/*
 * Copyright (c) 2024, Noah Bright <noah.bright.1@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <LibJS/Forward.h>
#include <LibWeb/Bindings/PlatformObject.h>
#include <LibWeb/WebAudio/AudioNode.h>
#include <LibWeb/WebIDL/Buffers.h>
#include <LibWeb/WebIDL/ExceptionOr.h>

namespace Web::WebAudio {

// https://webaudio.github.io/web-audio-api/#AnalyserOptions
struct AnalyserOptions : AudioNodeOptions {
    unsigned long fft_size { 2048 };
    double max_decibels { -30 };
    double min_decibels { -100 };
    double smoothing_time_constant { 0.8 };
};

// https://webaudio.github.io/web-audio-api/#AnalyserNode
class AnalyserNode : public AudioNode {
    WEB_PLATFORM_OBJECT(AnalyserNode, AudioNode);
    GC_DECLARE_ALLOCATOR(AnalyserNode);

public:
    virtual ~AnalyserNode() override;

    virtual WebIDL::UnsignedLong number_of_inputs() override { return 1; }
    virtual WebIDL::UnsignedLong number_of_outputs() override { return 1; }

    WebIDL::ExceptionOr<void> get_float_frequency_data(GC::Root<WebIDL::BufferSource> const& array); // Float32Array
    WebIDL::ExceptionOr<void> get_byte_frequency_data(GC::Root<WebIDL::BufferSource> const& array); // Uint8Array
    WebIDL::ExceptionOr<void> get_float_time_domain_data(GC::Root<WebIDL::BufferSource> const& array); // Float32Array
    WebIDL::ExceptionOr<void> get_byte_time_domain_data(GC::Root<WebIDL::BufferSource> const& array); // Uint8Array

    unsigned long fft_size() const { return m_fft_size; }
    unsigned long frequency_bin_count() const { return m_fft_size / 2; }
    double max_decibels() const { return m_max_decibels; }
    double min_decibels() const { return m_min_decibels; }
    double smoothing_time_constant() const { return m_smoothing_time_constant; }

    WebIDL::ExceptionOr<void> set_fft_size(unsigned long);
    WebIDL::ExceptionOr<void> set_max_decibels(double);
    WebIDL::ExceptionOr<void> set_min_decibels(double);
    WebIDL::ExceptionOr<void> set_smoothing_time_constant(double);

    static WebIDL::ExceptionOr<GC::Ref<AnalyserNode>> create(JS::Realm&, GC::Ref<BaseAudioContext>, AnalyserOptions const& = {});
    static WebIDL::ExceptionOr<GC::Ref<AnalyserNode>> construct_impl(JS::Realm&, GC::Ref<BaseAudioContext>, AnalyserOptions const& = {});

protected:
    AnalyserNode(JS::Realm&, GC::Ref<BaseAudioContext>, AnalyserOptions const& = {});

    virtual void initialize(JS::Realm&) override;

private:
    unsigned long m_fft_size;
    double m_max_decibels;
    double m_min_decibels;
    double m_smoothing_time_constant;

    // https://webaudio.github.io/web-audio-api/#current-frequency-data
    Vector<f32> current_frequency_data();

    // https://webaudio.github.io/web-audio-api/#current-time-domain-data
    Vector<f32> current_time_domain_data();

    // https://webaudio.github.io/web-audio-api/#blackman-window
    Vector<f32> apply_a_blackman_window(Vector<f32> const& x) const;

    // https://webaudio.github.io/web-audio-api/#smoothing-over-time
    Vector<f32> smoothing_over_time(Vector<f32> const& current_block);

    // https://webaudio.github.io/web-audio-api/#previous-block
    Vector<f32> m_previous_block;

    // https://webaudio.github.io/web-audio-api/#conversion-to-db
    Vector<f32> conversion_to_dB(Vector<f32> const& X_hat) const;
};

}
25 Libraries/LibWeb/WebAudio/AnalyserNode.idl Normal file

@@ -0,0 +1,25 @@
#import <WebAudio/AudioNode.idl>
#import <WebAudio/BaseAudioContext.idl>

// https://webaudio.github.io/web-audio-api/#AnalyserOptions
dictionary AnalyserOptions : AudioNodeOptions {
    unsigned long fftSize = 2048;
    double maxDecibels = -30;
    double minDecibels = -100;
    double smoothingTimeConstant = 0.8;
};

// https://webaudio.github.io/web-audio-api/#AnalyserNode
[Exposed=Window]
interface AnalyserNode : AudioNode {
    constructor (BaseAudioContext context, optional AnalyserOptions options = {});
    undefined getFloatFrequencyData (Float32Array array);
    undefined getByteFrequencyData (Uint8Array array);
    undefined getFloatTimeDomainData (Float32Array array);
    undefined getByteTimeDomainData (Uint8Array array);
    attribute unsigned long fftSize;
    readonly attribute unsigned long frequencyBinCount;
    attribute double minDecibels;
    attribute double maxDecibels;
    attribute double smoothingTimeConstant;
};
@@ -11,6 +11,7 @@
 #include <LibWeb/HTML/EventNames.h>
 #include <LibWeb/HTML/Scripting/ExceptionReporter.h>
 #include <LibWeb/HTML/Window.h>
+#include <LibWeb/WebAudio/AnalyserNode.h>
 #include <LibWeb/WebAudio/AudioBuffer.h>
 #include <LibWeb/WebAudio/AudioBufferSourceNode.h>
 #include <LibWeb/WebAudio/AudioDestinationNode.h>

@@ -59,6 +60,12 @@ WebIDL::CallbackType* BaseAudioContext::onstatechange()
     return event_handler_attribute(HTML::EventNames::statechange);
 }
 
+// https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createanalyser
+WebIDL::ExceptionOr<GC::Ref<AnalyserNode>> BaseAudioContext::create_analyser()
+{
+    return AnalyserNode::create(realm(), *this);
+}
+
 // https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createbiquadfilter
 WebIDL::ExceptionOr<GC::Ref<BiquadFilterNode>> BaseAudioContext::create_biquad_filter()
 {

@@ -10,6 +10,7 @@
 
 #include <LibWeb/Bindings/BaseAudioContextPrototype.h>
 #include <LibWeb/DOM/EventTarget.h>
+#include <LibWeb/WebAudio/AnalyserNode.h>
 #include <LibWeb/WebAudio/AudioListener.h>
 #include <LibWeb/WebAudio/BiquadFilterNode.h>
 #include <LibWeb/WebAudio/ChannelMergerNode.h>

@@ -60,6 +61,7 @@ public:
     static WebIDL::ExceptionOr<void> verify_audio_options_inside_nominal_range(JS::Realm&, float sample_rate);
     static WebIDL::ExceptionOr<void> verify_audio_options_inside_nominal_range(JS::Realm&, WebIDL::UnsignedLong number_of_channels, WebIDL::UnsignedLong length, float sample_rate);
 
+    WebIDL::ExceptionOr<GC::Ref<AnalyserNode>> create_analyser();
     WebIDL::ExceptionOr<GC::Ref<BiquadFilterNode>> create_biquad_filter();
     WebIDL::ExceptionOr<GC::Ref<AudioBuffer>> create_buffer(WebIDL::UnsignedLong number_of_channels, WebIDL::UnsignedLong length, float sample_rate);
     WebIDL::ExceptionOr<GC::Ref<AudioBufferSourceNode>> create_buffer_source();

@@ -30,7 +30,7 @@ interface BaseAudioContext : EventTarget {
     [FIXME] readonly attribute AudioWorklet audioWorklet;
     attribute EventHandler onstatechange;
 
-    [FIXME] AnalyserNode createAnalyser ();
+    AnalyserNode createAnalyser ();
     BiquadFilterNode createBiquadFilter ();
     AudioBuffer createBuffer(unsigned long numberOfChannels, unsigned long length, float sampleRate);
     AudioBufferSourceNode createBufferSource ();

@@ -357,6 +357,7 @@ libweb_js_bindings(WebAssembly/Memory)
 libweb_js_bindings(WebAssembly/Module)
 libweb_js_bindings(WebAssembly/Table)
 libweb_js_bindings(WebAssembly/WebAssembly NAMESPACE)
+libweb_js_bindings(WebAudio/AnalyserNode)
 libweb_js_bindings(WebAudio/AudioBuffer)
 libweb_js_bindings(WebAudio/AudioBufferSourceNode)
 libweb_js_bindings(WebAudio/AudioContext)

@@ -2,6 +2,7 @@ AbortController
 AbortSignal
 AbstractRange
 AggregateError
+AnalyserNode
 Animation
 AnimationEffect
 AnimationEvent
@@ -0,0 +1,83 @@
Harness status: OK

Found 78 tests

78 Pass
Pass # AUDIT TASK RUNNER STARTED.
Pass Executing "initialize"
Pass Executing "invalid constructor"
Pass Executing "default constructor"
Pass Executing "test AudioNodeOptions"
Pass Executing "constructor with options"
Pass Executing "construct invalid options"
Pass Executing "setting min/max"
Pass Audit report
Pass > [initialize]
Pass context = new OfflineAudioContext(...) did not throw an exception.
Pass < [initialize] All assertions passed. (total 1 assertions)
Pass > [invalid constructor]
Pass new AnalyserNode() threw TypeError: "AnalyserNode() needs one argument".
Pass new AnalyserNode(1) threw TypeError: "Not an object of type BaseAudioContext".
Pass new AnalyserNode(context, 42) threw TypeError: "Not an object of type AnalyserOptions".
Pass < [invalid constructor] All assertions passed. (total 3 assertions)
Pass > [default constructor]
Pass node0 = new AnalyserNode(context) did not throw an exception.
Pass node0 instanceof AnalyserNode is equal to true.
Pass node0.numberOfInputs is equal to 1.
Pass node0.numberOfOutputs is equal to 1.
Pass node0.channelCount is equal to 2.
Pass node0.channelCountMode is equal to max.
Pass node0.channelInterpretation is equal to speakers.
Pass node0.fftSize is equal to 2048.
Pass node0.frequencyBinCount is equal to 1024.
Pass node0.minDecibels is equal to -100.
Pass node0.maxDecibels is equal to -30.
Pass node0.smoothingTimeConstant is equal to 0.8.
Pass < [default constructor] All assertions passed. (total 12 assertions)
Pass > [test AudioNodeOptions]
Pass new AnalyserNode(c, {channelCount: 17}) did not throw an exception.
Pass node.channelCount is equal to 17.
Pass new AnalyserNode(c, {channelCount: 0}) threw NotSupportedError: "Invalid channel count".
Pass new AnalyserNode(c, {channelCount: 99}) threw NotSupportedError: "Invalid channel count".
Pass new AnalyserNode(c, {channelCountMode: "max"} did not throw an exception.
Pass node.channelCountMode is equal to max.
Pass new AnalyserNode(c, {channelCountMode: "max"}) did not throw an exception.
Pass node.channelCountMode after valid setter is equal to max.
Pass new AnalyserNode(c, {channelCountMode: "clamped-max"}) did not throw an exception.
Pass node.channelCountMode after valid setter is equal to clamped-max.
Pass new AnalyserNode(c, {channelCountMode: "explicit"}) did not throw an exception.
Pass node.channelCountMode after valid setter is equal to explicit.
Pass new AnalyserNode(c, {channelCountMode: "foobar"} threw TypeError: "Invalid value 'foobar' for enumeration type 'ChannelCountMode'".
Pass node.channelCountMode after invalid setter is equal to explicit.
Pass new AnalyserNode(c, {channelInterpretation: "speakers"}) did not throw an exception.
Pass node.channelInterpretation is equal to speakers.
Pass new AnalyserNode(c, {channelInterpretation: "discrete"}) did not throw an exception.
Pass node.channelInterpretation is equal to discrete.
Pass new AnalyserNode(c, {channelInterpretation: "foobar"}) threw TypeError: "Invalid value 'foobar' for enumeration type 'ChannelInterpretation'".
Pass node.channelInterpretation after invalid setter is equal to discrete.
Pass < [test AudioNodeOptions] All assertions passed. (total 20 assertions)
Pass > [constructor with options]
Pass node1 = new AnalyserNode(c, {"fftSize":32,"maxDecibels":1,"minDecibels":-13,"smoothingTimeConstant":0.125}) did not throw an exception.
Pass node1 instanceof AnalyserNode is equal to true.
Pass node1.fftSize is equal to 32.
Pass node1.maxDecibels is equal to 1.
Pass node1.minDecibels is equal to -13.
Pass node1.smoothingTimeConstant is equal to 0.125.
Pass < [constructor with options] All assertions passed. (total 6 assertions)
Pass > [construct invalid options]
Pass node = new AnalyserNode(c, { fftSize: 33 }) threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass node = new AnalyserNode(c, { maxDecibels: -500 }) threw IndexSizeError: "Analyser node minDecibels greater than maxDecibels".
Pass node = new AnalyserNode(c, { minDecibels: -10 }) threw IndexSizeError: "Analyser node minDecibels greater than maxDecibels".
Pass node = new AnalyserNode(c, { smoothingTimeConstant: 2 }) threw IndexSizeError: "Analyser node smoothingTimeConstant not between 0.0 and 1.0".
Pass node = new AnalyserNode(c, { frequencyBinCount: 33 }) did not throw an exception.
Pass node.frequencyBinCount is equal to 1024.
Pass < [construct invalid options] All assertions passed. (total 6 assertions)
Pass > [setting min/max]
Pass node = new AnalyserNode(c, {"minDecibels":-10,"maxDecibels":20}) did not throw an exception.
Pass node = new AnalyserNode(c, {"maxDecibels":20,"minDecibels":-10}) did not throw an exception.
Pass node = new AnalyserNode(c, {"minDecibels":-200,"maxDecibels":-150}) did not throw an exception.
Pass node = new AnalyserNode(c, {"maxDecibels":-150,"minDecibels":-200}) did not throw an exception.
Pass node = new AnalyserNode(c, {"maxDecibels":-150,"minDecibels":-10}) threw IndexSizeError: "Analyser node minDecibels greater than maxDecibels".
Pass node = new AnalyserNode(c, {"minDecibels":-10,"maxDecibels":-150}) threw IndexSizeError: "Analyser node minDecibels greater than maxDecibels".
Pass < [setting min/max] All assertions passed. (total 6 assertions)
Pass # AUDIT TASK RUNNER FINISHED: 7 tasks ran successfully.
@@ -0,0 +1,18 @@
Harness status: OK

Found 13 tests

13 Pass
Pass # AUDIT TASK RUNNER STARTED.
Pass Executing "Basic AnalyserNode test"
Pass Audit report
Pass > [Basic AnalyserNode test]
Pass Number of inputs for AnalyserNode is equal to 1.
Pass Number of outputs for AnalyserNode is equal to 1.
Pass Default minDecibels value is equal to -100.
Pass Default maxDecibels value is equal to -30.
Pass Default smoothingTimeConstant value is equal to 0.8.
Pass node.minDecibels = -50.333333333333336 is equal to -50.333333333333336.
Pass node.maxDecibels = -40.333333333333336 is equal to -40.333333333333336.
Pass < [Basic AnalyserNode test] All assertions passed. (total 7 assertions)
Pass # AUDIT TASK RUNNER FINISHED: 1 tasks ran successfully.
@@ -0,0 +1,48 @@
Harness status: OK

Found 43 tests

43 Pass
Pass # AUDIT TASK RUNNER STARTED.
Pass Executing "FFT size test"
Pass Audit report
Pass > [FFT size test] Test that re-sizing the FFT arrays does not fail.
Pass Setting fftSize to -1 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 0 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 1 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 2 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 3 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 4 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 5 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 8 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 9 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 16 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 17 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 32 did not throw an exception.
Pass Setting fftSize to 33 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 64 did not throw an exception.
Pass Setting fftSize to 65 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 128 did not throw an exception.
Pass Setting fftSize to 129 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 256 did not throw an exception.
Pass Setting fftSize to 257 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 512 did not throw an exception.
Pass Setting fftSize to 513 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 1024 did not throw an exception.
Pass Setting fftSize to 1025 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 2048 did not throw an exception.
Pass Setting fftSize to 2049 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 4096 did not throw an exception.
Pass Setting fftSize to 4097 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 8192 did not throw an exception.
Pass Setting fftSize to 8193 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 16384 did not throw an exception.
Pass Setting fftSize to 16385 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 32768 did not throw an exception.
Pass Setting fftSize to 32769 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 65536 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 65537 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 131072 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass Setting fftSize to 131073 threw IndexSizeError: "Analyser node fftSize not a power of 2 between 32 and 32768".
Pass < [FFT size test] All assertions passed. (total 37 assertions)
Pass # AUDIT TASK RUNNER FINISHED: 1 tasks ran successfully.
@@ -1,9 +1,9 @@
 Harness status: OK
 
-Found 302 tests
+Found 304 tests
 
-291 Pass
-11 Fail
+294 Pass
+10 Fail
 Pass # AUDIT TASK RUNNER STARTED.
 Pass Executing "initialize"
 Pass Executing "Offline createGain"

@@ -19,7 +19,7 @@ Pass Executing "Offline createBuffer"
 Fail Executing "Offline createIIRFilter"
 Fail Executing "Offline createWaveShaper"
 Fail Executing "Offline createConvolver"
-Fail Executing "Offline createAnalyser"
+Pass Executing "Offline createAnalyser"
 Fail Executing "Offline createScriptProcessor"
 Pass Executing "Offline createPeriodicWave"
 Pass Executing "Offline createChannelSplitter"

@@ -230,6 +230,8 @@ Pass > [Offline createIIRFilter]
 Pass > [Offline createWaveShaper]
 Pass > [Offline createConvolver]
 Pass > [Offline createAnalyser]
+Pass AnalyserNode has no AudioParams as expected
+Pass < [Offline createAnalyser] All assertions passed. (total 1 assertions)
 Pass > [Offline createScriptProcessor]
 Pass > [Offline createPeriodicWave]
 Pass PeriodicWave has no AudioParams as expected
@@ -0,0 +1,183 @@
<!DOCTYPE html>
<html>
  <head>
    <title>
      Test Constructor: AnalyserNode
    </title>
    <script src="../../../resources/testharness.js"></script>
    <script src="../../../resources/testharnessreport.js"></script>
    <script src="../../../webaudio/resources/audit-util.js"></script>
    <script src="../../../webaudio/resources/audit.js"></script>
    <script src="../../../webaudio/resources/audionodeoptions.js"></script>
  </head>
  <body>
    <script id="layout-test-code">
      let context;

      let audit = Audit.createTaskRunner();

      audit.define('initialize', (task, should) => {
        context = initializeContext(should);
        task.done();
      });

      audit.define('invalid constructor', (task, should) => {
        testInvalidConstructor(should, 'AnalyserNode', context);
        task.done();
      });

      audit.define('default constructor', (task, should) => {
        let prefix = 'node0';
        let node = testDefaultConstructor(should, 'AnalyserNode', context, {
          prefix: prefix,
          numberOfInputs: 1,
          numberOfOutputs: 1,
          channelCount: 2,
          channelCountMode: 'max',
          channelInterpretation: 'speakers'
        });

        testDefaultAttributes(should, node, prefix, [
          {name: 'fftSize', value: 2048},
          {name: 'frequencyBinCount', value: 1024},
          {name: 'minDecibels', value: -100}, {name: 'maxDecibels', value: -30},
          {name: 'smoothingTimeConstant', value: 0.8}
        ]);

        task.done();
      });

      audit.define('test AudioNodeOptions', (task, should) => {
        testAudioNodeOptions(should, context, 'AnalyserNode');
        task.done();
      });

      audit.define('constructor with options', (task, should) => {
        let options = {
          fftSize: 32,
          maxDecibels: 1,
          minDecibels: -13,
          // Choose a value that can be represented the same as a float and as a
          // double.
          smoothingTimeConstant: 0.125
        };

        let node;
        should(
            () => {
              node = new AnalyserNode(context, options);
            },
            'node1 = new AnalyserNode(c, ' + JSON.stringify(options) + ')')
            .notThrow();

        should(node instanceof AnalyserNode, 'node1 instanceof AnalyserNode')
            .beEqualTo(true);
        should(node.fftSize, 'node1.fftSize').beEqualTo(options.fftSize);
        should(node.maxDecibels, 'node1.maxDecibels')
            .beEqualTo(options.maxDecibels);
        should(node.minDecibels, 'node1.minDecibels')
            .beEqualTo(options.minDecibels);
        should(node.smoothingTimeConstant, 'node1.smoothingTimeConstant')
            .beEqualTo(options.smoothingTimeConstant);

        task.done();
      });

      audit.define('construct invalid options', (task, should) => {
        let node;

        should(
            () => {
              node = new AnalyserNode(context, {fftSize: 33});
            },
            'node = new AnalyserNode(c, { fftSize: 33 })')
            .throw(DOMException, 'IndexSizeError');
        should(
            () => {
              node = new AnalyserNode(context, {maxDecibels: -500});
            },
            'node = new AnalyserNode(c, { maxDecibels: -500 })')
            .throw(DOMException, 'IndexSizeError');
        should(
            () => {
              node = new AnalyserNode(context, {minDecibels: -10});
            },
            'node = new AnalyserNode(c, { minDecibels: -10 })')
            .throw(DOMException, 'IndexSizeError');
        should(
            () => {
              node = new AnalyserNode(context, {smoothingTimeConstant: 2});
            },
            'node = new AnalyserNode(c, { smoothingTimeConstant: 2 })')
            .throw(DOMException, 'IndexSizeError');
        should(function() {
          node = new AnalyserNode(context, {frequencyBinCount: 33});
        }, 'node = new AnalyserNode(c, { frequencyBinCount: 33 })').notThrow();
        should(node.frequencyBinCount, 'node.frequencyBinCount')
            .beEqualTo(1024);

        task.done();
      });

      audit.define('setting min/max', (task, should) => {
        let node;

        // Recall the default values of minDecibels and maxDecibels are -100,
        // and -30, respectively. Setting both values in the constructor should
        // not signal an error in any of the following cases.
        let options = {minDecibels: -10, maxDecibels: 20};
        should(
            () => {
              node = new AnalyserNode(context, options);
            },
            'node = new AnalyserNode(c, ' + JSON.stringify(options) + ')')
            .notThrow();

        options = {maxDecibels: 20, minDecibels: -10};
        should(
            () => {
              node = new AnalyserNode(context, options);
            },
            'node = new AnalyserNode(c, ' + JSON.stringify(options) + ')')
            .notThrow();

        options = {minDecibels: -200, maxDecibels: -150};
        should(
            () => {
              node = new AnalyserNode(context, options);
            },
            'node = new AnalyserNode(c, ' + JSON.stringify(options) + ')')
            .notThrow();

        options = {maxDecibels: -150, minDecibels: -200};
        should(
            () => {
              node = new AnalyserNode(context, options);
            },
            'node = new AnalyserNode(c, ' + JSON.stringify(options) + ')')
            .notThrow();

        // But these should signal because minDecibel > maxDecibel
        options = {maxDecibels: -150, minDecibels: -10};
        should(
            () => {
              node = new AnalyserNode(context, options);
            },
            'node = new AnalyserNode(c, ' + JSON.stringify(options) + ')')
            .throw(DOMException, 'IndexSizeError');

        options = {minDecibels: -10, maxDecibels: -150};
        should(
            () => {
              node = new AnalyserNode(context, options);
            },
            'node = new AnalyserNode(c, ' + JSON.stringify(options) + ')')
            .throw(DOMException, 'IndexSizeError');

        task.done();
      });

      audit.run();
    </script>
  </body>
</html>
@@ -0,0 +1,57 @@
<!DOCTYPE html>
<html>
  <head>
    <title>
      realtimeanalyser-basic.html
    </title>
    <script src="../../../resources/testharness.js"></script>
    <script src="../../../resources/testharnessreport.js"></script>
    <script src="../../../webaudio/resources/audit-util.js"></script>
    <script src="../../../webaudio/resources/audit.js"></script>
  </head>
  <body>
    <script id="layout-test-code">
      let context = 0;

      let audit = Audit.createTaskRunner();

      audit.define('Basic AnalyserNode test', function(task, should) {
        context = new AudioContext();
        let analyser = context.createAnalyser();

        should(analyser.numberOfInputs, 'Number of inputs for AnalyserNode')
            .beEqualTo(1);

        should(analyser.numberOfOutputs, 'Number of outputs for AnalyserNode')
            .beEqualTo(1);

        should(analyser.minDecibels, 'Default minDecibels value')
            .beEqualTo(-100);

        should(analyser.maxDecibels, 'Default maxDecibels value')
            .beEqualTo(-30);

        should(
            analyser.smoothingTimeConstant,
            'Default smoothingTimeConstant value')
            .beEqualTo(0.8);

        let expectedValue = -50 - (1 / 3);
        analyser.minDecibels = expectedValue;

        should(analyser.minDecibels, 'node.minDecibels = ' + expectedValue)
            .beEqualTo(expectedValue);

        expectedValue = -40 - (1 / 3);
        analyser.maxDecibels = expectedValue;

        should(analyser.maxDecibels, 'node.maxDecibels = ' + expectedValue)
            .beEqualTo(expectedValue);

        task.done();
      });

      audit.run();
    </script>
  </body>
</html>
@@ -0,0 +1,54 @@
<!DOCTYPE html>
<html>
  <head>
    <title>
      realtimeanalyser-fft-sizing.html
    </title>
    <script src="../../../resources/testharness.js"></script>
    <script src="../../../resources/testharnessreport.js"></script>
    <script src="../../../webaudio/resources/audit-util.js"></script>
    <script src="../../../webaudio/resources/audit.js"></script>
  </head>
  <body>
    <script id="layout-test-code">
      let audit = Audit.createTaskRunner();

      function doTest(fftSize, illegal, should) {
        let c = new OfflineAudioContext(1, 1000, 44100);
        let a = c.createAnalyser();
        let message = 'Setting fftSize to ' + fftSize;
        let tester = function() {
          a.fftSize = fftSize;
        };

        if (illegal) {
          should(tester, message).throw(DOMException, 'IndexSizeError');
        } else {
          should(tester, message).notThrow();
        }
      }

      audit.define(
          {
            label: 'FFT size test',
            description: 'Test that re-sizing the FFT arrays does not fail.'
          },
          function(task, should) {
            doTest(-1, true, should);
            doTest(0, true, should);
            doTest(1, true, should);
            for (let i = 2; i <= 0x20000; i *= 2) {
              if (i >= 32 && i <= 32768)
                doTest(i, false, should);
              else
                doTest(i, true, should);
              doTest(i + 1, true, should);
            }

            task.done();
          });

      audit.run();
    </script>
  </body>
</html>