diff --git a/apps/common-app/src/examples/AudioFile/AudioPlayer.ts b/apps/common-app/src/examples/AudioFile/AudioPlayer.ts
index 4e8a757cf..a71faecf7 100644
--- a/apps/common-app/src/examples/AudioFile/AudioPlayer.ts
+++ b/apps/common-app/src/examples/AudioFile/AudioPlayer.ts
@@ -37,9 +37,7 @@ class AudioPlayer {
await this.audioContext.resume();
}
- this.sourceNode = this.audioContext.createBufferSource({
- pitchCorrection: true,
- });
+ this.sourceNode = this.audioContext.createBufferSource(true);
this.sourceNode.buffer = this.audioBuffer;
this.sourceNode.playbackRate.value = this.playbackRate;
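A minimal playback sketch built around the updated call, assuming `AudioContext` and `AudioBuffer` are exported from `react-native-audio-api` as in the documentation examples further down; the boolean argument toggles pitch correction:

```tsx
import { AudioContext, AudioBuffer } from 'react-native-audio-api';

const audioContext = new AudioContext();

async function play(audioBuffer: AudioBuffer) {
  await audioContext.resume();

  // createBufferSource now takes a plain boolean instead of an options object
  const sourceNode = audioContext.createBufferSource(true); // pitch correction enabled

  sourceNode.buffer = audioBuffer;
  sourceNode.playbackRate.value = 1.25; // change speed without shifting pitch
  sourceNode.connect(audioContext.destination);
  sourceNode.start();
}
```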
diff --git a/apps/common-app/src/examples/AudioVisualizer/AudioVisualizer.tsx b/apps/common-app/src/examples/AudioVisualizer/AudioVisualizer.tsx
index 638239b00..9a11f9af2 100644
--- a/apps/common-app/src/examples/AudioVisualizer/AudioVisualizer.tsx
+++ b/apps/common-app/src/examples/AudioVisualizer/AudioVisualizer.tsx
@@ -105,10 +105,7 @@ const AudioVisualizer: React.FC = () => {
}
if (!analyserRef.current) {
- analyserRef.current = audioContextRef.current.createAnalyser();
- analyserRef.current.fftSize = FFT_SIZE;
- analyserRef.current.smoothingTimeConstant = 0.2;
-
+ analyserRef.current = new AnalyserNode(audioContextRef.current, { fftSize: FFT_SIZE, smoothingTimeConstant: 0.2 });
analyserRef.current.connect(audioContextRef.current.destination);
}
diff --git a/apps/fabric-example/ios/Podfile.lock b/apps/fabric-example/ios/Podfile.lock
index fd9a098a5..fd0bbb755 100644
--- a/apps/fabric-example/ios/Podfile.lock
+++ b/apps/fabric-example/ios/Podfile.lock
@@ -2675,7 +2675,7 @@ PODS:
- RNWorklets
- SocketRocket
- Yoga
- - RNScreens (4.18.0):
+ - RNScreens (4.17.1):
- boost
- DoubleConversion
- fast_float
@@ -2702,10 +2702,10 @@ PODS:
- ReactCodegen
- ReactCommon/turbomodule/bridging
- ReactCommon/turbomodule/core
- - RNScreens/common (= 4.18.0)
+ - RNScreens/common (= 4.17.1)
- SocketRocket
- Yoga
- - RNScreens/common (4.18.0):
+ - RNScreens/common (4.17.1):
- boost
- DoubleConversion
- fast_float
@@ -3216,10 +3216,10 @@ SPEC CHECKSUMS:
ReactAppDependencyProvider: c5c4f5280e4ae0f9f4a739c64c4260fe0b3edaf1
ReactCodegen: 096bbbb2498ca55f385e2fbd465bfa0211ee8295
ReactCommon: 25c7f94aee74ddd93a8287756a8ac0830a309544
- RNAudioAPI: c763dbacdb8d89b7ce829484306df54322a7d951
+ RNAudioAPI: c7dc7b491a0e4b23535a55fd9b4a00d0f803f4bb
RNGestureHandler: f1dd7f92a0faa2868a919ab53bb9d66eb4ebfcf5
RNReanimated: e4993dd98196c698cbacc1441a4ac5b855ae56dc
- RNScreens: d821082c6dd1cb397cc0c98b026eeafaa68be479
+ RNScreens: 833237c48c756d40764540246a501b47dadb2cac
RNSVG: 8c0bbfa480a24b24468f1c76bd852a4aac3178e6
RNWorklets: d4553da98908962b6b834d5f2d26525b0d6840ad
SocketRocket: d4aabe649be1e368d1318fdf28a022d714d65748
diff --git a/packages/audiodocs/docs/analysis/analyser-node.mdx b/packages/audiodocs/docs/analysis/analyser-node.mdx
index dbb01c474..baed61620 100644
--- a/packages/audiodocs/docs/analysis/analyser-node.mdx
+++ b/packages/audiodocs/docs/analysis/analyser-node.mdx
@@ -3,7 +3,7 @@ sidebar_position: 1
---
import AudioNodePropsTable from "@site/src/components/AudioNodePropsTable"
-import { ReadOnly } from '@site/src/components/Badges';
+import { Optional, ReadOnly } from '@site/src/components/Badges';
# AnalyserNode
@@ -23,7 +23,23 @@ In contrast, a frequency-domain graph reveals how the signal's energy or power i
## Constructor
-[`BaseAudioContext.createAnalyser()`](/docs/core/base-audio-context#createanalyser)
+```tsx
+constructor(context: BaseAudioContext, options?: AnalyserOptions)
+```
+
+### `AnalyserOptions`
+
+Inherits all properties from [`AudioNodeOptions`](/docs/core/audio-node#audionodeoptions)
+
+| Parameter | Type | Default | Description |
+| :---: | :---: | :----: | :---- |
+| `fftSize` | `number` | 2048 | Number representing the size of the Fast Fourier Transform (FFT) |
+| `minDecibels` | `number` | -100 | Initial minimum power in dB for FFT analysis |
+| `maxDecibels` | `number` | -30 | Initial maximum power in dB for FFT analysis |
+| `smoothingTimeConstant` | `number` | 0.8 | Initial smoothing constant for the FFT analysis |
+
+Alternatively, you can use the `BaseAudioContext` factory method:
+[`BaseAudioContext.createAnalyser()`](/docs/core/base-audio-context#createanalyser), which creates the node with default values.
## Properties
@@ -95,24 +111,20 @@ Each value in the array is within the range 0 to 255, where value of 127 indicat
## Remarks
#### `fftSize`
-- Default value is 2048.
- Must be a power of 2 between 32 and 32768.
- Throws `IndexSizeError` if set value is not power of 2, or is outside the allowed range.
#### `minDecibels`
-- Default value is -100 dB.
- 0 dB([decibel](https://en.wikipedia.org/wiki/Decibel)) is the loudest possible sound, -10 dB is a 10th of that.
- When getting data from [`getByteFrequencyData()`](/docs/analysis/analyser-node#getbytefrequencydata), any frequency with amplitude lower then `minDecibels` will be returned as 0.
- Throws `IndexSizeError` if set value is greater than or equal to `maxDecibels`.
#### `maxDecibels`
-- Default value is -30 dB.
- 0 dB([decibel](https://en.wikipedia.org/wiki/Decibel)) is the loudest possible sound, -10 dB is a 10th of that.
- When getting data from [`getByteFrequencyData()`](/docs/analysis/analyser-node#getbytefrequencydata), any frequency with amplitude higher then `maxDecibels` will be returned as 255.
- Throws `IndexSizeError` if set value is less then or equal to `minDecibels`.
#### `smoothingTimeConstant`
-- Default value is 0.8.
- Nominal range is 0 to 1.
- 0 means no averaging, 1 means "overlap the previous and current buffer quite a lot while computing the value".
- Throws `IndexSizeError` if set value is outside the allowed range.
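The constructor documented above can be used directly, mirroring the `AudioVisualizer` change earlier in this diff; this sketch assumes `AnalyserNode` is exported from `react-native-audio-api`:

```tsx
import { AudioContext, AnalyserNode } from 'react-native-audio-api';

const audioContext = new AudioContext();

// Constructor form with explicit options
const analyser = new AnalyserNode(audioContext, {
  fftSize: 512,
  smoothingTimeConstant: 0.2,
});

// Factory form, which falls back to the defaults listed above
const defaultAnalyser = audioContext.createAnalyser();

analyser.connect(audioContext.destination);

// frequencyBinCount is half of fftSize
const frequencyData = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(frequencyData);
```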
diff --git a/packages/audiodocs/docs/core/audio-node.mdx b/packages/audiodocs/docs/core/audio-node.mdx
index 26aa21223..b6db096bc 100644
--- a/packages/audiodocs/docs/core/audio-node.mdx
+++ b/packages/audiodocs/docs/core/audio-node.mdx
@@ -111,6 +111,18 @@ If no arguments provided node disconnects from all outgoing connections.
#### Returns `undefined`.
+### `AudioNodeOptions`
+
+It is used when constructing the majority of `AudioNode`s.
+
+| Parameter | Type | Default | Description |
+| :---: | :---: | :----: | :---- |
+| `channelCount` | `number` | 2 | Number of channels used when up-mixing or down-mixing the node's inputs. |
+| `channelCountMode` | [`ChannelCountMode`](/docs/types/channel-count-mode) | `max` | Determines how the number of input channels affects the number of output channels in an audio node. |
+| `channelInterpretation` | [`ChannelInterpretation`](/docs/types/channel-interpretation) | `speakers` | Specifies how input channels are mapped to output channels when their numbers differ. |
+
+If any of these values is omitted, its default value is used.
+
## Remarks
#### `numberOfInputs`
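Since most node options inherit from `AudioNodeOptions`, the channel-related fields can be passed alongside node-specific ones; a sketch assuming the JS-side `GainOptions` mirrors the native struct introduced later in this diff:

```tsx
import { AudioContext, GainNode } from 'react-native-audio-api';

const audioContext = new AudioContext();

// Node-specific option (gain) combined with inherited AudioNodeOptions fields
const gainNode = new GainNode(audioContext, {
  gain: 0.8,
  channelCount: 1,
  channelCountMode: 'explicit',
  channelInterpretation: 'discrete',
});

gainNode.connect(audioContext.destination);
```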
diff --git a/packages/audiodocs/docs/core/base-audio-context.mdx b/packages/audiodocs/docs/core/base-audio-context.mdx
index 2e6a098bf..e75511f49 100644
--- a/packages/audiodocs/docs/core/base-audio-context.mdx
+++ b/packages/audiodocs/docs/core/base-audio-context.mdx
@@ -84,7 +84,7 @@ Creates [`AudioBufferSourceNode`](/docs/sources/audio-buffer-source-node).
| Parameter | Type | Description |
| :---: | :---: | :---- |
-| `options` | [`AudioBufferBaseSourceNodeOptions`](/docs/sources/audio-buffer-source-node#constructor) | Dictionary object that specifies if pitch correction has to be available. |
+| `pitchCorrection` | `boolean` | Boolean specifying whether pitch correction should be available. |
#### Returns `AudioBufferSourceNode`.
@@ -94,7 +94,7 @@ Creates [`AudioBufferQueueSourceNode`](/docs/sources/audio-buffer-queue-source-n
| Parameter | Type | Description |
| :---: | :---: | :---- |
-| `options` | [`AudioBufferBaseSourceNodeOptions`](/docs/sources/audio-buffer-queue-source-node#constructor) | Dictionary object that specifies if pitch correction has to be available. |
+| `pitchCorrection` | `boolean` | Boolean specifying whether pitch correction should be available. |
#### Returns `AudioBufferQueueSourceNode`.
@@ -108,16 +108,6 @@ Creates [`ConstantSourceNode`](/docs/sources/constant-source-node).
Creates [`ConvolverNode`](/docs/effects/convolver-node).
-| Parameter | Type | Description |
-| :---: | :---: | :---- |
-| `options` | [`ConvolverNodeOptions`](/docs/effects/convolver-node#constructor) | Dictionary object that specifies associated buffer and normalization. |
-
-#### Errors
-
-| Error type | Description |
-| :---: | :---- |
-| `NotSupportedError` | `numOfChannels` of buffer is not 1, 2 or 4. |
-
#### Returns `ConvolverNode`.
### `createDelay`
@@ -140,17 +130,6 @@ Creates [`GainNode`](/docs/effects/gain-node).
Creates [`IIRFilterNode`](/docs/effects/iir-filter-node).
-| Parameter | Type | Description |
-| :---: | :---: | :---- |
-| `options` | [`IIRFilterNodeOptions`](/docs/effects/iir-filter-node#constructor) | Dictionary object that specifies the feedforward (numerator) and feedback (denominator) coefficients for the transfer function of the IIR filter. |
-
-#### Errors
-
-| Error type | Description |
-| :---: | :---- |
-| `NotSupportedError` | One or both of the input arrays exceeds 20 members. |
-| `InvalidStateError` | All of the feedforward coefficients are 0, or the first feedback coefficient is 0. |
-
#### Returns `IIRFilterNode`.
### `createOscillator`
diff --git a/packages/audiodocs/docs/effects/biquad-filter-node.mdx b/packages/audiodocs/docs/effects/biquad-filter-node.mdx
index aa125904e..8d123bb4e 100644
--- a/packages/audiodocs/docs/effects/biquad-filter-node.mdx
+++ b/packages/audiodocs/docs/effects/biquad-filter-node.mdx
@@ -3,7 +3,7 @@ sidebar_position: 1
---
import AudioNodePropsTable from "@site/src/components/AudioNodePropsTable"
-import { ReadOnly } from '@site/src/components/Badges';
+import { Optional, ReadOnly } from '@site/src/components/Badges';
import InteractiveExample from '@site/src/components/InteractiveExample';
import AudioApiExample from '@site/src/components/AudioApiExample'
import VinylPlayer from '@site/src/components/RecordPlayerExample/VinylAnimation';
@@ -23,7 +23,24 @@ Multiple `BiquadFilterNode` instances can be combined to create more complex fil
## Constructor
-[`BaseAudioContext.createBiquadFilter()`](/docs/core/base-audio-context#createbiquadfilter)
+```tsx
+constructor(context: BaseAudioContext, options?: BiquadFilterOptions)
+```
+
+### `BiquadFilterOptions`
+
+Inherits all properties from [`AudioNodeOptions`](/docs/core/audio-node#audionodeoptions)
+
+| Parameter | Type | Default | Description |
+| :---: | :---: | :----: | :---- |
+| `Q` | `number` | 1 | Initial value for [`Q`](/docs/effects/biquad-filter-node#properties) |
+| `detune` | `number` | 0 | Initial value for [`detune`](/docs/effects/biquad-filter-node#properties) |
+| `frequency` | `number` | 350 | Initial value for [`frequency`](/docs/effects/biquad-filter-node#properties) |
+| `gain` | `number` | 0 | Initial value for [`gain`](/docs/effects/biquad-filter-node#properties) |
+| `type` | `BiquadFilterType` | `lowpass` | Initial value for [`type`](/docs/effects/biquad-filter-node#properties) |
+
+Alternatively, you can use the `BaseAudioContext` factory method:
+[`BaseAudioContext.createBiquadFilter()`](/docs/core/base-audio-context#createbiquadfilter), which creates the node with default values.
## Properties
@@ -72,14 +89,9 @@ It inherits all methods from [`AudioNode`](/docs/core/audio-node#methods).
## Remarks
#### `frequency`
-- Float. Default: 350.
- Range: [10, $\frac{sampleRate}{2}$].
-#### `detune`
-- Float. Default: 0.
-
#### `Q`
-- Float. Default: 1.
- Range:
- For `lowpass` and `highpass` is [-Q, Q], where Q is the largest value for which $10^{Q/20}$ does not overflow the single-precision floating-point representation.
Numerically: Q ≈ 770.63678.
@@ -87,9 +99,5 @@ Numerically: Q ≈ 770.63678.
- Not used for `lowshelf` and `highshelf`.
#### `gain`
-- Float. Default: 0.
- Range: [-40, 40].
- Positive values correspond to amplification; negative to attenuation.
-
-#### `type`
-- [`BiquadFilterType`](#biquadfiltertype-enumeration-description). Default: `"lowpass"`.
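A short usage sketch of the options listed above, assuming `BiquadFilterNode` is exported from `react-native-audio-api` like the nodes in the existing examples:

```tsx
import { AudioContext, BiquadFilterNode, OscillatorNode } from 'react-native-audio-api';

const audioContext = new AudioContext();

// Constructor form with explicit options
const filter = new BiquadFilterNode(audioContext, {
  type: 'highpass',
  frequency: 1000,
  Q: 0.7,
});

// Factory form, which uses the defaults listed above (lowpass, 350 Hz)
const defaultFilter = audioContext.createBiquadFilter();

const oscillator = new OscillatorNode(audioContext, { frequency: 440 });
oscillator.connect(filter);
filter.connect(audioContext.destination);
oscillator.start();
```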
diff --git a/packages/audiodocs/docs/effects/convolver-node.mdx b/packages/audiodocs/docs/effects/convolver-node.mdx
index 88a57983e..c6ea63fcc 100644
--- a/packages/audiodocs/docs/effects/convolver-node.mdx
+++ b/packages/audiodocs/docs/effects/convolver-node.mdx
@@ -3,6 +3,7 @@ sidebar_position: 2
---
import AudioNodePropsTable from "@site/src/components/AudioNodePropsTable"
+import { Optional } from '@site/src/components/Badges';
# ConvolverNode
@@ -19,18 +20,30 @@ Convolver is a node with tail-time, which means, that it continues to output non
## Constructor
-[`BaseAudioContext.createConvolver(options: ConvolverNodeOptions)`](/docs/core/base-audio-context#createconvolver)
-
-```jsx
-interface ConvolverNodeOptions {
- buffer?: AudioBuffer | null; // impulse response
- disableNormalization?: boolean; // if normalization of output should be applied, true by default
-}
+```tsx
+constructor(context: BaseAudioContext, options?: ConvolverOptions)
```
+### `ConvolverOptions`
+
+Inherits all properties from [`AudioNodeOptions`](/docs/core/audio-node#audionodeoptions)
+
+| Parameter | Type | Default | Description |
+| :---: | :---: | :----: | :---- |
+| `buffer` | [`AudioBuffer`](/docs/sources/audio-buffer) | | Initial value for [`buffer`](/docs/effects/convolver-node#properties). |
+| `normalize` | `boolean` | true | Initial value for [`normalize`](/docs/effects/convolver-node#properties). |
+
+Alternatively, you can use the `BaseAudioContext` factory method:
+[`BaseAudioContext.createConvolver()`](/docs/core/base-audio-context#createconvolver)
+
## Properties
-It inherits all properties from [`AudioNode`](/docs/core/audio-node#properties) and has no individual ones.
+It inherits all properties from [`AudioNode`](/docs/core/audio-node#properties).
+
+| Name | Type | Description |
+| :----: | :----: | :-------- |
+| `buffer` | [`AudioBuffer`](/docs/sources/audio-buffer) | Associated AudioBuffer. |
+| `normalize` | `boolean` | Whether the impulse response from the buffer will be scaled by an equal-power normalization when the buffer attribute is set. |
:::caution
Linear convolution is a heavy computational process, so if your audio has some weird artefacts that should not be there, try to decrease the duration of impulse response buffer.
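A usage sketch of the options above; the option names follow the documented table (`normalize`, not the native `disableNormalization`), and `ConvolverNode` is assumed to be exported from `react-native-audio-api`:

```tsx
import { AudioContext, ConvolverNode } from 'react-native-audio-api';

const audioContext = new AudioContext();

// A short impulse response; in practice this would come from a decoded audio file
const impulseResponse = audioContext.createBuffer(
  2,
  audioContext.sampleRate / 2, // half a second
  audioContext.sampleRate
);

const convolver = new ConvolverNode(audioContext, {
  buffer: impulseResponse,
  normalize: true,
});

convolver.connect(audioContext.destination);
```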
diff --git a/packages/audiodocs/docs/effects/gain-node.mdx b/packages/audiodocs/docs/effects/gain-node.mdx
index fb816544b..c3f87c13b 100644
--- a/packages/audiodocs/docs/effects/gain-node.mdx
+++ b/packages/audiodocs/docs/effects/gain-node.mdx
@@ -3,7 +3,7 @@ sidebar_position: 3
---
import AudioNodePropsTable from "@site/src/components/AudioNodePropsTable"
-import { ReadOnly } from '@site/src/components/Badges';
+import { Optional, ReadOnly } from '@site/src/components/Badges';
import { useGainAdsrPlayground } from '@site/src/components/InteractivePlayground/GainAdsrExample/useGainAdsrPlayground';
import InteractivePlayground from '@site/src/components/InteractivePlayground';
@@ -43,7 +43,20 @@ You can read more about envelopes and ADSR on [Wikipedia]
+### `GainOptions`
+
+Inherits all properties from [`AudioNodeOptions`](/docs/core/audio-node#audionodeoptions)
+
+| Parameter | Type | Default | Description |
+| :---: | :---: | :----: | :---- |
+| `gain` | `number` | 1.0 | Initial value for [`gain`](/docs/effects/gain-node#properties) |
+
+Alternatively, you can use the `BaseAudioContext` factory method:
+[`BaseAudioContext.createGain()`](/docs/core/base-audio-context#creategain), which creates the node with default values.
## Properties
@@ -61,5 +74,4 @@ It inherits all methods from [`AudioNode`](/docs/core/audio-node#methods).
## Remarks
#### `gain`
-- Default value is 1.0.
- Nominal range is -∞ to ∞.
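A minimal sketch of the constructor path described above, using only classes that already appear in the package's examples:

```tsx
import { AudioContext, GainNode, OscillatorNode } from 'react-native-audio-api';

const audioContext = new AudioContext();

// Constructor form with an initial gain value
const gainNode = new GainNode(audioContext, { gain: 0.25 });

const oscillator = new OscillatorNode(audioContext, { frequency: 440 });
oscillator.connect(gainNode);
gainNode.connect(audioContext.destination);
oscillator.start();

// gain is an a-rate AudioParam, so it can still be changed afterwards
gainNode.gain.value = 0.5;
```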
diff --git a/packages/audiodocs/docs/effects/iir-filter-node.mdx b/packages/audiodocs/docs/effects/iir-filter-node.mdx
index 660209321..dba61194c 100644
--- a/packages/audiodocs/docs/effects/iir-filter-node.mdx
+++ b/packages/audiodocs/docs/effects/iir-filter-node.mdx
@@ -24,6 +24,13 @@ interface IIRFilterNodeOptions {
}
```
+#### Errors
+
+| Error type | Description |
+| :---: | :---- |
+| `NotSupportedError` | One or both of the input arrays exceeds 20 members. |
+| `InvalidStateError` | All of the feedforward coefficients are 0, or the first feedback coefficient is 0. |
+
## Properties
It inherits all properties from [`AudioNode`](/docs/core/audio-node#properties).
diff --git a/packages/audiodocs/docs/effects/periodic-wave.mdx b/packages/audiodocs/docs/effects/periodic-wave.mdx
index 9f7e2cd9f..f59b9a993 100644
--- a/packages/audiodocs/docs/effects/periodic-wave.mdx
+++ b/packages/audiodocs/docs/effects/periodic-wave.mdx
@@ -2,24 +2,39 @@
sidebar_position: 5
---
+import { Optional } from '@site/src/components/Badges';
+
# PeriodicWave
The `PeriodicWave` interface defines a periodic waveform that can be used to shape the output of an OscillatorNode.
## Constructor
-[`BaseAudioContext.createPeriodicWave(real, imag)`](/docs/core/base-audio-context#createperiodicwave)
+```tsx
+constructor(context: BaseAudioContext, options: PeriodicWaveOptions)
+```
-[`BaseAudioContext.createPeriodicWave(real, imag, constraints: PeriodicWaveConstraints)`](/docs/core/base-audio-context#createperiodicwave)
+### `PeriodicWaveOptions`
-```jsx
-interface PeriodicWaveConstraints {
- disableNormalization: boolean; // default set to false (normalization is enabled)
-}
-```
+| Parameter | Type | Default | Description |
+| :---: | :---: | :----: | :---- |
+| `real` | `Float32Array` | - | [Cosine terms](/docs/core/base-audio-context#createperiodicwave) |
+| `imag` | `Float32Array` | - | [Sine terms](/docs/core/base-audio-context#createperiodicwave) |
+| `disableNormalization` | `boolean` | false | Whether [normalization](/docs/core/base-audio-context#createperiodicwave) of the periodic wave should be disabled. |
+
+Alternatively, you can use the `BaseAudioContext` factory method:
+[`BaseAudioContext.createPeriodicWave(real, imag, constraints?: PeriodicWaveConstraints)`](/docs/core/base-audio-context#createperiodicwave)
## Properties
None. `PeriodicWave` has no own or inherited properties.
## Methods
None. `PeriodicWave` has no own or inherited methods.
+
+## Remarks
+
+#### `real` and `imag`
+- If only one of them is specified, the other is treated as an array of zeros of the same length.
+- If neither is given, the resulting wave is equivalent to a sine wave.
+- If both are given, they must have the same length.
+- To see how the values correspond to the output waveform, see the [Web Audio specification](https://webaudio.github.io/web-audio-api/#waveform-generation).
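A sketch of constructing a custom waveform with the options above; it assumes `PeriodicWave` is exported from `react-native-audio-api` and that `OscillatorNode.setPeriodicWave()` is available as in the Web Audio API:

```tsx
import { AudioContext, PeriodicWave, OscillatorNode } from 'react-native-audio-api';

const audioContext = new AudioContext();

// Single harmonic; index 0 is the DC offset and is ignored
const real = new Float32Array([0, 0]);
const imag = new Float32Array([0, 1]);

const wave = new PeriodicWave(audioContext, {
  real,
  imag,
  disableNormalization: false,
});

const oscillator = new OscillatorNode(audioContext, { frequency: 220 });
oscillator.setPeriodicWave(wave);
oscillator.connect(audioContext.destination);
oscillator.start();
```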
diff --git a/packages/audiodocs/docs/effects/stereo-panner-node.mdx b/packages/audiodocs/docs/effects/stereo-panner-node.mdx
index f0a1db0c0..81495a002 100644
--- a/packages/audiodocs/docs/effects/stereo-panner-node.mdx
+++ b/packages/audiodocs/docs/effects/stereo-panner-node.mdx
@@ -3,7 +3,7 @@ sidebar_position: 6
---
import AudioNodePropsTable from "@site/src/components/AudioNodePropsTable"
-import { ReadOnly } from '@site/src/components/Badges';
+import { Optional, ReadOnly } from '@site/src/components/Badges';
# StereoPannerNode
@@ -15,6 +15,19 @@ The `StereoPannerNode` interface represents the change in ratio between two outp
## Constructor
+```tsx
+constructor(context: BaseAudioContext, stereoPannerOptions?: StereoPannerOptions)
+```
+
+### `StereoPannerOptions`
+
+Inherits all properties from [`AudioNodeOptions`](/docs/core/audio-node#audionodeoptions)
+
+| Parameter | Type | Default | Description |
+| :---: | :---: | :----: | :---- |
+| `pan` | `number` | 0.0 | Number representing the initial pan value |
+
+Alternatively, you can use the `BaseAudioContext` factory method:
[`BaseAudioContext.createStereoPanner()`](/docs/core/base-audio-context#createstereopanner)
## Properties
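A short sketch of the pan option above, assuming `StereoPannerNode` is exported from `react-native-audio-api`:

```tsx
import { AudioContext, StereoPannerNode, OscillatorNode } from 'react-native-audio-api';

const audioContext = new AudioContext();

// pan ranges from -1 (full left) to 1 (full right); 0 keeps the signal centered
const panner = new StereoPannerNode(audioContext, { pan: -0.5 });

const oscillator = new OscillatorNode(audioContext, { frequency: 440 });
oscillator.connect(panner);
panner.connect(audioContext.destination);
oscillator.start();
```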
diff --git a/packages/audiodocs/docs/sources/audio-buffer.mdx b/packages/audiodocs/docs/sources/audio-buffer.mdx
index 8451207c4..26efefb1f 100644
--- a/packages/audiodocs/docs/sources/audio-buffer.mdx
+++ b/packages/audiodocs/docs/sources/audio-buffer.mdx
@@ -17,7 +17,20 @@ Once you have data in `AudioBuffer`, audio can be played by passing it to [`Audi
## Constructor
-[`BaseAudioContext.createBuffer(numChannels, length, sampleRate)`](/docs/core/base-audio-context#createbuffer)
+```tsx
+constructor(context: BaseAudioContext, options: AudioBufferOptions)
+```
+
+### `AudioBufferOptions`
+
+| Parameter | Type | Default | Description |
+| :---: | :---: | :----: | :---- |
+| `numberOfChannels` | `number` | 1 | Number of [`channels`](/docs/sources/audio-buffer#properties) in the buffer |
+| `length` | `number` | - | [`Length`](/docs/sources/audio-buffer#properties) of the buffer |
+| `sampleRate` | `number` | - | [`Sample rate`](/docs/sources/audio-buffer#properties) of the buffer in Hz |
+
+Alternatively, you can use the `BaseAudioContext` factory method:
+[`BaseAudioContext.createBuffer(numChannels, length, sampleRate)`](/docs/core/base-audio-context#createbuffer), which creates a buffer from the given parameters.
## Decoding
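A sketch of both construction paths described above, assuming `AudioBuffer` is exported from `react-native-audio-api`:

```tsx
import { AudioContext, AudioBuffer } from 'react-native-audio-api';

const audioContext = new AudioContext();

// One second of mono audio at the context's sample rate
const buffer = new AudioBuffer(audioContext, {
  numberOfChannels: 1,
  length: audioContext.sampleRate,
  sampleRate: audioContext.sampleRate,
});

// Equivalent factory form
const sameShapeBuffer = audioContext.createBuffer(
  1,
  audioContext.sampleRate,
  audioContext.sampleRate
);
```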
diff --git a/packages/audiodocs/docs/sources/constant-source-node.mdx b/packages/audiodocs/docs/sources/constant-source-node.mdx
index 87b89c81a..98d73c5e9 100644
--- a/packages/audiodocs/docs/sources/constant-source-node.mdx
+++ b/packages/audiodocs/docs/sources/constant-source-node.mdx
@@ -18,7 +18,18 @@ Just like `AudioScheduledSourceNode`, it can be started only once.
## Constructor
-[`BaseAudioContext.createConstantSource()`](/docs/core/base-audio-context#createconstantsource)
+```tsx
+constructor(context: BaseAudioContext, options?: ConstantSourceOptions)
+```
+
+### `ConstantSourceOptions`
+
+| Parameter | Type | Default | Description |
+| :---: | :---: | :----: | :---- |
+| `offset` | `number` | 1 | Initial value for [`offset`](/docs/sources/constant-source-node#properties) |
+
+Alternatively, you can use the `BaseAudioContext` factory method:
+[`BaseAudioContext.createConstantSource()`](/docs/core/base-audio-context#createconstantsource), which creates the node with default values.
## Example
@@ -27,7 +38,7 @@ import React, { useRef } from 'react';
import { Text } from 'react-native';
import {
AudioContext,
- OscillatorNode,
+ OscillatorNode,
GainNode,
ConstantSourceNode
} from 'react-native-audio-api';
@@ -55,7 +66,7 @@ function App() {
oscillator2.connect(gainNode2);
gainNode2.connect(audioContext.destination);
- // We connect the constant source to the gain nodes gain AudioParams
+ // We connect the constant source to the gain nodes gain AudioParams
// to control both of them at the same time
constantSource.connect(gainNode1.gain);
constantSource.connect(gainNode2.gain);
@@ -71,14 +82,9 @@ function App() {
It inherits all properties from [`AudioScheduledSourceNode`](/docs/sources/audio-scheduled-source-node#properties).
| Name | Type | Default value | Description |
-| :----: | :----: | :-------- | :------- |
-| `offset` | [`AudioParam`](/docs/core/audio-param) | 1 |[`a-rate`](/docs/core/audio-param#a-rate-vs-k-rate) `AudioParam` representing the value which the node constantly outputs. |
+| :----: | :----: | :--------: | :------- |
+| `offset` | [`AudioParam`](/docs/core/audio-param) | 1.0 |[`a-rate`](/docs/core/audio-param#a-rate-vs-k-rate) `AudioParam` representing the value which the node constantly outputs. |
## Methods
It inherits all methods from [`AudioScheduledSourceNode`](/docs/sources/audio-scheduled-source-node#methods).
-
-## Remarks
-
-#### `offset`
-- Float. Default: 1.
diff --git a/packages/audiodocs/docs/sources/oscillator-node.mdx b/packages/audiodocs/docs/sources/oscillator-node.mdx
index e36a7f339..ad9075862 100644
--- a/packages/audiodocs/docs/sources/oscillator-node.mdx
+++ b/packages/audiodocs/docs/sources/oscillator-node.mdx
@@ -25,6 +25,21 @@ Similar to all of `AudioScheduledSourceNodes`, it can be started only once. If y
## Constructor
+```tsx
+constructor(context: BaseAudioContext, options?: OscillatorOptions)
+```
+
+### `OscillatorOptions`
+
+Inherits all properties from [`AudioNodeOptions`](/docs/core/audio-node#audionodeoptions)
+
+| Parameter | Type | Default | Description |
+| :---: | :---: | :----: | :---- |
+| `type` | [`OscillatorType`](/docs/types/oscillator-type) | `sine` | Initial value for [`type`](/docs/sources/oscillator-node#properties). |
+| `frequency` | `number` | 440 | Initial value for [`frequency`](/docs/sources/oscillator-node#properties). |
+| `detune` | `number` | 0 | Initial value for [`detune`](/docs/sources/oscillator-node#properties). |
+
+Alternatively, you can use the `BaseAudioContext` factory method:
[`BaseAudioContext.createOscillator()`](/docs/core/base-audio-context#createoscillator)
## Example
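A sketch of the constructor options listed above; `OscillatorNode` already appears in the package's examples:

```tsx
import { AudioContext, OscillatorNode } from 'react-native-audio-api';

const audioContext = new AudioContext();

// Constructor form with explicit options
const oscillator = new OscillatorNode(audioContext, {
  type: 'square',
  frequency: 220,
  detune: 15,
});

// Factory form, which uses the defaults listed above (sine, 440 Hz, 0 cents)
const defaultOscillator = audioContext.createOscillator();

oscillator.connect(audioContext.destination);
oscillator.start();
oscillator.stop(audioContext.currentTime + 1);
```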
diff --git a/packages/audiodocs/docs/sources/recorder-adapter-node.mdx b/packages/audiodocs/docs/sources/recorder-adapter-node.mdx
index 6f3d57cd9..cbc03c48e 100644
--- a/packages/audiodocs/docs/sources/recorder-adapter-node.mdx
+++ b/packages/audiodocs/docs/sources/recorder-adapter-node.mdx
@@ -12,6 +12,11 @@ It lets you compose audio input from recorder into an audio graph.
## Constructor
+```tsx
+constructor(context: BaseAudioContext)
+```
+
+Alternatively, you can use the `BaseAudioContext` factory method:
[`BaseAudioContext.createRecorderAdapter()`](/docs/core/base-audio-context#createrecorderadapter)
## Example
diff --git a/packages/audiodocs/docs/sources/streamer-node.mdx b/packages/audiodocs/docs/sources/streamer-node.mdx
index 0d22e0e62..d3cb176f9 100644
--- a/packages/audiodocs/docs/sources/streamer-node.mdx
+++ b/packages/audiodocs/docs/sources/streamer-node.mdx
@@ -20,7 +20,19 @@ Similar to all of `AudioScheduledSourceNodes`, it can be started only once. If y
## Constructor
-[`BaseAudioContext.createStreamer()`](/docs/core/base-audio-context#createostreamer)
+```tsx
+constructor(context: BaseAudioContext, options?: StreamerOptions)
+```
+
+### `StreamerOptions`
+
+| Parameter | Type | Default | Description |
+| :---: | :---: | :----: | :---- |
+| `streamPath` | `string` | - | Initial value for [`streamPath`](/docs/sources/streamer-node#properties) |
+
+Alternatively, you can use the `BaseAudioContext` factory method:
+
+[`BaseAudioContext.createStreamer()`](/docs/core/base-audio-context#createstreamer-).
## Example
@@ -45,18 +57,21 @@ function App() {
## Properties
-`StreamerNode` does not define any additional properties.
It inherits all properties from [`AudioScheduledSourceNode`](/docs/sources/audio-scheduled-source-node#properties).
+| Name | Type | Description |
+| :----: | :----: | :------- |
+| `streamPath` | `string` | String representing the URL of the stream. |
+
## Methods
It inherits all methods from [`AudioScheduledSourceNode`](/docs/sources/audio-scheduled-source-node#methods).
### `initialize`
-Initializes the streamer with a link to an external HLS source.
+Initializes the streamer with a link to an external source.
| Parameter | Type | Description |
| :---: | :---: | :---- |
-| `streamPath` | `string` | Link pointing to an external HLS source |
+| `streamPath` | `string` | Link pointing to an external source |
#### Returns `boolean` indicating if setup of streaming has worked.
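A sketch that ties the new `streamPath` property to `initialize()`; the URL is a placeholder and the factory method is used as documented above:

```tsx
import { AudioContext } from 'react-native-audio-api';

const audioContext = new AudioContext();
const streamer = audioContext.createStreamer();

// Placeholder URL; any stream supported by the underlying decoder should work
const ok = streamer.initialize('https://example.com/live/stream.m3u8');

if (ok) {
  console.log(streamer.streamPath); // reflects the URL passed to initialize()
  streamer.connect(audioContext.destination);
  streamer.start();
}
```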
diff --git a/packages/audiodocs/docs/spatialization/_category_.json b/packages/audiodocs/docs/spatialization/_category_.json
deleted file mode 100644
index b988e18a3..000000000
--- a/packages/audiodocs/docs/spatialization/_category_.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
- "label": "Spatialization",
- "position": 6,
- "link": {
- "type": "generated-index"
- }
-}
diff --git a/packages/audiodocs/docs/worklets/worklet-node.mdx b/packages/audiodocs/docs/worklets/worklet-node.mdx
index 8f06fed10..ccd04ce6b 100644
--- a/packages/audiodocs/docs/worklets/worklet-node.mdx
+++ b/packages/audiodocs/docs/worklets/worklet-node.mdx
@@ -17,6 +17,15 @@ This node lets you execute a worklet on the UI thread. bufferLength specifies th
## Constructor
+```tsx
+constructor(
+ context: BaseAudioContext,
+ runtime: AudioWorkletRuntime,
+ callback: (audioData: Array, channelCount: number) => void,
+ bufferLength: number,
+ inputChannelCount: number)
+```
+Alternatively, you can use the `BaseAudioContext` factory method:
[`BaseAudioContext.createWorkletNode(worklet, bufferLength, inputChannelCount, workletRuntime)`](/docs/core/base-audio-context#createworkletnode-)
## Example
diff --git a/packages/audiodocs/docs/worklets/worklet-processing-node.mdx b/packages/audiodocs/docs/worklets/worklet-processing-node.mdx
index f98ff5343..3ae2725d1 100644
--- a/packages/audiodocs/docs/worklets/worklet-processing-node.mdx
+++ b/packages/audiodocs/docs/worklets/worklet-processing-node.mdx
@@ -18,6 +18,18 @@ For more information about worklets, see our [Introduction to worklets](/docs/wo
## Constructor
+```tsx
+constructor(
+ context: BaseAudioContext,
+ runtime: AudioWorkletRuntime,
+ callback: (
+ inputData: Array,
+ outputData: Array,
+ framesToProcess: number,
+ currentTime: number
+ ) => void)
+```
+Alternatively, you can use the `BaseAudioContext` factory method:
[`BaseAudioContext.createWorkletProcessingNode(worklet, workletRuntime)`](/docs/core/base-audio-context#createworkletprocessingnode-)
## Example
diff --git a/packages/audiodocs/docs/worklets/worklet-source-node.mdx b/packages/audiodocs/docs/worklets/worklet-source-node.mdx
index 3ebef4a02..f223cfada 100644
--- a/packages/audiodocs/docs/worklets/worklet-source-node.mdx
+++ b/packages/audiodocs/docs/worklets/worklet-source-node.mdx
@@ -18,6 +18,18 @@ For more information about worklets, see our [Introduction to worklets](/docs/wo
## Constructor
+```tsx
+constructor(
+ context: BaseAudioContext,
+ runtime: AudioWorkletRuntime,
+ callback: (
+ audioData: Array,
+ framesToProcess: number,
+ currentTime: number,
+ startOffset: number
+ ) => void)
+```
+Alternatively, you can use the `BaseAudioContext` factory method:
[`BaseAudioContext.createWorkletSourceNode(worklet, workletRuntime)`](/docs/core/base-audio-context#createworkletsourcenode-)
## Example
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp
index 8487e859c..fe9e921cc 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp
+++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp
@@ -21,7 +21,9 @@
#include
#include
+#include
#include
+#include
#include
namespace audioapi {
@@ -157,14 +159,21 @@ JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createRecorderAdapter) {
}
JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createOscillator) {
- auto oscillator = context_->createOscillator();
+ auto options = args[0].asObject(runtime);
+ auto oscillatorOptions = audioapi::option_parser::parseOscillatorOptions(runtime, options);
+ auto oscillator = context_->createOscillator(oscillatorOptions);
auto oscillatorHostObject = std::make_shared(oscillator);
return jsi::Object::createFromHostObject(runtime, oscillatorHostObject);
}
JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createStreamer) {
#if !RN_AUDIO_API_FFMPEG_DISABLED
- auto streamer = context_->createStreamer();
+ auto streamerOptions = StreamerOptions();
+ if (!args[0].isUndefined()) {
+ auto options = args[0].asObject(runtime);
+ streamerOptions = audioapi::option_parser::parseStreamerOptions(runtime, options);
+ }
+ auto streamer = context_->createStreamer(streamerOptions);
auto streamerHostObject = std::make_shared(streamer);
auto object = jsi::Object::createFromHostObject(runtime, streamerHostObject);
object.setExternalMemoryPressure(runtime, StreamerNodeHostObject::getSizeInBytes());
@@ -175,20 +184,26 @@ JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createStreamer) {
}
JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createConstantSource) {
- auto constantSource = context_->createConstantSource();
+ auto options = args[0].asObject(runtime);
+ auto constantSourceOptions =
+ audioapi::option_parser::parseConstantSourceOptions(runtime, options);
+ auto constantSource = context_->createConstantSource(constantSourceOptions);
auto constantSourceHostObject = std::make_shared(constantSource);
return jsi::Object::createFromHostObject(runtime, constantSourceHostObject);
}
JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createGain) {
- auto gain = context_->createGain();
+ auto options = args[0].asObject(runtime);
+ auto gainOptions = audioapi::option_parser::parseGainOptions(runtime, options);
+ auto gain = context_->createGain(std::move(gainOptions));
auto gainHostObject = std::make_shared(gain);
return jsi::Object::createFromHostObject(runtime, gainHostObject);
}
JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createDelay) {
- auto maxDelayTime = static_cast(args[0].getNumber());
- auto delayNode = context_->createDelay(maxDelayTime);
+ auto options = args[0].asObject(runtime);
+ auto delayOptions = audioapi::option_parser::parseDelayOptions(runtime, options);
+ auto delayNode = context_->createDelay(delayOptions);
auto delayNodeHostObject = std::make_shared(delayNode);
auto jsiObject = jsi::Object::createFromHostObject(runtime, delayNodeHostObject);
jsiObject.setExternalMemoryPressure(runtime, delayNodeHostObject->getSizeInBytes());
@@ -196,63 +211,52 @@ JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createDelay) {
}
JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createStereoPanner) {
- auto stereoPanner = context_->createStereoPanner();
+ auto options = args[0].asObject(runtime);
+ auto stereoPannerOptions = audioapi::option_parser::parseStereoPannerOptions(runtime, options);
+ auto stereoPanner = context_->createStereoPanner(stereoPannerOptions);
auto stereoPannerHostObject = std::make_shared(stereoPanner);
return jsi::Object::createFromHostObject(runtime, stereoPannerHostObject);
}
JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createBiquadFilter) {
- auto biquadFilter = context_->createBiquadFilter();
+ auto options = args[0].asObject(runtime);
+ auto biquadFilterOptions = audioapi::option_parser::parseBiquadFilterOptions(runtime, options);
+ auto biquadFilter = context_->createBiquadFilter(biquadFilterOptions);
auto biquadFilterHostObject = std::make_shared(biquadFilter);
return jsi::Object::createFromHostObject(runtime, biquadFilterHostObject);
}
JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createIIRFilter) {
- auto feedforwardArray = args[0].asObject(runtime).asArray(runtime);
- auto feedbackArray = args[1].asObject(runtime).asArray(runtime);
-
- size_t feedforwardLength = feedforwardArray.length(runtime);
- size_t feedbackLength = feedbackArray.length(runtime);
-
- std::vector feedforward;
- std::vector feedback;
-
- feedforward.reserve(feedforwardLength);
- feedback.reserve(feedbackLength);
-
- for (size_t i = 0; i < feedforwardLength; ++i) {
- feedforward.push_back(feedforwardArray.getValueAtIndex(runtime, i).asNumber());
- }
-
- for (size_t i = 0; i < feedbackLength; ++i) {
- feedback.push_back(feedbackArray.getValueAtIndex(runtime, i).asNumber());
- }
-
- auto iirFilter = context_->createIIRFilter(feedforward, feedback);
+ auto options = args[0].asObject(runtime);
+ auto iirFilterOptions = audioapi::option_parser::parseIIRFilterOptions(runtime, options);
+ auto iirFilter = context_->createIIRFilter(iirFilterOptions);
auto iirFilterHostObject = std::make_shared(iirFilter);
return jsi::Object::createFromHostObject(runtime, iirFilterHostObject);
}
JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createBufferSource) {
- auto pitchCorrection = args[0].asBool();
- auto bufferSource = context_->createBufferSource(pitchCorrection);
+ auto options = args[0].asObject(runtime);
+ auto audioBufferSourceOptions =
+ audioapi::option_parser::parseAudioBufferSourceOptions(runtime, options);
+ auto bufferSource = context_->createBufferSource(audioBufferSourceOptions);
auto bufferSourceHostObject = std::make_shared(bufferSource);
return jsi::Object::createFromHostObject(runtime, bufferSourceHostObject);
}
JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createBufferQueueSource) {
- auto pitchCorrection = args[0].asBool();
- auto bufferSource = context_->createBufferQueueSource(pitchCorrection);
+ auto options = args[0].asObject(runtime);
+ auto baseAudioBufferSourceOptions =
+ audioapi::option_parser::parseBaseAudioBufferSourceOptions(runtime, options);
+ auto bufferSource = context_->createBufferQueueSource(baseAudioBufferSourceOptions);
auto bufferStreamSourceHostObject =
std::make_shared(bufferSource);
return jsi::Object::createFromHostObject(runtime, bufferStreamSourceHostObject);
}
JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createBuffer) {
- auto numberOfChannels = static_cast(args[0].getNumber());
- auto length = static_cast(args[1].getNumber());
- auto sampleRate = static_cast(args[2].getNumber());
- auto buffer = BaseAudioContext::createBuffer(numberOfChannels, length, sampleRate);
+ auto options = args[0].asObject(runtime);
+ auto audioBufferOptions = audioapi::option_parser::parseAudioBufferOptions(runtime, options);
+ auto buffer = BaseAudioContext::createBuffer(audioBufferOptions);
auto bufferHostObject = std::make_shared(buffer);
auto jsiObject = jsi::Object::createFromHostObject(runtime, bufferHostObject);
@@ -286,20 +290,17 @@ JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createPeriodicWave) {
}
JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createAnalyser) {
- auto analyser = context_->createAnalyser();
+ auto options = args[0].asObject(runtime);
+ auto analyserOptions = audioapi::option_parser::parseAnalyserOptions(runtime, options);
+ auto analyser = context_->createAnalyser(analyserOptions);
auto analyserHostObject = std::make_shared(analyser);
return jsi::Object::createFromHostObject(runtime, analyserHostObject);
}
JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createConvolver) {
- auto disableNormalization = args[1].getBool();
- std::shared_ptr convolver;
- if (args[0].isUndefined()) {
- convolver = context_->createConvolver(nullptr, disableNormalization);
- } else {
- auto bufferHostObject = args[0].getObject(runtime).asHostObject(runtime);
- convolver = context_->createConvolver(bufferHostObject->audioBuffer_, disableNormalization);
- }
+ auto options = args[0].asObject(runtime);
+ auto convolverOptions = audioapi::option_parser::parseConvolverOptions(runtime, options);
+ auto convolver = context_->createConvolver(convolverOptions);
auto convolverHostObject = std::make_shared(convolver);
auto jsiObject = jsi::Object::createFromHostObject(runtime, convolverHostObject);
if (!args[0].isUndefined()) {
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/StreamerNodeHostObject.cpp b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/StreamerNodeHostObject.cpp
index fad23a17e..3a9da1c3c 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/StreamerNodeHostObject.cpp
+++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/StreamerNodeHostObject.cpp
@@ -10,6 +10,12 @@ namespace audioapi {
StreamerNodeHostObject::StreamerNodeHostObject(const std::shared_ptr &node)
: AudioScheduledSourceNodeHostObject(node) {
addFunctions(JSI_EXPORT_FUNCTION(StreamerNodeHostObject, initialize));
+ addGetters(JSI_EXPORT_PROPERTY_GETTER(StreamerNodeHostObject, streamPath));
+}
+
+JSI_PROPERTY_GETTER_IMPL(StreamerNodeHostObject, streamPath) {
+ auto streamerNode = std::static_pointer_cast(node_);
+ return jsi::String::createFromUtf8(runtime, streamerNode->getStreamPath());
}
JSI_HOST_FUNCTION_IMPL(StreamerNodeHostObject, initialize) {
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/StreamerNodeHostObject.h b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/StreamerNodeHostObject.h
index 33e691679..43118bc0e 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/StreamerNodeHostObject.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/StreamerNodeHostObject.h
@@ -19,6 +19,7 @@ class StreamerNodeHostObject : public AudioScheduledSourceNodeHostObject {
return SIZE;
}
+ JSI_PROPERTY_GETTER_DECL(streamPath);
JSI_HOST_FUNCTION_DECL(initialize);
private:
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/utils/NodeOptions.h b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/utils/NodeOptions.h
new file mode 100644
index 000000000..b1eb35453
--- /dev/null
+++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/utils/NodeOptions.h
@@ -0,0 +1,104 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace audioapi {
+struct AudioNodeOptions {
+ int channelCount = 2;
+ ChannelCountMode channelCountMode = ChannelCountMode::MAX;
+ ChannelInterpretation channelInterpretation = ChannelInterpretation::SPEAKERS;
+};
+
+struct GainOptions : AudioNodeOptions {
+ float gain = 1.0f;
+};
+
+struct StereoPannerOptions : AudioNodeOptions {
+ float pan = 0.0f;
+};
+
+struct ConvolverOptions : AudioNodeOptions {
+ std::shared_ptr bus = nullptr;
+ bool disableNormalization = false;
+};
+
+struct ConstantSourceOptions {
+ float offset = 1.0f;
+};
+
+struct AnalyserOptions : AudioNodeOptions {
+ int fftSize = 2048;
+ float minDecibels = -100.0f;
+ float maxDecibels = -30.0f;
+ float smoothingTimeConstant = 0.8f;
+};
+
+struct BiquadFilterOptions : AudioNodeOptions {
+ BiquadFilterType type = BiquadFilterType::LOWPASS;
+ float frequency = 350.0f;
+ float detune = 0.0f;
+ float Q = 1.0f;
+ float gain = 0.0f;
+};
+
+struct OscillatorOptions {
+ std::shared_ptr periodicWave = nullptr;
+ float frequency = 440.0f;
+ float detune = 0.0f;
+ OscillatorType type = OscillatorType::SINE;
+};
+
+struct BaseAudioBufferSourceOptions {
+ float detune = 0.0f;
+ bool pitchCorrection = false;
+ float playbackRate = 1.0f;
+};
+
+struct AudioBufferSourceOptions : BaseAudioBufferSourceOptions {
+ std::shared_ptr buffer = nullptr;
+ bool loop = false;
+ float loopStart = 0.0f;
+ float loopEnd = 0.0f;
+};
+
+struct StreamerOptions {
+ std::string streamPath = "";
+};
+
+struct AudioBufferOptions {
+ int numberOfChannels = 1;
+ size_t length = 0;
+ float sampleRate = 44100.0f;
+};
+
+struct DelayOptions : AudioNodeOptions {
+ float maxDelayTime = 1.0f;
+ float delayTime = 0.0f;
+};
+
+struct IIRFilterOptions : AudioNodeOptions {
+ std::vector feedforward;
+ std::vector feedback;
+
+ IIRFilterOptions() = default;
+
+  explicit IIRFilterOptions(const AudioNodeOptions &options) : AudioNodeOptions(options) {}
+
+ IIRFilterOptions(const std::vector &ff, const std::vector &fb)
+ : feedforward(ff), feedback(fb) {}
+
+ IIRFilterOptions(std::vector &&ff, std::vector &&fb)
+ : feedforward(std::move(ff)), feedback(std::move(fb)) {}
+};
+
+} // namespace audioapi
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/utils/NodeOptionsParser.h b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/utils/NodeOptionsParser.h
new file mode 100644
index 000000000..109410987
--- /dev/null
+++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/utils/NodeOptionsParser.h
@@ -0,0 +1,238 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+namespace audioapi::option_parser {
+AudioNodeOptions parseAudioNodeOptions(jsi::Runtime &runtime, const jsi::Object &optionsObject) {
+ AudioNodeOptions options;
+
+ options.channelCount =
+ static_cast(optionsObject.getProperty(runtime, "channelCount").getNumber());
+
+ auto channelCountModeStr =
+ optionsObject.getProperty(runtime, "channelCountMode").asString(runtime).utf8(runtime);
+
+ if (channelCountModeStr == "max") {
+ options.channelCountMode = ChannelCountMode::MAX;
+ } else if (channelCountModeStr == "clamped-max") {
+ options.channelCountMode = ChannelCountMode::CLAMPED_MAX;
+ } else if (channelCountModeStr == "explicit") {
+ options.channelCountMode = ChannelCountMode::EXPLICIT;
+ }
+
+ auto channelInterpretationStr =
+ optionsObject.getProperty(runtime, "channelInterpretation").asString(runtime).utf8(runtime);
+
+ if (channelInterpretationStr == "speakers") {
+ options.channelInterpretation = ChannelInterpretation::SPEAKERS;
+ } else if (channelInterpretationStr == "discrete") {
+ options.channelInterpretation = ChannelInterpretation::DISCRETE;
+ }
+
+ return options;
+}
+
+GainOptions parseGainOptions(jsi::Runtime &runtime, const jsi::Object &optionsObject) {
+ GainOptions options(parseAudioNodeOptions(runtime, optionsObject));
+ options.gain = static_cast(optionsObject.getProperty(runtime, "gain").getNumber());
+ return options;
+}
+
+StereoPannerOptions parseStereoPannerOptions(
+ jsi::Runtime &runtime,
+ const jsi::Object &optionsObject) {
+ StereoPannerOptions options(parseAudioNodeOptions(runtime, optionsObject));
+ options.pan = static_cast(optionsObject.getProperty(runtime, "pan").getNumber());
+ return options;
+}
+
+ConvolverOptions parseConvolverOptions(jsi::Runtime &runtime, const jsi::Object &optionsObject) {
+ ConvolverOptions options(parseAudioNodeOptions(runtime, optionsObject));
+ options.disableNormalization =
+ static_cast(optionsObject.getProperty(runtime, "disableNormalization").getNumber());
+ if (optionsObject.hasProperty(runtime, "buffer")) {
+ auto bufferHostObject = optionsObject.getProperty(runtime, "buffer")
+ .getObject(runtime)
+ .asHostObject(runtime);
+ options.bus = bufferHostObject->audioBuffer_;
+ }
+ return options;
+}
+
+ConstantSourceOptions parseConstantSourceOptions(
+ jsi::Runtime &runtime,
+ const jsi::Object &optionsObject) {
+ ConstantSourceOptions options;
+ options.offset = static_cast(optionsObject.getProperty(runtime, "offset").getNumber());
+ return options;
+}
+
+AnalyserOptions parseAnalyserOptions(jsi::Runtime &runtime, const jsi::Object &optionsObject) {
+ AnalyserOptions options(parseAudioNodeOptions(runtime, optionsObject));
+ options.fftSize = static_cast(optionsObject.getProperty(runtime, "fftSize").getNumber());
+ options.minDecibels =
+ static_cast(optionsObject.getProperty(runtime, "minDecibels").getNumber());
+ options.maxDecibels =
+ static_cast(optionsObject.getProperty(runtime, "maxDecibels").getNumber());
+ options.smoothingTimeConstant =
+ static_cast(optionsObject.getProperty(runtime, "smoothingTimeConstant").getNumber());
+ return options;
+}
+
+BiquadFilterOptions parseBiquadFilterOptions(
+ jsi::Runtime &runtime,
+ const jsi::Object &optionsObject) {
+ BiquadFilterOptions options(parseAudioNodeOptions(runtime, optionsObject));
+
+ auto typeStr = optionsObject.getProperty(runtime, "type").asString(runtime).utf8(runtime);
+
+ if (typeStr == "lowpass") {
+ options.type = BiquadFilterType::LOWPASS;
+ } else if (typeStr == "highpass") {
+ options.type = BiquadFilterType::HIGHPASS;
+ } else if (typeStr == "bandpass") {
+ options.type = BiquadFilterType::BANDPASS;
+ } else if (typeStr == "lowshelf") {
+ options.type = BiquadFilterType::LOWSHELF;
+ } else if (typeStr == "highshelf") {
+ options.type = BiquadFilterType::HIGHSHELF;
+ } else if (typeStr == "peaking") {
+ options.type = BiquadFilterType::PEAKING;
+ } else if (typeStr == "notch") {
+ options.type = BiquadFilterType::NOTCH;
+ } else if (typeStr == "allpass") {
+ options.type = BiquadFilterType::ALLPASS;
+ }
+
+ options.frequency =
+ static_cast(optionsObject.getProperty(runtime, "frequency").getNumber());
+ options.detune = static_cast(optionsObject.getProperty(runtime, "detune").getNumber());
+ options.Q = static_cast(optionsObject.getProperty(runtime, "Q").getNumber());
+ options.gain = static_cast(optionsObject.getProperty(runtime, "gain").getNumber());
+
+ return options;
+}
+
+OscillatorOptions parseOscillatorOptions(jsi::Runtime &runtime, const jsi::Object &optionsObject) {
+ OscillatorOptions options;
+
+ auto typeStr = optionsObject.getProperty(runtime, "type").asString(runtime).utf8(runtime);
+
+ if (typeStr == "sine") {
+ options.type = OscillatorType::SINE;
+ } else if (typeStr == "square") {
+ options.type = OscillatorType::SQUARE;
+ } else if (typeStr == "sawtooth") {
+ options.type = OscillatorType::SAWTOOTH;
+ } else if (typeStr == "triangle") {
+ options.type = OscillatorType::TRIANGLE;
+ } else if (typeStr == "custom") {
+ options.type = OscillatorType::CUSTOM;
+ }
+
+ options.frequency =
+ static_cast(optionsObject.getProperty(runtime, "frequency").getNumber());
+ options.detune = static_cast(optionsObject.getProperty(runtime, "detune").getNumber());
+
+ if (optionsObject.hasProperty(runtime, "periodicWave")) {
+ auto periodicWaveHostObject = optionsObject.getProperty(runtime, "periodicWave")
+ .getObject(runtime)
+ .asHostObject(runtime);
+ options.periodicWave = periodicWaveHostObject->periodicWave_;
+ }
+
+ return options;
+}
+
+BaseAudioBufferSourceOptions parseBaseAudioBufferSourceOptions(
+ jsi::Runtime &runtime,
+ const jsi::Object &optionsObject) {
+ BaseAudioBufferSourceOptions options;
+ options.detune = static_cast(optionsObject.getProperty(runtime, "detune").getNumber());
+ options.playbackRate =
+ static_cast(optionsObject.getProperty(runtime, "playbackRate").getNumber());
+ options.pitchCorrection =
+ static_cast(optionsObject.getProperty(runtime, "pitchCorrection").getBool());
+ return options;
+}
+
+AudioBufferSourceOptions parseAudioBufferSourceOptions(
+ jsi::Runtime &runtime,
+ const jsi::Object &optionsObject) {
+ AudioBufferSourceOptions options(parseBaseAudioBufferSourceOptions(runtime, optionsObject));
+ if (optionsObject.hasProperty(runtime, "buffer")) {
+ auto bufferHostObject = optionsObject.getProperty(runtime, "buffer")
+ .getObject(runtime)
+ .asHostObject(runtime);
+ options.buffer = bufferHostObject->audioBuffer_;
+ }
+ options.loop = static_cast(optionsObject.getProperty(runtime, "loop").getBool());
+ options.loopStart =
+ static_cast(optionsObject.getProperty(runtime, "loopStart").getNumber());
+ options.loopEnd = static_cast(optionsObject.getProperty(runtime, "loopEnd").getNumber());
+ return options;
+}
+
+StreamerOptions parseStreamerOptions(jsi::Runtime &runtime, const jsi::Object &optionsObject) {
+ auto options = StreamerOptions();
+ if (optionsObject.hasProperty(runtime, "streamPath")) {
+ options.streamPath =
+ optionsObject.getProperty(runtime, "streamPath").asString(runtime).utf8(runtime);
+ }
+ return options;
+}
+
+AudioBufferOptions parseAudioBufferOptions(
+ jsi::Runtime &runtime,
+ const jsi::Object &optionsObject) {
+ AudioBufferOptions options;
+ options.numberOfChannels =
+ static_cast(optionsObject.getProperty(runtime, "numberOfChannels").getNumber());
+ options.length = static_cast(optionsObject.getProperty(runtime, "length").getNumber());
+ options.sampleRate =
+ static_cast(optionsObject.getProperty(runtime, "sampleRate").getNumber());
+ return options;
+}
+
+DelayOptions parseDelayOptions(jsi::Runtime &runtime, const jsi::Object &optionsObject) {
+ DelayOptions options(parseAudioNodeOptions(runtime, optionsObject));
+ options.maxDelayTime =
+ static_cast(optionsObject.getProperty(runtime, "maxDelayTime").getNumber());
+ options.delayTime =
+ static_cast(optionsObject.getProperty(runtime, "delayTime").getNumber());
+ return options;
+}
+
+IIRFilterOptions parseIIRFilterOptions(jsi::Runtime &runtime, const jsi::Object &optionsObject) {
+ IIRFilterOptions options(parseAudioNodeOptions(runtime, optionsObject));
+
+ auto feedforwardArray =
+ optionsObject.getProperty(runtime, "feedforward").asObject(runtime).asArray(runtime);
+ size_t feedforwardLength = feedforwardArray.size(runtime);
+ options.feedforward.reserve(feedforwardLength);
+ for (size_t i = 0; i < feedforwardLength; ++i) {
+ options.feedforward.push_back(
+ static_cast(feedforwardArray.getValueAtIndex(runtime, i).getNumber()));
+ }
+
+ auto feedbackArray =
+ optionsObject.getProperty(runtime, "feedback").asObject(runtime).asArray(runtime);
+ size_t feedbackLength = feedbackArray.size(runtime);
+ options.feedback.reserve(feedbackLength);
+ for (size_t i = 0; i < feedbackLength; ++i) {
+ options.feedback.push_back(
+ static_cast(feedbackArray.getValueAtIndex(runtime, i).getNumber()));
+ }
+
+ return options;
+}
+} // namespace audioapi::option_parser
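For reference, a sketch of the object shape that `parseGainOptions` above expects from the JS layer; every field must be present because the parser reads each property unconditionally (the interface name is hypothetical):

```tsx
// Hypothetical TS mirror of the fields read by parseGainOptions.
// The JS layer is expected to fill in defaults before crossing into native,
// since the parser calls getProperty on every field without a hasProperty check.
interface NativeGainOptions {
  channelCount: number;
  channelCountMode: 'max' | 'clamped-max' | 'explicit';
  channelInterpretation: 'speakers' | 'discrete';
  gain: number;
}

const nativeGainOptions: NativeGainOptions = {
  channelCount: 2,
  channelCountMode: 'max',
  channelInterpretation: 'speakers',
  gain: 1.0,
};
```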
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/AudioNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/AudioNode.cpp
index 47ff549b3..c616d5c92 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/AudioNode.cpp
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/AudioNode.cpp
@@ -1,3 +1,4 @@
+#include
#include
#include
#include
@@ -15,6 +16,15 @@ AudioNode::AudioNode(BaseAudioContext *context) : context_(context) {
std::make_shared(RENDER_QUANTUM_SIZE, channelCount_, context->getSampleRate());
}
+AudioNode::AudioNode(BaseAudioContext *context, const AudioNodeOptions &options)
+ : context_(context),
+ channelCount_(options.channelCount),
+ channelCountMode_(options.channelCountMode),
+ channelInterpretation_(options.channelInterpretation) {
+ audioBus_ =
+ std::make_shared(RENDER_QUANTUM_SIZE, channelCount_, context->getSampleRate());
+}
+
AudioNode::~AudioNode() {
if (isInitialized_) {
cleanup();
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/AudioNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/AudioNode.h
index a56e79454..8110e226a 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/AudioNode.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/AudioNode.h
@@ -16,10 +16,13 @@ namespace audioapi {
class AudioBus;
class BaseAudioContext;
class AudioParam;
+class AudioNodeOptions;
class AudioNode : public std::enable_shared_from_this {
public:
explicit AudioNode(BaseAudioContext *context);
+  // Options are usually passed as a derived class; keep in mind that the object passed in will be sliced to AudioNodeOptions.
+ explicit AudioNode(BaseAudioContext *context, const AudioNodeOptions &options);
virtual ~AudioNode();
int getNumberOfInputs() const;
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.cpp
index afebaf2b5..0389a25dc 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.cpp
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.cpp
@@ -1,3 +1,4 @@
+#include
#include
#include
#include
@@ -69,7 +70,7 @@ double BaseAudioContext::getCurrentTime() const {
return destination_->getCurrentTime();
}
-std::shared_ptr BaseAudioContext::getDestination() {
+std::shared_ptr BaseAudioContext::getDestination() const {
return destination_;
}
@@ -113,21 +114,22 @@ std::shared_ptr BaseAudioContext::createRecorderAdapter() {
return recorderAdapter;
}
-std::shared_ptr BaseAudioContext::createOscillator() {
- auto oscillator = std::make_shared(this);
+std::shared_ptr BaseAudioContext::createOscillator(OscillatorOptions options) {
+ auto oscillator = std::make_shared(this, std::move(options));
nodeManager_->addSourceNode(oscillator);
return oscillator;
}
-std::shared_ptr BaseAudioContext::createConstantSource() {
- auto constantSource = std::make_shared(this);
+std::shared_ptr BaseAudioContext::createConstantSource(
+ ConstantSourceOptions options) {
+ auto constantSource = std::make_shared(this, std::move(options));
nodeManager_->addSourceNode(constantSource);
return constantSource;
}
-std::shared_ptr BaseAudioContext::createStreamer() {
+std::shared_ptr BaseAudioContext::createStreamer(StreamerOptions options) {
#if !RN_AUDIO_API_FFMPEG_DISABLED
- auto streamer = std::make_shared(this);
+ auto streamer = std::make_shared(this, std::move(options));
nodeManager_->addSourceNode(streamer);
return streamer;
#else
@@ -135,54 +137,54 @@ std::shared_ptr BaseAudioContext::createStreamer() {
#endif // RN_AUDIO_API_FFMPEG_DISABLED
}
-std::shared_ptr BaseAudioContext::createGain() {
- auto gain = std::make_shared(this);
+std::shared_ptr BaseAudioContext::createGain(GainOptions options) {
+ auto gain = std::make_shared(this, std::move(options));
nodeManager_->addProcessingNode(gain);
return gain;
}
-std::shared_ptr BaseAudioContext::createDelay(float maxDelayTime) {
- auto delay = std::make_shared(this, maxDelayTime);
- nodeManager_->addProcessingNode(delay);
- return delay;
-}
-
-std::shared_ptr BaseAudioContext::createStereoPanner() {
- auto stereoPanner = std::make_shared(this);
+std::shared_ptr BaseAudioContext::createStereoPanner(
+ StereoPannerOptions options) {
+ auto stereoPanner = std::make_shared(this, std::move(options));
nodeManager_->addProcessingNode(stereoPanner);
return stereoPanner;
}
-std::shared_ptr BaseAudioContext::createBiquadFilter() {
- auto biquadFilter = std::make_shared(this);
- nodeManager_->addProcessingNode(biquadFilter);
- return biquadFilter;
+std::shared_ptr BaseAudioContext::createDelay(DelayOptions options) {
+ auto delay = std::make_shared(this, std::move(options));
+ nodeManager_->addProcessingNode(delay);
+ return delay;
}
-std::shared_ptr BaseAudioContext::createIIRFilter(
- const std::vector &feedforward,
- const std::vector &feedback) {
- auto iirFilter = std::make_shared(this, feedforward, feedback);
- nodeManager_->addProcessingNode(iirFilter);
- return iirFilter;
+std::shared_ptr BaseAudioContext::createBiquadFilter(
+ BiquadFilterOptions options) {
+ auto biquadFilter = std::make_shared(this, std::move(options));
+ nodeManager_->addProcessingNode(biquadFilter);
+ return biquadFilter;
}
-std::shared_ptr<AudioBufferSourceNode> BaseAudioContext::createBufferSource(bool pitchCorrection) {
- auto bufferSource = std::make_shared<AudioBufferSourceNode>(this, pitchCorrection);
+std::shared_ptr<AudioBufferSourceNode> BaseAudioContext::createBufferSource(
+ AudioBufferSourceOptions options) {
+ auto bufferSource = std::make_shared<AudioBufferSourceNode>(this, std::move(options));
nodeManager_->addSourceNode(bufferSource);
return bufferSource;
}
+std::shared_ptr<IIRFilterNode> BaseAudioContext::createIIRFilter(IIRFilterOptions options) {
+ auto iirFilter = std::make_shared<IIRFilterNode>(this, std::move(options));
+ nodeManager_->addProcessingNode(iirFilter);
+ return iirFilter;
+}
+
std::shared_ptr<AudioBufferQueueSourceNode> BaseAudioContext::createBufferQueueSource(
- bool pitchCorrection) {
- auto bufferSource = std::make_shared<AudioBufferQueueSourceNode>(this, pitchCorrection);
+ BaseAudioBufferSourceOptions options) {
+ auto bufferSource = std::make_shared<AudioBufferQueueSourceNode>(this, std::move(options));
nodeManager_->addSourceNode(bufferSource);
return bufferSource;
}
-std::shared_ptr<AudioBuffer>
-BaseAudioContext::createBuffer(int numberOfChannels, size_t length, float sampleRate) {
- return std::make_shared<AudioBuffer>(numberOfChannels, length, sampleRate);
+std::shared_ptr<AudioBuffer> BaseAudioContext::createBuffer(AudioBufferOptions options) {
+ return std::make_shared<AudioBuffer>(std::move(options));
}
std::shared_ptr<PeriodicWave> BaseAudioContext::createPeriodicWave(
@@ -192,16 +194,14 @@ std::shared_ptr<PeriodicWave> BaseAudioContext::createPeriodicWave(
return std::make_shared<PeriodicWave>(sampleRate_, complexData, length, disableNormalization);
}
-std::shared_ptr<AnalyserNode> BaseAudioContext::createAnalyser() {
- auto analyser = std::make_shared<AnalyserNode>(this);
+std::shared_ptr<AnalyserNode> BaseAudioContext::createAnalyser(AnalyserOptions options) {
+ auto analyser = std::make_shared<AnalyserNode>(this, std::move(options));
nodeManager_->addProcessingNode(analyser);
return analyser;
}
-std::shared_ptr<ConvolverNode> BaseAudioContext::createConvolver(
- std::shared_ptr<AudioBuffer> buffer,
- bool disableNormalization) {
- auto convolver = std::make_shared<ConvolverNode>(this, buffer, disableNormalization);
+std::shared_ptr<ConvolverNode> BaseAudioContext::createConvolver(ConvolverOptions options) {
+ auto convolver = std::make_shared<ConvolverNode>(this, std::move(options));
nodeManager_->addProcessingNode(convolver);
return convolver;
}
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.h b/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.h
index b17304977..902df1b9f 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.h
@@ -37,6 +37,19 @@ class WorkletSourceNode;
class WorkletNode;
class WorkletProcessingNode;
class StreamerNode;
+class GainOptions;
+class StereoPannerOptions;
+class ConvolverOptions;
+class ConstantSourceOptions;
+class AnalyserOptions;
+class BiquadFilterOptions;
+class OscillatorOptions;
+class BaseAudioBufferSourceOptions;
+class AudioBufferSourceOptions;
+class StreamerOptions;
+class AudioBufferOptions;
+class DelayOptions;
+class IIRFilterOptions;
class BaseAudioContext {
public:
@@ -49,7 +62,7 @@ class BaseAudioContext {
[[nodiscard]] float getSampleRate() const;
[[nodiscard]] double getCurrentTime() const;
[[nodiscard]] std::size_t getCurrentSampleFrame() const;
- std::shared_ptr<AudioDestinationNode> getDestination();
+ std::shared_ptr<AudioDestinationNode> getDestination() const;
std::shared_ptr<RecorderAdapterNode> createRecorderAdapter();
std::shared_ptr<WorkletSourceNode> createWorkletSourceNode(
@@ -66,28 +79,24 @@ class BaseAudioContext {
std::shared_ptr &shareableWorklet,
std::weak_ptr runtime,
bool shouldLockRuntime = true);
- std::shared_ptr<OscillatorNode> createOscillator();
- std::shared_ptr<ConstantSourceNode> createConstantSource();
- std::shared_ptr<StreamerNode> createStreamer();
- std::shared_ptr<GainNode> createGain();
- std::shared_ptr<DelayNode> createDelay(float maxDelayTime);
- std::shared_ptr<StereoPannerNode> createStereoPanner();
- std::shared_ptr<BiquadFilterNode> createBiquadFilter();
- std::shared_ptr<IIRFilterNode> createIIRFilter(
- const std::vector<float> &feedforward,
- const std::vector<float> &feedback);
- std::shared_ptr<AudioBufferSourceNode> createBufferSource(bool pitchCorrection);
- std::shared_ptr<AudioBufferQueueSourceNode> createBufferQueueSource(bool pitchCorrection);
- static std::shared_ptr<AudioBuffer>
- createBuffer(int numberOfChannels, size_t length, float sampleRate);
+ std::shared_ptr<DelayNode> createDelay(DelayOptions options);
+ std::shared_ptr<IIRFilterNode> createIIRFilter(IIRFilterOptions options);
+ std::shared_ptr<OscillatorNode> createOscillator(OscillatorOptions options);
+ std::shared_ptr<ConstantSourceNode> createConstantSource(ConstantSourceOptions options);
+ std::shared_ptr<StreamerNode> createStreamer(StreamerOptions options);
+ std::shared_ptr<GainNode> createGain(GainOptions options);
+ std::shared_ptr<StereoPannerNode> createStereoPanner(StereoPannerOptions options);
+ std::shared_ptr<BiquadFilterNode> createBiquadFilter(BiquadFilterOptions options);
+ std::shared_ptr<AudioBufferSourceNode> createBufferSource(AudioBufferSourceOptions options);
+ std::shared_ptr<AudioBufferQueueSourceNode> createBufferQueueSource(
+ BaseAudioBufferSourceOptions options);
+ static std::shared_ptr<AudioBuffer> createBuffer(AudioBufferOptions options);
std::shared_ptr<PeriodicWave> createPeriodicWave(
const std::vector<std::complex<float>> &complexData,
bool disableNormalization,
int length);
- std::shared_ptr<AnalyserNode> createAnalyser();
- std::shared_ptr<ConvolverNode> createConvolver(
- std::shared_ptr<AudioBuffer> buffer,
- bool disableNormalization);
+ std::shared_ptr<AnalyserNode> createAnalyser(AnalyserOptions options);
+ std::shared_ptr<ConvolverNode> createConvolver(ConvolverOptions options);
std::shared_ptr<PeriodicWave> getBasicWaveForm(OscillatorType type);
[[nodiscard]] float getNyquistFrequency() const;
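
For orientation (not part of the diff): the BaseAudioContext hunks above replace every positional-argument factory with a single options struct. A minimal, purely illustrative sketch of the new call pattern, assuming the options types expose the fields the node constructors read (`options.gain`, `options.delayTime`, `options.maxDelayTime`) as public members:

```cpp
// Illustrative only; `context` is a BaseAudioContext*. Field names come from
// the constructor bodies in this diff; whether they are plain public members
// is an assumption.
GainOptions gainOptions;
gainOptions.gain = 0.5f;            // initial value for GainNode's gainParam_

DelayOptions delayOptions;
delayOptions.delayTime = 0.25f;     // seconds; initial value of delayTimeParam_
delayOptions.maxDelayTime = 1.0f;   // sizes the internal delay buffer

auto gain = context->createGain(std::move(gainOptions));
auto delay = context->createDelay(std::move(delayOptions));
```
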
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/analysis/AnalyserNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/analysis/AnalyserNode.cpp
index c01d3f5bd..9738de3c6 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/analysis/AnalyserNode.cpp
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/analysis/AnalyserNode.cpp
@@ -1,3 +1,4 @@
+#include
#include
#include
#include
@@ -12,12 +13,12 @@
#include
namespace audioapi {
-AnalyserNode::AnalyserNode(audioapi::BaseAudioContext *context)
- : AudioNode(context),
- fftSize_(2048),
- minDecibels_(-100),
- maxDecibels_(-30),
- smoothingTimeConstant_(0.8),
+AnalyserNode::AnalyserNode(audioapi::BaseAudioContext *context, AnalyserOptions options)
+ : AudioNode(context, options),
+ fftSize_(options.fftSize),
+ minDecibels_(options.minDecibels),
+ maxDecibels_(options.maxDecibels),
+ smoothingTimeConstant_(options.smoothingTimeConstant),
windowType_(WindowType::BLACKMAN) {
inputBuffer_ = std::make_unique<CircularAudioArray>(MAX_FFT_SIZE * 2);
tempBuffer_ = std::make_unique<AudioArray>(fftSize_);
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/analysis/AnalyserNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/analysis/AnalyserNode.h
index b62d0058a..cc85bd2d7 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/analysis/AnalyserNode.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/analysis/AnalyserNode.h
@@ -15,11 +15,12 @@ namespace audioapi {
class AudioBus;
class AudioArray;
class CircularAudioArray;
+class AnalyserOptions;
class AnalyserNode : public AudioNode {
public:
enum class WindowType { BLACKMAN, HANN };
- explicit AnalyserNode(BaseAudioContext *context);
+ explicit AnalyserNode(BaseAudioContext *context, AnalyserOptions options);
int getFftSize() const;
int getFrequencyBinCount() const;
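
For orientation (not part of the diff): the defaults the old AnalyserNode constructor hard-coded (fftSize 2048, minDecibels -100, maxDecibels -30, smoothingTimeConstant 0.8) now have to arrive through AnalyserOptions. A hypothetical sketch of such an options type, shown only to make the data flow explicit; the real definition ships elsewhere in this PR and may differ in field types and base class:

```cpp
// Hypothetical sketch. Field names mirror what the new constructor reads
// (options.fftSize, options.minDecibels, options.maxDecibels,
// options.smoothingTimeConstant); defaults mirror the values removed above.
struct AnalyserOptions {
  int fftSize = 2048;
  float minDecibels = -100.0f;
  float maxDecibels = -30.0f;
  float smoothingTimeConstant = 0.8f;
};
```
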
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/BiquadFilterNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/BiquadFilterNode.cpp
index 33f1f3aaa..0bb2a1767 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/BiquadFilterNode.cpp
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/BiquadFilterNode.cpp
@@ -26,6 +26,7 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include
#include
#include
#include
@@ -38,21 +39,21 @@
namespace audioapi {
-BiquadFilterNode::BiquadFilterNode(BaseAudioContext *context) : AudioNode(context) {
- frequencyParam_ =
- std::make_shared<AudioParam>(350.0, 0.0f, context->getNyquistFrequency(), context);
+BiquadFilterNode::BiquadFilterNode(BaseAudioContext *context, BiquadFilterOptions options)
+ : AudioNode(context, options) {
+ frequencyParam_ = std::make_shared<AudioParam>(
+ options.frequency, 0.0f, context->getNyquistFrequency(), context);
detuneParam_ = std::make_shared<AudioParam>(
- 0.0f,
+ options.detune,
-1200 * LOG2_MOST_POSITIVE_SINGLE_FLOAT,
1200 * LOG2_MOST_POSITIVE_SINGLE_FLOAT,
context);
QParam_ = std::make_shared<AudioParam>(
- 1.0f, MOST_NEGATIVE_SINGLE_FLOAT, MOST_POSITIVE_SINGLE_FLOAT, context);
+ options.Q, MOST_NEGATIVE_SINGLE_FLOAT, MOST_POSITIVE_SINGLE_FLOAT, context);
gainParam_ = std::make_shared<AudioParam>(
- 0.0f, MOST_NEGATIVE_SINGLE_FLOAT, 40 * LOG10_MOST_POSITIVE_SINGLE_FLOAT, context);
- type_ = BiquadFilterType::LOWPASS;
+ options.gain, MOST_NEGATIVE_SINGLE_FLOAT, 40 * LOG10_MOST_POSITIVE_SINGLE_FLOAT, context);
+ type_ = options.type;
isInitialized_ = true;
- channelCountMode_ = ChannelCountMode::MAX;
}
std::string BiquadFilterNode::getType() {
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/BiquadFilterNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/BiquadFilterNode.h
index 6a7a606d1..90ffe00ec 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/BiquadFilterNode.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/BiquadFilterNode.h
@@ -46,6 +46,7 @@
namespace audioapi {
class AudioBus;
+class BiquadFilterOptions;
class BiquadFilterNode : public AudioNode {
#ifdef AUDIO_API_TEST_SUITE
@@ -54,7 +55,7 @@ class BiquadFilterNode : public AudioNode {
#endif
public:
- explicit BiquadFilterNode(BaseAudioContext *context);
+ explicit BiquadFilterNode(BaseAudioContext *context, BiquadFilterOptions options);
[[nodiscard]] std::string getType();
void setType(const std::string &type);
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/ConvolverNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/ConvolverNode.cpp
index 2692beeab..492d8eeff 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/ConvolverNode.cpp
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/ConvolverNode.cpp
@@ -1,3 +1,4 @@
+#include
#include
#include
#include
@@ -11,11 +12,8 @@
#include
namespace audioapi {
-ConvolverNode::ConvolverNode(
- BaseAudioContext *context,
- const std::shared_ptr<AudioBuffer> &buffer,
- bool disableNormalization)
- : AudioNode(context),
+ConvolverNode::ConvolverNode(BaseAudioContext *context, ConvolverOptions options)
+ : AudioNode(context, options),
remainingSegments_(0),
internalBufferIndex_(0),
signalledToStop_(false),
@@ -23,11 +21,9 @@ ConvolverNode::ConvolverNode(
intermediateBus_(nullptr),
buffer_(nullptr),
internalBuffer_(nullptr) {
- channelCount_ = 2;
- channelCountMode_ = ChannelCountMode::CLAMPED_MAX;
- normalize_ = !disableNormalization;
+ normalize_ = !options.disableNormalization;
gainCalibrationSampleRate_ = context->getSampleRate();
- setBuffer(buffer);
+ setBuffer(options.bus);
audioBus_ =
std::make_shared<AudioBus>(RENDER_QUANTUM_SIZE, channelCount_, context->getSampleRate());
requiresTailProcessing_ = true;
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/ConvolverNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/ConvolverNode.h
index 92a45d609..42460b17f 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/ConvolverNode.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/ConvolverNode.h
@@ -17,13 +17,11 @@ namespace audioapi {
class AudioBus;
class AudioBuffer;
+class ConvolverOptions;
class ConvolverNode : public AudioNode {
public:
- explicit ConvolverNode(
- BaseAudioContext *context,
- const std::shared_ptr<AudioBuffer> &buffer,
- bool disableNormalization);
+ explicit ConvolverNode(BaseAudioContext *context, ConvolverOptions options);
[[nodiscard]] bool getNormalize_() const;
[[nodiscard]] const std::shared_ptr<AudioBuffer> &getBuffer() const;
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/DelayNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/DelayNode.cpp
index cf2e44c5b..728e68f93 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/DelayNode.cpp
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/DelayNode.cpp
@@ -1,3 +1,4 @@
+#include
#include
#include
#include
@@ -7,11 +8,13 @@
namespace audioapi {
-DelayNode::DelayNode(BaseAudioContext *context, float maxDelayTime) : AudioNode(context) {
- delayTimeParam_ = std::make_shared<AudioParam>(0, 0, maxDelayTime, context);
+DelayNode::DelayNode(BaseAudioContext *context, DelayOptions options)
+ : AudioNode(context, options) {
+ delayTimeParam_ =
+ std::make_shared<AudioParam>(options.delayTime, 0, options.maxDelayTime, context);
delayBuffer_ = std::make_shared<AudioBus>(
static_cast<size_t>(
- maxDelayTime * context->getSampleRate() +
+ options.maxDelayTime * context->getSampleRate() +
1), // +1 to enable delayTime equal to maxDelayTime
channelCount_,
context->getSampleRate());
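
Worth noting (not part of the diff): the `+1` in the buffer allocation is what allows `delayTime` to equal `maxDelayTime`. As an illustrative calculation, with `options.maxDelayTime = 2.0f` and a 48 kHz context, the delay buffer is sized for 2.0 × 48000 + 1 = 96001 frames per channel (numbers chosen for illustration, not taken from the diff).
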
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/DelayNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/DelayNode.h
index 15ab28f10..d564035a2 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/DelayNode.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/DelayNode.h
@@ -9,10 +9,11 @@
namespace audioapi {
class AudioBus;
+class DelayOptions;
class DelayNode : public AudioNode {
public:
- explicit DelayNode(BaseAudioContext *context, float maxDelayTime);
+ explicit DelayNode(BaseAudioContext *context, DelayOptions options);
[[nodiscard]] std::shared_ptr<AudioParam> getDelayTimeParam() const;
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/GainNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/GainNode.cpp
index 6e1c01d45..17741b238 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/GainNode.cpp
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/GainNode.cpp
@@ -1,3 +1,4 @@
+#include
#include
#include
#include
@@ -7,9 +8,9 @@
namespace audioapi {
-GainNode::GainNode(BaseAudioContext *context) : AudioNode(context) {
+GainNode::GainNode(BaseAudioContext *context, GainOptions options) : AudioNode(context, options) {
gainParam_ = std::make_shared<AudioParam>(
- 1.0, MOST_NEGATIVE_SINGLE_FLOAT, MOST_POSITIVE_SINGLE_FLOAT, context);
+ options.gain, MOST_NEGATIVE_SINGLE_FLOAT, MOST_POSITIVE_SINGLE_FLOAT, context);
isInitialized_ = true;
}
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/GainNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/GainNode.h
index de3b0d7b0..93c321654 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/GainNode.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/GainNode.h
@@ -8,10 +8,11 @@
namespace audioapi {
class AudioBus;
+class GainOptions;
class GainNode : public AudioNode {
public:
- explicit GainNode(BaseAudioContext *context);
+ explicit GainNode(BaseAudioContext *context, GainOptions options);
[[nodiscard]] std::shared_ptr<AudioParam> getGainParam() const;
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/IIRFilterNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/IIRFilterNode.cpp
index b9d22aea3..cafd1dbfa 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/IIRFilterNode.cpp
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/IIRFilterNode.cpp
@@ -23,6 +23,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include
#include
#include
#include
@@ -30,17 +31,16 @@
#include
#include
#include
+#include
#include
namespace audioapi {
-IIRFilterNode::IIRFilterNode(
- BaseAudioContext *context,
- const std::vector<float> &feedforward,
- const std::vector<float> &feedback)
- : AudioNode(context), feedforward_(feedforward), feedback_(feedback) {
+IIRFilterNode::IIRFilterNode(BaseAudioContext *context, IIRFilterOptions options)
+ : AudioNode(context, options),
+ feedforward_(std::move(options.feedforward)),
+ feedback_(std::move(options.feedback)) {
isInitialized_ = true;
- channelCountMode_ = ChannelCountMode::MAX;
int maxChannels = MAX_CHANNEL_COUNT;
xBuffers_.resize(maxChannels);
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/IIRFilterNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/IIRFilterNode.h
index f9ec07a19..c9b1d7916 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/IIRFilterNode.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/IIRFilterNode.h
@@ -33,13 +33,12 @@
namespace audioapi {
+class IIRFilterOptions;
+
class IIRFilterNode : public AudioNode {
public:
- explicit IIRFilterNode(
- BaseAudioContext *context,
- const std::vector<float> &feedforward,
- const std::vector