
Commit 5dbf74c

Merge branch 'main' into feat/opus-decoder
2 parents: 14dba8b + 15284b2


201 files changed: +6208 / -832 lines


README.md

Lines changed: 8 additions & 3 deletions
@@ -13,7 +13,7 @@ allowing developers to generate and modify audio in exact same way it is possibl

 ## Installation

-check out the [Getting Started](https://docs.swmansion.com/react-native-audio-api/fundamentals/getting-started) section of our documentation for detailed instructions!
+check out the [Getting Started](https://docs.swmansion.com/react-native-audio-api/docs/fundamentals/getting-started) section of our documentation for detailed instructions!

 ## Roadmap

@@ -42,16 +42,21 @@ check out the [Getting Started](https://docs.swmansion.com/react-native-audio-ap
 - <sub>[![Released in 0.6.0](https://img.shields.io/badge/Released_in-0.6.0-green)](https://github.com/software-mansion/react-native-audio-api/releases/tag/0.6.0)</sub> **Connect audio param** 🤞 <br />
 Ability to connect Audio nodes to audio params, which will allow for powerful and efficient modulation of audio parameters, creating effects like tremolo, vibrato or complex envelope followers. <br />

-- **Microphone support** 🎙️ <br />
+- <sub>[![Released in 0.7.0](https://img.shields.io/badge/Released_in-0.7.0-green)](https://github.com/software-mansion/react-native-audio-api/releases/tag/0.7.0)</sub>
+**Microphone support** 🎙️ <br />
 Grab audio data from device microphone or connected device, connect it to the audio graph or stream through the internet <br />

+- <sub>[![Released in 0.7.0](https://img.shields.io/badge/Released_in-0.7.0-green)](https://github.com/software-mansion/react-native-audio-api/releases/tag/0.7.0)</sub>
+**Custom Audio Processor** 🎙️ <br />
+Write your own processing AudioNode <br />
+
 - **JS Audio Worklets** 🐎 <br />
 Ability to run JS functions connected to the audio graph running on audio thread allowing for full customization of what happens to the audio signal.
 <br />

 ## Web Audio API Specification Coverage

-Our current coverage of Web Audio API specification can be found here: [Web Audio API coverage](https://software-mansion.github.io/react-native-audio-api/other/web-audio-api-coverage).
+Our current coverage of Web Audio API specification can be found here: [Web Audio API coverage](https://docs.swmansion.com/react-native-audio-api/docs/other/web-audio-api-coverage).

 ## Examples

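The "Connect audio param" roadmap entry above is what enables modulation effects such as tremolo or vibrato. As a quick illustration, here is a minimal TypeScript sketch of that idea; it assumes the package exposes Web Audio-style `AudioContext`, `createOscillator` and `createGain` (none of which appear in this diff), so treat it as an illustration rather than the project's own example.

import { AudioContext } from 'react-native-audio-api';

// Tremolo sketch: a low-frequency oscillator modulates a GainNode's gain AudioParam.
const ctx = new AudioContext();

const carrier = ctx.createOscillator(); // audible tone
carrier.frequency.value = 440;

const gain = ctx.createGain();
gain.gain.value = 0.5;                  // base level the LFO wobbles around

const lfo = ctx.createOscillator();     // modulator
lfo.frequency.value = 4;                // 4 Hz tremolo rate

const depth = ctx.createGain();
depth.gain.value = 0.3;                 // modulation depth

lfo.connect(depth);
depth.connect(gain.gain);               // AudioNode -> AudioParam connection

carrier.connect(gain);
gain.connect(ctx.destination);

lfo.start();
carrier.start();

With these values the effective gain oscillates roughly between 0.2 and 0.8, which is the tremolo the roadmap item describes.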

apps/common-app/src/examples/AudioFile/AudioFile.tsx

Lines changed: 1 addition & 0 deletions
@@ -54,6 +54,7 @@ const AudioFile: FC = () => {
     AudioManager.enableRemoteCommand('remoteSkipForward', true);
     AudioManager.enableRemoteCommand('remoteSkipBackward', true);
     AudioManager.observeAudioInterruptions(true);
+    AudioManager.activelyReclaimSession(true);

     const remotePlaySubscription = AudioManager.addSystemEventListener(
       'remotePlay',
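The single added line turns on `activelyReclaimSession`. Pulled out of the component, the session setup in this example looks roughly like the sketch below; the listener callback body and the `remove()` call on the returned subscription are assumptions (the diff only shows the listener being registered).

import { AudioManager } from 'react-native-audio-api';

// Remote-command and interruption handling, as configured in this example.
AudioManager.enableRemoteCommand('remoteSkipForward', true);
AudioManager.enableRemoteCommand('remoteSkipBackward', true);
AudioManager.observeAudioInterruptions(true);
AudioManager.activelyReclaimSession(true); // the call added by this commit

// React to the system "play" command (lock screen / control center).
const remotePlaySubscription = AudioManager.addSystemEventListener(
  'remotePlay',
  () => {
    // resume playback here -- app-specific, not shown in the diff
  }
);

// On teardown (assumed RN-style subscription API):
remotePlaySubscription?.remove();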

apps/common-app/src/examples/Record/Record.tsx

Lines changed: 16 additions & 14 deletions
@@ -5,7 +5,7 @@ import {
   AudioRecorder,
   RecorderAdapterNode,
   AudioBufferSourceNode,
-  AudioBuffer
+  AudioBuffer,
 } from 'react-native-audio-api';

 import { Container, Button } from '../../components';
@@ -21,21 +21,21 @@ const Record: FC = () => {
   const audioBuffersRef = useRef<AudioBuffer[]>([]);
   const sourcesRef = useRef<AudioBufferSourceNode[]>([]);

-
   useEffect(() => {
     AudioManager.setAudioSessionOptions({
       iosCategory: 'playAndRecord',
       iosMode: 'spokenAudio',
       iosOptions: ['defaultToSpeaker', 'allowBluetoothA2DP'],
     });
-
+
+    AudioManager.requestRecordingPermissions();
+
     recorderRef.current = new AudioRecorder({
       sampleRate: SAMPLE_RATE,
       bufferLengthInSamples: SAMPLE_RATE,
     });
   }, []);

-
   const startEcho = () => {
     if (!recorderRef.current) {
       console.error('AudioContext or AudioRecorder is not initialized');
@@ -46,15 +46,15 @@ const Record: FC = () => {
     recorderAdapterRef.current = aCtxRef.current.createRecorderAdapter();
     recorderAdapterRef.current.connect(aCtxRef.current.destination);
     recorderRef.current.connect(recorderAdapterRef.current);
-
+
     recorderRef.current.start();
-    console.log('Recording started');
+    console.log('Recording started');
     console.log('Audio context state:', aCtxRef.current.state);
     if (aCtxRef.current.state === 'suspended') {
       console.log('Resuming audio context');
       aCtxRef.current.resume();
     }
-  }
+  };

   /// This stops only the recording, not the audio context
   const stopEcho = () => {
@@ -66,7 +66,7 @@ const Record: FC = () => {
     aCtxRef.current = null;
     recorderAdapterRef.current = null;
     console.log('Recording stopped');
-  }
+  };

   const startRecordReplay = () => {
     if (!recorderRef.current) {
@@ -92,8 +92,7 @@ const Record: FC = () => {
       recorderRef.current?.stop();
       console.log('Recording stopped');
     }, 5000);
-
-  }
+  };

   const stopRecordReplay = () => {
     const aCtx = new AudioContext({ sampleRate: SAMPLE_RATE });
@@ -128,19 +127,22 @@ const Record: FC = () => {
       },
       (nextStartAt - tNow) * 1000
     );
-
-  }
+  };

   return (
     <Container style={{ gap: 40 }}>
-      <Text style={{ color: colors.white, fontSize: 24, textAlign: 'center' }}>Sample rate: {SAMPLE_RATE}</Text>
+      <Text style={{ color: colors.white, fontSize: 24, textAlign: 'center' }}>
+        Sample rate: {SAMPLE_RATE}
+      </Text>
       <View style={{ alignItems: 'center', justifyContent: 'center', gap: 5 }}>
         <Text style={{ color: colors.white, fontSize: 24 }}>Echo example</Text>
         <Button title="Start Recording" onPress={startEcho} />
         <Button title="Stop Recording" onPress={stopEcho} />
       </View>
       <View style={{ alignItems: 'center', justifyContent: 'center', gap: 5 }}>
-        <Text style={{ color: colors.white, fontSize: 24 }}>Record & replay example</Text>
+        <Text style={{ color: colors.white, fontSize: 24 }}>
+          Record & replay example
+        </Text>
         <Button title="Record for Replay" onPress={startRecordReplay} />
         <Button title="Replay" onPress={stopRecordReplay} />
       </View>
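Stripped of the React refs and UI, the echo path this example builds boils down to the sketch below. Every call appears in the diff above; only the concrete `SAMPLE_RATE` value is invented for the sketch, and the import list is assumed to mirror the example's own.

import {
  AudioContext,
  AudioManager,
  AudioRecorder,
} from 'react-native-audio-api';

const SAMPLE_RATE = 16000; // illustrative; the example defines its own constant

AudioManager.setAudioSessionOptions({
  iosCategory: 'playAndRecord',
  iosMode: 'spokenAudio',
  iosOptions: ['defaultToSpeaker', 'allowBluetoothA2DP'],
});
AudioManager.requestRecordingPermissions(); // new in this commit

const recorder = new AudioRecorder({
  sampleRate: SAMPLE_RATE,
  bufferLengthInSamples: SAMPLE_RATE, // one-second buffers
});

const ctx = new AudioContext({ sampleRate: SAMPLE_RATE });
const adapter = ctx.createRecorderAdapter();

adapter.connect(ctx.destination);  // recorded audio goes straight to the output
recorder.connect(adapter);
recorder.start();

if (ctx.state === 'suspended') {
  ctx.resume();
}

// Stop later with recorder.stop().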

apps/fabric-example/ios/Podfile.lock

Lines changed: 5 additions & 5 deletions
@@ -2243,7 +2243,7 @@ PODS:
 - React-perflogger (= 0.80.0)
 - React-utils (= 0.80.0)
 - SocketRocket
-- RNAudioAPI (0.7.0):
+- RNAudioAPI (0.7.1):
 - boost
 - DoubleConversion
 - fast_float
@@ -2270,10 +2270,10 @@ PODS:
 - ReactCodegen
 - ReactCommon/turbomodule/bridging
 - ReactCommon/turbomodule/core
-- RNAudioAPI/audioapi (= 0.7.0)
+- RNAudioAPI/audioapi (= 0.7.1)
 - SocketRocket
 - Yoga
-- RNAudioAPI/audioapi (0.7.0):
+- RNAudioAPI/audioapi (0.7.1):
 - boost
 - DoubleConversion
 - fast_float
@@ -2300,10 +2300,10 @@ PODS:
 - ReactCodegen
 - ReactCommon/turbomodule/bridging
 - ReactCommon/turbomodule/core
-- RNAudioAPI/audioapi/ios (= 0.7.0)
+- RNAudioAPI/audioapi/ios (= 0.7.1)
 - SocketRocket
 - Yoga
-- RNAudioAPI/audioapi/ios (0.7.0):
+- RNAudioAPI/audioapi/ios (0.7.1):
 - boost
 - DoubleConversion
 - fast_float

packages/audiodocs/docs/analysis/analyser-node.mdx

Lines changed: 8 additions & 8 deletions
@@ -8,9 +8,9 @@ import { ReadOnly } from '@site/src/components/Badges';
 # AnalyserNode

 The `AnalyserNode` interface represents a node providing two core functionalities: extracting time-domain data and frequency-domain data from audio signals.
-It is an [`AudioNode`](/core/audio-node) that passes the audio data unchanged from input to output, but allows you to take passed data and process it.
+It is an [`AudioNode`](/docs/core/audio-node) that passes the audio data unchanged from input to output, but allows you to take passed data and process it.

-#### [`AudioNode`](/core/audio-node#read-only-properties) properties
+#### [`AudioNode`](/docs/core/audio-node#read-only-properties) properties

 <AudioNodePropsTable numberOfInputs={1} numberOfOutputs={1} channelCount={2} channelCountMode={"max"} channelInterpretation={"speakers"} />

@@ -23,17 +23,17 @@ In contrast, a frequency-domain graph reveals how the signal's energy or power i

 ## Constructor

-[`BaseAudioContext.createAnalyser()`](/core/base-audio-context#createanalyser)
+[`BaseAudioContext.createAnalyser()`](/docs/core/base-audio-context#createanalyser)

 ## Properties

 | Name | Type | Description | |
 | :----: | :----: | :-------- | :-: |
 | `fftSize` | `number` | Integer value representing size of [Fast Fourier Transform](https://en.wikipedia.org/wiki/Fast_Fourier_transform) used to determine frequency domain. In general it is size of returning time-domain data. |
-| `minDecibels` | `number` | Float value representing the minimum value for the range of results from [`getByteFrequencyData()`](/analysis/analyser-node#getbytefrequencydata). |
-| `maxDecibels` | `number` | Float value representing the maximum value for the range of results from [`getByteFrequencyData()`](/analysis/analyser-node#getbytefrequencydata). |
+| `minDecibels` | `number` | Float value representing the minimum value for the range of results from [`getByteFrequencyData()`](/docs/analysis/analyser-node#getbytefrequencydata). |
+| `maxDecibels` | `number` | Float value representing the maximum value for the range of results from [`getByteFrequencyData()`](/docs/analysis/analyser-node#getbytefrequencydata). |
 | `smoothingTimeConstant` | `number` | Float value representing averaging constant with the last analysis frame. In general the higher value the smoother is the transition between values over time. |
-| `window` | [`WindowType`](/types/window-type) | Enumerated value that specifies the type of window function applied when extracting frequency data. |
+| `window` | [`WindowType`](/docs/types/window-type) | Enumerated value that specifies the type of window function applied when extracting frequency data. |
 | `frequencyBinCount` | `number` | Integer value representing amount of the data obtained in frequency domain, half of the `fftSize` property. | <ReadOnly /> |

 :::caution
@@ -98,13 +98,13 @@ Each value in the array is within the range 0 to 255, where value of 127 indicat
 #### `minDecibels`
 - Default value is -100 dB.
 - 0 dB([decibel](https://en.wikipedia.org/wiki/Decibel)) is the loudest possible sound, -10 dB is a 10th of that.
-- When getting data from [`getByteFrequencyData()`](/analysis/analyser-node#getbytefrequencydata), any frequency with amplitude lower then `minDecibels` will be returned as 0.
+- When getting data from [`getByteFrequencyData()`](/docs/analysis/analyser-node#getbytefrequencydata), any frequency with amplitude lower then `minDecibels` will be returned as 0.
 - Throws `IndexSizeError` if set value is greater than or equal to `maxDecibels`.

 #### `maxDecibels`
 - Default value is -30 dB.
 - 0 dB([decibel](https://en.wikipedia.org/wiki/Decibel)) is the loudest possible sound, -10 dB is a 10th of that.
-- When getting data from [`getByteFrequencyData()`](/analysis/analyser-node#getbytefrequencydata), any frequency with amplitude higher then `maxDecibels` will be returned as 255.
+- When getting data from [`getByteFrequencyData()`](/docs/analysis/analyser-node#getbytefrequencydata), any frequency with amplitude higher then `maxDecibels` will be returned as 255.
 - Throws `IndexSizeError` if set value is less then or equal to `minDecibels`.

 #### `smoothingTimeConstant`
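As a quick illustration of the properties this page documents (only the link paths change in this commit), here is a minimal usage sketch. It assumes the byte-data method takes a `Uint8Array` as in the standard Web Audio API, and leaves the source node out.

import { AudioContext } from 'react-native-audio-api';

const ctx = new AudioContext();
const analyser = ctx.createAnalyser();

analyser.fftSize = 2048;              // time-domain window size; frequencyBinCount becomes 1024
analyser.minDecibels = -90;           // amplitudes below this map to 0
analyser.maxDecibels = -10;           // amplitudes above this map to 255
analyser.smoothingTimeConstant = 0.8; // higher = smoother changes over time

// someSource.connect(analyser);      // any signal-producing AudioNode (not shown here)
// analyser.connect(ctx.destination);

const bins = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(bins);  // each value is 0-255 within [minDecibels, maxDecibels]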

packages/audiodocs/docs/core/audio-context.mdx

Lines changed: 2 additions & 2 deletions
@@ -4,14 +4,14 @@ sidebar_position: 2

 # AudioContext

-The `AudioContext` interface inherits from [`BaseAudioContext`](/core/base-audio-context).
+The `AudioContext` interface inherits from [`BaseAudioContext`](/docs/core/base-audio-context).
 It is responsible for supervising and managing audio-processing graph.

 ## Constructor

 `new AudioContext()`

-[`new AudioContext(options: AudioContextOptions)`](/core/audio-context#audiocontextoptions)
+[`new AudioContext(options: AudioContextOptions)`](/docs/core/audio-context#audiocontextoptions)

 #### Errors
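For reference, both constructor forms described above in one sketch; `sampleRate` is the only `AudioContextOptions` field exercised elsewhere in this commit (the Record example), and no other fields are assumed here.

import { AudioContext } from 'react-native-audio-api';

// Default construction.
const ctx = new AudioContext();

// With options -- sampleRate as used in the Record example (value illustrative).
const recordingCtx = new AudioContext({ sampleRate: 16000 });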

packages/audiodocs/docs/core/audio-node.mdx

Lines changed: 6 additions & 6 deletions
@@ -21,19 +21,19 @@ We usually represent the channels with the standard abbreviations detailed in th
 #### Mixing

 When node has more then one input or number of inputs channels differs from output up-mixing or down-mixing must be conducted.
-There are three properties involved in mixing process: `channelCount`, [`ChannelCountMode`](/types/channel-count-mode), [`ChannelInterpretation`](/types/channel-interpretation).
+There are three properties involved in mixing process: `channelCount`, [`ChannelCountMode`](/docs/types/channel-count-mode), [`ChannelInterpretation`](/docs/types/channel-interpretation).
 Based on them we can obtain output's number of channels and mixing strategy.

 ## Properties

 | Name | Type | Description | |
 | :----: | :----: | :-------- | :-: |
-| `context` | [`BaseAudioContext`](/core/base-audio-context) | Associated context. | <ReadOnly /> |
+| `context` | [`BaseAudioContext`](/docs/core/base-audio-context) | Associated context. | <ReadOnly /> |
 | `numberOfInputs` | `number` | Integer value representing the number of input connections for the node. | <ReadOnly /> |
 | `numberOfOutputs` | `number` | Integer value representing the number of output connections for the node. | <ReadOnly /> |
 | `channelCount` | `number` | Integer used to determine how many channels are used when up-mixing or down-mixing node's inputs. | <ReadOnly /> |
-| `channelCountMode` | [`ChannelCountMode`](/types/channel-count-mode) | Enumerated value that specifies the method by which channels are mixed between the node's inputs and outputs. | <ReadOnly /> |
-| `channelInterpretation` | [`ChannelInterpretation`](/types/channel-interpretation) | Enumerated value that specifies how input channels are mapped to output channels when number of them is different. | <ReadOnly /> |
+| `channelCountMode` | [`ChannelCountMode`](/docs/types/channel-count-mode) | Enumerated value that specifies the method by which channels are mixed between the node's inputs and outputs. | <ReadOnly /> |
+| `channelInterpretation` | [`ChannelInterpretation`](/docs/types/channel-interpretation) | Enumerated value that specifies how input channels are mapped to output channels when number of them is different. | <ReadOnly /> |

 ## Examples

@@ -89,7 +89,7 @@ The above method lets you connect one of the node's outputs to a destination.

 | Parameters | Type | Description |
 | :---: | :---: | :---- |
-| `destination` | [`AudioNode`](/core/audio-node) or [`AudioParam`](/core/audio-param) | `AudioNode` or `AudioParam` to which to connect. |
+| `destination` | [`AudioNode`](/docs/core/audio-node) or [`AudioParam`](/docs/core/audio-param) | `AudioNode` or `AudioParam` to which to connect. |

 #### Errors:

@@ -105,7 +105,7 @@ The above method lets you disconnect one or more nodes from the node.

 | Parameters | Type | Description |
 | :---: | :---: | :---- |
-| `destination` <Optional /> | [`AudioNode`](/core/audio-node) or [`AudioParam`](/docs/core/audio-param) | `AudioNode` or `AudioParam` from which to disconnect. |
+| `destination` <Optional /> | [`AudioNode`](/docs/core/audio-node) or [`AudioParam`](/docs/core/audio-param) | `AudioNode` or `AudioParam` from which to disconnect. |

 If no arguments provided node disconnects from all outgoing connections.
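The documented `connect` accepts either an `AudioNode` or an `AudioParam`, and `disconnect()` with no arguments drops every outgoing connection. A short sketch of both cases; the oscillator and gain factories are assumed to exist (they are not part of this diff).

import { AudioContext } from 'react-native-audio-api';

const ctx = new AudioContext();
const osc = ctx.createOscillator();
const gain = ctx.createGain();

// AudioNode -> AudioNode
osc.connect(gain);
gain.connect(ctx.destination);

// AudioNode -> AudioParam (modulation)
const lfo = ctx.createOscillator();
lfo.connect(gain.gain);

// Disconnect one destination, or everything at once.
gain.disconnect(ctx.destination);
osc.disconnect(); // drops all outgoing connections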

packages/audiodocs/docs/core/audio-param.mdx

Lines changed: 3 additions & 3 deletions
@@ -6,7 +6,7 @@ import { Optional, ReadOnly } from '@site/src/components/Badges';

 # AudioParam

-The `AudioParam` interface represents audio-related parameter (such as `gain` property of [GainNode`](/effects/gain-node)).
+The `AudioParam` interface represents audio-related parameter (such as `gain` property of [GainNode`](/docs/effects/gain-node)).
 It can be set to specific value or schedule value change to happen at specific time, and following specific pattern.

 #### a-rate vs k-rate
@@ -86,7 +86,7 @@ The change begins at the time designated for the previous event. It follows a ex
 ### `setTargetAtTime`

 The above method schedules a gradual change to the new value at the start time.
-This method is useful for decay or release portions of [ADSR envelopes](/effects/gain-node#envelope---adsr).
+This method is useful for decay or release portions of [ADSR envelopes](/docs/effects/gain-node#envelope---adsr).

 ![](/img/setTargetAtTime.png)

@@ -159,4 +159,4 @@ The above method cancels all scheduled changes after given cancel time, but hold

 ## Remarks

-All time parameters should be in the same time coordinate system as [`BaseAudioContext.currentTime`](/core/base-audio-context).
+All time parameters should be in the same time coordinate system as [`BaseAudioContext.currentTime`](/docs/core/base-audio-context).
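Tying the remark above to `setTargetAtTime`: all scheduling times are expressed in `BaseAudioContext.currentTime` seconds. A hedged sketch of a release stage on a gain param; `setValueAtTime` and the `(target, startTime, timeConstant)` argument order follow the standard Web Audio API and are assumed to match here.

import { AudioContext } from 'react-native-audio-api';

const ctx = new AudioContext();
const gain = ctx.createGain();
gain.connect(ctx.destination);

const now = ctx.currentTime; // same time coordinate system as the schedules below

gain.gain.setValueAtTime(1.0, now);              // hold full level...
gain.gain.setTargetAtTime(0.0, now + 0.5, 0.1);  // ...then release: target, startTime, timeConstant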
