diff --git a/packages-user/client-base/package.json b/packages-user/client-base/package.json index d09d55b..8c38f2b 100644 --- a/packages-user/client-base/package.json +++ b/packages-user/client-base/package.json @@ -1,6 +1,7 @@ { "name": "@user/client-base", "dependencies": { + "@motajs/audio": "workspace:*", "@motajs/render": "workspace:*", "@motajs/client-base": "workspace:*" } diff --git a/packages-user/client-base/src/index.ts b/packages-user/client-base/src/index.ts index ddf0bf0..2f3ff66 100644 --- a/packages-user/client-base/src/index.ts +++ b/packages-user/client-base/src/index.ts @@ -5,3 +5,5 @@ export function create() { } export * from './material'; + +export * from './ins'; diff --git a/packages-user/client-base/src/ins.ts b/packages-user/client-base/src/ins.ts new file mode 100644 index 0000000..5e036e2 --- /dev/null +++ b/packages-user/client-base/src/ins.ts @@ -0,0 +1,8 @@ +import { BGMPlayer, MotaAudioContext, SoundPlayer } from '@motajs/audio'; + +/** 游戏全局音频上下文 */ +export const audioContext = new MotaAudioContext(); +/** 音效播放器 */ +export const soundPlayer = new SoundPlayer(audioContext); +/** 音乐播放器 */ +export const bgmPlayer = new BGMPlayer(audioContext); diff --git a/packages-user/client-modules/src/audio/bgm.ts b/packages-user/client-modules/src/audio/bgm.ts deleted file mode 100644 index f983414..0000000 --- a/packages-user/client-modules/src/audio/bgm.ts +++ /dev/null @@ -1,268 +0,0 @@ -import EventEmitter from 'eventemitter3'; -import { audioPlayer, AudioPlayer, AudioRoute, AudioStatus } from './player'; -import { guessTypeByExt, isAudioSupport } from './support'; -import { logger } from '@motajs/common'; -import { StreamLoader } from '../loader'; -import { linear, sleep, Transition } from 'mutate-animate'; -import { VolumeEffect } from './effect'; - -interface BgmVolume { - effect: VolumeEffect; - transition: Transition; -} - -interface BgmControllerEvent { - play: []; - pause: []; - resume: []; - stop: []; -} - -export class BgmController< 
- T extends string = BgmIds -> extends EventEmitter { - /** bgm音频名称的前缀 */ - prefix: string = 'bgms.'; - /** 每个 bgm 的音量控制器 */ - readonly gain: Map = new Map(); - - /** 正在播放的 bgm */ - playingBgm?: T; - /** 是否正在播放 */ - playing: boolean = false; - - /** 是否已经启用 */ - enabled: boolean = true; - /** 主音量控制器 */ - private readonly mainGain: VolumeEffect; - /** 是否屏蔽所有的音乐切换 */ - private blocking: boolean = false; - /** 渐变时长 */ - private transitionTime: number = 2000; - - constructor(public readonly player: AudioPlayer) { - super(); - this.mainGain = player.createVolumeEffect(); - } - - /** - * 设置音频渐变时长 - * @param time 渐变时长 - */ - setTransitionTime(time: number) { - this.transitionTime = time; - for (const [, value] of this.gain) { - value.transition.time(time); - } - } - - /** - * 屏蔽音乐切换 - */ - blockChange() { - this.blocking = true; - } - - /** - * 取消屏蔽音乐切换 - */ - unblockChange() { - this.blocking = false; - } - - /** - * 设置总音量大小 - * @param volume 音量大小 - */ - setVolume(volume: number) { - this.mainGain.setVolume(volume); - } - - /** - * 获取总音量大小 - */ - getVolume() { - return this.mainGain.getVolume(); - } - - /** - * 设置是否启用 - * @param enabled 是否启用 - */ - setEnabled(enabled: boolean) { - if (enabled) this.resume(); - else this.stop(); - this.enabled = enabled; - } - - /** - * 设置 bgm 音频名称的前缀 - */ - setPrefix(prefix: string) { - this.prefix = prefix; - } - - private getId(name: T) { - return `${this.prefix}${name}`; - } - - /** - * 根据 bgm 名称获取其 AudioRoute 实例 - * @param id 音频名称 - */ - get(id: T) { - return this.player.getRoute(this.getId(id)); - } - - /** - * 添加一个 bgm - * @param id 要添加的 bgm 的名称 - * @param url 指定 bgm 的加载地址 - */ - addBgm(id: T, url: string = `project/bgms/${id}`) { - const type = guessTypeByExt(id); - if (!type) { - logger.warn(50, id.split('.').slice(0, -1).join('.')); - return; - } - const gain = this.player.createVolumeEffect(); - if (isAudioSupport(type)) { - const source = audioPlayer.createElementSource(); - source.setSource(url); - source.setLoop(true); - 
const route = new AudioRoute(source, audioPlayer); - route.addEffect([gain, this.mainGain]); - audioPlayer.addRoute(this.getId(id), route); - this.setTransition(id, route, gain); - } else { - const source = audioPlayer.createStreamSource(); - const stream = new StreamLoader(url); - stream.pipe(source); - source.setLoop(true); - const route = new AudioRoute(source, audioPlayer); - route.addEffect([gain, this.mainGain]); - audioPlayer.addRoute(this.getId(id), route); - this.setTransition(id, route, gain); - } - } - - /** - * 移除一个 bgm - * @param id 要移除的 bgm 的名称 - */ - removeBgm(id: T) { - this.player.removeRoute(this.getId(id)); - const gain = this.gain.get(id); - gain?.transition.ticker.destroy(); - this.gain.delete(id); - } - - private setTransition(id: T, route: AudioRoute, gain: VolumeEffect) { - const transition = new Transition(); - transition - .time(this.transitionTime) - .mode(linear()) - .transition('volume', 0); - - const tick = () => { - gain.setVolume(transition.value.volume); - }; - - /** - * @param expect 在结束时应该是正在播放还是停止 - */ - const setTick = async (expect: AudioStatus) => { - transition.ticker.remove(tick); - transition.ticker.add(tick); - const identifier = route.stopIdentifier; - await sleep(this.transitionTime + 500); - if ( - route.status === expect && - identifier === route.stopIdentifier - ) { - transition.ticker.remove(tick); - if (route.status === AudioStatus.Playing) { - gain.setVolume(1); - } else { - gain.setVolume(0); - } - } - }; - - route.onStart(async () => { - transition.transition('volume', 1); - setTick(AudioStatus.Playing); - }); - route.onEnd(() => { - transition.transition('volume', 0); - setTick(AudioStatus.Paused); - }); - route.setEndTime(this.transitionTime); - - this.gain.set(id, { effect: gain, transition }); - } - - /** - * 播放一个 bgm - * @param id 要播放的 bgm 名称 - */ - play(id: T, when?: number) { - if (this.blocking) return; - if (id !== this.playingBgm && this.playingBgm) { - this.player.pause(this.getId(this.playingBgm)); - 
} - this.playingBgm = id; - if (!this.enabled) return; - this.player.play(this.getId(id), when); - this.playing = true; - this.emit('play'); - } - - /** - * 继续当前的 bgm - */ - resume() { - if (this.blocking || !this.enabled || this.playing) return; - if (this.playingBgm) { - this.player.resume(this.getId(this.playingBgm)); - } - this.playing = true; - this.emit('resume'); - } - - /** - * 暂停当前的 bgm - */ - pause() { - if (this.blocking || !this.enabled) return; - if (this.playingBgm) { - this.player.pause(this.getId(this.playingBgm)); - } - this.playing = false; - this.emit('pause'); - } - - /** - * 停止当前的 bgm - */ - stop() { - if (this.blocking || !this.enabled) return; - if (this.playingBgm) { - this.player.stop(this.getId(this.playingBgm)); - } - this.playing = false; - this.emit('stop'); - } -} - -export const bgmController = new BgmController(audioPlayer); - -export function loadAllBgm() { - const { loading } = Mota.require('@user/data-base'); - loading.once('coreInit', () => { - const data = data_a1e2fb4a_e986_4524_b0da_9b7ba7c0874d; - for (const bgm of data.main.bgms) { - bgmController.addBgm(bgm); - } - }); -} diff --git a/packages-user/client-modules/src/audio/decoder.ts b/packages-user/client-modules/src/audio/decoder.ts deleted file mode 100644 index 104dfa0..0000000 --- a/packages-user/client-modules/src/audio/decoder.ts +++ /dev/null @@ -1,203 +0,0 @@ -import { logger } from '@motajs/common'; -import { OggVorbisDecoderWebWorker } from '@wasm-audio-decoders/ogg-vorbis'; -import { OggOpusDecoderWebWorker } from 'ogg-opus-decoder'; -import { AudioType, isAudioSupport } from './support'; -import type { AudioPlayer } from './player'; - -const fileSignatures: [AudioType, number[]][] = [ - [AudioType.Mp3, [0x49, 0x44, 0x33]], - [AudioType.Ogg, [0x4f, 0x67, 0x67, 0x53]], - [AudioType.Wav, [0x52, 0x49, 0x46, 0x46]], - [AudioType.Flac, [0x66, 0x4c, 0x61, 0x43]], - [AudioType.Aac, [0xff, 0xf1]], - [AudioType.Aac, [0xff, 0xf9]] -]; -const oggHeaders: [AudioType, 
number[]][] = [ - [AudioType.Opus, [0x4f, 0x70, 0x75, 0x73, 0x48, 0x65, 0x61, 0x64]] -]; - -export function checkAudioType(data: Uint8Array) { - let audioType: AudioType | '' = ''; - // 检查头文件获取音频类型,仅检查前256个字节 - const toCheck = data.slice(0, 256); - for (const [type, value] of fileSignatures) { - if (value.every((v, i) => toCheck[i] === v)) { - audioType = type; - break; - } - } - if (audioType === AudioType.Ogg) { - // 如果是ogg的话,进一步判断是不是opus - for (const [key, value] of oggHeaders) { - const has = toCheck.some((_, i) => { - return value.every((v, ii) => toCheck[i + ii] === v); - }); - if (has) { - audioType = key; - break; - } - } - } - return audioType; -} - -export interface IAudioDecodeError { - /** 错误信息 */ - message: string; -} - -export interface IAudioDecodeData { - /** 每个声道的音频信息 */ - channelData: Float32Array[]; - /** 已经被解码的 PCM 采样数 */ - samplesDecoded: number; - /** 音频采样率 */ - sampleRate: number; - /** 解码错误信息 */ - errors: IAudioDecodeError[]; -} - -export abstract class AudioDecoder { - static readonly decoderMap: Map AudioDecoder> = - new Map(); - - /** - * 注册一个解码器 - * @param type 要注册的解码器允许解码的类型 - * @param decoder 解码器对象 - */ - static registerDecoder(type: AudioType, decoder: new () => AudioDecoder) { - if (this.decoderMap.has(type)) { - logger.warn(47, type); - return; - } - this.decoderMap.set(type, decoder); - } - - /** - * 解码音频数据 - * @param data 音频文件数据 - * @param player AudioPlayer实例 - */ - static async decodeAudioData(data: Uint8Array, player: AudioPlayer) { - // 检查头文件获取音频类型,仅检查前256个字节 - const toCheck = data.slice(0, 256); - const type = checkAudioType(data); - if (type === '') { - logger.error( - 25, - [...toCheck] - .map(v => v.toString(16).padStart(2, '0')) - .join(' ') - .toUpperCase() - ); - return null; - } - if (isAudioSupport(type)) { - if (data.buffer instanceof ArrayBuffer) { - return player.ac.decodeAudioData(data.buffer); - } else { - return null; - } - } else { - const Decoder = this.decoderMap.get(type); - if (!Decoder) { - return null; - 
} else { - const decoder = new Decoder(); - await decoder.create(); - const decodedData = await decoder.decodeAll(data); - if (!decodedData) return null; - const buffer = player.ac.createBuffer( - decodedData.channelData.length, - decodedData.channelData[0].length, - decodedData.sampleRate - ); - decodedData.channelData.forEach((v, i) => { - buffer.copyToChannel(v, i); - }); - decoder.destroy(); - return buffer; - } - } - } - - /** - * 创建音频解码器 - */ - abstract create(): Promise; - - /** - * 摧毁这个解码器 - */ - abstract destroy(): void; - - /** - * 解码流数据 - * @param data 流数据 - */ - abstract decode(data: Uint8Array): Promise; - - /** - * 解码整个文件 - * @param data 文件数据 - */ - abstract decodeAll(data: Uint8Array): Promise; - - /** - * 当音频解码完成后,会调用此函数,需要返回之前还未解析或未返回的音频数据。调用后,该解码器将不会被再次使用 - */ - abstract flush(): Promise; -} - -export class VorbisDecoder extends AudioDecoder { - decoder?: OggVorbisDecoderWebWorker; - - async create(): Promise { - this.decoder = new OggVorbisDecoderWebWorker(); - await this.decoder.ready; - } - - destroy(): void { - this.decoder?.free(); - } - - async decode(data: Uint8Array): Promise { - return this.decoder?.decode(data) as Promise; - } - - async decodeAll(data: Uint8Array): Promise { - return this.decoder?.decodeFile(data) as Promise; - } - - async flush(): Promise { - return this.decoder?.flush() as Promise; - } -} - -export class OpusDecoder extends AudioDecoder { - decoder?: OggOpusDecoderWebWorker; - - async create(): Promise { - this.decoder = new OggOpusDecoderWebWorker({ - speechQualityEnhancement: 'none' - }); - await this.decoder.ready; - } - - destroy(): void { - this.decoder?.free(); - } - - async decode(data: Uint8Array): Promise { - return this.decoder?.decode(data) as Promise; - } - - async decodeAll(data: Uint8Array): Promise { - return this.decoder?.decodeFile(data) as Promise; - } - - async flush(): Promise { - return this.decoder?.flush() as Promise; - } -} diff --git a/packages-user/client-modules/src/audio/index.ts 
b/packages-user/client-modules/src/audio/index.ts deleted file mode 100644 index c09a229..0000000 --- a/packages-user/client-modules/src/audio/index.ts +++ /dev/null @@ -1,18 +0,0 @@ -import { loadAllBgm } from './bgm'; -import { OpusDecoder, VorbisDecoder } from './decoder'; -import { AudioType } from './support'; -import { AudioDecoder } from './decoder'; - -export function createAudio() { - loadAllBgm(); - AudioDecoder.registerDecoder(AudioType.Ogg, VorbisDecoder); - AudioDecoder.registerDecoder(AudioType.Opus, OpusDecoder); -} - -export * from './support'; -export * from './effect'; -export * from './player'; -export * from './source'; -export * from './bgm'; -export * from './decoder'; -export * from './sound'; diff --git a/packages-user/client-modules/src/audio/player.ts b/packages-user/client-modules/src/audio/player.ts deleted file mode 100644 index fc7100f..0000000 --- a/packages-user/client-modules/src/audio/player.ts +++ /dev/null @@ -1,605 +0,0 @@ -import EventEmitter from 'eventemitter3'; -import { - AudioBufferSource, - AudioElementSource, - AudioSource, - AudioStreamSource -} from './source'; -import { - AudioEffect, - ChannelVolumeEffect, - DelayEffect, - EchoEffect, - IAudioOutput, - StereoEffect, - VolumeEffect -} from './effect'; -import { isNil } from 'lodash-es'; -import { logger } from '@motajs/common'; -import { sleep } from 'mutate-animate'; -import { AudioDecoder } from './decoder'; - -interface AudioPlayerEvent {} - -export class AudioPlayer extends EventEmitter { - /** 音频播放上下文 */ - readonly ac: AudioContext; - - /** 所有的音频播放路由 */ - readonly audioRoutes: Map = new Map(); - /** 音量节点 */ - readonly gain: GainNode; - - constructor() { - super(); - this.ac = new AudioContext(); - this.gain = this.ac.createGain(); - this.gain.connect(this.ac.destination); - } - - /** - * 解码音频数据 - * @param data 音频数据 - */ - decodeAudioData(data: Uint8Array) { - return AudioDecoder.decodeAudioData(data, this); - } - - /** - * 设置音量 - * @param volume 音量 - */ - 
setVolume(volume: number) { - this.gain.gain.value = volume; - } - - /** - * 获取音量 - */ - getVolume() { - return this.gain.gain.value; - } - - /** - * 创建一个音频源 - * @param Source 音频源类 - */ - createSource( - Source: new (ac: AudioContext) => T - ): T { - return new Source(this.ac); - } - - /** - * 创建一个兼容流式音频源,可以与流式加载相结合,主要用于处理 opus ogg 不兼容的情况 - */ - createStreamSource() { - return new AudioStreamSource(this.ac); - } - - /** - * 创建一个通过 audio 元素播放的音频源 - */ - createElementSource() { - return new AudioElementSource(this.ac); - } - - /** - * 创建一个通过 AudioBuffer 播放的音频源 - */ - createBufferSource() { - return new AudioBufferSource(this.ac); - } - - /** - * 获取音频目的地 - */ - getDestination() { - return this.gain; - } - - /** - * 创建一个音频效果器 - * @param Effect 效果器类 - */ - createEffect( - Effect: new (ac: AudioContext) => T - ): T { - return new Effect(this.ac); - } - - /** - * 创建一个修改音量的效果器 - * ```txt - * |----------| - * Input ----> | GainNode | ----> Output - * |----------| - * ``` - */ - createVolumeEffect() { - return new VolumeEffect(this.ac); - } - - /** - * 创建一个立体声效果器 - * ```txt - * |------------| - * Input ----> | PannerNode | ----> Output - * |------------| - * ``` - */ - createStereoEffect() { - return new StereoEffect(this.ac); - } - - /** - * 创建一个修改单个声道音量的效果器 - * ```txt - * |----------| - * -> | GainNode | \ - * |--------------| / |----------| -> |------------| - * Input ----> | SplitterNode | ...... 
| MergerNode | ----> Output - * |--------------| \ |----------| -> |------------| - * -> | GainNode | / - * |----------| - * ``` - */ - createChannelVolumeEffect() { - return new ChannelVolumeEffect(this.ac); - } - - /** - * 创建一个延迟效果器 - * ```txt - * |-----------| - * Input ----> | DelayNode | ----> Output - * |-----------| - * ``` - */ - createDelayEffect() { - return new DelayEffect(this.ac); - } - - /** - * 创建一个回声效果器 - * ```txt - * |----------| - * Input ----> | GainNode | ----> Output - * ^ |----------| | - * | | - * | |------------| ↓ - * |-- | Delay Node | <-- - * |------------| - * ``` - */ - createEchoEffect() { - return new EchoEffect(this.ac); - } - - /** - * 创建一个音频播放路由 - * @param source 音频源 - */ - createRoute(source: AudioSource) { - return new AudioRoute(source, this); - } - - /** - * 添加一个音频播放路由,可以直接被播放 - * @param id 这个音频播放路由的名称 - * @param route 音频播放路由对象 - */ - addRoute(id: string, route: AudioRoute) { - if (this.audioRoutes.has(id)) { - logger.warn(45, id); - } - this.audioRoutes.set(id, route); - } - - /** - * 根据名称获取音频播放路由对象 - * @param id 音频播放路由的名称 - */ - getRoute(id: string) { - return this.audioRoutes.get(id); - } - - /** - * 移除一个音频播放路由 - * @param id 要移除的播放路由的名称 - */ - removeRoute(id: string) { - const route = this.audioRoutes.get(id); - if (route) { - route.destroy(); - } - this.audioRoutes.delete(id); - } - - /** - * 播放音频 - * @param id 音频名称 - * @param when 从音频的哪个位置开始播放,单位秒 - */ - play(id: string, when: number = 0) { - const route = this.getRoute(id); - if (!route) { - logger.warn(53, 'play', id); - return; - } - route.play(when); - } - - /** - * 暂停音频播放 - * @param id 音频名称 - * @returns 当音乐真正停止时兑现 - */ - pause(id: string) { - const route = this.getRoute(id); - if (!route) { - logger.warn(53, 'pause', id); - return; - } - return route.pause(); - } - - /** - * 停止音频播放 - * @param id 音频名称 - * @returns 当音乐真正停止时兑现 - */ - stop(id: string) { - const route = this.getRoute(id); - if (!route) { - logger.warn(53, 'stop', id); - return; - } - return route.stop(); - 
} - - /** - * 继续音频播放 - * @param id 音频名称 - */ - resume(id: string) { - const route = this.getRoute(id); - if (!route) { - logger.warn(53, 'resume', id); - return; - } - route.resume(); - } - - /** - * 设置听者位置,x正方向水平向右,y正方向垂直于地面向上,z正方向垂直屏幕远离用户 - * @param x 位置x坐标 - * @param y 位置y坐标 - * @param z 位置z坐标 - */ - setListenerPosition(x: number, y: number, z: number) { - const listener = this.ac.listener; - listener.positionX.value = x; - listener.positionY.value = y; - listener.positionZ.value = z; - } - - /** - * 设置听者朝向,x正方向水平向右,y正方向垂直于地面向上,z正方向垂直屏幕远离用户 - * @param x 朝向x坐标 - * @param y 朝向y坐标 - * @param z 朝向z坐标 - */ - setListenerOrientation(x: number, y: number, z: number) { - const listener = this.ac.listener; - listener.forwardX.value = x; - listener.forwardY.value = y; - listener.forwardZ.value = z; - } - - /** - * 设置听者头顶朝向,x正方向水平向右,y正方向垂直于地面向上,z正方向垂直屏幕远离用户 - * @param x 头顶朝向x坐标 - * @param y 头顶朝向y坐标 - * @param z 头顶朝向z坐标 - */ - setListenerUp(x: number, y: number, z: number) { - const listener = this.ac.listener; - listener.upX.value = x; - listener.upY.value = y; - listener.upZ.value = z; - } -} - -export const enum AudioStatus { - Playing, - Pausing, - Paused, - Stoping, - Stoped -} - -type AudioStartHook = (route: AudioRoute) => void; -type AudioEndHook = (time: number, route: AudioRoute) => void; - -interface AudioRouteEvent { - updateEffect: []; - play: []; - stop: []; - pause: []; - resume: []; -} - -export class AudioRoute - extends EventEmitter - implements IAudioOutput -{ - output: AudioNode; - - /** 效果器路由图 */ - readonly effectRoute: AudioEffect[] = []; - - /** 结束时长,当音频暂停或停止时,会经过这么长时间之后才真正终止播放,期间可以做音频淡入淡出等效果 */ - endTime: number = 0; - - /** 当前播放状态 */ - status: AudioStatus = AudioStatus.Stoped; - /** 暂停时刻 */ - private pauseTime: number = 0; - /** 暂停时播放了多长时间 */ - private pauseCurrentTime: number = 0; - - /** 音频时长,单位秒 */ - get duration() { - return this.source.duration; - } - /** 当前播放了多长时间,单位秒 */ - get currentTime() { - if (this.status === AudioStatus.Paused) { - return 
this.pauseCurrentTime; - } else { - return this.source.currentTime; - } - } - set currentTime(time: number) { - this.source.stop(); - this.source.play(time); - } - - private shouldStop: boolean = false; - /** - * 每次暂停或停止时自增,用于判断当前正在处理的情况。 - * 假如暂停后很快播放,然后很快暂停,那么需要根据这个来判断实际是否应该执行暂停后操作 - */ - stopIdentifier: number = 0; - - private audioStartHook?: AudioStartHook; - private audioEndHook?: AudioEndHook; - - constructor( - public readonly source: AudioSource, - public readonly player: AudioPlayer - ) { - super(); - this.output = source.output; - source.on('end', () => { - if (this.status === AudioStatus.Playing) { - this.status = AudioStatus.Stoped; - } - }); - source.on('play', () => { - if (this.status !== AudioStatus.Playing) { - this.status = AudioStatus.Playing; - } - }); - } - - /** - * 设置结束时间,暂停或停止时,会经过这么长时间才终止音频的播放,这期间可以做一下音频淡出的效果。 - * @param time 暂停或停止时,经过多长时间之后才会结束音频的播放 - */ - setEndTime(time: number) { - this.endTime = time; - } - - /** - * 当音频播放时执行的函数,可以用于音频淡入效果 - * @param fn 音频开始播放时执行的函数 - */ - onStart(fn?: AudioStartHook) { - this.audioStartHook = fn; - } - - /** - * 当音频暂停或停止时执行的函数,可以用于音频淡出效果 - * @param fn 音频在暂停或停止时执行的函数,不填时表示取消这个钩子。 - * 包含两个参数,第一个参数是结束时长,第二个参数是当前音频播放路由对象 - */ - onEnd(fn?: AudioEndHook) { - this.audioEndHook = fn; - } - - /** - * 开始播放这个音频 - * @param when 从音频的什么时候开始播放,单位秒 - */ - async play(when: number = 0) { - if (this.status === AudioStatus.Playing) return; - this.link(); - await this.player.ac.resume(); - if (this.effectRoute.length > 0) { - const first = this.effectRoute[0]; - this.source.connect(first); - const last = this.effectRoute.at(-1)!; - last.connect({ input: this.player.getDestination() }); - } else { - this.source.connect({ input: this.player.getDestination() }); - } - this.source.play(when); - this.status = AudioStatus.Playing; - this.pauseTime = 0; - this.audioStartHook?.(this); - this.startAllEffect(); - this.emit('play'); - } - - /** - * 暂停音频播放 - */ - async pause() { - if (this.status !== AudioStatus.Playing) return; - 
this.status = AudioStatus.Pausing; - this.stopIdentifier++; - const identifier = this.stopIdentifier; - if (this.audioEndHook) { - this.audioEndHook(this.endTime, this); - await sleep(this.endTime); - } - if ( - this.status !== AudioStatus.Pausing || - this.stopIdentifier !== identifier - ) { - return; - } - this.pauseCurrentTime = this.source.currentTime; - const time = this.source.stop(); - this.pauseTime = time; - if (this.shouldStop) { - this.status = AudioStatus.Stoped; - this.endAllEffect(); - this.emit('stop'); - this.shouldStop = false; - } else { - this.status = AudioStatus.Paused; - this.endAllEffect(); - this.emit('pause'); - } - } - - /** - * 继续音频播放 - */ - resume() { - if (this.status === AudioStatus.Playing) return; - if ( - this.status === AudioStatus.Pausing || - this.status === AudioStatus.Stoping - ) { - this.audioStartHook?.(this); - this.emit('resume'); - return; - } - if (this.status === AudioStatus.Paused) { - this.play(this.pauseTime); - } else { - this.play(0); - } - this.status = AudioStatus.Playing; - this.pauseTime = 0; - this.audioStartHook?.(this); - this.startAllEffect(); - this.emit('resume'); - } - - /** - * 停止音频播放 - */ - async stop() { - if (this.status !== AudioStatus.Playing) { - if (this.status === AudioStatus.Pausing) { - this.shouldStop = true; - } - return; - } - this.status = AudioStatus.Stoping; - this.stopIdentifier++; - const identifier = this.stopIdentifier; - if (this.audioEndHook) { - this.audioEndHook(this.endTime, this); - await sleep(this.endTime); - } - if ( - this.status !== AudioStatus.Stoping || - this.stopIdentifier !== identifier - ) { - return; - } - this.source.stop(); - this.status = AudioStatus.Stoped; - this.pauseTime = 0; - this.endAllEffect(); - this.emit('stop'); - } - - /** - * 添加效果器 - * @param effect 要添加的效果,可以是数组,表示一次添加多个 - * @param index 从哪个位置开始添加,如果大于数组长度,那么加到末尾,如果小于0,那么将会从后面往前数。默认添加到末尾 - */ - addEffect(effect: AudioEffect | AudioEffect[], index?: number) { - if (isNil(index)) { - if (effect 
instanceof Array) { - this.effectRoute.push(...effect); - } else { - this.effectRoute.push(effect); - } - } else { - if (effect instanceof Array) { - this.effectRoute.splice(index, 0, ...effect); - } else { - this.effectRoute.splice(index, 0, effect); - } - } - this.setOutput(); - if (this.source.playing) this.link(); - this.emit('updateEffect'); - } - - /** - * 移除一个效果器 - * @param effect 要移除的效果 - */ - removeEffect(effect: AudioEffect) { - const index = this.effectRoute.indexOf(effect); - if (index === -1) return; - this.effectRoute.splice(index, 1); - effect.disconnect(); - this.setOutput(); - if (this.source.playing) this.link(); - this.emit('updateEffect'); - } - - destroy() { - this.effectRoute.forEach(v => v.disconnect()); - } - - private setOutput() { - const effect = this.effectRoute.at(-1); - if (!effect) this.output = this.source.output; - else this.output = effect.output; - } - - /** - * 连接音频路由图 - */ - private link() { - this.effectRoute.forEach(v => v.disconnect()); - this.effectRoute.forEach((v, i) => { - const next = this.effectRoute[i + 1]; - if (next) { - v.connect(next); - } - }); - } - - private startAllEffect() { - this.effectRoute.forEach(v => v.start()); - } - - private endAllEffect() { - this.effectRoute.forEach(v => v.end()); - } -} - -export const audioPlayer = new AudioPlayer(); -// window.audioPlayer = audioPlayer; diff --git a/packages-user/client-modules/src/fallback/audio.ts b/packages-user/client-modules/src/fallback/audio.ts index 7a14a14..7e309fc 100644 --- a/packages-user/client-modules/src/fallback/audio.ts +++ b/packages-user/client-modules/src/fallback/audio.ts @@ -1,7 +1,7 @@ import { Patch, PatchClass } from '@motajs/legacy-common'; -import { audioPlayer, bgmController, soundPlayer } from '../audio'; +import { audioContext, bgmPlayer, soundPlayer } from '@user/client-base'; import { mainSetting } from '@motajs/legacy-ui'; -import { sleep } from 'mutate-animate'; +import { sleep } from '@motajs/common'; import { isNil } from 
'lodash-es'; // todo: 添加弃用警告 logger.warn(56) @@ -10,10 +10,10 @@ export function patchAudio() { const patch = new Patch(PatchClass.Control); const play = (bgm: BgmIds, when?: number) => { - bgmController.play(bgm, when); + bgmPlayer.play(bgm, when); }; const pause = () => { - bgmController.pause(); + bgmPlayer.pause(); }; patch.add('playBgm', function (bgm, startTime) { @@ -23,13 +23,13 @@ export function patchAudio() { pause(); }); patch.add('resumeBgm', function () { - bgmController.resume(); + bgmPlayer.resume(); }); patch.add('checkBgm', function () { - if (bgmController.playing) return; + if (bgmPlayer.playing) return; if (mainSetting.getValue('audio.bgmEnabled')) { - if (bgmController.playingBgm) { - bgmController.play(bgmController.playingBgm); + if (bgmPlayer.playingBgm) { + bgmPlayer.play(bgmPlayer.playingBgm); } else { play(main.startBgm, 0); } @@ -38,8 +38,8 @@ export function patchAudio() { } }); patch.add('triggerBgm', function () { - if (bgmController.playing) bgmController.pause(); - else bgmController.resume(); + if (bgmPlayer.playing) bgmPlayer.pause(); + else bgmPlayer.resume(); }); patch.add( @@ -47,7 +47,7 @@ export function patchAudio() { function (sound, _pitch, callback, position, orientation) { const name = core.getMappedName(sound) as SoundIds; const num = soundPlayer.play(name, position, orientation); - const route = audioPlayer.getRoute(`sounds.${num}`); + const route = audioContext.getRoute(`sounds.${num}`); if (!route) { callback?.(); return -1; diff --git a/packages-user/client-modules/src/index.ts b/packages-user/client-modules/src/index.ts index 9f8fc7c..47e6c8a 100644 --- a/packages-user/client-modules/src/index.ts +++ b/packages-user/client-modules/src/index.ts @@ -1,11 +1,9 @@ import { loading } from '@user/data-base'; -import { createAudio } from './audio'; import { patchAll } from './fallback'; import { createGameRenderer, createRender } from './render'; export function create() { patchAll(); - createAudio(); createRender(); 
loading.once('coreInit', () => { createGameRenderer(); @@ -13,7 +11,5 @@ export function create() { } export * from './action'; -export * from './audio'; export * from './fallback'; -export * from './loader'; export * from './render'; diff --git a/packages-user/entry-data/src/mota.ts b/packages-user/entry-data/src/mota.ts index 17d3d20..1a467e9 100644 --- a/packages-user/entry-data/src/mota.ts +++ b/packages-user/entry-data/src/mota.ts @@ -8,6 +8,7 @@ import type * as LegacyUI from '@motajs/legacy-ui'; import type * as Render from '@motajs/render'; import type * as RenderVue from '@motajs/render-vue'; import type * as System from '@motajs/system'; +import type * as UserClientBase from '@user/client-base'; import type * as ClientModules from '@user/client-modules'; import type * as DataBase from '@user/data-base'; import type * as DataFallback from '@user/data-fallback'; @@ -31,6 +32,7 @@ interface ModuleInterface { '@motajs/render': typeof Render; '@motajs/render-vue': typeof RenderVue; '@motajs/system': typeof System; + '@user/client-base': typeof UserClientBase; '@user/client-modules': typeof ClientModules; '@user/data-base': typeof DataBase; '@user/data-fallback': typeof DataFallback; diff --git a/packages/animate/src/excitation.ts b/packages/animate/src/excitation.ts index 9bba2d4..4d94cb2 100644 --- a/packages/animate/src/excitation.ts +++ b/packages/animate/src/excitation.ts @@ -6,7 +6,8 @@ import { IExcitationVariator, ExcitationCurve, VariatorCurveMode, - IExcitationDivider + IExcitationDivider, + IIntervalExcitation } from './types'; import { excited } from './utils'; @@ -108,6 +109,30 @@ export class RafExcitation extends ExcitationBase { } } +export class IntervalExcitation + extends ExcitationBase + implements IIntervalExcitation +{ + private now: number = 0; + private readonly id: number; + + constructor(readonly interval: number) { + super(); + this.id = window.setInterval(() => { + this.excite(this.now); + this.now += interval; + }, interval); + } + 
+ payload(): number { + return this.now; + } + + override destroy(): void { + window.clearInterval(this.id); + } +} + interface CurveQueue { /** 速率曲线 */ curve: ExcitationCurve; diff --git a/packages/animate/src/types.ts b/packages/animate/src/types.ts index d2a2fda..2e2bbe6 100644 --- a/packages/animate/src/types.ts +++ b/packages/animate/src/types.ts @@ -158,6 +158,11 @@ export interface IExcitationDivider extends IExcitation { setDivider(divider: number): void; } +export interface IIntervalExcitation extends IExcitation { + /** 两次触发之间的时间间隔 */ + readonly interval: number; +} + //#endregion //#region 动画类 diff --git a/packages/audio/package.json b/packages/audio/package.json new file mode 100644 index 0000000..1623ed7 --- /dev/null +++ b/packages/audio/package.json @@ -0,0 +1,7 @@ +{ + "name": "@motajs/audio", + "dependencies": { + "@motajs/common": "workspace:*", + "@motajs/loader": "workspace:*" + } +} diff --git a/packages/audio/src/bgm.ts b/packages/audio/src/bgm.ts new file mode 100644 index 0000000..616f041 --- /dev/null +++ b/packages/audio/src/bgm.ts @@ -0,0 +1,309 @@ +import { guessTypeByExt, isAudioSupport } from './support'; +import { logger } from '@motajs/common'; +import { StreamLoader } from '@motajs/loader'; +import { + IAudioRoute, + IAudioVolumeEffect, + IBGMPlayer, + IMotaAudioContext +} from './types'; +import { AudioElementSource, AudioStreamSource } from './source'; + +interface BGMInfo { + /** 音频路由 */ + readonly route: IAudioRoute; + /** 音频播放时执行的函数 */ + readonly startFn: () => void; + /** 音频结束时执行的函数 */ + readonly endFn: () => void; +} + +interface AudioCacheInfo { + /** 音频路由 */ + readonly route: IAudioRoute; + /** 当前其占用的内存,如果是 `AudioElementSource`,那么此值为估计值,并非准确值 */ + size: number; +} + +export class BGMPlayer implements IBGMPlayer { + /** bgm音频名称的前缀 */ + prefix: string = 'bgms.'; + /** 每个 bgm 的音量控制器 */ + private readonly gain: Map = new Map(); + + /** 正在播放的 bgm */ + playingBgm?: T; + /** 是否正在播放 */ + playing: boolean = false; + + /** 是否已经启用 */ 
+ enabled: boolean = true; + /** 主音量控制器 */ + private readonly mainGain: IAudioVolumeEffect; + /** 是否屏蔽所有的音乐切换 */ + private blocking: boolean = false; + /** 渐变时长 */ + private transitionTime: number = 2000; + + /** 最大缓存容量 */ + maxCacheSize: number = 256; + /** 音频缓存池 */ + private readonly cachePool: AudioCacheInfo[] = []; + + constructor(public readonly ac: IMotaAudioContext) { + this.mainGain = ac.createVolumeEffect(); + } + + setMaxCacheSize(size: number): void { + this.maxCacheSize = size; + this.checkMaxCache(); + } + + private checkMaxCache() { + if (this.cachePool.length <= 1) return; + let total = 0; + let toDelete = 0; + for (let i = this.cachePool.length - 1; i >= 0; i--) { + total += this.cachePool[i].size; + if (total >= this.maxCacheSize) { + toDelete = i + 1; + break; + } + } + for (let i = 0; i < toDelete; i++) { + const data = this.cachePool.shift(); + if (!data) continue; + data.route.source.free(); + data.size = 0; + } + } + + /** + * 设置音频渐变时长 + * @param time 渐变时长 + */ + setTransitionTime(time: number) { + this.transitionTime = time; + } + + /** + * 屏蔽音乐切换 + */ + blockChange() { + this.blocking = true; + } + + /** + * 取消屏蔽音乐切换 + */ + unblockChange() { + this.blocking = false; + } + + /** + * 设置总音量大小 + * @param volume 音量大小 + */ + setVolume(volume: number) { + this.mainGain.setVolume(volume); + } + + /** + * 获取总音量大小 + */ + getVolume() { + return this.mainGain.getVolume(); + } + + /** + * 设置是否启用 + * @param enabled 是否启用 + */ + setEnabled(enabled: boolean) { + if (enabled) this.resume(); + else this.stop(); + this.enabled = enabled; + } + + /** + * 设置 bgm 音频名称的前缀 + */ + setPrefix(prefix: string) { + this.prefix = prefix; + } + + private getId(name: T) { + return `${this.prefix}${name}`; + } + + /** + * 根据 bgm 名称获取其 AudioRoute 实例 + * @param id 音频名称 + */ + get(id: T) { + return this.ac.getRoute(this.getId(id)); + } + + /** + * 添加一个 bgm + * @param id 要添加的 bgm 的名称 + * @param url 指定 bgm 的加载地址 + */ + addBGMFromURL(id: T, url: string) { + const type = 
guessTypeByExt(id); + if (!type) { + logger.warn(50, id.split('.').slice(0, -1).join('.')); + return; + } + const gain = this.ac.createVolumeEffect(); + if (isAudioSupport(type)) { + const source = this.ac.createElementSource(); + source.setSource(url); + source.setLoop(true); + const route = this.ac.createRoute(source); + route.addEffect([gain, this.mainGain]); + this.ac.addRoute(this.getId(id), route); + this.setTransition(id, route, gain); + } else { + const source = this.ac.createStreamSource(); + const stream = new StreamLoader(url); + stream.pipe(source); + source.setLoop(true); + const route = this.ac.createRoute(source); + route.addEffect([gain, this.mainGain]); + this.ac.addRoute(this.getId(id), route); + this.setTransition(id, route, gain); + } + } + + /** + * 移除一个 bgm + * @param id 要移除的 bgm 的名称 + */ + removeBgm(id: T) { + this.ac.removeRoute(this.getId(id)); + const gain = this.gain.get(id); + if (gain) { + gain.route.off('start', gain.startFn); + gain.route.off('end', gain.endFn); + } + this.gain.delete(id); + } + + private setTransition(id: T, route: IAudioRoute, gain: IAudioVolumeEffect) { + const startFn = () => { + gain.output.gain.cancelScheduledValues(this.ac.ac.currentTime); + gain.output.gain.setTargetAtTime( + 1, + this.ac.ac.currentTime, + this.transitionTime / 1000 / 3 + ); + }; + const endFn = () => { + gain.output.gain.cancelScheduledValues(this.ac.ac.currentTime); + gain.output.gain.setTargetAtTime( + 0, + this.ac.ac.currentTime, + this.transitionTime / 1000 / 3 + ); + }; + route.on('start', startFn); + route.on('end', endFn); + route.setEndTime(this.transitionTime); + + this.gain.set(id, { route, startFn, endFn }); + } + + /** + * 播放一个 bgm + * @param id 要播放的 bgm 名称 + */ + play(id: T, when?: number) { + if (this.blocking) return; + if (id !== this.playingBgm && this.playingBgm) { + this.ac.pause(this.getId(this.playingBgm)); + } + this.playingBgm = id; + if (!this.enabled) return; + const full = this.getId(id); + this.ac.play(full, when); 
+ this.playing = true; + const route = this.ac.getRoute(full); + if (!route) return; + const index = this.cachePool.findIndex(v => v.route === route); + if (index !== -1) { + // 说明还在缓冲区内,将其移动至最后面 + const [data] = this.cachePool.splice(index, 1); + this.cachePool.push(data); + } else { + // 不在缓冲区内,则执行加载,加载完毕后检查尺寸 + const cacheInfo: AudioCacheInfo = { + route, + size: 0 + }; + const source = route.source; + if (source instanceof AudioElementSource) { + // audio 元素音频源 + source.once('load', () => { + const duration = source.audio.duration; + const estimatedSize = duration * 48000 * 2 * 4; + cacheInfo.size = estimatedSize; + this.checkMaxCache(); + }); + } else if (source instanceof AudioStreamSource) { + // 流式加载音频源 + source.once('load', () => { + if (!source.buffer) return; + const buffer = source.buffer; + const size = buffer.numberOfChannels * buffer.length * 4; + cacheInfo.size = size; + this.checkMaxCache(); + }); + } else { + // 其他音频源 + source.once('load', () => { + const duration = source.duration; + const estimatedSize = duration * 48000 * 2 * 4; + cacheInfo.size = estimatedSize; + this.checkMaxCache(); + }); + } + this.cachePool.push(cacheInfo); + } + } + + /** + * 继续当前的 bgm + */ + resume() { + if (this.blocking || !this.enabled || this.playing) return; + if (this.playingBgm) { + this.ac.resume(this.getId(this.playingBgm)); + } + this.playing = true; + } + + /** + * 暂停当前的 bgm + */ + pause() { + if (this.blocking || !this.enabled) return; + if (this.playingBgm) { + this.ac.pause(this.getId(this.playingBgm)); + } + this.playing = false; + } + + /** + * 停止当前的 bgm + */ + stop() { + if (this.blocking || !this.enabled) return; + if (this.playingBgm) { + this.ac.stop(this.getId(this.playingBgm)); + } + this.playing = false; + } + + destroy(): void {} +} diff --git a/packages/audio/src/context.ts b/packages/audio/src/context.ts new file mode 100644 index 0000000..fb815f5 --- /dev/null +++ b/packages/audio/src/context.ts @@ -0,0 +1,449 @@ +import { + AudioBufferSource, 
+ AudioElementSource, + AudioStreamSource +} from './source'; +import { + ChannelVolumeEffect, + DelayEffect, + EchoEffect, + StereoEffect, + VolumeEffect +} from './effect'; +import { logger } from '@motajs/common'; +import { VanillaDecoder } from './decoder'; +import { + AudioDecoderCreateFunc, + AudioType, + IAudioBufferSource, + IAudioChannelVolumeEffect, + IAudioDecodeData, + IAudioDecoder, + IAudioDelayEffect, + IAudioEchoEffect, + IAudioEffect, + IAudioElementSource, + IAudioRoute, + IAudioSource, + IAudioStereoEffect, + IAudioStreamSource, + IAudioVolumeEffect, + IMotaAudioContext, + ISoundPlayer +} from './types'; +import { SoundPlayer } from './sound'; +import { AudioRoute } from './route'; + +const fileSignatures: [AudioType, number[]][] = [ + [AudioType.Mp3, [0x49, 0x44, 0x33]], + [AudioType.Ogg, [0x4f, 0x67, 0x67, 0x53]], + [AudioType.Wav, [0x52, 0x49, 0x46, 0x46]], + [AudioType.Flac, [0x66, 0x4c, 0x61, 0x43]], + [AudioType.Aac, [0xff, 0xf1]], + [AudioType.Aac, [0xff, 0xf9]] +]; +const oggHeaders: [AudioType, number[]][] = [ + [AudioType.Opus, [0x4f, 0x70, 0x75, 0x73, 0x48, 0x65, 0x61, 0x64]] +]; + +export class MotaAudioContext implements IMotaAudioContext { + /** 音频播放上下文 */ + readonly ac: AudioContext; + + /** 所有的音频播放路由 */ + readonly audioRoutes: Map = new Map(); + /** 音量节点 */ + readonly gain: GainNode; + + /** 测试用 audio 元素 */ + private readonly testAudio: HTMLAudioElement = new Audio(); + /** 所有注册的解码器 */ + private readonly decoders: Map = + new Map(); + + /** 最小音量 */ + private readonly minDb = -60; + + constructor() { + this.ac = new AudioContext(); + this.gain = this.ac.createGain(); + this.gain.connect(this.ac.destination); + } + + /** + * 设置音量,音量映射采用 `gain = 10 ** (dB / 20), where minDB = -60` + * @param volume 音量 + */ + setVolume(volume: number): void { + if (volume === 0) this.gain.gain.value = 0; + else { + const db = this.minDb + -this.minDb * volume; + const gain = 10 ** (db / 20); + this.gain.gain.value = gain; + } + } + + /** + * 
获取音量,音量映射采用 `gain = 10 ** (dB / 20), where minDB = -60` + */ + getVolume(): number { + if (this.gain.gain.value === 0) return 0; + const db = -Math.log10(this.gain.gain.value) * 20; + return db / this.minDb; + } + + /** + * 创建音效播放器 + */ + createSoundPlayer(): ISoundPlayer { + return new SoundPlayer(this); + } + + /** + * 创建一个音频源 + * @param Source 音频源类 + */ + createSource( + Source: new (ac: IMotaAudioContext) => T + ): T { + return new Source(this); + } + + /** + * 创建一个兼容流式音频源,可以与流式加载相结合,主要用于处理 opus ogg 不兼容的情况 + */ + createStreamSource(): IAudioStreamSource { + return new AudioStreamSource(this); + } + + /** + * 创建一个通过 audio 元素播放的音频源 + */ + createElementSource(): IAudioElementSource { + return new AudioElementSource(this); + } + + /** + * 创建一个通过 AudioBuffer 播放的音频源 + */ + createBufferSource(): IAudioBufferSource { + return new AudioBufferSource(this); + } + + /** + * 获取音频目的地 + */ + getDestination(): GainNode { + return this.gain; + } + + /** + * 创建一个音频效果器 + * @param Effect 效果器类 + */ + createEffect( + Effect: new (ac: IMotaAudioContext) => T + ): T { + return new Effect(this); + } + + /** + * 创建一个修改音量的效果器 + * ```txt + * |----------| + * Input ----> | GainNode | ----> Output + * |----------| + * ``` + */ + createVolumeEffect(): IAudioVolumeEffect { + return new VolumeEffect(this); + } + + /** + * 创建一个立体声效果器 + * ```txt + * |------------| + * Input ----> | PannerNode | ----> Output + * |------------| + * ``` + */ + createStereoEffect(): IAudioStereoEffect { + return new StereoEffect(this); + } + + /** + * 创建一个修改单个声道音量的效果器 + * ```txt + * |----------| + * -> | GainNode | \ + * |--------------| / |----------| -> |------------| + * Input ----> | SplitterNode | ...... 
| MergerNode | ----> Output + * |--------------| \ |----------| -> |------------| + * -> | GainNode | / + * |----------| + * ``` + */ + createChannelVolumeEffect(): IAudioChannelVolumeEffect { + return new ChannelVolumeEffect(this); + } + + /** + * 创建一个延迟效果器 + * ```txt + * |-----------| + * Input ----> | DelayNode | ----> Output + * |-----------| + * ``` + */ + createDelayEffect(): IAudioDelayEffect { + return new DelayEffect(this); + } + + /** + * 创建一个回声效果器 + * ```txt + * |----------| + * Input ----> | GainNode | ----> Output + * ^ |----------| | + * | | + * | |------------| ↓ + * |-- | Delay Node | <-- + * |------------| + * ``` + */ + createEchoEffect(): IAudioEchoEffect { + return new EchoEffect(this); + } + + /** + * 创建一个音频播放路由 + * @param source 音频源 + */ + createRoute(source: IAudioSource): IAudioRoute { + return new AudioRoute(source, this); + } + + /** + * 添加一个音频播放路由,可以直接被播放 + * @param id 这个音频播放路由的名称 + * @param route 音频播放路由对象 + */ + addRoute(id: string, route: IAudioRoute): void { + if (this.audioRoutes.has(id)) { + logger.warn(45, id); + } + this.audioRoutes.set(id, route); + } + + /** + * 根据名称获取音频播放路由对象 + * @param id 音频播放路由的名称 + */ + getRoute(id: string): IAudioRoute | null { + return this.audioRoutes.get(id) ?? 
null; + } + + /** + * 移除一个音频播放路由 + * @param id 要移除的播放路由的名称 + */ + removeRoute(id: string): void { + const route = this.audioRoutes.get(id); + if (route) { + route.destroy(); + } + this.audioRoutes.delete(id); + } + + /** + * 播放音频 + * @param id 音频名称 + * @param when 从音频的哪个位置开始播放,单位秒 + */ + play(id: string, when: number = 0): void { + const route = this.getRoute(id); + if (!route) { + logger.warn(53, 'play', id); + return; + } + route.play(when); + } + + /** + * 暂停音频播放 + * @param id 音频名称 + * @returns 当音乐真正停止时兑现 + */ + pause(id: string): Promise { + const route = this.getRoute(id); + if (!route) { + logger.warn(53, 'pause', id); + return Promise.resolve(); + } + return route.pause(); + } + + /** + * 停止音频播放 + * @param id 音频名称 + * @returns 当音乐真正停止时兑现 + */ + stop(id: string): Promise { + const route = this.getRoute(id); + if (!route) { + logger.warn(53, 'stop', id); + return Promise.resolve(); + } + return route.stop(); + } + + /** + * 继续音频播放 + * @param id 音频名称 + */ + resume(id: string): void { + const route = this.getRoute(id); + if (!route) { + logger.warn(53, 'resume', id); + return; + } + route.resume(); + } + + /** + * 设置听者位置,x正方向水平向右,y正方向垂直于地面向上,z正方向垂直屏幕远离用户 + * @param x 位置x坐标 + * @param y 位置y坐标 + * @param z 位置z坐标 + */ + setListenerPosition(x: number, y: number, z: number) { + const listener = this.ac.listener; + listener.positionX.value = x; + listener.positionY.value = y; + listener.positionZ.value = z; + } + + /** + * 设置听者朝向,x正方向水平向右,y正方向垂直于地面向上,z正方向垂直屏幕远离用户 + * @param x 朝向x坐标 + * @param y 朝向y坐标 + * @param z 朝向z坐标 + */ + setListenerOrientation(x: number, y: number, z: number) { + const listener = this.ac.listener; + listener.forwardX.value = x; + listener.forwardY.value = y; + listener.forwardZ.value = z; + } + + /** + * 设置听者头顶朝向,x正方向水平向右,y正方向垂直于地面向上,z正方向垂直屏幕远离用户 + * @param x 头顶朝向x坐标 + * @param y 头顶朝向y坐标 + * @param z 头顶朝向z坐标 + */ + setListenerUp(x: number, y: number, z: number) { + const listener = this.ac.listener; + listener.upX.value = x; + listener.upY.value 
= y; + listener.upZ.value = z; + } + + isAudioVanillaSupport(type: AudioType): boolean { + const support = this.testAudio.canPlayType(type); + return support === 'probably' || support === 'maybe'; + } + + registerDecoder(type: AudioType, decoder: AudioDecoderCreateFunc): void { + if (this.isAudioVanillaSupport(type)) return; + this.decoders.set(type, decoder); + } + + createDecoder(type: AudioType): IAudioDecoder | null { + if (this.isAudioVanillaSupport(type)) { + return new VanillaDecoder(this); + } else { + const create = this.decoders.get(type); + if (!create) return null; + return create(this); + } + } + + getAudioTypeFromData(data: Uint8Array): AudioType { + let audioType: AudioType = AudioType.Unknown; + // 检查头文件获取音频类型,仅检查前256个字节 + const toCheck = data.slice(0, 256); + for (const [type, value] of fileSignatures) { + if (value.every((v, i) => toCheck[i] === v)) { + audioType = type; + break; + } + } + if (audioType === AudioType.Ogg) { + // 如果是ogg的话,进一步判断是不是opus + for (const [key, value] of oggHeaders) { + const has = toCheck.some((_, i) => { + return value.every((v, ii) => toCheck[i + ii] === v); + }); + if (has) { + audioType = key; + break; + } + } + } + return audioType; + } + + private getErrorHeaderInfo(data: Uint8Array) { + const toCheck = data.slice(0, 256); + return [...toCheck] + .map(v => v.toString(16).padStart(2, '0')) + .join(' ') + .toUpperCase(); + } + + async decodeAudio(data: Uint8Array): Promise { + const type = this.getAudioTypeFromData(data); + if (type === AudioType.Unknown) { + logger.error(25, this.getErrorHeaderInfo(data)); + return null; + } + const decoder = this.createDecoder(type); + if (!decoder) { + logger.error(25, this.getErrorHeaderInfo(data)); + return null; + } + await decoder.create(); + const decoded = await decoder.decodeAll(data); + await decoder.destroy(); + return decoded; + } + + toAudioBuffer(data: IAudioDecodeData): AudioBuffer { + const buffer = this.ac.createBuffer( + data.channelData.length, + 
data.samplesDecoded, + data.sampleRate + ); + for (let i = 0; i < data.channelData.length; i++) { + buffer.copyToChannel(data.channelData[i], i); + } + return buffer; + } + + async decodeToAudioBuffer(data: Uint8Array): Promise { + const type = this.getAudioTypeFromData(data); + if (type === AudioType.Unknown) { + logger.error(53, this.getErrorHeaderInfo(data)); + return null; + } + if (!(data.buffer instanceof ArrayBuffer)) return null; + if (this.isAudioVanillaSupport(type)) { + return this.ac.decodeAudioData(data.buffer); + } else { + const decoded = await this.decodeAudio(data); + if (!decoded) return null; + else return this.toAudioBuffer(decoded); + } + } +} diff --git a/packages/audio/src/decoder.ts b/packages/audio/src/decoder.ts new file mode 100644 index 0000000..ebf368a --- /dev/null +++ b/packages/audio/src/decoder.ts @@ -0,0 +1,110 @@ +import { OggVorbisDecoderWebWorker } from '@wasm-audio-decoders/ogg-vorbis'; +import { OggOpusDecoderWebWorker } from 'ogg-opus-decoder'; +import { IAudioDecodeData, IAudioDecoder, IMotaAudioContext } from './types'; + +export class VorbisDecoder implements IAudioDecoder { + decoder?: OggVorbisDecoderWebWorker; + + async create(): Promise { + this.decoder = new OggVorbisDecoderWebWorker(); + await this.decoder.ready; + } + + async destroy(): Promise { + if (!this.decoder) return; + else return this.decoder.free(); + } + + async decode(data: Uint8Array): Promise { + if (!this.decoder) return Promise.resolve(null); + else return this.decoder.decode(data) as Promise; + } + + async decodeAll(data: Uint8Array): Promise { + if (!this.decoder) return Promise.resolve(null); + else return this.decoder.decodeFile(data) as Promise; + } + + async flush(): Promise { + if (!this.decoder) return Promise.resolve(null); + else return this.decoder.flush() as Promise; + } +} + +export class OpusDecoder implements IAudioDecoder { + decoder?: OggOpusDecoderWebWorker; + + async create(): Promise { + this.decoder = new 
OggOpusDecoderWebWorker({ + speechQualityEnhancement: 'none' + }); + await this.decoder.ready; + } + + async destroy(): Promise { + if (!this.decoder) return; + else return this.decoder.free(); + } + + async decode(data: Uint8Array): Promise { + if (!this.decoder) return Promise.resolve(null); + else return this.decoder.decode(data) as Promise; + } + + async decodeAll(data: Uint8Array): Promise { + if (!this.decoder) return Promise.resolve(null); + else return this.decoder.decodeFile(data) as Promise; + } + + async flush(): Promise { + if (!this.decoder) return Promise.resolve(null); + else return this.decoder.flush() as Promise; + } +} + +export class VanillaDecoder implements IAudioDecoder { + constructor(readonly ac: IMotaAudioContext) {} + + create(): Promise { + return Promise.resolve(); + } + + destroy(): Promise { + return Promise.resolve(); + } + + private async decodeData( + data: Uint8Array + ): Promise { + if (data.buffer instanceof ArrayBuffer) { + const buffer = await this.ac.ac.decodeAudioData(data.buffer); + const decodedData: Float32Array[] = []; + for (let i = 0; i < buffer.numberOfChannels; i++) { + const data = buffer.getChannelData(i); + decodedData.push(data); + } + const sampled = decodedData[0].length; + const sampleRate = buffer.sampleRate; + return { + errors: [], + channelData: decodedData, + samplesDecoded: sampled, + sampleRate + }; + } else { + return Promise.resolve(null); + } + } + + decode(data: Uint8Array): Promise { + return this.decodeData(data); + } + + decodeAll(data: Uint8Array): Promise { + return this.decodeData(data); + } + + flush(): Promise { + return Promise.resolve(null); + } +} diff --git a/packages-user/client-modules/src/audio/effect.ts b/packages/audio/src/effect.ts similarity index 82% rename from packages-user/client-modules/src/audio/effect.ts rename to packages/audio/src/effect.ts index 1471058..953aac5 100644 --- a/packages-user/client-modules/src/audio/effect.ts +++ b/packages/audio/src/effect.ts @@ -1,23 +1,26 
@@ import { isNil } from 'lodash-es'; -import { sleep } from 'mutate-animate'; +import { + IAudioEffect, + IAudioInput, + IAudioStereoEffect, + IAudioChannelVolumeEffect, + IAudioDelayEffect, + IMotaAudioContext, + IAudioEchoEffect +} from './types'; +import { sleep } from '@motajs/common'; -export interface IAudioInput { - /** 输入节点 */ - input: AudioNode; -} - -export interface IAudioOutput { - /** 输出节点 */ - output: AudioNode; -} - -export abstract class AudioEffect implements IAudioInput, IAudioOutput { +export abstract class AudioEffect implements IAudioEffect { /** 输出节点 */ abstract output: AudioNode; /** 输入节点 */ abstract input: AudioNode; - constructor(public readonly ac: AudioContext) {} + readonly ac: AudioContext; + + constructor(public readonly motaAC: IMotaAudioContext) { + this.ac = motaAC.ac; + } /** * 当音频播放结束时触发,可以用于节点结束后处理 @@ -66,13 +69,13 @@ export abstract class AudioEffect implements IAudioInput, IAudioOutput { } } -export class StereoEffect extends AudioEffect { +export class StereoEffect extends AudioEffect implements IAudioStereoEffect { output: PannerNode; input: PannerNode; - constructor(ac: AudioContext) { + constructor(ac: IMotaAudioContext) { super(ac); - const panner = ac.createPanner(); + const panner = ac.ac.createPanner(); this.input = panner; this.output = panner; } @@ -110,9 +113,9 @@ export class VolumeEffect extends AudioEffect { output: GainNode; input: GainNode; - constructor(ac: AudioContext) { + constructor(ac: IMotaAudioContext) { super(ac); - const gain = ac.createGain(); + const gain = ac.ac.createGain(); this.input = gain; this.output = gain; } @@ -137,21 +140,24 @@ export class VolumeEffect extends AudioEffect { start(): void {} } -export class ChannelVolumeEffect extends AudioEffect { +export class ChannelVolumeEffect + extends AudioEffect + implements IAudioChannelVolumeEffect +{ output: ChannelMergerNode; input: ChannelSplitterNode; /** 所有的音量控制节点 */ private readonly gain: GainNode[] = []; - constructor(ac: AudioContext) { 
+ constructor(ac: IMotaAudioContext) { super(ac); - const splitter = ac.createChannelSplitter(); - const merger = ac.createChannelMerger(); + const splitter = ac.ac.createChannelSplitter(); + const merger = ac.ac.createChannelMerger(); this.output = merger; this.input = splitter; for (let i = 0; i < 6; i++) { - const gain = ac.createGain(); + const gain = ac.ac.createGain(); splitter.connect(gain, i); gain.connect(merger, 0, i); this.gain.push(gain); @@ -182,13 +188,13 @@ export class ChannelVolumeEffect extends AudioEffect { start(): void {} } -export class DelayEffect extends AudioEffect { +export class DelayEffect extends AudioEffect implements IAudioDelayEffect { output: DelayNode; input: DelayNode; - constructor(ac: AudioContext) { + constructor(ac: IMotaAudioContext) { super(ac); - const delay = ac.createDelay(); + const delay = ac.ac.createDelay(); this.input = delay; this.output = delay; } @@ -213,7 +219,7 @@ export class DelayEffect extends AudioEffect { start(): void {} } -export class EchoEffect extends AudioEffect { +export class EchoEffect extends AudioEffect implements IAudioEchoEffect { output: GainNode; input: GainNode; @@ -226,10 +232,10 @@ export class EchoEffect extends AudioEffect { /** 是否正在播放 */ private playing: boolean = false; - constructor(ac: AudioContext) { + constructor(ac: IMotaAudioContext) { super(ac); - const delay = ac.createDelay(); - const gain = ac.createGain(); + const delay = ac.ac.createDelay(); + const gain = ac.ac.createGain(); gain.gain.value = 0.5; delay.delayTime.value = 0.05; delay.connect(gain); diff --git a/packages/audio/src/index.ts b/packages/audio/src/index.ts new file mode 100644 index 0000000..e1b7993 --- /dev/null +++ b/packages/audio/src/index.ts @@ -0,0 +1,9 @@ +export * from './bgm'; +export * from './context'; +export * from './decoder'; +export * from './effect'; +export * from './route'; +export * from './sound'; +export * from './source'; +export * from './support'; +export * from './types'; diff --git 
a/packages/audio/src/route.ts b/packages/audio/src/route.ts new file mode 100644 index 0000000..e90d68b --- /dev/null +++ b/packages/audio/src/route.ts @@ -0,0 +1,258 @@ +import EventEmitter from 'eventemitter3'; +import { isNil } from 'lodash-es'; +import { sleep } from '@motajs/common'; +import { AudioEffect } from './effect'; +import { + IAudioRoute, + AudioStatus, + IAudioSource, + IMotaAudioContext, + EAudioRouteEvent +} from './types'; + +export class AudioRoute + extends EventEmitter + implements IAudioRoute +{ + output: AudioNode; + + /** 效果器路由图 */ + readonly effectRoute: AudioEffect[] = []; + + /** 结束时长,当音频暂停或停止时,会经过这么长时间之后才真正终止播放,期间可以做音频淡入淡出等效果 */ + endTime: number = 0; + + /** 当前播放状态 */ + status: AudioStatus = AudioStatus.Stoped; + /** 暂停时刻 */ + private pauseTime: number = 0; + /** 暂停时播放了多长时间 */ + private pauseCurrentTime: number = 0; + + /** 音频时长,单位秒 */ + get duration() { + return this.source.duration; + } + /** 当前播放了多长时间,单位秒 */ + get currentTime() { + if (this.status === AudioStatus.Paused) { + return this.pauseCurrentTime; + } else { + return this.source.currentTime; + } + } + set currentTime(time: number) { + this.source.stop(); + this.source.play(time); + } + + private shouldStop: boolean = false; + /** + * 每次暂停或停止时自增,用于判断当前正在处理的情况。 + * 假如暂停后很快播放,然后很快暂停,那么需要根据这个来判断实际是否应该执行暂停后操作 + */ + stopIdentifier: number = 0; + + constructor( + public readonly source: IAudioSource, + public readonly player: IMotaAudioContext + ) { + super(); + this.output = source.output; + source.on('end', () => { + if (this.status === AudioStatus.Playing) { + this.status = AudioStatus.Stoped; + } + }); + source.on('play', () => { + if (this.status !== AudioStatus.Playing) { + this.status = AudioStatus.Playing; + } + }); + } + + /** + * 设置结束时间,暂停或停止时,会经过这么长时间才终止音频的播放,这期间可以做一下音频淡出的效果。 + * @param time 暂停或停止时,经过多长时间之后才会结束音频的播放 + */ + setEndTime(time: number) { + this.endTime = time; + } + + /** + * 开始播放这个音频 + * @param when 从音频的什么时候开始播放,单位秒 + */ + async play(when: number = 0) { + if 
(this.status === AudioStatus.Playing) return; + this.link(); + await this.player.ac.resume(); + if (this.effectRoute.length > 0) { + const first = this.effectRoute[0]; + this.source.connect(first); + const last = this.effectRoute.at(-1)!; + last.connect({ input: this.player.getDestination() }); + } else { + this.source.connect({ input: this.player.getDestination() }); + } + this.source.play(when); + this.status = AudioStatus.Playing; + this.pauseTime = 0; + this.emit('start', this); + this.startAllEffect(); + this.emit('play'); + } + + /** + * 暂停音频播放 + */ + async pause() { + if (this.status !== AudioStatus.Playing) return; + this.status = AudioStatus.Pausing; + this.stopIdentifier++; + const identifier = this.stopIdentifier; + this.emit('end', this.endTime, this); + await sleep(this.endTime); + if ( + this.status !== AudioStatus.Pausing || + this.stopIdentifier !== identifier + ) { + return; + } + this.pauseCurrentTime = this.source.currentTime; + const time = this.source.stop(); + this.pauseTime = time; + if (this.shouldStop) { + this.status = AudioStatus.Stoped; + this.endAllEffect(); + this.emit('stop'); + this.shouldStop = false; + } else { + this.status = AudioStatus.Paused; + this.endAllEffect(); + this.emit('pause'); + } + } + + /** + * 继续音频播放 + */ + resume() { + if (this.status === AudioStatus.Playing) return; + if ( + this.status === AudioStatus.Pausing || + this.status === AudioStatus.Stoping + ) { + this.emit('start', this); + this.emit('resume'); + return; + } + if (this.status === AudioStatus.Paused) { + this.play(this.pauseTime); + } else { + this.play(0); + } + this.status = AudioStatus.Playing; + this.pauseTime = 0; + this.emit('start', this); + this.startAllEffect(); + this.emit('resume'); + } + + /** + * 停止音频播放 + */ + async stop() { + if (this.status !== AudioStatus.Playing) { + if (this.status === AudioStatus.Pausing) { + this.shouldStop = true; + } + return; + } + this.status = AudioStatus.Stoping; + this.stopIdentifier++; + const identifier = 
this.stopIdentifier; + this.emit('end', this.endTime, this); + await sleep(this.endTime); + if ( + this.status !== AudioStatus.Stoping || + this.stopIdentifier !== identifier + ) { + return; + } + this.source.stop(); + this.status = AudioStatus.Stoped; + this.pauseTime = 0; + this.endAllEffect(); + this.emit('stop'); + } + + /** + * 添加效果器 + * @param effect 要添加的效果,可以是数组,表示一次添加多个 + * @param index 从哪个位置开始添加,如果大于数组长度,那么加到末尾,如果小于0,那么将会从后面往前数。默认添加到末尾 + */ + addEffect(effect: AudioEffect | AudioEffect[], index?: number) { + if (isNil(index)) { + if (effect instanceof Array) { + this.effectRoute.push(...effect); + } else { + this.effectRoute.push(effect); + } + } else { + if (effect instanceof Array) { + this.effectRoute.splice(index, 0, ...effect); + } else { + this.effectRoute.splice(index, 0, effect); + } + } + this.setOutput(); + if (this.source.playing) this.link(); + this.emit('updateEffect'); + } + + /** + * 移除一个效果器 + * @param effect 要移除的效果 + */ + removeEffect(effect: AudioEffect) { + const index = this.effectRoute.indexOf(effect); + if (index === -1) return; + this.effectRoute.splice(index, 1); + effect.disconnect(); + this.setOutput(); + if (this.source.playing) this.link(); + this.emit('updateEffect'); + } + + destroy() { + this.effectRoute.forEach(v => v.disconnect()); + } + + private setOutput() { + const effect = this.effectRoute.at(-1); + if (!effect) this.output = this.source.output; + else this.output = effect.output; + } + + /** + * 连接音频路由图 + */ + private link() { + this.effectRoute.forEach(v => v.disconnect()); + this.effectRoute.forEach((v, i) => { + const next = this.effectRoute[i + 1]; + if (next) { + v.connect(next); + } + }); + } + + private startAllEffect() { + this.effectRoute.forEach(v => v.start()); + } + + private endAllEffect() { + this.effectRoute.forEach(v => v.end()); + } +} diff --git a/packages-user/client-modules/src/audio/sound.ts b/packages/audio/src/sound.ts similarity index 72% rename from 
packages-user/client-modules/src/audio/sound.ts rename to packages/audio/src/sound.ts index 0033554..07ad605 100644 --- a/packages-user/client-modules/src/audio/sound.ts +++ b/packages/audio/src/sound.ts @@ -1,15 +1,9 @@ -import EventEmitter from 'eventemitter3'; -import { audioPlayer, AudioPlayer } from './player'; import { logger } from '@motajs/common'; -import { VolumeEffect } from './effect'; +import { IAudioVolumeEffect, IMotaAudioContext } from './types'; type LocationArray = [number, number, number]; -interface SoundPlayerEvent {} - -export class SoundPlayer< - T extends string = SoundIds -> extends EventEmitter { +export class SoundPlayer { /** 每个音效的唯一标识符 */ private num: number = 0; @@ -18,14 +12,13 @@ export class SoundPlayer< /** 所有正在播放的音乐 */ readonly playing: Set = new Set(); /** 音量节点 */ - readonly gain: VolumeEffect; + readonly gain: IAudioVolumeEffect; /** 是否已经启用 */ enabled: boolean = true; - constructor(public readonly player: AudioPlayer) { - super(); - this.gain = player.createVolumeEffect(); + constructor(public readonly ac: IMotaAudioContext) { + this.gain = ac.createVolumeEffect(); } /** @@ -58,7 +51,7 @@ export class SoundPlayer< * @param data 音效的Uint8Array数据 */ async add(id: T, data: Uint8Array) { - const buffer = await this.player.decodeAudioData(data); + const buffer = await this.ac.decodeToAudioBuffer(data); if (!buffer) { logger.warn(51, id); return; @@ -84,19 +77,19 @@ export class SoundPlayer< return -1; } const soundNum = this.num++; - const source = this.player.createBufferSource(); + const source = this.ac.createBufferSource(); source.setBuffer(buffer); - const route = this.player.createRoute(source); - const stereo = this.player.createStereoEffect(); + const route = this.ac.createRoute(source); + const stereo = this.ac.createStereoEffect(); stereo.setPosition(position[0], position[1], position[2]); stereo.setOrientation(orientation[0], orientation[1], orientation[2]); route.addEffect([stereo, this.gain]); - 
this.player.addRoute(`sounds.${soundNum}`, route); + this.ac.addRoute(`sounds.${soundNum}`, route); route.play(); // 清理垃圾 source.output.addEventListener('ended', () => { this.playing.delete(soundNum); - this.player.removeRoute(`sounds.${soundNum}`); + this.ac.removeRoute(`sounds.${soundNum}`); }); this.playing.add(soundNum); return soundNum; @@ -108,10 +101,10 @@ export class SoundPlayer< */ stop(num: number) { const id = `sounds.${num}`; - const route = this.player.getRoute(id); + const route = this.ac.getRoute(id); if (route) { route.stop(); - this.player.removeRoute(id); + this.ac.removeRoute(id); this.playing.delete(num); } } @@ -122,14 +115,12 @@ export class SoundPlayer< stopAllSounds() { this.playing.forEach(v => { const id = `sounds.${v}`; - const route = this.player.getRoute(id); + const route = this.ac.getRoute(id); if (route) { route.stop(); - this.player.removeRoute(id); + this.ac.removeRoute(id); } }); this.playing.clear(); } } - -export const soundPlayer = new SoundPlayer(audioPlayer); diff --git a/packages-user/client-modules/src/audio/source.ts b/packages/audio/src/source.ts similarity index 81% rename from packages-user/client-modules/src/audio/source.ts rename to packages/audio/src/source.ts index 5040219..f373606 100644 --- a/packages-user/client-modules/src/audio/source.ts +++ b/packages/audio/src/source.ts @@ -1,61 +1,22 @@ -import EventEmitter from 'eventemitter3'; -import { IStreamController, IStreamReader } from '../loader'; -import { IAudioInput, IAudioOutput } from './effect'; +import { IStreamController, IStreamReader } from '@motajs/loader'; import { logger } from '@motajs/common'; -import { AudioType } from './support'; import CodecParser, { CodecFrame, MimeType, OggPage } from 'codec-parser'; import { isNil } from 'lodash-es'; -import { IAudioDecodeData, AudioDecoder, checkAudioType } from './decoder'; +import { + AudioType, + EAudioSourceEvent, + IAudioBufferSource, + IAudioDecodeData, + IAudioDecoder, + IAudioElementSource, + 
IAudioInput, + IAudioStreamSource, + IMotaAudioContext +} from './types'; +import EventEmitter from 'eventemitter3'; -interface AudioSourceEvent { - play: []; - end: []; -} - -export abstract class AudioSource - extends EventEmitter - implements IAudioOutput -{ - /** 音频源的输出节点 */ - abstract readonly output: AudioNode; - - /** 是否正在播放 */ - playing: boolean = false; - - /** 获取音频时长 */ - abstract get duration(): number; - /** 获取当前音频播放了多长时间 */ - abstract get currentTime(): number; - - constructor(public readonly ac: AudioContext) { - super(); - } - - /** - * 开始播放这个音频源 - */ - abstract play(when?: number): void; - - /** - * 停止播放这个音频源 - * @returns 音频暂停的时刻 - */ - abstract stop(): number; - - /** - * 连接到音频路由图上,每次调用播放的时候都会执行一次 - * @param target 连接至的目标 - */ - abstract connect(target: IAudioInput): void; - - /** - * 设置是否循环播放 - * @param loop 是否循环 - */ - abstract setLoop(loop: boolean): void; -} - -const mimeTypeMap: Record = { +const mimeTypeMap: Record = { + [AudioType.Unknown]: 'unknown', [AudioType.Aac]: 'audio/aac', [AudioType.Flac]: 'audio/flac', [AudioType.Mp3]: 'audio/mpeg', @@ -68,11 +29,16 @@ function isOggPage(data: any): data is OggPage { return !isNil(data.isFirstPage); } -export class AudioStreamSource extends AudioSource implements IStreamReader { +export class AudioStreamSource + extends EventEmitter + implements IAudioStreamSource, IStreamReader +{ + readonly ac: AudioContext; + /** 音频源节点 */ output: AudioBufferSourceNode; /** 音频数据 */ - buffer?: AudioBuffer; + buffer: AudioBuffer | null = null; /** 是否已经完全加载完毕 */ loaded: boolean = false; @@ -80,6 +46,8 @@ export class AudioStreamSource extends AudioSource implements IStreamReader { buffered: number = 0; /** 已经缓冲的采样点数量 */ bufferedSamples: number = 0; + /** 当前是否正在播放 */ + playing: boolean = false; /** 歌曲时长,加载完毕之前保持为 0 */ duration: number = 0; /** 当前已经播放了多长时间 */ @@ -91,7 +59,7 @@ export class AudioStreamSource extends AudioSource implements IStreamReader { /** 音频的采样率,未成功解析出之前保持为 0 */ sampleRate: number = 0; - private 
controller?: IStreamController; + private controller: IStreamController | null = null; private loop: boolean = false; private target?: IAudioInput; @@ -108,9 +76,9 @@ export class AudioStreamSource extends AudioSource implements IStreamReader { /** 音频类型 */ private audioType: AudioType | '' = ''; /** 音频解码器 */ - private decoder?: AudioDecoder; + private decoder: IAudioDecoder | null = null; /** 音频解析器 */ - private parser?: CodecParser; + private parser: CodecParser | null = null; /** 每多长时间组成一个缓存 Float32Array */ private bufferChunkSize: number = 10; /** 缓存音频数据,每 bufferChunkSize 秒钟组成一个 Float32Array,用于流式解码 */ @@ -118,9 +86,10 @@ export class AudioStreamSource extends AudioSource implements IStreamReader { private errored: boolean = false; - constructor(context: AudioContext) { - super(context); - this.output = context.createBufferSource(); + constructor(readonly motaAC: IMotaAudioContext) { + super(); + this.ac = motaAC.ac; + this.output = motaAC.ac.createBufferSource(); } /** @@ -132,6 +101,24 @@ export class AudioStreamSource extends AudioSource implements IStreamReader { this.bufferChunkSize = size; } + free(): void { + this.stop(); + this.audioData = []; + this.decoder?.destroy(); + this.decoder = null; + this.parser = null; + this.audioType = ''; + this.headerRecieved = false; + this.errored = false; + this.duration = 0; + this.buffered = 0; + this.bufferedSamples = 0; + this.loaded = false; + this.sampleRate = 0; + this.buffer = null; + this.output.buffer = null; + } + piped(controller: IStreamController): void { this.controller = controller; } @@ -141,8 +128,9 @@ export class AudioStreamSource extends AudioSource implements IStreamReader { if (!this.headerRecieved) { // 检查头文件获取音频类型,仅检查前256个字节 const toCheck = data.slice(0, 256); - this.audioType = checkAudioType(data); - if (!this.audioType) { + const type = this.motaAC.getAudioTypeFromData(data); + this.audioType = type; + if (type === AudioType.Unknown) { logger.error( 25, [...toCheck] @@ -152,22 +140,23 @@ 
export class AudioStreamSource extends AudioSource implements IStreamReader { ); return; } - // 创建解码器 - const Decoder = AudioDecoder.decoderMap.get(this.audioType); - if (!Decoder) { + const decoder = this.motaAC.createDecoder(type); + if (!decoder) { this.errored = true; logger.error(24, this.audioType); return Promise.reject( `Cannot decode stream source type of '${this.audioType}', since there is no registered decoder for that type.` ); } - this.decoder = new Decoder(); + this.decoder = decoder; // 创建数据解析器 const mime = mimeTypeMap[this.audioType]; - const parser = new CodecParser(mime); - this.parser = parser; - await this.decoder.create(); - this.headerRecieved = true; + if (mime !== 'unknown') { + const parser = new CodecParser(mime); + this.parser = parser; + await decoder.create(); + this.headerRecieved = true; + } } const decoder = this.decoder; @@ -209,7 +198,7 @@ export class AudioStreamSource extends AudioSource implements IStreamReader { */ private async decodeData( data: Uint8Array, - decoder: AudioDecoder, + decoder: IAudioDecoder, parser: CodecParser ) { // 解析音频数据 @@ -230,7 +219,7 @@ export class AudioStreamSource extends AudioSource implements IStreamReader { /** * 解码剩余数据 */ - private async decodeFlushData(decoder: AudioDecoder, parser: CodecParser) { + private async decodeFlushData(decoder: IAudioDecoder, parser: CodecParser) { const audioData = await decoder.flush(); if (!audioData) return; // @ts-expect-error 库类型声明错误 @@ -348,7 +337,7 @@ export class AudioStreamSource extends AudioSource implements IStreamReader { } async start() { - delete this.buffer; + this.buffer = null; this.headerRecieved = false; this.audioType = ''; this.errored = false; @@ -365,13 +354,14 @@ export class AudioStreamSource extends AudioSource implements IStreamReader { end(done: boolean, reason?: string): void { if (done && this.buffer) { this.loaded = true; - delete this.controller; + this.controller = null; this.mergeBuffers(); this.duration = this.buffered; 
this.audioData = []; this.decoder?.destroy(); - delete this.decoder; - delete this.parser; + this.decoder = null; + this.parser = null; + this.emit('load'); } else { logger.warn(44, reason ?? ''); } @@ -381,14 +371,14 @@ export class AudioStreamSource extends AudioSource implements IStreamReader { if (!this.buffer) return; this.lastStartTime = this.ac.currentTime; if (this.playing) this.output.stop(); - this.emit('play'); this.createSourceNode(this.buffer); this.output.start(0, when); this.playing = true; + this.emit('play'); this.output.addEventListener('ended', () => { this.playing = false; - this.emit('end'); if (this.loop && !this.output.loop) this.play(0); + this.emit('end'); }); } @@ -428,9 +418,16 @@ export class AudioStreamSource extends AudioSource implements IStreamReader { } } -export class AudioElementSource extends AudioSource { +export class AudioElementSource + extends EventEmitter + implements IAudioElementSource +{ + readonly ac: AudioContext; output: MediaElementAudioSourceNode; + /** 当前是否正在播放 */ + playing: boolean = false; + /** audio 元素 */ readonly audio: HTMLAudioElement; @@ -441,11 +438,12 @@ export class AudioElementSource extends AudioSource { return this.audio.currentTime; } - constructor(context: AudioContext) { - super(context); + constructor(readonly motaAC: IMotaAudioContext) { + super(); + this.ac = motaAC.ac; const audio = new Audio(); audio.preload = 'none'; - this.output = context.createMediaElementSource(audio); + this.output = motaAC.ac.createMediaElementSource(audio); this.audio = audio; audio.addEventListener('play', () => { this.playing = true; @@ -455,6 +453,11 @@ export class AudioElementSource extends AudioSource { this.playing = false; this.emit('end'); }); + audio.addEventListener('load', () => { + if (audio.src.length > 0) { + this.emit('load'); + } + }); } /** @@ -465,6 +468,12 @@ export class AudioElementSource extends AudioSource { this.audio.src = url; } + free(): void { + this.stop(); + this.audio.src = ''; + 
this.audio.load(); + } + play(when: number = 0): void { if (this.playing) return; this.audio.currentTime = when; @@ -474,7 +483,6 @@ export class AudioElementSource extends AudioSource { stop(): number { this.audio.pause(); this.playing = false; - this.emit('end'); return this.audio.currentTime; } @@ -487,14 +495,21 @@ export class AudioElementSource extends AudioSource { } } -export class AudioBufferSource extends AudioSource { +export class AudioBufferSource + extends EventEmitter + implements IAudioBufferSource +{ + readonly ac: AudioContext; output: AudioBufferSourceNode; /** 音频数据 */ - buffer?: AudioBuffer; + buffer: AudioBuffer | null = null; /** 是否循环 */ private loop: boolean = false; + /** 当前是否正在播放 */ + playing: boolean = false; + duration: number = 0; get currentTime(): number { return this.ac.currentTime - this.lastStartTime + this.lastStartWhen; @@ -506,9 +521,10 @@ export class AudioBufferSource extends AudioSource { private lastStartTime: number = 0; private target?: IAudioInput; - constructor(context: AudioContext) { - super(context); - this.output = context.createBufferSource(); + constructor(readonly motaAC: IMotaAudioContext) { + super(); + this.ac = motaAC.ac; + this.output = motaAC.ac.createBufferSource(); } /** @@ -522,19 +538,26 @@ export class AudioBufferSource extends AudioSource { this.buffer = buffer; } this.duration = this.buffer.duration; + this.emit('load'); + } + + free(): void { + this.stop(); + this.output.buffer = null; + this.buffer = null; } play(when?: number): void { if (this.playing || !this.buffer) return; this.playing = true; this.lastStartTime = this.ac.currentTime; - this.emit('play'); this.createSourceNode(this.buffer); this.output.start(0, when); + this.emit('play'); this.output.addEventListener('ended', () => { this.playing = false; - this.emit('end'); if (this.loop && !this.output.loop) this.play(0); + this.emit('end'); }); } diff --git a/packages-user/client-modules/src/audio/support.ts b/packages/audio/src/support.ts 
similarity index 84% rename from packages-user/client-modules/src/audio/support.ts rename to packages/audio/src/support.ts index 66ba851..3717bf7 100644 --- a/packages-user/client-modules/src/audio/support.ts +++ b/packages/audio/src/support.ts @@ -1,16 +1,9 @@ +import { AudioType } from './types'; + const audio = new Audio(); const supportMap = new Map(); -export const enum AudioType { - Mp3 = 'audio/mpeg', - Wav = 'audio/wav; codecs="1"', - Flac = 'audio/flac', - Opus = 'audio/ogg; codecs="opus"', - Ogg = 'audio/ogg; codecs="vorbis"', - Aac = 'audio/aac' -} - /** * 检查一种音频类型是否能被播放 * @param type 音频类型 diff --git a/packages/audio/src/types.ts b/packages/audio/src/types.ts new file mode 100644 index 0000000..9399445 --- /dev/null +++ b/packages/audio/src/types.ts @@ -0,0 +1,792 @@ +import EventEmitter from 'eventemitter3'; +import { IStreamReader } from '@motajs/loader'; + +export interface IAudioInput { + /** 输入节点 */ + readonly input: AudioNode; +} + +export interface IAudioOutput { + /** 输出节点 */ + readonly output: AudioNode; +} + +export const enum AudioType { + Unknown = 'unknown', + Mp3 = 'audio/mpeg', + Wav = 'audio/wav; codecs="1"', + Flac = 'audio/flac', + Opus = 'audio/ogg; codecs="opus"', + Ogg = 'audio/ogg; codecs="vorbis"', + Aac = 'audio/aac' +} + +//#region 音频源 + +export interface EAudioSourceEvent { + play: []; + end: []; + load: []; +} + +export interface IAudioSource + extends IAudioOutput, EventEmitter { + /** 所属的 {@link IMotaAudioContext} 上下文 */ + readonly motaAC: IMotaAudioContext; + /** 音频播放上下文 */ + readonly ac: AudioContext; + /** 音频源对应的的音频节点 */ + readonly output: AudioNode; + /** 当前是否正在播放 */ + readonly playing: boolean; + /** 音频总时长 */ + readonly duration: number; + /** 当前播放时长 */ + readonly currentTime: number; + + /** + * 开始播放这个音频源 + */ + play(when?: number): void; + + /** + * 停止播放这个音频源 + * @returns 音频暂停的时刻 + */ + stop(): number; + + /** + * 连接到音频路由图上,每次调用播放的时候都会执行一次 + * @param target 连接至的目标 + */ + connect(target: IAudioInput): void; + + /** + * 
设置是否循环播放 + * @param loop 是否循环 + */ + setLoop(loop: boolean): void; + + /** + * 清空此音频源的缓存,释放其占用的内存 + */ + free(): void; +} + +export interface IAudioStreamSource extends IAudioSource, IStreamReader { + /** 流式加载的输出节点 */ + readonly output: AudioBufferSourceNode; + /** 音频缓冲区 */ + readonly buffer: AudioBuffer | null; + /** 当前是否已经加载完毕 */ + readonly loaded: boolean; + /** 已缓冲时长 */ + readonly buffered: number; + /** 已缓冲的采样点数量 */ + readonly bufferedSamples: number; + /** 音频采样率 */ + readonly sampleRate: number; + + /** + * 设置每个缓存数据的大小,默认为10秒钟一个缓存数据,只能在加载开始前设置 + * @param size 每个缓存数据的时长,单位秒 + */ + setChunkSize(size: number): void; +} + +export interface IAudioElementSource extends IAudioSource { + /** `audio` 元素音频源节点 */ + readonly output: MediaElementAudioSourceNode; + /** `audio` 元素对象 */ + readonly audio: HTMLAudioElement; + + /** + * 设置音频源的路径 + * @param url 音频路径 + */ + setSource(url: string): void; +} + +export interface IAudioBufferSource extends IAudioSource { + /** 音频源节点 */ + readonly output: AudioBufferSourceNode; + /** 音频数据缓冲区 */ + readonly buffer: AudioBuffer | null; + + /** + * 设置音频源数据 + * @param buffer 音频源,可以是未解析的 ArrayBuffer,也可以是已解析的 AudioBuffer + */ + setBuffer(buffer: ArrayBuffer | AudioBuffer): Promise; +} + +//#endregion + +//#region 音频路由 + +export const enum AudioStatus { + Playing, + Pausing, + Paused, + Stoping, + Stoped +} + +export interface EAudioRouteEvent { + updateEffect: []; + play: []; + stop: []; + pause: []; + resume: []; + + start: [route: IAudioRoute]; + end: [time: number, route: IAudioRoute]; +} + +export interface IAudioRoute + extends IAudioOutput, EventEmitter { + /** 音频路由图 */ + readonly effectRoute: readonly IAudioEffect[]; + /** 结束时长,当音频暂停或停止时,会经过这么长时间之后才真正终止播放,期间可以做音频淡入淡出等效果 */ + readonly endTime: number; + /** 当前音频播放状态 */ + readonly status: AudioStatus; + /** 音频总时长 */ + readonly duration: number; + /** 当前音频播放时长 */ + readonly currentTime: number; + /** 音频路由的音频源 */ + readonly source: IAudioSource; + + /** + * 
设置结束时间,暂停或停止时,会经过这么长时间才终止音频的播放,这期间可以做一下音频淡出的效果。 + * @param time 暂停或停止时,经过多长时间之后才会结束音频的播放 + */ + setEndTime(time: number): void; + + /** + * 开始播放这个音频 + * @param when 从音频的什么时候开始播放,单位秒 + */ + play(when?: number): Promise; + + /** + * 暂停音频播放 + */ + pause(): Promise; + + /** + * 继续音频播放 + */ + resume(): void; + + /** + * 停止音频播放 + */ + stop(): Promise; + + /** + * 添加效果器 + * @param effect 要添加的效果,可以是数组,表示一次添加多个 + * @param index 从哪个位置开始添加,如果大于数组长度,那么加到末尾,如果小于0,那么将会从后面往前数。默认添加到末尾 + */ + addEffect(effect: IAudioEffect | IAudioEffect[], index?: number): void; + + /** + * 移除一个效果器 + * @param effect 要移除的效果 + */ + removeEffect(effect: IAudioEffect): void; + + /** + * 销毁此音频路由 + */ + destroy(): void; +} + +//#endregion + +//#region 音频效果 + +export interface IAudioEffect extends IAudioInput, IAudioOutput { + /** 所属的 {@link IMotaAudioContext} 上下文 */ + readonly motaAC: IMotaAudioContext; + /** 音频播放上下文 */ + readonly ac: AudioContext; + + /** + * 当音频播放结束时触发,可以用于节点结束后处理 + */ + end(): void; + + /** + * 当音频开始播放时触发,可以用于节点初始化 + */ + start(): void; + + /** + * 连接至其他效果器 + * @param target 目标输入 + * @param output 当前效果器输出通道 + * @param input 目标效果器的输入通道 + */ + connect(target: IAudioInput, output?: number, input?: number): void; + + /** + * 与其他效果器取消连接 + * @param target 目标输入 + * @param output 当前效果器输出通道 + * @param input 目标效果器的输入通道 + */ + disconnect(target?: IAudioInput, output?: number, input?: number): void; +} + +export interface IAudioStereoEffect extends IAudioEffect { + /** + * 设置音频朝向,x正方向水平向右,y正方向垂直于地面向上,z正方向垂直屏幕远离用户 + * @param x 朝向x坐标 + * @param y 朝向y坐标 + * @param z 朝向z坐标 + */ + setOrientation(x: number, y: number, z: number): void; + + /** + * 设置音频位置,x正方向水平向右,y正方向垂直于地面向上,z正方向垂直屏幕远离用户 + * @param x 位置x坐标 + * @param y 位置y坐标 + * @param z 位置z坐标 + */ + setPosition(x: number, y: number, z: number): void; +} + +export interface IAudioVolumeEffect extends IAudioEffect { + /** 输入增益节点 */ + readonly input: GainNode; + /** 输出增益节点 */ + readonly output: GainNode; + + /** + * 设置音量大小,不采用音量映射 + * @param volume 音量大小
+ */ + setVolume(volume: number): void; + + /** + * 获取音量大小,不采用音量映射 + */ + getVolume(): number; +} + +export interface IAudioChannelVolumeEffect extends IAudioEffect { + /** + * 设置某个声道的音量大小 + * @param channel 要设置的声道,可填0-5 + * @param volume 这个声道的音量大小 + */ + setVolume(channel: number, volume: number): void; + + /** + * 获取某个声道的音量大小,可填0-5 + * @param channel 要获取的声道 + */ + getVolume(channel: number): number; +} + +export interface IAudioDelayEffect extends IAudioEffect { + /** + * 设置延迟时长 + * @param delay 延迟时长,单位秒 + */ + setDelay(delay: number): void; + + /** + * 获取延迟时长 + */ + getDelay(): number; +} + +export interface IAudioEchoEffect extends IAudioEffect { + /** + * 设置回声反馈增益大小 + * @param gain 增益大小,范围 0-1,大于等于1的视为0.5,小于0的视为0 + */ + setFeedbackGain(gain: number): void; + + /** + * 设置回声间隔时长 + * @param delay 回声时长,范围 0.01-Infinity,小于0.01的视为0.01 + */ + setEchoDelay(delay: number): void; + + /** + * 获取反馈节点增益 + */ + getFeedbackGain(): number; + + /** + * 获取回声间隔时长 + */ + getEchoDelay(): number; +} + +//#endregion + +//#region 音频上下文 + +export interface IMotaAudioContextCreator { + /** + * 创建一个音频源 + * @param Source 音频源类 + */ + createSource( + Source: new (ac: IMotaAudioContext) => T + ): T; + + /** + * 创建一个兼容流式音频源,可以与流式加载相结合,主要用于处理 opus ogg 不兼容的情况 + */ + createStreamSource(): IAudioStreamSource; + + /** + * 创建一个通过 audio 元素播放的音频源 + */ + createElementSource(): IAudioElementSource; + + /** + * 创建一个通过 AudioBuffer 播放的音频源 + */ + createBufferSource(): IAudioBufferSource; + + /** + * 创建一个音频效果器 + * @param Effect 效果器类 + */ + createEffect( + Effect: new (ac: IMotaAudioContext) => T + ): T; + + /** + * 创建一个修改音量的效果器 + * ```txt + * |----------| + * Input ----> | GainNode | ----> Output + * |----------| + * ``` + */ + createVolumeEffect(): IAudioVolumeEffect; + + /** + * 创建一个立体声效果器 + * ```txt + * |------------| + * Input ----> | PannerNode | ----> Output + * |------------| + * ``` + */ + createStereoEffect(): IAudioStereoEffect; + + /** + * 创建一个修改单个声道音量的效果器 + * ```txt + * |----------| + * -> | 
GainNode | \ + * |--------------| / |----------| -> |------------| + * Input ----> | SplitterNode | ...... | MergerNode | ----> Output + * |--------------| \ |----------| -> |------------| + * -> | GainNode | / + * |----------| + * ``` + */ + createChannelVolumeEffect(): IAudioChannelVolumeEffect; + + /** + * 创建一个延迟效果器 + * ```txt + * |-----------| + * Input ----> | DelayNode | ----> Output + * |-----------| + * ``` + */ + createDelayEffect(): IAudioDelayEffect; + + /** + * 创建一个回声效果器 + * ```txt + * |----------| + * Input ----> | GainNode | ----> Output + * ^ |----------| | + * | | + * | |------------| ↓ + * |-- | Delay Node | <-- + * |------------| + * ``` + */ + createEchoEffect(): IAudioEchoEffect; +} + +export type AudioDecoderCreateFunc = ( + context: IMotaAudioContext +) => IAudioDecoder; + +export interface IMotaAudioContext extends IMotaAudioContextCreator { + /** 音频上下文 */ + readonly ac: AudioContext; + /** 音频播放路由 */ + readonly audioRoutes: Map; + /** 音频增益节点 */ + readonly gain: GainNode; + + /** + * 设置音量,音量映射采用 `gain = 10 ** (dB / 20), where minDB = -60` + * @param volume 音量 + */ + setVolume(volume: number): void; + + /** + * 获取音量,音量映射采用 `gain = 10 ** (dB / 20), where minDB = -60` + */ + getVolume(): number; + + /** + * 获取音频目的地 + */ + getDestination(): AudioNode; + + /** + * 创建音效播放器 + */ + createSoundPlayer(): ISoundPlayer; + + /** + * 创建一个音频播放路由 + * @param source 音频源 + */ + createRoute(source: IAudioSource): IAudioRoute; + + /** + * 添加一个音频播放路由,可以直接被播放 + * @param id 这个音频播放路由的名称 + * @param route 音频播放路由对象 + */ + addRoute(id: string, route: IAudioRoute): void; + + /** + * 根据名称获取音频播放路由对象 + * @param id 音频播放路由的名称 + */ + getRoute(id: string): IAudioRoute | null; + + /** + * 移除一个音频播放路由 + * @param id 要移除的播放路由的名称 + */ + removeRoute(id: string): void; + + /** + * 播放音频 + * @param id 音频名称 + * @param when 从音频的哪个位置开始播放,单位秒 + */ + play(id: string, when?: number): void; + + /** + * 暂停音频播放 + * @param id 音频名称 + * @returns 当音乐真正停止时兑现 + */ + pause(id: string): Promise; + + /** + 
* 停止音频播放 + * @param id 音频名称 + * @returns 当音乐真正停止时兑现 + */ + stop(id: string): Promise; + + /** + * 继续音频播放 + * @param id 音频名称 + */ + resume(id: string): void; + + /** + * 设置听者位置,x正方向水平向右,y正方向垂直于地面向上,z正方向垂直屏幕远离用户 + * @param x 位置x坐标 + * @param y 位置y坐标 + * @param z 位置z坐标 + */ + setListenerPosition(x: number, y: number, z: number): void; + + /** + * 设置听者朝向,x正方向水平向右,y正方向垂直于地面向上,z正方向垂直屏幕远离用户 + * @param x 朝向x坐标 + * @param y 朝向y坐标 + * @param z 朝向z坐标 + */ + setListenerOrientation(x: number, y: number, z: number): void; + + /** + * 设置听者头顶朝向,x正方向水平向右,y正方向垂直于地面向上,z正方向垂直屏幕远离用户 + * @param x 头顶朝向x坐标 + * @param y 头顶朝向y坐标 + * @param z 头顶朝向z坐标 + */ + setListenerUp(x: number, y: number, z: number): void; + + /** + * 检查音频格式是否由浏览器所支持 + * @param type 音频格式 + */ + isAudioVanillaSupport(type: AudioType): boolean; + + /** + * 注册一个音频解码器 + * @param type 解码器解码的音频格式 + * @param decoder 解码器对象 + */ + registerDecoder(type: AudioType, decoder: AudioDecoderCreateFunc): void; + + /** + * 为指定音频格式创建解码器 + * @param type 音频格式 + */ + createDecoder(type: AudioType): IAudioDecoder | null; + + /** + * 根据音频未解码二进制数据获取其格式 + * @param data 音频数据 + */ + getAudioTypeFromData(data: Uint8Array): AudioType; + + /** + * 解码一个完整文件的音频二进制数据,对于浏览器支持的格式会使用浏览器内置接口 + * @param data 音频二进制数据 + */ + decodeAudio(data: Uint8Array): Promise; + + /** + * 将解码出的音频数据转换为 `AudioBuffer` + * @param data 音频解码数据 + */ + toAudioBuffer(data: IAudioDecodeData): AudioBuffer; + + /** + * 将音频完整文件直接解码为 `AudioBuffer`,如果浏览器本身支持传入的格式,那么可以减少转换次数提高性能 + * @param data 音频二进制数据 + */ + decodeToAudioBuffer(data: Uint8Array): Promise; +} + +//#endregion + +//#region 音效播放器 + +export type AudioLocationArray = [number, number, number]; + +export interface ISoundPlayer { + /** 音频上下文 */ + readonly ac: IMotaAudioContext; + /** 当前是否启用此音效播放器 */ + readonly enabled: boolean; + + /** + * 设置是否启用音效 + * @param enabled 是否启用音效 + */ + setEnabled(enabled: boolean): void; + + /** + * 设置音量大小 + * @param volume 音量大小 + */ + setVolume(volume: number): void; + + /** + * 获取音量大小 + */ + 
getVolume(): number; + + /** + * 添加一个音效 + * @param id 音效名称 + * @param data 音效的Uint8Array数据 + */ + add(id: T, data: Uint8Array): Promise; + + /** + * 播放一个音效 + * @param id 音效名称 + * @param position 音频位置,[0, 0, 0]表示正中心,x轴指向水平向右,y轴指向水平向上,z轴指向竖直向上 + * @param orientation 音频朝向,[0, 1, 0]表示朝向前方 + */ + play( + id: T, + position?: AudioLocationArray, + orientation?: AudioLocationArray + ): number; + + /** + * 停止一个音效 + * @param num 音效的唯一 id + */ + stop(num: number): void; + + /** + * 停止播放所有音效 + */ + stopAllSounds(): void; +} + +//#endregion + +//#region bgm 播放器 + +export interface IBGMPlayer { + /** 当前是否启用此播放器 */ + readonly enabled: boolean; + /** 当前正在播放的音乐 */ + readonly playingBGM?: T; + /** 当前是否有音乐正在播放 */ + readonly playing: boolean; + /** 最大缓存容量,默认 256MB */ + readonly maxCacheSize: number; + + /** + * 设置音乐的最大缓存容量,当已加载的音乐占用内存大于指定值时将会优先释放最久未被使用的音乐的缓存。 + * 建议大小为 128MB 或 256MB,超过此值有可能导致内存占用过大。系统会至少缓存一个音频,哪怕其大小超过缓存容量。 + * + * 大小设置参考:一段时长三分钟,采样率为 48000 的单声道音乐会占用约 33MB 的内存空间。 + * @param size 最大缓存容量,单位为 MB,最大可设置为 512M,最小可设置为 32M。 + */ + setMaxCacheSize(size: number): void; + + /** + * 设置音频渐变时长 + * @param time 渐变时长 + */ + setTransitionTime(time: number): void; + + /** + * 屏蔽音乐切换 + */ + blockChange(): void; + + /** + * 取消屏蔽音乐切换 + */ + unblockChange(): void; + + /** + * 设置总音量大小,不进行音量映射 + * @param volume 音量大小 + */ + setVolume(volume: number): void; + + /** + * 获取总音量大小,不进行音量映射 + */ + getVolume(): number; + + /** + * 设置是否启用 + * @param enabled 是否启用 + */ + setEnabled(enabled: boolean): void; + + /** + * 根据 bgm 名称获取其 AudioRoute 实例 + * @param id 音频名称 + */ + get(id: T): IAudioRoute | null; + + /** + * 添加一个 bgm + * @param id 要添加的 bgm 的名称 + * @param url 指定 bgm 的加载地址 + */ + addBGMFromURL(id: T, url: string): void; + + /** + * 移除一个 bgm + * @param id 要移除的 bgm 的名称 + */ + removeBgm(id: T): void; + + /** + * 播放一个 bgm + * @param id 要播放的 bgm 名称 + * @param when 播放开始时刻,单位秒 + */ + play(id: T, when?: number): void; + + /** + * 继续当前的 bgm + */ + resume(): void; + + /** + * 暂停当前的 bgm + */ + pause(): void; + 
/** + * 停止当前的 bgm + */ + stop(): void; + + /** + * 销毁此音乐播放器,释放相关资源 + */ + destroy(): void; +} + +//#endregion + +//#region 解码器 + +export interface IAudioDecodeError { + /** 错误信息 */ + readonly message: string; +} + +export interface IAudioDecodeData { + /** 每个声道的音频信息 */ + readonly channelData: Float32Array[]; + /** 已经被解码的 PCM 采样数 */ + readonly samplesDecoded: number; + /** 音频采样率 */ + readonly sampleRate: number; + /** 解码错误信息 */ + readonly errors: IAudioDecodeError[]; +} + +export interface IAudioDecoder { + /** + * 创建音频解码器 + */ + create(): Promise; + + /** + * 摧毁这个解码器 + */ + destroy(): Promise; + + /** + * 解码流数据 + * @param data 流数据 + */ + decode(data: Uint8Array): Promise; + + /** + * 解码整个文件 + * @param data 文件数据 + */ + decodeAll(data: Uint8Array): Promise; + + /** + * 当音频解码完成后,会调用此函数,需要返回之前还未解析或未返回的音频数据。调用后,该解码器将不会被再次使用 + */ + flush(): Promise; +} + +//#endregion diff --git a/packages/common/src/logger.json b/packages/common/src/logger.json index b143a1e..aaebf31 100644 --- a/packages/common/src/logger.json +++ b/packages/common/src/logger.json @@ -24,7 +24,7 @@ "22": "Incorrect props for custom tag. Please ensure you have delivered 'item' prop and other required props.", "23": "Cannot get reader when fetching '$1'.", "24": "Cannot decode source type of '$1', since there is no registered decoder for that type.", - "25": "Unknown audio type. Header: '$1'", + "25": "Cannot decode audio binary, since the audio format may not be supported. File header: '$1'", "26": "Uncaught error when fetching stream data from '$1'. 
Error info: $2.", "27": "No autotile connection data, please ensure you have created autotile connection map.", "28": "Cannot compile map render shader.", diff --git a/packages/legacy-common/src/resource.ts b/packages/legacy-common/src/resource.ts index 73483da..803020e 100644 --- a/packages/legacy-common/src/resource.ts +++ b/packages/legacy-common/src/resource.ts @@ -530,7 +530,7 @@ export function loadDefaultResource() { const res = LoadTask.add('byte', `byte/project/sounds/${v}`); Mota.r(() => { res.once('load', res => { - const { soundPlayer } = Mota.require('@user/client-modules'); + const { soundPlayer } = Mota.require('@user/client-base'); soundPlayer.add(v, res.resource!); }); }); @@ -696,9 +696,8 @@ export async function loadCompressedResource() { new FontFace(name.slice(0, -4), font) ); } else if (usage === 'sound' && main.mode === 'play') { - const { soundPlayer } = Mota.require( - '@user/client-modules' - ); + const { soundPlayer } = + Mota.require('@user/client-base'); soundPlayer.add(name as SoundIds, value as Uint8Array); } else if (usage === 'animate') { const ani = value as string; diff --git a/packages/legacy-ui/src/preset/ui.ts b/packages/legacy-ui/src/preset/ui.ts index a803482..80e5733 100644 --- a/packages/legacy-ui/src/preset/ui.ts +++ b/packages/legacy-ui/src/preset/ui.ts @@ -110,12 +110,12 @@ function handleAudioSetting( n: T, _o: T ) { - const { bgmController, soundPlayer } = Mota.require('@user/client-modules'); + const { bgmPlayer, soundPlayer } = Mota.require('@user/client-base'); if (key === 'bgmEnabled') { - bgmController.setEnabled(n as boolean); + bgmPlayer.setEnabled(n as boolean); core.checkBgm(); } else if (key === 'bgmVolume') { - bgmController.setVolume((n as number) / 100); + bgmPlayer.setVolume((n as number) / 100); } else if (key === 'soundEnabled') { soundPlayer.setEnabled(n as boolean); } else if (key === 'soundVolume') { diff --git a/packages/loader/package.json b/packages/loader/package.json new file mode 100644 index 
0000000..ac5c734 --- /dev/null +++ b/packages/loader/package.json @@ -0,0 +1,6 @@ +{ + "name": "@motajs/loader", + "dependencies": { + "@motajs/common": "workspace:*" + } +} diff --git a/packages-user/client-modules/src/loader/index.ts b/packages/loader/src/index.ts similarity index 100% rename from packages-user/client-modules/src/loader/index.ts rename to packages/loader/src/index.ts diff --git a/packages-user/client-modules/src/loader/stream.ts b/packages/loader/src/stream.ts similarity index 100% rename from packages-user/client-modules/src/loader/stream.ts rename to packages/loader/src/stream.ts