我有下面的逻辑来记录(在AudioContext中设置samplerate = 16000,单通道记录,只考虑一个通道)
process(inputs, outputs, parameters) {
const isRecordingValues = parameters.isRecording;
//taking first input
var input0 = inputs[0];
var inputChannel = input0[0];
if (isRecordingValues.length ===1){
const shouldRecord = isRecordingValues[0] === 1;
if (!shouldRecord && !this._isBufferEmpty()) {
this._flush();
this._recordingStopped();
}
if (shouldRecord) {
this._appendToBuffer(inputChannel);
}
}
return true;
}
}

_appendToBuffer如下所示:
// Appends one render quantum of float samples to the internal Float32Array
// buffer, flushing first when the buffer is already full.
// value: Float32Array of samples in [-1.0, 1.0].
// NOTE(review): this only flushes when the buffer is completely full; if
// `value` is longer than the remaining free space, set() can still overflow
// — confirm _isBufferFull accounts for value.length.
_appendToBuffer(value) {
if (this._isBufferFull()) {
this._flush();
}
// Here _buffer is of type Float32Array
this._buffer.set(value, this._bytesWritten);
// NOTE(review): _bytesWritten counts float32 samples (array indices), not
// bytes — the name is misleading; consider _samplesWritten.
this._bytesWritten += value.length;
} var blob = this._exportWAV(buffer, this._bytesWritten);
// Posts the exported blob to the main thread over the worklet's MessagePort.
this.port.postMessage({
eventType: 'data',
audioBuffer: blob
});这里的缓冲区包含-1.0到1.0之间的值。
我相信我在process方法中做错了什么,在缓冲区中记录的数据格式不正确。
我在这里做错什么了?
发布于 2019-06-24 06:11:58
我将_flush方法更改如下:
// Sends the accumulated samples to the main thread and resets the buffer.
_flush() {
let buffer = this._buffer;
// Trim the unused tail when the buffer was not completely filled.
if (this._bytesWritten < this._bufferSize) {
buffer = buffer.slice(0, this._bytesWritten);
}
// Post the raw Float32Array (values in [-1.0, 1.0]) as-is — no WAV
// encoding happens here; the receiver must convert/encode it.
this.port.postMessage({
eventType: 'data',
audioBuffer: buffer
});
this._initBuffer();}因此,我将直接将缓冲区发送到AudioWorklet。当我在AudioWorklet中接收到这个缓冲区时,我将它作为Blob发送到Flask应用程序,如下所示
// Forward the received Float32Array's underlying ArrayBuffer to the server.
const audioData = e.data.audioBuffer.buffer;
socket.emit( 'my event', {
// NOTE(review): this is headerless raw float32 PCM — the 'audio/wav' MIME
// label is misleading; the server must add the RIFF/WAV header itself.
blob : new Blob([audioData], { type: 'audio/wav' })
});这给了我简单的浮点数,在-1.0到1.0之间可以在Flask应用程序中使用。然后,我将这些浮点数转换为以下功能
def convert(raw_floats):
    """Convert raw little-endian float32 PCM bytes to 16-bit signed PCM.

    Args:
        raw_floats: bytes-like object containing float32 samples,
            nominally in [-1.0, 1.0].

    Returns:
        bytes: the samples packed as little-endian int16 values.
    """
    floats = array.array('f', raw_floats)
    # Clamp to [-1.0, 1.0] before scaling: a sample even slightly outside
    # the range would overflow int16 and make struct.pack raise struct.error.
    samples = [int(max(-1.0, min(1.0, sample)) * 32767) for sample in floats]
    raw_ints = struct.pack("<%dh" % len(samples), *samples)
    return raw_ints

# 我将这些raw_ints保存到WAVE文件,这是可在中播放的。
https://stackoverflow.com/questions/56592566
复制相似问题