Question
I am using wavesurfer.js to create an online multitrack player and want to export a remixed version of the combined tracks with levels, panning, etc.
First I have an array of audio files and use it to create an array of wavesurfer instances:
for (var i = 0; i < audiofiles.length; i++) {
  spectrum[i] = WaveSurfer.create({
    // ...per-track options omitted...
  });
}
I then create a buffer for each of these from the wavesurfer backend:
for (var i = 0; i < audiofiles.length; i++) {
  var ctx = spectrum[i].backend.ac;
  var length = spectrum[i].getDuration() * sample_rate * 2;
  var ctx_buffer = ctx.createBuffer(2, length, ctx.sampleRate);
  // pass raw pcm buffer to download function
}
Finally, I got some help with the download function here: Downloading audio from web that has been modified with wavesurfer.js
My issue at this point is that what I'm passing to the download function doesn't seem to be in the correct format. I'm new to working with audio and not sure what I'm doing wrong.
If I pass the ctx_buffer variable to the function from the other question (using it in place of the buffer variable there, which is read directly from a PCM file), the download succeeds but the file contains only silence, although it is the correct length (leaving out the *2 in the length calculation above produces a silent file exactly half the length of my original).
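A note on why the file comes out silent: AudioContext.createBuffer() only allocates a new, zero-filled AudioBuffer of the requested size; it does not copy the decoded track into it. With wavesurfer's WebAudio backend, the decoded samples are already held in an AudioBuffer on the backend, which is what the final code below reads. A minimal sketch, assuming the default WebAudio backend:
// createBuffer() returns a silent (zero-filled) AudioBuffer -- nothing is copied into it
var empty = ctx.createBuffer(2, length, ctx.sampleRate);

// the decoded audio that wavesurfer already loaded is available here instead
var decoded = spectrum[i].backend.buffer;  // an AudioBuffer holding the real samples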
There is also an exportPCM() function in wavesurfer.js (https://wavesurfer-js.org/docs/methods.html), but I'm not sure how that works either.
Final code, thanks to @AnthumChris:
buttons.save_all.addEventListener("click", function () {
  document.getElementById("download_icon").className = "fas fa-spinner loader";
  document.getElementById("download_text").innerText = "Loading...";

  for (let i = 0; i < audiofiles.length; i++) {
    const track = spectrum[i];
    const sampleRate = track.backend.ac.sampleRate;
    const audioBuffer = track.backend.buffer;  // decoded AudioBuffer held by wavesurfer
    const interleavedSamples = getInterleavedStereo(audioBuffer);
    const file_name = `track-${i + 1}.wav`;
    download_init(interleavedSamples.buffer, sampleRate, file_name);
  }

  console.log(blobs);
}, false);
function showError(e) {
  console.log(`ERROR: ${e}`);
}
var x = 0;

function download_init(ctx_buffer, sample_rate, file_name) {
  const wavBytes = getWavBytes(ctx_buffer, {
    numChannels: 2,
    sampleRate: sample_rate,
    isFloat: true  // the interleaved samples are 32-bit floats
  });

  // blobs[] and buttons are declared elsewhere in the page script
  blobs[x] = URL.createObjectURL(
    new Blob([wavBytes], { type: 'audio/wav' })
  );

  buttons.download.href = blobs[0];
  buttons.download.setAttribute('download', file_name);
  buttons.save_all.hidden = true;
  buttons.download.hidden = false;
  x++;
}
function getInterleavedStereo(audioBuffer) {
  if (audioBuffer.numberOfChannels !== 2) {
    throw Error('source audio is not stereo');
  }

  const [left, right] = [audioBuffer.getChannelData(0), audioBuffer.getChannelData(1)];

  // interleave the two channels: L R L R ...
  const interleaved = new Float32Array(left.length + right.length);
  for (let src = 0, dst = 0; src < left.length; src++, dst += 2) {
    interleaved[dst] = left[src];
    interleaved[dst + 1] = right[src];
  }

  return interleaved;
}
// Returns a Uint8Array of WAV bytes (header followed by the PCM data)
function getWavBytes(buffer, options) {
  const type = options.isFloat ? Float32Array : Uint16Array;
  // numFrames = samples per channel; the buffer holds interleaved samples
  const numFrames = buffer.byteLength / type.BYTES_PER_ELEMENT / (options.numChannels || 2);

  const headerBytes = getWavHeader(Object.assign({}, options, { numFrames }));
  const wavBytes = new Uint8Array(headerBytes.length + buffer.byteLength);

  // prepend the header, then append the PCM bytes
  wavBytes.set(headerBytes, 0);
  wavBytes.set(new Uint8Array(buffer), headerBytes.length);

  return wavBytes;
}
// Returns a 44-byte RIFF/WAVE header as a Uint8Array
function getWavHeader(options) {
  const numFrames = options.numFrames;
  const numChannels = options.numChannels || 2;
  const sampleRate = options.sampleRate || 44100;
  const bytesPerSample = options.isFloat ? 4 : 2;
  const format = options.isFloat ? 3 : 1;  // 3 = IEEE float, 1 = integer PCM

  const blockAlign = numChannels * bytesPerSample;
  const byteRate = sampleRate * blockAlign;
  const dataSize = numFrames * blockAlign;

  const buffer = new ArrayBuffer(44);
  const dv = new DataView(buffer);
  let p = 0;

  function writeString(s) {
    for (let i = 0; i < s.length; i++) {
      dv.setUint8(p + i, s.charCodeAt(i));
    }
    p += s.length;
  }

  function writeUint32(d) {
    dv.setUint32(p, d, true);
    p += 4;
  }

  function writeUint16(d) {
    dv.setUint16(p, d, true);
    p += 2;
  }

  writeString('RIFF');              // ChunkID
  writeUint32(dataSize + 36);       // ChunkSize
  writeString('WAVE');              // Format
  writeString('fmt ');              // Subchunk1ID
  writeUint32(16);                  // Subchunk1Size
  writeUint16(format);              // AudioFormat
  writeUint16(numChannels);         // NumChannels
  writeUint32(sampleRate);          // SampleRate
  writeUint32(byteRate);            // ByteRate
  writeUint16(blockAlign);          // BlockAlign
  writeUint16(bytesPerSample * 8);  // BitsPerSample
  writeString('data');              // Subchunk2ID
  writeUint32(dataSize);            // Subchunk2Size

  return new Uint8Array(buffer);
}
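The helpers above also accept isFloat: false for 16-bit integer PCM, in which case the float samples have to be converted first. A minimal sketch of that conversion; the floatTo16BitPCM helper is an illustration, not part of the original code:
// Convert interleaved Float32 samples in [-1, 1] to 16-bit signed integer PCM
function floatTo16BitPCM(float32Samples) {
  const int16 = new Int16Array(float32Samples.length);
  for (let i = 0; i < float32Samples.length; i++) {
    const s = Math.max(-1, Math.min(1, float32Samples[i]));  // clamp to [-1, 1]
    int16[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;              // scale to the int16 range
  }
  return int16;
}

// usage (sketch):
// const pcm16 = floatTo16BitPCM(interleavedSamples);
// const wavBytes = getWavBytes(pcm16.buffer, { numChannels: 2, sampleRate: sample_rate, isFloat: false });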
Answer 1:
Given that you currently have an array of AudioBuffer objects, you can interleave the Float32Array PCM data contained within each AudioBuffer, and then use that interleaved PCM to create a RIFF/WAV file to download. If each AudioBuffer is a track, then all of the left/right channels in the array must be combined separately and interleaved at the end. Here's how to start with one AudioBuffer track:
Convert AudioBuffer to ArrayBuffer / Blob for WAV Download
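For the multi-track remix, the "combined separately" step above amounts to summing the left channels of every track together and the right channels together before interleaving. A minimal sketch of that mix-down, reusing getInterleavedStereo and getWavBytes from the question; the mixAudioBuffers helper and the plain equal-gain sum are illustrative assumptions, not part of the linked demo or the answer:
// Sum an array of stereo AudioBuffers (same sample rate) into one stereo AudioBuffer
function mixAudioBuffers(ctx, buffers) {
  const length = Math.max(...buffers.map(b => b.length));
  const mixed = ctx.createBuffer(2, length, buffers[0].sampleRate);

  for (let ch = 0; ch < 2; ch++) {
    const out = mixed.getChannelData(ch);
    for (const b of buffers) {
      const data = b.getChannelData(ch);
      for (let i = 0; i < data.length; i++) {
        out[i] += data[i];  // apply per-track gain/pan here instead of a plain sum
      }
    }
  }
  return mixed;
}

// usage (sketch):
// const ctx = spectrum[0].backend.ac;
// const mixed = mixAudioBuffers(ctx, spectrum.map(t => t.backend.buffer));
// const wavBytes = getWavBytes(getInterleavedStereo(mixed).buffer,
//   { numChannels: 2, sampleRate: mixed.sampleRate, isFloat: true });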
Source: https://stackoverflow.com/questions/62212995/getting-pcm-data-from-wavesurfer-js-backend-web-audio-api