Help me write JavaScript code for a front-end web page that captures microphone audio, converts the output stream to 16 kHz, 16-bit, mono format, and uploads the stream to the backend using FormData.
### Answer 1
Sure. Below is example code for a front-end page written in JavaScript that captures microphone audio, converts it to 16 kHz, 16-bit, mono PCM, and sends the data to the backend with a FormData object:
```html
<!DOCTYPE html>
<html>
<head>
  <title>Recorder</title>
</head>
<body>
  <h1>Recorder</h1>
  <button onclick="startRecording()">Start recording</button>
  <button onclick="stopRecording()">Stop recording</button>
  <p id="status">Not recording</p>
  <script>
    let recorder;
    let audioChunks = [];
    const sampleRate = 16000; // target sample rate
    const bitDepth = 16;      // target bit depth
    const numChannels = 1;    // mono
    const audioContext = new (window.AudioContext || window.webkitAudioContext)();

    async function startRecording() {
      audioChunks = []; // discard data from any previous recording
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      recorder = new MediaRecorder(stream);
      recorder.addEventListener("dataavailable", event => {
        audioChunks.push(event.data);
      });
      recorder.start();
      document.getElementById("status").textContent = "Recording...";
    }

    function stopRecording() {
      // Attach the handler before stopping so the final chunk is not missed
      recorder.addEventListener("stop", async () => {
        const blob = new Blob(audioChunks);
        const arrayBuffer = await blob.arrayBuffer();
        // Decode the browser-encoded recording (e.g. WebM/Opus) into PCM;
        // container support depends on the browser
        const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
        // Resample to 16 kHz and down-mix to a single channel
        const resampledBuffer = await resampleAudio(audioBuffer, sampleRate);
        const int16Array = convertFloat32ToInt16(resampledBuffer.getChannelData(0));
        // Note: this appends raw little-endian 16-bit PCM without a WAV header;
        // see the sketch after this block for wrapping it in a real WAV container
        const formData = new FormData();
        formData.append("audioData", new Blob([int16Array], { type: "audio/wav" }), "recording.wav");
        const response = await fetch("/upload-audio", { method: "POST", body: formData });
      });
      recorder.stop();
      document.getElementById("status").textContent = "Recording stopped";
    }

    // Convert float samples in [-1, 1] to signed 16-bit integers
    function convertFloat32ToInt16(float32Array) {
      const int16Array = new Int16Array(float32Array.length);
      for (let i = 0; i < float32Array.length; i++) {
        const clamped = Math.max(-1, Math.min(1, float32Array[i]));
        int16Array[i] = clamped * 0x7fff;
      }
      return int16Array;
    }

    // Resample (and down-mix to mono) by rendering the decoded buffer
    // through an OfflineAudioContext at the target sample rate
    function resampleAudio(audioBuffer, toSampleRate) {
      const length = Math.ceil(audioBuffer.duration * toSampleRate);
      const offlineCtx = new OfflineAudioContext(numChannels, length, toSampleRate);
      const source = offlineCtx.createBufferSource();
      source.buffer = audioBuffer;
      source.connect(offlineCtx.destination);
      source.start();
      // startRendering() resolves with an AudioBuffer already at the target rate
      return offlineCtx.startRendering();
    }
  </script>
</body>
</html>
```
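The FormData payload above is headerless PCM even though it is named `recording.wav`. If the backend expects a playable WAV file, a 44-byte RIFF header can be prepended on the client first. The following is a minimal sketch that is not part of the original answer; `encodeWav` is a hypothetical helper and it assumes little-endian 16-bit mono samples:

```javascript
// Sketch only: wrap raw 16-bit mono PCM samples in a 44-byte RIFF/WAVE header
// so the upload is a valid .wav file. Assumes a little-endian platform,
// which is effectively universal in browsers.
function encodeWav(int16Samples, sampleRate) {
  const bytesPerSample = 2;
  const dataSize = int16Samples.length * bytesPerSample;
  const buffer = new ArrayBuffer(44 + dataSize);
  const view = new DataView(buffer);

  const writeString = (offset, text) => {
    for (let i = 0; i < text.length; i++) view.setUint8(offset + i, text.charCodeAt(i));
  };

  writeString(0, "RIFF");
  view.setUint32(4, 36 + dataSize, true);                 // RIFF chunk size
  writeString(8, "WAVE");
  writeString(12, "fmt ");
  view.setUint32(16, 16, true);                           // fmt chunk size
  view.setUint16(20, 1, true);                            // audio format: PCM
  view.setUint16(22, 1, true);                            // channels: mono
  view.setUint32(24, sampleRate, true);                   // sample rate
  view.setUint32(28, sampleRate * bytesPerSample, true);  // byte rate
  view.setUint16(32, bytesPerSample, true);               // block align
  view.setUint16(34, 16, true);                           // bits per sample
  writeString(36, "data");
  view.setUint32(40, dataSize, true);                     // data chunk size

  // Copy the PCM samples directly after the header
  new Int16Array(buffer, 44).set(int16Samples);
  return new Blob([buffer], { type: "audio/wav" });
}
```

With this helper, the upload line would become `formData.append("audioData", encodeWav(int16Array, sampleRate), "recording.wav");`.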
### Answer 2
The following is a front-end JavaScript example that captures the microphone with MediaRecorder, replays the recording through the Web Audio API to down-mix it to a single channel, re-records the result, and uploads it to the backend with FormData. Note that MediaRecorder produces a browser-encoded container (typically Opus in WebM) rather than raw 16 kHz / 16-bit PCM, so the exact target format still has to be produced either with the OfflineAudioContext approach from Answer 1 or by transcoding on the server:
```javascript
// Buffer for the raw recorded chunks
let audioChunks = [];

// Capture the microphone stream
navigator.mediaDevices.getUserMedia({ audio: true })
  .then(function (stream) {
    // Record the raw microphone stream
    const mediaRecorder = new MediaRecorder(stream);

    // Store each chunk as it becomes available
    mediaRecorder.addEventListener("dataavailable", function (event) {
      audioChunks.push(event.data);
    });

    // When the first recording stops, replay it through the Web Audio API
    // and re-record it as a mono stream
    mediaRecorder.addEventListener("stop", function () {
      const audioBlob = new Blob(audioChunks);
      const audioUrl = URL.createObjectURL(audioBlob);
      const audio = new Audio();
      audio.src = audioUrl;

      // AudioContext used to route the playback through an audio graph
      const audioContext = new (window.AudioContext || window.webkitAudioContext)();
      const source = audioContext.createMediaElementSource(audio);
      const destination = audioContext.createMediaStreamDestination();

      // Force a mono down-mix by constraining the destination to one channel
      // (panning to the centre does not produce mono)
      destination.channelCount = 1;
      destination.channelCountMode = "explicit";
      source.connect(destination);

      // Re-record the down-mixed stream. MediaRecorder still produces a
      // browser-encoded container (typically Opus in WebM), not raw
      // 16 kHz / 16-bit PCM; convert on the server or use the Answer 1
      // approach if raw PCM is required.
      const convertedMediaRecorder = new MediaRecorder(destination.stream);
      let convertedAudioChunks = [];

      convertedMediaRecorder.addEventListener("dataavailable", function (event) {
        convertedAudioChunks.push(event.data);
      });

      convertedMediaRecorder.addEventListener("stop", function () {
        const convertedAudioBlob = new Blob(convertedAudioChunks);

        // Upload the converted recording with FormData
        const formData = new FormData();
        formData.append("audio", convertedAudioBlob, "audio.webm");

        // Send the FormData with fetch (XMLHttpRequest works as well)
        fetch("upload-url", {
          method: "POST",
          body: formData,
        })
          .then(function (response) {
            // Handle the response
          })
          .catch(function (error) {
            // Handle the error
          });
      });

      // Resume the context in case the browser created it in a suspended state,
      // then start re-recording and play the original recording into the graph
      audioContext.resume();
      convertedMediaRecorder.start();
      audio.play();

      // Stop the second recording after 5 seconds
      setTimeout(function () {
        convertedMediaRecorder.stop();
      }, 5000);
    });

    // Start the first recording
    mediaRecorder.start();

    // Stop the first recording after 5 seconds
    setTimeout(function () {
      mediaRecorder.stop();
    }, 5000);
  })
  .catch(function (error) {
    // Handle errors (e.g. microphone permission denied)
  });
```
Note: this is only example code; you will likely need to adjust and extend it for your specific requirements. Also make sure that capturing users' audio on a web page complies with the applicable privacy and data-protection laws and regulations, so that users' privacy is protected.
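Neither answer shows the receiving side. As a point of reference only, here is a minimal Node.js sketch of an endpoint that accepts the multipart upload; the route `/upload-audio`, the field name `audioData`, and the use of Express with the multer middleware are assumptions, not part of the original answers. If the payload is WebM/Opus as in Answer 2, it would still need to be transcoded to 16 kHz / 16-bit mono (for example with a tool such as ffmpeg) before use:

```javascript
// Sketch of a receiving endpoint, assuming Node.js with the express and
// multer packages installed (not taken from the original answers).
const express = require("express");
const multer = require("multer");
const fs = require("fs");

const app = express();
// Keep the uploaded file in memory; use diskStorage for large recordings
const upload = multer({ storage: multer.memoryStorage() });

// The field name "audioData" matches the FormData key used in Answer 1
app.post("/upload-audio", upload.single("audioData"), (req, res) => {
  if (!req.file) {
    return res.status(400).send("no audio file received");
  }
  // req.file.buffer holds the uploaded bytes: raw PCM or WAV from Answer 1,
  // WebM/Opus from Answer 2 (which still needs server-side transcoding)
  fs.writeFileSync("recording-" + Date.now() + ".bin", req.file.buffer);
  res.sendStatus(200);
});

app.listen(3000);
```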