Mobile H5 Recording Component (React Version)

Date: 2020-05-20

A native-JavaScript recording feature built on AudioContext and navigator.mediaDevices.
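At a high level, the wiring looks like this (a bare sketch of the browser APIs involved; the full recorder below adds buffering, downsampling and PCM encoding on top of it):

// Minimal wiring sketch: microphone stream -> AudioContext -> ScriptProcessorNode -> raw PCM frames
navigator.mediaDevices.getUserMedia({ audio: true }).then((stream) => {
  const context = new AudioContext();
  const source = context.createMediaStreamSource(stream);      // feed the mic stream into the audio graph
  const processor = context.createScriptProcessor(4096, 1, 1); // 4096-frame buffers, mono in/out
  processor.onaudioprocess = (e) => {
    const samples = e.inputBuffer.getChannelData(0); // Float32Array of samples in [-1, 1]
    // hand the samples to whatever buffering / encoding logic you need
  };
  source.connect(processor);
  processor.connect(context.destination); // some browsers only fire onaudioprocess while connected
});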

// Recorder.js
// Found online (I no longer remember the exact source). One caveat: in chunked mode it clears
// audioData after every send, so by the time recording stops audioData is empty. If you only need
// the whole recording as a single audio file, comment out the sendData() call inside
// onaudioprocess; that is exactly what I did, since I only need one complete file. If you need
// chunked transfer instead, re-enable that call and handle each chunk on the receiving side
// (see the note after this listing).
const Recorder = function (stream, callback) {
  const sampleBits = 16; // output sample bits: 8 or 16
  const sampleRate = 8000; // output sample rate
  const context = new AudioContext();
  const audioInput = context.createMediaStreamSource(stream);
  const recorder = context.createScriptProcessor(4096, 1, 1);
  const audioData = {
    size: 0, // total length of the recorded data
    buffer: [], // recording buffer
    inputSampleRate: context.sampleRate, // input sample rate (hard-coding 48000 breaks on devices that run at 44100)
    inputSampleBits: 16, // input sample bits: 8 or 16
    outputSampleRate: sampleRate, // output sample rate
    outputSampleBits: sampleBits, // output sample bits
    clear: function () {
      this.buffer = [];
      this.size = 0;
    },
    input: function (data) {
      this.buffer.push(new Float32Array(data));
      this.size += data.length;
    },
    compress: function () { // merge the buffered chunks, then downsample
      // merge
      const data = new Float32Array(this.size);
      let offset = 0;
      for (let i = 0; i < this.buffer.length; i++) {
        data.set(this.buffer[i], offset);
        offset += this.buffer[i].length;
      }
      // downsample by keeping every n-th sample
      const compression = Math.floor(this.inputSampleRate / this.outputSampleRate);
      const length = data.length / compression;
      const result = new Float32Array(length);
      let index = 0,
        j = 0;
      while (index < length) {
        result[index] = data[j];
        j += compression;
        index++;
      }
      return result;
    },
    encodePCM: function () { // no further format handling here; if needed, leave that to the server
      const sampleRate = Math.min(this.inputSampleRate, this.outputSampleRate);
      const sampleBits = Math.min(this.inputSampleBits, this.outputSampleBits);
      const bytes = this.compress();
      const dataLength = bytes.length * (sampleBits / 8);
      const buffer = new ArrayBuffer(dataLength);
      const data = new DataView(buffer);
      let offset = 0;
      for (let i = 0; i < bytes.length; i++, offset += 2) {
        const s = Math.max(-1, Math.min(1, bytes[i]));
        data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
      }
      return new Blob([data], { 'type': 'audio/pcm' });
    }
  };

  const sendData = function () { // split the captured data into 1024-byte packets
    const reader = new FileReader();
    reader.onload = e => {
      const outbuffer = e.target.result;
      // callback && callback(outbuffer);
      const arr = new Int8Array(outbuffer);
      if (arr.length > 0) {
        let tmparr = new Int8Array(1024);
        let j = 0;
        for (let i = 0; i < arr.byteLength; i++) {
          tmparr[j++] = arr[i];
          if (((i + 1) % 1024) == 0) {
            callback && callback(tmparr);
            if (arr.byteLength - i - 1 >= 1024) {
              tmparr = new Int8Array(1024);
            } else {
              tmparr = new Int8Array(arr.byteLength - i - 1);
            }
            j = 0;
          }
          if ((i + 1 == arr.byteLength) && ((i + 1) % 1024) != 0) {
            callback && callback(tmparr);
          }
        }
      }
    };
    reader.readAsArrayBuffer(audioData.encodePCM());
    audioData.clear(); // clear old data after every send
  };

  this.start = function () { // start recording: route mic input through the processor
    audioInput.connect(recorder);
    recorder.connect(context.destination);
  };

  this.stop = function () { // stop recording: detach the processor so onaudioprocess stops firing
    recorder.disconnect();
  };

  this.getBlob = function () { // return everything recorded so far as a raw PCM blob
    return audioData.encodePCM();
  };

  this.clear = function () { // drop any buffered audio
    audioData.clear();
  };

  recorder.onaudioprocess = function (e) {
    const inputBuffer = e.inputBuffer.getChannelData(0);
    audioData.input(inputBuffer);
    // sendData(); // re-enable for chunked transfer (see the note at the top of this file)
  }
}

export default Recorder;
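In whole-file mode, getBlob() returns raw PCM with no container, and (as the encodePCM comment says) any format conversion is left to the server. If the blob needs to be playable directly in the browser instead, a minimal sketch of a WAV wrapper could look like the following. encodeWAV is a hypothetical helper, not part of the recorder above; it assumes 16-bit mono PCM at the 8000 Hz output rate and a browser that supports Blob.arrayBuffer().

// encodeWAV.js (hypothetical helper): prepend the standard 44-byte RIFF/WAVE header to raw PCM
const encodeWAV = (pcmBlob, sampleRate = 8000, numChannels = 1, bitsPerSample = 16) =>
  pcmBlob.arrayBuffer().then((pcm) => {
    const byteRate = sampleRate * numChannels * (bitsPerSample / 8);
    const blockAlign = numChannels * (bitsPerSample / 8);
    const header = new ArrayBuffer(44);
    const view = new DataView(header);
    const writeString = (offset, str) => {
      for (let i = 0; i < str.length; i++) view.setUint8(offset + i, str.charCodeAt(i));
    };
    writeString(0, 'RIFF');
    view.setUint32(4, 36 + pcm.byteLength, true); // total file size minus 8 bytes
    writeString(8, 'WAVE');
    writeString(12, 'fmt ');
    view.setUint32(16, 16, true);          // fmt chunk size
    view.setUint16(20, 1, true);           // audio format: 1 = PCM
    view.setUint16(22, numChannels, true);
    view.setUint32(24, sampleRate, true);
    view.setUint32(28, byteRate, true);
    view.setUint16(32, blockAlign, true);
    view.setUint16(34, bitsPerSample, true);
    writeString(36, 'data');
    view.setUint32(40, pcm.byteLength, true);
    return new Blob([header, pcm], { type: 'audio/wav' });
  });

For example, encodeWAV(recorder.getBlob()).then(wav => { audio.src = URL.createObjectURL(wav); }) makes the recording playable in an audio element.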
/**
 * Recording component: press and hold to record
 */
// RecordItem.js
import React, { Component } from 'react';
import { Icon } from 'antd';
import { Toast } from 'antd-mobile';

import Recorder from './Recorder';

import './RecordItem.less';

class RecordItem extends Component {

  state = {
    isRecording: false, // whether recording is in progress
  }

  timer = null; // timer used to detect a long press

  handleTouchStart = () => {
    this.timer = setTimeout(() => {
      if (!this.recorder) return; // microphone not available (permission denied or still initialising)
      this.recorder.start();
      this.setState({
        isRecording: true
      });
    }, 300);
  }

  handleTouchEnd = () => {
    if (this.timer) {
      clearTimeout(this.timer);
    }
    if (!this.recorder) return; // nothing to stop if the recorder never initialised
    this.recorder.stop();
    this.setState({
      isRecording: false
    }, () => {
      const { onEnd } = this.props;
      onEnd && onEnd(this.recorder.getBlob());
    });
  }

  // callback that handles recorded audio data
  handleMsg = (data) => {
    const { onProgress } = this.props;
    onProgress && onProgress(data);
  }

  componentDidMount() {
    const constraints = { audio: true };
    navigator.mediaDevices.getUserMedia(constraints).then((stream) => {
      this.recorder = new Recorder(stream, this.handleMsg);
    }, err => {
      switch (err.message || err.name) {
        case 'PERMISSION_DENIED':
        case 'PermissionDeniedError':
        case 'NotAllowedError': // the name current browsers use when the user denies access
          Toast.info('Microphone access was denied by the user.');
          break;
        case 'NOT_SUPPORTED_ERROR':
        case 'NotSupportedError':
          Toast.info('The browser does not support this hardware device.');
          break;
        case 'MANDATORY_UNSATISFIED_ERROR':
        case 'MandatoryUnsatisfiedError':
          Toast.info('The requested hardware device could not be found.');
          break;
        default:
          Toast.info('Unable to open the microphone. Error: ' + (err.code || err.name));
          break;
      }
    });
    // this.recorder = new Recorder({
    //   callback: this.handleMsg
    // });
  }

  render() {
    const { isRecording } = this.state;
    return (
      <div
        className={`RecordItem ${isRecording ? 'recording' : ''}`}
        onTouchStart={this.handleTouchStart}
        onTouchEnd={this.handleTouchEnd}
      >
        <Icon type="audio" />
      </div>
    );
  }
}

export default RecordItem;
// RecordItem.less
.RecordItem {
  color: #333333;

  &.recording {
    color: #10C0DC;
  }

  .anticon {
    font-size: 1.5rem;
  }
}
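
For context, here is a sketch of how the component might be wired into a page. The parent component, the /api/upload endpoint and the field names are illustrative assumptions, not part of the original article.

// Usage sketch (hypothetical parent component)
import React from 'react';
import RecordItem from './RecordItem';

const RecordPage = () => {
  // called once with the complete raw PCM blob when the finger is lifted
  const handleEnd = (pcmBlob) => {
    const form = new FormData();
    form.append('audio', pcmBlob, 'record.pcm'); // raw PCM; the server decides the final format
    fetch('/api/upload', { method: 'POST', body: form });
  };

  // only fires if chunked mode (sendData) is re-enabled in Recorder.js
  const handleProgress = (chunk) => {
    console.log('received a chunk of', chunk.length, 'bytes');
  };

  return <RecordItem onEnd={handleEnd} onProgress={handleProgress} />;
};

export default RecordPage;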

Original article (Chinese): https://www.cnblogs.com/aloneMing/p/12924678.html