generated from template/vite-react-template
temp

public/extensions/asr.aliyun.short.js (new file, 910 lines)
@@ -0,0 +1,910 @@
/*
Recorder extension: ASR, Aliyun speech recognition (speech-to-text); supports real-time recognition and transcribing a single audio file.

https://github.com/xiangyuecn/Recorder

- This extension performs recognition by calling the Aliyun Intelligent Speech Interaction "one-sentence recognition" API, with no duration limit.
- During recognition the audio is sent to Aliyun over a direct WebSocket connection; the audio data never passes through your own server.
- Your own server only needs to provide a token-generation endpoint (this library ships a local NodeJs test backend: /assets/demo-asr/NodeJsServer_asr.aliyun.short.js).

Although a single recognition session has no duration limit, the best fit is audio of 1-5 minutes; the extension can also handle 60+ minutes (you must add your own retry/fault handling), but very long sessions are not a good match for Aliyun one-sentence recognition (Aliyun caps a single one-sentence request at 60 seconds; this extension stitches consecutive requests together, which is why there is no overall limit). Why one-sentence recognition? Because it is cheap.


[Integration steps]
1. Enable the "one-sentence recognition" service on Aliyun (a trial period is available; for production you should enable the commercial edition, which is cheap) to obtain an AccessKey and Secret, see: https://help.aliyun.com/document_detail/324194.html ;
2. In the Aliyun Intelligent Speech Interaction console, create and configure a recognition project to obtain its Appkey; each project supports one language model, so create one project per language you need;
3. Your backend must provide a token-generation endpoint (using the Key and Secret above); you can reference or locally run the NodeJs test backend /assets/demo-asr/NodeJsServer_asr.aliyun.short.js: fill in your Aliyun account in the code, then run `node NodeJsServer_asr.aliyun.short.js` in that directory to start a local test endpoint;
4. On the frontend, call ASR_Aliyun_Short with the tokenApi and speech recognition is ready to use.

Online test page:
https://xiangyuecn.gitee.io/recorder/assets/工具-代码运行和静态分发Runtime.html?jsname=teach.realtime.asr.aliyun.short
Usage example:
	var rec=Recorder(recSet);rec.open(...) //before recognizing, open the recorder to obtain recording permission

	var asr=Recorder.ASR_Aliyun_Short(set); //create the asr object; see the source below for the available options

	//once asr is created, call start at any time to begin recognition
	asr.start(function(){
		rec.start();//normally call rec.start() once start has succeeded, then tell the user to begin speaking
	},fail);

	//feed in audio data in real time, normally called from rec.set.onProcess with the freshly recorded data; the data fed here is sent for recognition; it may be called before or after start — data fed before start is buffered and recognized once start succeeds
	asr.input([[Int16,...],...],48000,0);

	//when the user has finished speaking, call stop to end recognition and obtain the recognized text
	asr.stop(function(text,abortMsg){
		//text is the final complete result; if abortMsg is set, recognition was stopped midway by some error and text is the complete result recognized up to that point — normally asrProcess would already have received the abort event and recording would have been stopped there
	},fail);

	More methods:
	asr.inputDuration() total duration of the audio fed via input, in ms
	asr.sendDuration() total duration of the audio sent for recognition; includes re-sent overlapping parts, so it is longer than inputDuration
	asr.asrDuration() total duration of the audio recognized, with sendDuration's overlaps removed; value <= inputDuration
	asr.getText() current real-time result text; after stop it is the final text; rarely needed, since the callbacks already pass this value

	//transcribe a single complete audio Blob file in one call, no start/stop needed; just call this after creating asr
	asr.audioToText(audioBlob,success,fail)
	//transcribe a single complete block of PCM audio data in one call, no start/stop needed; just call this after creating asr
	asr.pcmToText(buffer,sampleRate,success,fail)
*/
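/* A minimal wiring sketch (illustrative; the "/token" endpoint is an assumed example, and the onProcess signature shown is Recorder's standard one): feeding asr.input from rec.set.onProcess so that only newly captured buffers are sent:
	var asr=Recorder.ASR_Aliyun_Short({ tokenApi:"/token" });
	var rec=Recorder({
		type:"wav"
		,onProcess:function(buffers,powerLevel,bufferDuration,bufferSampleRate,newBufferIdx){
			//newBufferIdx maps to input's buffersOffset, so previously sent buffers are skipped
			asr.input(buffers,bufferSampleRate,newBufferIdx);
		}
	});
*/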
(function(factory){
|
||||
var browser=typeof window=="object" && !!window.document;
|
||||
var win=browser?window:Object; //非浏览器环境,Recorder挂载在Object下面
|
||||
var rec=win.Recorder,ni=rec.i18n;
|
||||
factory(rec,ni,ni.$T,browser);
|
||||
}(function(Recorder,i18n,$T,isBrowser){
|
||||
"use strict";
|
||||
|
||||
var ASR_Aliyun_Short=function(set){
|
||||
return new fn(set);
|
||||
};
|
||||
var ASR_Aliyun_ShortTxt="ASR_Aliyun_Short";
|
||||
var fn=function(set){
|
||||
var This=this;
|
||||
var o={
|
||||
tokenApi:"" /*必填,调用阿里云一句话识别需要的token获取api地址
|
||||
接口实现请参考本地测试NodeJs后端程序:/assets/demo-asr/NodeJsServer_asr.aliyun.short.js
|
||||
此接口默认需要返回数据格式:
|
||||
{
|
||||
c:0 //code,0接口调用正常,其他数值接口调用出错
|
||||
,m:"" //message,接口调用出错时的错误消息
|
||||
,v:{ //value,接口成功调用返回的结果【结果中必须包含下面两个值】
|
||||
appkey:"aaaa" //lang语言模型对应的项目appkey
|
||||
,token:"bbbb" //语音识别Access Token
|
||||
}
|
||||
}
|
||||
如果不是返回的这个格式的数据,必须提供apiRequest配置,自行请求api*/
|
||||
,apiArgs:{ //请求tokenApi时要传的参数
|
||||
action:"token"
|
||||
,lang:"普通话" //语言模型设置(具体取值取决于tokenApi支持了哪些语言)
|
||||
}
|
||||
,apiRequest:null /*tokenApi的请求实现方法,默认使用简单的ajax实现
|
||||
如果你接口返回的数据格式和默认格式不一致,必须提供一个函数来自行请求api
|
||||
方法参数:fn(url,args,success,fail)
|
||||
url:"" == tokenApi
|
||||
args:{} == apiArgs
|
||||
success:fn(value) 接口调用成功回调,value={appkey:"", token:""}
|
||||
fail:fn(errMsg) 接口调用出错回调,errMsg="错误消息"
|
||||
*/
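/* A sketch of a custom apiRequest (hypothetical, for a backend whose response shape is {code,msg,data:{appkey,token}} instead of the default {c,m,v}); something like this could be passed as set.apiRequest:
	apiRequest:function(url,args,success,fail){
		var body=Object.keys(args).map(function(k){ return k+"="+encodeURIComponent(args[k]) }).join("&");
		fetch(url,{ method:"POST", headers:{"Content-Type":"application/x-www-form-urlencoded"}, body:body })
			.then(function(res){ return res.json() })
			.then(function(o){
				if(o.code!==0 || !o.data){ fail(o.msg||"bad token response"); return; }
				success({ appkey:o.data.appkey, token:o.data.token }); //success must receive {appkey,token}
			}).catch(function(e){ fail(""+e) });
	}
*/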
|
||||
,compatibleWebSocket:null /*提供一个函数返回兼容WebSocket的对象,一般也需要提供apiRequest
|
||||
如果你使用的环境不支持WebSocket,需要提供一个函数来返回一个兼容实现对象
|
||||
方法参数:fn(url) url为连接地址,返回一个对象,需支持的回调和方法:{
|
||||
onopen:fn() 连接成功回调
|
||||
onerror:fn({message}) 连接失败回调
|
||||
onclose:fn({code, reason}) 连接关闭回调
|
||||
onmessage:fn({data}) 收到消息回调
|
||||
connect:fn() 进行连接
|
||||
close:fn(code,reason) 关闭连接
|
||||
send:fn(data) 发送数据,data为字符串或者arraybuffer
|
||||
}
|
||||
binaryType固定使用arraybuffer类型
|
||||
*/
|
||||
|
||||
//,asrProcess:null //fn(text,nextDuration,abortMsg) 当实时接收到语音识别结果时的回调函数(对单个完整音频文件的识别也有效)
|
||||
//此方法需要返回true才会继续识别,否则立即当做识别超时处理,你应当通过nextDuration来决定是否继续识别,避免无限制的识别大量消耗阿里云资源额度;如果不提供本回调,默认1分钟超时后终止识别(因为没有绑定回调,你不知道已经被终止了)
|
||||
//text为中间识别到的内容(并非已有录音片段的最终结果,后续可能会根据语境修整)
|
||||
//nextDuration 为当前回调时下次即将进行识别的总时长,单位毫秒,通过这个参数来限制识别总时长,超过时长就返回false终止识别(第二分钟开始每分钟会多识别前一分钟结尾的5秒数据,用于两分钟之间的拼接,相当于第二分钟最多识别55秒的新内容)
|
||||
//abortMsg如不为空代表识别中途因为某种原因终止了识别(比如超时、接口调用失败),收到此信息时应当立即调用asr的stop方法得到最终结果,并且终止录音
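//Example sketch (the 5-minute cap and the rec/liveEl names are assumptions): an asrProcess that shows the live text and limits total recognition time:
//  asrProcess:function(text,nextDuration,abortMsg){
//      if(abortMsg){ rec.stop(); } //recognition was aborted: stop recording, then call asr.stop to collect what was recognized
//      liveEl.textContent=text; //show the intermediate result
//      return nextDuration<=5*60*1000; //returning false ends recognition as a timeout
//  }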
|
||||
|
||||
,log:NOOP //fn(msg,color)提供一个日志输出接口,默认只会输出到控制台,color: 1:红色,2绿色,不为空时为颜色字符串
|
||||
|
||||
//高级选项
|
||||
,fileSpeed:6 //单个文件识别发送速度控制,取值1-n;1:为按播放速率发送,最慢,识别精度完美;6:按六倍播放速度发送,花10秒识别60秒文件比较快,精度还行;再快测试发现似乎会缺失内容,可能是发送太快底层识别不过来导致返回的结果缺失。
|
||||
};
|
||||
for(var k in set){
|
||||
o[k]=set[k];
|
||||
};
|
||||
This.set=set=o;
|
||||
This.state=0;//0 未start,1 start,2 stop
|
||||
This.started=0;
|
||||
|
||||
This.sampleRate=16000;//发送的采样率
|
||||
//This.tokenData
|
||||
|
||||
This.pcmBuffers=[];//等待发送的缓冲数据
|
||||
This.pcmTotal=0;//输入的总量
|
||||
This.pcmOffset=0;//缓冲[0]的已发送位置
|
||||
This.pcmSend=0;//发送的总量,不会重复计算重发的量
|
||||
|
||||
This.joinBuffers=[];//下一分钟左移5秒,和上一分钟重叠5秒
|
||||
This.joinSize=0;//左移的数据量
|
||||
This.joinSend=0;//单次已发送量
|
||||
This.joinOffset=-1;//左移[0]的已发送位置,-1代表可以进行整理buffers
|
||||
This.joinIsOpen=0;//是否开始发送
|
||||
This.joinSendTotal=0;//已发送重叠的总量
|
||||
|
||||
This.sendCurSize=0;//单个wss发送量,不能超过1分钟的量
|
||||
This.sendTotal=0;//总计的发送量,存在重发重叠部分
|
||||
|
||||
//This.stopWait=null
|
||||
//This.sendWait=0
|
||||
//This.sendAbort=false
|
||||
//This.sendAbortMsg=""
|
||||
|
||||
//This.wsCur 当前的wss
|
||||
//This.wsLock 新的一分钟wss准备
|
||||
This.resTxts=[];//每分钟结果列表 resTxt object: {tempTxt:"efg",okTxt:"efgh",fullTxt:"abcdefgh"}
|
||||
|
||||
if(!set.asrProcess){
|
||||
This.log("未绑定asrProcess回调无法感知到abort事件",3);
|
||||
};
|
||||
};
|
||||
var CLog=function(){
|
||||
var v=arguments; v[0]="["+ASR_Aliyun_ShortTxt+"]"+v[0];
|
||||
Recorder.CLog.apply(null,v);
|
||||
};
|
||||
fn.prototype=ASR_Aliyun_Short.prototype={
|
||||
log:function(msg,color){
|
||||
CLog(msg,typeof color=="number"?color:0);
|
||||
this.set.log("["+ASR_Aliyun_ShortTxt+"]"+msg,color==3?"#f60":color);
|
||||
}
|
||||
|
||||
|
||||
//input已输入的音频数据总时长
|
||||
,inputDuration:function(){
|
||||
return Math.round(this.pcmTotal/this.sampleRate*1000);
|
||||
}
|
||||
//已发送识别的音频数据总时长,存在重发重叠部分,因此比inputDuration长
|
||||
,sendDuration:function(add){
|
||||
var size=this.sendTotal;
|
||||
size+=add||0;
|
||||
return Math.round(size/this.sampleRate*1000);
|
||||
}
|
||||
//已识别的音频数据总时长,去除了sendDuration的重叠部分,值<=inputDuration
|
||||
,asrDuration:function(){
|
||||
return this.sendDuration(-this.joinSendTotal);
|
||||
}
|
||||
|
||||
|
||||
/**一次性将单个完整音频文件转成文字,支持的文件类型由具体的浏览器决定,因此存在兼容性问题,兼容性mp3最好,wav次之,其他格式不一定能够解码。实际就是调用:浏览器解码音频得到PCM -> start -> input ... input -> stop
|
||||
blob:Blob 音频文件Blob对象,如:rec.stop得到的录音结果、file input选择的文件、XMLHttpRequest的blob结果、new Blob([TypedArray])创建的blob
|
||||
success fn(text,abortMsg) text为识别到的完整内容,abortMsg参考stop
|
||||
fail:fn(errMsg)
|
||||
**/
|
||||
,audioToText:function(blob,success,fail){
|
||||
var This=this;
|
||||
var failCall=function(err){
|
||||
This.log(err,1);
|
||||
fail&&fail(err);
|
||||
};
|
||||
if(!Recorder.GetContext()){//强制激活Recorder.Ctx 不支持大概率也不支持解码
|
||||
failCall("浏览器不支持音频解码");
|
||||
return;
|
||||
};
|
||||
|
||||
var reader=new FileReader();
|
||||
reader.onloadend=function(){
|
||||
var ctx=Recorder.Ctx;
|
||||
ctx.decodeAudioData(reader.result,function(raw){
|
||||
var src=raw.getChannelData(0);
|
||||
var sampleRate=raw.sampleRate;
|
||||
|
||||
var pcm=new Int16Array(src.length);
|
||||
for(var i=0;i<src.length;i++){//floatTo16BitPCM
|
||||
var s=Math.max(-1,Math.min(1,src[i]));
|
||||
s=s<0?s*0x8000:s*0x7FFF;
|
||||
pcm[i]=s;
|
||||
};
|
||||
|
||||
This.pcmToText(pcm,sampleRate,success,fail);
|
||||
},function(e){
|
||||
failCall("音频解码失败["+blob.type+"]:"+e.message);
|
||||
});
|
||||
};
|
||||
reader.readAsArrayBuffer(blob);
|
||||
}
|
||||
/**一次性的将单个完整音频转成文字。实际就是调用:start -> input ... input -> stop
|
||||
buffer:[Int16,...] 16位单声道音频pcm数据,一维数组
|
||||
sampleRate pcm的采样率
|
||||
success fn(text,abortMsg) text为识别到的完整内容,abortMsg参考stop
|
||||
fail:fn(errMsg)
|
||||
**/
|
||||
,pcmToText:function(buffer,sampleRate,success,fail){
|
||||
var This=this;
|
||||
This.start(function(){
|
||||
This.log("单个文件"+Math.round(buffer.length/sampleRate*1000)+"ms转文字");
|
||||
This.sendSpeed=This.set.fileSpeed;
|
||||
This.input([buffer],sampleRate);
|
||||
This.stop(success,fail);
|
||||
},fail);
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**开始识别,开始后需要调用input输入录音数据,结束时调用stop来停止识别。如果start之前调用了input输入数据,这些数据将会等到start成功之后进行识别。
|
||||
建议在success回调中开始录音(即rec.start);当然asr.start和rec.start同时进行调用,或者任意一个先调用都是允许的,不过当出现fail时,需要处理好asr和rec各自的状态。
|
||||
无需特殊处理start和stop的关系,只要调用了stop,会阻止未完成的start,不会执行回调。
|
||||
success:fn()
|
||||
fail:fn(errMsg)
|
||||
**/
|
||||
,start:function(success,fail){
|
||||
var This=this,set=This.set;
|
||||
var failCall=function(err){
|
||||
This.sendAbortMsg=err;
|
||||
fail&&fail(err);
|
||||
};
|
||||
if(!set.compatibleWebSocket){
|
||||
if(!isBrowser){
|
||||
failCall("非浏览器环境,请提供compatibleWebSocket配置来返回一个兼容的WebSocket");
|
||||
return;
|
||||
};
|
||||
};
|
||||
|
||||
if(This.state!=0){
|
||||
failCall("ASR对象不可重复start");
|
||||
return;
|
||||
};
|
||||
This.state=1;
|
||||
|
||||
var stopCancel=function(){
|
||||
This.log("ASR start被stop中断",1);
|
||||
This._send();//调用了再说,不管什么状态
|
||||
};
|
||||
This._token(function(){
|
||||
if(This.state!=1){
|
||||
stopCancel();
|
||||
}else{
|
||||
This.log("OK start",2);
|
||||
This.started=1;
|
||||
success&&success();
|
||||
|
||||
This._send();//调用了再说,不管什么状态
|
||||
};
|
||||
},function(err){
|
||||
err="语音识别token接口出错:"+err;
|
||||
This.log(err,1);
|
||||
if(This.state!=1){
|
||||
stopCancel();
|
||||
}else{
|
||||
failCall(err);
|
||||
This._send();//调用了再说,不管什么状态
|
||||
};
|
||||
});
|
||||
}
|
||||
/**结束识别,一般在调用了本方法后,下一行代码立即调用录音rec.stop结束录音
|
||||
success:fn(text,abortMsg) text为识别到的最终完整内容;如果存在abortMsg代表识别中途被某种错误停止了,text是停止前的内容识别到的完整内容,一般早在asrProcess中会收到abort事件然后要停止录音
|
||||
fail:fn(errMsg)
|
||||
**/
|
||||
,stop:function(success,fail){
|
||||
success=success||NOOP;
|
||||
fail=fail||NOOP;
|
||||
var This=this;
|
||||
var failCall=function(err){
|
||||
err="语音识别stop出错:"+err;
|
||||
This.log(err,1);
|
||||
fail(err);
|
||||
};
|
||||
|
||||
if(This.state==2){
|
||||
failCall("ASR对象不可重复stop");
|
||||
return;
|
||||
};
|
||||
This.state=2;
|
||||
|
||||
This.stopWait=function(){
|
||||
This.stopWait=null;
|
||||
if(!This.started){
|
||||
fail(This.sendAbortMsg||"未开始语音识别");
|
||||
return;
|
||||
};
|
||||
var txt=This.getText();
|
||||
if(!txt && This.sendAbortMsg){
|
||||
fail(This.sendAbortMsg);//仅没有内容时,才走异常
|
||||
}else{
|
||||
success(txt, This.sendAbortMsg||"");//尽力返回已有内容
|
||||
};
|
||||
};
|
||||
//等待数据发送完
|
||||
This._send();
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**实时处理输入音频数据;不管有没有start,都可以调用本方法,start前输入的数据会缓冲起来等到start后进行识别
|
||||
buffers:[[Int16...],...] pcm片段列表,为二维数组,第一维数组内存放1个或多个pcm数据;比如可以是:rec.buffers、onProcess中的buffers截取的一段新二维数组
|
||||
sampleRate:48000 buffers中pcm的采样率
|
||||
|
||||
buffersOffset:0 可选,默认0,从buffers第一维的这个位置开始识别,方便rec的onProcess中使用
|
||||
**/
|
||||
,input:function(buffers,sampleRate ,buffersOffset){
|
||||
var This=this;
|
||||
|
||||
if(This.state==2){//已停止,停止输入数据
|
||||
This._send();
|
||||
return;
|
||||
};
|
||||
var msg="input输入的采样率低于"+This.sampleRate;
|
||||
if(sampleRate<This.sampleRate){
|
||||
CLog(msg+",数据已丢弃",3);
|
||||
if(!This.pcmTotal){
|
||||
This.sendAbortMsg=msg;
|
||||
};
|
||||
This._send();
|
||||
return;
|
||||
};
|
||||
if(This.sendAbortMsg==msg){
|
||||
This.sendAbortMsg="";
|
||||
};
|
||||
|
||||
if(buffersOffset){
|
||||
var newBuffers=[];
|
||||
for(var idx=buffersOffset;idx<buffers.length;idx++){
|
||||
newBuffers.push(buffers[idx]);
|
||||
};
|
||||
buffers=newBuffers;
|
||||
};
|
||||
|
||||
var pcm=Recorder.SampleData(buffers,sampleRate,This.sampleRate).data;
|
||||
This.pcmTotal+=pcm.length;
|
||||
This.pcmBuffers.push(pcm);
|
||||
This._send();
|
||||
}
|
||||
,_send:function(){
|
||||
var This=this,set=This.set;
|
||||
if(This.sendWait){
|
||||
//阻塞中
|
||||
return;
|
||||
};
|
||||
var tryStopEnd=function(){
|
||||
This.stopWait&&This.stopWait();
|
||||
};
|
||||
if(This.state==2 && (!This.started || !This.stopWait)){
|
||||
//已经stop了,并且未ok开始 或者 未在等待结果
|
||||
tryStopEnd();
|
||||
return;
|
||||
};
|
||||
if(This.sendAbort){
|
||||
//已异常中断了
|
||||
tryStopEnd();
|
||||
return;
|
||||
};
|
||||
|
||||
//异常提前终止
|
||||
var abort=function(err){
|
||||
if(!This.sendAbort){
|
||||
This.sendAbort=1;
|
||||
This.sendAbortMsg=err||"-";
|
||||
processCall(0,1);//abort后只调用最后一次
|
||||
};
|
||||
This._send();
|
||||
};
|
||||
var processCall=function(addSize,abortLast){
|
||||
if(!abortLast && This.sendAbort){
|
||||
return false;
|
||||
};
|
||||
addSize=addSize||0;
|
||||
if(!set.asrProcess){
|
||||
//默认超过1分钟自动停止
|
||||
return This.sendTotal+addSize<=size60s;
|
||||
};
|
||||
//实时回调
|
||||
var val=set.asrProcess(This.getText()
|
||||
,This.sendDuration(addSize)
|
||||
,This.sendAbort?This.sendAbortMsg:"");
|
||||
if(!This._prsw && typeof(val)!="boolean"){
|
||||
CLog("asrProcess返回值必须是boolean类型,true才能继续识别,否则立即超时",1);
|
||||
};
|
||||
This._prsw=1;
|
||||
return val;
|
||||
};
|
||||
var size5s=This.sampleRate*5;
|
||||
var size60s=This.sampleRate*60;
|
||||
|
||||
//建立ws连接
|
||||
var ws=This.wsCur;
|
||||
if(!ws){
|
||||
if(This.started){//已start才创建ws
|
||||
var resTxt={};
|
||||
This.resTxts.push(resTxt);
|
||||
ws=This.wsCur=This._wsNew(
|
||||
This.tokenData
|
||||
,"ws:"+This.resTxts.length
|
||||
,resTxt
|
||||
,function(){
|
||||
processCall();
|
||||
}
|
||||
,function(){
|
||||
This._send();
|
||||
}
|
||||
,function(err){
|
||||
//异常中断
|
||||
if(ws==This.wsCur){
|
||||
abort(err);
|
||||
};
|
||||
}
|
||||
);
|
||||
};
|
||||
return;
|
||||
};
|
||||
|
||||
//正在新建新1分钟连接,等着
|
||||
if(This.wsLock){
|
||||
return;
|
||||
};
|
||||
//已有ok的连接,直接陆续将所有缓冲分段发送完
|
||||
if(ws._s!=2 || ws.isStop){
|
||||
//正在关闭或者其他状态不管,等着
|
||||
return;
|
||||
};
|
||||
//没有数据了
|
||||
if(This.pcmSend>=This.pcmTotal){
|
||||
if(This.state==1){
|
||||
//缓冲数据已发送完,等待新数据
|
||||
return;
|
||||
};
|
||||
|
||||
//已stop,结束识别得到最终结果
|
||||
ws.stopWs(function(){
|
||||
tryStopEnd();
|
||||
},function(err){
|
||||
abort(err);
|
||||
});
|
||||
return;
|
||||
};
|
||||
|
||||
//准备本次发送数据块
|
||||
var minSize=This.sampleRate/1000*50;//最小发送量50ms ≈1.6k
|
||||
var maxSize=This.sampleRate;//最大发送量1000ms ≈32k
|
||||
//速度控制1,取决于网速
|
||||
if((ws.bufferedAmount||0)/2>maxSize*3){
|
||||
//传输太慢,阻塞一会再发送
|
||||
This.sendWait=setTimeout(function(){
|
||||
This.sendWait=0;
|
||||
This._send();
|
||||
},100);
|
||||
return;
|
||||
};
|
||||
//速度控制2,取决于已发送时长,单个文件才会被控制速率
|
||||
if(This.sendSpeed){
|
||||
var spMaxMs=(Date.now()-ws.okTime)*This.sendSpeed;
|
||||
var nextMs=(This.sendCurSize+maxSize/3)/This.sampleRate*1000;
|
||||
var delay=Math.floor((nextMs-spMaxMs)/This.sendSpeed);
|
||||
if(delay>0){
|
||||
//传输太快,怕底层识别不过来,降低发送速度
|
||||
CLog("[ASR]延迟"+delay+"ms发送");
|
||||
This.sendWait=setTimeout(function(){
|
||||
This.sendWait=0;
|
||||
This._send();
|
||||
},delay);
|
||||
return;
|
||||
};
|
||||
};
|
||||
|
||||
var needSend=1;
|
||||
var copyBuffers=function(offset,buffers,dist){
|
||||
var size=dist.length;
|
||||
for(var i=0,idx=0;idx<size&&i<buffers.length;){
|
||||
var pcm=buffers[i];
|
||||
if(pcm.length-offset<=size-idx){
|
||||
dist.set(offset==0?pcm:pcm.subarray(offset),idx);
|
||||
idx+=pcm.length-offset;
|
||||
offset=0;
|
||||
buffers.splice(i,1);
|
||||
}else{
|
||||
dist.set(pcm.subarray(offset,offset+(size-idx)),idx);
|
||||
offset+=size-idx;
|
||||
break;
|
||||
};
|
||||
};
|
||||
return offset;
|
||||
};
|
||||
if(This.joinIsOpen){
|
||||
//发送新1分钟的开头重叠5秒数据
|
||||
if(This.joinOffset==-1){
|
||||
//精准定位5秒
|
||||
This.joinSend=0;
|
||||
This.joinOffset=0;
|
||||
This.log("发送上1分钟结尾5秒数据...");
|
||||
var total=0;
|
||||
for(var i=This.joinBuffers.length-1;i>=0;i--){
|
||||
total+=This.joinBuffers[i].length;
|
||||
if(total>=size5s){
|
||||
This.joinBuffers.splice(0, i);
|
||||
This.joinSize=total;
|
||||
This.joinOffset=total-size5s;
|
||||
break;
|
||||
};
|
||||
};
|
||||
};
|
||||
var buffersSize=This.joinSize-This.joinOffset;//缓冲余量
|
||||
var size=Math.min(maxSize,buffersSize);
|
||||
if(size<=0){
|
||||
//重叠5秒数据发送完毕
|
||||
This.log("发送新1分钟数据(重叠"+Math.round(This.joinSend/This.sampleRate*1000)+"ms)...");
|
||||
This.joinBuffers=[];
|
||||
This.joinSize=0;
|
||||
This.joinOffset=-1;
|
||||
This.joinIsOpen=0;
|
||||
This._send();
|
||||
return;
|
||||
};
|
||||
|
||||
//创建块数据,消耗掉buffers
|
||||
var chunk=new Int16Array(size);
|
||||
This.joinSend+=size;
|
||||
This.joinSendTotal+=size;
|
||||
This.joinOffset=copyBuffers(This.joinOffset,This.joinBuffers,chunk);
|
||||
|
||||
This.joinSize=0;
|
||||
for(var i=0;i<This.joinBuffers.length;i++){
|
||||
This.joinSize+=This.joinBuffers[i].length;
|
||||
};
|
||||
}else{
|
||||
var buffersSize=This.pcmTotal-This.pcmSend;//缓冲余量
|
||||
var buffersDur=Math.round(buffersSize/This.sampleRate*1000);
|
||||
var curHasSize=size60s-This.sendCurSize;//当前连接剩余能发送的量
|
||||
var sizeNext=Math.min(maxSize,buffersSize);//不管连接剩余数时本应当发送的数量
|
||||
var size=Math.min(sizeNext,curHasSize);
|
||||
if(This.state==1 && size<Math.min(minSize,curHasSize)){
|
||||
//不够发送一次的,等待新数据
|
||||
return;
|
||||
};
|
||||
var needNew=0;
|
||||
if(curHasSize<=0){
|
||||
//当前连接一分钟已消耗完
|
||||
if(This.state==2 && buffersSize<This.sampleRate*1.2){
|
||||
//剩余的量太少,并且已stop,没必要再新建连接,直接丢弃
|
||||
size=buffersSize;
|
||||
This.log("丢弃结尾"+buffersDur+"ms数据","#999");
|
||||
needSend=0;
|
||||
}else{
|
||||
//开始新1分钟的连接,等到实时回调后再看要不要新建
|
||||
needNew=true;
|
||||
};
|
||||
};
|
||||
//回调看看是否要超时终止掉
|
||||
if(needSend && !processCall(sizeNext)){//用本应当的发送量来计算
|
||||
//超时,终止识别
|
||||
var durS=Math.round(This.asrDuration()/1000);
|
||||
This.log("已主动超时,共识别"+durS+"秒,丢弃缓冲"+buffersDur+"ms,正在终止...");
|
||||
This.wsLock=1;//阻塞住后续调用
|
||||
ws.stopWs(function(){
|
||||
abort("已主动超时,共识别"+durS+"秒,终止识别");
|
||||
},function(err){
|
||||
abort(err);
|
||||
});
|
||||
return;
|
||||
};
|
||||
//开始新1分钟的连接
|
||||
if(needNew){
|
||||
CLog("[ASR]新1分钟接续,当前缓冲"+buffersDur+"ms...");
|
||||
This.wsLock=1;//阻塞住后续调用
|
||||
ws.stopWs(function(){
|
||||
This._token(function(){
|
||||
This.log("新1分钟接续OK,当前缓冲"+buffersDur+"ms",2);
|
||||
This.wsLock=0;
|
||||
This.wsCur=0;//重置当前连接
|
||||
This.sendCurSize=0;
|
||||
|
||||
This.joinIsOpen=1;//新1分钟先发重叠的5秒数据
|
||||
This.joinOffset=-1;
|
||||
|
||||
This._send();
|
||||
},function(err){
|
||||
abort("语音识别新1分钟token接口出错:"+err);
|
||||
});
|
||||
},function(err){
|
||||
abort(err);
|
||||
});
|
||||
return;
|
||||
};
|
||||
|
||||
//创建块数据,消耗掉buffers
|
||||
var chunk=new Int16Array(size);
|
||||
This.pcmOffset=copyBuffers(This.pcmOffset,This.pcmBuffers,chunk);
|
||||
This.pcmSend+=size;
|
||||
|
||||
//写入到下一分钟的头5秒重叠区域中,不管写了多少,写就完了
|
||||
This.joinBuffers.push(chunk);
|
||||
This.joinSize+=size;
|
||||
};
|
||||
|
||||
This.sendCurSize+=chunk.length;
|
||||
This.sendTotal+=chunk.length;
|
||||
if(needSend){
|
||||
try{
|
||||
ws.send(chunk.buffer);
|
||||
}catch(e){CLog("ws.send",1,e);};
|
||||
};
|
||||
|
||||
//不要停
|
||||
This.sendWait=setTimeout(function(){
|
||||
This.sendWait=0;
|
||||
This._send();
|
||||
});//仅退出调用堆栈
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**返回实时结果文本,如果已stop返回的就是最终文本**/
|
||||
,getText:function(){
|
||||
var arr=this.resTxts;
|
||||
var txt="";
|
||||
for(var i=0;i<arr.length;i++){
|
||||
var obj=arr[i];
|
||||
if(obj.fullTxt){
|
||||
txt=obj.fullTxt;
|
||||
}else{
|
||||
var tmp=obj.tempTxt||"";
|
||||
if(obj.okTxt){
|
||||
tmp=obj.okTxt;
|
||||
};
|
||||
//5秒重叠进行模糊拼接
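//e.g. if the already-kept text ends with "...ABCDE" and the new segment begins "CDEFG", the shared run "CDE" (>=3 identical chars) anchors the splice and the merged text becomes "...ABCDEFG"; without a match the segment is simply appended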
|
||||
if(!txt){
|
||||
txt=tmp;
|
||||
}else{
|
||||
var left=txt.substr(-20);//240字/分
|
||||
var finds=[];
|
||||
for(var x=0,max=Math.min(17,tmp.length-3);x<=max;x++){
|
||||
for(var i0=0;i0<17;i0++){
|
||||
if(left[i0]==tmp[x]){
|
||||
var n=1;
|
||||
for(;n<17;n++){
|
||||
if(left[i0+n]!=tmp[x+n]){
|
||||
break;
|
||||
};
|
||||
};
|
||||
if(n>=3){//3字相同即匹配
|
||||
finds.push({x:x,i0:i0,n:n});
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
finds.sort(function(a,b){
|
||||
var v=b.n-a.n;
|
||||
return v!=0?v:b.i0-a.i0;//越长越好,越靠后越好
|
||||
});
|
||||
var f0=finds[0];
|
||||
if(f0){
|
||||
txt=txt.substr(0,txt.length-left.length+f0.i0);
|
||||
txt+=tmp.substr(f0.x);
|
||||
}else{
|
||||
txt+=tmp;
|
||||
};
|
||||
};
|
||||
//存起来
|
||||
if(obj.okTxt!=null && tmp==obj.okTxt){
|
||||
obj.fullTxt=txt;
|
||||
};
|
||||
};
|
||||
};
|
||||
return txt;
|
||||
}
|
||||
|
||||
//创建新的wss连接
|
||||
,_wsNew:function(sData,id,resTxt,process,connOk,connFail){
|
||||
var uuid=function(){
|
||||
var s=[];
|
||||
for(var i=0,r;i<32;i++){
|
||||
r=Math.floor(Math.random()*16);
|
||||
s.push(String.fromCharCode(r<10?r+48:r-10+97));
|
||||
};
|
||||
return s.join("");
|
||||
};
|
||||
var This=this,set=This.set;
|
||||
CLog("[ASR "+id+"]正在连接...");
|
||||
var url="wss://nls-gateway.cn-shanghai.aliyuncs.com/ws/v1?token="+sData.token;
|
||||
if(set.compatibleWebSocket){
|
||||
var ws=set.compatibleWebSocket(url);
|
||||
}else{
|
||||
var ws=new WebSocket(url);
|
||||
}
|
||||
|
||||
//ws._s=0 0连接中 1opening 2openOK 3stoping 4closeing -1closed
|
||||
//ws.isStop=0 1已停止识别
|
||||
ws.onclose=function(){
|
||||
if(ws._s==-1)return;
|
||||
var isFail=ws._s!=4;
|
||||
ws._s=-1;
|
||||
This.log("["+id+"]close");
|
||||
|
||||
isFail&&connFail(ws._err||"连接"+id+"已关闭");
|
||||
};
|
||||
ws.onerror=function(e){
|
||||
if(ws._s==-1)return;
|
||||
var msg="网络连接错误";
|
||||
ws._err||(ws._err=msg);
|
||||
This.log("["+id+"]"+msg,1);
|
||||
ws.onclose();
|
||||
};
|
||||
ws.onopen=function(){
|
||||
if(ws._s==-1)return;
|
||||
ws._s=1;
|
||||
CLog("[ASR "+id+"]open");
|
||||
ws._task=uuid();
|
||||
ws.send(JSON.stringify({
|
||||
header:{
|
||||
message_id:uuid()
|
||||
,task_id:ws._task
|
||||
,appkey:sData.appkey
|
||||
|
||||
,namespace:"SpeechRecognizer"
|
||||
,name:"StartRecognition"
|
||||
}
|
||||
,payload:{
|
||||
format:"pcm"
|
||||
,sample_rate:This.sampleRate
|
||||
,enable_intermediate_result:true //返回中间识别结果
|
||||
,enable_punctuation_prediction:true //添加标点
|
||||
,enable_inverse_text_normalization:true //后处理中将数值处理
|
||||
}
|
||||
,context:{ }
|
||||
}));
|
||||
};
|
||||
ws.onmessage=function(e){
|
||||
var data=e.data;
|
||||
var logMsg=true;
|
||||
if(typeof(data)=="string" && data[0]=="{"){
|
||||
data=JSON.parse(data);
|
||||
var header=data.header||{};
|
||||
var payload=data.payload||{};
|
||||
var name=header.name||"";
|
||||
var status=header.status||0;
|
||||
|
||||
var isFail=name=="TaskFailed";
|
||||
var errMsg="";
|
||||
|
||||
//init
|
||||
if(ws._s==1 && (name=="RecognitionStarted" || isFail)){
|
||||
if(isFail){
|
||||
errMsg="连接"+id+"失败["+status+"]"+header.status_text;
|
||||
}else{
|
||||
ws._s=2;
|
||||
This.log("["+id+"]连接OK");
|
||||
ws.okTime=Date.now();
|
||||
connOk();
|
||||
};
|
||||
};
|
||||
//中间结果
|
||||
if(ws._s==2 && (name=="RecognitionResultChanged" || isFail)){
|
||||
if(isFail){
|
||||
errMsg="识别出现错误["+status+"]"+header.status_text;
|
||||
}else{
|
||||
logMsg=!ws._clmsg;
|
||||
ws._clmsg=1;
|
||||
resTxt.tempTxt=payload.result||"";
|
||||
process();
|
||||
};
|
||||
};
|
||||
//stop
|
||||
if(ws._s==3 && (name=="RecognitionCompleted" || isFail)){
|
||||
var txt="";
|
||||
if(isFail){
|
||||
errMsg="停止识别出现错误["+status+"]"+header.status_text;
|
||||
}else{
|
||||
txt=payload.result||"";
|
||||
This.log("["+id+"]最终识别结果:"+txt);
|
||||
};
|
||||
ws.stopCall&&ws.stopCall(txt,errMsg);
|
||||
};
|
||||
|
||||
if(errMsg){
|
||||
This.log("["+id+"]"+errMsg,1);
|
||||
ws._err||(ws._err=errMsg);
|
||||
};
|
||||
};
|
||||
if(logMsg){
|
||||
CLog("[ASR "+id+"]msg",data);
|
||||
};
|
||||
};
|
||||
ws.stopWs=function(True,False){
|
||||
if(ws._s!=2){
|
||||
False(id+"状态不正确["+ws._s+"]");
|
||||
return;
|
||||
};
|
||||
ws._s=3;
|
||||
ws.isStop=1;
|
||||
|
||||
ws.stopCall=function(txt,err){
|
||||
clearTimeout(ws.stopInt);
|
||||
ws.stopCall=0;
|
||||
ws._s=4;
|
||||
ws.close();
|
||||
|
||||
resTxt.okTxt=txt;
|
||||
process();
|
||||
|
||||
if(err){
|
||||
False(err);
|
||||
}else{
|
||||
True();
|
||||
};
|
||||
};
|
||||
ws.stopInt=setTimeout(function(){
|
||||
ws.stopCall&&ws.stopCall("","停止识别返回结果超时");
|
||||
},10000);
|
||||
|
||||
CLog("[ASR "+id+"]send stop");
|
||||
ws.send(JSON.stringify({
|
||||
header:{
|
||||
message_id:uuid()
|
||||
,task_id:ws._task
|
||||
,appkey:sData.appkey
|
||||
|
||||
,namespace:"SpeechRecognizer"
|
||||
,name:"StopRecognition"
|
||||
}
|
||||
}));
|
||||
};
|
||||
if(ws.connect)ws.connect(); //兼容时会有这个方法
|
||||
return ws;
|
||||
}
|
||||
|
||||
|
||||
|
||||
//获得开始识别的token信息
|
||||
,_token:function(True,False){
|
||||
var This=this,set=This.set;
|
||||
if(!set.tokenApi){
|
||||
False("未配置tokenApi");return;
|
||||
};
|
||||
|
||||
(set.apiRequest||DefaultPost)(set.tokenApi,set.apiArgs||{},function(data){
|
||||
if(!data || !data.appkey || !data.token){
|
||||
False("apiRequest回调的数据格式不正确");return;
|
||||
};
|
||||
This.tokenData=data;
|
||||
True();
|
||||
},False);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
|
||||
//手撸一个ajax
|
||||
function DefaultPost(url,args,success,fail){
|
||||
var xhr=new XMLHttpRequest();
|
||||
xhr.timeout=20000;
|
||||
xhr.open("POST",url);
|
||||
xhr.onreadystatechange=function(){
|
||||
if(xhr.readyState==4){
|
||||
if(xhr.status==200){
|
||||
try{
|
||||
var o=JSON.parse(xhr.responseText);
|
||||
}catch(e){};
|
||||
|
||||
if(!o || o.c!==0 || !o.v){ //guard: o is undefined when the JSON.parse above failed
|
||||
fail(o.m||"接口返回非预定义json数据");
|
||||
return;
|
||||
};
|
||||
success(o.v);
|
||||
}else{
|
||||
fail("请求失败["+xhr.status+"]");
|
||||
}
|
||||
}
|
||||
};
|
||||
var arr=[];
|
||||
for(var k in args){
|
||||
arr.push(k+"="+encodeURIComponent(args[k]));
|
||||
};
|
||||
xhr.setRequestHeader("Content-Type","application/x-www-form-urlencoded");
|
||||
xhr.send(arr.join("&"));
|
||||
};
|
||||
|
||||
function NOOP(){};
|
||||
|
||||
Recorder[ASR_Aliyun_ShortTxt]=ASR_Aliyun_Short;
|
||||
|
||||
|
||||
}));

public/extensions/buffer_stream.player.js (new file, 887 lines)
@@ -0,0 +1,887 @@
/*
Recorder extension: real-time playback of recorded audio chunk files, by converting the chunk files into a MediaStream.

https://github.com/xiangyuecn/Recorder

BufferStreamPlayer lets you feed a whole audio file at once, or feed audio chunk files in real time, through the input method, and plays them back. Supported input formats: pcm, wav, mp3 and any other format the browser can decode; non-pcm input is automatically decoded to pcm (with slightly worse playback quality than pcm/wav). The audio can be processed before or after input, e.g. mixing, speed change, pitch change. The input audio is written into an internal MediaStream, turning a series of consecutive audio chunk files into a stream.

BufferStreamPlayer can be used for:
1. Real-time processing such as Recorder onProcess: turn the processed audio chunks directly into a MediaStream, which can be sent to the remote side as a WebRTC local stream, or played back;
2. Real-time playback of received audio chunk files, e.g. playing recording chunks received over WebSocket, or playing a WebRTC remote stream (Recorder can process such streams in real time) after processing;
3. Real-time playback and processing of a single audio file, e.g. playing a clip while drawing a visualization at the same time (doing your own decode + play + draw is more fun, but this is less work and comes with more supporting features).

Online test page:
https://xiangyuecn.github.io/Recorder/assets/工具-代码运行和静态分发Runtime.html?jsname=teach.realtime.decode_buffer_stream_player
Usage example:
	var stream=Recorder.BufferStreamPlayer(set)
	//the first thing to do after creating it is to call start to open the stream; once open, the audio fed via input starts playing; see the source below for the set options; note: start must be called from a user gesture (touch, click, ...), see the runningContext option for why
	stream.start(()=>{
		stream.currentTime;//duration already played, in ms; onUpdateTime fires when the value changes
		stream.duration;//total duration of all data fed so far, in ms; onUpdateTime fires when the value changes; of little use in realtime mode, where it exceeds what is actually played because data is dropped when playback stalls
		stream.isStop;//whether stopped; set to true once the stop method is called
		stream.isPause;//whether paused; set to true once the pause method is called
		stream.isPlayEnd;//whether all input data has been played to the end (nothing left to play); becomes false again after input; may mean buffering or playback finished; onPlayEnd fires when the state changes

		//if you do not want the default playback, set set.play to false and just take the MediaStream
		stream.getMediaStream() //get the MediaStream; it can be sent to the remote side as a WebRTC local stream, or assigned to audio.srcObject for playback (equivalent to assigning audio.src); calling this before start throws

		stream.getAudioSrc() //[deprecated] on very old browsers, get a url string for the MediaStream to assign to audio.src for playback; calling it before start throws; new browsers no longer support converting a MediaStream to a url string and will throw, so unless you must support browsers without srcObject, use getMediaStream and assign it to audio.srcObject instead
	},(errMsg)=>{
		//start failed, cannot play
	});

	//input may be called at any time; the data plays once start has succeeded; keep calling input and sound keeps playing; to pause playback, simply stop calling input
	stream.input(anyData); //for the anyData format and more details, read the source comments of the input method below
	stream.clearInput(keepDuration); //discard data that was fed but not yet played, typically to interrupt old playback in non-realtime mode; returns the discarded duration, which is subtracted from duration unless keepDuration=true

	//pause playback; while paused: in realtime mode all input data is discarded (resume plays only newly input data), in non-realtime mode all input data is kept and continues playing on resume
	stream.pause();
	//resume playback; realtime mode resumes from the newest input data, non-realtime mode continues from where it was paused
	stream.resume();

	//when done, call stop to end playback and release all resources
	stream.stop();



Note: Firefox's AudioBuffer is known not to allow its data to be modified dynamically, so browsers with that behavior fall back to buffer-then-play (similar to assets/runtime-codes/fragment.playbuffer.js) with slightly worse quality; Android, iOS and Chrome tested fine; the start method contains a sizeable feature-detection block that handles this compatibility.
*/
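/* A minimal playback sketch (assumptions: buffer_stream.player.js is loaded together with recorder-core, and a WebSocket `ws` delivers 16000Hz 16-bit mono pcm chunks as ArrayBuffer):
	var stream=Recorder.BufferStreamPlayer({ sampleRate:16000 }); //raw pcm input, so no decode/transform needed
	stream.start(function(){
		ws.binaryType="arraybuffer";
		ws.onmessage=function(e){
			stream.input(new Int16Array(e.data)); //each received chunk is appended to the internal MediaStream and played
		};
	},function(err){
		console.error("BufferStreamPlayer start failed: "+err);
	});
*/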
(function(factory){
|
||||
var browser=typeof window=="object" && !!window.document;
|
||||
var win=browser?window:Object; //非浏览器环境,Recorder挂载在Object下面
|
||||
var rec=win.Recorder,ni=rec.i18n;
|
||||
factory(rec,ni,ni.$T,browser);
|
||||
}(function(Recorder,i18n,$T,isBrowser){
|
||||
"use strict";
|
||||
|
||||
var BufferStreamPlayer=function(set){
|
||||
return new fn(set);
|
||||
};
|
||||
var BufferStreamPlayerTxt="BufferStreamPlayer";
|
||||
var fn=function(set){
|
||||
var This=this;
|
||||
var o={
|
||||
play:true //要播放声音,设为false不播放,只提供MediaStream
|
||||
,realtime:true /*默认为true实时模式,设为false为非实时模式
|
||||
实时模式:设为 true 或 {maxDelay:300,discardAll:false}配置对象
|
||||
如果有新的input输入数据,但之前输入的数据还未播放完的时长不超过maxDelay时(缓冲播放延迟默认限制在300ms内),如果积压的数据量过大则积压的数据将会被直接丢弃,少量积压会和新数据一起加速播放,最终达到尽快播放新输入的数据的目的;这在网络不流畅卡顿时会发挥很大作用,可有效降低播放延迟;出现加速播放时声音听起来会比较怪异,可配置discardAll=true来关闭此特性,少量积压的数据也直接丢弃,不会加速播放;如果你的音频数据块超过200ms,需要调大maxDelay(取值100-800ms)
|
||||
非实时模式:设为 false
|
||||
连续完整的播放完所有input输入的数据,之前输入的还未播放完又有新input输入会加入队列排队播放,比如用于:一次性同时输入几段音频完整播放
|
||||
*/
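//e.g. realtime:{maxDelay:500,discardAll:true} tolerates up to ~500ms of backlog and simply drops anything beyond it instead of speeding playback up (the values here are illustrative)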
|
||||
|
||||
|
||||
//,onInputError:fn(errMsg, inputIndex) //当input输入出错时回调,参数为input第几次调用和错误消息
|
||||
//,onUpdateTime:fn() //已播放时长、总时长更新回调(stop、pause、resume后一定会回调),this.currentTime为已播放时长,this.duration为已输入的全部数据总时长(实时模式下意义不大,会比实际播放的长),单位都是ms
|
||||
//,onPlayEnd:fn() //没有可播放的数据时回调(stop后一定会回调),已输入的数据已全部播放完了,可代表正在缓冲中或播放结束;之后如果继续input输入了新数据,播放完后会再次回调,因此会多次回调;非实时模式一次性输入了数据时,此回调相当于播放完成,可以stop掉,重新创建对象来input数据可达到循环播放效果
|
||||
|
||||
//,decode:false //input输入的数据在调用transform之前是否要进行一次音频解码成pcm [Int16,...]
|
||||
//mp3、wav等都可以设为true、或设为{fadeInOut:true}配置对象,会自动解码成pcm;默认会开启fadeInOut对解码的pcm首尾进行淡入淡出处理,减少爆音(wav等解码后和原始pcm一致的音频,可以把fadeInOut设为false)
|
||||
|
||||
//transform:fn(inputData,sampleRate,True,False)
|
||||
//将input输入的data(如果开启了decode将是解码后的pcm)转换处理成要播放的pcm数据;如果没有解码也没有提供本方法,input的data必须是[Int16,...]并且设置set.sampleRate
|
||||
//inputData:any input方法输入的任意格式数据,只要这个转换函数支持处理;如果开启了decode,此数据为input输入的数据解码后的pcm [Int16,...]
|
||||
//sampleRate:123 如果设置了decode为解码后的采样率,否则为set.sampleRate || null
|
||||
//True(pcm,sampleRate) 回调处理好的pcm数据([Int16,...])和pcm的采样率
|
||||
//False(errMsg) 处理失败回调
|
||||
|
||||
//sampleRate:16000 //可选input输入的数据默认的采样率,当没有设置解码也没有提供transform时应当明确设置采样率
|
||||
|
||||
//runningContext:AudioContext //可选提供一个state为running状态的AudioContext对象(ctx),默认会在start时自动创建一个新的ctx,这个配置的作用请参阅Recorder的runningContext配置
|
||||
};
|
||||
for(var k in set){
|
||||
o[k]=set[k];
|
||||
};
|
||||
This.set=set=o;
|
||||
|
||||
if(!set.onInputError){
|
||||
set.onInputError=function(err,n){ CLog(err,1); };
|
||||
}
|
||||
};
|
||||
fn.prototype=BufferStreamPlayer.prototype={
|
||||
/**【已过时】获取MediaStream的audio播放地址,新版浏览器、未start将会抛异常**/
|
||||
getAudioSrc:function(){
|
||||
CLog($T("0XYC::getAudioSrc方法已过时:请直接使用getMediaStream然后赋值给audio.srcObject,仅允许在不支持srcObject的浏览器中调用本方法赋值给audio.src以做兼容"),3);
|
||||
if(!this._src){
|
||||
//新版chrome调用createObjectURL会直接抛异常了 https://developer.mozilla.org/en-US/docs/Web/API/URL/createObjectURL#using_object_urls_for_media_streams
|
||||
this._src=(window.URL||webkitURL).createObjectURL(this.getMediaStream());
|
||||
}
|
||||
return this._src;
|
||||
}
|
||||
/**获取MediaStream流对象,未start将会抛异常**/
|
||||
,getMediaStream:function(){
|
||||
if(!this._dest){
|
||||
throw new Error(NoStartMsg());
|
||||
}
|
||||
return this._dest.stream;
|
||||
}
|
||||
|
||||
|
||||
/**打开音频流,打开后就会开始播放input输入的音频;注意:start需要在用户操作(触摸、点击等)时进行调用,原因参考runningContext配置
|
||||
* True() 打开成功回调
|
||||
* False(errMsg) 打开失败回调**/
|
||||
,start:function(True,False){
|
||||
var falseCall=function(msg,noClear){
|
||||
var next=!checkStop();
|
||||
if(!noClear)This._clear();
|
||||
CLog(msg,1);
|
||||
next&&False&&False(msg);
|
||||
};
|
||||
var checkStop=function(){
|
||||
if(This.isStop){
|
||||
CLog($T("6DDt::start被stop终止"),3);
|
||||
return true;
|
||||
};
|
||||
};
|
||||
var This=this,set=This.set,__abTest=This.__abTest;
|
||||
if(This._Tc!=null){
|
||||
falseCall($T("I4h4::{1}多次start",0,BufferStreamPlayerTxt),1);
|
||||
return;
|
||||
}
|
||||
if(!isBrowser){
|
||||
falseCall($T.G("NonBrowser-1",[BufferStreamPlayerTxt]));
|
||||
return;
|
||||
}
|
||||
This._Tc=0;//currentTime 对应的采样数
|
||||
This._Td=0;//duration 对应的采样数
|
||||
|
||||
This.currentTime=0;//当前已播放的时长,单位ms
|
||||
This.duration=0;//已输入的全部数据总时长,单位ms;实时模式下意义不大,会比实际播放的长,因为实时播放时卡了就会丢弃部分数据不播放
|
||||
This.isStop=0;//是否已停止
|
||||
This.isPause=0;//是否已暂停
|
||||
This.isPlayEnd=0;//已输入的数据是否播放到了结尾(没有可播放的数据了),input后又会变成false;可代表正在缓冲中或播放结束
|
||||
|
||||
This.inputN=0;//第n次调用input
|
||||
|
||||
This.inputQueueIdx=0;//input调用队列当前已处理到的位置
|
||||
This.inputQueue=[];//input调用队列,用于纠正执行顺序
|
||||
|
||||
This.bufferSampleRate=0;//audioBuffer的采样率,首次input后就会固定下来
|
||||
This.audioBuffer=0;
|
||||
This.pcmBuffer=[[],[]];//未推入audioBuffer的pcm数据缓冲
|
||||
|
||||
var fail=function(msg){
|
||||
falseCall($T("P6Gs::浏览器不支持打开{1}",0,BufferStreamPlayerTxt)+(msg?": "+msg:""));
|
||||
};
|
||||
|
||||
var ctx=set.runningContext || Recorder.GetContext(true); This._ctx=ctx;
|
||||
var sVal=ctx.state,spEnd=Recorder.CtxSpEnd(sVal);
|
||||
!__abTest&&CLog("start... ctx.state="+sVal+(
|
||||
spEnd?$T("JwDm::(注意:ctx不是running状态,start需要在用户操作(触摸、点击等)时进行调用,否则会尝试进行ctx.resume,可能会产生兼容性问题(仅iOS),请参阅文档中runningContext配置)"):""
|
||||
));
|
||||
|
||||
var support=1;
|
||||
if(!ctx || !ctx.createMediaStreamDestination){
|
||||
support=0;
|
||||
}else{
|
||||
var source=ctx.createBufferSource();
|
||||
if(!source.start || source.onended===undefined){
|
||||
support=0;//createBufferSource版本太低,难兼容
|
||||
}
|
||||
};
|
||||
if(!support){
|
||||
fail("");
|
||||
return;
|
||||
};
|
||||
|
||||
|
||||
var end=function(){
|
||||
if(checkStop())return;
|
||||
//创建MediaStream
|
||||
var dest=ctx.createMediaStreamDestination();
|
||||
dest.channelCount=1;
|
||||
This._dest=dest;
|
||||
|
||||
!__abTest&&CLog("start ok");
|
||||
True&&True();
|
||||
|
||||
This._inputProcess();//处理未完成start前的input调用
|
||||
This._updateTime();//更新时间
|
||||
|
||||
//定时在没有input输入时,将未写入buffer的数据写进去
|
||||
if(!badAB){
|
||||
This._writeInt=setInterval(function(){
|
||||
This._writeBuffer();
|
||||
},100);
|
||||
}else{
|
||||
CLog($T("qx6X::此浏览器的AudioBuffer实现不支持动态特性,采用兼容模式"),3);
|
||||
This._writeInt=setInterval(function(){
|
||||
This._writeBad();
|
||||
},10);//定时调用进行数据写入播放
|
||||
}
|
||||
};
|
||||
var abTest=function(){
|
||||
//浏览器实现检测,已知Firefox的AudioBuffer没法在_writeBuffer中动态修改数据;检测方法:直接新开一个,输入一段测试数据,看看能不能拿到流中的数据
|
||||
var testStream=BufferStreamPlayer({ play:false,sampleRate:8000,runningContext:ctx });
|
||||
testStream.__abTest=1; var testRec;
|
||||
testStream.start(function(){
|
||||
testRec=Recorder({
|
||||
type:"unknown"
|
||||
,sourceStream:testStream.getMediaStream()
|
||||
,runningContext:ctx
|
||||
,onProcess:function(buffers){
|
||||
var bf=buffers[buffers.length-1],all0=1;
|
||||
for(var i=0;i<bf.length;i++){
|
||||
if(bf[i]!=0){ all0=0; break; }
|
||||
}
|
||||
if(all0 && buffers.length<5){
|
||||
return;//再等等看,最长约等500ms
|
||||
}
|
||||
testRec.close();
|
||||
testStream.stop();
|
||||
|
||||
if(testInt){ clearTimeout(testInt); testInt=0;
|
||||
//全部是0就是浏览器不行,要缓冲一次性播放进行兼容
|
||||
badAB=all0;
|
||||
BufferStreamPlayer.BadAudioBuffer=badAB;
|
||||
end();
|
||||
}
|
||||
}
|
||||
});
|
||||
testRec.open(function(){
|
||||
testRec.start();
|
||||
},function(msg){
|
||||
testStream.stop(); fail(msg);
|
||||
});
|
||||
},fail);
|
||||
//超时没有回调
|
||||
var testInt=setTimeout(function(){
|
||||
testInt=0; testStream.stop(); testRec&&testRec.close();
|
||||
fail($T("cdOx::环境检测超时"));
|
||||
},1500);
|
||||
//随机生成1秒的数据,rec有一次回调即可
|
||||
var data=new Int16Array(8000);
|
||||
for(var i=0;i<8000;i++){
|
||||
data[i]=~~(Math.random()*0x7fff*2-0x7fff);
|
||||
}
|
||||
testStream.input(data);
|
||||
};
|
||||
|
||||
var badAB=BufferStreamPlayer.BadAudioBuffer;
|
||||
var ctxNext=function(){
|
||||
if(__abTest || badAB!=null){
|
||||
setTimeout(end); //应当setTimeout一下强转成异步,统一调用代码时的行为
|
||||
}else{
|
||||
abTest();
|
||||
};
|
||||
};
|
||||
var tag="AudioContext resume: ";
|
||||
Recorder.ResumeCtx(ctx,function(runC){
|
||||
runC&&CLog(tag+"wait...");
|
||||
return !This.isStop;
|
||||
},function(runC){
|
||||
runC&&CLog(tag+ctx.state);
|
||||
ctxNext();
|
||||
},function(err){ //比较少见,可能没有影响
|
||||
CLog(tag+ctx.state+" "+$T("S2Bu::可能无法播放:{1}",0,err),1);
|
||||
ctxNext();
|
||||
});
|
||||
}
|
||||
,_clear:function(){
|
||||
var This=this;
|
||||
This.isStop=1;
|
||||
clearInterval(This._writeInt);
|
||||
This.inputQueue=0;
|
||||
|
||||
if(This._src){
|
||||
(window.URL||webkitURL).revokeObjectURL(This._src);
|
||||
This._src=0;
|
||||
}
|
||||
if(This._dest){
|
||||
Recorder.StopS_(This._dest.stream);
|
||||
This._dest=0;
|
||||
}
|
||||
if(!This.set.runningContext && This._ctx){
|
||||
Recorder.CloseNewCtx(This._ctx);
|
||||
}
|
||||
This._ctx=0;
|
||||
|
||||
var source=This.bufferSource;
|
||||
if(source){
|
||||
source.disconnect();
|
||||
source.stop();
|
||||
}
|
||||
This.bufferSource=0;
|
||||
This.audioBuffer=0;
|
||||
}
|
||||
/**停止播放,关闭所有资源**/
|
||||
,stop:function(){
|
||||
var This=this;
|
||||
This._clear();
|
||||
|
||||
!This.__abTest&&CLog("stop");
|
||||
This._playEnd(1);
|
||||
}
|
||||
/**暂停播放,暂停后:实时模式下会丢弃所有input输入的数据(resume时只播放新input的数据),非实时模式下所有input输入的数据会保留到resume时继续播放**/
|
||||
,pause:function(){
|
||||
CLog("pause");
|
||||
this.isPause=1;
|
||||
this._updateTime(1);
|
||||
}
|
||||
/**恢复播放,实时模式下只会从最新input的数据开始播放,非实时模式下会从暂停的位置继续播放**/
|
||||
,resume:function(){
|
||||
var This=this,tag="resume",tag3=tag+"(wait ctx)";
|
||||
CLog(tag);
|
||||
This.isPause=0;
|
||||
This._updateTime(1);
|
||||
|
||||
var ctx=This._ctx;
|
||||
if(ctx){ //AudioContext如果被暂停,尽量恢复
|
||||
Recorder.ResumeCtx(ctx,function(runC){
|
||||
runC&&CLog(tag3+"...");
|
||||
return !This.isStop && !This.isPause;
|
||||
},function(runC){
|
||||
runC&&CLog(tag3+ctx.state);
|
||||
},function(err){
|
||||
CLog(tag3+ctx.state+"[err]"+err,1);
|
||||
});
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
//当前输入的数据播放到结尾时触发回调,stop时永远会触发回调
|
||||
,_playEnd:function(stop){
|
||||
var This=this,startTime=This._PNs,call=This.set.onPlayEnd;
|
||||
if(stop || !This.isPause){//暂停播到结尾不算
|
||||
if(stop || !This.isPlayEnd){
|
||||
if(stop || (startTime && Date.now()-startTime>500)){//已停止或者延迟确认成功
|
||||
This._PNs=0;
|
||||
This.isPlayEnd=1;
|
||||
call&&call();
|
||||
This._updateTime(1);
|
||||
}else if(!startTime){//刚检测到的没有数据了,开始延迟确认
|
||||
This._PNs=Date.now();
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
//有数据播放时,取消已到结尾状态
|
||||
,_playLive:function(){
|
||||
var This=this;
|
||||
This.isPlayEnd=0;
|
||||
This._PNs=0;
|
||||
}
|
||||
//时间更新时触发回调,没有更新时不会触发回调
|
||||
,_updateTime:function(must){
|
||||
var This=this,sampleRate=This.bufferSampleRate||9e9,call=This.set.onUpdateTime;
|
||||
This.currentTime=Math.round(This._Tc/sampleRate*1000);
|
||||
This.duration=Math.round(This._Td/sampleRate*1000);
|
||||
|
||||
var s=""+This.currentTime+This.duration;
|
||||
if(must || This._UTs!=s){
|
||||
This._UTs=s;
|
||||
call&&call();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**输入任意格式的音频数据,未完成start前调用会等到start成功后生效
|
||||
anyData: any 具体类型取决于:
|
||||
set.decode为false时:
|
||||
未提供set.transform,数据必须是pcm[Int16,...],此时的set必须提供sampleRate;
|
||||
提供了set.transform,数据为transform方法支持的任意格式。
|
||||
set.decode为true时:
|
||||
数据必须是ArrayBuffer,会自动解码成pcm[Int16,...];注意输入的每一片数据都应该是完整的一个音频片段文件,否则可能会解码失败;注意ArrayBuffer对象是Transferable object,参与解码后此对象将不可用,因为内存数据已被转移到了解码线程,可通过 stream.input(arrayBuffer.slice(0)) 形式复制一份再解码就没有这个问题了。
|
||||
|
||||
关于anyData的二进制长度:
|
||||
如果是提供的pcm、wav格式数据,数据长度对播放无太大影响,很短的数据也能很好的连续播放。
|
||||
如果是提供的mp3这种必须解码才能获得pcm的数据,数据应当尽量长点,测试发现片段有300ms以上解码后能很好的连续播放,低于100ms解码后可能会有明显的杂音,更低的可能会解码失败;当片段确实太小时,可以将本来会多次input调用的数据缓冲起来,等数据量达到了300ms再来调用一次input,能比较显著的改善播放音质。
|
||||
**/
|
||||
,input:function(anyData){
|
||||
var This=this,set=This.set;
|
||||
var inputN=++This.inputN;
|
||||
if(!This.inputQueue){
|
||||
throw new Error(NoStartMsg());
|
||||
}
|
||||
|
||||
var decSet=set.decode;
|
||||
if(decSet){
|
||||
//先解码
|
||||
DecodeAudio(anyData, function(data){
|
||||
if(!This.inputQueue)return;//stop了
|
||||
if(decSet.fadeInOut==null || decSet.fadeInOut){
|
||||
FadeInOut(data.data, data.sampleRate);//解码后的数据进行一下淡入淡出处理,减少爆音
|
||||
}
|
||||
This._input2(inputN, data.data, data.sampleRate);
|
||||
},function(err){
|
||||
This._inputErr(err, inputN);
|
||||
});
|
||||
}else{
|
||||
This._input2(inputN, anyData, set.sampleRate);
|
||||
}
|
||||
}
|
||||
//transform处理
|
||||
,_input2:function(inputN, anyData, sampleRate){
|
||||
var This=this,set=This.set;
|
||||
|
||||
if(set.transform){
|
||||
set.transform(anyData, sampleRate, function(pcm, sampleRate2){
|
||||
if(!This.inputQueue)return;//stop了
|
||||
|
||||
sampleRate=sampleRate2||sampleRate;
|
||||
This._input3(inputN, pcm, sampleRate);
|
||||
},function(err){
|
||||
This._inputErr(err, inputN);
|
||||
});
|
||||
}else{
|
||||
This._input3(inputN, anyData, sampleRate);
|
||||
}
|
||||
}
|
||||
//转换好的pcm加入input队列,纠正调用顺序,未start时等待
|
||||
,_input3:function(inputN, pcm, sampleRate){
|
||||
var This=this;
|
||||
|
||||
if(!pcm || !pcm.subarray){
|
||||
This._inputErr($T("ZfGG::input调用失败:非pcm[Int16,...]输入时,必须解码或者使用transform转换"), inputN);
|
||||
return;
|
||||
}
|
||||
if(!sampleRate){
|
||||
This._inputErr($T("N4ke::input调用失败:未提供sampleRate"), inputN);
|
||||
return;
|
||||
}
|
||||
if(This.bufferSampleRate && This.bufferSampleRate!=sampleRate){
|
||||
This._inputErr($T("IHZd::input调用失败:data的sampleRate={1}和之前的={2}不同",0,sampleRate,This.bufferSampleRate), inputN);
|
||||
return;
|
||||
}
|
||||
if(!This.bufferSampleRate){
|
||||
This.bufferSampleRate=sampleRate;//首次处理后,固定下来,后续的每次输入都是相同的
|
||||
}
|
||||
|
||||
//加入队列,纠正input执行顺序,解码、transform均有可能会导致顺序不一致
|
||||
if(inputN>This.inputQueueIdx){ //clearInput移动了队列位置的丢弃
|
||||
This.inputQueue[inputN]=pcm;
|
||||
}
|
||||
|
||||
if(This._dest){//已start,可以开始处理队列
|
||||
This._inputProcess();
|
||||
}
|
||||
}
|
||||
,_inputErr:function(errMsg, inputN){
|
||||
if(!this.inputQueue) return;//stop了
|
||||
this.inputQueue[inputN]=1;//出错了,队列里面也要占个位
|
||||
this.set.onInputError(errMsg, inputN);
|
||||
}
|
||||
//处理input队列
|
||||
,_inputProcess:function(){
|
||||
var This=this;
|
||||
if(!This.bufferSampleRate){
|
||||
return;
|
||||
}
|
||||
|
||||
var queue=This.inputQueue;
|
||||
for(var i=This.inputQueueIdx+1;i<queue.length;i++){ //inputN是从1开始,所以+1
|
||||
var pcm=queue[i];
|
||||
if(pcm==1){
|
||||
This.inputQueueIdx=i;//跳过出错的input
|
||||
continue;
|
||||
}
|
||||
if(!pcm){
|
||||
return;//之前的input还未进入本方法,退出等待
|
||||
}
|
||||
|
||||
This.inputQueueIdx=i;
|
||||
queue[i]=null;
|
||||
|
||||
//推入缓冲,最多两个元素 [堆积的,新的]
|
||||
var pcms=This.pcmBuffer;
|
||||
var pcm0=pcms[0],pcm1=pcms[1];
|
||||
if(pcm0.length){
|
||||
if(pcm1.length){
|
||||
var tmp=new Int16Array(pcm0.length+pcm1.length);
|
||||
tmp.set(pcm0);
|
||||
tmp.set(pcm1,pcm0.length);
|
||||
pcms[0]=tmp;
|
||||
}
|
||||
}else{
|
||||
pcms[0]=pcm1;
|
||||
}
|
||||
pcms[1]=pcm;
|
||||
|
||||
This._Td+=pcm.length;//更新已输入总时长
|
||||
This._updateTime();
|
||||
This._playLive();//有播放数据了
|
||||
}
|
||||
|
||||
if(!BufferStreamPlayer.BadAudioBuffer){
|
||||
if(!This.audioBuffer){
|
||||
This._createBuffer(true);
|
||||
}else{
|
||||
This._writeBuffer();
|
||||
}
|
||||
}else{
|
||||
This._writeBad();
|
||||
}
|
||||
}
|
||||
|
||||
/**清除已输入但还未播放的数据,一般用于非实时模式打断老的播放;返回清除的音频时长,默认会从总时长duration中减去此时长,keepDuration时不减去*/
|
||||
,clearInput:function(keepDuration){
|
||||
var This=this, sampleRate=This.bufferSampleRate, size=0;
|
||||
if(This.inputQueue){//未stop
|
||||
This.inputQueueIdx=This.inputN;//队列位置移到结尾
|
||||
|
||||
var pcms=This.pcmBuffer;
|
||||
size=pcms[0].length+pcms[1].length;
|
||||
This._subClear();
|
||||
if(!keepDuration) This._Td-=size;//减掉已输入总时长
|
||||
This._updateTime(1);
|
||||
}
|
||||
var dur = size? Math.round(size/sampleRate*1000) : 0;
|
||||
CLog("clearInput "+dur+"ms "+size);
|
||||
return dur;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/****************正常的播放处理****************/
|
||||
//创建播放buffer
|
||||
,_createBuffer:function(init){
|
||||
var This=this,set=This.set;
|
||||
if(!init && !This.audioBuffer){
|
||||
return;
|
||||
}
|
||||
|
||||
var ctx=This._ctx;
|
||||
var sampleRate=This.bufferSampleRate;
|
||||
var bufferSize=sampleRate*(set.bufferSecond||60);//建一个可以持续播放60秒的buffer,循环写入数据播放,大点好简单省事
|
||||
var buffer=ctx.createBuffer(1, bufferSize,sampleRate);
|
||||
|
||||
var source=ctx.createBufferSource();
|
||||
source.channelCount=1;
|
||||
source.buffer=buffer;
|
||||
source.connect(This._dest);
|
||||
if(set.play){//播放出声音
|
||||
source.connect(ctx.destination);
|
||||
}
|
||||
source.onended=function(){
|
||||
source.disconnect();
|
||||
source.stop();
|
||||
|
||||
This._createBuffer();//重新创建buffer
|
||||
};
|
||||
source.start();//古董 source.noteOn(0) 不支持onended 放弃支持
|
||||
|
||||
This.bufferSource=source;
|
||||
This.audioBuffer=buffer;
|
||||
This.audioBufferIdx=0;
|
||||
This._createBufferTime=Date.now();
|
||||
|
||||
This._writeBuffer();
|
||||
}
|
||||
,_writeBuffer:function(){
|
||||
var This=this,set=This.set;
|
||||
var buffer=This.audioBuffer;
|
||||
var sampleRate=This.bufferSampleRate;
|
||||
var oldAudioBufferIdx=This.audioBufferIdx;
|
||||
if(!buffer){
|
||||
return;
|
||||
}
|
||||
|
||||
//计算已播放的量,可能已播放过头了,卡了没有数据
|
||||
var playSize=Math.floor((Date.now()-This._createBufferTime)/1000*sampleRate);
|
||||
if(This.audioBufferIdx+0.005*sampleRate<playSize){//5ms动态区间
|
||||
This.audioBufferIdx=playSize;//将写入位置修正到当前播放位置
|
||||
}
|
||||
//写进去了,但还未被播放的量
|
||||
var wnSize=Math.max(0, This.audioBufferIdx-playSize);
|
||||
|
||||
//这次最大能写入多少;限制到800ms,包括写入了还未播放的
|
||||
var maxSize=buffer.length-This.audioBufferIdx;
|
||||
maxSize=Math.min(maxSize, ~~(0.8*sampleRate)-wnSize);
|
||||
if(maxSize<1){//写不下了,退出
|
||||
return;
|
||||
}
|
||||
|
||||
if(This._subPause()){//暂停了,不消费缓冲数据
|
||||
return;
|
||||
};
|
||||
var pcms=This.pcmBuffer;
|
||||
var pcm0=pcms[0],pcm1=pcms[1],pcm1Len=pcm1.length;
|
||||
if(pcm0.length+pcm1Len==0){//无可用数据,退出
|
||||
This._playEnd();//无可播放数据回调
|
||||
return;
|
||||
};
|
||||
This._playLive();//有播放数据了
|
||||
|
||||
var pcmSize=0,speed=1;
|
||||
var realMode=set.realtime;
|
||||
while(realMode){
|
||||
//************实时模式************
|
||||
//尽量同步播放,避免过大延迟,但始终保持延迟150ms播放新数据,这样每次添加进新数据都是接到还未播放到的最后面,减少引入的杂音,减少网络波动的影响
|
||||
var delaySecond=0.15;
|
||||
|
||||
//计算当前堆积的量
|
||||
var dSize=wnSize+pcm0.length;
|
||||
var dMax=(realMode.maxDelay||300)/1000 *sampleRate;
|
||||
|
||||
//堆积的在300ms内按正常播放
|
||||
if(dSize<dMax){
|
||||
//至少要延迟播放新数据
|
||||
var d150Size=Math.floor(delaySecond*sampleRate-dSize-pcm1Len);
|
||||
if(oldAudioBufferIdx==0 && d150Size>0){
|
||||
//开头加上少了的延迟
|
||||
This.audioBufferIdx=Math.max(This.audioBufferIdx, d150Size);
|
||||
}
|
||||
|
||||
realMode=false;//切换成顺序播放
|
||||
break;
|
||||
}
|
||||
//堆积的太多,配置为全丢弃
|
||||
if(realMode.discardAll){
|
||||
if(dSize>dMax*1.333){//超过400ms,取200ms正常播放,300ms中位数
|
||||
pcm0=This._cutPcm0(Math.round(dMax*0.666-wnSize-pcm1Len));
|
||||
}
|
||||
realMode=false;//切换成顺序播放
|
||||
break;
|
||||
}
|
||||
|
||||
//堆积的太多,要加速播放了,最多播放积压最后3秒的量,超过的直接丢弃
|
||||
pcm0=This._cutPcm0(3*sampleRate-wnSize-pcm1Len);
|
||||
|
||||
speed=1.6;//倍速,重采样
|
||||
//计算要截取出来量
|
||||
pcmSize=Math.min(maxSize, Math.floor((pcm0.length+pcm1Len)/speed));
|
||||
break;
|
||||
}
|
||||
if(!realMode){
|
||||
//*******按顺序取数据播放*********
|
||||
//计算要截取出来量
|
||||
pcmSize=Math.min(maxSize, pcm0.length+pcm1Len);
|
||||
}
|
||||
if(!pcmSize){
|
||||
return;
|
||||
}
|
||||
|
||||
//截取数据并写入到audioBuffer中
|
||||
This.audioBufferIdx=This._subWrite(buffer,pcmSize,This.audioBufferIdx,speed);
|
||||
}
|
||||
|
||||
|
||||
/****************兼容播放处理,播放音质略微差点****************/
|
||||
,_writeBad:function(){
|
||||
var This=this,set=This.set;
|
||||
var buffer=This.audioBuffer;
|
||||
var sampleRate=This.bufferSampleRate;
|
||||
var ctx=This._ctx;
|
||||
|
||||
//正在播放,5ms不能结束就等待播放完,定时器是10ms
|
||||
if(buffer){
|
||||
var ms=buffer.length/sampleRate*1000;
|
||||
if(Date.now()-This._createBufferTime<ms-5){
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
//这次最大能写入多少;限制到800ms
|
||||
var maxSize=~~(0.8*sampleRate);
|
||||
var st=set.PlayBufferDisable?0:sampleRate/1000*300;//缓冲播放,不然间隔太短接续爆音明显
|
||||
|
||||
if(This._subPause()){//暂停了,不消费缓冲数据
|
||||
return;
|
||||
};
|
||||
var pcms=This.pcmBuffer;
|
||||
var pcm0=pcms[0],pcm1=pcms[1],pcm1Len=pcm1.length;
|
||||
var allSize=pcm0.length+pcm1Len;
|
||||
if(allSize==0 || allSize<st){//无可用数据 不够缓冲量,退出
|
||||
This._playEnd();//无可播放数据回调,最后一丁点会始终等缓冲满导致卡住
|
||||
return;
|
||||
};
|
||||
This._playLive();//有播放数据了
|
||||
|
||||
var pcmSize=0,speed=1;
|
||||
var realMode=set.realtime;
|
||||
while(realMode){
|
||||
//************实时模式************
|
||||
//计算当前堆积的量
|
||||
var dSize=pcm0.length;
|
||||
var dMax=(realMode.maxDelay||300)/1000 *sampleRate;
|
||||
|
||||
//堆积的在300ms内按正常播放
|
||||
if(dSize<dMax){
|
||||
realMode=false;//切换成顺序播放
|
||||
break;
|
||||
}
|
||||
//堆积的太多,配置为全丢弃
|
||||
if(realMode.discardAll){
|
||||
if(dSize>dMax*1.333){//超过400ms,取200ms正常播放,300ms中位数
|
||||
pcm0=This._cutPcm0(Math.round(dMax*0.666-pcm1Len));
|
||||
}
|
||||
realMode=false;//切换成顺序播放
|
||||
break;
|
||||
}
|
||||
|
||||
//堆积的太多,要加速播放了,最多播放积压最后3秒的量,超过的直接丢弃
|
||||
pcm0=This._cutPcm0(3*sampleRate-pcm1Len);
|
||||
|
||||
speed=1.6;//倍速,重采样
|
||||
//计算要截取出来量
|
||||
pcmSize=Math.min(maxSize, Math.floor((pcm0.length+pcm1Len)/speed));
|
||||
break;
|
||||
}
|
||||
if(!realMode){
|
||||
//*******按顺序取数据播放*********
|
||||
//计算要截取出来量
|
||||
pcmSize=Math.min(maxSize, pcm0.length+pcm1Len);
|
||||
}
|
||||
if(!pcmSize){
|
||||
return;
|
||||
}
|
||||
|
||||
//新建buffer,一次性完整播放当前的数据
|
||||
buffer=ctx.createBuffer(1,pcmSize,sampleRate);
|
||||
|
||||
//截取数据并写入到audioBuffer中
|
||||
This._subWrite(buffer,pcmSize,0,speed);
|
||||
|
||||
//首尾进行1ms的淡入淡出 大幅减弱爆音
|
||||
FadeInOut(buffer.getChannelData(0), sampleRate);
|
||||
|
||||
var source=ctx.createBufferSource();
|
||||
source.channelCount=1;
|
||||
source.buffer=buffer;
|
||||
source.connect(This._dest);
|
||||
if(set.play){//播放出声音
|
||||
source.connect(ctx.destination);
|
||||
}
|
||||
source.start();//古董 source.noteOn(0) 不支持onended 放弃支持
|
||||
|
||||
This.bufferSource=source;
|
||||
This.audioBuffer=buffer;
|
||||
This._createBufferTime=Date.now();
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
,_cutPcm0:function(pcmNs){//保留堆积的数据到指定的时长数量
|
||||
var pcms=this.pcmBuffer,pcm0=pcms[0];
|
||||
if(pcmNs<0)pcmNs=0;
|
||||
if(pcm0.length>pcmNs){//丢弃超过秒数的
|
||||
var size=pcm0.length-pcmNs, dur=Math.round(size/this.bufferSampleRate*1000);
|
||||
pcm0=pcm0.subarray(size);
|
||||
pcms[0]=pcm0;
|
||||
CLog($T("L8sC::延迟过大,已丢弃{1}ms {2}",0,dur,size),3);
|
||||
}
|
||||
return pcm0;
|
||||
}
|
||||
,_subPause:function(){//暂停了,就不要消费掉缓冲数据了,等待resume再来消费
|
||||
var This=this;
|
||||
if(!This.isPause){
|
||||
return 0;
|
||||
};
|
||||
if(This.set.realtime){//实时模式,丢弃所有未消费的数据,resume时从最新input的数据开始播放
|
||||
This._subClear();
|
||||
};
|
||||
return 1;
|
||||
}
|
||||
,_subClear:function(){ //清除缓冲数据
|
||||
this.pcmBuffer=[[],[]];
|
||||
}
|
||||
,_subWrite:function(buffer, pcmSize, offset, speed){
|
||||
var This=this;
|
||||
var pcms=This.pcmBuffer;
|
||||
var pcm0=pcms[0],pcm1=pcms[1];
|
||||
|
||||
//截取数据
|
||||
var pcm=new Int16Array(pcmSize);
|
||||
var i=0,n=0;
|
||||
for(var j=0;n<pcmSize && j<pcm0.length;){//简单重采样
|
||||
pcm[n++]=pcm0[i];
|
||||
j+=speed; i=Math.round(j);
|
||||
}
|
||||
if(i>=pcm0.length){//堆积的消耗完了
|
||||
pcm0=new Int16Array(0);
|
||||
|
||||
for(j=0,i=0;n<pcmSize && j<pcm1.length;){
|
||||
pcm[n++]=pcm1[i];
|
||||
j+=speed; i=Math.round(j);
|
||||
}
|
||||
if(i>=pcm1.length){
|
||||
pcm1=new Int16Array(0);
|
||||
}else{
|
||||
pcm1=pcm1.subarray(i);
|
||||
}
|
||||
pcms[1]=pcm1;
|
||||
}else{
|
||||
pcm0=pcm0.subarray(i);
|
||||
}
|
||||
pcms[0]=pcm0;
|
||||
|
||||
|
||||
//写入到audioBuffer中
|
||||
var channel=buffer.getChannelData(0);
|
||||
for(var i=0;i<pcmSize;i++,offset++){
|
||||
channel[offset]=pcm[i]/0x7FFF;
|
||||
}
|
||||
|
||||
This._Tc+=pcmSize;//更新已播放时长
|
||||
This._updateTime();
|
||||
|
||||
return offset;
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
var NoStartMsg=function(){
|
||||
return $T("TZPq::{1}未调用start方法",0,BufferStreamPlayerTxt);
|
||||
};
|
||||
|
||||
|
||||
|
||||
/**pcm数据进行首尾1ms淡入淡出处理,播放时可以大幅减弱爆音**/
|
||||
var FadeInOut=BufferStreamPlayer.FadeInOut=function(arr,sampleRate){
|
||||
var sd=sampleRate/1000*1;//浮点数,arr是Int16或者Float32
|
||||
for(var i=0;i<sd;i++){
|
||||
arr[i]*=i/sd;
|
||||
}
|
||||
for(var l=arr.length,i=~~(l-sd);i<l;i++){
|
||||
arr[i]*=(l-i)/sd;
|
||||
}
|
||||
};
|
||||
|
||||
/**解码音频文件成pcm**/
|
||||
var DecodeAudio=BufferStreamPlayer.DecodeAudio=function(arrayBuffer,True,False){
|
||||
var ctx=Recorder.GetContext();
|
||||
if(!ctx){//强制激活Recorder.Ctx 不支持大概率也不支持解码
|
||||
False&&False($T("iCFC::浏览器不支持音频解码"));
|
||||
return;
|
||||
};
|
||||
if(!arrayBuffer || !(arrayBuffer instanceof ArrayBuffer)){
|
||||
False&&False($T("wE2k::音频解码数据必须是ArrayBuffer"));
|
||||
return;//非ArrayBuffer 有日志但不抛异常 不会走回调
|
||||
};
|
||||
|
||||
ctx.decodeAudioData(arrayBuffer,function(raw){
|
||||
var src=raw.getChannelData(0);
|
||||
var sampleRate=raw.sampleRate;
|
||||
|
||||
var pcm=new Int16Array(src.length);
|
||||
for(var i=0;i<src.length;i++){//floatTo16BitPCM
|
||||
var s=Math.max(-1,Math.min(1,src[i]));
|
||||
s=s<0?s*0x8000:s*0x7FFF;
|
||||
pcm[i]=s;
|
||||
};
|
||||
|
||||
True&&True({
|
||||
sampleRate:sampleRate
|
||||
,duration:Math.round(src.length/sampleRate*1000)
|
||||
,data:pcm
|
||||
});
|
||||
},function(e){
|
||||
False&&False($T("mOaT::音频解码失败:{1}",0,e&&e.message||"-"));
|
||||
});
|
||||
};
|
||||
|
||||
var CLog=function(){
|
||||
var v=arguments; v[0]="["+BufferStreamPlayerTxt+"]"+v[0];
|
||||
Recorder.CLog.apply(null,v);
|
||||
};
|
||||
Recorder[BufferStreamPlayerTxt]=BufferStreamPlayer;
|
||||
|
||||
|
||||
}));

public/extensions/create-audio.nmn2pcm.js (new file, 372 lines)
@@ -0,0 +1,372 @@
/***
|
||||
简单用 正弦波、方波、锯齿波、三角波 函数生成一段音乐简谱的pcm数据,主要用于测试时提供音频数据。本可音频生成插件可以移植到其他语言环境,如需定制可联系作者
|
||||
https://github.com/xiangyuecn/Recorder
|
||||
|
||||
此插件在线生成测试:assets/runtime-codes/test.create-audio.nmn2pcm.js
|
||||
|
||||
var pcmData=Recorder.NMN2PCM(set);
|
||||
set配置:{
|
||||
texts:""|["",""] 简谱格式化文本,如果格式不符合要求,将会抛异常
|
||||
sampleRate: 生成pcm的采样率,默认48000;取值不能过低,否则会削除高音
|
||||
timbre: 音色,默认2.0(使用音符对应频率的一个倍频),取值>=1.0
|
||||
meterDuration: 一拍时长,毫秒,默认600ms
|
||||
muteDuration: 音符之间的静默,毫秒,0时无静默,默认meterDur/4(最大50ms)
|
||||
beginDuration: 开头的静默时长,毫秒,0时无静默,默认为200ms
|
||||
endDuration: 结尾的静默时长,毫秒,0时无静默,默认为200ms
|
||||
|
||||
volume: 音量,默认0.3,取值范围0.0-1.0(最大值1)
|
||||
waveType: 波形发生器类型,默认"sine",取值:sine(正弦波)、square(方波,volume应当减半)、sawtooth(锯齿波)、triangle(三角波)
|
||||
}
|
||||
|
||||
texts格式:单个文本,或文本数组
|
||||
- 四分音符(一拍):低音: 1.-7. 中音: 1-7 高音: 1'-7' 休止符(静音):0
|
||||
- 音符后面用 "." 表示低音(尽量改用".":".." 倍低音,"..." 超低音)
|
||||
- 音符后面用 "'" 表示高音(尽量改用"'":"''" 倍高音,"'''" 超高音)
|
||||
- 音符之间用 "|" 或 " " 分隔一拍
|
||||
- 一拍里面多个音符用 "," 分隔,每个音按权重分配这一拍的时长占比,如:“6,7”为一拍,6、7各占1/2拍,相当于八分音符
|
||||
|
||||
- 音符后面用 "-" 表示二分音符,简单计算为1+1=2拍时长,几个-就加几拍
|
||||
- 音符后面用 "_" 表示八分音符;两两在一拍里面的音符可以免写_,自动会按1/2分配;一拍里面只有一个音时这拍会被简单计算为1/2=0.5拍;其他情况计算会按权重分配这一拍的时长(复杂),如:“6,7_”为1/2+1/2/2=0.75拍(“6*,7_”才是(1+0.5)/2+1/2/2=1拍),其中6权重1分配1/2=0.5拍,7权重0.5分配1/2/2=0.25拍;多加一个"_"就多除个2:“6_,7_”是1/2+1/2=1拍(等同于“6,7”可免写_);“6__,7__”是1/2/2+1/2/2=0.5拍;只要权重加起来是整数就算作完整的1拍
|
||||
- 音符后面用 "*" 表示1+0.5=1.5拍,多出来的1/2计算和_相同(复杂),"**"两个表示加0.25
|
||||
|
||||
- 可以使用 "S"(sine) "Q"(square) "A"(sawtooth) "T"(triangle) 来切换后续波形发生器类型(按一拍来书写,但不占用时长),类型后面可以接 "(2.0)" 来设置音色,接 "[0.5]" 来设置音量(为set.volume*0.5);特殊值 "R"(reset) 可重置类型成set配置值,如果R后面没有接音色或音量也会被重置;比如:"1 2|A(4.0)[0.6] 3 4 R|5 6",其中12 56使用set配置的类型和音色音量,34使用锯齿波、音色4.0、音量0.18=0.3*0.6
|
||||
|
||||
- 如果同时有多个音,必须提供数组格式,每个音单独提供一个完整简谱(必须同步对齐)
|
||||
|
||||
返回结果:{
|
||||
pcm: Int16Array,pcm数据
|
||||
duration: 123 pcm的时长,单位毫秒
|
||||
set: {...} 使用的set配置
|
||||
warns: [] 不适合抛异常的提示消息
|
||||
}
|
||||
|
||||
Recorder.NMN2PCM.GetExamples() 可获取内置的简谱
|
||||
***/
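/*
Usage sketch (an assumed call, kept minimal): generating a short phrase and inspecting the
result; the 16000 sample rate and the notes chosen here are arbitrary.

    var res=Recorder.NMN2PCM({
        texts:"1 2 3|5-"       //three quarter notes, then a half note
        ,sampleRate:16000
        ,meterDuration:500     //500ms per beat
    });
    console.log(res.duration+"ms",res.pcm.length+" samples",res.warns);
*/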
|
||||
(function(factory){
|
||||
var browser=typeof window=="object" && !!window.document;
|
||||
var win=browser?window:Object; //非浏览器环境,Recorder挂载在Object下面
|
||||
var rec=win.Recorder,ni=rec.i18n;
|
||||
factory(rec,ni,ni.$T,browser);
|
||||
}(function(Recorder,i18n,$T,isBrowser){
|
||||
"use strict";
|
||||
|
||||
var NMN2PCM=function(set){
|
||||
var texts=set.texts||[]; if(typeof(texts)=="string") texts=[texts];
|
||||
var setSR=set.sampleRate, sampleRate=setSR; if(!sampleRate || sampleRate<1)sampleRate=48000;
|
||||
var meterDur=set.meterDuration||600;
|
||||
var timbre=set.timbre||2; if(timbre<1)timbre=1;
|
||||
|
||||
var volume=set.volume; if(volume==null)volume=0.3;
|
||||
volume=Math.max(0,volume); volume=Math.min(1,volume);
|
||||
|
||||
var waveType=set.waveType||"";
|
||||
if(",sine,square,sawtooth,triangle,".indexOf(","+waveType+",")==-1)waveType="";
|
||||
waveType=waveType||"sine";
|
||||
|
||||
var muteDur=set.muteDuration;
|
||||
if(muteDur==null || muteDur<0){
|
||||
muteDur=meterDur/4; if(muteDur>50)muteDur=50;
|
||||
}
|
||||
var mute0=new Int16Array(sampleRate*muteDur/1000);
|
||||
|
||||
var beginDur=set.beginDuration;
|
||||
if(beginDur==null || beginDur<0) beginDur=200;
|
||||
var beginMute=new Int16Array(sampleRate*beginDur/1000);
|
||||
var endDur=set.endDuration;
|
||||
if(endDur==null || endDur<0) endDur=200;
|
||||
var endMute=new Int16Array(sampleRate*endDur/1000);
|
||||
|
||||
//生成C调频率 A=440 国际标准音
|
||||
var s=function(s){ return 440/Math.pow(2,s/12) };
|
||||
var Freqs=[s(9),s(7),s(5),s(4),s(2),s(0),s(-2)];
|
||||
var FreqMP={};
|
||||
for(var i=1;i<=7;i++){
|
||||
var v=Freqs[i-1];
|
||||
FreqMP[i+"..."]=v/8;
|
||||
FreqMP[i+".."]=v/4;
|
||||
FreqMP[i+"."]=v/2;
|
||||
FreqMP[i]=v;
|
||||
FreqMP[i+"'"]=v*2;
|
||||
FreqMP[i+"''"]=v*4;
|
||||
FreqMP[i+"'''"]=v*8;
|
||||
}
|
||||
|
||||
var tracks=[],freqMax=0,freqMin=90000;
|
||||
for(var iT=0;setSR!=-1 && iT<texts.length;iT++){
|
||||
var meters=texts[iT].split(/[\s\|]+/);
|
||||
var buffers=[],size=0,wType=waveType,wTimbre=timbre,wVol=volume;
|
||||
for(var i0=0;i0<meters.length;i0++){
|
||||
var txt0=meters[i0]; if(!txt0)continue;
|
||||
var v0=txt0.charCodeAt(0);
|
||||
if(v0<48 || v0>55){//不是0-7,切换波形或音色
|
||||
var m=/^(\w)(?:\((.+)\)|\[(.+)\])*$/.exec(txt0)||[],mT=m[1];
|
||||
var m=/\((.+)\)/.exec(txt0)||[],mTb=m[1];
|
||||
var m=/\[(.+)\]/.exec(txt0)||[],mVol=m[1];
|
||||
if(mT=="R"){ wType=waveType;wTimbre=timbre;wVol=volume; }
|
||||
else if(mT=="S") wType="sine";
|
||||
else if(mT=="Q") wType="square";
|
||||
else if(mT=="A") wType="sawtooth";
|
||||
else if(mT=="T") wType="triangle";
|
||||
else mT="";
|
||||
if(!mT||mTb&&!+mTb||mVol&&!+mVol)throw new Error("Invalid: "+txt0);
|
||||
if(mTb)wTimbre=+mTb;
|
||||
if(mVol)wVol=volume*mVol;
|
||||
continue;
|
||||
}
|
||||
var ys=txt0.split(",");//一拍里面的音符
|
||||
var durTotal=meterDur; //一拍的时长,如果里面有+,代表多拍
|
||||
var bTotal=0,hasG=0,hasX=0;
|
||||
for(var i2=0;i2<ys.length;i2++){//先计算出每个音符的占用时长比例
|
||||
var vs=ys[i2].split("");
|
||||
var o={ y:vs[0],b:1,t:wType,tb:wTimbre,vol:wVol }; ys[i2]=o;
|
||||
for(var i3=1;i3<vs.length;i3++){
|
||||
var v=vs[i3];
|
||||
if(v=="'") o.y+="'";
|
||||
else if(v==".") o.y+=".";
|
||||
else if(v=="-"){ o.b+=1; durTotal+=meterDur; }
|
||||
else if(v=="_"){ o.b/=2; hasG=1; }
|
||||
else if(v=="*" && !hasX){ o.b+=0.5; hasX=0.5;
|
||||
if(vs[i3+1]=="*"){ o.b-=0.25; hasX=0.25; i3++; } }
|
||||
else throw new Error($T("3RBa::符号[{1}]无效:{2}",0,v,txt0));
|
||||
}
|
||||
bTotal+=o.b;
|
||||
}
|
||||
if(bTotal%1>0){
|
||||
if(hasG){//"_"不够数量,减掉时间
|
||||
durTotal*=bTotal/Math.ceil(bTotal);
|
||||
}else if(hasX){//"*"加上1/2|1/4拍的时间
|
||||
durTotal+=meterDur*hasX;
|
||||
}
|
||||
}
|
||||
durTotal-=ys.length*muteDur;//减掉中间的静默
|
||||
for(var i2=0;i2<ys.length;i2++){//生成每个音符的pcm
|
||||
var o=ys[i2],wType=o.t,wTimbre=o.tb,wVol=o.vol,freq=FreqMP[o.y]||0;
|
||||
if(!freq && o.y!="0") throw new Error($T("U212::音符[{1}]无效:{2}",0,o.y,txt0));
|
||||
freq=freq*wTimbre;
|
||||
var dur=durTotal*o.b/bTotal;
|
||||
var pcm=new Int16Array(Math.round(dur/1000*sampleRate));
|
||||
if(freq){
|
||||
freqMax=Math.max(freqMax,freq);
|
||||
freqMin=Math.min(freqMin,freq);
|
||||
//不同波形算法取自 https://github.com/cristovao-trevisan/wave-generator/blob/master/index.js
|
||||
if(wType=="sine"){//正弦波
|
||||
var V=(2 * Math.PI) * freq / sampleRate;
|
||||
for(var i=0;i<pcm.length;i++){
|
||||
var v=wVol*Math.sin(V * i);
|
||||
pcm[i]=Math.max(-1,Math.min(1,v))*0x7FFF;
|
||||
}
|
||||
}else if(wType=="square"){//方波
|
||||
var V=sampleRate / freq;
|
||||
for(var i=0;i<pcm.length;i++){
|
||||
var v=wVol*((i % V) < (V / 2) ? 1 : -1);
|
||||
pcm[i]=Math.max(-1,Math.min(1,v))*0x7FFF;
|
||||
}
|
||||
}else if(wType=="sawtooth"){//锯齿波
|
||||
var V=sampleRate / freq;
|
||||
for(var i=0;i<pcm.length;i++){
|
||||
var v=wVol*(-1 + 2 * (i % V) / V);
|
||||
pcm[i]=Math.max(-1,Math.min(1,v))*0x7FFF;
|
||||
}
|
||||
}else if(wType=="triangle"){//三角波
|
||||
var V=sampleRate / freq;
|
||||
for(var i=0;i<pcm.length;i++){
|
||||
var Vi = (i + V / 4) % V;
|
||||
var v=wVol*(Vi<V/2?(-1+4*Vi/V):(3-4*Vi/V));
|
||||
pcm[i]=Math.max(-1,Math.min(1,v))*0x7FFF;
|
||||
}
|
||||
}
|
||||
var pcmDur4=~~(pcm.length/sampleRate*1000/4)||1;
|
||||
FadeInOut(pcm,sampleRate,Math.min(pcmDur4, 10));
|
||||
}
|
||||
|
||||
var mute=mute0; if(!buffers.length)mute=beginMute;
|
||||
buffers.push(mute); size+=mute.length;
|
||||
|
||||
buffers.push(pcm); size+=pcm.length;
|
||||
}
|
||||
}
|
||||
if(size>0){
|
||||
buffers.push(endMute); size+=endMute.length;
|
||||
tracks.push({buffers:buffers,size:size});
|
||||
}
|
||||
}
|
||||
tracks.sort(function(a,b){return b.size-a.size});
|
||||
|
||||
var pcm=new Int16Array(tracks[0]&&tracks[0].size||0);
|
||||
for(var iT=0;iT<tracks.length;iT++){
|
||||
var o=tracks[iT],buffers=o.buffers,size=o.size;
|
||||
if(iT==0){
|
||||
for(var i=0,offset=0;i<buffers.length;i++){
|
||||
var buf=buffers[i];
|
||||
pcm.set(buf,offset);
|
||||
offset+=buf.length;
|
||||
}
|
||||
}else{
|
||||
var diffMs=(pcm.length-size)/sampleRate*1000;
|
||||
if(diffMs>10){//10毫秒误差
|
||||
throw new Error($T("7qAD::多个音时必须对齐,相差{1}ms",0,diffMs));
|
||||
};
|
||||
for(var i=0,offset=0;i<buffers.length;i++){
|
||||
var buf=buffers[i];
|
||||
for(var j=0;j<buf.length;j++){
|
||||
var data_mix,data1=pcm[offset],data2=buf[j];
|
||||
|
||||
//简单混音算法 https://blog.csdn.net/dancing_night/article/details/53080819
|
||||
if(data1<0 && data2<0){
|
||||
data_mix = data1+data2 - (data1 * data2 / -0x7FFF);
|
||||
}else{
|
||||
data_mix = data1+data2 - (data1 * data2 / 0x7FFF);
|
||||
};
|
||||
|
||||
pcm[offset++]=data_mix;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var dur=Math.round(pcm.length/sampleRate*1000);
|
||||
var Warns=[],minSR=~~(freqMax*2);
|
||||
if(freqMax && sampleRate<minSR){
|
||||
var msg="sampleRate["+sampleRate+"] should be greater than "+minSR;
|
||||
Warns.push(msg); Recorder.CLog("NMN2PCM: "+msg,3);
|
||||
}
|
||||
|
||||
return {pcm:pcm, duration:dur, warns:Warns, set:{
|
||||
texts:texts, sampleRate:sampleRate, timbre:timbre, meterDuration:meterDur
|
||||
,muteDuration:muteDur, beginDuration:beginDur, endDuration:endDur
|
||||
,volume:volume,waveType:waveType
|
||||
}};
|
||||
};
|
||||
|
||||
|
||||
/**pcm数据进行首尾1ms淡入淡出处理,播放时可以大幅减弱爆音**/
|
||||
var FadeInOut=NMN2PCM.FadeInOut=function(arr,sampleRate,dur){
|
||||
var sd=sampleRate/1000*(dur||1);//浮点数,arr是Int16或者Float32
|
||||
for(var i=0;i<sd;i++){
|
||||
arr[i]*=i/sd;
|
||||
}
|
||||
for(var l=arr.length,i=~~(l-sd);i<l;i++){
|
||||
arr[i]*=(l-i)/sd;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/***内置部分简谱*****/
|
||||
NMN2PCM.GetExamples=function(){ return {
|
||||
|
||||
DFH:{//前3句,https://www.hnchemeng.com/liux/201807/68393.html
|
||||
name:"东方红"
|
||||
,get:function(sampleRate){
|
||||
return NMN2PCM({ //https://www.bilibili.com/video/BV1VW4y1v7nY?p=2
|
||||
sampleRate:sampleRate
|
||||
,meterDuration:1000
|
||||
,timbre:3
|
||||
,texts:"5 5,6|2-|1 1,6.|2-|5 5|6,1' 6,5|1 1,6.|2-"
|
||||
});
|
||||
}
|
||||
}
|
||||
,HappyBirthday:{//4句,https://www.zaoxu.com/jjsh/bkdq/310228.html
|
||||
name:$T("QGsW::祝你生日快乐")
|
||||
,get:function(sampleRate){
|
||||
return NMN2PCM({
|
||||
sampleRate:sampleRate
|
||||
,meterDuration:450
|
||||
,timbre:4
|
||||
,waveType:"triangle", volume:0.15
|
||||
,texts:"5.,5. 6. 5.|1 7.-|5.,5. 6. 5.|2 1-|5.,5. 5 3|1 7. 6.|4*,4_ 3 1|2 1-"
|
||||
});
|
||||
}
|
||||
}
|
||||
,LHC:{//节选一段,https://www.qinyipu.com/jianpu/jianpudaquan/41703.html
|
||||
name:"兰花草(洒水版)"
|
||||
,get:function(sampleRate){
|
||||
return NMN2PCM({
|
||||
sampleRate:sampleRate
|
||||
,meterDuration:650
|
||||
,timbre:4
|
||||
,texts:"6.,3 3,3|3* 2_|1*,2_ 1,7.|6.-|6,6 6,6|6* 5_|3_,5_,5 5,4|3-|3,3_,6_ 6,5|3* 2_|1*,2_ 1,7.|6. 3.|3.,1 1,7.|6.* 2__,3__|2*,1_ 7._,7._,5.|6.-"
|
||||
});
|
||||
}
|
||||
}
|
||||
,ForElise:{//节选一段,https://www.qinyipu.com/jianpu/chunyinle/3023.html
|
||||
name:$T("emJR::致爱丽丝")
|
||||
,get:function(sampleRate){
|
||||
return NMN2PCM({
|
||||
sampleRate:sampleRate
|
||||
,meterDuration:550
|
||||
,muteDuration:20
|
||||
,timbre:6
|
||||
,texts:"3',2'|3',2' 3',7 2',1'|"
|
||||
+"6 0,1 3,6|7 0,3 5,7|1' 0 3',2'|"
|
||||
+"3',2' 3',7 2',1'|6 0,1 3,6|7 0,3 1',7|"
|
||||
+"6 0,7 1',2'|3' 0,5 4',3'|2' 0,4 3',2'|1' 0,3 2',1'|"
|
||||
+"7"
|
||||
});
|
||||
}
|
||||
}
|
||||
,Canon_Right:{//节选一段,https://www.cangqiang.com.cn/d/32153.html
|
||||
name:$T("GsYy::卡农-右手简谱")
|
||||
,get:function(sampleRate){
|
||||
return NMN2PCM({
|
||||
sampleRate:sampleRate
|
||||
,meterDuration:700
|
||||
,texts:"1',7 1',3 5 6,7|"
|
||||
+"1' 3' 5',3' 5',6'|4',3' 2',4' 3',2' 1',7| 7 1',2'|"
|
||||
+"5',3'_,4'_ 5',3'_,4'_ 5',5,6,7 1',2',3',4'|3',1'_,2'_ 3',3_,4_ 5,6,5,4 5,1',7,1'|"
|
||||
+"6,1'_,7_ 6,5_,4_ 5,4,3,4 5,6,7,1'|6,1'_,7_ 1',7_,6_ 7,6,7,1' 2'_,1'_,7|1'-"
|
||||
});
|
||||
}
|
||||
}
|
||||
,Canon:{//开头一段,https://www.kanpula.com/jianpu/21316.html
|
||||
name:$T("bSFZ::卡农")
|
||||
,get:function(sampleRate){
|
||||
var txt1="",txt2="",txt3="",txt4="";
|
||||
//(1)
|
||||
txt1+="3'---|2'---|1'---|7---|";
|
||||
txt2+="1'---|7---|6---|5---|";
|
||||
txt3+="5---|5---|3---|3---|";
|
||||
txt4+="R[0.3] 1. 5. 1 3|5.. 2. 5. 7.|6.. 3. 6. 1|3.. 7.. 3. 5.|";
|
||||
//(5)
|
||||
txt1+="6---|5---|6---|7---|";
|
||||
txt2+="4---|3---|4---|5---|";
|
||||
txt3+="1---|1---|1---|2---|";
|
||||
txt4+="4.. 1. 4. 6.|1. 5. 1 3|4.. 1. 4. 6.|5.. 2. 5. 7.|";
|
||||
//(9)
|
||||
txt1+="3'---|2'---|1'---|7---|";
|
||||
txt2+="1'---|7---|6---|5---|";
|
||||
txt3+="5---|5---|3---|3-- 5'|";
|
||||
txt4+="1. 5. 1 3|5.. 2. 5. 7.|6.. 3. 6. 1|3.. 7.. 3. 5.|";
|
||||
//(13)
|
||||
txt1+="4' 3' 2' 4'|3' 2' 1' 5|6- 6 1'|7 1' 2'-|";
|
||||
txt2+="4.. 1. 4. 6.|1. 5. 1 3|4.. 1. 4. 6.|5.. 2. 5. 7.|";
|
||||
txt3+="0---|0---|0---|0---|";
|
||||
txt4+="0---|0---|0---|0---|";
|
||||
//(17)
|
||||
txt1+="3',5 1'_ 5' 5_ 3'|3' 4' 3' 2'|1',3 6_ 3' 3_ 1'|1' 2' 1' 7|";
|
||||
txt2+="1. 5. 1 3|5.. 2. 5. 7.|6.. 3. 6. 1|3.. 7.. 3. 5.|";
|
||||
txt3+="0---|0---|0---|0---|";
|
||||
txt4+="0---|0---|0---|0---|";
|
||||
//(21)
|
||||
txt1+="6,1 4_ 1' 1_ 6|5,1 3_ 1' 1_ 5|6,1 4_ 1' 1_ 6|7 7 1' 2'|";
|
||||
txt2+="4.. 1. 4. 6.|1. 5. 1 3|4.. 1. 4. 6.|5..,5. 5..,5. 6..,6. 6..,6.|";
|
||||
txt3+="0---|0---|0---|0---|";
|
||||
txt4+="0---|0---|0---|0---|";
|
||||
|
||||
return NMN2PCM({
|
||||
sampleRate:sampleRate
|
||||
,meterDuration:500
|
||||
,texts:[txt1,txt2,txt3,txt4]
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
Recorder.NMN2PCM=NMN2PCM;
|
||||
|
||||
}));
|
||||
268
public/extensions/dtmf.decode.js
Normal file
@@ -0,0 +1,268 @@
|
||||
/*
|
||||
录音 Recorder扩展,DTMF(电话拨号按键信号)解码器,解码得到按键值
|
||||
使用本扩展需要引入lib.fft.js支持
|
||||
|
||||
本扩展识别DTMF按键准确度高,误识别率低,支持识别120ms以上按键间隔+30ms以上的按键音,纯js实现易于移植
|
||||
|
||||
使用场景:电话录音软解,软电话实时提取DTMF按键信号等
|
||||
https://github.com/xiangyuecn/Recorder
|
||||
*/
|
||||
(function(factory){
|
||||
var browser=typeof window=="object" && !!window.document;
|
||||
var win=browser?window:Object; //非浏览器环境,Recorder挂载在Object下面
|
||||
var rec=win.Recorder,ni=rec.i18n;
|
||||
factory(rec,ni,ni.$T,browser);
|
||||
}(function(Recorder,i18n,$T,isBrowser){
|
||||
"use strict";
|
||||
|
||||
/*
|
||||
参数:
|
||||
pcmData:[Int16,...] pcm一维数组,原则上一次处理的数据量不要超过10秒,太长的数据应当分段延时处理
|
||||
sampleRate: 123 pcm的采样率
|
||||
prevChunk: null || {} 上次的返回值,用于连续识别
|
||||
|
||||
返回:
|
||||
chunk:{
|
||||
keys:[keyItem,...] 识别到的按键,如果未识别到数组长度为0
|
||||
keyItem:{
|
||||
key:"" //按键值 0-9 #*
|
||||
time:123 //所在的时间位置,ms
|
||||
}
|
||||
|
||||
//以下用于下次接续识别
|
||||
lastIs:"" "":mute {}:match 结尾处是什么
|
||||
lastCheckCount:0 结尾如果是key,此时的检查次数
|
||||
prevIs:"" "":null {}:match 上次疑似检测到了什么
|
||||
totalLen:0 总采样数,相对4khz
|
||||
pcm:[Int16,...] 4khz pcm数据
|
||||
checkFactor:3 信号检查因子,取值1,2,3,默认为3不支持低于32ms的按键音检测,当需要检测时可以设为2,当信号更恶劣时设为1,这样将会减少检查的次数,导致错误识别率变高
|
||||
debug:false 是否开启调试日志
|
||||
}
|
||||
*/
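/*
Usage sketch (assumed wiring from a Recorder onProcess callback): each new pcm chunk is fed to
DTMF_Decode and the returned chunk is passed back in for continuous recognition.

    var dtmfChunk=null;
    var onProcess=function(buffers,powerLevel,duration,sampleRate){
        dtmfChunk=Recorder.DTMF_Decode(buffers[buffers.length-1],sampleRate,dtmfChunk);
        if(dtmfChunk.keys.length){
            console.log("DTMF:",dtmfChunk.keys); //[{key:"1",time:1234},...]
        }
    };
*/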
|
||||
Recorder.DTMF_Decode=function(pcmData,sampleRate,prevChunk){
|
||||
prevChunk||(prevChunk={});
|
||||
var lastIs=prevChunk.lastIs||"";
|
||||
var lastCheckCount=prevChunk.lastCheckCount==null?99:prevChunk.lastCheckCount;
|
||||
var prevIs=prevChunk.prevIs||"";
|
||||
var totalLen=prevChunk.totalLen||0;
|
||||
var prevPcm=prevChunk.pcm;
|
||||
var checkFactor=prevChunk.checkFactor||0;
|
||||
var debug=prevChunk.debug;
|
||||
|
||||
var keys=[];
|
||||
|
||||
if(!Recorder.LibFFT){
|
||||
throw new Error($T.G("NeedImport-2",["DTMF_Decode","src/extensions/lib.fft.js"]));
|
||||
};
|
||||
var bufferSize=256;//小一点每次处理的时长不会太长,也不要太小影响分辨率
|
||||
var fft=Recorder.LibFFT(bufferSize);
|
||||
|
||||
|
||||
/****初始值计算****/
|
||||
var windowSize=bufferSize/4;//滑动窗口大小,取值为4的原因:64/4=16ms,16ms*(3-1)=32ms,保证3次取值判断有效性
|
||||
var checkCount=checkFactor||3;//只有3次连续窗口内计算结果相同判定为有效信号或间隔
|
||||
var muteCount=3;//两个信号间的最小间隔,3个窗口大小
|
||||
var startTotal=totalLen;
|
||||
|
||||
/****将采样率降低到4khz,单次fft处理1000/(4000/256)=64ms,分辨率4000/256=15.625hz,允许连续dtmf信号间隔128ms****/
|
||||
var stepFloat=sampleRate/4000;
|
||||
|
||||
var newSize=Math.floor(pcmData.length/stepFloat);
|
||||
totalLen+=newSize;
|
||||
var pos=0;
|
||||
if(prevPcm&&prevPcm.length>bufferSize){//接上上次的数据,继续滑动
|
||||
pos=windowSize*(checkCount+1);
|
||||
newSize+=pos;
|
||||
startTotal-=pos;
|
||||
};
|
||||
var arr=new Int16Array(newSize);
|
||||
if(pos){
|
||||
arr.set(prevPcm.subarray(prevPcm.length-pos));//接上上次的数据,继续滑动
|
||||
};
|
||||
|
||||
for(var idxFloat=0;idxFloat<pcmData.length;pos++,idxFloat+=stepFloat){
|
||||
//简单抽样
|
||||
arr[pos]=pcmData[Math.round(idxFloat)];
|
||||
};
|
||||
pcmData=arr;
|
||||
sampleRate=4000;
|
||||
|
||||
|
||||
var freqStep=sampleRate/bufferSize;//分辨率
|
||||
var logMin=20;//粗略计算信号强度最小值,此值是先给0再根据下面的Math.log(fv)多次【测试】(下面一个log)出来的
|
||||
|
||||
|
||||
/****循环处理所有数据,识别出所有按键信号****/
|
||||
for(var i0=0; i0+bufferSize<=pcmData.length; i0+=windowSize){
|
||||
var arr=pcmData.subarray(i0,i0+bufferSize);
|
||||
var freqs=fft.transform(arr);
|
||||
var freqPs=[];
|
||||
|
||||
var fv0=0,p0=0,v0=0,vi0=0, fv1=0,p1=0,v1=0,vi1=0;//查找高群和低群
|
||||
for(var i2=0;i2<freqs.length;i2++){
|
||||
var fv=freqs[i2];
|
||||
var p=Math.log(fv);//粗略计算信号强度
|
||||
freqPs.push(p);
|
||||
var v=(i2+1)*freqStep;
|
||||
if(p>logMin){
|
||||
if(fv>fv0 && v<1050){
|
||||
fv0=fv;
|
||||
p0=p;
|
||||
v0=v;
|
||||
vi0=i2;
|
||||
}else if(fv>fv1 && v>1050){
|
||||
fv1=fv;
|
||||
p1=p;
|
||||
v1=v;
|
||||
vi1=i2;
|
||||
};
|
||||
};
|
||||
};
|
||||
var pv0 =-1, pv1=-1;
|
||||
if(v0>600 && v1<1700 && Math.abs(p0-p1)<2.5){//高低频的幅度相差不能太大,此值是先给个大值再多次【测试】(下面一个log)得出来的
|
||||
//波形匹配度:两个峰值之间应当是深V型曲线,如果出现大幅杂波,可以直接排除掉
|
||||
var isV=1;
|
||||
//先找出谷底
|
||||
var pMin=p0,minI=0;
|
||||
for(var i2=vi0;i2<vi1;i2++){
|
||||
var v=freqPs[i2];
|
||||
if(v && v<pMin){//0不作数
|
||||
pMin=v;
|
||||
minI=i2;
|
||||
};
|
||||
};
|
||||
var xMax=(p0-pMin)*0.5//允许幅度变化最大值
|
||||
//V左侧,下降段
|
||||
var curMin=p0;
|
||||
for(var i2=vi0;isV&&i2<minI;i2++){
|
||||
var v=freqPs[i2];
|
||||
if(v<=curMin){
|
||||
curMin=v;
|
||||
}else if(v-curMin>xMax){
|
||||
isV=0;//下降段检测到过度上升
|
||||
};
|
||||
};
|
||||
//V右侧,上升段
|
||||
var curMax=pMin;
|
||||
for(var i2=minI;isV&&i2<vi1;i2++){
|
||||
var v=freqPs[i2];
|
||||
if(v>=curMax){
|
||||
curMax=v;
|
||||
}else if(curMax-v>xMax){
|
||||
isV=0;//上升段检测到过度下降
|
||||
};
|
||||
};
|
||||
|
||||
if(isV){
|
||||
pv0=FindIndex(v0, DTMF_Freqs[0], freqStep);
|
||||
pv1=FindIndex(v1, DTMF_Freqs[1], freqStep);
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
var key="";
|
||||
if (pv0 >= 0 && pv1 >= 0) {
|
||||
key = DTMF_Chars[pv0][pv1];
|
||||
if(debug)console.log(key,Math.round((startTotal+i0)/sampleRate*1000),p0.toFixed(2),p1.toFixed(2),Math.abs(p0-p1).toFixed(2)); //【测试】得出数值
|
||||
|
||||
if(lastIs){
|
||||
if(lastIs.key==key){//有效,增加校验次数
|
||||
lastCheckCount++;
|
||||
}else{//异常数据,恢复间隔计数
|
||||
key="";
|
||||
lastCheckCount=lastIs.old+lastCheckCount;
|
||||
};
|
||||
}else{
|
||||
//没有连续的信号,检查是否在100ms内有检测到信号,当中间是断开的那种
|
||||
if(prevIs && prevIs.old2 && prevIs.key==key){
|
||||
if(startTotal+i0-prevIs.start<100*sampleRate/1000){
|
||||
lastIs=prevIs;
|
||||
lastCheckCount=prevIs.old2+1;
|
||||
if(debug)console.warn("接续了开叉的信号"+lastCheckCount);
|
||||
};
|
||||
};
|
||||
if(!lastIs){
|
||||
if(lastCheckCount>=muteCount){//间隔够了,开始按键识别计数
|
||||
lastIs={key:key,old:lastCheckCount,old2:lastCheckCount,start:startTotal+i0,pcms:[],use:0};
|
||||
lastCheckCount=1;
|
||||
}else{//上次识别以来间隔不够,重置间隔计数
|
||||
key="";
|
||||
lastCheckCount=0;
|
||||
};
|
||||
};
|
||||
};
|
||||
}else{
|
||||
if(lastIs){//下一个,恢复间隔计数
|
||||
lastIs.old2=lastCheckCount;
|
||||
lastCheckCount=lastIs.old+lastCheckCount;
|
||||
};
|
||||
};
|
||||
|
||||
if(key){
|
||||
if(debug)lastIs.pcms.push(arr);
|
||||
//按键有效,并且未push过
|
||||
if(lastCheckCount>=checkCount && !lastIs.use){
|
||||
lastIs.use=1;
|
||||
keys.push({
|
||||
key:key
|
||||
,time:Math.round(lastIs.start/sampleRate*1000)
|
||||
});
|
||||
};
|
||||
//重置间隔数据
|
||||
if(lastIs.use){
|
||||
if(debug)console.log(key+"有效按键",lastIs);
|
||||
lastIs.old=0;
|
||||
lastIs.old2=0;
|
||||
lastCheckCount=0;
|
||||
};
|
||||
}else{
|
||||
//未发现按键
|
||||
if(lastIs){
|
||||
if(debug)console.log(lastIs) //测试,输出疑似key
|
||||
prevIs=lastIs;
|
||||
};
|
||||
lastIs="";
|
||||
lastCheckCount++;
|
||||
};
|
||||
};
|
||||
|
||||
return {
|
||||
keys:keys
|
||||
|
||||
,lastIs:lastIs
|
||||
,lastCheckCount:lastCheckCount
|
||||
,prevIs:prevIs
|
||||
,totalLen:totalLen
|
||||
,pcm:pcmData
|
||||
,checkFactor:checkFactor
|
||||
,debug:debug
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
|
||||
var DTMF_Freqs = [
|
||||
[697, 770, 852, 941],
|
||||
[1209, 1336, 1477, 1633]
|
||||
];
|
||||
var DTMF_Chars = [
|
||||
["1", "2", "3", "A"],
|
||||
["4", "5", "6", "B"],
|
||||
["7", "8", "9", "C"],
|
||||
["*", "0", "#", "D"],
|
||||
];
|
||||
var FindIndex=function(freq, freqs, freqStep){
|
||||
var idx=-1,idxb=1000;
|
||||
for(var i=0;i<freqs.length;i++){
|
||||
var xb=Math.abs(freqs[i]-freq);
|
||||
if(idxb>xb){
|
||||
idxb=xb;
|
||||
if(xb<freqStep*2){//最多2个分辨率内误差
|
||||
idx=i;
|
||||
};
|
||||
};
|
||||
};
|
||||
return idx;
|
||||
};
|
||||
|
||||
}));
|
||||
196
public/extensions/dtmf.encode.js
Normal file
@@ -0,0 +1,196 @@
|
||||
/*
|
||||
录音 Recorder扩展,DTMF(电话拨号按键信号)编码生成器,生成按键对应的音频PCM信号
|
||||
|
||||
本扩展分两个功能:
|
||||
DTMF_Encode
|
||||
DTMF_EncodeMix
|
||||
|
||||
本扩展生成信号代码、原理简单粗暴,纯js实现易于移植,0依赖
|
||||
|
||||
使用场景:DTMF按键信号生成,软电话实时发送DTMF按键信号等
|
||||
https://github.com/xiangyuecn/Recorder
|
||||
*/
|
||||
(function(factory){
|
||||
var browser=typeof window=="object" && !!window.document;
|
||||
var win=browser?window:Object; //非浏览器环境,Recorder挂载在Object下面
|
||||
var rec=win.Recorder,ni=rec.i18n;
|
||||
factory(rec,ni,ni.$T,browser);
|
||||
}(function(Recorder,i18n,$T,isBrowser){
|
||||
"use strict";
|
||||
|
||||
/**
|
||||
本方法用来生成单个按键信号pcm数据,属于底层方法,要混合多个按键信号到别的pcm中请用封装好的DTMF_EncodeMix方法
|
||||
|
||||
参数:
|
||||
key: 单个按键0-9#*
|
||||
sampleRate:123 要生成的pcm采样率
|
||||
duration:100 按键音持续时间
|
||||
mute:50 按键音前后静音时长
|
||||
返回:
|
||||
pcm:[Int16,...],生成单个按键信号
|
||||
**/
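/*
Usage sketch (arbitrary key and sample rate): generating one key tone; the result is plain
16-bit pcm that can be played or mixed elsewhere.

    var pcm=Recorder.DTMF_Encode("5",16000,100,50); //100ms "5" tone with 50ms of silence before and after
    console.log(pcm.length+" samples");             //(100+50*2)ms * 16000/1000 = 3200 samples
*/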
|
||||
Recorder.DTMF_Encode=function(key,sampleRate,duration,mute){
|
||||
var durSize=Math.floor(sampleRate*(duration||100)/1000);
|
||||
var muteSize=Math.floor(sampleRate*(mute==null?50:mute)/1000);
|
||||
var pcm0=new Int16Array(durSize+muteSize*2);
|
||||
var pcm1=new Int16Array(durSize+muteSize*2);
|
||||
|
||||
// https://github.com/watilde/node-dtfm/blob/master/encode.js
|
||||
var f0=DTMF_Freqs[key][0];
|
||||
var f1=DTMF_Freqs[key][1];
|
||||
var vol=0.3;
|
||||
for(var i=0;i<durSize;i++){
|
||||
var v0=vol*Math.sin((2 * Math.PI) * f0 * (i / sampleRate));
|
||||
var v1=vol*Math.sin((2 * Math.PI) * f1 * (i / sampleRate));
|
||||
pcm0[i+muteSize]=Math.max(-1,Math.min(1,v0))*0x7FFF;
|
||||
pcm1[i+muteSize]=Math.max(-1,Math.min(1,v1))*0x7FFF;
|
||||
};
|
||||
|
||||
//简单叠加 低群 和 高群 信号
|
||||
Mix(pcm0,0,pcm1,0);
|
||||
return pcm0;
|
||||
};
|
||||
|
||||
|
||||
/**返回EncodeMix对象,将输入的按键信号混合到持续输入的pcm流中,当.mix(inputPcms)提供的太短的pcm会无法完整放下一个完整的按键信号,所以需要不停调用.mix(inputPcms)进行混合**/
|
||||
Recorder.DTMF_EncodeMix=function(set){
|
||||
return new EncodeMix(set);
|
||||
};
|
||||
var EncodeMix=function(set){
|
||||
var This=this;
|
||||
This.set={
|
||||
duration:100 //按键信号持续时间 ms,最小值为30ms
|
||||
,mute:25 //按键音前后静音时长 ms,取值为0也是可以的
|
||||
,interval:200 //两次按键信号间隔时长 ms,间隔内包含了duration+mute*2,最小值为120ms
|
||||
};
|
||||
for(var k in set){
|
||||
This.set[k]=set[k];
|
||||
};
|
||||
|
||||
This.keys="";
|
||||
This.idx=0;
|
||||
This.state={keyIdx:-1,skip:0};
|
||||
};
|
||||
EncodeMix.prototype={
|
||||
/** 添加一个按键或多个按键 "0" "123#*",后面慢慢通过mix方法混合到pcm中,无返回值 **/
|
||||
add:function(keys){
|
||||
this.keys+=keys;
|
||||
}
|
||||
/** 将已添加的按键信号混合到pcm中,pcms:[[Int16,...],...]二维数组,sampleRate:pcm的采样率,index:pcms第一维开始索引,将从这个pcm开始混合。
|
||||
返回混合状态对象。
|
||||
注意:调用本方法会修改pcms中的内容,因此混合结果就在pcms内。 **/
|
||||
,mix:function(pcms,sampleRate,index){
|
||||
index||(index=0);
|
||||
var This=this,set=This.set;
|
||||
var newEncodes=[];
|
||||
|
||||
var state=This.state;
|
||||
var pcmPos=0;
|
||||
loop:
|
||||
for(var i0=index;i0<pcms.length;i0++){
|
||||
var pcm=pcms[i0];
|
||||
|
||||
var key=This.keys.charAt(This.idx);
|
||||
if(!key){//没有需要处理的按键,把间隔消耗掉
|
||||
state.skip=Math.max(0, state.skip-pcm.length);
|
||||
} else while(key){
|
||||
//按键间隔处理
|
||||
if(state.skip){
|
||||
var op=pcm.length-pcmPos;
|
||||
if(op<=state.skip){
|
||||
state.skip-=op;
|
||||
pcmPos=0;
|
||||
continue loop;
|
||||
};
|
||||
pcmPos+=state.skip;
|
||||
state.skip=0;
|
||||
};
|
||||
|
||||
var keyPcm=state.keyPcm;
|
||||
|
||||
//这个key已经混合过,看看有没有剩余的信号
|
||||
if(state.keyIdx==This.idx){
|
||||
if(state.cur>=keyPcm.length){
|
||||
state.keyIdx=-1;
|
||||
};
|
||||
};
|
||||
//新的key,生成信号
|
||||
if(state.keyIdx!=This.idx){
|
||||
keyPcm=Recorder.DTMF_Encode(key,sampleRate,set.duration,set.mute);
|
||||
state.keyIdx=This.idx;
|
||||
state.cur=0;
|
||||
state.keyPcm=keyPcm;
|
||||
|
||||
newEncodes.push({
|
||||
key:key
|
||||
,data:keyPcm
|
||||
});
|
||||
};
|
||||
|
||||
//将keyPcm混合到当前pcm中,实际是替换逻辑
|
||||
var res=Mix(pcm,pcmPos,keyPcm,state.cur,true);
|
||||
state.cur=res.cur;
|
||||
pcmPos=res.last;
|
||||
|
||||
//下一个按键
|
||||
if(res.cur>=keyPcm.length){
|
||||
This.idx++;
|
||||
key=This.keys.charAt(This.idx);
|
||||
state.skip=Math.floor(sampleRate*(set.interval-set.duration-set.mute*2)/1000);
|
||||
};
|
||||
|
||||
//当前pcm的位置已消耗完
|
||||
if(res.last>=pcm.length){
|
||||
pcmPos=0;
|
||||
continue loop;//下一个pcm
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
return {
|
||||
newEncodes:newEncodes //本次混合新生成的按键信号列表 [{key:"*",data:[Int16,...]},...],如果没有产生新信号将为空数组
|
||||
,hasNext:This.idx<This.keys.length //是否还有未混合完的信号
|
||||
};
|
||||
}
|
||||
};
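/*
Usage sketch (the onProcess signature and the newBufferIdx argument are assumptions about the
Recorder core, not taken from this file): queued keys are mixed into the realtime buffers in
place as they arrive.

    var mixer=Recorder.DTMF_EncodeMix({duration:100,mute:25,interval:200});
    mixer.add("123#");
    var onProcess=function(buffers,powerLevel,duration,sampleRate,newBufferIdx){
        var res=mixer.mix(buffers,sampleRate,newBufferIdx); //modifies buffers in place
        if(res.newEncodes.length)console.log("injected:",res.newEncodes);
    };
*/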
|
||||
|
||||
|
||||
|
||||
|
||||
//teach.realtime.mix_multiple 抄过来的简单混合算法
|
||||
var Mix=function(buffer,pos1,add,pos2,mute){
|
||||
for(var j=pos1,cur=pos2;;j++,cur++){
|
||||
if(j>=buffer.length || cur>=add.length){
|
||||
return {
|
||||
last:j
|
||||
,cur:cur
|
||||
};
|
||||
};
|
||||
|
||||
if(mute){
|
||||
buffer[j]=0;//置为0即为静音
|
||||
};
|
||||
var data_mix,data1=buffer[j],data2=add[cur];
|
||||
|
||||
//简单混音算法 https://blog.csdn.net/dancing_night/article/details/53080819
|
||||
if(data1<0 && data2<0){
|
||||
data_mix = data1+data2 - (data1 * data2 / -0x7FFF);
|
||||
}else{
|
||||
data_mix = data1+data2 - (data1 * data2 / 0x7FFF);
|
||||
};
|
||||
|
||||
buffer[j]=data_mix;
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
|
||||
var DTMF_Freqs={
|
||||
'1': [697, 1209] ,'2': [697, 1336] ,'3': [697, 1477] ,'A': [697, 1633]
|
||||
,'4': [770, 1209] ,'5': [770, 1336] ,'6': [770, 1477] ,'B': [770, 1633]
|
||||
,'7': [852, 1209] ,'8': [852, 1336] ,'9': [852, 1477] ,'C': [852, 1633]
|
||||
,'*': [941, 1209] ,'0': [941, 1336] ,'#': [941, 1477] ,'D': [941, 1633]
|
||||
};
|
||||
|
||||
|
||||
}));
|
||||
377
public/extensions/frequency.histogram.view.js
Normal file
@@ -0,0 +1,377 @@
|
||||
/*
|
||||
录音 Recorder扩展,频率直方图显示
|
||||
使用本扩展需要引入src/extensions/lib.fft.js支持,直方图特意优化主要显示0-5khz语音部分(线性),其他高频显示区域较小,不适合用来展示音乐频谱,可通过配置fullFreq来恢复成完整的线性频谱,或自行修改源码修改成倍频程频谱(伯德图、对数频谱);本可视化插件可以移植到其他语言环境,如需定制可联系作者
|
||||
|
||||
https://github.com/xiangyuecn/Recorder
|
||||
|
||||
本扩展核心算法主要参考了Java开源库jmp123 版本0.3 的代码:
|
||||
https://www.iteye.com/topic/851459
|
||||
https://sourceforge.net/projects/jmp123/files/
|
||||
*/
|
||||
(function(factory){
|
||||
var browser=typeof window=="object" && !!window.document;
|
||||
var win=browser?window:Object; //非浏览器环境,Recorder挂载在Object下面
|
||||
var rec=win.Recorder,ni=rec.i18n;
|
||||
factory(rec,ni,ni.$T,browser);
|
||||
}(function(Recorder,i18n,$T,isBrowser){
|
||||
"use strict";
|
||||
|
||||
var FrequencyHistogramView=function(set){
|
||||
return new fn(set);
|
||||
};
|
||||
var ViewTxt="FrequencyHistogramView";
|
||||
var fn=function(set){
|
||||
var This=this;
|
||||
var o={
|
||||
/*
|
||||
elem:"css selector" //自动显示到dom,并以此dom大小为显示大小
|
||||
//或者配置显示大小,手动把frequencyObj.elem显示到别的地方
|
||||
,width:0 //显示宽度
|
||||
,height:0 //显示高度
|
||||
|
||||
H5环境以上配置二选一
|
||||
|
||||
compatibleCanvas: CanvasObject //提供一个兼容H5的canvas对象,需支持getContext("2d"),支持设置width、height,支持drawImage(canvas,...)
|
||||
,width:0 //canvas显示宽度
|
||||
,height:0 //canvas显示高度
|
||||
非H5环境使用以上配置
|
||||
*/
|
||||
|
||||
scale:2 //缩放系数,应为正整数,使用2(3? no!)倍宽高进行绘制,避免移动端绘制模糊
|
||||
|
||||
,fps:20 //绘制帧率,不可过高
|
||||
|
||||
,lineCount:30 //直方图柱子数量,数量的多少对性能影响不大,密集运算集中在FFT算法中
|
||||
,widthRatio:0.6 //柱子线条宽度占比,为所有柱子占用整个视图宽度的比例,剩下的空白区域均匀插入柱子中间;默认值也基本相当于一根柱子占0.6,一根空白占0.4;设为1不留空白,当视图不足容下所有柱子时也不留空白
|
||||
,spaceWidth:0 //柱子间空白固定基础宽度,柱子宽度自适应,当不为0时widthRatio无效,当视图不足容下所有柱子时将不会留空白,允许为负数,让柱子发生重叠
|
||||
,minHeight:0 //柱子保留基础高度,position不为±1时应该保留点高度
|
||||
,position:-1 //绘制位置,取值-1到1,-1为最底下,0为中间,1为最顶上,小数为百分比
|
||||
,mirrorEnable:false //是否启用镜像,如果启用,视图宽度会分成左右两块,右边这块进行绘制,左边这块进行镜像(以中间这根柱子的中心进行镜像)
|
||||
|
||||
,stripeEnable:true //是否启用柱子顶上的峰值小横条,position不是-1时应当关闭,否则会很丑
|
||||
,stripeHeight:3 //峰值小横条基础高度
|
||||
,stripeMargin:6 //峰值小横条和柱子保持的基础距离
|
||||
|
||||
,fallDuration:1000 //柱子从最顶上下降到最底部最长时间ms
|
||||
,stripeFallDuration:3500 //峰值小横条从最顶上下降到底部最长时间ms
|
||||
|
||||
//柱子颜色配置:[位置,css颜色,...] 位置: 取值0.0-1.0之间
|
||||
,linear:[0,"rgba(0,187,17,1)",0.5,"rgba(255,215,0,1)",1,"rgba(255,102,0,1)"]
|
||||
//峰值小横条渐变颜色配置,取值格式和linear一致,留空为柱子的渐变颜色
|
||||
,stripeLinear:null
|
||||
|
||||
,shadowBlur:0 //柱子阴影基础大小,设为0不显示阴影,如果柱子数量太多时请勿开启,非常影响性能
|
||||
,shadowColor:"#bbb" //柱子阴影颜色
|
||||
,stripeShadowBlur:-1 //峰值小横条阴影基础大小,设为0不显示阴影,-1为柱子的大小,如果柱子数量太多时请勿开启,非常影响性能
|
||||
,stripeShadowColor:"" //峰值小横条阴影颜色,留空为柱子的阴影颜色
|
||||
|
||||
,fullFreq:false //是否要绘制所有频率;默认false主要绘制5khz以下的频率,高频部分占比很少,此时不同的采样率对频谱显示几乎没有影响;设为true后不同采样率下显示的频谱是不一样的,因为 最大频率=采样率/2 会有差异
|
||||
//当发生绘制时会回调此方法,参数为当前绘制的频率数据和采样率,可实现多个直方图同时绘制,只消耗一个input输入和计算时间
|
||||
,onDraw:function(frequencyData,sampleRate){}
|
||||
};
|
||||
for(var k in set){
|
||||
o[k]=set[k];
|
||||
};
|
||||
This.set=set=o;
|
||||
|
||||
var cCanvas="compatibleCanvas";
|
||||
if(set[cCanvas]){
|
||||
var canvas=This.canvas=set[cCanvas];
|
||||
}else{
|
||||
if(!isBrowser)throw new Error($T.G("NonBrowser-1",[ViewTxt]));
|
||||
var elem=set.elem;
|
||||
if(elem){
|
||||
if(typeof(elem)=="string"){
|
||||
elem=document.querySelector(elem);
|
||||
}else if(elem.length){
|
||||
elem=elem[0];
|
||||
};
|
||||
};
|
||||
if(elem){
|
||||
set.width=elem.offsetWidth;
|
||||
set.height=elem.offsetHeight;
|
||||
};
|
||||
|
||||
var thisElem=This.elem=document.createElement("div");
|
||||
thisElem.style.fontSize=0;
|
||||
thisElem.innerHTML='<canvas style="width:100%;height:100%;"/>';
|
||||
|
||||
var canvas=This.canvas=thisElem.querySelector("canvas");
|
||||
|
||||
if(elem){
|
||||
elem.innerHTML="";
|
||||
elem.appendChild(thisElem);
|
||||
};
|
||||
};
|
||||
var scale=set.scale;
|
||||
var width=set.width*scale;
|
||||
var height=set.height*scale;
|
||||
if(!width || !height){
|
||||
throw new Error($T.G("IllegalArgs-1",[ViewTxt+" width=0 height=0"]));
|
||||
};
|
||||
|
||||
canvas.width=width;
|
||||
canvas.height=height;
|
||||
var ctx=This.ctx=canvas.getContext("2d");
|
||||
|
||||
if(!Recorder.LibFFT){
|
||||
throw new Error($T.G("NeedImport-2",[ViewTxt,"src/extensions/lib.fft.js"]));
|
||||
};
|
||||
This.fft=Recorder.LibFFT(1024);
|
||||
|
||||
//柱子所在高度
|
||||
This.lastH=[];
|
||||
//峰值小横条所在高度
|
||||
This.stripesH=[];
|
||||
};
|
||||
fn.prototype=FrequencyHistogramView.prototype={
|
||||
genLinear:function(ctx,colors,from,to){
|
||||
var rtv=ctx.createLinearGradient(0,from,0,to);
|
||||
for(var i=0;i<colors.length;){
|
||||
rtv.addColorStop(colors[i++],colors[i++]);
|
||||
};
|
||||
return rtv;
|
||||
}
|
||||
,input:function(pcmData,powerLevel,sampleRate){
|
||||
var This=this;
|
||||
This.sampleRate=sampleRate;
|
||||
This.pcmData=pcmData;
|
||||
This.pcmPos=0;
|
||||
|
||||
This.inputTime=Date.now();
|
||||
This.schedule();
|
||||
}
|
||||
,schedule:function(){
|
||||
var This=this,set=This.set;
|
||||
var interval=Math.floor(1000/set.fps);
|
||||
if(!This.timer){
|
||||
This.timer=setInterval(function(){
|
||||
This.schedule();
|
||||
},interval);
|
||||
};
|
||||
|
||||
var now=Date.now();
|
||||
var drawTime=This.drawTime||0;
|
||||
if(now-This.inputTime>set.stripeFallDuration*1.3){
|
||||
//超时没有输入,顶部横条已全部落下,干掉定时器
|
||||
clearInterval(This.timer);
|
||||
This.timer=0;
|
||||
|
||||
This.lastH=[];//重置高度再绘制一次,避免定时不准没到底就停了
|
||||
This.stripesH=[];
|
||||
This.draw(null,This.sampleRate);
|
||||
return;
|
||||
};
|
||||
if(now-drawTime<interval){
|
||||
//没到间隔时间,不绘制
|
||||
return;
|
||||
};
|
||||
This.drawTime=now;
|
||||
|
||||
//调用FFT计算频率数据
|
||||
var bufferSize=This.fft.bufferSize;
|
||||
var pcm=This.pcmData;
|
||||
var pos=This.pcmPos;
|
||||
var arr=new Int16Array(bufferSize);
|
||||
for(var i=0;i<bufferSize&&pos<pcm.length;i++,pos++){
|
||||
arr[i]=pcm[pos];
|
||||
};
|
||||
This.pcmPos=pos;
|
||||
|
||||
var frequencyData=This.fft.transform(arr);
|
||||
|
||||
//推入绘制
|
||||
This.draw(frequencyData,This.sampleRate);
|
||||
}
|
||||
,draw:function(frequencyData,sampleRate){
|
||||
var This=this,set=This.set;
|
||||
var ctx=This.ctx;
|
||||
var scale=set.scale;
|
||||
var width=set.width*scale;
|
||||
var height=set.height*scale;
|
||||
var lineCount=set.lineCount;
|
||||
var bufferSize=This.fft.bufferSize;
|
||||
|
||||
|
||||
//计算高度位置
|
||||
var position=set.position;
|
||||
var posAbs=Math.abs(set.position);
|
||||
var originY=position==1?0:height;//y轴原点
|
||||
var heightY=height;//最高的一边高度
|
||||
if(posAbs<1){
|
||||
heightY=heightY/2;
|
||||
originY=heightY;
|
||||
heightY=Math.floor(heightY*(1+posAbs));
|
||||
originY=Math.floor(position>0?originY*(1-posAbs):originY*(1+posAbs));
|
||||
};
|
||||
|
||||
var lastH=This.lastH;
|
||||
var stripesH=This.stripesH;
|
||||
var speed=Math.ceil(heightY/(set.fallDuration/(1000/set.fps)));
|
||||
var stripeSpeed=Math.ceil(heightY/(set.stripeFallDuration/(1000/set.fps)));
|
||||
var stripeMargin=set.stripeMargin*scale;
|
||||
|
||||
var Y0=1 << (Math.round(Math.log(bufferSize)/Math.log(2) + 3) << 1);
|
||||
var logY0 = Math.log(Y0)/Math.log(10);
|
||||
var dBmax=20*Math.log(0x7fff)/Math.log(10);
|
||||
|
||||
var fftSize=bufferSize/2,fftSize5k=fftSize;
|
||||
if(!set.fullFreq){//非绘制所有频率时,计算5khz所在位置,8000采样率及以下最高只有4khz
|
||||
fftSize5k=Math.min(fftSize,Math.floor(fftSize*5000/(sampleRate/2)));
|
||||
}
|
||||
var isFullFreq=fftSize5k==fftSize;
|
||||
var line80=isFullFreq?lineCount:Math.round(lineCount*0.8);//80%的柱子位置
|
||||
var fftSizeStep1=fftSize5k/line80;
|
||||
var fftSizeStep2=isFullFreq?0:(fftSize-fftSize5k)/(lineCount-line80);
|
||||
var fftIdx=0;
|
||||
for(var i=0;i<lineCount;i++){
|
||||
// !fullFreq 时不采用jmp123的非线性划分频段,录音语音并不适用于音乐的频率,应当弱化高频部分
|
||||
//80%关注0-5khz主要人声部分 20%关注剩下的高频,这样不管什么采样率都能做到大部分频率显示一致。
|
||||
var start=Math.ceil(fftIdx);
|
||||
if(i<line80){
|
||||
//5khz以下
|
||||
fftIdx+=fftSizeStep1;
|
||||
}else{
|
||||
//5khz以上
|
||||
fftIdx+=fftSizeStep2;
|
||||
};
|
||||
var end=Math.ceil(fftIdx); if(end==start)end++;
|
||||
end=Math.min(end,fftSize);
|
||||
|
||||
|
||||
//参考AudioGUI.java .drawHistogram方法
|
||||
|
||||
//查找当前频段的最大"幅值"
|
||||
var maxAmp=0;
|
||||
if(frequencyData){
|
||||
for (var j=start; j<end; j++) {
|
||||
maxAmp=Math.max(maxAmp,Math.abs(frequencyData[j]));
|
||||
};
|
||||
};
|
||||
|
||||
//计算音量
|
||||
var dB= (maxAmp > Y0) ? Math.floor((Math.log(maxAmp)/Math.log(10) - logY0) * 17) : 0;
|
||||
var h=heightY*Math.min(dB/dBmax,1);
|
||||
|
||||
//使柱子匀速下降
|
||||
lastH[i]=(lastH[i]||0)-speed;
|
||||
if(h<lastH[i]){h=lastH[i];};
|
||||
if(h<0){h=0;};
|
||||
lastH[i]=h;
|
||||
|
||||
var shi=stripesH[i]||0;
|
||||
if(h&&h+stripeMargin>shi) {
|
||||
stripesH[i]=h+stripeMargin;
|
||||
}else{
|
||||
//使峰值小横条匀速度下落
|
||||
var sh =shi-stripeSpeed;
|
||||
if(sh < 0){sh = 0;};
|
||||
stripesH[i] = sh;
|
||||
};
|
||||
};
|
||||
|
||||
//开始绘制图形
|
||||
ctx.clearRect(0,0,width,height);
|
||||
|
||||
var linear1=This.genLinear(ctx,set.linear,originY,originY-heightY);//上半部分的填充
|
||||
var stripeLinear1=set.stripeLinear&&This.genLinear(ctx,set.stripeLinear,originY,originY-heightY)||linear1;//上半部分的峰值小横条填充
|
||||
|
||||
var linear2=This.genLinear(ctx,set.linear,originY,originY+heightY);//下半部分的填充
|
||||
var stripeLinear2=set.stripeLinear&&This.genLinear(ctx,set.stripeLinear,originY,originY+heightY)||linear2;//下半部分的峰值小横条填充
|
||||
|
||||
//计算柱子间距
|
||||
var mirrorEnable=set.mirrorEnable;
|
||||
var mirrorCount=mirrorEnable?lineCount*2-1:lineCount;//镜像柱子数量翻一倍-1根
|
||||
|
||||
var widthRatio=set.widthRatio;
|
||||
var spaceWidth=set.spaceWidth*scale;
|
||||
if(spaceWidth!=0){
|
||||
widthRatio=(width-spaceWidth*(mirrorCount+1))/width;
|
||||
};
|
||||
|
||||
for(var i=0;i<2;i++){
|
||||
var lineFloat=Math.max(1*scale,(width*widthRatio)/mirrorCount);//柱子宽度至少1个单位
|
||||
var lineWN=Math.floor(lineFloat),lineWF=lineFloat-lineWN;//提取出小数部分
|
||||
var spaceFloat=(width-mirrorCount*lineFloat)/(mirrorCount+1);//均匀间隔,首尾都留空,可能为负数,柱子将发生重叠
|
||||
if(spaceFloat>0 && spaceFloat<1){
|
||||
widthRatio=1; spaceFloat=0; //不够一个像素,丢弃不绘制间隔,重新计算
|
||||
}else break;
|
||||
};
|
||||
|
||||
//绘制
|
||||
var minHeight=set.minHeight*scale;
|
||||
var XFloat=mirrorEnable?(width-lineWN)/2-spaceFloat:0;//镜像时,中间柱子位于正中心
|
||||
for(var iMirror=0;iMirror<2;iMirror++){
|
||||
if(iMirror){ ctx.save(); ctx.scale(-1,1); }
|
||||
var xMirror=iMirror?width:0; //绘制镜像部分,不用drawImage(canvas)进行镜像绘制,提升兼容性(iOS微信小程序bug https://developers.weixin.qq.com/community/develop/doc/000aaca2148dc8a235a0fb8c66b000)
|
||||
|
||||
//绘制柱子
|
||||
ctx.shadowBlur=set.shadowBlur*scale;
|
||||
ctx.shadowColor=set.shadowColor;
|
||||
for(var i=0,xFloat=XFloat,wFloat=0,x,y,w,h;i<lineCount;i++){
|
||||
xFloat+=spaceFloat;
|
||||
x=Math.floor(xFloat)-xMirror;
|
||||
w=lineWN; wFloat+=lineWF; if(wFloat>=1){ w++; wFloat--; } //小数凑够1像素
|
||||
h=Math.max(lastH[i],minHeight);
|
||||
|
||||
//绘制上半部分
|
||||
if(originY!=0){
|
||||
y=originY-h;
|
||||
ctx.fillStyle=linear1;
|
||||
ctx.fillRect(x, y, w, h);
|
||||
};
|
||||
//绘制下半部分
|
||||
if(originY!=height){
|
||||
ctx.fillStyle=linear2;
|
||||
ctx.fillRect(x, originY, w, h);
|
||||
};
|
||||
|
||||
xFloat+=w;
|
||||
};
|
||||
|
||||
//绘制柱子顶上峰值小横条
|
||||
if(set.stripeEnable){
|
||||
var stripeShadowBlur=set.stripeShadowBlur;
|
||||
ctx.shadowBlur=(stripeShadowBlur==-1?set.shadowBlur:stripeShadowBlur)*scale;
|
||||
ctx.shadowColor=set.stripeShadowColor||set.shadowColor;
|
||||
var stripeHeight=set.stripeHeight*scale;
|
||||
for(var i=0,xFloat=XFloat,wFloat=0,x,y,w,h;i<lineCount;i++){
|
||||
xFloat+=spaceFloat;
|
||||
x=Math.floor(xFloat)-xMirror;
|
||||
w=lineWN; wFloat+=lineWF; if(wFloat>=1){ w++; wFloat--; } //小数凑够1像素
|
||||
h=stripesH[i];
|
||||
|
||||
//绘制上半部分
|
||||
if(originY!=0){
|
||||
y=originY-h-stripeHeight;
|
||||
if(y<0){y=0;};
|
||||
ctx.fillStyle=stripeLinear1;
|
||||
ctx.fillRect(x, y, w, stripeHeight);
|
||||
};
|
||||
//绘制下半部分
|
||||
if(originY!=height){
|
||||
y=originY+h;
|
||||
if(y+stripeHeight>height){
|
||||
y=height-stripeHeight;
|
||||
};
|
||||
ctx.fillStyle=stripeLinear2;
|
||||
ctx.fillRect(x, y, w, stripeHeight);
|
||||
};
|
||||
|
||||
xFloat+=w;
|
||||
};
|
||||
};
|
||||
|
||||
if(iMirror){ ctx.restore(); }
|
||||
if(!mirrorEnable) break;
|
||||
};
|
||||
|
||||
if(frequencyData){
|
||||
set.onDraw(frequencyData,sampleRate);
|
||||
};
|
||||
}
|
||||
};
|
||||
Recorder[ViewTxt]=FrequencyHistogramView;
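/*
Usage sketch (".recwave" is an assumed css selector): the view is fed from the Recorder
onProcess callback with the newest pcm buffer; lib.fft.js must already be loaded.

    var histView=Recorder.FrequencyHistogramView({elem:".recwave"});
    var rec=Recorder({
        type:"mp3"
        ,onProcess:function(buffers,powerLevel,duration,sampleRate){
            histView.input(buffers[buffers.length-1],powerLevel,sampleRate);
        }
    });
*/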
|
||||
|
||||
|
||||
}));
|
||||
118
public/extensions/lib.fft.js
Normal file
@@ -0,0 +1,118 @@
|
||||
/*
|
||||
时域转频域,快速傅里叶变换(FFT)
|
||||
https://github.com/xiangyuecn/Recorder
|
||||
|
||||
var fft=Recorder.LibFFT(bufferSize)
|
||||
bufferSize取值2的n次方
|
||||
|
||||
fft.bufferSize 实际采用的bufferSize
|
||||
fft.transform(inBuffer)
|
||||
inBuffer:[Int16,...] 数组长度必须是bufferSize
|
||||
返回[Float64(Long),...],长度为bufferSize/2
|
||||
*/
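/*
Usage sketch (illustrative): transforming one frame of pcm and finding the strongest bin.

    var fft=Recorder.LibFFT(1024);
    var frame=new Int16Array(fft.bufferSize);   //fill with pcm samples before transforming
    var freqs=fft.transform(frame);             //length is bufferSize/2
    var maxI=0;
    for(var i=1;i<freqs.length;i++){ if(freqs[i]>freqs[maxI])maxI=i; }
    console.log("strongest bin:",maxI);
*/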
|
||||
(function(factory){
|
||||
var browser=typeof window=="object" && !!window.document;
|
||||
var win=browser?window:Object; //非浏览器环境,Recorder挂载在Object下面
|
||||
var rec=win.Recorder,ni=rec.i18n;
|
||||
factory(rec,ni,ni.$T,browser);
|
||||
}(function(Recorder,i18n,$T,isBrowser){
|
||||
"use strict";
|
||||
|
||||
/*
|
||||
从FFT.java 移植,Java开源库:jmp123 版本0.3
|
||||
https://www.iteye.com/topic/851459
|
||||
https://sourceforge.net/projects/jmp123/files/
|
||||
*/
|
||||
Recorder.LibFFT=function(bufferSize){
|
||||
var FFT_N_LOG,FFT_N,MINY;
|
||||
var real, imag, sintable, costable;
|
||||
var bitReverse;
|
||||
|
||||
var FFT_Fn=function(bufferSize) {//bufferSize只能取值2的n次方
|
||||
FFT_N_LOG=Math.round(Math.log(bufferSize)/Math.log(2));
|
||||
FFT_N = 1 << FFT_N_LOG;
|
||||
MINY = ((FFT_N << 2) * Math.sqrt(2));
|
||||
|
||||
real = [];
|
||||
imag = [];
|
||||
sintable = [0];
|
||||
costable = [0];
|
||||
bitReverse = [];
|
||||
|
||||
var i, j, k, reve;
|
||||
for (i = 0; i < FFT_N; i++) {
|
||||
k = i;
|
||||
for (j = 0, reve = 0; j != FFT_N_LOG; j++) {
|
||||
reve <<= 1;
|
||||
reve |= (k & 1);
|
||||
k >>>= 1;
|
||||
}
|
||||
bitReverse[i] = reve;
|
||||
}
|
||||
|
||||
var theta, dt = 2 * Math.PI / FFT_N;
|
||||
for (i = (FFT_N >> 1) - 1; i > 0; i--) {
|
||||
theta = i * dt;
|
||||
costable[i] = Math.cos(theta);
|
||||
sintable[i] = Math.sin(theta);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
用于频谱显示的快速傅里叶变换
|
||||
inBuffer 输入FFT_N个实数,返回 FFT_N/2个输出值(复数模的平方)。
|
||||
*/
|
||||
var getModulus=function(inBuffer) {
|
||||
var i, j, k, ir, j0 = 1, idx = FFT_N_LOG - 1;
|
||||
var cosv, sinv, tmpr, tmpi;
|
||||
for (i = 0; i != FFT_N; i++) {
|
||||
real[i] = inBuffer[bitReverse[i]];
|
||||
imag[i] = 0;
|
||||
}
|
||||
|
||||
for (i = FFT_N_LOG; i != 0; i--) {
|
||||
for (j = 0; j != j0; j++) {
|
||||
cosv = costable[j << idx];
|
||||
sinv = sintable[j << idx];
|
||||
for (k = j; k < FFT_N; k += j0 << 1) {
|
||||
ir = k + j0;
|
||||
tmpr = cosv * real[ir] - sinv * imag[ir];
|
||||
tmpi = cosv * imag[ir] + sinv * real[ir];
|
||||
real[ir] = real[k] - tmpr;
|
||||
imag[ir] = imag[k] - tmpi;
|
||||
real[k] += tmpr;
|
||||
imag[k] += tmpi;
|
||||
}
|
||||
}
|
||||
j0 <<= 1;
|
||||
idx--;
|
||||
}
|
||||
|
||||
j = FFT_N >> 1;
|
||||
var outBuffer=new Float64Array(j);
|
||||
/*
|
||||
* 输出模的平方:
|
||||
* for(i = 1; i <= j; i++)
|
||||
* inBuffer[i-1] = real[i] * real[i] + imag[i] * imag[i];
|
||||
*
|
||||
* 如果FFT只用于频谱显示,可以"淘汰"幅值较小的而减少浮点乘法运算. MINY的值
|
||||
* 和Spectrum.Y0,Spectrum.logY0对应.
|
||||
*/
|
||||
sinv = MINY;
|
||||
cosv = -MINY;
|
||||
for (i = j; i != 0; i--) {
|
||||
tmpr = real[i];
|
||||
tmpi = imag[i];
|
||||
if (tmpr > cosv && tmpr < sinv && tmpi > cosv && tmpi < sinv)
|
||||
outBuffer[i - 1] = 0;
|
||||
else
|
||||
outBuffer[i - 1] = Math.round(tmpr * tmpr + tmpi * tmpi);
|
||||
}
|
||||
return outBuffer;
|
||||
}
|
||||
|
||||
FFT_Fn(bufferSize);
|
||||
return {transform:getModulus,bufferSize:FFT_N};
|
||||
};
|
||||
|
||||
}));
|
||||
1155
public/extensions/sonic.js
Normal file
File diff suppressed because it is too large
278
public/extensions/wavesurfer.view.js
Normal file
@@ -0,0 +1,278 @@
|
||||
/*
|
||||
录音 Recorder扩展,音频可视化波形显示
|
||||
|
||||
https://github.com/xiangyuecn/Recorder
|
||||
|
||||
外观和名称来源于:
|
||||
https://github.com/katspaugh/wavesurfer.js https://github.com/collab-project/videojs-record
|
||||
|
||||
本扩展的波形绘制直接简单的使用PCM的采样数值大小来进行线条的绘制,同一段音频绘制出的波形和Audition内显示的波形外观上几乎没有差异。
|
||||
*/
|
||||
(function(factory){
|
||||
var browser=typeof window=="object" && !!window.document;
|
||||
var win=browser?window:Object; //非浏览器环境,Recorder挂载在Object下面
|
||||
var rec=win.Recorder,ni=rec.i18n;
|
||||
factory(rec,ni,ni.$T,browser);
|
||||
}(function(Recorder,i18n,$T,isBrowser){
|
||||
"use strict";
|
||||
|
||||
var WaveSurferView=function(set){
|
||||
return new fn(set);
|
||||
};
|
||||
var ViewTxt="WaveSurferView";
|
||||
var fn=function(set){
|
||||
var This=this;
|
||||
var o={
|
||||
/*
|
||||
elem:"css selector" //自动显示到dom,并以此dom大小为显示大小
|
||||
//或者配置显示大小,手动把surferObj.elem显示到别的地方
|
||||
,width:0 //显示宽度
|
||||
,height:0 //显示高度
|
||||
|
||||
H5环境以上配置二选一
|
||||
|
||||
compatibleCanvas: CanvasObject //提供一个兼容H5的canvas对象,需支持getContext("2d"),支持设置width、height,支持drawImage(canvas,...)
|
||||
,compatibleCanvas_2x: CanvasObject //提供一个宽度是compatibleCanvas的2倍canvas对象
|
||||
,width:0 //canvas显示宽度
|
||||
,height:0 //canvas显示高度
|
||||
非H5环境使用以上配置
|
||||
*/
|
||||
|
||||
scale:2 //缩放系数,应为正整数,使用2(3? no!)倍宽高进行绘制,避免移动端绘制模糊
|
||||
|
||||
,fps:50 //绘制帧率,不可过高,50-60fps运动性质动画明显会流畅舒适,实际显示帧率达不到这个值也并无太大影响
|
||||
|
||||
,duration:2500 //当前视图窗口内最大绘制的波形的持续时间,此处决定了移动速率
|
||||
,direction:1 //波形前进方向,取值:1由左往右,-1由右往左
|
||||
,position:0 //绘制位置,取值-1到1,-1为最底下,0为中间,1为最顶上,小数为百分比
|
||||
|
||||
,centerHeight:1 //中线基础粗细,如果为0不绘制中线,position=±1时应当设为0
|
||||
|
||||
//波形颜色配置:[位置,css颜色,...] 位置: 取值0.0-1.0之间
|
||||
,linear:[0,"rgba(0,187,17,1)",0.7,"rgba(255,215,0,1)",1,"rgba(255,102,0,1)"]
|
||||
,centerColor:"" //中线css颜色,留空取波形第一个渐变颜色
|
||||
};
|
||||
for(var k in set){
|
||||
o[k]=set[k];
|
||||
};
|
||||
This.set=set=o;
|
||||
|
||||
var cCanvas="compatibleCanvas";
|
||||
if(set[cCanvas]){
|
||||
var canvas=This.canvas=set[cCanvas];
|
||||
var canvas2=This.canvas2=set[cCanvas+"_2x"];
|
||||
}else{
|
||||
if(!isBrowser)throw new Error($T.G("NonBrowser-1",[ViewTxt]));
|
||||
var elem=set.elem;
|
||||
if(elem){
|
||||
if(typeof(elem)=="string"){
|
||||
elem=document.querySelector(elem);
|
||||
}else if(elem.length){
|
||||
elem=elem[0];
|
||||
};
|
||||
};
|
||||
if(elem){
|
||||
set.width=elem.offsetWidth;
|
||||
set.height=elem.offsetHeight;
|
||||
};
|
||||
|
||||
var thisElem=This.elem=document.createElement("div");
|
||||
thisElem.style.fontSize=0;
|
||||
thisElem.innerHTML='<canvas style="width:100%;height:100%;"/>';
|
||||
|
||||
var canvas=This.canvas=thisElem.querySelector("canvas");
|
||||
var canvas2=This.canvas2=document.createElement("canvas");
|
||||
|
||||
if(elem){
|
||||
elem.innerHTML="";
|
||||
elem.appendChild(thisElem);
|
||||
};
|
||||
};
|
||||
var scale=set.scale;
|
||||
var width=set.width*scale;
|
||||
var height=set.height*scale;
|
||||
if(!width || !height){
|
||||
throw new Error($T.G("IllegalArgs-1",[ViewTxt+" width=0 height=0"]));
|
||||
};
|
||||
|
||||
canvas.width=width;
|
||||
canvas.height=height;
|
||||
var ctx=This.ctx=canvas.getContext("2d");
|
||||
|
||||
canvas2.width=width*2;//卷轴,后台绘制画布能容纳两块窗口内容,进行无缝滚动
|
||||
canvas2.height=height;
|
||||
var ctx2=This.ctx2=canvas2.getContext("2d");
|
||||
|
||||
This.x=0;
|
||||
};
|
||||
fn.prototype=WaveSurferView.prototype={
|
||||
genLinear:function(ctx,colors,from,to){
|
||||
var rtv=ctx.createLinearGradient(0,from,0,to);
|
||||
for(var i=0;i<colors.length;){
|
||||
rtv.addColorStop(colors[i++],colors[i++]);
|
||||
};
|
||||
return rtv;
|
||||
}
|
||||
,input:function(pcmData,powerLevel,sampleRate){
|
||||
var This=this;
|
||||
This.sampleRate=sampleRate;
|
||||
This.pcmData=pcmData;
|
||||
This.pcmPos=0;
|
||||
|
||||
This.inputTime=Date.now();
|
||||
This.schedule();
|
||||
}
|
||||
,schedule:function(){
|
||||
var This=this,set=This.set;
|
||||
var interval=Math.floor(1000/set.fps);
|
||||
if(!This.timer){
|
||||
This.timer=setInterval(function(){
|
||||
This.schedule();
|
||||
},interval);
|
||||
};
|
||||
|
||||
var now=Date.now();
|
||||
var drawTime=This.drawTime||0;
|
||||
if(now-drawTime<interval){
|
||||
//没到间隔时间,不绘制
|
||||
return;
|
||||
};
|
||||
This.drawTime=now;
|
||||
|
||||
//切分当前需要的绘制数据
|
||||
var bufferSize=This.sampleRate/set.fps;
|
||||
var pcm=This.pcmData;
|
||||
var pos=This.pcmPos;
|
||||
var arr=new Int16Array(Math.min(bufferSize,pcm.length-pos));
|
||||
for(var i=0;i<arr.length;i++,pos++){
|
||||
arr[i]=pcm[pos];
|
||||
};
|
||||
This.pcmPos=pos;
|
||||
|
||||
//推入绘制
|
||||
if(arr.length){
|
||||
This.draw(arr,This.sampleRate);
|
||||
}else{
|
||||
if(now-This.inputTime>1300){
|
||||
//超时没有输入,干掉定时器
|
||||
clearInterval(This.timer);
|
||||
This.timer=0;
|
||||
};
|
||||
};
|
||||
}
|
||||
,draw:function(pcmData,sampleRate){
|
||||
var This=this,set=This.set;
|
||||
var ctx=This.ctx2;
|
||||
var scale=set.scale;
|
||||
var width=set.width*scale;
|
||||
var width2=width*2;
|
||||
var height=set.height*scale;
|
||||
var lineWidth=1*scale;//一条线占用1个单位长度
|
||||
|
||||
//计算高度位置
|
||||
var position=set.position;
|
||||
var posAbs=Math.abs(set.position);
|
||||
var originY=position==1?0:height;//y轴原点
|
||||
var heightY=height;//最高的一边高度
|
||||
if(posAbs<1){
|
||||
heightY=heightY/2;
|
||||
originY=heightY;
|
||||
heightY=Math.floor(heightY*(1+posAbs));
|
||||
originY=Math.floor(position>0?originY*(1-posAbs):originY*(1+posAbs));
|
||||
};
|
||||
|
||||
//计算绘制占用长度
|
||||
var pcmDuration=pcmData.length*1000/sampleRate;
|
||||
var pcmWidth=pcmDuration*width/set.duration;
|
||||
pcmWidth+=This.drawLoss||0;
|
||||
var pointCount=0;
|
||||
if(pcmWidth<lineWidth){
|
||||
This.drawLoss=pcmWidth;
|
||||
//pointCount=0; 不够一根不绘制
|
||||
}else{
|
||||
This.drawLoss=0;
|
||||
pointCount=Math.floor(pcmWidth/lineWidth);
|
||||
};
|
||||
|
||||
//***后台卷轴连续绘制***
|
||||
var linear1=This.genLinear(ctx,set.linear,originY,originY-heightY);//上半部分的填充
|
||||
var linear2=This.genLinear(ctx,set.linear,originY,originY+heightY);//下半部分的填充
|
||||
|
||||
var x=This.x;
|
||||
var step=pcmData.length/pointCount;
|
||||
for(var i=0,idx=0;i<pointCount;i++){
|
||||
var j=Math.floor(idx);
|
||||
var end=Math.floor(idx+step);
|
||||
idx+=step;
|
||||
|
||||
//寻找区间内最大值
|
||||
var max=0;
|
||||
for(;j<end;j++){
|
||||
max=Math.max(max,Math.abs(pcmData[j]));
|
||||
};
|
||||
|
||||
//计算高度
|
||||
var h=heightY*Math.min(1,max/0x7fff);
|
||||
|
||||
//绘制当前线条,不管方向,从x:0往x:max方向画就是了
|
||||
//绘制上半部分
|
||||
if(originY!=0){
|
||||
ctx.fillStyle=linear1;
|
||||
ctx.fillRect(x, originY-h, lineWidth, h);
|
||||
};
|
||||
//绘制下半部分
|
||||
if(originY!=height){
|
||||
ctx.fillStyle=linear2;
|
||||
ctx.fillRect(x, originY, lineWidth, h);
|
||||
};
|
||||
|
||||
x+=lineWidth;
|
||||
//超过卷轴宽度,移动画布第二个窗口内容到第一个窗口
|
||||
if(x>=width2){
|
||||
ctx.clearRect(0,0,width,height);
|
||||
ctx.drawImage(This.canvas2,width,0,width,height,0,0,width,height);
|
||||
ctx.clearRect(width,0,width,height);
|
||||
x=width;
|
||||
};
|
||||
};
|
||||
This.x=x;
|
||||
|
||||
//***画回到显示区域***
|
||||
ctx=This.ctx;
|
||||
ctx.clearRect(0,0,width,height);
|
||||
|
||||
//绘制一条中线
|
||||
var centerHeight=set.centerHeight*scale;
|
||||
if(centerHeight){
|
||||
var y=originY-Math.floor(centerHeight/2);
|
||||
y=Math.max(y,0);
|
||||
y=Math.min(y,height-centerHeight);
|
||||
|
||||
ctx.fillStyle=set.centerColor||set.linear[1];
|
||||
ctx.fillRect(0, y, width, centerHeight);
|
||||
};
|
||||
|
||||
//画回画布
|
||||
var srcX=0,srcW=x,destX=0;
|
||||
if(srcW>width){
|
||||
srcX=srcW-width;
|
||||
srcW=width;
|
||||
}else{
|
||||
destX=width-srcW;
|
||||
};
|
||||
|
||||
var direction=set.direction;
|
||||
if(direction==-1){//由右往左
|
||||
ctx.drawImage(This.canvas2,srcX,0,srcW,height,destX,0,srcW,height);
|
||||
}else{//由左往右
|
||||
ctx.save();
|
||||
ctx.scale(-1,1);
|
||||
ctx.drawImage(This.canvas2,srcX,0,srcW,height,-width+destX,0,srcW,height);
|
||||
ctx.restore();
|
||||
};
|
||||
}
|
||||
};
|
||||
Recorder[ViewTxt]=WaveSurferView;
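/*
Usage sketch (".recwave" is an assumed css selector): the scrolling waveform is driven by the
same onProcess wiring as the other view extensions.

    var surferView=Recorder.WaveSurferView({elem:".recwave",duration:2500});
    var rec=Recorder({
        onProcess:function(buffers,powerLevel,duration,sampleRate){
            surferView.input(buffers[buffers.length-1],powerLevel,sampleRate);
        }
    });
*/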
|
||||
|
||||
|
||||
}));
|
||||
229
public/extensions/waveview.js
Normal file
@@ -0,0 +1,229 @@
|
||||
/*
|
||||
录音 Recorder扩展,动态波形显示
|
||||
https://github.com/xiangyuecn/Recorder
|
||||
*/
|
||||
(function(factory){
|
||||
var browser=typeof window=="object" && !!window.document;
|
||||
var win=browser?window:Object; //非浏览器环境,Recorder挂载在Object下面
|
||||
var rec=win.Recorder,ni=rec.i18n;
|
||||
factory(rec,ni,ni.$T,browser);
|
||||
}(function(Recorder,i18n,$T,isBrowser){
|
||||
"use strict";
|
||||
|
||||
var WaveView=function(set){
|
||||
return new fn(set);
|
||||
};
|
||||
var ViewTxt="WaveView";
|
||||
var fn=function(set){
|
||||
var This=this;
|
||||
var o={
|
||||
/*
|
||||
elem:"css selector" //自动显示到dom,并以此dom大小为显示大小
|
||||
//或者配置显示大小,手动把waveviewObj.elem显示到别的地方
|
||||
,width:0 //显示宽度
|
||||
,height:0 //显示高度
|
||||
|
||||
H5环境以上配置二选一
|
||||
|
||||
compatibleCanvas: CanvasObject //提供一个兼容H5的canvas对象,需支持getContext("2d"),支持设置width、height,支持drawImage(canvas,...)
|
||||
,width:0 //canvas显示宽度
|
||||
,height:0 //canvas显示高度
|
||||
非H5环境使用以上配置
|
||||
*/
|
||||
|
||||
scale:2 //缩放系数,应为正整数,使用2(3? no!)倍宽高进行绘制,避免移动端绘制模糊
|
||||
,speed:9 //移动速度系数,越大越快
|
||||
,phase:21.8 //相位,调整了速度后,调整这个值得到一个看起来舒服的波形
|
||||
|
||||
,fps:20 //绘制帧率,调整后也需调整phase值
|
||||
,keep:true //当停止了input输入时,是否保持波形,设为false停止后将变成一条线
|
||||
|
||||
,lineWidth:3 //线条基础粗细
|
||||
|
||||
//渐变色配置:[位置,css颜色,...] 位置: 取值0.0-1.0之间
|
||||
,linear1:[0,"rgba(150,96,238,1)",0.2,"rgba(170,79,249,1)",1,"rgba(53,199,253,1)"] //线条渐变色1,从左到右
|
||||
,linear2:[0,"rgba(209,130,255,0.6)",1,"rgba(53,199,255,0.6)"] //线条渐变色2,从左到右
|
||||
,linearBg:[0,"rgba(255,255,255,0.2)",1,"rgba(54,197,252,0.2)"] //背景渐变色,从上到下
|
||||
};
|
||||
for(var k in set){
|
||||
o[k]=set[k];
|
||||
};
|
||||
This.set=set=o;
|
||||
|
||||
var cCanvas="compatibleCanvas";
|
||||
if(set[cCanvas]){
|
||||
var canvas=This.canvas=set[cCanvas];
|
||||
}else{
|
||||
if(!isBrowser)throw new Error($T.G("NonBrowser-1",[ViewTxt]));
|
||||
var elem=set.elem;
|
||||
if(elem){
|
||||
if(typeof(elem)=="string"){
|
||||
elem=document.querySelector(elem);
|
||||
}else if(elem.length){
|
||||
elem=elem[0];
|
||||
};
|
||||
};
|
||||
if(elem){
|
||||
set.width=elem.offsetWidth;
|
||||
set.height=elem.offsetHeight;
|
||||
};
|
||||
|
||||
var thisElem=This.elem=document.createElement("div");
|
||||
thisElem.style.fontSize=0;
|
||||
thisElem.innerHTML='<canvas style="width:100%;height:100%;"/>';
|
||||
|
||||
var canvas=This.canvas=thisElem.querySelector("canvas");
|
||||
|
||||
if(elem){
|
||||
elem.innerHTML="";
|
||||
elem.appendChild(thisElem);
|
||||
};
|
||||
};
|
||||
var scale=set.scale;
|
||||
var width=set.width*scale;
|
||||
var height=set.height*scale;
|
||||
if(!width || !height){
|
||||
throw new Error($T.G("IllegalArgs-1",[ViewTxt+" width=0 height=0"]));
|
||||
};
|
||||
|
||||
canvas.width=width;
|
||||
canvas.height=height;
|
||||
var ctx=This.ctx=canvas.getContext("2d");
|
||||
|
||||
This.linear1=This.genLinear(ctx,width,set.linear1);
|
||||
This.linear2=This.genLinear(ctx,width,set.linear2);
|
||||
This.linearBg=This.genLinear(ctx,height,set.linearBg,true);
|
||||
|
||||
This._phase=0;
|
||||
};
|
||||
fn.prototype=WaveView.prototype={
|
||||
genLinear:function(ctx,size,colors,top){
|
||||
var rtv=ctx.createLinearGradient(0,0,top?0:size,top?size:0);
|
||||
for(var i=0;i<colors.length;){
|
||||
rtv.addColorStop(colors[i++],colors[i++]);
|
||||
};
|
||||
return rtv;
|
||||
}
|
||||
,genPath:function(frequency,amplitude,phase){
|
||||
//曲线生成算法参考 https://github.com/HaloMartin/MCVoiceWave/blob/f6dc28975fbe0f7fc6cc4dbc2e61b0aa5574e9bc/MCVoiceWave/MCVoiceWaveView.m#L268
|
||||
var rtv=[];
|
||||
var This=this,set=This.set;
|
||||
var scale=set.scale;
|
||||
var width=set.width*scale;
|
||||
var maxAmplitude=set.height*scale/2;
|
||||
|
||||
for(var x=0;x<=width;x+=scale) {
|
||||
var scaling=(1+Math.cos(Math.PI+(x/width)*2*Math.PI))/2;
|
||||
var y=scaling*maxAmplitude*amplitude*Math.sin(2*Math.PI*(x/width)*frequency+phase)+maxAmplitude;
|
||||
rtv.push(y);
|
||||
}
|
||||
return rtv;
|
||||
}
|
||||
,input:function(pcmData,powerLevel,sampleRate){
|
||||
var This=this;
|
||||
This.sampleRate=sampleRate;
|
||||
This.pcmData=pcmData;
|
||||
This.pcmPos=0;
|
||||
|
||||
This.inputTime=Date.now();
|
||||
This.schedule();
|
||||
}
|
||||
,schedule:function(){
|
||||
var This=this,set=This.set;
|
||||
var interval=Math.floor(1000/set.fps);
|
||||
if(!This.timer){
|
||||
This.timer=setInterval(function(){
|
||||
This.schedule();
|
||||
},interval);
|
||||
};
|
||||
|
||||
var now=Date.now();
|
||||
var drawTime=This.drawTime||0;
|
||||
if(now-drawTime<interval){
|
||||
//没到间隔时间,不绘制
|
||||
return;
|
||||
};
|
||||
This.drawTime=now;
|
||||
|
||||
//切分当前需要的绘制数据
|
||||
var bufferSize=This.sampleRate/set.fps;
|
||||
var pcm=This.pcmData;
|
||||
var pos=This.pcmPos;
|
||||
var len=Math.max(0, Math.min(bufferSize,pcm.length-pos));
|
||||
var sum=0;
|
||||
for(var i=0;i<len;i++,pos++){
|
||||
sum+=Math.abs(pcm[pos]);
|
||||
};
|
||||
This.pcmPos=pos;
|
||||
|
||||
//推入绘制
|
||||
if(len || !set.keep){
|
||||
This.draw(Recorder.PowerLevel(sum, len));
|
||||
}
|
||||
if(!len && now-This.inputTime>1300){
|
||||
//超时没有输入,干掉定时器
|
||||
clearInterval(This.timer);
|
||||
This.timer=0;
|
||||
}
|
||||
}
|
||||
,draw:function(powerLevel){
|
||||
var This=this,set=This.set;
|
||||
var ctx=This.ctx;
|
||||
var scale=set.scale;
|
||||
var width=set.width*scale;
|
||||
var height=set.height*scale;
|
||||
|
||||
var speedx=set.speed/set.fps;
|
||||
var phase=This._phase-=speedx;//位移速度
|
||||
var phase2=phase+speedx*set.phase;
|
||||
var amplitude=powerLevel/100;
|
||||
var path1=This.genPath(2,amplitude,phase);
|
||||
var path2=This.genPath(1.8,amplitude,phase2);
|
||||
|
||||
//开始绘制图形
|
||||
ctx.clearRect(0,0,width,height);
|
||||
|
||||
//绘制包围背景
|
||||
ctx.beginPath();
|
||||
for(var i=0,x=0;x<=width;i++,x+=scale) {
|
||||
if (x==0) {
|
||||
ctx.moveTo(x,path1[i]);
|
||||
}else {
|
||||
ctx.lineTo(x,path1[i]);
|
||||
};
|
||||
};
|
||||
i--;
|
||||
for(var x=width-1;x>=0;i--,x-=scale) {
|
||||
ctx.lineTo(x,path2[i]);
|
||||
};
|
||||
ctx.closePath();
|
||||
ctx.fillStyle=This.linearBg;
|
||||
ctx.fill();
|
||||
|
||||
//绘制线
|
||||
This.drawPath(path2,This.linear2);
|
||||
This.drawPath(path1,This.linear1);
|
||||
}
|
||||
,drawPath:function(path,linear){
|
||||
var This=this,set=This.set;
|
||||
var ctx=This.ctx;
|
||||
var scale=set.scale;
|
||||
var width=set.width*scale;
|
||||
|
||||
ctx.beginPath();
|
||||
for(var i=0,x=0;x<=width;i++,x+=scale) {
|
||||
if (x==0) {
|
||||
ctx.moveTo(x,path[i]);
|
||||
}else {
|
||||
ctx.lineTo(x,path[i]);
|
||||
};
|
||||
};
|
||||
ctx.lineWidth=set.lineWidth*scale;
|
||||
ctx.strokeStyle=linear;
|
||||
ctx.stroke();
|
||||
}
|
||||
};
|
||||
Recorder[ViewTxt]=WaveView;
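/*
Usage sketch (".recwave" is an assumed css selector): the animated wave is driven mainly by the
powerLevel value computed in onProcess.

    var waveView=Recorder.WaveView({elem:".recwave",speed:9});
    var rec=Recorder({
        onProcess:function(buffers,powerLevel,duration,sampleRate){
            waveView.input(buffers[buffers.length-1],powerLevel,sampleRate);
        }
    });
*/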
|
||||
|
||||
|
||||
}));