[英]MediaCodec - How do I mix audio samples from two Decoder MediaCodec objects in sync?
[英]How do I use JTransforms to analyze and display the frequency spectrum of audio samples from ExoPlayer?
目前,我正在使用 JTransforms 對來自 ExoPlayer 的音頻樣本執行 FFT。播放器使用 RTMP MediaSource 播放,音頻樣本取自一個定制的 MediaCodecAudioRenderer,該渲染器經過修改以複製解碼後的緩衝區。
我得到的是長度為 4096(或 4608……是的,有些 MP3 奇怪地具有非 2 的冪的樣本大小,我不知道為什麼)的 ByteBuffer。那就是我要輸入到 FloatFFT_1D 或 DoubleFFT_1D 對象的數據,對嗎?
現在,我的代碼如下:
crf.setHook((dupe, format) -> {
    // Only analyze buffers while the audio media source is the active one.
    if (currentMediaSource != mMediaSourceAudio) {
        return;
    }
    // Snapshot the duplicated buffer's contents from the beginning.
    byte[] data = new byte[dupe.limit()];
    dupe.position(0);
    dupe.get(data);
    // Kick off the FFT only when the media format is known.
    if (format != null) {
        new FFTTask(data, format).execute();
    }
    Log.i("straight_from_renderer", data.length+" "+format);
});
...
...
/**
 * Background task that runs a forward FFT over one buffer of decoded PCM audio
 * and produces its magnitude spectrum.
 *
 * <p>Expects 16-bit signed PCM as produced by MediaCodec, which emits samples in
 * little-endian byte order on Android. The byte-to-short conversion therefore
 * forces LITTLE_ENDIAN explicitly — ByteBuffer's BIG_ENDIAN default was the
 * cause of the noisy, pattern-less spectrum described below.
 */
private class FFTTask extends AsyncTask<Void, Void, float[]> {
byte[] bufferContents;   // raw PCM bytes copied from the renderer
MediaFormat format;      // decoded stream format (sample rate, channel count, ...)
/**
 * @param samples raw PCM bytes duplicated from the decoder output buffer
 * @param format  the buffer's MediaFormat; must be non-null (caller checks)
 */
FFTTask(byte[] samples, MediaFormat format) {
this.bufferContents = samples;
this.format = format;
}
/** Widens each 16-bit sample to a float, without scaling or normalization. */
float[] floatMe(short[] pcms) {
float[] floaters = new float[pcms.length];
for (int i = 0; i < pcms.length; i++) {
floaters[i] = pcms[i];
}
return floaters;
}
/**
 * Reinterprets the byte array as little-endian 16-bit PCM samples.
 * Drops the last byte if the array length is odd.
 */
short[] shortMe(byte[] bytes) {
short[] out = new short[bytes.length / 2]; // will drop last byte if odd number
// FIX: MediaCodec PCM is little-endian; ByteBuffer defaults to BIG_ENDIAN,
// which swapped every sample's bytes and garbled the spectrum.
ByteBuffer bb = ByteBuffer.wrap(bytes).order(java.nio.ByteOrder.LITTLE_ENDIAN);
for (int i = 0; i < out.length; i++) {
out[i] = bb.getShort();
}
return out;
}
/**
 * Reinterprets the byte array as little-endian 32-bit IEEE floats.
 * Drops any trailing bytes that do not form a full 4-byte float.
 */
float[] directFloatMe(byte[] bytes) {
// FIX: a float is 4 bytes, not 2 — the original `length / 2` made getFloat()
// read past the end of the buffer (BufferUnderflowException).
float[] out = new float[bytes.length / 4];
ByteBuffer bb = ByteBuffer.wrap(bytes).order(java.nio.ByteOrder.LITTLE_ENDIAN);
for (int i = 0; i < out.length; i++) {
out[i] = bb.getFloat();
}
return out;
}
/** Converts one complex bin (re, im) to decibels relative to {@code maxSquared}. */
private double db2(double r, double i, double maxSquared) {
// FIX: dB of a power ratio is 10*log10 — the original 5.0 halved every value.
return 10.0 * Math.log10((r * r + i * i) / maxSquared);
}
/**
 * Converts an FFT output array (packed re/im pairs) to dB in place.
 * NOTE(review): this follows the realForward packing convention where data[0]
 * is DC and data[1] holds the Nyquist bin; the trailing `data[j] = data[0]`
 * looks like it was meant to be `db2(data[1], 0.0, maxSquared)` — confirm
 * against the JTransforms realForward layout before relying on the last bin.
 */
double[] convertToDb(double[] data, double maxSquared) {
data[0] = db2(data[0], 0.0, maxSquared);
int j = 1;
for (int i=1; i < data.length - 1; i+=2, j++) {
data[j] = db2(data[i], data[i+1], maxSquared);
}
data[j] = data[0];
return data;
}
@Override
protected float[] doInBackground(Void... voids) {
//WARNING: bufferContents is from a 2-channel 48k bitrate audio, so convert to mono first?
/*
byte[] oneChannel = new byte[bufferContents.length/2];
for(int i = 0; i < oneChannel.length; i+=2) {
oneChannel[i] = bufferContents[i*2+2];
oneChannel[i+1] = bufferContents[i*2+3];
}
*/
float[] dataAsFloats = floatMe(shortMe(bufferContents));
int fftLen = dataAsFloats.length/2;
// NOTE(review): complexForward treats the input as fftLen interleaved (re, im)
// pairs, but dataAsFloats holds real samples only. For real input,
// new FloatFFT_1D(dataAsFloats.length) + realForward() is the intended API —
// confirm against the JTransforms docs before changing, as the bin indexing
// below would need to follow the realForward packing.
fft = new FloatFFT_1D(fftLen);
fft.complexForward(dataAsFloats);
float[] magnitudes = new float[dataAsFloats.length/2];
float magMax = 0;
int maxIndex = 0;
float dominantFreq = 0;
for(int i = 0; i < dataAsFloats.length/2; i++) {
float re = dataAsFloats[2*i];
float im = dataAsFloats[2*i+1];
// Arbitrary 1e7 scale keeps the magnitudes in a plottable range.
magnitudes[i] = (float)(Math.sqrt(re * re + im * im) / 1e7);
if(magnitudes[i] > magMax) {
magMax = magnitudes[i];
maxIndex = i;
}
}
// FIX: compute bin frequency in floating point — the original all-int
// expression truncated (sampleRate * maxIndex) / fftLen to a whole number
// of the division, losing sub-bin precision.
dominantFreq = (float) format.getInteger(MediaFormat.KEY_SAMPLE_RATE) * maxIndex / fftLen;
Log.i("fft_results", magMax+" "+dominantFreq);
return magnitudes;
}
@Override
protected void onPostExecute(float[] res) {
super.onPostExecute(res);
// Feed the magnitude spectrum to the visualizer view on the main thread.
caView.feedFFTMagnitudes(res);
}
}
我注釋掉了通道拆分代碼,因為我不確定是將字節數組批發還是按通道拆分,然后將其中一個通道的數據放入。
但是,最終的res
值確實很嘈雜-幅值混亂,並且沒有顯示通常音頻分析圖像應該顯示的明顯模式-相反,它只是一個緊密的鋸齒形,具有高於20000 Hz的較高值。
我究竟做錯了什么?
事實證明,我做錯的是字節到 short 的轉換。現在,我使用此函數將兩個字節轉換為一個 short:
/**
 * Assembles a signed 16-bit PCM sample from its two constituent bytes,
 * with {@code high} supplying the upper byte and {@code low} the lower byte.
 */
private short getSixteenBitSample(byte high, byte low) {
    int upper = high << 8;   // sign-extends, then shifts into the high byte
    int lower = low & 0xff;  // mask so the low byte is not sign-extended
    return (short) (upper | lower);
}
然后將short轉換為轉換數組中的float。 然后將該轉換數組通過FFT進行處理。
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.