Add the Maven dependency:
<dependency>
    <groupId>net.sourceforge.jtransforms</groupId>
    <artifactId>jtransforms</artifactId>
    <version>2.4.0</version>
</dependency>
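If the project is built with Gradle instead of Maven, the same artifact can presumably be declared with the equivalent coordinates:
implementation 'net.sourceforge.jtransforms:jtransforms:2.4.0'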
Test class FourierTransformTest.java:
import edu.emory.mathcs.jtransforms.fft.DoubleFFT_1D;

import javax.sound.sampled.UnsupportedAudioFileException;
import java.io.File;
import java.io.IOException;

public class FourierTransformTest {

    public static void main(String[] args) throws UnsupportedAudioFileException, IOException {
        // Load the sample data of a WAV file into a double array
        File srcWavFile = new File("C:\\E\\素材\\音频\\wav\\audio.wav");
        double[] audioData = AudioUtils.wavToDoubleArray(srcWavFile);
        System.out.println("audioData.length = " + audioData.length);

        // Apply an in-place forward FFT to the audio data
        new DoubleFFT_1D(audioData.length).realForward(audioData);
        /*
         * The Fourier transform converts the time-domain signal (the waveform) into a
         * frequency-domain signal (the spectrum). After realForward, the array holds the
         * spectrum in packed form: the real and imaginary parts of each frequency bin are
         * stored as consecutive elements, and the magnitude of a bin, sqrt(re*re + im*im),
         * tells how strong that frequency component is. In a spectrum plot the horizontal
         * axis is frequency and the vertical axis is magnitude; see the magnitude sketch
         * after this class for how to compute it.
         */

        // Split the transformed spectrum data into three segments (three frequency bands)
        int segmentSize = audioData.length / 3;
        double[] audio1 = new double[segmentSize];
        double[] audio2 = new double[segmentSize];
        double[] audio3 = new double[segmentSize];
        System.arraycopy(audioData, segmentSize * 0, audio1, 0, segmentSize);
        System.arraycopy(audioData, segmentSize * 1, audio2, 0, segmentSize);
        System.arraycopy(audioData, segmentSize * 2, audio3, 0, segmentSize);

        // Inverse-transform each segment back to the time domain to obtain the separated audio
        new DoubleFFT_1D(audio1.length).realInverse(audio1, true);
        new DoubleFFT_1D(audio2.length).realInverse(audio2, true);
        new DoubleFFT_1D(audio3.length).realInverse(audio3, true);
        System.out.println("Transform finished");

        // The separated audio data now sits in audio1, audio2 and audio3; write each to a WAV file
        AudioUtils.doubleArrayToWAV(audio1, new File("C:\\E\\素材\\音频\\wav\\part1.wav"));
        AudioUtils.doubleArrayToWAV(audio2, new File("C:\\E\\素材\\音频\\wav\\part2.wav"));
        AudioUtils.doubleArrayToWAV(audio3, new File("C:\\E\\素材\\音频\\wav\\part3.wav"));
        System.out.println("Files written");
    }
}
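To actually inspect the spectrum mentioned in the comment above, the packed realForward output can be converted into per-bin magnitudes. A minimal sketch, assuming an even-length input and the JTransforms packing for realForward (a[0] = Re[0], a[1] = Re[n/2], a[2k] = Re[k], a[2k+1] = Im[k] for 0 < k < n/2); the class and method names here are illustrative and not part of the code above:

import edu.emory.mathcs.jtransforms.fft.DoubleFFT_1D;

public class SpectrumUtils {

    // Returns the magnitude of each frequency bin of a real-valued signal.
    // Assumes samples.length is even (see the packing layout described above).
    public static double[] spectrumMagnitudes(double[] samples) {
        int n = samples.length;
        double[] packed = samples.clone();        // keep the caller's time-domain data intact
        new DoubleFFT_1D(n).realForward(packed);

        double[] magnitudes = new double[n / 2 + 1];
        magnitudes[0] = Math.abs(packed[0]);      // DC bin is purely real
        magnitudes[n / 2] = Math.abs(packed[1]);  // Nyquist bin is stored in packed[1]
        for (int k = 1; k < n / 2; k++) {
            double re = packed[2 * k];
            double im = packed[2 * k + 1];
            magnitudes[k] = Math.sqrt(re * re + im * im);
        }
        return magnitudes;
    }
}

Bin k corresponds to the frequency k * sampleRate / n, so once the WAV file's sample rate is known these magnitudes map directly onto the horizontal axis of a spectrum plot.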
The utility class AudioUtils.java used above:
import javax.sound.sampled.*;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;

public class AudioUtils {

    // Reads a WAV file and converts its samples to doubles in [-1.0, 1.0].
    // Assumes 16-bit signed little-endian PCM; channels remain interleaved.
    public static double[] wavToDoubleArray(File wavFile) throws UnsupportedAudioFileException, IOException {
        AudioInputStream audioInputStream = AudioSystem.getAudioInputStream(wavFile);
        AudioFormat audioFormat = audioInputStream.getFormat();
        int numChannels = audioFormat.getChannels();
        int sampleSizeInBytes = audioFormat.getSampleSizeInBits() / 8;
        int frameSize = numChannels * sampleSizeInBytes;
        int bufferSize = (int) (audioInputStream.getFrameLength() * frameSize);
        byte[] audioBytes = new byte[bufferSize];
        // read() may return fewer bytes than requested, so keep reading until the buffer is full
        int offset = 0;
        while (offset < bufferSize) {
            int read = audioInputStream.read(audioBytes, offset, bufferSize - offset);
            if (read == -1) {
                break;
            }
            offset += read;
        }
        double[] audioData = new double[audioBytes.length / 2];
        for (int i = 0, j = 0; i < audioBytes.length; i += 2, j++) {
            // Combine two little-endian bytes into a signed 16-bit sample, then normalize
            int sample = (audioBytes[i + 1] << 8) | (audioBytes[i] & 0xFF);
            audioData[j] = sample / 32768.0;
        }
        return audioData;
    }

    // Writes normalized samples back out as a WAV file.
    // The format is hard-coded to 44.1 kHz, 16-bit, stereo PCM and should match the source file.
    public static void doubleArrayToWAV(double[] audioData, File outputFile) throws IOException {
        AudioFormat audioFormat = new AudioFormat(44100, 16, 2, true, false);
        byte[] audioBytes = new byte[audioData.length * 2];
        for (int i = 0, j = 0; i < audioData.length; i++, j += 2) {
            // Scale back to the 16-bit range and store as little-endian bytes
            short sample = (short) (audioData[i] * 32767);
            audioBytes[j] = (byte) (sample & 0xFF);
            audioBytes[j + 1] = (byte) ((sample >> 8) & 0xFF);
        }
        // The stream length is given in frames, not samples: with 16-bit stereo a frame is 4 bytes
        long frameCount = audioBytes.length / audioFormat.getFrameSize();
        AudioInputStream audioInputStream =
                new AudioInputStream(new ByteArrayInputStream(audioBytes), audioFormat, frameCount);
        AudioSystem.write(audioInputStream, AudioFileFormat.Type.WAVE, outputFile);
    }
}
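Both helpers assume the source WAV is 16-bit little-endian PCM. If you are unsure what a given file contains, you can print its format before running the transform; a minimal sketch (the class name FormatCheck and the path are placeholders):

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import java.io.File;

public class FormatCheck {
    public static void main(String[] args) throws Exception {
        // Prints encoding, sample rate, bit depth, channel count and endianness of the WAV file
        AudioFormat format = AudioSystem
                .getAudioFileFormat(new File("C:\\E\\素材\\音频\\wav\\audio.wav"))
                .getFormat();
        System.out.println(format);
    }
}

If the reported format differs (for example 8-bit or mono audio), the conversion constants in AudioUtils would need to be adjusted accordingly.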