I'm trying to build a graphic equalizer in Python. Everything is going well, but I can't get the audio to run in real time. I designed 5 sliders, one per frequency band (5 bands in total). Every time I move one of the sliders, the whole signal is recomputed and played back again with scikits.audiolab. Here is my code so you can see the details.

The equalizer function:

from scipy.io import wavfile
from scipy import signal
import pyaudio
import wave
import sys
# import matplotlib.pyplot as plt
# import numpy as np
import scikits.audiolab


def processAudio(audio, X, X2, X3, X4, X5):

    ## Import the audio signal
    samplerate, data = wavfile.read(audio)

    data = data.astype(float)  ## floating point
    d = (data[:, 0] + data[:, 1]) / 2  ## make it mono
    # d = d/max(d)

    ## Create the low pass filter
    centFL = 45.0  ## cutoff frequency of the low pass
    R = float((centFL * 2) / samplerate)

    b, a = signal.butter(4, R, 'low', analog=False)  ### create a lowpass filter

    # w, h = signal.freqs(b, a)  ## frequency response coefficients to plot
    # plt.semilogx(w, 20 * np.log10(abs(h)))
    # plt.title('Butterworth filter frequency response')
    # plt.xlabel('Frequency [radians / second]')
    # plt.ylabel('Amplitude [dB]')
    # plt.margins(0, 0.1)
    # plt.grid(which='both', axis='both')
    # plt.axvline(100, color='green')  # cutoff frequency
    # plt.show()

    YL = signal.filtfilt(b, a, d)  ## filter the signal with the low pass coefficients
    YL = YL / max(YL)  ## normalize YL

    G = 2 ** (X / 6)  ## slider value (dB) to linear gain
    YL = YL * G  ### gain for band 1

    # scikits.audiolab.play(YL, samplerate)

    ## High pass filter
    centFH = 15000.0  ## cutoff frequency of the high pass
    RH = float((centFH * 2) / samplerate)

    bH, aH = signal.butter(4, RH, 'high', analog=False)  ## create a highpass filter
    YH = signal.filtfilt(bH, aH, d)  ## apply the filter
    YH = YH / max(YH)  ## normalize

    G2 = 2 ** (X2 / 6)
    YH = YH * G2  ## gain for band 2

    # scikits.audiolab.play(YH, samplerate)
    # w2, h2 = signal.freqs(bH, aH)  ## frequency response coefficients to plot

    ## Three band pass filters

    ## 700 Hz center frequency
    centFB = 700.0
    RcentFB = float((centFB * 2) / samplerate)

    # start = centFL  ## start of the first filter = cutoff of the low pass
    start = float((centFL * 2) / samplerate)  ## normalized frequency (fraction of Nyquist)
    end = 2 * RcentFB - start

    bB, aB = signal.butter(4, [start, end], btype='band', analog=False)  ## filter coefficients
    YB = signal.filtfilt(bB, aB, d)  ## filter the signal
    YB = YB / max(YB)  #### normalize

    G3 = 2 ** (X3 / 6)
    YB = YB * G3  ### gain for band 3

    # scikits.audiolab.play(YB, samplerate)
    # W3, h3 = signal.freqs(bB, aB)

    ## Band pass filter, 2 kHz
    centFB2 = 2000.0
    RcentFB2 = float((centFB2 * 2) / samplerate)

    start2 = end
    end2 = 2 * RcentFB2 - start2

    bB2, aA2 = signal.butter(4, [start2, end2], btype='band', analog=False)
    YB2 = signal.filtfilt(bB2, aA2, d)
    YB2 = YB2 / max(YB2)

    G4 = 2 ** (X4 / 6)
    YB2 = YB2 * G4  ### gain for band 4

    # scikits.audiolab.play(YB2, samplerate)

    ## Band pass filter, 8 kHz
    centFB3 = 8000.0
    RcentFB3 = float((centFB3 * 2) / samplerate)

    start3 = end2
    end3 = RH

    bB3, aA3 = signal.butter(4, [start3, end3], btype='band', analog=False)
    YB3 = signal.filtfilt(bB3, aA3, d)
    YB3 = YB3 / max(YB3)

    G5 = 2 ** (X5 / 6)  #### dB to linear conversion
    YB3 = YB3 * G5  ### gain for band 5

    # scikits.audiolab.play(YB3, samplerate)

    Equalizer = YL + YB + YB2 + YB3 + YH

    #### Play Audio

The GUI code:
from Tkinter import *
import tkFileDialog
import threading
from Equalizer import processAudio

# GUI associated functions
filepath = ''

def choosefile():
    try:
        global filepath
        filepath = tkFileDialog.askopenfilename(filetypes=[('Audio Files', '*.wav')])
        filenamelabel.config(text=filepath)
    except ValueError:
        pass


def getSliderValues():
    sliderVector = []
    sliderVector.append(Value45hz.get())
    sliderVector.append(Value700hz.get())
    sliderVector.append(Value2khz.get())
    sliderVector.append(Value8khz.get())
    sliderVector.append(Value15khz.get())
    if filepath:
        audioThread = threading.Thread(target=processAudio, args=(filepath, sliderVector[0], sliderVector[1], sliderVector[2], sliderVector[3], sliderVector[4]))
        audioThread.start()
    test45hzslider.after(500, getSliderValues)

root = Tk()
root.title("Graphic Equalizer")
# Initiation of Frames
sliderframes = Frame(root)
sliderframes.pack(side=TOP)
slider1frame = Frame(sliderframes)
slider1frame.pack(side=LEFT)
slider2frame = Frame(sliderframes)
slider2frame.pack(side=LEFT)
slider3frame = Frame(sliderframes)
slider3frame.pack(side=LEFT)
slider4frame = Frame(sliderframes)
slider4frame.pack(side=LEFT)
slider5frame = Frame(sliderframes)
slider5frame.pack(side=LEFT)

importframe = Frame(root)
importframe.pack(side=BOTTOM)

# Variable creation
## These variables store the slider values when the button is pressed or the timer refreshes them
Value45hz = DoubleVar()
Value700hz = DoubleVar()
Value2khz = DoubleVar()
Value8khz = DoubleVar()
Value15khz = DoubleVar()

# All sliderframes widgets

# Slider 1
w = Scale(slider1frame, from_=12, to=-12, variable=Value45hz) ### slider label =45hz
w.set(0)
w.pack(side=TOP)
# Label for slider 1
label45hz = Label(slider1frame, text='45Hz')
label45hz.pack(side=TOP)

# Slider 2
w2 = Scale(slider2frame, from_=12, to=-12, variable=Value700hz)  #slider label = 700hz
w2.set(0)
w2.pack(side=TOP)
# Label for slider 2
label700hz = Label(slider2frame, text='700Hz')
label700hz.pack(side=TOP)

# Slider 3
w3 = Scale(slider3frame, from_=12, to=-12, variable=Value2khz)  # slider label = 2khz
w3.set(0)
w3.pack(side=TOP)
# Label for slider 3
label2khz = Label(slider3frame, text='2kHz')
label2khz.pack(side=TOP)

# Slider 4
w4 = Scale(slider4frame, from_=12, to=-12, variable=Value8khz)  # slider label = 8k
w4.set(0)
w4.pack(side=TOP)
# Label for slider 4
label8khz = Label(slider4frame, text='8kHz')
label8khz.pack(side=TOP)

# Slider 5
w5 = Scale(slider5frame, from_=12, to=-12, variable=Value15khz) ### slider label= 15khz
w5.set(0)
w5.pack(side=TOP)
# Label for slider 5
label15khz = Label(slider5frame, text='15kHz')
label15khz.pack(side=TOP)

# Audio Import Button
audiobutton = Button(importframe, text='Import Audio...', command=choosefile)
audiobutton.pack(side=LEFT)
filenamelabel = Label(importframe, text='No File Chosen.')
filenamelabel.pack(side=LEFT)
test45hzslider = Label(importframe, text='No Value')
test45hzslider.pack(side=LEFT)

#

getSliderValues()
mainloop()

## run it and see how it goes

### 45Hz, 700Hz, 2kHz, 8kHz, 15kHz

Best answer

I filter the signal using overlap-add, but I still can't find a way to hook up PyAudio for real-time processing (one possible hookup is sketched after the code below).

#### Overlap And ADD
import numpy as np
import math as m
from scipy.io import wavfile
from scipy import signal



import pyaudio
import wave
import sys
import time
# import matplotlib.pyplot as plt
# import numpy as np
import scikits.audiolab

# x = np.array([3,-2,4,1,1,0,-3,5])
# y = np.array([1, -1, 2])


############################################## Import the audio ###################################################
## Import Audio

fs, x = wavfile.read('Firstsaturday.wav')

###### PyAudio



# floating point

x = x.astype(float)


##make it mono
x = (x[:,0] + x[:,1])/2

############################################# Import the audio ###################################################



##########################################LOW PASS FILTER###################################################
numtaps = 100
## nyquist
nyq = fs/2.
## cutoff frequency
cutoff = 45.0
### create the filter
y = signal.firwin(numtaps,cutoff/nyq)

###########################################LOW PASS FILTER###################################################


###################################### FIRST BAND PASS FILTER #####################################################

centFB = 700.0

FB1S = cutoff ### start where the low pass cuts off

FB1E = 2*centFB - FB1S ### twice the center frequency minus the cutoff frequency


y2 = signal.firwin(numtaps,[FB1S,FB1E],nyq=fs/2,pass_zero=False)

# try it to hear how it sounds
###################################### FIRST BAND PASS FILTER #####################################################



###################################### SECOND BAND PASS FILTER #####################################################

### center frequency
centFB2 = 2000.0 ## 2khz

FB2S = FB1E ### start of the second filter

FB2E = 2*centFB2 - FB2S ### end of the filter

y3 = signal.firwin(numtaps,[FB2S,FB2E],nyq=fs/2,pass_zero=False)

###################################### SECOND BAND PASS FILTER #####################################################


###################################### THIRD BAND PASS FILTER #######################################################

centFB3 = 8000.0 ### 8khz

FB3S = FB2E ### start of the third filter = end of the second

FB3E = 2*centFB3 - FB3S

y4 = signal.firwin(numtaps,[FB3S,FB3E], nyq=fs/2,pass_zero=False)

###################################### THIRD BAND PASS FILTER #######################################################


######################################HIGH PASS FILTER #############################################################

centHP = 15000.0 #### 15khz
HPS = FB3E

HPE = 2*centHP - HPS

y5 = signal.firwin(numtaps,[HPS,HPE],nyq=fs/2,pass_zero=False)


######################################HIGH PASS FILTER #############################################################

# print(y5)
Equalizer = np.concatenate((y,y2,y3,y4,y5),axis=0)
# print(Equalizer)




# Equalizer = [y,y2,y3,y4,y5]




convlen = len(x) + len(Equalizer) - 1


### output buffer of zeros for the full convolution result

ConvResult = np.zeros(convlen,dtype=float,order='F')






# determine the segment length

seglen = 1024
# determine how many segments we have in audio signal x

Segments = m.ceil(float(len(x))/float(seglen))

SegmentINT = int(Segments)
# print(SegmentINT)

for i in range(0,SegmentINT):
    ## establish indexes for the dry audio
    dryStart = i * seglen



    dryEnd = dryStart + seglen - 1
    if dryEnd > len(x) - 1:
        dryEnd = len(x) - 1


    convsegment = signal.fftconvolve(x[dryStart:dryEnd + 1],Equalizer)



    ## add convresult

    StartConv = dryStart
    EndConv = dryEnd + len(Equalizer) - 1

    ConvResult[StartConv:EndConv + 1]= ConvResult[StartConv:EndConv + 1] + convsegment


ConvResult = ConvResult/max(ConvResult)

scikits.audiolab.play(ConvResult,fs)
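The PyAudio hookup never made it into this answer, but one common pattern is a callback-based output stream: PyAudio requests one block of samples at a time, and the callback filters that block through each band with scipy.signal.lfilter, carrying the per-band filter state between blocks so the output stays continuous, scales each band by its slider gain, and returns the mix. The sketch below only illustrates that idea and is not code from the post; the band edges, the band_filters / gains / audio_callback names, and the assumption of a 44.1 kHz stereo WAV are all illustrative.

#### Real-time sketch (illustrative, not from the original post)
import time

import numpy as np
import pyaudio
from scipy import signal
from scipy.io import wavfile

fs, x = wavfile.read('Firstsaturday.wav')  ## same test file as above
x = x.astype(float)
x = (x[:, 0] + x[:, 1]) / 2                ## make it mono
x = x / np.max(np.abs(x))                  ## normalize to [-1, 1]

numtaps = 100
nyq = fs / 2.0

## Band edges in Hz (illustrative; assumes fs = 44.1 kHz so every edge is below Nyquist)
edges = [45.0, 1355.0, 2645.0, 13355.0, 16645.0]
band_filters = [signal.firwin(numtaps, edges[0] / nyq)]           ## low band
band_filters += [signal.firwin(numtaps, [lo / nyq, hi / nyq], pass_zero=False)
                 for lo, hi in zip(edges[:-1], edges[1:])]        ## remaining bands

gains = [1.0, 1.0, 1.0, 1.0, 1.0]  ## what the sliders would update, e.g. 2 ** (dB / 6.0)
states = [np.zeros(numtaps - 1) for _ in band_filters]  ## per-band filter state between blocks
position = [0]                     ## read position into x (a list so the callback can mutate it)


def audio_callback(in_data, frame_count, time_info, status):
    start = position[0]
    position[0] = start + frame_count
    block = x[start:start + frame_count]
    if len(block) < frame_count:   ## pad the final block with silence
        block = np.concatenate([block, np.zeros(frame_count - len(block))])
    out = np.zeros(frame_count)
    for i, taps in enumerate(band_filters):
        ## lfilter with zi keeps each band continuous across block boundaries
        y, states[i] = signal.lfilter(taps, 1.0, block, zi=states[i])
        out += gains[i] * y
    flag = pyaudio.paContinue if position[0] < len(x) else pyaudio.paComplete
    return (out.astype(np.float32).tobytes(), flag)


p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32, channels=1, rate=int(fs),
                output=True, stream_callback=audio_callback)
stream.start_stream()
while stream.is_active():          ## the Tkinter mainloop would live here instead of sleeping
    time.sleep(0.1)
stream.stop_stream()
stream.close()
p.terminate()

The point of this layout is that the sliders only need to change the values in gains; nothing is re-read or re-filtered from scratch, which is what kept the original approach from running in real time.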

About user-interface - graphic equalizer, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/43429710/
