The gradient update seems to be wrong somewhere. I have implemented the algorithm given below. What am I doing wrong?
'''
Implementation of PALM- proximal alternating linearisation method
'''
import numpy as np

def palm(X, m, niter, lamda):
    X = X.T
    l = X.shape[0]
    n = X.shape[1]
    W = np.random.rand(m, n)
    D = np.random.rand(l, m)
    for i in range(niter):
        '''
        Update dictionary D
        '''
        # gradient step on D with step size 1/||W||_2^2
        tau_d = np.linalg.norm(W, 2)**-2
        D = D - tau_d * np.matmul((np.matmul(D, W) - X), W.T)
        for j in range(1, m):
            D[:, j] = D[:, j] - (np.ones((l, 1)).T * D[:, j]) / l
        # rescale each column of D so its l2 norm is at most 1
        for j in range(m):
            D[:, j] = D[:, j] / max(1, np.linalg.norm(D[:, j], 2))
        '''
        Update coefficients W
        '''
        # gradient step on W with step size 1/||D||_2^2
        tau_w = np.linalg.norm(D, 2)**-2
        W = W - tau_w * np.matmul(D.T, (np.matmul(D, W) - X))
        # row-wise soft-thresholding with threshold lamda
        for j in range(m):
            W[j, :] = np.multiply(np.maximum(np.zeros(W[j, :].shape[0]), np.absolute(W[j, :]) - lamda), np.sign(W[j, :]))
    return D, W
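To check whether the gradient expressions themselves are the problem, here is a small finite-difference comparison against the objective 0.5*||D W - X||_F^2 (the array shapes below are arbitrary, chosen only for the check):

import numpy as np

# f(D) = 0.5 * ||D W - X||_F^2 and its gradient w.r.t. D, as used in the D update
l, m, n = 8, 4, 20
X = np.random.rand(l, n)
D = np.random.rand(l, m)
W = np.random.rand(m, n)

f = lambda D: 0.5 * np.linalg.norm(D @ W - X, 'fro')**2
grad = (D @ W - X) @ W.T          # analytic gradient

eps = 1e-6
num = np.zeros_like(D)
for a in range(l):
    for b in range(m):
        E = np.zeros_like(D)
        E[a, b] = eps
        num[a, b] = (f(D + E) - f(D - E)) / (2 * eps)   # central difference

print(np.allclose(grad, num, atol=1e-4))   # expect True: the gradient formula itself checks out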
I believe the D and W updates are wrong, specifically for the second row of W and the second column of D.
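For reference, the two proximal operations the inner loops are meant to perform are soft-thresholding (for the l1 penalty on W) and projection onto the unit l2 ball (for the columns of D). A minimal sketch of both, with helper names of my own choosing:

import numpy as np

def soft_threshold(v, t):
    # prox of t * ||.||_1: shrink each entry towards zero by t, clipping at zero
    return np.sign(v) * np.maximum(np.abs(v) - t, 0.0)

def project_unit_ball(d):
    # Euclidean projection onto {d : ||d||_2 <= 1}
    return d / max(1.0, np.linalg.norm(d, 2))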
Best answer
import numpy as np

def palm(X, m, niter, lamda):
    X = X.T
    l = X.shape[0]
    n = X.shape[1]
    W = np.random.rand(m, n)
    D = np.random.rand(l, m)
    for i in range(niter):
        '''
        Update dictionary D
        '''
        tau_d = np.linalg.norm(W, 2)**-2
        D = D - tau_d * np.matmul((np.matmul(D, W) - X), W.T)
        for j in range(1, m):
            D[:, j] = D[:, j] - (np.ones((l, 1)).T * D[:, j]) / l
        for j in range(1, m):
            D[:, j] = D[:, j] - D[:, j] / max(1, np.linalg.norm(D[:, j], 2))
        '''
        Update coefficients W
        '''
        tau_w = np.linalg.norm(D, 2)**-2
        W = W - tau_w * np.matmul(D.T, (np.matmul(D, W) - X))
        for j in range(1, m):
            W[j, :] = W[j, :] - np.multiply(np.maximum(np.zeros(W[j, :].shape[0]), np.absolute(W[j, :]) - lamda), np.sign(W[j, :]))
    return D, W
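A minimal way to exercise this function on synthetic data might look like the following (the shapes, iteration count, and lamda value are assumptions, not part of the answer):

import numpy as np

np.random.seed(0)
X = np.random.rand(50, 30)                      # palm() transposes X internally
D, W = palm(X, m=10, niter=100, lamda=0.1)
print(D.shape, W.shape)                         # (30, 10) and (10, 50)
print(np.linalg.norm(D @ W - X.T, 'fro'))       # reconstruction error on the transposed data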
Regarding python - Python implementation of the alternating linearized minimization algorithm, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/56176647/