I tried to implement polynomial regression with gradient descent. I want to fit the following function:

[figure: the target function to fit]

The code I am using is:

import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg
from sklearn.preprocessing import PolynomialFeatures
np.random.seed(seed=42)


def create_data():
    x = PolynomialFeatures(degree=5).fit_transform(np.linspace(-10,10,100).reshape(100,-1))
    l = lambda x_i: (1/3)*x_i**3-2*x_i**2+2*x_i+2
    data = l(x[:,1])
    noise = np.random.normal(0,0.1,size=np.shape(data))
    y = data+noise
    y= y.reshape(100,1)
    return {'x':x,'y':y}

def plot_function(x,y):
    fig = plt.figure(figsize=(10,10))
    plt.plot(x[:,1],[(1/3)*x_i**3-2*x_i**2+2*x_i+2 for x_i in x[:,1]],c='lightgreen',linewidth=3,zorder=0)
    plt.scatter(x[:,1],y)
    plt.show()


def w_update(y,x,batch,w_old,eta):
    derivative = np.sum([(y[i]-np.dot(w_old.T,x[i,:]))*x[i,:] for i in range(np.shape(x)[0])])
    print(derivative)
    return w_old+eta*(1/batch)*derivative



# initialize variables
w = np.random.normal(size=(6,1))
data = create_data()
x = data['x']
y = data['y']
plot_function(x,y)



# Update w
w_s = []
Error = []
for i in range(500):
    error = (1/2)*np.sum([(y[i]-np.dot(w.T,x[i,:]))**2 for i in range(len(x))])
    Error.append(error)
    w_prime = w_update(y,x,np.shape(x)[0],w,0.001)
    w = w_prime
    w_s.append(w)
# Plot the predicted function
plt.plot(x[:,1],np.dot(x,w))
plt.show()

# Plot the error
fig3 = plt.figure()
plt.scatter(range(len(Error[10:])),Error[10:])
plt.show()


But the result I get is way off; strangely, it is completely out of range... I have also tried changing the number of iterations as well as the learning rate eta, but that did not help. I assume I made a mistake in the update of w.
[figure: the resulting fit, completely out of range of the data]

Best answer

I found the solution. The problem was indeed in the part where I compute the weights, specifically in:

np.sum([(y[d]-np.dot(w_old.T,x[d,:]))*x[d,:] for d in range(np.shape(x)[0])])


It should instead be:

np.sum([-(y[d]-np.dot(w.T.copy(),x[d,:]))*x[d,:].reshape(np.shape(w)) for d in range(len(x))],axis=0)


We have to add np.sum(..., axis=0) to get the dimensionality we need: the result has to have the same shape as w. The numpy sum documentation says:

  The default, axis=None, will sum all of the elements of the input array.
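To illustrate the difference in isolation, here is a minimal sketch (not part of the original answer) that stacks 100 per-sample gradients of shape (7, 1) and sums them with and without axis=0:

import numpy as np

# 100 per-sample gradients, each of shape (7, 1), stacked into a (100, 7, 1) array
grads = np.ones((100, 7, 1))
print(np.sum(grads).shape)          # () -> axis=None collapses everything into a single scalar
print(np.sum(grads, axis=0).shape)  # (7, 1) -> sums over the 100 samples and matches the shape of w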


That is not what we want here. Summing with axis=0 sums over the first axis of the (100, 7, 1) array, i.e. it adds up the 100 per-sample gradients of shape (7, 1), so the result has shape (7, 1) and matches w exactly, which is what we want. (The corrected expression also writes the update as the explicit negative gradient, which is why the main loop below uses w -= ...; this is equivalent to the original w + eta * derivative form.) Implementing this and cleaning up the code yields:

import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import MinMaxScaler
np.random.seed(seed=42)


def create_data():
    x = PolynomialFeatures(degree=6).fit_transform(np.linspace(-2,2,100).reshape(100,-1))
    x[:,1:] = MinMaxScaler(feature_range=(-2,2),copy=False).fit_transform(x[:,1:])
    l = lambda x_i: np.cos(0.8*np.pi*x_i)
    data = l(x[:,1])
    noise = np.random.normal(0,0.1,size=np.shape(data))
    y = data+noise
    y= y.reshape(100,1)
    # Normalize Data
    return {'x':x,'y':y}

def plot_function(x,y,w,Error,w_s):
    fig,ax = plt.subplots(nrows=1,ncols=2,figsize=(40,10))
    ax[0].plot(x[:,1],[np.cos(0.8*np.pi*x_i) for x_i in x[:,1]],c='lightgreen',linewidth=3,zorder=0)
    ax[0].scatter(x[:,1],y)
    ax[0].plot(x[:,1],np.dot(x,w))
    ax[0].set_title('Function')
    ax[1].scatter(range(iterations),Error)
    ax[1].set_title('Error')



    plt.show()



# initialize variables
data = create_data()
x = data['x']
y = data['y']
w = np.random.normal(size=(np.shape(x)[1],1))
eta = 0.1
iterations = 10000
batch = 10



def stochastic_gradient_descent(x,y,w,eta):
    derivative = -(y-np.dot(w.T,x))*x.reshape(np.shape(w))
    return eta*derivative


def batch_gradient_descent(x,y,w,eta):
    derivative = np.sum([-(y[d]-np.dot(w.T.copy(),x[d,:]))*x[d,:].reshape(np.shape(w)) for d in range(len(x))],axis=0)
    return eta*(1/len(x))*derivative


def mini_batch_gradient_descent(x,y,w,eta,batch):
    gradient_sum = np.zeros(shape=np.shape(w))
    for b in range(batch):
        # draw a random sample and accumulate its gradient
        choice = np.random.choice(list(range(len(x))))
        gradient_sum += -(y[choice]-np.dot(w.T,x[choice,:]))*x[choice,:].reshape(np.shape(w))
    # return the averaged update only after all `batch` samples have been accumulated
    return eta*(1/batch)*gradient_sum

# Update w
w_s = []
Error = []
for i in range(iterations):
    # Calculate error
    error = (1/2)*np.sum([(y[i]-np.dot(w.T,x[i,:]))**2 for i in range(len(x))])
    Error.append(error)
    # Stochastic Gradient Descent
    """
    for d in range(len(x)):
        w-= stochastic_gradient_descent(x[d,:],y[d],w,eta)
        w_s.append(w.copy())
    """
    # Minibatch Gradient Descent
    """
    w-= mini_batch_gradient_descent(x,y,w,eta,batch)
    """

    # Batch Gradient Descent

    w -= batch_gradient_descent(x,y,w,eta)




# Show the learned weights
print(w)

# Plot the predicted function and the Error
plot_function(x,y,w,Error,w_s)


As a result, we get:
[figure: the fitted function over the data, and the error over the iterations]
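As a side note on the implementation (not part of the original answer), the per-sample Python loop in batch_gradient_descent can be collapsed into a single vectorized expression that produces the same (7, 1) gradient; a minimal sketch, assuming x of shape (100, 7), y of shape (100, 1) and w of shape (7, 1) as above:

def batch_gradient_descent_vectorized(x, y, w, eta):
    # residuals has shape (100, 1); x.T @ residuals has shape (7, 1), matching w
    residuals = y - x @ w
    derivative = -(x.T @ residuals)
    return eta * (1 / len(x)) * derivative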

Of course, this can still be improved by tuning eta and the number of iterations, by switching to stochastic or mini-batch gradient descent, or by using more sophisticated optimization algorithms.
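For reference (also not part of the original answer), since the model is linear in the polynomial features, the gradient-descent result can be sanity-checked against the closed-form least-squares solution; a minimal sketch, assuming x and y as returned by create_data():

# Closed-form least-squares fit; gradient descent should converge towards these weights
w_closed_form, *_ = np.linalg.lstsq(x, y, rcond=None)
print(w_closed_form)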

This Q&A on Python polynomial regression with gradient descent is based on a similar question on Stack Overflow: https://stackoverflow.com/questions/57232821/
