I am trying to train a multivariate linear regression model with regularization using tensorflow. For some reason I cannot get the training part of the code below to compute the error I want to use for the gradient descent update. Am I doing something wrong in building the graph?
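For reference, here is a plain-NumPy sketch of the regularized cost and of one gradient-descent step that this code is meant to reproduce (following the ex5 formulation linked in the code; the helper names and the choice to also regularize the bias term are simplifications for illustration only):

import numpy as np

def regularized_cost(theta, X, Y, lam):
    # J(theta) = (1/(2m)) * ( sum((X.theta - Y)^2) + lambda * sum(theta^2) )
    m = len(X)
    residual = X.dot(theta) - Y
    return (np.sum(residual ** 2) + lam * np.sum(theta ** 2)) / (2.0 * m)

def gradient_step(theta, X, Y, lam, alpha):
    # theta := theta - alpha * dJ/dtheta, with dJ/dtheta = (X^T(X.theta - Y) + lambda*theta) / m
    m = len(X)
    grad = (X.T.dot(X.dot(theta) - Y) + lam * theta) / float(m)
    return theta - alpha * grad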
def normalize_data(matrix):
    averages = np.average(matrix,0)
    mins = np.min(matrix,0)
    maxes = np.max(matrix,0)
    ranges = maxes - mins
    return ((matrix - averages)/ranges)

def run_regression(X, Y, X_test, Y_test, lambda_value = 0.1, normalize=False, batch_size=10):
    x_train = normalize_data(X) if normalize else X
    y_train = Y
    x_test = X_test
    y_test = Y_test
    session = tf.Session()
    # Calculate number of features for X and Y
    x_features_length = len(X[0])
    y_features_length = len(Y[0])
    # Build Tensorflow graph parts
    x = tf.placeholder('float', [None, x_features_length], name="X")
    y = tf.placeholder('float', [None, y_features_length], name="Y")
    theta = tf.Variable(tf.random_normal([x_features_length, y_features_length], stddev=0.01), name="Theta")
    lambda_val = tf.constant(lambda_value)
    # Trying to implement this way http://openclassroom.stanford.edu/MainFolder/DocumentPage.php?course=MachineLearning&doc=exercises/ex5/ex5.html
    y_predicted = tf.matmul(x, theta, name="y_predicted")
    regularization_cost_part = tf.cast(tf.mul(lambda_val,tf.reduce_sum(tf.pow(theta,2)), name="regularization_param"), 'float')
    polynomial_cost_part = tf.reduce_sum(tf.pow(tf.sub(y_predicted, y), 2), name="polynomial_sum")
    # Set up some summary info to debug
    with tf.name_scope('cost') as scope:
        cost_func = tf.mul(tf.cast(1/(2*batch_size), 'float'), tf.cast(tf.add(polynomial_cost_part, regularization_cost_part), 'float'))
        cost_summary = tf.scalar_summary("cost", cost_func)
    training_func = tf.train.GradientDescentOptimizer(0.03).minimize(cost_func)
    with tf.name_scope("test") as scope:
        correct_prediction = tf.sub(tf.cast(1, 'float'), tf.reduce_mean(tf.sub(y_predicted, y)))
        accuracy = tf.cast(correct_prediction, "float")
        accuracy_summary = tf.scalar_summary("accuracy", accuracy)
    saver = tf.train.Saver()
    merged = tf.merge_all_summaries()
    writer = tf.train.SummaryWriter("/tmp/football_logs", session.graph_def)
    init = tf.initialize_all_variables()
    session.run(init)
    for i in range(0, (len(x_train)/batch_size)):
        session.run(training_func, feed_dict={x: x_train[i*batch_size:i*batch_size+batch_size], y: y_train[i*batch_size:i*batch_size+batch_size]})
        if i % batch_size == 0:
            result = session.run([merged, accuracy], feed_dict={x: x_test, y: y_test})
            writer.add_summary(result[0], i)
            print "step %d, training accuracy %g"%(i, result[1])
    print "test accuracy %g"%session.run(accuracy, feed_dict={x: x_test, y: y_test})
    save_path = saver.save(session, "/tmp/football.ckpt")
    print "Model saved in file: ", save_path
    session.close()
My output looks like this:
step 0, training accuracy 39.1802
step 10, training accuracy 39.1802
step 20, training accuracy 39.1802
...
step 210, training accuracy 39.1802
test accuracy 39.1802
Model saved in file: /tmp/football.ckpt
Best Answer
There is indeed an issue with the learning rate: 0.03 may be too high, depending on how the data looks. Also, if your dataset has mid/low dimensionality, you may want to build the graph in a more explicit way, separated from the session, or even use the normal equation to reach the optimal solution without iterating at all. Here I posted some examples that will hopefully help! Also, the TF tutorials cover it well (search for "Complete program" on that page).
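As a side note on the normal-equation route mentioned above, a minimal NumPy sketch of the regularized closed-form solution could look like this (it assumes x_train already contains the bias column x[0]=1 and y_train has shape [n, 1], as in the code further down; the helper name solve_normal_equation is just for illustration):

import numpy as np

def solve_normal_equation(x_train, y_train, lambda_value=0.1):
    # Closed form for ridge regression: theta = (X^T X + lambda*I)^(-1) X^T Y,
    # leaving the bias term unregularized as in the linked ex5 exercise.
    X = np.asarray(x_train, dtype=float)
    Y = np.asarray(y_train, dtype=float)
    reg = lambda_value * np.eye(X.shape[1])
    reg[0, 0] = 0.0  # do not penalize the intercept
    return np.linalg.solve(X.T.dot(X) + reg, X.T.dot(Y))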
But regarding your code, here is a version that works for me: I changed some of the deprecated functions and, most importantly, set the learning rate to a much lower value, alpha=1e-8, which (on the synthetic dataset also generated in the code) seems to converge:
test accuracy 2176.11
test accuracy 1898.6
test accuracy 1663.69
test accuracy 1458.53
test accuracy 1287.57
test accuracy 1116.9
test accuracy 969.474
test accuracy 841.028
test accuracy 738.592
test accuracy 649.891
test accuracy 565.188
test accuracy 495.33
test accuracy 438.351
test accuracy 381.161
test accuracy 333.213
test accuracy 289.575
test accuracy 254.394
test accuracy 222.836
test accuracy 197.36
test accuracy 172.788
test accuracy 152.251
test accuracy 132.664
test accuracy 115.982
test accuracy 101.021
final test accuracy 90.2555
代码:
import tensorflow as tf
import numpy as np

# generate some dataset
DIMENSIONS = 5
DS_SIZE = 5000
TRAIN_RATIO = 0.5 # 50% of the dataset is used for training
_train_size = int(DS_SIZE*TRAIN_RATIO)
_test_size = DS_SIZE - _train_size
f = lambda(x): sum(x) # the "true" function: f = 0 + 1*x1 + 1*x2 + 1*x3 ...
noise = lambda: np.random.normal(0,10) # some noise
# training globals
LAMBDA = 1e6 # L2 regularization factor
# generate the dataset, the labels and split into train/test
ds = [[np.random.rand()*1000 for d in range(DIMENSIONS)] for _ in range(DS_SIZE)]
ds = [([1]+x, [f(x)+noise()]) for x in ds] # add x[0]=1 dimension and labels
np.random.shuffle(ds)
train_data, train_labels = zip(*ds[0:_train_size])
test_data, test_labels = zip(*ds[_train_size:])

def normalize_data(matrix):
    averages = np.average(matrix,0)
    mins = np.min(matrix,0)
    maxes = np.max(matrix,0)
    ranges = maxes - mins
    return ((matrix - averages)/ranges)

def run_regression(X, Y, X_test, Y_test, lambda_value = 0.1, normalize=False, batch_size=10, alpha=1e-8):
    x_train = normalize_data(X) if normalize else X
    y_train = Y
    x_test = X_test
    y_test = Y_test
    session = tf.Session()
    # Calculate number of features for X and Y
    x_features_length = len(X[0])
    y_features_length = len(Y[0])
    # Build Tensorflow graph parts
    x = tf.placeholder('float', [None, x_features_length], name="X")
    y = tf.placeholder('float', [None, y_features_length], name="Y")
    theta = tf.Variable(tf.random_normal([x_features_length, y_features_length], stddev=0.01), name="Theta")
    lambda_val = tf.constant(lambda_value)
    # Trying to implement this way http://openclassroom.stanford.edu/MainFolder/DocumentPage.php?course=MachineLearning&doc=exercises/ex5/ex5.html
    y_predicted = tf.matmul(x, theta, name="y_predicted")
    #regularization_cost_part = tf.cast(tf.multiply(lambda_val,tf.reduce_sum(tf.pow(theta,2)), name="regularization_param"), 'float')
    #polynomial_cost_part = tf.reduce_sum(tf.pow(tf.subtract(y_predicted, y), 2), name="polynomial_sum")
    # Set up some summary info to debug
    with tf.name_scope('cost') as scope:
        #cost_func = tf.multiply(tf.cast(1/(2*batch_size), 'float'), tf.cast(tf.add(polynomial_cost_part, regularization_cost_part), 'float'))
        cost_func = (tf.nn.l2_loss(y_predicted - y)+lambda_val*tf.nn.l2_loss(theta))/float(batch_size)
        #DEPRECATED*** cost_summary = tf.scalar_summary("cost", cost_func)
        cost_summary = tf.summary.scalar('cost', cost_func) # Add a scalar summary for the snapshot loss.
    training_func = tf.train.GradientDescentOptimizer(alpha).minimize(cost_func)
    with tf.name_scope("test") as scope:
        correct_prediction = tf.subtract(tf.cast(1, 'float'), tf.reduce_mean(tf.subtract(y_predicted, y)))
        accuracy = tf.cast(correct_prediction, "float")
        #DEPRECATED*** accuracy_summary = tf.scalar_summary("accuracy", accuracy)
        #accuracy_summary = tf.summary.scalar("accuracy", accuracy)
    saver = tf.train.Saver()
    #DEPRECATED*** merged = tf.merge_all_summaries()
    merged = tf.summary.merge_all()
    #DEPRECATED*** writer = tf.train.SummaryWriter("/tmp/football_logs", session.graph_def)
    writer = tf.summary.FileWriter("/tmp/football_logs", session.graph)
    #DEPRECATED*** init = tf.initialize_all_variables()
    init = tf.global_variables_initializer()
    session.run(init)
    for i in range(1, (len(x_train)/batch_size)):
        session.run(training_func, feed_dict={x: x_train[i*batch_size:i*batch_size+batch_size], y: y_train[i*batch_size:i*batch_size+batch_size]})
        if i % batch_size == 0:
            print "test accuracy %g"%session.run(accuracy, feed_dict={x: x_test, y: y_test})
            #result = session.run([merged, accuracy], feed_dict={x: x_test, y: y_test})
            #writer.add_summary(result[0], i)
            #print "step %d, training accuracy %g"%(i, result[1])
        #writer.flush()
    print "final test accuracy %g"%session.run(accuracy, feed_dict={x: x_test, y: y_test})
    #save_path = saver.save(session, "/tmp/football.ckpt")
    #print "Model saved in file: ", save_path
    session.close()

run_regression(train_data, train_labels, test_data, test_labels, normalize=False, alpha=1e-8)
As I said, you will probably want to change the structure to favor readability and scalability, but hopefully this helps!
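A rough sketch of the kind of restructuring meant here, keeping graph construction separate from the session logic (the helper names build_graph and train_loop are hypothetical, the same TF 1.x API as above is assumed, and the per-batch cost scaling is omitted for brevity):

import tensorflow as tf

def build_graph(x_dim, y_dim, lambda_value, alpha):
    # Graph construction only; no tf.Session involved here.
    x = tf.placeholder('float', [None, x_dim], name="X")
    y = tf.placeholder('float', [None, y_dim], name="Y")
    theta = tf.Variable(tf.random_normal([x_dim, y_dim], stddev=0.01), name="Theta")
    y_predicted = tf.matmul(x, theta, name="y_predicted")
    cost = tf.nn.l2_loss(y_predicted - y) + lambda_value * tf.nn.l2_loss(theta)
    train_op = tf.train.GradientDescentOptimizer(alpha).minimize(cost)
    return x, y, cost, train_op

def train_loop(x_train, y_train, batch_size=10, lambda_value=0.1, alpha=1e-8):
    # Session logic only: feed mini-batches into the already-built graph.
    x, y, cost, train_op = build_graph(len(x_train[0]), len(y_train[0]), lambda_value, alpha)
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        for i in range(len(x_train) // batch_size):
            batch_x = x_train[i * batch_size:(i + 1) * batch_size]
            batch_y = y_train[i * batch_size:(i + 1) * batch_size]
            _, c = session.run([train_op, cost], feed_dict={x: batch_x, y: batch_y})
            print "batch %d cost %g" % (i, c)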
Cheers,
Andres
Regarding "python - Tensorflow multivariate linear regression not converging", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/34208336/