[Deep Learning] Multi Variable Regression
Multi Variable Regression
import tensorflow as tf
#Hypothesis using matrix
#Lesson 1: spaghetti version, not used later
#x1_data = [73., 93., 89., 96., 73.]
#x2_data = [80., 88., 91., 98., 66.]
#x3_data = [75., 93., 90., 100., 70.]
#y_data = [152., 185., 180., 196., 142.]
#x1 = tf.placeholder(tf.float32)
#x2 = tf.placeholder(tf.float32)
#x3 = tf.placeholder(tf.float32)
#Y = tf.placeholder(tf.float32)
#w1 = tf.Variable(tf.random_normal([1]), name='weight1')
#w2 = tf.Variable(tf.random_normal([1]), name='weight2')
#w3 = tf.Variable(tf.random_normal([1]), name='weight3')
#b = tf.Variable(tf.random_normal([1]), name='bias')
#hypothesis = x1*w1 + x2*w2 + x3*w3 + b
#Lesson 1 End
#Lesson 2: matrix form
x_data = [[73., 80., 75.], [93., 88., 93.], [89., 91., 90.], [96., 98., 100.],
[73., 66., 70.]]
y_data = [[152.], [185.], [180.], [196.], [142.]]
X = tf.placeholder(tf.float32, shape=[None, 3]) # None: any number of rows, 3: features per row
Y = tf.placeholder(tf.float32, shape=[None, 1]) # None: any number of rows, 1: target per row
W = tf.Variable(tf.random_normal([3,1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
hypothesis = tf.matmul(X, W) + b # [None, 3] x [3, 1] -> [None, 1]
#Lession2 End
# cost/loss function
cost = tf.reduce_mean(tf.square(hypothesis - Y))
#Minimize. Need a very small learning rate for this data set
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)
#Launch the graph in a session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(2001):
    cost_val, hy_val, _ = sess.run([cost, hypothesis, train],
                                   feed_dict={X: x_data, Y: y_data})
    # cost_val, hy_val, _ = sess.run([cost, hypothesis, train],
    #     feed_dict={x1: x1_data, x2: x2_data, x3: x3_data, Y: y_data})
    if step % 10 == 0:
        print(step, "Cost: ", cost_val, "\nPrediction:\n", hy_val)
#The predictions converge toward the target y_data.
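Because the hypothesis is a single matrix product, the trained model can score any new [n, 3] input through the same placeholder. A minimal sketch (the three feature values below are made-up, not from the lecture):
# Hypothetical new sample; reuses sess, hypothesis, and X from above
print("Prediction for [90, 85, 88]:",
      sess.run(hypothesis, feed_dict={X: [[90., 85., 88.]]}))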
Multi Variable Regression: File Load (using numpy)
import tensorflow as tf
import numpy as np
tf.set_random_seed(777)
xy = np.loadtxt('data-01-test-score.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1] # every column except the last
y_data = xy[:, [-1]] # only the last column, kept 2-D
print(x_data.shape, x_data, len(x_data))
print(y_data.shape, y_data)
X = tf.placeholder(tf.float32, shape=[None, 3])
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([3,1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
hypothesis = tf.matmul(X, W) + b
cost = tf.reduce_mean(tf.square(hypothesis - Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(2001):
    cost_val, hy_val, _ = sess.run(
        [cost, hypothesis, train],
        feed_dict={X: x_data, Y: y_data})
    if step % 10 == 0:
        print(step, "Cost: ", cost_val, "\nPrediction:\n", hy_val)
print("Your score will be ", sess.run(hypothesis, feed_dict={X:[[100, 70, 101]]}))
print("Other score will be ", sess.run(hypothesis, feed_dict={X:[[60, 70, 110], [90, 100, 80]]}))
Multi Variable Regression: File Load Using a Queue (with numpy)
import tensorflow as tf
import numpy as np
tf.set_random_seed(777)
#xy = np.loadtxt('data-01-test-score.csv', delimiter=',', dtype=np.float32)
#x_data = xy[:, 0:-1] # every column except the last
#y_data = xy[:, [-1]] # only the last column, kept 2-D
filename_queue = tf.train.string_input_producer(
['data-01-test-score.csv'], shuffle=False, name='filename_queue')
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
record_defaults = [[0.], [0.], [0.], [0.]]
xy = tf.decode_csv(value, record_defaults=record_defaults)
#Read the file contents in batches of 10 rows
train_x_batch, train_y_batch = \
tf.train.batch([xy[0:-1], xy[-1:]], batch_size=10)
#print(x_data.shape, x_data, len(x_data))
#print(y_data.shape, y_data)
X = tf.placeholder(tf.float32, shape=[None, 3])
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([3,1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
hypothesis = tf.matmul(X, W) + b
cost = tf.reduce_mean(tf.square(hypothesis - Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
#Start the filename queue runners
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for step in range(2001):
    x_batch, y_batch = sess.run([train_x_batch, train_y_batch])
    cost_val, hy_val, _ = sess.run(
        [cost, hypothesis, train],
        feed_dict={X: x_batch, Y: y_batch})
    # cost_val, hy_val, _ = sess.run(
    #     [cost, hypothesis, train],
    #     feed_dict={X: x_data, Y: y_data})
    if step % 10 == 0:
        print(step, "Cost: ", cost_val, "\nPrediction:\n", hy_val)
coord.request_stop()
coord.join(threads)
#print("Your score will be ", sess.run(hypothesis, feed_dict={X:[[100, 70, 101]]}))
#print("Other score will be ", sess.run(hypothesis, feed_dict={X:[[60, 70, 110], [90, 100, 80]]}))
This post is a study note written while following Professor Kim Sung-hoon's 'Deep Learning for Everyone' (모두의 딥러닝) lectures.