
3. LINEAR REGRESSION

September 22, 2017

In [1]: %matplotlib inline
        # https://stackoverflow.com/questions/43027980/purpose-of-matplotlib-inline
        import numpy as np
        import matplotlib.pyplot as plt

        # load the data: first column is the response, the remaining columns are features
        csv = 'https://www.dropbox.com/s/oqoyy9p849ewzt2/linear.csv?dl=1'
        data = np.genfromtxt(csv, delimiter=',')
        X = data[:, 1:]  # feature matrix
        Y = data[:, 0]   # response vector

In [2]: import theano
        import theano.tensor as T

        d = X.shape[1]  # number of feature columns
        n = X.shape[0]  # number of training samples
        learn_rate = 0.5  # gradient descent step size

In [3]: x = T.matrix(name='x')  # feature matrix
        y = T.vector(name='y')  # response vector
        w = theano.shared(np.zeros((d,1)), name='w')  # weights, initialised to zero
        # https://www.quora.com/What-is-the-meaning-and-benefit-of-shared-variables-in-Theano
        risk = T.sum((T.dot(x,w).T - y)**2) / 2 / n  # empirical risk
        # https://stackoverflow.com/questions/38185601/theano-using-dot-t-at-the-end-of-a-dot-pr
        grad_risk = T.grad(risk, wrt=w)  # gradient of risk with respect to w
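
For reference, this is the objective the two expressions above encode, with X the n-by-d feature matrix and y the length-n response (a restatement of the code, not an addition to the assignment):

    R(w) = \frac{1}{2n} \sum_{i=1}^{n} \left( x_i^\top w - y_i \right)^2,
    \qquad
    \nabla_w R(w) = \frac{1}{n} X^\top (Xw - y)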

In [4]: # gradient descent
        # each call to train_model performs one update  w <- w - learn_rate * grad_risk
        # and returns the risk evaluated before that update
        train_model = theano.function(
            inputs=[],
            outputs=risk,
            updates=[(w, w - learn_rate*grad_risk)],
            givens={x: X, y: Y}
        )
        n_steps = 50
        for i in range(n_steps):
            print(train_model())

        print('\n(a) Coefficients of vector w: \n\n', w.get_value())

2.619322008585456
0.7587559422725656
0.23542812224973012
0.07939576047330808
0.030096896364576606
0.013695201975103268
0.008006092308871296
0.005970446508700003
0.005225932117259178
0.004949550127903776
0.004845924776165537
0.0048068130299667704
0.004791984242871621
0.004786344307210614
0.0047841942688007875
0.0047833731708215
0.004783059133878644
0.004782938874883769
0.004782892769416027
0.004782875074349405
0.00478286827612244
0.004782865661743263
0.0047828646553662564
0.004782864267605403
0.00478286411806061
0.004782864060333996
0.004782864038030564
0.004782864029405724
0.004782864026067559
0.004782864024774452
0.004782864024273129
0.004782864024078616
0.004782864024003076
0.004782864023973726
0.004782864023962313
0.004782864023957866
0.004782864023956134
0.0047828640239554595
0.004782864023955196
0.004782864023955098
0.004782864023955059
0.004782864023955038
0.0047828640239550345
0.004782864023955031
0.004782864023955031
0.004782864023955033
0.0047828640239550285
0.004782864023955029
0.004782864023955031
0.004782864023955036

(a) Coefficients of vector w:

[[-0.57392068]
[ 1.35757059]
[ 0.01527565]
[-1.88288076]]
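
As a quick cross-check (a sketch that is not part of the original notebook; the name w_np is made up here), the same 50 updates can be reproduced in plain NumPy using the variables loaded above, and should land on essentially the same coefficients:

        w_np = np.zeros(d)
        for _ in range(n_steps):
            grad = X.T.dot(X.dot(w_np) - Y) / n   # (1/n) X^T (Xw - y), same gradient as grad_risk
            w_np = w_np - learn_rate * grad
        print(w_np)  # should be close to w.get_value().ravel()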

In [5]: # Least squares estimates via the normal equations
        from theano.tensor.nlinalg import matrix_inverse

        xt = T.matrix('xt')
        x = T.matrix('x')
        y = T.vector('y')

        a = matrix_inverse(T.dot(xt,x))   # (X^T X)^-1
        f = theano.function([xt, x], a)
        gramian = f(X.T, X)               # note: this is the *inverse* of the Gram matrix X^T X

        b = T.dot(xt,y)                   # X^T y
        g = theano.function([xt, y], b)
        moment = g(X.T, Y)

        theta = g(gramian, moment)        # (X^T X)^-1 X^T y
        print('(b) Exact optimal solution: ', theta)
        print('Solution is same as coefficients of w through gradient descent')

(b) Exact optimal solution: [-0.57392068 1.35757059 0.01527565 -1.88288076]


Solution is same as coefficients of w through gradient descent
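
Side note (not in the original): for a small 4-column problem the explicit inverse is harmless, but the same estimate is normally computed without forming it, e.g. in NumPy:

        theta_np = np.linalg.solve(X.T.dot(X), X.T.dot(Y))   # solves (X^T X) theta = X^T y
        # or, more robustly when X is ill-conditioned:
        theta_lstsq, residuals, rank, sv = np.linalg.lstsq(X, Y, rcond=None)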

In [6]: from sklearn import linear_model

        regr = linear_model.LinearRegression()
        regr.fit(X, Y)

        print('(c) Using sci-kit learn linear regression\n')
        print('Coefficients: \n', regr.coef_)

        print('\nSci-kit learn linear regression uses ordinary least squares and, by default,')
        print('fits the intercept separately. Since the last attribute is a column of ones, it is')
        print('redundant with that intercept, so its coefficient comes out as 0; the intercept')
        print('itself is reported in regr.intercept_.')

(c) Using sci-kit learn linear regression

Coefficients:
[-0.57392068 1.35757059 0.01527565 0. ]

Sci-kit learn linear regression uses ordinary least squares and, by default,
fits the intercept separately. Since the last attribute is a column of ones, it is
redundant with that intercept, so its coefficient comes out as 0; the intercept
itself is reported in regr.intercept_.
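
To see this directly (a small sketch, not in the original notebook), the model can be refit with the intercept disabled, so the column of ones has to carry the intercept itself:

        regr_no_intercept = linear_model.LinearRegression(fit_intercept=False)
        regr_no_intercept.fit(X, Y)
        print(regr_no_intercept.coef_)  # last entry should now match the intercept found in (a) and (b)
        print(regr.intercept_)          # intercept of the default model, expected to be roughly the same value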
