import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pylab as plb


def cost_function(theta):
    # Halved mean squared error of the prediction x * theta against y.
    theta = np.atleast_2d(np.asarray(theta))
    return np.average((y - (x * theta)) ** 2, axis=1) / 2


# dataset
dataset = [[1, 1], [2, 3], [3, 2], [4, 3], [5, 5]]

# x and y arrays
x = np.array([row[0] for row in dataset])
y = np.array([row[1] for row in dataset])

# Take N steps with learning rate alpha down the steepest gradient,
# starting at theta = 0, where m is the number of training examples.
N = 5
alpha = .1
m = len(x)
theta = [0]
J = [cost_function(theta[0])[0]]
for j in range(N - 1):
    last_theta = theta[-1]
    this_theta = last_theta - alpha / m * np.sum(((x * last_theta) - y) * x)
    theta.append(this_theta)
    J.append(cost_function(this_theta)[0])

# print cost and theta
print('Initial cost: ', J[0])
print('Theta: ', theta[-1])
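
# Added sketch, not part of the original script: for this single-parameter
# model y ~ theta * x, least squares has the closed-form optimum
# theta* = sum(x * y) / sum(x ** 2), which gives a quick sanity check that
# the gradient-descent steps above converge toward the right value
# (roughly 0.909 for this dataset).
theta_closed_form = np.sum(x * y) / np.sum(x ** 2)
print('Closed-form theta: ', theta_closed_form)
print('Final cost: ', J[-1])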