@@ -0,0 +1,30 @@
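# Linear regression on synthetic data, fitted with Stochastic Gradient Descent (SGD).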
import numpy as np
import matplotlib.pyplot as plt
###############################
# Original data
###############################
X = 2 * np.random.rand(100, 1)  # 100 instances, single feature, uniform in [0, 2)
y = 4 + 3 * X + np.random.randn(100, 1)  # linear target y = 4 + 3x plus standard Gaussian noise

plt.plot(X, y, ".")
plt.show()  # display the scatter of the generated data
###############################
X_b = np.c_[np.ones((100, 1)), X]  # add x0 = 1 (bias term) to each instance
m = 100  # number of training instances
n_epochs = 50
t0, t1 = 5, 50  # learning schedule hyperparameters

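# Learning schedule: the step size decays as t0 / (t + t1), so updates start
# large and gradually shrink as training progresses.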
def learning_schedule(t):
    return t0 / (t + t1)

theta = np.random.randn(2, 1)  # random initialization

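# Stochastic Gradient Descent: each epoch performs m updates, each one computed
# from a single randomly drawn training instance rather than the full batch.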
for epoch in range(n_epochs):
    for i in range(m):
        random_index = np.random.randint(m)  # pick one training instance at random
        xi = X_b[random_index:random_index+1]  # slicing keeps the row 2-D, shape (1, 2)
        yi = y[random_index:random_index+1]  # corresponding target, shape (1, 1)
        gradients = 2 * xi.T.dot(xi.dot(theta) - yi)  # gradient of the squared error for this single instance
        eta = learning_schedule(epoch * m + i)  # learning rate shrinks as training progresses
        theta = theta - eta * gradients  # gradient descent step

print(theta)  # learned [intercept, slope]; should land near 4 and 3
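
# Optional sanity check, not part of the original script: the closed-form
# Normal Equation estimate should be close to the SGD result printed above.
theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)
print(theta_best)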