Regression From Scratch

Let's load the required libraries.

In [3]:
import numpy as np
from itertools import combinations_with_replacement  # used by polynomial_features below
# from MLModelFromScratch.utils import normalize, polynomial_features

We write some utility functions required for pre-processing.

In [4]:
def normalize(X, axis=-1, order=2):
    """Normalize dataset X along the given axis using the order-norm (L2 by default)."""
    l2 = np.atleast_1d(np.linalg.norm(X, order, axis))
    l2[l2 == 0] = 1  # avoid division by zero for all-zero vectors
    return X / np.expand_dims(l2, axis)
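As a quick sanity check, every row of the normalized output should have unit L2 norm (the small array below is purely illustrative):

In [ ]:
X_demo = np.array([[3.0, 4.0], [1.0, 0.0]])
print(normalize(X_demo))                           # -> [[0.6 0.8], [1.  0. ]]
print(np.linalg.norm(normalize(X_demo), axis=-1))  # -> [1. 1.]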
In [5]:
def polynomial_features(X, degree):
    """Create polynomial features up to the given degree.
    X: input data of shape (n_samples, n_features)
    degree: maximum degree of the generated polynomial terms
    """
    n_samples, n_features = np.shape(X)

    def index_combinations():
        # all multisets of feature indices, one per polynomial term of degree 0..degree
        combs = [combinations_with_replacement(range(n_features), i) for i in range(0, degree + 1)]
        flat_combs = [item for sublist in combs for item in sublist]
        return flat_combs

    combinations = index_combinations()
    n_output_features = len(combinations)
    X_new = np.empty((n_samples, n_output_features))

    # each output column is the product of the input columns named by one index combination
    for i, index_combs in enumerate(combinations):
        X_new[:, i] = np.prod(X[:, index_combs], axis=1)
    return X_new
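For example, with two features and degree 2 the transform emits the bias term, the raw features, and every degree-2 product (the input below is illustrative):

In [ ]:
X_demo = np.array([[2.0, 3.0]])
# columns: 1, x1, x2, x1^2, x1*x2, x2^2
print(polynomial_features(X_demo, degree=2))  # -> [[1. 2. 3. 4. 6. 9.]]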
In [6]:
class Regression(object):
    """Base regression model.
    Parameters:
    n_iterations: int
        Number of training iterations over which the algorithm tunes the weights.
    learning_rate: float
        The step length that will be used when updating the weights.
    """
    def __init__(self, n_iterations, learning_rate):
        # assign n_iterations and learning_rate to instance variables
        self.n_iterations = n_iterations
        self.learning_rate = learning_rate
        # no regularization by default; subclasses can override these hooks
        self.regularization = lambda x: 0
        self.regularization.grad = lambda x: 0

    def initialize_weights(self, n_features):
        # initialize weights uniformly at random in [-1/N, 1/N]
        limit = 1 / n_features
        self.w = np.random.uniform(-limit, limit, (n_features,))

    def fit(self, X, y):
        """Model the relationship between the scalar dependent variable y
        and the independent variables X.
        """
        # insert a constant 1 as the first column for the bias term
        X = np.insert(X, 0, 1, axis=1)
        self.training_errors = []
        self.initialize_weights(n_features=X.shape[1])

        # do gradient descent for n_iterations
        for i in range(self.n_iterations):
            y_pred = X.dot(self.w)
            # calculate l2 loss (the 1/2 factor matches the gradient below)
            mse = np.mean(0.5 * (y - y_pred) ** 2) + self.regularization(self.w)
            self.training_errors.append(mse)
            # gradient of the l2 loss w.r.t. w: -X^T (y - y_pred), plus regularization
            grad_w = -(y - y_pred).dot(X) + self.regularization.grad(self.w)
            # update the weights along the negative gradient
            self.w -= self.learning_rate * grad_w

    def predict(self, X):
        # insert a constant 1 as the first column for the bias term
        X = np.insert(X, 0, 1, axis=1)
        y_pred = X.dot(self.w)
        return y_pred
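To see the base class in action, here is a minimal sketch, assuming a plain LinearRegression subclass with no regularization (the subclass and the synthetic data are illustrative, not part of the original code):

In [ ]:
class LinearRegression(Regression):
    # hypothetical subclass: keeps the default zero regularization
    def __init__(self, n_iterations=300, learning_rate=0.01):
        super(LinearRegression, self).__init__(n_iterations, learning_rate)

np.random.seed(0)
X = np.random.rand(100, 1)
y = 3 * X[:, 0] + 2 + 0.1 * np.random.randn(100)

model = LinearRegression()
model.fit(X, y)
print(model.w)  # roughly [2, 3]: fitted bias, then slope
print(model.training_errors[0], model.training_errors[-1])  # loss should shrink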
