# regression_nodes.py -- mdp/nodes, Modular toolkit for Data Processing (MDP 2.6)
from mdp import numx, numx_linalg, utils, Node, NodeException, TrainingException
from mdp.utils import mult

# ??? For the future: add an optional second phase to compute the
# residuals and the significance of the slope.

class LinearRegressionNode(Node):
    """Compute least-square, multivariate linear regression on the input
    data, i.e., learn coefficients b_j so that

      y_i = b_0 + b_1 x_1 + ... b_N x_N ,

    for i = 1 ... M, minimizes the square error given the training x's
    and y's.

    This is a supervised learning node, and requires input data x and
    target data y to be supplied during training (see 'train'
    docstring).

    Internal variables of interest:
    self.beta -- the coefficients of the linear regression
    """

    def __init__(self, with_bias=True, use_pinv=False,
                 input_dim=None, output_dim=None, dtype=None):
        """
        Input arguments:

        with_bias -- If True, the linear model includes a constant term
                         True:  y_i = b_0 + b_1 x_1 + ... b_N x_N
                         False: y_i =       b_1 x_1 + ... b_N x_N
                     If present, the constant term is stored in the first
                     column of self.beta

        use_pinv -- If True, use the pseudo-inverse instead of the plain
                    inverse to compute the regression coefficients. This is
                    more robust when X^T X is singular or ill-conditioned,
                    e.g., when some input variables are linearly dependent
                    (see the sketch at the end of this module)
        """
        super(LinearRegressionNode, self).__init__(input_dim, output_dim, dtype)
        
        self.with_bias = with_bias
        self.use_pinv = use_pinv

        # for the linear regression estimator we need two terms
        # the first one is X^T X
        self._xTx = None
        # the second one is X^T Y
        self._xTy = None

        # keep track of how many data points have been sent
        self._tlen = 0

        # final regression coefficients
        # if with_bias=True, beta includes the bias term in the first column
        self.beta = None

    def _get_supported_dtypes(self):
        return ['float32', 'float64']

    def is_invertible(self):
        return False

    def _check_train_args(self, x, y):
        # set output_dim if necessary
        if self._output_dim is None:
            self._set_output_dim(y.shape[1])
        # check output dimensionality
        self._check_output(y)
        if y.shape[0] != x.shape[0]:
            msg = ("The number of output points should be equal to the "
                   "number of datapoints (%d != %d)" % (y.shape[0], x.shape[0]))
            raise TrainingException(msg)

    def _train(self, x, y):
        """
        Additional input arguments:
        y -- array of size (x.shape[0], output_dim) that contains the
             observed outputs corresponding to the input x's.
        """
        # initialize internal vars if necessary
        if self._xTx is None:
            if self.with_bias:
                x_size = self._input_dim + 1
            else:
                x_size = self._input_dim
            self._xTx = numx.zeros((x_size, x_size), self._dtype)
            self._xTy = numx.zeros((x_size, self._output_dim), self._dtype)

        if self.with_bias:
            x = self._add_constant(x)

        # update the sufficient statistics; these sums are additive over
        # data chunks, which is what makes incremental training possible
        self._xTx += mult(x.T, x)
        self._xTy += mult(x.T, y)
        self._tlen += x.shape[0]

    def _stop_training(self):
        try:
            if self.use_pinv:
                invfun = utils.pinv
            else:
                invfun = utils.inv
            inv_xTx = invfun(self._xTx)
        except numx_linalg.LinAlgError as exception:
            errstr = (str(exception) + 
                      "\n Input data may be redundant (i.e., some of the " +
                      "variables may be linearly dependent).")
            raise NodeException(errstr)

        # solve the normal equations: beta = (X^T X)^{-1} (X^T Y)
        self.beta = mult(inv_xTx, self._xTy)

        # discard the accumulated statistics; they are no longer needed
        del self._xTx
        del self._xTy

    def _execute(self, x):
        if self.with_bias:
            x = self._add_constant(x)
        return mult(x, self.beta)

    def _add_constant(self, x):
        """Add a constant term to the vector 'x'.
        x -> [1 x]
        """
        return numx.concatenate((numx.ones((x.shape[0], 1),
                                           dtype=self.dtype), x), axis=1)
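

if __name__ == '__main__':
    # Illustrative usage sketch -- an addition for exposition, not part of
    # the original MDP 2.6 module. It recovers known coefficients from
    # noisy data; the sample data and all names below are hypothetical.
    import numpy as np

    rng = np.random.RandomState(0)
    x = rng.uniform(-1.0, 1.0, size=(500, 2))
    # true model: y = 1 + 2*x1 - 3*x2, plus a little observation noise
    y = (1.0 + 2.0 * x[:, [0]] - 3.0 * x[:, [1]]
         + 0.01 * rng.randn(500, 1))

    node = LinearRegressionNode(with_bias=True)
    node.train(x, y)       # train() may be called repeatedly, chunk by chunk
    node.stop_training()   # solves the normal equations for self.beta
    print(node.beta)       # approx. [[1.], [2.], [-3.]] -- bias term first
    y_hat = node.execute(x)  # predictions, same shape as y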
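

if __name__ == '__main__':
    # Sketch of the use_pinv option, again with hypothetical data: an
    # exactly duplicated input column makes X^T X singular, so the plain
    # inverse raises an exception, while the pseudo-inverse still returns
    # the minimum-norm least-squares solution.
    import numpy as np

    rng = np.random.RandomState(1)
    x1 = rng.uniform(-1.0, 1.0, size=(200, 1))
    x = np.concatenate((x1, x1), axis=1)  # second column duplicates the first
    y = 4.0 * x1                          # any b_1 + b_2 = 4 fits exactly

    node = LinearRegressionNode(with_bias=False, use_pinv=True)
    node.train(x, y)
    node.stop_training()
    print(node.beta)  # approx. [[2.], [2.]] -- the minimum-norm split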

