#ifndef MLPACK_METHODS_LARS_LARS_HPP
#define MLPACK_METHODS_LARS_LARS_HPP

namespace regression {

  LARS(const bool useCholesky = false,
       const double lambda1 = 0.0,
       const double lambda2 = 0.0,
       const double tolerance = 1e-16);

  LARS(const bool useCholesky,
       const arma::mat& gramMatrix,
       const double lambda1 = 0.0,
       const double lambda2 = 0.0,
       const double tolerance = 1e-16);

  LARS(const arma::mat& data,
       const arma::rowvec& responses,
       const bool transposeData = true,
       const bool useCholesky = false,
       const double lambda1 = 0.0,
       const double lambda2 = 0.0,
       const double tolerance = 1e-16);

  LARS(const arma::mat& data,
       const arma::rowvec& responses,
       const bool transposeData,
       const bool useCholesky,
       const arma::mat& gramMatrix,
       const double lambda1 = 0.0,
       const double lambda2 = 0.0,
       const double tolerance = 1e-16);

  double Train(const arma::mat& data,
               const arma::rowvec& responses,
               arma::vec& beta,
               const bool transposeData = true);

  double Train(const arma::mat& data,
               const arma::rowvec& responses,
               const bool transposeData = true);

  void Predict(const arma::mat& points,
               arma::rowvec& predictions,
               const bool rowMajor = false) const;

  const std::vector<size_t>& ActiveSet() const { return activeSet; }

  const std::vector<arma::vec>& BetaPath() const { return betaPath; }

  const arma::vec& Beta() const { return betaPath.back(); }

  const std::vector<double>& LambdaPath() const { return lambdaPath; }

  template<typename Archive>
  void serialize(Archive& ar, const uint32_t /* version */);

  double ComputeError(const arma::mat& matX,
                      const arma::rowvec& y,
                      const bool rowMajor = false);

  arma::mat matGramInternal;

  const arma::mat* matGram;

  arma::mat matUtriCholFactor;

  std::vector<arma::vec> betaPath;

  std::vector<double> lambdaPath;

  std::vector<size_t> activeSet;

  std::vector<bool> isActive;

  std::vector<size_t> ignoreSet;

  std::vector<bool> isIgnored;

  void Deactivate(const size_t activeVarInd);

  void Activate(const size_t varInd);

  void Ignore(const size_t varInd);

  void ComputeYHatDirection(const arma::mat& matX,
                            const arma::vec& betaDirection,
                            arma::vec& yHatDirection);

  void InterpolateBeta();

  void CholeskyInsert(const arma::vec& newX, const arma::mat& X);

  void CholeskyInsert(double sqNormNewX, const arma::vec& newGramCol);

  void GivensRotate(const arma::vec::fixed<2>& x,
                    arma::vec::fixed<2>& rotatedX,
                    arma::mat& matG);

  void CholeskyDelete(const size_t colToKill);

#include "lars_impl.hpp"

double & Lambda1()
Modify the L1 regularization coefficient. 
 
bool & UseCholesky()
Modify whether to use the Cholesky decomposition. 
 
double Lambda1() const
Get the L1 regularization coefficient. 
 
void Predict(const arma::mat &points, arma::rowvec &predictions, const bool rowMajor=false) const
Predict y_i for each data point in the given data matrix using the currently-trained LARS model...
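
A minimal sketch of a call (the function name PredictExample, the model lars, and the matrix testPoints are all illustrative; the model is assumed to have been trained already):

#include <mlpack/methods/lars/lars.hpp>

// Predict responses for new points with an already-trained model; testPoints
// holds one observation per column.
void PredictExample(const mlpack::regression::LARS& lars,
                    const arma::mat& testPoints)
{
  arma::rowvec predictions;
  // rowMajor = false (the default): the points matrix is column-major, i.e.
  // one observation per column.
  lars.Predict(testPoints, predictions, false);
}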
 
const std::vector< arma::vec > & BetaPath() const
Access the set of coefficients after each iteration; the solution is the last element. 
 
double Train(const arma::mat &data, const arma::rowvec &responses, arma::vec &beta, const bool transposeData=true)
Run LARS. 
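
A minimal end-to-end sketch (the data here is random and purely illustrative, and the return value is assumed to be the error of the fit on the training data):

#include <mlpack/methods/lars/lars.hpp>

#include <iostream>

int main()
{
  // Hypothetical data: 10 dimensions, 1000 observations, one per column, so
  // transposeData = true (the default) applies.
  arma::mat data(10, 1000, arma::fill::randu);
  arma::rowvec responses(1000, arma::fill::randu);

  // LASSO-style model: Cholesky formulation off, small L1 penalty, no L2
  // penalty.
  mlpack::regression::LARS lars(false, 0.1);

  // beta receives the solution coefficients.
  arma::vec beta;
  const double error = lars.Train(data, responses, beta, true);

  std::cout << "error: " << error << ", nonzero coefficients: "
            << arma::accu(beta != 0) << std::endl;
  return 0;
}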
 
double Tolerance() const
Get the tolerance for maximum correlation during training. 
 
LARS & operator=(const LARS &other)
Copy the given LARS object. 
 
LARS(const bool useCholesky=false, const double lambda1=0.0, const double lambda2=0.0, const double tolerance=1e-16)
Set the parameters to LARS. 
 
class LARS
An implementation of LARS, a stage-wise homotopy-based algorithm for l1-regularized linear regression...
 
double & Lambda2()
Modify the L2 regularization coefficient. 
 
const arma::mat & MatUtriCholFactor() const
Access the upper triangular Cholesky factor. 
 
double ComputeError(const arma::mat &matX, const arma::rowvec &y, const bool rowMajor=false)
Compute cost error of the given data matrix using the currently-trained LARS model. 
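
For example, a trained model could be evaluated on held-out data along these lines (EvaluateExample and the argument names are illustrative; the test matrix uses the same column-major layout as in the training sketch above):

#include <mlpack/methods/lars/lars.hpp>

double EvaluateExample(mlpack::regression::LARS& lars,
                       const arma::mat& testData,
                       const arma::rowvec& testResponses)
{
  // rowMajor defaults to false, i.e. one observation per column.
  return lars.ComputeError(testData, testResponses);
}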
 
const std::vector< double > & LambdaPath() const
Access the set of values for lambda1 after each iteration; the solution is the last element...
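
BetaPath() and LambdaPath() can be walked together to inspect the whole regularization path; a sketch (PrintPathExample is illustrative, and the two paths are assumed to be parallel, one entry per iteration):

#include <mlpack/methods/lars/lars.hpp>

#include <algorithm>
#include <iostream>
#include <vector>

void PrintPathExample(const mlpack::regression::LARS& lars)
{
  const std::vector<arma::vec>& betas = lars.BetaPath();
  const std::vector<double>& lambdas = lars.LambdaPath();

  // One (lambda1, coefficient vector) pair per iteration; the last pair is
  // the final solution, also available via Beta().
  const size_t steps = std::min(betas.size(), lambdas.size());
  for (size_t i = 0; i < steps; ++i)
  {
    std::cout << "lambda1 = " << lambdas[i] << ", nonzero coefficients = "
              << arma::accu(betas[i] != 0) << std::endl;
  }
}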
 
const std::vector< size_t > & ActiveSet() const
Access the set of active dimensions. 
 
double Lambda2() const
Get the L2 regularization coefficient. 
 
void serialize(Archive &ar, const uint32_t)
Serialize the LARS model. 
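
Since serialize() follows mlpack's cereal-based pattern, a trained model can presumably be written to disk and reloaded with mlpack's data::Save()/data::Load() helpers; a rough sketch (the file name, object name, and SaveAndReloadExample are all arbitrary):

#include <mlpack/core.hpp>
#include <mlpack/methods/lars/lars.hpp>

void SaveAndReloadExample(mlpack::regression::LARS& lars)
{
  // Write the trained model to disk (format inferred from the extension),
  // aborting on failure.
  mlpack::data::Save("lars_model.bin", "lars", lars, true);

  // Load it back into a fresh, default-constructed model.
  mlpack::regression::LARS reloaded;
  mlpack::data::Load("lars_model.bin", "lars", reloaded, true);
}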
 
bool UseCholesky() const
Get whether to use the Cholesky decomposition. 
 
const arma::vec & Beta() const
Access the solution coefficients. 
 
double & Tolerance()
Modify the tolerance for maximum correlation during training.