#include <Eigen/Cholesky>
#include <Eigen/Eigenvalues>

namespace pt = boost::property_tree;
Regression::Regression(pt::ptree const& pt) :
  WorkPiece(),
  order(pt.get<unsigned int>("Order")),
  inputDim(pt.get<unsigned int>("InputSize")),
  alpha(std::fmin(pt.get<double>("MaxPoisednessRadius", 1.0), 1.0)) {
  const std::string type = pt.get<std::string>("ExpansionType", "TotalOrder");
  if( type=="TotalOrder" ) {
    // ...
  } else if( type=="Hyperbolic" ) {
    const double q = pt.get<double>("NormScale", 1.0);
    // ...
  } else if( type=="Diagonal" ) {
    // ...
    std::shared_ptr<MultiIndex> constantIndex = std::make_shared<MultiIndex>(inputDim);
    multi->AddActive(constantIndex);
    for( unsigned int i=1; i<=order; ++i ) {
      for( unsigned int d=0; d<inputDim; ++d ) {
        auto singleIndex = MultiIndexFactory::CreateSingleTerm(inputDim, d, i);
        multi->AddActive(singleIndex);
      }
    }
  } else {
    std::cerr << std::endl;
    std::cerr << "ERROR: invalid polynomial expansion type in Regression.cpp" << std::endl;
    std::cerr << "\tChoose from: 'TotalOrder', 'Hyperbolic', or 'Diagonal'" << std::endl;
    std::cerr << std::endl;
  optPt.put("Ftol.AbsoluteTolerance", pt.get<double>("PoisednessConstant.Ftol.AbsoluteTolerance", 1.0e-8));
  optPt.put("Ftol.RelativeTolerance", pt.get<double>("PoisednessConstant.Ftol.RelativeTolerance", 1.0e-8));
  optPt.put("Xtol.AbsoluteTolerance", pt.get<double>("PoisednessConstant.Xtol.AbsoluteTolerance", 1.0e-8));
  optPt.put("Xtol.RelativeTolerance", pt.get<double>("PoisednessConstant.Xtol.RelativeTolerance", 1.0e-8));
  optPt.put("ConstraintTolerance", pt.get<double>("PoisednessConstant.ConstraintTolerance", 1.0e-8));
  optPt.put("MaxEvaluations", pt.get<unsigned int>("PoisednessConstant.MaxEvaluations", 1000));
  optPt.put("Algorithm", pt.get<std::string>("PoisednessConstant.Algorithm", "MMA"));

// Regression::EvaluateImpl (excerpt)
  if(inputs.size()==0) { return; }
  // ...
  std::vector<Eigen::VectorXd> centered(inputs.size());
  for( unsigned int i=0; i<inputs.size(); ++i ) {
    // ...
  }
  // ...
  assert(coeff.cols()==vand.cols());

// Regression::NumInterpolationPoints (error branch)
  std::cerr << std::endl << std::endl << "ERROR: Not able to compute the number of points required for interpolation" << std::endl
            << "\tPolynomialRegressor.cpp NumInterpolationPoints()" << std::endl;

void Regression::Fit(std::vector<Eigen::VectorXd> xs, std::vector<Eigen::VectorXd> const& ys, Eigen::VectorXd const& center) {
  assert(xs.size()==ys.size());

void Regression::Fit(std::vector<Eigen::VectorXd> const& xs, std::vector<Eigen::VectorXd> const& ys) {
  assert(xs.size()==ys.size());
  // ...
  Fit(xs, ys, Eigen::VectorXd::Zero(xs[0].size()));

// Regression::ComputeCoefficients (excerpt)
  assert(xs.size()==ys.size());
  // ... (interp: required number of interpolation points, computation elided)
  if( xs.size()<interp ) {
    std::cerr << std::endl << "ERROR: Regression requires " << interp << " points to interpolate but only " << xs.size() << " are given." << std::endl;
    std::cerr << "\tTry fitting the regression with at least " << interp+1 << " points." << std::endl << std::endl;
    // ...
  }
  // ...
  vand = vand.transpose()*vand;
  auto solver = vand.colPivHouseholderQr();
  return solver.solve(rhs).transpose();
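  // Note: the coefficients come from the normal equations (V^T V) c = V^T Y, where V is the
  // Vandermonde matrix and Y stacks the outputs row-wise (see ComputeCoefficientsRHS below);
  // the system is solved with a column-pivoted Householder QR factorization and transposed,
  // so each row of coeff holds the basis coefficients for one output dimension.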

// Regression::ComputeCoefficientsRHS (excerpt)
  assert(ys_data.size()>0);
  const unsigned int dim = ys_data[0].size();
  Eigen::MatrixXd ys = Eigen::MatrixXd::Constant(ys_data.size(), dim, std::numeric_limits<double>::quiet_NaN());
  for( unsigned int i=0; i<ys_data.size(); ++i ) {
    ys.row(i) = ys_data[i];
  }
  return vand.transpose()*ys;

// Regression::VandermondeMatrix (excerpt)
  const unsigned int N = xs.size();
  const unsigned int M = multi->Size();
  Eigen::MatrixXd vand = Eigen::MatrixXd::Ones(N, M);

  for( unsigned int i=0; i<M; ++i ) {
    const Eigen::RowVectorXi alpha = multi->at(i)->GetVector();
    for( unsigned int pt=0; pt<N; ++pt ) {
      const Eigen::VectorXd& pnt = xs[pt];
      assert(alpha.size()==pnt.size());
      for( unsigned int v=0; v<alpha.size(); ++v ) {
        vand(pt, i) *= boost::any_cast<double const>(poly->Evaluate((unsigned int)alpha(v), pnt[v])[0]);
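        // Each entry is a product over the input dimensions:
        //   vand(pt, i) = prod_v phi_{alpha(v)}( xs[pt](v) ),
        // where phi_k is the degree-k polynomial of the scalar basis "poly" and alpha is the
        // multi-index of the i-th expansion term.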

Eigen::ArrayXd Regression::CenterPoints(std::vector<Eigen::VectorXd>& xs, Eigen::VectorXd const& center, unsigned int const kn) const {
  const bool zeroCenter = center.norm()<std::numeric_limits<double>::epsilon();
  // ...
  for( auto it=xs.begin(); it!=xs.end(); ++it ) {
    // ...
  }
  // ...
  Eigen::ArrayXd radius = Eigen::ArrayXd::Zero(center.size());
  for( auto it : xs ) { radius = radius.max(it.array().abs()); }
  // ...
  for( auto it=xs.begin(); it!=xs.end(); ++it ) { (*it).array() *= radius.inverse(); }
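  // After subtracting the center (loop body elided above), each coordinate is divided by the
  // largest absolute value over the sample, so the centered points lie in [-1, 1]; the
  // per-coordinate radius is returned so callers can map results back to the original coordinates.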

// Regression::PoisednessConstant (excerpt)
  const unsigned int N = xs.size();
  if( kn<0 ) { kn = N; }
  assert(xs.size()>=kn);

  const Eigen::ArrayXd radius = CenterPoints(xs, center, kn);
  // ...
  std::vector<Eigen::VectorXd> euclidean(N, Eigen::VectorXd::Zero(1));
  // ...
  std::vector<Eigen::RowVectorXd> lagrangeCoeff(N);
  // ...
  for( unsigned int i=0; i<N; ++i ) {
    if( i>0 ) { euclidean[i-1] = Eigen::VectorXd::Zero(1); }
    euclidean[i] = Eigen::VectorXd::Ones(1);
    // ...
  }

  auto cost = std::make_shared<PoisednessCost>(shared_from_this(), lagrangeCoeff, inputDim);
  auto constraint = std::make_shared<PoisednessConstraint>(inputDim, alpha);
  // ...
  auto opt = std::make_shared<muq::Optimization::NLoptOptimizer>(cost, optPt);
  opt->AddInequalityConstraint(constraint);

  std::vector<Eigen::VectorXd> inputs;
  inputs.push_back((Eigen::VectorXd)Eigen::VectorXd::Zero(inputDim));

  const std::pair<Eigen::VectorXd, double>& soln = /* optimizer solve (elided) */;
  assert(soln.second<=0.0);
  // ...
  return std::pair<Eigen::VectorXd, double>((radius*soln.first.array()).matrix()+center, -soln.second);
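  // The optimizer works in the centered/scaled coordinates, so the maximizer is mapped back
  // through radius and center before being returned; since the cost is the negative norm of the
  // Lagrange-polynomial values (see PoisednessCost below), -soln.second is the poisedness
  // constant and is non-negative by the assert above.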

// Regression::ComputeBasisDerivatives (excerpt)
  const unsigned int numCoeff = multi->Size();
  gradient.resize(numCoeff);
  // ...
  for( unsigned int i=0; i<numCoeff; ++i ) {
    gradient.at(i) = Eigen::VectorXd::Constant(inputDim, std::numeric_limits<double>::quiet_NaN());
    // ...
    const Eigen::VectorXi multiIndex = multi->at(i)->GetVector();
    // ...
    for( unsigned int d1=0; d1<inputDim; ++d1 ) {
      // ... (elided loop over v: the d1-th factor is differentiated, the others are evaluated)
      result *= poly->DerivativeEvaluate(multiIndex(v), 1, point(v));
      // ...
      result *= boost::any_cast<double const>(poly->Evaluate((unsigned int)multiIndex(v), point(v))[0]);
      // ...
      gradient.at(i)(d1) = result;

// PoisednessCost::Cost (excerpt)
  const Eigen::RowVectorXd phi = parent->VandermondeMatrix(std::vector<Eigen::VectorXd>(1, x));
  // ...
  Eigen::VectorXd lambda(lagrangeCoeff.size());
  for( unsigned int i=0; i<lagrangeCoeff.size(); ++i ) { lambda(i) = phi.dot(lagrangeCoeff[i]); }
  // ...
  return -1.0*lambda.norm();
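  // lambda(i) is the i-th Lagrange polynomial (coefficients lagrangeCoeff[i]) evaluated at x via
  // the single-row Vandermonde matrix phi; returning the negative Euclidean norm lets the
  // minimizer maximize ||lambda(x)|| over the constraint region.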

// PoisednessCost::GradientImpl (excerpt)
  const Eigen::VectorXd& x = input[0];
  // ...
  const Eigen::RowVectorXd phi = parent->VandermondeMatrix(std::vector<Eigen::VectorXd>(1, x));
  // ...
  std::vector<Eigen::VectorXd> gradBasis;
  parent->ComputeBasisDerivatives(x, gradBasis);
  assert(gradBasis.size()==lagrangeCoeff[0].size());
  // ...
  gradient = Eigen::VectorXd::Zero(inputSizes(0));
  // ...
  Eigen::VectorXd lambda(lagrangeCoeff.size());
  for( unsigned int i=0; i<lagrangeCoeff.size(); ++i ) {
    lambda(i) = phi.dot(lagrangeCoeff[i]);
    for( unsigned int j=0; j<gradBasis.size(); ++j ) {
      gradient += lambda(i)*lagrangeCoeff[i](j)*gradBasis[j];
    }
  }
  // ...
  gradient *= -1.0*sensitivity(0)/lambda.norm();
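  // Chain rule for the cost -||lambda(x)||: d lambda(i)/dx = sum_j lagrangeCoeff[i](j)*gradBasis[j],
  // so the loop accumulates sum_i lambda(i)*d lambda(i)/dx and the result is scaled by
  // -sensitivity(0)/||lambda||.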

// PoisednessConstraint (excerpt)
PoisednessConstraint::PoisednessConstraint(unsigned int const inDim, double const alpha) :
  ModPiece(Eigen::VectorXi::Constant(1, inDim), Eigen::VectorXi::Ones(1)), alpha(alpha) {}

// PoisednessConstraint::EvaluateImpl (excerpt)
  const Eigen::VectorXd& x = input[0];
  outputs.resize(outputSizes[0]);
  // ...

void PoisednessConstraint::JacobianImpl(unsigned int const outputDimWrt, unsigned int const inputDimWrt, muq::Modeling::ref_vector<Eigen::VectorXd> const& input) {
  // ...
  assert(inputDimWrt==0);
  const Eigen::VectorXd& x = input[0];
  // ...
  jacobian.resize(inputSizes[0], outputSizes[0]);

static std::shared_ptr< IndexedScalarBasis > Construct(std::string const &polyName)
virtual void JacobianImpl(unsigned int const outputDimWrt, unsigned int const inputDimWrt, muq::Modeling::ref_vector< Eigen::VectorXd > const &input) override
PoisednessConstraint(unsigned int const inDim, double const alpha)
virtual void EvaluateImpl(muq::Modeling::ref_vector< Eigen::VectorXd > const &input) override
virtual void GradientImpl(unsigned int outWrt, unsigned int inWrt, muq::Modeling::ref_vector< Eigen::VectorXd > const &input, Eigen::VectorXd const &sensitivity) override
Compute the gradient of the cost function.
PoisednessCost(std::shared_ptr< Regression const > parent, std::vector< Eigen::RowVectorXd > const &lagrangeCoeff, unsigned int const inDim)
virtual double Cost() override
Eigen::ArrayXd currentRadius
Current radius of inputs.
boost::property_tree::ptree optPt
Parameters for the poisedness constant optimization.
const double alpha
The radius of the poisedness constraint.
void ComputeBasisDerivatives(Eigen::VectorXd const &point, std::vector< Eigen::VectorXd > &gradient) const
virtual void EvaluateImpl(muq::Modeling::ref_vector< boost::any > const &inputs) override
User-implemented function that determines the behavior of this muq::Modeling::WorkPiece.
const unsigned int order
The order of the regression.
std::shared_ptr< IndexedScalarBasis > poly
The polynomial basis (in one variable) used to compute the Vandermonde matrix.
Eigen::MatrixXd ComputeCoefficients(std::vector< Eigen::VectorXd > const &xs, std::vector< Eigen::VectorXd > const &ys) const
Compute the coefficients for the basis functions.
int NumInterpolationPoints() const
Eigen::ArrayXd CenterPoints(std::vector< Eigen::VectorXd > &xs)
Center the input points.
const unsigned int inputDim
The input dimension.
Eigen::MatrixXd ComputeCoefficientsRHS(Eigen::MatrixXd const &vand, std::vector< Eigen::VectorXd > const &ys_data) const
Compute the right hand side given data to compute the polynomial coefficients.
void Fit(std::vector< Eigen::VectorXd > xs, std::vector< Eigen::VectorXd > const &ys, Eigen::VectorXd const &center)
Compute the coefficients of the polynomial given data.
Eigen::MatrixXd coeff
Coefficients for the polynomial basis.
std::pair< Eigen::VectorXd, double > PoisednessConstant(std::vector< Eigen::VectorXd > xs, Eigen::VectorXd const &center, int kn=-1) const
Eigen::MatrixXd VandermondeMatrix(std::vector< Eigen::VectorXd > const &xs) const
Create the Vandermonde matrix.
Eigen::VectorXd currentCenter
Current center of the inputs.
std::shared_ptr< muq::Utilities::MultiIndexSet > multi
The multi-index set, so we know the order of each term.
Provides an abstract interface for defining vector-valued model components.
Base class for MUQ's modelling environment.
std::map< unsigned int, int > inputSizes
std::vector< boost::any > outputs
The outputs.
The cost function for an optimization routine.
std::vector< std::reference_wrapper< const T > > ref_vector
A vector of constant references to objects of type T.
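
A minimal usage sketch, assuming only the configuration keys and member functions shown above; the variable names (options, reg, xs, ys) are illustrative and the necessary MUQ, Eigen, and Boost includes and namespace qualifiers are omitted:

boost::property_tree::ptree options;
options.put("Order", 2);                     // polynomial order of the regression
options.put("InputSize", 3);                 // dimension of the input points
options.put("ExpansionType", "TotalOrder");  // or "Hyperbolic" / "Diagonal"

auto reg = std::make_shared<Regression>(options);

std::vector<Eigen::VectorXd> xs, ys;         // sample locations and observed values
// ... fill xs and ys with the same number of points, at least NumInterpolationPoints() of them ...
reg->Fit(xs, ys);                            // least-squares fit about the origin
// or, with an explicit expansion point:
// reg->Fit(xs, ys, center);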