Example
Parallel Multilevel MCMC with a parallel model

This example defines a hierarchy of simple Gaussian target densities and applies Multilevel MCMC (MLMCMC) to it. The MLMCMC method itself allows massive parallelization, and the forward model on each level may be parallelized as well. Simple Gaussian targets are used here to keep the models trivial, but the example can serve as a template for models that are themselves run in parallel.
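The per-level models themselves are defined in ParallelProblem.h, which is not shown on this page. As a rough, hypothetical sketch of what such a per-level target could look like, the snippet below builds a two-dimensional Gaussian sampling problem whose mean is shifted slightly on coarser levels; the function name, dimension, and numbers are purely illustrative and are not the actual contents of ParallelProblem.h.

#include <cmath>
#include <memory>

#include "MUQ/Modeling/Distributions/Gaussian.h"
#include "MUQ/SamplingAlgorithms/SamplingProblem.h"

// Hypothetical per-level target: a 2D Gaussian whose mean is perturbed on
// coarser levels, mimicking the discretization error of a real model hierarchy.
std::shared_ptr<muq::SamplingAlgorithms::AbstractSamplingProblem>
GaussianProblemAtLevel(unsigned int level) {
  Eigen::VectorXd mu(2);
  mu << 1.0, 2.0;
  // Perturbation of the mean that vanishes as the level increases.
  mu *= 1.0 + 0.1 * std::pow(2.0, -static_cast<double>(level));

  Eigen::MatrixXd cov(2, 2);
  cov << 0.7, 0.6,
         0.6, 1.0;

  auto targetDensity = std::make_shared<muq::Modeling::Gaussian>(mu, cov)->AsDensity();
  return std::make_shared<muq::SamplingAlgorithms::SamplingProblem>(targetDensity);
}

An MIComponentFactory (MyMIComponentFactory in this example) then combines such per-level problems with proposals and starting points into the hierarchy consumed by the parallel MIMCMC algorithm.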


#include "MUQ/SamplingAlgorithms/SLMCMC.h"
#include "MUQ/SamplingAlgorithms/GreedyMLMCMC.h"
#include "MUQ/SamplingAlgorithms/MIMCMC.h"

#include "MUQ/Modeling/Distributions/Gaussian.h"
#include "MUQ/Modeling/Distributions/Density.h"

#include "MUQ/SamplingAlgorithms/MHKernel.h"
#include "MUQ/SamplingAlgorithms/MHProposal.h"
#include "MUQ/SamplingAlgorithms/CrankNicolsonProposal.h"
#include "MUQ/SamplingAlgorithms/SamplingProblem.h"
#include "MUQ/SamplingAlgorithms/SubsamplingMIProposal.h"

#include "MUQ/SamplingAlgorithms/MIComponentFactory.h"

#include <boost/property_tree/ptree.hpp>

namespace pt = boost::property_tree;
using namespace muq::Modeling;
using namespace muq::SamplingAlgorithms;
using namespace muq::Utilities;

#include "MUQ/SamplingAlgorithms/ParallelMIMCMCWorker.h"
#include "MUQ/SamplingAlgorithms/ParallelFixedSamplesMIMCMC.h"

#include "ParallelProblem.h"

#include <ctime>

int main(int argc, char **argv){
  spdlog::set_level(spdlog::level::debug);

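  // Initialize MPI; the parcer communicator wraps MPI_COMM_WORLD by default.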
  MPI_Init(&argc, &argv);
  auto comm = std::make_shared<parcer::Communicator>();

  // Name trace according to current time stamp
  std::time_t result = std::time(nullptr);
  std::string timestamp = std::asctime(std::localtime(&result));
  auto tracer = std::make_shared<OTF2Tracer>("trace", timestamp);

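  // Number of samples per level (level 0 is the coarsest, level 3 the finest),
  // burn-in length, the subsampling rate used for coarse proposals, and whether
  // worker groups may be rescheduled dynamically during the run.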
  pt::ptree pt;
  pt.put("NumSamples_0", 1e3);
  pt.put("NumSamples_1", 5e2);
  pt.put("NumSamples_2", 1e2);
  pt.put("NumSamples_3", 1e2);
  pt.put("MCMC.BurnIn", 100);
  pt.put("MLMCMC.Subsampling", 10);
  pt.put("MLMCMC.Scheduling", true);


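  // The component factory (defined in ParallelProblem.h) provides the sampling
  // problem, proposals, and starting points for each level; work is assigned
  // to the available MPI ranks by a simple round-robin static load balancer.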
  auto componentFactory = std::make_shared<MyMIComponentFactory>(pt);
  StaticLoadBalancingMIMCMC parallelMIMCMC(pt, componentFactory,
                                           std::make_shared<RoundRobinStaticLoadBalancer>(),
                                           std::make_shared<parcer::Communicator>(),
                                           tracer);

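  // Only the root rank drives the sampling; the remaining ranks act as
  // workers and are coordinated internally via MPI messages.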
  if (comm->GetRank() == 0) {
    parallelMIMCMC.Run();
    Eigen::VectorXd meanQOI = parallelMIMCMC.MeanQOI();
    std::cout << "mean QOI: " << meanQOI.transpose() << std::endl;
  }
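  // Write the collected samples to an HDF5 file and shut down the worker ranks.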
  parallelMIMCMC.WriteToFile("parallelMIMCMC.h5");
  parallelMIMCMC.Finalize();
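  // Write the OTF2 performance trace recorded during the run.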
  tracer->write();

  MPI_Finalize();
}
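Since the root rank coordinates the run and the remaining ranks act as workers, the example has to be launched with several MPI ranks, for example via mpirun with a handful of processes (the exact executable name depends on how the example is built). Rank 0 prints the estimated mean quantity of interest, and the collected samples end up in parallelMIMCMC.h5.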

Acknowledgments

This material is based upon work supported by the National Science Foundation under Grant No. 1550487.

Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the National Science Foundation.

This material is based upon work supported by the US Department of Energy, Office of Advanced Scientific Computing Research, SciDAC (Scientific Discovery through Advanced Computing) program under awards DE-SC0007099 and DE-SC0021226, for the QUEST and FASTMath SciDAC Institutes.