What happened?
When using MPI, the flux may be computed incorrectly.
This occurs when the mesh levels on the two sides of a subdomain boundary differ, i.e. the cells adjacent to the boundary are refined to a higher level in the neighboring subdomain than in the current one. As a result, the flux across that boundary cannot be computed correctly, leading to potentially wrong results under MPI.
We need to find a way of integrating the values of the neighbor's higher levels into the current subdomain.
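To illustrate the suspected mechanism, here is a minimal, self-contained sketch (plain C++, not the samurai API; the velocity, the cell values, and the zero ghost fallback are made-up assumptions). At a level jump, conservation requires the coarse-face flux to equal the average of the fluxes through the two fine faces that cover it; if the fine values owned by the neighboring rank never reach the current subdomain, the two ranks compute different fluxes for the same face:

```cpp
#include <array>
#include <iostream>

// Upwind flux across a face: with velocity a < 0 the right-hand state is upwind.
double upwind_flux(double a, double u_left, double u_right)
{
    return a > 0 ? a * u_left : a * u_right;
}

int main()
{
    const double a        = -1.0; // advection velocity (right state is upwind)
    const double u_coarse = 1.0;  // coarse cell at level L, owned by this rank
    // Two fine cells at level L+1 sharing the same coarse face,
    // owned by the neighboring rank.
    const std::array<double, 2> u_fine{0.4, 0.6};

    // Conservative flux on the coarse face: the average of the two fine-face
    // fluxes (each fine face covers half of the coarse face).
    const double flux_exact = 0.5 * (upwind_flux(a, u_coarse, u_fine[0])
                                   + upwind_flux(a, u_coarse, u_fine[1]));

    // If the fine neighbor values are never transferred, the coarse rank
    // falls back on whatever sits in its ghost cell (here: zero).
    const double stale_ghost = 0.0;
    const double flux_ghost  = upwind_flux(a, u_coarse, stale_ghost);

    std::cout << "flux with fine data: " << flux_exact    // -0.5
              << ", flux with stale ghost: " << flux_ghost // 0
              << '\n';
}
```

In the MPI_size=1 run every face sees the fine data, so the fluxes agree; in the MPI_size=2 run the coarse side may be left with a stale ghost value, which would explain the cells that go missing exactly at the subdomain boundary.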
Here are the commands to reproduce the error:
mpirun -np [1,2] linear-convection-modified --max-level 5 --min-level 3 --mr-eps 0.01
(run once with -np 1 and once with -np 2, then compare the outputs)
Picture 1: Comparison of the field at step 3 with an aggressive colorbar scale. Grey cells are exactly zero. The blue cell is absent on the MPI side (left: MPI_size=1, right: MPI_size=2). In the MPI_size=2 case, the subdomain boundary coincides with the lower edge of the non-zero region.

Please note that I use a custom linear-convection case.
Input code
// Copyright 2018-2025 the samurai's authors
// SPDX-License-Identifier: BSD-3-Clause

#include <samurai/io/hdf5.hpp>
#include <samurai/io/restart.hpp>
#include <samurai/mr/adapt.hpp>
#include <samurai/mr/mesh.hpp>
#include <samurai/samurai.hpp>
#include <samurai/schemes/fv.hpp>

#include <filesystem>
namespace fs = std::filesystem;

template <class Field>
void save(const fs::path& path, const std::string& filename, const Field& u, const std::string& suffix = "")
{
    using mesh_t    = typename Field::mesh_t;
    using mesh_id_t = typename mesh_t::mesh_id_t;

    auto mesh   = u.mesh();
    auto level_ = samurai::make_scalar_field<std::size_t>("level", mesh);

    mpi::communicator world;
    int size = world.size();

    if (!fs::exists(path))
    {
        fs::create_directory(path);
    }

    samurai::for_each_cell(mesh,
                           [&](const auto& cell)
                           {
                               level_[cell] = cell.level;
                           });

    // Save both the cells-only view and the cells+ghosts view, so the ghost
    // values at the subdomain boundary can be inspected.
    samurai::save(path, fmt::format("{}_cells_size_{}{}", filename, size, suffix),
                  {true, true}, mesh[mesh_id_t::cells], u, level_);
    samurai::save(path, fmt::format("{}_cells_and_ghosts_size_{}{}", filename, size, suffix),
                  {true, true}, mesh[mesh_id_t::cells_and_ghosts], u, level_);
}

int main(int argc, char* argv[])
{
    auto& app = samurai::initialize("Finite volume example for the linear convection equation", argc, argv);

    static constexpr std::size_t dim = 2;
    using Config  = samurai::MRConfig<dim, 1>;
    using Box     = samurai::Box<double, dim>;
    using point_t = typename Box::point_t;

    std::cout << "------------------------- Linear convection -------------------------" << std::endl;

    //--------------------//
    // Program parameters //
    //--------------------//

    // Simulation parameters
    double left_box  = -1;
    double right_box = 1;

    // Time integration
    double Tf  = 3;
    double dt  = 0;
    double cfl = 0.95;
    double t   = 0.;
    std::string restart_file;

    // Multiresolution parameters
    std::size_t min_level = 1;
    std::size_t max_level = dim == 1 ? 6 : 4;
    double mr_epsilon     = 1e-4; // Threshold used by multiresolution
    double mr_regularity  = 1.;   // Regularity guess for multiresolution

    // Output parameters
    fs::path path        = fs::current_path();
    std::string filename = "linear_convection_" + std::to_string(dim) + "D";
    std::size_t nfiles   = 0;

    app.add_option("--left", left_box, "The left border of the box")->capture_default_str()->group("Simulation parameters");
    app.add_option("--right", right_box, "The right border of the box")->capture_default_str()->group("Simulation parameters");
    app.add_option("--Ti", t, "Initial time")->capture_default_str()->group("Simulation parameters");
    app.add_option("--Tf", Tf, "Final time")->capture_default_str()->group("Simulation parameters");
    app.add_option("--restart-file", restart_file, "Restart file")->capture_default_str()->group("Simulation parameters");
    app.add_option("--dt", dt, "Time step")->capture_default_str()->group("Simulation parameters");
    app.add_option("--cfl", cfl, "The CFL")->capture_default_str()->group("Simulation parameters");
    app.add_option("--min-level", min_level, "Minimum level of the multiresolution")->capture_default_str()->group("Multiresolution");
    app.add_option("--max-level", max_level, "Maximum level of the multiresolution")->capture_default_str()->group("Multiresolution");
    app.add_option("--mr-eps", mr_epsilon, "The epsilon used by the multiresolution to adapt the mesh")
        ->capture_default_str()
        ->group("Multiresolution");
    app.add_option("--mr-reg",
                   mr_regularity,
                   "The regularity criteria used by the multiresolution to "
                   "adapt the mesh")
        ->capture_default_str()
        ->group("Multiresolution");
    app.add_option("--path", path, "Output path")->capture_default_str()->group("Output");
    app.add_option("--filename", filename, "File name prefix")->capture_default_str()->group("Output");
    app.add_option("--nfiles", nfiles, "Number of output files")->capture_default_str()->group("Output");
    app.allow_extras();
    SAMURAI_PARSE(argc, argv);

    //--------------------//
    // Problem definition //
    //--------------------//

    point_t box_corner1, box_corner2;
    box_corner1.fill(left_box);
    box_corner2.fill(right_box);
    Box box(box_corner1, box_corner2);

    std::array<bool, dim> periodic;
    periodic.fill(false);

    samurai::MRMesh<Config> mesh;
    auto u = samurai::make_scalar_field<double>("u", mesh);

    if (restart_file.empty())
    {
        mesh = {box, min_level, max_level, periodic};

        // Initial solution: indicator function of an interval (1D) or a square (2D)
        u = samurai::make_scalar_field<double>("u",
                                               mesh,
                                               [](const auto& coords)
                                               {
                                                   if constexpr (dim == 1)
                                                   {
                                                       const auto& x = coords(0);
                                                       return (x >= -0.8 && x <= -0.3) ? 1. : 0.;
                                                   }
                                                   else
                                                   {
                                                       const auto& x = coords(0);
                                                       const auto& y = coords(1);
                                                       return (x >= -0.8 && x <= -0.3 && y >= 0.3 && y <= 0.8) ? 1. : 0.;
                                                   }
                                               });
    }
    else
    {
        samurai::load(restart_file, mesh, u);
    }

    samurai::make_bc<samurai::Dirichlet<1>>(u, 0.);

    auto unp1 = samurai::make_scalar_field<double>("unp1", mesh);

    // Intermediate fields for the RK3 scheme
    auto u1 = samurai::make_scalar_field<double>("u1", mesh);
    auto u2 = samurai::make_scalar_field<double>("u2", mesh);
    unp1.fill(0);
    u1.fill(0);
    u2.fill(0);

    // Convection operator
    samurai::VelocityVector<dim> velocity;
    velocity.fill(1);
    if constexpr (dim == 2)
    {
        velocity(1) = -1;
    }
    auto conv = samurai::make_convection_upwind<decltype(u)>(velocity);

    //--------------------//
    //   Time iteration   //
    //--------------------//

    if (dt == 0)
    {
        // CFL condition on the finest level
        double dx             = mesh.cell_length(max_level);
        double sum_velocities = xt::sum(xt::abs(velocity))();
        dt                    = cfl * dx / sum_velocities;
    }

    auto MRadaptation = samurai::make_MRAdapt(u);
    MRadaptation(mr_epsilon, mr_regularity);

    double dt_save    = nfiles == 0 ? dt : Tf / static_cast<double>(nfiles);
    std::size_t nsave = 0, nt = 0;

    if (nfiles != 1)
    {
        std::string suffix = fmt::format("_ite_{}", nsave++);
        save(path, filename, u, suffix);
    }

    while (t != Tf)
    {
        // Move to next timestep
        t += dt;
        if (t > Tf)
        {
            dt += Tf - t; // shrink the last step so that t lands exactly on Tf
            t = Tf;
        }
        std::cout << fmt::format("iteration {}: t = {:.2f}, dt = {}", nt++, t, dt) << std::flush;

        // Mesh adaptation
        MRadaptation(mr_epsilon, mr_regularity);
        samurai::update_ghost_mr(u);
        unp1.resize();
        u1.resize();
        u2.resize();
        u1.fill(0);
        u2.fill(0);

        // Forward Euler step (the TVD-RK3 stages are left commented out below)
        unp1 = u - dt * conv(u);
        samurai::update_ghost_mr(unp1);

        // TVD-RK3 (SSPRK3)
        // u1 = u - dt * conv(u);
        // samurai::update_ghost_mr(u1);
        // u2 = 3. / 4 * u + 1. / 4 * (u1 - dt * conv(u1));
        // samurai::update_ghost_mr(u2);
        // unp1 = 1. / 3 * u + 2. / 3 * (u2 - dt * conv(u2));

        // u <-- unp1
        std::swap(u.array(), unp1.array());

        // Save the result
        if (nfiles == 0 || t >= static_cast<double>(nsave + 1) * dt_save || t == Tf)
        {
            if (nfiles != 1)
            {
                std::string suffix = fmt::format("_ite_{}", nsave++);
                save(path, filename, u, suffix);
            }
            else
            {
                save(path, filename, u);
            }
        }
        std::cout << std::endl;
    }

    if constexpr (dim == 1)
    {
        std::cout << std::endl;
        std::cout << "Run the following command to view the results:" << std::endl;
        std::cout << "python <<path to samurai>>/python/read_mesh.py " << filename << "_ite_ --field u level --start 0 --end " << nsave
                  << std::endl;
    }

    samurai::finalize();
    return 0;
}
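For reference, here is a minimal sketch of the kind of exchange suggested in the description (plain Boost.MPI, not samurai internals; the two-rank layout, the tag, and the averaging projection are illustrative assumptions): the rank owning the fine side of the partition boundary sends its level L+1 boundary values, and the coarse side projects them onto its ghost cell before the fluxes are assembled.

```cpp
#include <boost/mpi.hpp>
#include <boost/serialization/vector.hpp>
#include <iostream>
#include <vector>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
    mpi::environment env(argc, argv);
    mpi::communicator world;

    if (world.size() != 2)
    {
        return 0; // the sketch assumes exactly two subdomains
    }

    const int tag = 0;
    if (world.rank() == 1)
    {
        // Rank 1 owns two fine cells (level L+1) touching the partition boundary.
        std::vector<double> fine_boundary{0.4, 0.6};
        world.send(0, tag, fine_boundary);
    }
    else
    {
        std::vector<double> fine_boundary;
        world.recv(1, tag, fine_boundary);

        // Project the fine neighbor values onto the coarse ghost (level L)
        // so that the flux computed on this side matches the fine side.
        const double coarse_ghost = 0.5 * (fine_boundary[0] + fine_boundary[1]);
        std::cout << "coarse ghost value = " << coarse_ghost << '\n';
    }
    return 0;
}
```

In samurai itself this projection would presumably have to happen inside the MPI ghost update (update_ghost_mr), but I have not located the exact spot.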
What expected?
We expect the solution to be identical whether it is run on one or several MPI processes. In particular, since the initial square and the velocity (1, -1) are both invariant under the reflection (x, y) -> (-y, -x), the solution should stay perfectly symmetric even in MPI.
What is your operating system?
Linux
How did you install our software?
from source
Software version
0.23.0
Relevant log output
Code of Conduct