NSSC/Exercise_01/main.cpp

/**
* NSSC Exercise 01: iterative (Jacobi) stencil solver for a 2D boundary value
* problem on the unit square [0, 1] x [0, 1].
*
* Built without USE_MPI, a single process solves the whole domain; built with
* USE_MPI, the domain is decomposed over a 1D or 2D Cartesian process grid and
* boundary values are exchanged between neighbouring ranks after every
* iteration.
*/
#include <stddef.h>
#include <iostream>
#include <iomanip>
#include <sstream>
#include <functional>
#define _USE_MATH_DEFINES /* enables math constants from cmath */
#include <cmath>
#ifdef USE_MPI
#include <mpi.h>
#endif
#include <fstream>
#include "Matrix.h"
#include "Solver.h"
#include "utils.h"
int main(int argn, char* argv[]) {
/******************************* MPI Setup ********************************/
#ifdef USE_MPI
// Initialize MPI
MPI_Init(nullptr, nullptr);
// Get MPI config
int mpi_size; /*< MPI pool size (a.k.a. total number of processes) */
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
#else
int mpi_size = 1;
#endif
/**************************** Parse Arguments *****************************/
size_t dim, resolution, iterations;
{
if (argn < 4) {
std::cerr << "usage: " << argv[0] << " <1D|2D> <resolution> <iterations>" << std::endl;
return -1;
} else if (argn > 4) {
std::cerr << "warning: ignoring all but the first three arguments" << std::endl;
}
if (std::string(argv[1]) == "1D") {
dim = 1;
} else if (std::string(argv[1]) == "2D") {
dim = 2;
} else {
std::cerr << "error: Parsing arg 1 <dim> failed" << std::endl;
return -1;
}
if (!(std::istringstream(argv[2]) >> resolution)) {
std::cerr << "error: Parsing arg 2 <resolution> failed" << std::endl;
return -1;
}
if (!(std::istringstream(argv[3]) >> iterations)) {
std::cerr << "error: Parsing arg 3 <iterations> failed" << std::endl;
return -1;
}
if (resolution < 10 || resolution > 65536) {
std::cerr << "error: arg 1 <resolution> " << resolution
<< " out of domain [10, 65536]" << std::endl;
return -1;
}
if (iterations > 65536) {
std::cerr << "error: arg 2 <iterations> " << iterations
<< "out of domain [0, 65536]" << std::endl;
return -1;
}
}
// Report configuration
std::cout << std::setfill(' ')
#ifdef USE_MPI
<< "using MPI: true\n"
#else
<< "using MPI: false\n"
#endif
<< "nr. processes: " << std::setw(5) << mpi_size << '\n'
<< "resolution: " << std::setw(5) << resolution << '\n'
<< "iterations: " << std::setw(5) << iterations << std::endl;
/**************** Setup (local) PDE + Boundary Conditions *****************/
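// k: PDE parameter (here pi) used in the right hand side below; h: uniform
// grid spacing for `resolution` grid points on [0, 1], boundaries included.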
const double k = M_PI;
const double h = 1.0 / static_cast<double>(resolution - 1);
#ifdef USE_MPI
// Group processes into a Cartesian communication topology. Start with the
// dimensions of a 1D grid.
int mpi_dims[2] = {mpi_size, 1};
// In case of a 2D grid, partition both axes as equally as possible. Note that
// `MPI_Dims_create` does not guarantee this: for 9 processes it was observed
// to generate a 9 x 1 grid, whereas the following computes 3 x 3.
if (dim == 2) {
two_factors(mpi_size, mpi_dims);
}
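// `two_factors` (from utils.h) is assumed to split mpi_size into two factors
// as close to each other as possible (e.g. 9 -> 3 x 3, 12 -> 4 x 3) and to
// store them in mpi_dims.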
// Report grid topology
std::cout << "topology: " << dim << "D";
if (dim == 2) {
std::cout << " (" << mpi_dims[0] << " x " << mpi_dims[1] << ")";
}
std::cout << std::endl;
// Set up a Cartesian topology communicator (non-periodic)
const int mpi_periods[2] = {false, false};
MPI_Comm mpi_comm_grid;
MPI_Cart_create(
MPI_COMM_WORLD, // Old Communicator
2, // number of dimensions
mpi_dims, // grid dimensions
mpi_periods, // grid periodicity
true, // allow process reordering
&mpi_comm_grid
);
// Get MPI rank
int mpi_world_rank; /*< MPI world rank (a.k.a. global process ID) */
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank);
// Get MPI rank with respect to the grid communicator
int mpi_grid_rank; /*< MPI grid rank (a.k.a. grid process ID) */
MPI_Comm_rank(mpi_comm_grid, &mpi_grid_rank);
// Get coordinates with respect to the grid communicator
int mpi_coords[2];
MPI_Cart_coords(mpi_comm_grid, mpi_grid_rank, 2, mpi_coords);
// Get direct neighbours in the communication grid
struct { int north; int east; int south; int west; } mpi_neighbours;
// Get X-direction (dim 0) neighbours
MPI_Cart_shift(
mpi_comm_grid, // grid communicator
0, // axis index (0 <-> X)
1, // offset
&(mpi_neighbours.west), // neighbour at negative offset
&(mpi_neighbours.east) // neighbour at positive offset
);
// Get Y-direction (dim 1) neighbours
MPI_Cart_shift(
mpi_comm_grid,
1, // axis index (1 <-> Y)
1,
&(mpi_neighbours.south),
&(mpi_neighbours.north)
);
// Calc local (base) grid size (without ghost layers)
size_t nx = partition(resolution, mpi_dims[0], mpi_coords[0]);
size_t ny = partition(resolution, mpi_dims[1], mpi_coords[1]);
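// `partition` and `partition_sum` (from utils.h) are assumed to implement a
// 1D block distribution: `partition(n, parts, idx)` yields the number of grid
// points owned by partition `idx`, `partition_sum` the cumulative count of
// points up to a given partition (used below to place the local subdomain).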
// Add ghost layers for each (existing) neighbour
ny += (mpi_neighbours.north != MPI_PROC_NULL);
nx += (mpi_neighbours.east != MPI_PROC_NULL);
ny += (mpi_neighbours.south != MPI_PROC_NULL);
nx += (mpi_neighbours.west != MPI_PROC_NULL);
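// Each ghost layer mirrors the neighbouring rank's outermost grid line and is
// refreshed after every Jacobi sweep by the halo exchange in the iteration
// loop below.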
// Compute local domain [xmin, xmax] x [ymin, ymax]
double xmin = (mpi_neighbours.west == MPI_PROC_NULL) ? 0.0
: h * partition_sum(resolution, mpi_dims[0], mpi_coords[0] - 1);
double xmax = (mpi_neighbours.east == MPI_PROC_NULL) ? 1.0
: h * (partition_sum(resolution, mpi_dims[0], mpi_coords[0]) + 1);
double ymin = (mpi_neighbours.south == MPI_PROC_NULL) ? 0.0
: h * partition_sum(resolution, mpi_dims[1], mpi_coords[1] - 1);
double ymax = (mpi_neighbours.north == MPI_PROC_NULL) ? 1.0
: h * (partition_sum(resolution, mpi_dims[1], mpi_coords[1]) + 1);
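// Physical boundaries are clamped to 0 / 1; towards interior neighbours the
// bounds follow the block distribution above, so ghost nodes receive the
// correct global coordinates.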
// /*************************__________DEBUG__________*************************/
// std::cout << " (" << mpi_coords[0] << ", " << mpi_coords[1] << ") ";
// std::cout << " -> "
// << "N: " << ((mpi_neighbours.north != MPI_PROC_NULL) ? '1' : '.') << ", "
// << "E: " << ((mpi_neighbours.east != MPI_PROC_NULL) ? '1' : '.') << ", "
// << "S: " << ((mpi_neighbours.south != MPI_PROC_NULL) ? '1' : '.') << ", "
// << "W: " << ((mpi_neighbours.west != MPI_PROC_NULL) ? '1' : '.');
// std::cout << " -> "
// << nx << " x " << ny;
// std::cout << " -> " << std::setprecision(2)
// << "[" << std::setw(4) << std::left << xmin
// << ", " << std::setw(4) << std::left << xmax << "]"
// << " x "
// << "[" << std::setw(4) << std::left << ymin
// << ", " << std::setw(4) << std::left << ymax << "]";
// std::cout << std::endl;
// /*************************__________DEBUG__________*************************/
// Create an MPI vector type (for boundary condition exchange).
// In the column-major matrix layout, columns are contiguous in memory while
// the elements of a matrix row are strided. This type describes such a
// strided row and is used below for the east/west boundary exchanges.
MPI_Datatype mpi_type_row;
MPI_Type_vector(ny, 1, nx, MPI_DOUBLE, &mpi_type_row);
MPI_Type_commit(&mpi_type_row);
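// Illustration: for a column-major nx x ny matrix, element (i, j) lives at
// index i + j * nx, so row i occupies indices i, i + nx, i + 2*nx, ...,
// i.e. ny blocks of one double with stride nx, which is exactly mpi_type_row.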
#else
// Discretization grid resolution
size_t nx = resolution, ny = resolution;
// PDE domain borders [xmin, xmax] x [ymin, ymax] = [0, 1] x [0, 1]
double xmin = 0.0, xmax = 1.0, ymin = 0.0, ymax = 1.0;
#endif
// Declare right hand side function f(x, y) = k^2 sin(2 pi x) sinh(2 pi y)
std::function<double(double, double)> fun = [k](double x, double y) {
return k * k * sin(2 * M_PI * x) * sinh(2 * M_PI * y);
};
// Boundary conditions
/** North boundary condition gN(x) = sin(2 pi x) sinh(2 pi) */
std::function<double(double)> gN;
#ifdef USE_MPI
// Check if local north boundary is part of the global north boundary
if (mpi_neighbours.north == MPI_PROC_NULL) {
#endif
// The local north boundary equals the global north boundary
gN = [](double x) { return sin(2 * M_PI * x) * sinh(2 * M_PI); };
#ifdef USE_MPI
} else {
// Local north boundary is zero, like all other boundaries
gN = [](double) { return 0.0; };
}
#endif
/** East, South and West boundary conditions are all 0 */
std::function<double(double)> g0 = [](double) { return 0.0; };
/******************************* Solve PDE ********************************/
// Instantiate solver (local instance)
Solver solver(nx, ny, xmin, xmax, ymin, ymax, h, k, fun, gN, g0, g0, g0);
// Run solver iterations
for (size_t iter = 0; iter < iterations; ++iter) {
// Perform a single Jacobi stencil iteration
solver.iterate();
#ifdef USE_MPI
// Non-blocking send of boundary values to all neighbours
MPI_Request mpi_requests[4];
int mpi_request_count = 0;
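// The iteration index doubles as the message tag, so sends and receives
// belonging to the same sweep are matched up.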
if (mpi_neighbours.north != MPI_PROC_NULL) {
auto bound = solver.read_boundary(Solver::Dir::North);
MPI_Isend(bound.data(), bound.size(), MPI_DOUBLE,
mpi_neighbours.north, iter, mpi_comm_grid,
&mpi_requests[mpi_request_count++]);
}
if (mpi_neighbours.east != MPI_PROC_NULL) {
auto bound = solver.read_boundary(Solver::Dir::East);
MPI_Isend(bound.data(), 1, mpi_type_row,
mpi_neighbours.east, iter, mpi_comm_grid,
&mpi_requests[mpi_request_count++]);
}
if (mpi_neighbours.south != MPI_PROC_NULL) {
auto bound = solver.read_boundary(Solver::Dir::South);
MPI_Isend(bound.data(), bound.size(), MPI_DOUBLE,
mpi_neighbours.south, iter, mpi_comm_grid,
&mpi_requests[mpi_request_count++]);
}
if (mpi_neighbours.west != MPI_PROC_NULL) {
auto bound = solver.read_boundary(Solver::Dir::West);
MPI_Isend(bound.data(), 1, mpi_type_row,
mpi_neighbours.west, iter, mpi_comm_grid,
&mpi_requests[mpi_request_count++]);
}
// Wait for all sends to complete before receiving new data
// (just to be safe)
MPI_Waitall(mpi_request_count, mpi_requests, MPI_STATUSES_IGNORE);
// Get new boundary conditions using a blocking receive
MPI_Status mpi_status;
if (mpi_neighbours.north != MPI_PROC_NULL) {
auto bound = solver.write_boundary(Solver::Dir::North);
MPI_Recv(bound.data(), bound.size(), MPI_DOUBLE,
mpi_neighbours.north, iter, mpi_comm_grid, &mpi_status);
}
if (mpi_neighbours.east != MPI_PROC_NULL) {
auto bound = solver.write_boundary(Solver::Dir::East);
MPI_Recv(bound.data(), 1, mpi_type_row,
mpi_neighbours.east, iter, mpi_comm_grid, &mpi_status);
}
if (mpi_neighbours.south != MPI_PROC_NULL) {
auto bound = solver.write_boundary(Solver::Dir::South);
MPI_Recv(bound.data(), bound.size(), MPI_DOUBLE,
mpi_neighbours.south, iter, mpi_comm_grid, &mpi_status);
}
if (mpi_neighbours.west != MPI_PROC_NULL) {
auto bound = solver.write_boundary(Solver::Dir::West);
MPI_Recv(bound.data(), 1, mpi_type_row,
mpi_neighbours.west, iter, mpi_comm_grid, &mpi_status);
}
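// After this exchange every ghost layer holds the neighbour's freshly
// computed boundary values, ready for the next sweep.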
#endif
}
/***************************** Solution Stats *****************************/
std::cout
<< "|| residuals ||_2: " << std::setw(15) << solver.resid_norm()
<< "\n|| residuals ||_inf: " << std::setw(15) << solver.resid_norm<Max>()
<< std::endl;
// Get solution
Matrix<double>& solution = solver.solution();
// Compute analytic (true) solution
Matrix<double> analytic(solution.nrow(), solution.ncol());
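// Analytic reference: u(x, y) = sin(2 pi x) sinh(2 pi y), evaluated at every
// local grid point (ghost layers included in the MPI case).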
double y = ymin;
for (size_t j = 0; j < analytic.ncol(); ++j, y += h) {
double x = xmin; // reset x at the start of every column
for (size_t i = 0; i < analytic.nrow(); ++i, x += h) {
analytic(i, j) = sin(2 * M_PI * x) * sinh(2 * M_PI * y);
}
}
// Calculate error
std::cout
<< "|| error ||_2: " << std::setw(15) << solution.dist(analytic)
<< "\n|| error ||_inf: " << std::setw(15) << solution.dist<Max>(analytic)
<< std::endl;
/*************************__________DEBUG__________*************************/
// Create R scripts for plotting the solution
std::ostringstream file_name;
#ifdef USE_MPI
file_name << "Rplot_" << mpi_coords[0] << "_" << mpi_coords[1] << ".R";
#else
file_name << "Rplot_seriel.R";
#endif
std::ofstream out(file_name.str());
out << "sol <- matrix(c(" << solution(0);
for (size_t i = 1; i < solution.size(); ++i) {
out << ',' << solution(i);
}
out << "), " << nx << ", " << ny << ")\n"
#ifdef USE_MPI
<< "png(filename = \"mpi_plot_" << mpi_coords[0] <<
"_" << mpi_coords[1] << ".png\")\n"
#else
<< "png(filename = \"seriel_plot.png\")\n"
#endif
#ifdef USE_MPI
<< "image(sol, zlim = c(-270, 270), axes = FALSE, main = \"Cart Coords: "
<< mpi_coords[0] << ", " << mpi_coords[1] << "\")\n"
#else
<< "image(sol, zlim = c(-270, 270), axes = FALSE, main = \"Seriel\")\n"
#endif
<< "axis(1, at = c(0, 1), labels = c("
<< "\"" << std::setprecision(2) << xmin << "\", "
<< "\"" << std::setprecision(2) << xmax << "\"))\n"
<< "axis(2, at = c(0, 1), labels = c("
<< "\"" << std::setprecision(2) << ymin << "\", "
<< "\"" << std::setprecision(2) << ymax << "\"))\n"
// << "axis(2, at = seq(100, 800, by = 100))"
<< "dev.off()" << std::endl;
out.close();
/*************************__________DEBUG__________*************************/
// MPI shutdown/cleanup
#ifdef USE_MPI
MPI_Finalize();
#endif
return 0;
}