fix: Grid splitting bug and stats calculation

Daniel Kapla 2022-03-14 12:40:32 +01:00
parent e8e09b0637
commit ee5cbef1d6
3 changed files with 44 additions and 37 deletions

View File

@@ -4,8 +4,8 @@
#include <ostream>
#include <utility>
#include <vector>
#define _USE_MATH_DEFINES /* enables math constants from cmath */
#include <cmath>
#include <assert.h>
inline size_t mod(int num, size_t module) {
while (num < 0) { num += module; };
@@ -87,36 +87,45 @@ public:
}
}
/**
* Distance of this Matrix to given matrix A as || this - A ||_N
*
* Note: If there is a dimension mismatch, only the number of elements
* of the smaller matrix are considered.
*
* @param A matrix to compute the "distance" to
* @param mar_b bottom margin
* @param mar_l left margin
* @param mar_t top margin
* @param mar_r right margin
*/
template <enum Norm N = Norm::Frob>
double dist(const Matrix<T>& A) const {
T accum = static_cast<T>(0); /*< result accumulator */
size_t nelem = size() < A.size() ? size() : A.size();
double dist(const Matrix<T>& A,
size_t mar_b = 0, size_t mar_l = 0, size_t mar_t = 0, size_t mar_r = 0
) const {
// Enforce valid norm types (at compile time)
static_assert(N == Norm::Frob || N == Norm::Max);
if constexpr (N == Norm::Frob) {
// Sum of Squared differences
for (size_t i = 0; i < nelem; ++i) {
T diff = (*this)(i) - A(i);
accum += diff * diff;
}
return std::sqrt(static_cast<double>(accum));
} else { // Maximum Norm
for (size_t i = 0; i < nelem; ++i) {
T diff = std::abs((*this)(i) - A(i));
if (accum < diff) {
accum = diff;
}
}
return static_cast<double>(accum);
assert(this->_nrow == A._nrow);
assert(this->_ncol == A._ncol);
T accum = static_cast<T>(0); /*< result accumulator */
for (size_t j = mar_l; j + mar_r < _ncol; ++j) {
for (size_t i = mar_t; i + mar_b < _nrow; ++i) {
if constexpr (N == Norm::Frob) {
T diff = (*this)(i, j) - A(i, j);
accum += diff * diff;
} else {
accum = std::max(std::abs((*this)(i, j) - A(i, j)), accum);
}
}
}
if constexpr (N == Norm::Frob) {
return std::sqrt(static_cast<double>(accum));
} else {
return static_cast<double>(accum);
}
}
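
For reference, a minimal usage sketch of the new margin-aware dist(). The matrix contents, the header name, and the variable names below are illustrative assumptions, not part of this commit; the sketch only relies on the Matrix<T> interface shown above (constructor, writable operator(), templated dist).

// Sketch only: assumes the Matrix<T> / Norm definitions from the header above.
#include "matrix.h"   // hypothetical header name
#include <cstddef>
#include <iostream>

int main() {
    Matrix<double> u(4, 4);
    Matrix<double> v(4, 4);
    for (std::size_t j = 0; j < 4; ++j) {
        for (std::size_t i = 0; i < 4; ++i) {
            u(i, j) = static_cast<double>(i + j);
            v(i, j) = static_cast<double>(i * j);
        }
    }
    double frob_full  = u.dist(v);                 // Frobenius norm over all entries
    double max_full   = u.dist<Norm::Max>(v);      // largest absolute difference
    double frob_inner = u.dist(v, 1, 1, 1, 1);     // skip one boundary row/column per side
    std::cout << frob_full << " " << max_full << " " << frob_inner << "\n";
}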

View File

@@ -62,13 +62,13 @@ public:
for (size_t y = 0; y < ny; ++y) { _sol(0, y) = _tmp(0, y) = gW(Y(y)); }
};
/* Maps x/y index to x/y position in the [xmin, xmax] x [ymin, ymax] domain */
double X(size_t x) const {
double ratio = static_cast<double>(x) / static_cast<double>(_nx - 1);
/* Maps (i, j) indices to (x, y) position in the [xmin, xmax] x [ymin, ymax] domain */
double X(size_t i) const {
double ratio = static_cast<double>(i) / static_cast<double>(_nx - 1);
return _xmin + ratio * (_xmax - _xmin);
}
double Y(size_t y) const {
double ratio = static_cast<double>(y) / static_cast<double>(_ny - 1);
double Y(size_t j) const {
double ratio = static_cast<double>(j) / static_cast<double>(_ny - 1);
return _ymin + ratio * (_ymax - _ymin);
}
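
As a side note, the renamed X()/Y() mapping is a plain linear interpolation of the index range onto the domain bounds. A standalone sketch of the same formula (names and values here are illustrative, not taken from the solver):

#include <cassert>
#include <cstddef>
#include <cstdio>

// Illustration of the X()/Y() mapping: index 0 maps to the lower bound,
// index n - 1 maps to the upper bound, intermediate indices are evenly spaced.
double to_coord(std::size_t i, std::size_t n, double lo, double hi) {
    assert(n > 1);
    double ratio = static_cast<double>(i) / static_cast<double>(n - 1);
    return lo + ratio * (hi - lo);
}

int main() {
    // 5 grid points on [0, 1] give 0, 0.25, 0.5, 0.75, 1.
    for (std::size_t i = 0; i < 5; ++i) {
        std::printf("%zu -> %g\n", i, to_coord(i, 5, 0.0, 1.0));
    }
}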

View File

@@ -1,6 +1,7 @@
/**
*
*/
#define _USE_MATH_DEFINES /* enables math constants from cmath */
#include <stddef.h>
#include <iostream>
@@ -8,7 +9,6 @@
#include <sstream>
#include <functional>
#include <chrono>
#define _USE_MATH_DEFINES /* enables math constants from cmath */
#include <cmath>
#ifdef USE_MPI
#include <mpi.h>
@@ -165,13 +165,13 @@ int main(int argn, char* argv[]) {
// Compute local domain [xmin, xmax] x [ymin, ymax]
double xmin = (mpi_neighbours.west == MPI_PROC_NULL) ? 0.0
: h * partition_sum(resolution, mpi_dims[0], mpi_coords[0] - 1);
: h * (partition_sum(resolution, mpi_dims[0], mpi_coords[0] - 1) - 1);
double xmax = (mpi_neighbours.east == MPI_PROC_NULL) ? 1.0
: h * (partition_sum(resolution, mpi_dims[0], mpi_coords[0]) + 1);
: h * partition_sum(resolution, mpi_dims[0], mpi_coords[0]);
double ymin = (mpi_neighbours.south == MPI_PROC_NULL) ? 0.0
: h * partition_sum(resolution, mpi_dims[1], mpi_coords[1] - 1);
: h * (partition_sum(resolution, mpi_dims[1], mpi_coords[1] - 1) - 1);
double ymax = (mpi_neighbours.north == MPI_PROC_NULL) ? 1.0
: h * (partition_sum(resolution, mpi_dims[1], mpi_coords[1]) + 1);
: h * partition_sum(resolution, mpi_dims[1], mpi_coords[1]);
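
The change above shifts the extra (ghost) grid point of interior ranks from the upper end of the local domain to the lower end. A 1-D sketch of the corrected computation follows; the partition_sum re-implementation below is a hypothetical stand-in for the real helper (assumed to return the cumulative point count of ranks 0..k), and the grid/rank counts are made up.

#include <cstddef>
#include <cstdio>

// Hypothetical stand-in: cumulative number of grid points owned by ranks 0..k
// when n points are split as evenly as possible over p ranks.
std::size_t partition_sum(std::size_t n, std::size_t p, std::size_t k) {
    std::size_t sum = 0;
    for (std::size_t r = 0; r <= k; ++r) { sum += n / p + (r < n % p ? 1 : 0); }
    return sum;
}

int main() {
    const std::size_t n = 10;        // grid points along one axis
    const std::size_t p = 2;         // ranks along that axis
    const double h = 1.0 / (n - 1);  // grid spacing of the global [0, 1] domain
    for (std::size_t rank = 0; rank < p; ++rank) {
        const bool first = (rank == 0);
        const bool last  = (rank == p - 1);
        // Interior ranks start one grid spacing before their partition
        // (ghost layer) and end exactly at the partition boundary.
        double lo = first ? 0.0 : h * (partition_sum(n, p, rank - 1) - 1);
        double hi = last  ? 1.0 : h * partition_sum(n, p, rank);
        std::printf("rank %zu: [%g, %g]\n", rank, lo, hi);
    }
}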
// Create MPI vector Type (for boundary condition exchange)
// Allows directly exchange of matrix rows (north/south bounds) since the
@@ -283,18 +283,16 @@ int main(int argn, char* argv[]) {
// Compute analytic (true) solution
Matrix<double> analytic(solution.nrow(), solution.ncol());
double x = xmin;
double y = ymin;
for (size_t j = 0; j < analytic.ncol(); ++j, y += h) {
for (size_t i = 0; i < analytic.nrow(); ++i, x += h) {
analytic(i, j) = sin(2 * M_PI * x) * sinh(2 * M_PI * y);
for (size_t j = 0; j < analytic.ncol(); ++j) {
for (size_t i = 0; i < analytic.nrow(); ++i) {
analytic(i, j) = sin(2 * M_PI * solver.X(i)) * sinh(2 * M_PI * solver.Y(j));
}
}
#ifdef USE_MPI
// Frobenius Norm Accumulator
double frob_norm_sendbuf[2] = {
solver.resid_norm(), solution.dist(analytic)
solver.resid_norm(), solution.dist(analytic, 1, 1, 1, 1)
};
// Square the norms (the global norm is the square root of the
// sum of the squared partial norms)
@@ -302,7 +300,7 @@ int main(int argn, char* argv[]) {
frob_norm_sendbuf[1] = frob_norm_sendbuf[1] * frob_norm_sendbuf[1];
// Maximum Norm Accumulator
double max_norm_sendbuf[2] = {
solver.resid_norm<Max>(), solution.dist<Max>(analytic)
solver.resid_norm<Max>(), solution.dist<Max>(analytic, 1, 1, 1, 1)
};
// Global result reduction buffers
@@ -320,10 +318,10 @@ int main(int argn, char* argv[]) {
frob_norm_recvbuf[1] = std::sqrt(frob_norm_recvbuf[1]);
#else
double frob_norm_recvbuf[2] = {
solver.resid_norm(), solution.dist(analytic)
solver.resid_norm(), solution.dist(analytic, 1, 1, 1, 1)
};
double max_norm_recvbuf[2] = {
solver.resid_norm<Max>(), solution.dist<Max>(analytic)
solver.resid_norm<Max>(), solution.dist<Max>(analytic, 1, 1, 1, 1)
};
#endif
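
For clarity, the intended combination of the per-rank norms (what the reductions above accumulate) can be reproduced without MPI. A minimal sketch with made-up partial values; the reduction operations named in the comments (sum for Frobenius, max for the maximum norm) are inferred from the squaring step above, not shown explicitly in this diff.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    // Made-up partial norms from three hypothetical ranks.
    std::vector<double> frob_parts = {0.3, 0.4, 1.2};
    std::vector<double> max_parts  = {0.3, 0.4, 1.2};

    // Frobenius: partial norms are squared, summed over ranks (an MPI_SUM
    // reduction), and the square root of the total gives the global norm.
    double frob = 0.0;
    for (double f : frob_parts) { frob += f * f; }
    frob = std::sqrt(frob);

    // Maximum norm: the global value is the largest partial norm
    // (presumably an MPI_MAX reduction).
    double mx = *std::max_element(max_parts.begin(), max_parts.end());

    std::printf("global frob = %g, global max = %g\n", frob, mx);
}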