Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Googletest unit testing #40

Draft
wants to merge 12 commits into
base: vlasiator-version
Choose a base branch
from
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
*.o
*.exe
240 changes: 240 additions & 0 deletions UnitTests/grid.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,240 @@
// Unit tests for dccrg grid construction, cell data distribution and
// neighbor consistency, run under MPI for several grid configurations.
// All tests in this file are expected to pass.

#include <algorithm>
#include <array>
#include <cstdint>
#include <unordered_set>
#include <utility>
#include <vector>

#include <gtest/gtest.h>
#include <mpi.h>

#include "../dccrg.hpp"
#include "../dccrg_cartesian_geometry.hpp"

// Test driver: MPI must be initialized before gtest runs so the tests can
// use MPI_COMM_WORLD, and finalized only after all tests have completed.
int main(int argc, char* argv[])
{
	// Bail out early if MPI cannot start; every test below depends on it.
	// (Previously the return code was stored in an unused variable.)
	if (MPI_Init(&argc, &argv) != MPI_SUCCESS) {
		return 1;
	}
	::testing::InitGoogleTest(&argc, argv);
	const int ret {RUN_ALL_TESTS()};
	MPI_Finalize();
	return ret;
}

// Grid configurations exercised by the parameterized tests below.
enum class GridType {
	simple,         // default dccrg settings only
	vlasovian,      // adds user neighborhoods (extended sysboundaries + Vlasov solver stencil)
	magnetospheric  // vlasovian neighborhoods plus local refinement at the grid center
};

// Fixture that builds a dccrg grid in one of several configurations
// (see GridType) before each test. Every local cell's payload is set to
// the cell's own id so tests can verify local and remote copies.
class GridTest : public testing::TestWithParam<GridType> {
protected:
	void SetUp() override
	{
		using neigh_t = dccrg::Types<3>::neighborhood_item_t;
		std::vector<neigh_t> neighborhood;

		grid.set_initial_length({size, size, size}).set_neighborhood_length(stencil_width).set_maximum_refinement_level(reflevel).initialize(MPI_COMM_WORLD);
		switch (GetParam()) {
		case GridType::simple:
			break; // Defaults should be fine for everything
		case GridType::magnetospheric:
			// Refine one level at a time at the grid center.
			for (int i = 0; i < reflevel; ++i) {
				grid.refine_completely_at({static_cast<double>(size) / 2.0, static_cast<double>(size) / 2.0, static_cast<double>(size) / 2.0});
				grid.stop_refining();
			}
			[[fallthrough]]; // Magnetospheric grids also get the vlasovian neighborhoods
		case GridType::vlasovian:
			// Extended sysboundaries: full cube of radius stencil_width, self excluded.
			for (int x = -stencil_width; x <= stencil_width; ++x) {
				for (int y = -stencil_width; y <= stencil_width; ++y) {
					for (int z = -stencil_width; z <= stencil_width; ++z) {
						if (x || y || z) {
							neighborhood.push_back({x, y, z});
						}
					}
				}
			}
			grid.add_neighborhood(neighborhoods++, neighborhood);

			// Vlasov solver: face-aligned stencil only. Start from an empty
			// list; previously the sysboundary offsets were left in, which
			// duplicated every axis offset in this neighborhood.
			neighborhood.clear();
			for (int d = -stencil_width; d <= stencil_width; ++d) {
				if (d) {
					neighborhood.push_back({d, 0, 0});
					neighborhood.push_back({0, d, 0});
					neighborhood.push_back({0, 0, d});
				}
			}
			grid.add_neighborhood(neighborhoods++, neighborhood);

			break;
		default:
			FAIL() << "Grid type not implemented!";
			break;
		}

		grid.balance_load();

		// Simple data we can check: each cell stores its own id.
		for (const auto cell : grid.get_cells()) {
			*grid[cell] = cell;
		}
		grid.update_copies_of_remote_neighbors();
		for (int neighborhood_id = 0; neighborhood_id < neighborhoods; ++neighborhood_id) {
			grid.update_copies_of_remote_neighbors(neighborhood_id);
		}
	}

	dccrg::Dccrg<uint64_t, dccrg::Cartesian_Geometry> grid;
	int neighborhoods {0};      // number of user neighborhoods added in SetUp
	const int stencil_width {3};
	const uint64_t size {10};   // cells per dimension at refinement level 0
	int reflevel {2};           // maximum refinement level
};

// Instantiate the fixture once per grid configuration; each suite runs
// every TEST_P above against the corresponding GridType.
INSTANTIATE_TEST_SUITE_P(Simple, GridTest, testing::Values(GridType::simple));
INSTANTIATE_TEST_SUITE_P(Vlasovian, GridTest, testing::Values(GridType::vlasovian));
INSTANTIATE_TEST_SUITE_P(Magnetospheric, GridTest, testing::Values(GridType::magnetospheric));

// Each local cell must hold its own id as payload (set in SetUp).
TEST_P(GridTest, contents)
{
	const auto cells = grid.get_cells();
	for (const auto cell : cells) {
		auto* data = grid[cell];
		ASSERT_NE_MPI(data, nullptr);
		EXPECT_EQ_MPI(*data, cell);
	}
}

// Every known neighbor, in the default and all user neighborhoods, must
// have valid, up-to-date data after update_copies_of_remote_neighbors().
TEST_P(GridTest, remote_contents)
{
	// Shared check: each listed neighbor holds its own id as payload.
	// Also guards against a null neighbor list, which the original
	// dereferenced unconditionally.
	const auto check_neighbors = [this](const auto* neighbors) {
		ASSERT_NE_MPI(neighbors, nullptr);
		for (const auto& [neighbor, dir] : *neighbors) {
			if (neighbor != dccrg::error_cell) {
				ASSERT_NE_MPI(grid[neighbor], nullptr);
				EXPECT_EQ_MPI(*grid[neighbor], neighbor);
			}
		}
	};

	for (const auto cell : grid.get_cells()) {
		ASSERT_NE_MPI(grid[cell], nullptr);

		check_neighbors(grid.get_neighbors_of(cell));

		for (int neighborhood = 0; neighborhood < neighborhoods; ++neighborhood) {
			check_neighbors(grid.get_neighbors_of(cell, neighborhood));
		}
	}
}

// Cell data must follow cells across repartitioning.
TEST_P(GridTest, contents_after_loadbalance)
{
	// Randomize the partition, then balance once more; data must migrate
	// with the cells both times.
	grid.set_load_balancing_method("RANDOM").balance_load();
	grid.balance_load();

	const auto cells = grid.get_cells();
	for (const auto cell : cells) {
		auto* data = grid[cell];
		ASSERT_NE_MPI(data, nullptr);
		EXPECT_EQ_MPI(*data, cell);
	}
}

// The default neighborhood's neighbors_of/neighbors_to relations must be
// symmetric: if B is listed as a neighbor of A then A must appear among
// the cells B is a neighbor to, and vice versa — including remote cells.
TEST_P(GridTest, consistent_neighbors)
{
	const auto cells = grid.get_cells();
	// O(1) membership test instead of a linear std::find per neighbor.
	const std::unordered_set<uint64_t> local_cells(cells.begin(), cells.end());

	for (const auto cell : cells) {
		// Fatal asserts here: both pointers are dereferenced below, so a
		// non-fatal EXPECT (as before) would have crashed on nullptr.
		auto* my_neighbors_of {grid.get_neighbors_of(cell)};
		ASSERT_NE_MPI(my_neighbors_of, nullptr);
		auto* my_neighbors_to {grid.get_neighbors_to(cell)};
		ASSERT_NE_MPI(my_neighbors_to, nullptr);

		// Matches list entries that refer back to this cell.
		const auto is_cell = [cell](const std::pair<uint64_t, std::array<int, 4>>& pair){ return pair.first == cell; };

		for (const auto& [neighbor, dir] : *my_neighbors_of) {
			if (neighbor != dccrg::error_cell) {
				ASSERT_NE_MPI(grid[neighbor], nullptr);
				std::vector<std::pair<uint64_t, std::array<int, 4>>> other_neighbors_to;
				if (local_cells.count(neighbor)) {
					auto* p {grid.get_neighbors_to(neighbor)};
					ASSERT_NE_MPI(p, nullptr);
					other_neighbors_to = *p;
				} else {
					// Warning: giga jank — remote cells have no cached
					// neighbor lists, so reconstruct them from geometry.
					std::vector<uint64_t> found_neighbors;
					for (const auto& [neigh, neigh_dir] : grid.find_neighbors_of(neighbor, grid.get_neighborhood_of(), grid.get_max_ref_lvl_diff())) {
						found_neighbors.push_back(neigh);
					}
					other_neighbors_to = grid.find_neighbors_to(neighbor, found_neighbors);
				}
				EXPECT_NE_MPI(std::find_if(other_neighbors_to.begin(), other_neighbors_to.end(), is_cell), other_neighbors_to.end());
			}
		}

		for (const auto& [neighbor, dir] : *my_neighbors_to) {
			std::vector<std::pair<uint64_t, std::array<int, 4>>> other_neighbors_of;
			if (local_cells.count(neighbor)) {
				auto* p {grid.get_neighbors_of(neighbor)};
				ASSERT_NE_MPI(p, nullptr);
				other_neighbors_of = *p;
			} else {
				other_neighbors_of = grid.find_neighbors_of(neighbor, grid.get_neighborhood_of(), grid.get_max_ref_lvl_diff());
			}
			EXPECT_NE_MPI(std::find_if(other_neighbors_of.begin(), other_neighbors_of.end(), is_cell), other_neighbors_of.end());
		}
	}
}

// TODO: cannot test consistency of remote neighbors without dccrg changes
// User neighborhoods must also be symmetric between neighbors_of and
// neighbors_to, for every neighborhood id added in SetUp. Only local
// cells are checked here (see TODO above about remote cells).
TEST_P(GridTest, consistent_user_neighbors)
{
	const auto cells = grid.get_cells();
	// O(1) membership test instead of a linear std::find per neighbor.
	const std::unordered_set<uint64_t> local_cells(cells.begin(), cells.end());

	for (const auto cell : cells) {
		// Matches list entries that refer back to this cell.
		const auto is_cell = [cell](const std::pair<uint64_t, std::array<int, 4>>& pair){ return pair.first == cell; };

		for (int neighborhood = 0; neighborhood < neighborhoods; ++neighborhood) {
			// Fatal asserts before dereferencing; the original used
			// non-fatal EXPECT and then dereferenced anyway.
			auto* my_neighbors_of {grid.get_neighbors_of(cell, neighborhood)};
			ASSERT_NE_MPI(my_neighbors_of, nullptr);

			for (const auto& [neighbor, dir] : *my_neighbors_of) {
				if (neighbor != dccrg::error_cell) {
					EXPECT_NE_MPI(grid[neighbor], nullptr);
					if (local_cells.count(neighbor)) {
						auto* p {grid.get_neighbors_to(neighbor, neighborhood)};
						ASSERT_NE_MPI(p, nullptr);
						// Search the list in place; no need to copy it.
						EXPECT_NE_MPI(std::find_if(p->begin(), p->end(), is_cell), p->end());
					}
				}
			}

			auto* my_neighbors_to {grid.get_neighbors_to(cell, neighborhood)};
			ASSERT_NE_MPI(my_neighbors_to, nullptr);

			for (const auto& [neighbor, dir] : *my_neighbors_to) {
				if (local_cells.count(neighbor)) {
					auto* p {grid.get_neighbors_of(neighbor, neighborhood)};
					ASSERT_NE_MPI(p, nullptr);
					EXPECT_NE_MPI(std::find_if(p->begin(), p->end(), is_cell), p->end());
				}
			}
		}
	}
}

// TODO test proper copies and frees of dccrg.comm
// Right now this can't be done because the getter is not a getter
// Copying a grid must preserve local cell data, and load balancing the
// copy must leave the original untouched.
TEST_P(GridTest, copy)
{
	auto other_grid = grid;

	// Shared check: the given grid holds the cell's own id as payload.
	const auto expect_own_id = [](auto& g, const uint64_t cell) {
		EXPECT_NE_MPI(g[cell], nullptr);
		EXPECT_EQ_MPI(*g[cell], cell);
	};

	// Local cells should be identical immediately after copy
	for (const auto cell : grid.get_cells()) {
		expect_own_id(other_grid, cell);
	}

	other_grid.balance_load();
	for (const auto cell : other_grid.get_cells()) {
		expect_own_id(other_grid, cell);
	}

	// Load balancing copy shouldn't affect original
	for (const auto cell : grid.get_cells()) {
		expect_own_id(grid, cell);
	}
}
23 changes: 23 additions & 0 deletions UnitTests/makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
ARCH=${VLASIATOR_ARCH}
include ../../../MAKE/Makefile.${ARCH}

# These should be in Makefile.${ARCH}
# INC_GTEST=-I/projappl/project_2005018/lkotipal/libraries/gtest/include
# LIB_GTEST=-L/projappl/project_2005018/lkotipal/libraries/gtest/lib64 -lgtest -Wl,-rpath=/projappl/project_2005018/lkotipal/libraries/gtest/lib64
# LIB_NOPROFILE = -L$(LIBRARY_PREFIX)/phiprof/lib -lnophiprof -Wl,-rpath=$(LIBRARY_PREFIX)/phiprof/lib

CXXFLAGS+=$(INC_MPI) $(INC_BOOST) $(INC_ZOLTAN) $(INC_PROFILE) $(INC_GTEST) -DGTEST_HAS_MPI
LDFLAGS+=$(LIB_BOOST) $(LIB_ZOLTAN) $(LIB_GTEST) $(LIB_NOPROFILE)

TESTS=validation.exe grid.exe

# all/clean are commands, not files.
.PHONY: all clean

all: $(TESTS)

%.exe: %.o
	$(CMP) $(CXXFLAGS) $< -o $@ $(LDFLAGS)

%.o: %.cpp
	$(CMP) -c $(CXXFLAGS) $< -o $@

clean:
	# -f: do not fail when nothing has been built yet; also remove objects.
	rm -f $(TESTS) $(TESTS:.exe=.o)
85 changes: 85 additions & 0 deletions UnitTests/validation.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
// Validation for MPI functionality and gtest
// hello.fails_on_all, hello.fails_on_first and hello.fails_on_last should fail
// And the other tests should pass

#include <gtest/gtest.h>
#include <mpi.h>

// Test driver: MPI must be initialized before gtest runs so the tests can
// use MPI_COMM_WORLD, and finalized only after all tests have completed.
int main(int argc, char* argv[])
{
	// Bail out early if MPI cannot start; every test below depends on it.
	// (Previously the return code was stored in an unused variable.)
	if (MPI_Init(&argc, &argv) != MPI_SUCCESS) {
		return 1;
	}
	::testing::InitGoogleTest(&argc, argv);
	const int ret {RUN_ALL_TESTS()};
	MPI_Finalize();
	return ret;
}

// Sanity check: a trivially true assertion passes on every rank.
TEST(hello, passes_on_all)
{
	int rank {0};
	ASSERT_EQ_MPI(MPI_Comm_rank(MPI_COMM_WORLD, &rank), MPI_SUCCESS);
	ASSERT_EQ_MPI(0, 0);
}

// Deliberate failure on every rank (no rank is -1); validates that a
// failing MPI assertion is reported on all ranks.
TEST(hello, fails_on_all)
{
	int rank {0};
	ASSERT_EQ_MPI(MPI_Comm_rank(MPI_COMM_WORLD, &rank), MPI_SUCCESS);
	ASSERT_EQ_MPI(rank, -1);
}

// Deliberate failure on rank 0 only; validates single-rank failure reporting.
TEST(hello, fails_on_first)
{
	int rank {0};
	int commSize {0};
	ASSERT_EQ_MPI(MPI_Comm_rank(MPI_COMM_WORLD, &rank), MPI_SUCCESS);
	ASSERT_EQ_MPI(MPI_Comm_size(MPI_COMM_WORLD, &commSize), MPI_SUCCESS);

	ASSERT_NE_MPI(rank, 0);
}

// Deliberate failure on the last rank only; validates single-rank failure
// reporting at the other end of the communicator.
TEST(hello, fails_on_last)
{
	int rank {0};
	int commSize {0};
	ASSERT_EQ_MPI(MPI_Comm_rank(MPI_COMM_WORLD, &rank), MPI_SUCCESS);
	ASSERT_EQ_MPI(MPI_Comm_size(MPI_COMM_WORLD, &commSize), MPI_SUCCESS);

	ASSERT_NE_MPI(rank, commSize - 1);
}

// Ring exchange: every rank sends its id to the next rank and receives
// from the previous one; the received value must equal the source's rank.
TEST(MPI, sendrecv)
{
	int rank {0};
	int commSize {0};
	ASSERT_EQ_MPI(MPI_Comm_rank(MPI_COMM_WORLD, &rank), MPI_SUCCESS);
	ASSERT_EQ_MPI(MPI_Comm_size(MPI_COMM_WORLD, &commSize), MPI_SUCCESS);

	// Neighbors on the ring, wrapping at both ends.
	const int dest {(rank + 1) % commSize};
	const int source {(rank + commSize - 1) % commSize};
	int send {rank};
	int recv {-1};

	const int status {MPI_Sendrecv(&send, 1, MPI_INT, dest, 0, &recv, 1, MPI_INT, source, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE)};
	ASSERT_EQ_MPI(status, MPI_SUCCESS);
	ASSERT_EQ_MPI(source, recv);
}