Compare commits: 4f82b8359b...main (8 commits)

Commits in this range (author and date columns did not survive extraction):

- b61b9c8f6c
- 04652c0059
- b00d52af2a
- a885062c83
- 84deab6ced
- 7967991bd9
- 02ab50ab60
- 5bcf1382ba
CMakeLists.txt:

```diff
@@ -3,6 +3,8 @@ cmake_minimum_required(VERSION 3.18 FATAL_ERROR)
 # create project
 project(mpi-daxpy-test)
 
+option(TEST_MANAGED "Test managed memory" ON)
+
 # add dependencies
 include(cmake/CPM.cmake)
 CPMFindPackage(NAME gtensor
@@ -10,26 +12,41 @@ CPMFindPackage(NAME gtensor
   GIT_TAG "main"
   OPTIONS "GTENSOR_ENABLE_BLAS ON")
 
+set(MPI_CXX_SKIP_MPICXX ON)
 find_package(MPI REQUIRED)
 
-add_executable(mpi_daxpy_gt)
-target_sources(mpi_daxpy_gt PRIVATE mpi_daxpy_gt.cc)
-target_link_libraries(mpi_daxpy_gt gtensor::gtensor)
-target_link_libraries(mpi_daxpy_gt gtensor::blas)
-target_link_libraries(mpi_daxpy_gt MPI::MPI_CXX)
-
-add_executable(mpi_stencil_gt)
-target_sources(mpi_stencil_gt PRIVATE mpi_stencil_gt.cc)
-target_link_libraries(mpi_stencil_gt gtensor::gtensor)
-target_link_libraries(mpi_stencil_gt MPI::MPI_CXX)
-
-add_executable(mpi_stencil2d_gt)
-target_sources(mpi_stencil2d_gt PRIVATE mpi_stencil2d_gt.cc)
-target_link_libraries(mpi_stencil2d_gt gtensor::gtensor)
-target_link_libraries(mpi_stencil2d_gt MPI::MPI_CXX)
-
 if ("${GTENSOR_DEVICE}" STREQUAL "cuda")
   enable_language(CUDA)
+endif()
+
+add_executable(mpi_daxpy_gt)
+target_sources(mpi_daxpy_gt PRIVATE mpi_daxpy_gt.cc)
+target_link_libraries(mpi_daxpy_gt PRIVATE gtensor::gtensor)
+target_link_libraries(mpi_daxpy_gt PRIVATE gtensor::blas)
+target_link_libraries(mpi_daxpy_gt PRIVATE MPI::MPI_CXX)
+
+add_executable(mpi_stencil_gt)
+target_sources(mpi_stencil_gt PRIVATE mpi_stencil_gt.cc)
+target_link_libraries(mpi_stencil_gt PRIVATE gtensor::gtensor)
+target_link_libraries(mpi_stencil_gt PRIVATE MPI::MPI_CXX)
+
+add_executable(mpi_stencil2d_gt)
+target_sources(mpi_stencil2d_gt PRIVATE mpi_stencil2d_gt.cc)
+target_link_libraries(mpi_stencil2d_gt PRIVATE gtensor::gtensor)
+target_link_libraries(mpi_stencil2d_gt PRIVATE MPI::MPI_CXX)
+#target_compile_features(mpi_stencil2d_gt PRIVATE cxx_std_17)
+if (TEST_MANAGED)
+  message(STATUS "${PROJECT_NAME}: Enabling managed memory")
+  target_compile_definitions(mpi_stencil2d_gt PRIVATE TEST_MANAGED)
+endif()
+if (GTENSOR_DEVICE STREQUAL "hip" AND DEFINED ENV{PE_MPICH_GTL_DIR_amd_gfx90a})
+  message(STATUS "${PROJECT_NAME}: Linking gtl libs for HIP backend")
+  target_link_options(mpi_stencil2d_gt PRIVATE
+    $ENV{PE_MPICH_GTL_DIR_amd_gfx90a}
+    $ENV{PE_MPICH_GTL_LIBS_amd_gfx90a})
+endif()
+
+if ("${GTENSOR_DEVICE}" STREQUAL "cuda")
   set_source_files_properties(mpi_daxpy_gt.cc
     TARGET_DIRECTORY mpi_daxpy_gt
     PROPERTIES LANGUAGE CUDA)
@@ -37,7 +54,7 @@ if ("${GTENSOR_DEVICE}" STREQUAL "cuda")
     TARGET_DIRECTORY mpi_stencil_gt
     PROPERTIES LANGUAGE CUDA)
   set_source_files_properties(mpi_stencil2d_gt.cc
-    TARGET_DIRECTORY mpi_stencil_gt
+    TARGET_DIRECTORY mpi_stencil2d_gt
    PROPERTIES LANGUAGE CUDA)
 else()
   set_source_files_properties(mpi_daxpy_gt.cc
@@ -47,7 +64,7 @@ else()
     TARGET_DIRECTORY mpi_stencil_gt
     PROPERTIES LANGUAGE CXX)
   set_source_files_properties(mpi_stencil2d_gt.cc
-    TARGET_DIRECTORY mpi_stencil_gt
+    TARGET_DIRECTORY mpi_stencil2d_gt
     PROPERTIES LANGUAGE CXX)
 endif()
 
```
mpi_stencil2d_gt.cc:

```diff
@@ -27,6 +27,8 @@
 
 using namespace gt::placeholders;
 
+const double PI = 3.141592653598793;
+
 inline void check(const char* fname, int line, int mpi_rval)
 {
   if (mpi_rval != MPI_SUCCESS) {
@@ -79,7 +81,8 @@ static const gt::gtensor<double, 1> stencil5 = {1.0 / 12.0, -2.0 / 3.0, 0.0,
  *
  * Size of the result will be size of z with minus 4 in second dimension.
  */
-inline auto stencil2d_1d_5(const gt::gtensor_device<double, 2>& z,
+template <typename S>
+inline auto stencil2d_1d_5_d0(const gt::ext::gtensor2<double, 2, S>& z,
                               const gt::gtensor<double, 1>& stencil)
 {
   return stencil(0) * z.view(_s(0, -4), _all) +
@@ -89,6 +92,23 @@ inline auto stencil2d_1d_5(const gt::gtensor_device<double, 2>& z,
          stencil(4) * z.view(_s(4, _), _all);
 }
 
+/*
+ * Return unevaluated expression that calculates the 1d stencil in the
+ * second dimension of a 2d array.
+ *
+ * Size of the result will be size of z with minus 4 in second dimension.
+ */
+template <typename S>
+inline auto stencil2d_1d_5_d1(const gt::ext::gtensor2<double, 2, S>& z,
+                              const gt::gtensor<double, 1>& stencil)
+{
+  return stencil(0) * z.view(_all, _s(0, -4)) +
+         stencil(1) * z.view(_all, _s(1, -3)) +
+         stencil(2) * z.view(_all, _s(2, -2)) +
+         stencil(3) * z.view(_all, _s(3, -1)) +
+         stencil(4) * z.view(_all, _s(4, _));
+}
+
 void set_rank_device(int n_ranks, int rank)
 {
   int n_devices, device, ranks_per_device;
```
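Both stencil helpers return unevaluated gtensor expressions; no work happens until the expression is assigned to a container. A minimal usage sketch, assuming the gtensor main-branch API used in this diff (the wrapper `apply_stencil` is hypothetical; `stencil5`, the `_d0`/`_d1` helpers, and `gt::ext::gtensor2` come from the code above):

```cpp
// Hypothetical wrapper, not part of the diff: evaluate the 5-point stencil
// along dimension Dim. Assignment to d_dz triggers evaluation; d_dz must be
// 4 points smaller than d_z in the differenced dimension (2 ghost points on
// each side), matching the dz_shape set up in test_deriv below.
template <typename S, int Dim>
void apply_stencil(const gt::ext::gtensor2<double, 2, S>& d_z,
                   gt::ext::gtensor2<double, 2, S>& d_dz, double scale)
{
  if constexpr (Dim == 0) {
    d_dz = stencil2d_1d_5_d0<S>(d_z, stencil5) * scale;
  } else {
    d_dz = stencil2d_1d_5_d1<S>(d_z, stencil5) * scale;
  }
  gt::synchronize(); // ensure device work is complete before host access
}
```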
mpi_stencil2d_gt.cc (continued):

```diff
@@ -113,8 +133,9 @@ void set_rank_device(int n_ranks, int rank)
 }
 
 // exchange in first dimension, staging into contiguous buffers on device
+template <typename S>
 void boundary_exchange_x(MPI_Comm comm, int world_size, int rank,
-                         gt::gtensor_device<double, 2>& d_z, int n_bnd,
+                         gt::ext::gtensor2<double, 2, S>& d_z, int n_bnd,
                          bool stage_host = false)
 {
   auto buf_shape = gt::shape(n_bnd, d_z.shape(1));
@@ -233,102 +254,244 @@ void boundary_exchange_x(MPI_Comm comm, int world_size, int rank,
   gt::synchronize();
 }
 
-int main(int argc, char** argv)
+// exchange in second dimension, optional staging into device buffer
+template <typename S>
+void boundary_exchange_y(MPI_Comm comm, int world_size, int rank,
+                         gt::ext::gtensor2<double, 2, S>& d_z, int n_bnd,
+                         bool stage_device)
+{
+  gt::shape_type<2> buf_shape;
+  if (stage_device) {
+    buf_shape = gt::shape(d_z.shape(0), n_bnd);
+  } else {
+    buf_shape = {0, 0};
+  }
+
+  gt::gtensor_device<double, 2> sbuf_l(buf_shape);
+  gt::gtensor_device<double, 2> sbuf_r(buf_shape);
+  gt::gtensor_device<double, 2> rbuf_r(buf_shape);
+  gt::gtensor_device<double, 2> rbuf_l(buf_shape);
+
+  MPI_Request req_l[2];
+  MPI_Request req_r[2];
+
+  int rank_l = rank - 1;
+  int rank_r = rank + 1;
+
+  auto sv_l = gt::view_strided(d_z, _all, _s(n_bnd, 2 * n_bnd));
+  auto sv_r = gt::view_strided(d_z, _all, _s(-2 * n_bnd, -n_bnd));
+  auto rv_l = gt::view_strided(d_z, _all, _s(0, n_bnd));
+  auto rv_r = gt::view_strided(d_z, _all, _s(-n_bnd, _));
+
+  // start async copy of ghost points into send buffers
+  if (stage_device) {
+    if (rank_l >= 0) {
+      sbuf_l = sv_l;
+    }
+    if (rank_r <= world_size) {
+      sbuf_r = sv_r;
+    }
+  }
+
+  // initiate async recv
+  if (rank_l >= 0) {
+    double* rbuf_l_data = nullptr;
+    if (stage_device) {
+      rbuf_l_data = rbuf_l.data().get();
+    } else {
+      rbuf_l_data = rv_l.data().get();
+    }
+    CHECK(MPI_Irecv(rbuf_l_data, rv_l.size(), MPI_DOUBLE, rank_l, 123, comm,
+                    &req_l[0]));
+  }
+
+  if (rank_r < world_size) {
+    double* rbuf_r_data = nullptr;
+    if (stage_device) {
+      rbuf_r_data = rbuf_r.data().get();
+    } else {
+      rbuf_r_data = rv_r.data().get();
+    }
+    CHECK(MPI_Irecv(rbuf_r_data, rv_r.size(), MPI_DOUBLE, rank_r, 456, comm,
+                    &req_r[0]));
+  }
+
+  // wait for send buffer fill
+  // if (stage_device) {
+  gt::synchronize();
+  // }
+
+  // initiate async sends
+  if (rank_l >= 0) {
+    double* sbuf_l_data = nullptr;
+    if (stage_device) {
+      sbuf_l_data = sbuf_l.data().get();
+    } else {
+      sbuf_l_data = sv_l.data().get();
+    }
+    CHECK(MPI_Isend(sbuf_l_data, sv_l.size(), MPI_DOUBLE, rank_l, 456, comm,
+                    &req_l[1]));
+  }
+
+  if (rank_r < world_size) {
+    double* sbuf_r_data = nullptr;
+    if (stage_device) {
+      sbuf_r_data = sbuf_r.data().get();
+    } else {
+      sbuf_r_data = sv_r.data().get();
+    }
+    CHECK(MPI_Isend(sbuf_r_data, sv_r.size(), MPI_DOUBLE, rank_r, 123, comm,
+                    &req_r[1]));
+  }
+
+  // wait for send/recv to complete, then copy data back into main data array
+  int mpi_rval;
+  if (rank_l >= 0) {
+    MPI_Status status[2];
+    mpi_rval = MPI_Waitall(2, req_l, status);
+    if (mpi_rval != MPI_SUCCESS) {
+      printf("send_l error: %d (%d %d)\n", mpi_rval, status[0].MPI_ERROR,
+             status[1].MPI_ERROR);
+    }
+    if (stage_device) {
+      gt::copy(rbuf_l, rv_l);
+    }
+  }
+  if (rank_r < world_size) {
+    MPI_Status status[2];
+    mpi_rval = MPI_Waitall(2, req_r, status);
+    if (mpi_rval != MPI_SUCCESS) {
+      printf("send_r error: %d (%d %d)\n", mpi_rval, status[0].MPI_ERROR,
+             status[1].MPI_ERROR);
+    }
+    if (stage_device) {
+      gt::copy(rbuf_r, rv_r);
+    }
+  }
+
+  gt::synchronize();
+}
+
+template <int Dim, typename S>
+void print_test_name(bool use_buffers)
+{
+  if constexpr (std::is_same<S, gt::space::device>::value) {
+    printf("TEST dim:%d, device , buf:%d", Dim, use_buffers);
+  } else {
+    printf("TEST dim:%d, managed, buf:%d", Dim, use_buffers);
+  }
+}
+
+template <typename S, int Dim>
+void test_deriv(int device_id, uint32_t vendor_id, int world_size,
+                int world_rank, int n_global, int n_iter, bool use_buffers,
+                int n_warmup = 5)
 {
   // Note: domain will be n_global x n_global plus ghost points in one dimension
-  int n_global = 8 * 1024;
-  bool stage_host = false;
-  int n_iter = 100;
-  int n_warmup = 5;
-
-  if (argc > 1) {
-    n_global = std::atoi(argv[1]) * 1024;
-  }
-  if (argc > 2) {
-    if (argv[2][0] == '1') {
-      stage_host = true;
-    }
-  }
-  if (argc > 3) {
-    n_iter = std::atoi(argv[3]);
-  }
-
   int n_sten = 5;
   int n_bnd = (n_sten - 1) / 2;
-  int world_size, world_rank, device_id;
-  uint32_t vendor_id;
-
-  CHECK(MPI_Init(NULL, NULL));
-
-  CHECK(MPI_Comm_size(MPI_COMM_WORLD, &world_size));
-  CHECK(MPI_Comm_rank(MPI_COMM_WORLD, &world_rank));
-
-  if (n_global % world_size != 0) {
-    printf("%d nmpi (%d) must be divisor of domain size (%d), exiting\n",
-           world_rank, world_size, n_global);
-    exit(1);
-  }
 
   const int n_local = n_global / world_size;
-  const int n_local_with_ghost = n_local + 2 * n_bnd;
 
-  set_rank_device(world_size, world_rank);
-  device_id = gt::backend::clib::device_get();
-  vendor_id = gt::backend::clib::device_get_vendor_id(device_id);
+  int nx_local, ny_local;
+  int nx_local_ghost, ny_local_ghost;
+  int nx_bnd, ny_bnd;
 
-  if (world_rank == 0) {
-    printf("n procs = %d\n", world_size);
-    printf("n_global = %d\n", n_global);
-    printf("n_local = %d\n", n_local);
-    printf("n_iter = %d\n", n_iter);
-    printf("n_warmup = %d\n", n_warmup);
-    printf("stage_host = %d\n", stage_host);
+  if constexpr (Dim == 0) {
+    nx_bnd = n_bnd;
+    ny_bnd = 0;
+    nx_local = n_local;
+    nx_local_ghost = n_local + 2 * n_bnd;
+    ny_local = n_global;
+    ny_local_ghost = n_global;
+  } else {
+    nx_bnd = 0;
+    ny_bnd = n_bnd;
+    nx_local = n_global;
+    nx_local_ghost = n_global;
+    ny_local = n_local;
+    ny_local_ghost = n_local + 2 * n_bnd;
   }
 
-  auto h_z = gt::empty<double>({n_local_with_ghost, n_global});
-  auto d_z = gt::empty_device<double>({n_local_with_ghost, n_global});
+  gt::shape_type<2> z_shape(nx_local_ghost, ny_local_ghost);
+  gt::shape_type<2> dz_shape(nx_local, ny_local);
 
-  auto h_dzdx_numeric = gt::empty<double>({n_local, n_global});
-  auto h_dzdx_actual = gt::empty<double>({n_local, n_global});
-  auto d_dzdx_numeric = gt::empty_device<double>({n_local, n_global});
+  auto h_z = gt::empty<double>(z_shape);
+  gt::ext::gtensor2<double, 2, S> d_z(z_shape);
 
-  double lx = 8;
-  double dx = lx / n_global;
-  double lx_local = lx / world_size;
-  double scale = n_global / lx;
+  auto h_dz_numeric = gt::empty<double>(dz_shape);
+  auto h_dz_actual = gt::empty<double>(dz_shape);
+  gt::ext::gtensor2<double, 2, S> d_dz_numeric(dz_shape);
+
+  double ln = 8;
+  double delta = ln / n_global;
+  double ln_local = ln / world_size;
+  double scale = n_global / ln;
 
   auto fn = [](double x, double y) { return x * x * x + y * y; };
   auto fn_dzdx = [](double x, double y) { return 3 * x * x; };
+  auto fn_dzdy = [](double x, double y) { return 2 * y; };
 
   struct timespec start, end;
   double iter_time = 0.0;
   double total_time = 0.0;
 
-  double x_start = world_rank * lx_local;
-  for (int j = 0; j < n_global; j++) {
-    double ytmp = j * dx;
-    for (int i = 0; i < n_local; i++) {
-      double xtmp = x_start + i * dx;
-      h_z(i + n_bnd, j) = fn(xtmp, ytmp);
-      h_dzdx_actual(i, j) = fn_dzdx(xtmp, ytmp);
+  double x_start = 0, y_start = 0;
+  if constexpr (Dim == 0) {
+    x_start = world_rank * ln_local;
+  } else {
+    y_start = world_rank * ln_local;
+  }
+  for (int j = 0; j < ny_local; j++) {
+    double ytmp = y_start + j * delta;
+    for (int i = 0; i < nx_local; i++) {
+      double xtmp = x_start + i * delta;
+      h_z(i + nx_bnd, j + ny_bnd) = fn(xtmp, ytmp);
+      if constexpr (Dim == 0) {
+        h_dz_actual(i, j) = fn_dzdx(xtmp, ytmp);
+      } else {
+        h_dz_actual(i, j) = fn_dzdy(xtmp, ytmp);
+      }
     }
   }
 
   // fill boundary points on ends
+  if constexpr (Dim == 0) {
     if (world_rank == 0) {
-    for (int j = 0; j < n_global; j++) {
-      double ytmp = j * dx;
-      for (int i = 0; i < n_bnd; i++) {
-        double xtmp = (i - n_bnd) * dx;
+      for (int j = 0; j < ny_local; j++) {
+        double ytmp = j * delta;
+        for (int i = 0; i < nx_bnd; i++) {
+          double xtmp = (i - nx_bnd) * delta;
           h_z(i, j) = fn(xtmp, ytmp);
         }
       }
     }
     if (world_rank == world_size - 1) {
-    for (int j = 0; j < n_global; j++) {
-      double ytmp = j * dx;
-      for (int i = 0; i < n_bnd; i++) {
-        double xtmp = lx + i * dx;
-        h_z(n_bnd + n_local + i, j) = fn(xtmp, ytmp);
+      for (int j = 0; j < ny_local; j++) {
+        double ytmp = j * delta;
+        for (int i = 0; i < nx_bnd; i++) {
+          double xtmp = ln + i * delta;
+          h_z(nx_bnd + nx_local + i, j) = fn(xtmp, ytmp);
+        }
+      }
+    }
+  } else {
+    if (world_rank == 0) {
+      for (int j = 0; j < ny_bnd; j++) {
+        double ytmp = (j - ny_bnd) * delta;
+        for (int i = 0; i < nx_local; i++) {
+          double xtmp = i * delta;
+          h_z(i, j) = fn(xtmp, ytmp);
+        }
+      }
+    }
+    if (world_rank == world_size - 1) {
+      for (int j = 0; j < ny_bnd; j++) {
+        double ytmp = ln + j * delta;
+        for (int i = 0; i < nx_local; i++) {
+          double xtmp = i * delta;
+          h_z(i, ny_bnd + ny_local + j) = fn(xtmp, ytmp);
+        }
       }
     }
   }
```
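Unlike the first-dimension exchange, which packs strided halo columns into contiguous buffers before sending, the comment on `boundary_exchange_y` marks staging as optional: when `stage_device` is false, MPI reads and writes the storage behind the ghost-region views directly. A minimal call sketch under that assumption (the sizes and the managed-space choice are hypothetical; the function and types are from the diff):

```cpp
// Hedged sketch, not part of the diff: exchange a 2-point ghost region along
// dimension 1 of a managed-memory array across neighboring ranks.
void exchange_y_example()
{
  const int n_bnd = 2;
  int world_size, rank;
  CHECK(MPI_Comm_size(MPI_COMM_WORLD, &world_size));
  CHECK(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
  // hypothetical local interior of 512 x 128; ghost points in dimension 1 only
  gt::ext::gtensor2<double, 2, gt::space::managed> d_z(
    gt::shape(512, 128 + 2 * n_bnd));
  // stage_device=true packs halos into contiguous device buffers before
  // MPI_Isend; false passes the views' storage to MPI directly.
  boundary_exchange_y<gt::space::managed>(MPI_COMM_WORLD, world_size, rank,
                                          d_z, n_bnd, true);
}
```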
mpi_stencil2d_gt.cc (continued):

```diff
@@ -347,8 +510,13 @@ int main(int argc, char** argv)
 
   for (int i = 0; i < n_warmup + n_iter; i++) {
     clock_gettime(CLOCK_MONOTONIC, &start);
-    boundary_exchange_x(MPI_COMM_WORLD, world_size, world_rank, d_z, n_bnd,
-                        stage_host);
+    if constexpr (Dim == 0) {
+      boundary_exchange_x<S>(MPI_COMM_WORLD, world_size, world_rank, d_z,
+                             n_bnd, use_buffers);
+    } else {
+      boundary_exchange_y<S>(MPI_COMM_WORLD, world_size, world_rank, d_z,
+                             n_bnd, use_buffers);
+    }
     clock_gettime(CLOCK_MONOTONIC, &end);
     iter_time =
       ((end.tv_sec - start.tv_sec) + (end.tv_nsec - start.tv_nsec) * 1.0e-9);
@@ -358,13 +526,19 @@ int main(int argc, char** argv)
   }
 
   // do some calculation, to try to more closely simulate what happens in GENE
-  d_dzdx_numeric = stencil2d_1d_5(d_z, stencil5) * scale;
+  if constexpr (Dim == 0) {
+    d_dz_numeric = stencil2d_1d_5_d0<S>(d_z, stencil5) * scale;
+  } else {
+    d_dz_numeric = stencil2d_1d_5_d1<S>(d_z, stencil5) * scale;
+  }
   gt::synchronize();
 }
 
+#ifdef DEBUG
   printf("%d/%d exchange time %0.8f ms\n", world_rank, world_size,
          total_time / n_iter * 1000);
+#endif
 
-  gt::copy(d_dzdx_numeric, h_dzdx_numeric);
+  gt::copy(d_dz_numeric, h_dz_numeric);
 
   /*
   for (int i = 0; i < 5; i++) {
@@ -378,10 +552,181 @@ int main(int argc, char** argv)
   }
   */
 
-  double err_norm = std::sqrt(gt::sum_squares(h_dzdx_numeric - h_dzdx_actual));
+  double err_norm = std::sqrt(gt::sum_squares(h_dz_numeric - h_dz_actual));
 
+#ifdef DEBUG
   printf("%d/%d [%d:0x%08x] err_norm = %.8f\n", world_rank, world_size,
          device_id, vendor_id, err_norm);
+#endif
 
+  double time_sum;
+  MPI_Reduce(&total_time, &time_sum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
+
+  double err_sum;
+  MPI_Reduce(&err_norm, &err_sum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
+
+  if (world_rank == 0) {
+    print_test_name<Dim, S>(use_buffers);
+    printf("; %0.8f, err=%0.8f\n", time_sum, err_sum);
+  }
+}
+
+template <typename S, int Dim>
+void test_sum(int device_id, uint32_t vendor_id, int world_size, int world_rank,
+              int n_global, int n_iter, int n_warmup = 5)
+{
+  // Note: domain will be n_global x n_global plus ghost points in one dimension
+  const int n_local = n_global / world_size;
+
+  int nx_local, ny_local;
+
+  struct timespec start, end;
+  double iter_time = 0.0;
+  double total_time = 0.0;
+
+  if constexpr (Dim == 0) {
+    nx_local = n_local;
+    ny_local = n_global;
+  } else {
+    nx_local = n_global;
+    ny_local = n_local;
+  }
+
+  gt::shape_type<2> z_shape(nx_local, ny_local);
+
+  gt::ext::gtensor2<double, 2, S> d_z(z_shape, PI / world_size);
+
+  // reduction test
+  gt::shape_type<1> sum_shape;
+  if constexpr (Dim == 0) {
+    sum_shape = gt::shape(d_z.shape(0));
+  } else {
+    sum_shape = gt::shape(d_z.shape(1));
+  }
+  gt::ext::gtensor2<double, 1, S> d_sum(sum_shape);
+  gt::gtensor<double, 1> h_sum(sum_shape);
+  for (int i = 0; i < n_warmup + n_iter; i++) {
+    if constexpr (Dim == 0) {
+      gt::sum_axis_to(d_sum, d_z, 0);
+      gt::synchronize();
+
+      clock_gettime(CLOCK_MONOTONIC, &start);
+      CHECK(MPI_Allreduce(MPI_IN_PLACE, d_sum.data().get(), d_sum.size(),
+                          MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD));
+
+      clock_gettime(CLOCK_MONOTONIC, &end);
+    } else {
+      gt::sum_axis_to(d_sum, d_z, 1);
+      gt::synchronize();
+
+      clock_gettime(CLOCK_MONOTONIC, &start);
+      CHECK(MPI_Allreduce(MPI_IN_PLACE, d_sum.data().get(), d_sum.size(),
+                          MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD));
+      clock_gettime(CLOCK_MONOTONIC, &end);
+    }
+    iter_time =
+      ((end.tv_sec - start.tv_sec) + (end.tv_nsec - start.tv_nsec) * 1.0e-9);
+
+    if (i >= n_warmup) {
+      total_time += iter_time;
+    }
+  }
+#ifdef DEBUG
+  printf("%d/%d allreduce time %0.8f ms\n", world_rank, world_size,
+         total_time / n_iter * 1000);
+#endif
+
+  gt::copy(d_sum, h_sum);
+
+  double time_sum;
+  MPI_Reduce(&total_time, &time_sum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
+
+  if (world_rank == 0) {
+    print_test_name<Dim, S>(false);
+    printf("; allreduce=%0.8f\n", time_sum);
+  }
+}
+
+int main(int argc, char** argv)
+{
+  using S = gt::space::managed;
+
+  // Note: domain will be n_global x n_global plus ghost points in one dimension
+  int n_global = 8 * 1024;
+  int n_iter = 1000;
+  int n_warmup = 10;
+
+  if (argc > 1) {
+    n_global = std::atoi(argv[1]) * 1024;
+  }
+  if (argc > 2) {
+    n_iter = std::atoi(argv[2]);
+  }
+
+  int world_size, world_rank, device_id;
+  uint32_t vendor_id;
+
+  CHECK(MPI_Init(NULL, NULL));
+
+  CHECK(MPI_Comm_size(MPI_COMM_WORLD, &world_size));
+  CHECK(MPI_Comm_rank(MPI_COMM_WORLD, &world_rank));
+
+  if (n_global % world_size != 0) {
+    printf("%d nmpi (%d) must be divisor of domain size (%d), exiting\n",
+           world_rank, world_size, n_global);
+    exit(1);
+  }
+
+  const int n_local = n_global / world_size;
+
+  set_rank_device(world_size, world_rank);
+  device_id = gt::backend::clib::device_get();
+  vendor_id = gt::backend::clib::device_get_vendor_id(device_id);
+
+  if (world_rank == 0) {
+    printf("n procs = %d\n", world_size);
+    printf("n_global = %d\n", n_global);
+    printf("n_local = %d\n", n_local);
+    printf("n_iter = %d\n", n_iter);
+    printf("n_warmup = %d\n", n_warmup);
+  }
+
+  fflush(stdout);
+
+  test_deriv<gt::space::device, 0>(device_id, vendor_id, world_size, world_rank,
+                                   n_global, n_iter, true, 5);
+  test_deriv<gt::space::device, 0>(device_id, vendor_id, world_size, world_rank,
+                                   n_global, n_iter, false, 5);
+#ifdef TEST_MANAGED
+  test_deriv<gt::space::managed, 0>(device_id, vendor_id, world_size,
+                                    world_rank, n_global, n_iter, true, 5);
+  test_deriv<gt::space::managed, 0>(device_id, vendor_id, world_size,
+                                    world_rank, n_global, n_iter, false, 5);
+#endif
+
+  test_deriv<gt::space::device, 1>(device_id, vendor_id, world_size, world_rank,
+                                   n_global, n_iter, true, 5);
+  test_deriv<gt::space::device, 1>(device_id, vendor_id, world_size, world_rank,
+                                   n_global, n_iter, false, 5);
+#ifdef TEST_MANAGED
+  test_deriv<gt::space::managed, 1>(device_id, vendor_id, world_size,
+                                    world_rank, n_global, n_iter, true, 5);
+  test_deriv<gt::space::managed, 1>(device_id, vendor_id, world_size,
+                                    world_rank, n_global, n_iter, false, 5);
+#endif
+
+  test_sum<gt::space::device, 0>(device_id, vendor_id, world_size, world_rank,
+                                 n_global, n_iter, 5);
+#ifdef TEST_MANAGED
+  test_sum<gt::space::managed, 0>(device_id, vendor_id, world_size, world_rank,
+                                  n_global, n_iter, 5);
+#endif
+  test_sum<gt::space::device, 1>(device_id, vendor_id, world_size, world_rank,
+                                 n_global, n_iter, 5);
+#ifdef TEST_MANAGED
+  test_sum<gt::space::managed, 1>(device_id, vendor_id, world_size, world_rank,
+                                  n_global, n_iter, 5);
+#endif
+
   MPI_Finalize();
```
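The rewritten `main()` enumerates memory space (device always, managed only when the new CMake option defines `TEST_MANAGED`), stencil dimension, and buffering. The repeated calls could be read as instances of the following hypothetical condensation (`run_deriv_group` is not in the diff; `test_deriv` and the space tags are):

```cpp
// Hypothetical condensation of the repeated test_deriv calls in main():
// one memory space S and one dimension Dim, with and without staging buffers.
template <typename S, int Dim>
void run_deriv_group(int device_id, uint32_t vendor_id, int world_size,
                     int world_rank, int n_global, int n_iter)
{
  test_deriv<S, Dim>(device_id, vendor_id, world_size, world_rank, n_global,
                     n_iter, /*use_buffers=*/true, 5);
  test_deriv<S, Dim>(device_id, vendor_id, world_size, world_rank, n_global,
                     n_iter, /*use_buffers=*/false, 5);
}
```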