Commit a293dd22 authored by Gregor Weiss

Reading with MPIIO

parent c23ee2d4
@@ -14,6 +14,7 @@
#include "HeatTransfer.h"
#include "Settings.h"
#include <vector>
#include <mpi.h>
class IO
@@ -23,6 +24,8 @@ public:
~IO();
void write(int step, const HeatTransfer &ht, const Settings &s,
MPI_Comm comm);
void read(const int step, std::vector<double> &buffer, const Settings &s,
MPI_Comm comm);
private:
std::string m_outputfilename;
......
@@ -12,6 +12,8 @@
#include <string>
#include <iostream> // std::cout
#include <adios2.h>
adios2::ADIOS ad;
@@ -88,3 +90,7 @@ void IO::write(int step, const HeatTransfer &ht, const Settings &s,
bpWriter.EndStep();
}
}
void IO::read(const int step, std::vector<double> &ht, const Settings &s,
MPI_Comm comm)
{ std::cout << "IO::read not implemented for adios2 format." << std::endl; }
@@ -76,3 +76,7 @@ void IO::write(int step, const HeatTransfer &ht, const Settings &s,
out << std::endl;
}
}
void IO::read(const int step, std::vector<double> &ht, const Settings &s,
MPI_Comm comm)
{ std::cout << "IO::read not implemented for ascii format." << std::endl; }
@@ -67,3 +67,7 @@ void IO::write(int step, const HeatTransfer &ht, const Settings &s,
out.write(reinterpret_cast<const char*>(ht.data()), static_cast<std::streamsize>(s.ndx*s.ndy));
}
void IO::read(const int step, std::vector<double> &ht, const Settings &s,
MPI_Comm comm)
{ std::cout << "IO::read not implemented for binary format." << std::endl; }
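The binary, ascii and adios2 backends only gain placeholder read() implementations in this commit. For illustration, the following self-contained sketch (not part of the commit) shows what a binary read counterpart to the std::ofstream-based write above could look like; the helper name, the std::ifstream usage and the byte count including sizeof(double) are my assumptions, not the project's code.

#include <cstddef>
#include <fstream>
#include <string>
#include <vector>

// Hypothetical helper: read one locally sized block of doubles back from a raw
// binary file written by the ofstream-based IO::write shown above.
bool readBinaryBlock(const std::string &filename, std::vector<double> &buffer)
{
    std::ifstream in(filename, std::ios::binary);
    if (!in)
        return false;
    const std::size_t nbytes = buffer.size() * sizeof(double);
    in.read(reinterpret_cast<char *>(buffer.data()),
            static_cast<std::streamsize>(nbytes));
    // Succeed only if the full block was present in the file.
    return static_cast<std::size_t>(in.gcount()) == nbytes;
}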
@@ -35,7 +35,7 @@ IO::IO(const Settings &s, MPI_Comm comm)
(
comm,
m_outputfilename.c_str(),
MPI_MODE_CREATE | MPI_MODE_WRONLY,
MPI_MODE_CREATE | MPI_MODE_RDWR,
MPI_INFO_NULL,
&fh
);
@@ -51,6 +51,15 @@ void IO::write(int step, const HeatTransfer &ht, const Settings &s,
{
// Set file pointer
int offset = ( mpiio_rank + mpiio_size*step ) * mpiio_count * sizeof(double);
MPI_File_seek( fh, offset, MPI_SEEK_SET);
MPI_File_write(fh, ht.data(), mpiio_count, MPI_DOUBLE, MPI_STATUS_IGNORE);
MPI_File_seek( fh, offset, MPI_SEEK_SET );
MPI_File_write( fh, ht.data_noghost().data(), mpiio_count, MPI_DOUBLE, MPI_STATUS_IGNORE );
}
void IO::read(const int step, std::vector<double> &buffer, const Settings &s,
MPI_Comm comm)
{
// Set file pointer
int offset = ( mpiio_rank + mpiio_size*step ) * mpiio_count * sizeof(double);
MPI_File_seek( fh, offset, MPI_SEEK_SET );
MPI_File_read( fh, buffer.data(), mpiio_count, MPI_DOUBLE, MPI_STATUS_IGNORE );
}
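The read() added here mirrors the write() path: every rank seeks to (mpiio_rank + mpiio_size*step) * mpiio_count * sizeof(double), so each step stores one contiguous block per rank and consecutive steps are appended back to back, and the file is now opened with MPI_MODE_RDWR so the same handle serves both directions. A self-contained sketch of that layout follows (not the project's code; the file name, block size and the use of MPI_Offset instead of int for the byte offset are my assumptions).

#include <mpi.h>
#include <vector>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    const int N = 8;                                  // local element count (assumed)
    const int step = 0;                               // step to access (assumed)
    std::vector<double> block(N, (double)rank);       // payload: the rank id
    std::vector<double> back(N, 0.0);

    MPI_File fh;
    MPI_File_open(MPI_COMM_WORLD, "heat_seek.dat",    // hypothetical file name
                  MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);

    // Same layout as above: one contiguous block per rank, one segment per step.
    // MPI_Offset avoids the overflow a plain int could hit for large files.
    MPI_Offset offset =
        ((MPI_Offset)rank + (MPI_Offset)size * step) * N * sizeof(double);

    MPI_File_seek(fh, offset, MPI_SEEK_SET);
    MPI_File_write(fh, block.data(), N, MPI_DOUBLE, MPI_STATUS_IGNORE);

    MPI_File_seek(fh, offset, MPI_SEEK_SET);          // rewind to the same block
    MPI_File_read(fh, back.data(), N, MPI_DOUBLE, MPI_STATUS_IGNORE);

    MPI_File_close(&fh);
    MPI_Finalize();
    return 0;
}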
@@ -16,7 +16,8 @@
MPI_File fh;
MPI_Datatype filetype;
int count;
int mpiio_count;
int mpiio_rank, mpiio_size;
IO::IO(const Settings &s, MPI_Comm comm)
{
@@ -38,16 +39,15 @@ IO::IO(const Settings &s, MPI_Comm comm)
dargs[0] = dargs[1] = MPI_DISTRIBUTE_DFLT_DARG;
// Set count of buffer, i.e. size of ht.data()
count = s.ndx * s.ndy;
mpiio_count = s.ndx * s.ndy;
// Create array MPI_Datatype
int rank, size;
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &size);
MPI_Comm_rank(comm, &mpiio_rank);
MPI_Comm_size(comm, &mpiio_size);
MPI_Type_create_darray
(
size,
rank,
mpiio_size,
mpiio_rank,
2,
gsizes,
distribs,
@@ -64,31 +64,48 @@ IO::IO(const Settings &s, MPI_Comm comm)
(
comm,
m_outputfilename.c_str(),
MPI_MODE_CREATE | MPI_MODE_WRONLY,
MPI_MODE_CREATE | MPI_MODE_RDWR,
MPI_INFO_NULL,
&fh
);
}
IO::~IO()
{
MPI_File_close(&fh);
MPI_Type_free(&filetype);
}
void IO::write(int step, const HeatTransfer &ht, const Settings &s,
MPI_Comm comm)
{
// Set file view
int offset = mpiio_size * step * mpiio_count * sizeof(double);
MPI_File_set_view
(
fh,
0,
offset,
MPI_DOUBLE,
filetype,
"native",
MPI_INFO_NULL
);
MPI_File_write_all(fh, ht.data_noghost().data(), mpiio_count, MPI_DOUBLE, MPI_STATUS_IGNORE);
}
IO::~IO()
void IO::read(const int step, std::vector<double> &buffer, const Settings &s,
MPI_Comm comm)
{
MPI_File_close(&fh);
MPI_Type_free(&filetype);
}
void IO::write(int step, const HeatTransfer &ht, const Settings &s,
MPI_Comm comm)
{
MPI_File_write_all(fh, ht.data(), count, MPI_DOUBLE, MPI_STATUS_IGNORE);
// Set file view
int offset = mpiio_size * step * mpiio_count * sizeof(double);
MPI_File_set_view
(
fh,
offset,
MPI_DOUBLE,
filetype,
"native",
MPI_INFO_NULL
);
MPI_File_read_all(fh, buffer.data(), mpiio_count, MPI_DOUBLE, MPI_STATUS_IGNORE);
}
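In the darray-based backend, the commit replaces the fixed displacement of 0 in MPI_File_set_view with a per-step displacement of mpiio_size * step * mpiio_count * sizeof(double), so the new read() can select any previously written step through the same file view. A self-contained sketch of this pattern follows (my example, not the project's code; the process grid, global/local sizes and file name are assumptions).

#include <mpi.h>
#include <vector>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // Assumed decomposition: a 2D process grid and a global array that divides
    // evenly, so every rank holds count elements.
    int psizes[2] = {0, 0};
    MPI_Dims_create(size, 2, psizes);
    int gsizes[2] = {4 * psizes[0], 4 * psizes[1]};
    const int count = (gsizes[0] / psizes[0]) * (gsizes[1] / psizes[1]);

    int distribs[2] = {MPI_DISTRIBUTE_BLOCK, MPI_DISTRIBUTE_BLOCK};
    int dargs[2] = {MPI_DISTRIBUTE_DFLT_DARG, MPI_DISTRIBUTE_DFLT_DARG};
    MPI_Datatype filetype;
    MPI_Type_create_darray(size, rank, 2, gsizes, distribs, dargs, psizes,
                           MPI_ORDER_C, MPI_DOUBLE, &filetype);
    MPI_Type_commit(&filetype);

    MPI_File fh;
    MPI_File_open(MPI_COMM_WORLD, "heat_darray.dat",  // hypothetical file name
                  MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);

    // One step occupies size*count doubles; the displacement selects the step.
    const int step = 0;                               // assumed
    MPI_Offset disp = (MPI_Offset)size * step * count * sizeof(double);

    std::vector<double> block(count, (double)rank);
    MPI_File_set_view(fh, disp, MPI_DOUBLE, filetype, "native", MPI_INFO_NULL);
    MPI_File_write_all(fh, block.data(), count, MPI_DOUBLE, MPI_STATUS_IGNORE);

    std::vector<double> back(count, 0.0);
    MPI_File_set_view(fh, disp, MPI_DOUBLE, filetype, "native", MPI_INFO_NULL);
    MPI_File_read_all(fh, back.data(), count, MPI_DOUBLE, MPI_STATUS_IGNORE);

    MPI_File_close(&fh);
    MPI_Type_free(&filetype);
    MPI_Finalize();
    return 0;
}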
@@ -11,16 +11,17 @@
#include "IO.h"
#include <string>
#include <chrono> // std::chrono::system_clock
#include <random> // std::default_random_engine
#include <vector> // std::vector
#include <algorithm> // std::shuffle
#include <iostream>
MPI_File fh;
MPI_Datatype filetype;
int count;
int mpiio_count;
int mpiio_rank, mpiio_size;
IO::IO(const Settings &s, MPI_Comm comm)
{
@@ -30,48 +31,48 @@ IO::IO(const Settings &s, MPI_Comm comm)
int globsize = s.gndx * s.gndy;
// Set count of buffer, i.e. size of ht.data()
count = s.ndx * s.ndy;
mpiio_count = s.ndx * s.ndy;
MPI_Status status;
int rank, size;
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &size);
MPI_Comm_rank(comm, &mpiio_rank);
MPI_Comm_size(comm, &mpiio_size);
//- Create a random index vector for each process
std::vector<int> indexes(count, 0);
if (rank == 0)
std::vector<int> indexes(mpiio_count, 0);
if (mpiio_rank == 0)
{
std::vector<int> globindexes( globsize, 0 );
// fill global unique index vector
std::generate( globindexes.begin(), globindexes.end(), [i = 0] () mutable { return i++; } );
// random shuffling
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
std::shuffle( globindexes.begin(), globindexes.end(), std::default_random_engine(seed) );
//unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
//std::shuffle( globindexes.begin(), globindexes.end(), std::default_random_engine(seed) );
std::random_shuffle( globindexes.begin(), globindexes.end() );
// move and send data into local index vectors
std::vector<int> locindexes( count, 0 );
std::vector<int> locindexes( mpiio_count, 0 );
int n = 0; auto globIt = globindexes.begin();
std::move( globIt, globIt + count, indexes.begin() );
++n; globIt += count;
std::move( globIt, globIt + mpiio_count, indexes.begin() );
++n; globIt += mpiio_count;
for (
;
n < size && globIt != globindexes.end();
++n , globIt += count
n < mpiio_size && globIt != globindexes.end();
++n , globIt += mpiio_count
)
{
std::move( globIt, globIt + count, locindexes.begin() );
MPI_Send( locindexes.data(), count, MPI_INT, n, 0, comm );
std::move( globIt, globIt + mpiio_count, locindexes.begin() );
MPI_Send( locindexes.data(), mpiio_count, MPI_INT, n, 0, comm );
}
} else {
MPI_Recv( indexes.data(), count, MPI_INT, 0, 0, comm, &status);
MPI_Recv( indexes.data(), mpiio_count, MPI_INT, 0, 0, comm, &status);
}
std::sort( indexes.begin(), indexes.end() ); // monotonically increasing for file view
// Create indexed MPI_Datatype
MPI_Type_create_indexed_block
(
count,
mpiio_count,
1,
indexes.data(),
MPI_DOUBLE,
@@ -84,31 +85,48 @@ IO::IO(const Settings &s, MPI_Comm comm)
(
comm,
m_outputfilename.c_str(),
MPI_MODE_CREATE | MPI_MODE_WRONLY,
MPI_MODE_CREATE | MPI_MODE_RDWR,
MPI_INFO_NULL,
&fh
);
}
IO::~IO()
{
MPI_File_close(&fh);
MPI_Type_free(&filetype);
}
void IO::write(int step, const HeatTransfer &ht, const Settings &s,
MPI_Comm comm)
{
// Set file view
int offset = mpiio_size * step * mpiio_count * sizeof(double);
MPI_File_set_view
(
fh,
0,
offset,
MPI_DOUBLE,
filetype,
"native",
MPI_INFO_NULL
);
MPI_File_write_all(fh, ht.data_noghost().data(), mpiio_count, MPI_DOUBLE, MPI_STATUS_IGNORE);
}
IO::~IO()
{
MPI_File_close(&fh);
MPI_Type_free(&filetype);
}
void IO::write(int step, const HeatTransfer &ht, const Settings &s,
MPI_Comm comm)
void IO::read(const int step, std::vector<double> &buffer, const Settings &s,
MPI_Comm comm)
{
MPI_File_write_all(fh, ht.data(), count, MPI_DOUBLE, MPI_STATUS_IGNORE);
// Set file view
int offset = mpiio_size * step * mpiio_count * sizeof(double);
MPI_File_set_view
(
fh,
offset,
MPI_DOUBLE,
filetype,
"native",
MPI_INFO_NULL
);
MPI_File_read_all(fh, buffer.data(), mpiio_count, MPI_DOUBLE, MPI_STATUS_IGNORE);
}
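The indexed-block backend builds its file view from an explicit, sorted list of global element indices per rank (produced above by shuffling on rank 0 and distributing with MPI_Send/MPI_Recv). The sketch below (my example, not the project's code) shows the same MPI_Type_create_indexed_block plus per-step displacement idea with a simpler round-robin index assignment; sizes and the file name are assumptions.

#include <mpi.h>
#include <vector>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    const int count = 8;                              // local element count (assumed)
    const int globsize = count * size;                // global element count

    // Round-robin ownership: rank r owns indices r, r+size, r+2*size, ...
    // (already monotonically increasing, as the file view requires).
    std::vector<int> indexes(count);
    for (int i = 0; i < count; ++i)
        indexes[i] = rank + i * size;

    MPI_Datatype filetype;
    MPI_Type_create_indexed_block(count, 1, indexes.data(), MPI_DOUBLE, &filetype);
    MPI_Type_commit(&filetype);

    MPI_File fh;
    MPI_File_open(MPI_COMM_WORLD, "heat_indexed.dat", // hypothetical file name
                  MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);

    const int step = 0;                               // assumed
    MPI_Offset disp = (MPI_Offset)globsize * step * sizeof(double);

    std::vector<double> block(count, (double)rank);
    MPI_File_set_view(fh, disp, MPI_DOUBLE, filetype, "native", MPI_INFO_NULL);
    MPI_File_write_all(fh, block.data(), count, MPI_DOUBLE, MPI_STATUS_IGNORE);

    std::vector<double> back(count, 0.0);
    MPI_File_set_view(fh, disp, MPI_DOUBLE, filetype, "native", MPI_INFO_NULL);
    MPI_File_read_all(fh, back.data(), count, MPI_DOUBLE, MPI_STATUS_IGNORE);

    MPI_File_close(&fh);
    MPI_Type_free(&filetype);
    MPI_Finalize();
    return 0;
}

As an aside on the shuffle swapped in above: std::random_shuffle was deprecated in C++14 and removed in C++17, so the commented-out seeded std::shuffle variant is the more portable choice.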
@@ -35,7 +35,7 @@ IO::IO(const Settings &s, MPI_Comm comm)
(
comm,
m_outputfilename.c_str(),
MPI_MODE_CREATE | MPI_MODE_WRONLY,
MPI_MODE_CREATE | MPI_MODE_RDWR,
MPI_INFO_NULL,
&fh
);
@@ -51,5 +51,13 @@ void IO::write(int step, const HeatTransfer &ht, const Settings &s,
{
int offset = ( mpiio_rank + mpiio_size*step ) * mpiio_count * sizeof(double);
MPI_File_write_at(fh, offset, ht.data(), mpiio_count, MPI_DOUBLE, MPI_STATUS_IGNORE);
MPI_File_write_at(fh, offset, ht.data_noghost().data(), mpiio_count, MPI_DOUBLE, MPI_STATUS_IGNORE);
}
void IO::read(const int step, std::vector<double> &buffer, const Settings &s,
MPI_Comm comm)
{
int offset = ( mpiio_rank + mpiio_size*step ) * mpiio_count * sizeof(double);
MPI_File_read_at(fh, offset, buffer.data(), mpiio_count, MPI_DOUBLE, MPI_STATUS_IGNORE);
}
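The explicit-offset backend uses the same (mpiio_rank + mpiio_size*step) * mpiio_count * sizeof(double) layout as the seek-based one, but MPI_File_write_at/MPI_File_read_at carry the offset in the call itself, so no separate seek is needed and the individual file pointer stays untouched. A self-contained sketch follows (my example; sizes and the file name are assumptions).

#include <mpi.h>
#include <vector>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    const int N = 8;                                  // local element count (assumed)
    const int step = 0;                               // assumed
    std::vector<double> block(N, (double)rank);
    std::vector<double> back(N, 0.0);

    MPI_File fh;
    MPI_File_open(MPI_COMM_WORLD, "heat_at.dat",      // hypothetical file name
                  MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);

    MPI_Offset offset =
        ((MPI_Offset)rank + (MPI_Offset)size * step) * N * sizeof(double);

    // The offset travels with each call, so write and read share no pointer state.
    MPI_File_write_at(fh, offset, block.data(), N, MPI_DOUBLE, MPI_STATUS_IGNORE);
    MPI_File_read_at(fh, offset, back.data(), N, MPI_DOUBLE, MPI_STATUS_IGNORE);

    MPI_File_close(&fh);
    MPI_Finalize();
    return 0;
}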
@@ -107,7 +107,45 @@ int main(int argc, char *argv[])
auto avg_time = std::accumulate(times.begin(), times.end(), 0.0) / static_cast<double>(times.size());
auto max_time = *std::max_element(times.begin(), times.end());
auto min_time = *std::min_element(times.begin(), times.end());
std::cout << "global size [GB] " << GB
std::cout << "Writing global size [GB] " << GB
<< " local size [GB] " << lGB
<< " perf [GB/s] " << GB/avg_time
<< " max perf [GB/s] " << GB / (min_time)
<< " min perf [GB/s] " << GB / (max_time)
<< std::endl;
MPI_Barrier(mpiHeatTransferComm);
}
//- Reading
for (unsigned int t = 1; t <= settings.steps; ++t)
{
std::vector<double> buffer( settings.ndx*settings.ndy, 0.0 );
MPI_Barrier(mpiHeatTransferComm);
if (rank == 0)
std::cout << "Reading step " << t << ":\n";
double timeIO_start = 0.0;
unsigned int nioit = 0;
std::vector<double> times(settings.ioiterations - settings.ioit0, 0.0);
for (unsigned int it = 0; it < settings.ioiterations; ++it)
{
if (it >= settings.ioit0)
{ timeIO_start = MPI_Wtime(); }
io.read(t, buffer, settings, mpiHeatTransferComm);
if (it >= settings.ioit0)
{ times[it - settings.ioit0] = MPI_Wtime() - timeIO_start; }
}
auto GB = static_cast<double>(settings.gndx*settings.gndy*sizeof(double)) / 1.0e9;
auto lGB = static_cast<double>(settings.ndx*settings.ndy*sizeof(double)) / 1.0e9;
auto avg_time = std::accumulate(times.begin(), times.end(), 0.0) / static_cast<double>(times.size());
auto max_time = *std::max_element(times.begin(), times.end());
auto min_time = *std::min_element(times.begin(), times.end());
std::cout << "Reading global size [GB] " << GB
<< " local size [GB] " << lGB
<< " perf [GB/s] " << GB/avg_time
<< " max perf [GB/s] " << GB / (min_time)
......
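The new reading loop in main() reuses the writing benchmark's bookkeeping: iterations before settings.ioit0 are warm-up and are not timed, the reported bandwidth is the global size in GB divided by the average time, and the max/min bandwidths come from the minimum/maximum per-iteration times. A self-contained sketch of that bookkeeping follows (my example; the ioiterations, ioit0 and size values are assumptions, and the timed call is only a placeholder for io.read).

#include <mpi.h>
#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    const unsigned ioiterations = 10, ioit0 = 2;      // assumed settings
    const double GB = 1.0;                            // assumed global size in GB
    std::vector<double> times(ioiterations - ioit0, 0.0);

    for (unsigned it = 0; it < ioiterations; ++it)
    {
        double t0 = MPI_Wtime();
        MPI_Barrier(MPI_COMM_WORLD);                  // placeholder for io.read(...)
        if (it >= ioit0)                              // skip warm-up iterations
            times[it - ioit0] = MPI_Wtime() - t0;
    }

    double avg = std::accumulate(times.begin(), times.end(), 0.0) / times.size();
    double tmax = *std::max_element(times.begin(), times.end());
    double tmin = *std::min_element(times.begin(), times.end());
    if (rank == 0)
        std::printf("perf %g GB/s  max perf %g GB/s  min perf %g GB/s\n",
                    GB / avg, GB / tmin, GB / tmax);

    MPI_Finalize();
    return 0;
}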