Commit d3af0296 authored by ahuegli

bug fixing

parent 4a515c33
@@ -222,7 +222,7 @@ void WaveEquation::run(double t_end) {
if (count % 10 == 0) {
if (rank == 0)
std::cout << count << " t=" << t << "\n";
-// Print(count); //saving data really slows down the code
+Print(count); //saving data really slows down the code
}
pack_all();
@@ -245,43 +245,43 @@ void WaveEquation::run(double t_end) {
local_request.resize(local_request.size() + 2);
pid_recv = 0 * threads_per_dim * threads_per_dim + t1 * threads_per_dim + t2;
pid_send = 1 * threads_per_dim * threads_per_dim + t1 * threads_per_dim + t2;
-MPI_Irecv(unpack[0], nloc*nloc, MPI_DOUBLE, rank_minus[0], 100, cart_comm,&local_request[0]);
-MPI_Isend( pack[0], nloc*nloc, MPI_DOUBLE, rank_plus [0], 100, cart_comm,&local_request[1]);
+MPI_Irecv(unpack[pid_recv], nloc*nloc, MPI_DOUBLE, rank_minus[0], pid_recv, cart_comm,&local_request[0]);
+MPI_Isend( pack[pid_send], nloc*nloc, MPI_DOUBLE, rank_plus [0], pid_send, cart_comm,&local_request[1]);
}
if (t0 == p - 1) {
local_request.resize(local_request.size() + 2);
pid_recv = 1 * threads_per_dim * threads_per_dim + t1 * threads_per_dim + t2;
pid_send = 0 * threads_per_dim * threads_per_dim + t1 * threads_per_dim + t2;
-MPI_Irecv(unpack[1], nloc*nloc, MPI_DOUBLE, rank_plus [0], 101, cart_comm,&local_request[2]);
-MPI_Isend( pack[1], nloc*nloc, MPI_DOUBLE, rank_minus[0], 101, cart_comm,&local_request[3]);
+MPI_Irecv(unpack[pid_recv], nloc*nloc, MPI_DOUBLE, rank_plus [0], pid_recv, cart_comm,&local_request[2]);
+MPI_Isend( pack[pid_send], nloc*nloc, MPI_DOUBLE, rank_minus[0], pid_send, cart_comm,&local_request[3]);
}
if (t1 == 0) {
local_request.resize(local_request.size() + 2);
pid_recv = 2 * threads_per_dim * threads_per_dim + t0 * threads_per_dim + t2;
pid_send = 3 * threads_per_dim * threads_per_dim + t0 * threads_per_dim + t2;
-MPI_Irecv(unpack[2], nloc*nloc, MPI_DOUBLE, rank_minus[1], 200, cart_comm,&local_request[4]);
-MPI_Isend( pack[2], nloc*nloc, MPI_DOUBLE, rank_plus [1], 200, cart_comm,&local_request[5]);
+MPI_Irecv(unpack[pid_recv], nloc*nloc, MPI_DOUBLE, rank_minus[1], pid_recv, cart_comm,&local_request[4]);
+MPI_Isend( pack[pid_send], nloc*nloc, MPI_DOUBLE, rank_plus [1], pid_send, cart_comm,&local_request[5]);
}
if (t1 == p - 1) {
local_request.resize(local_request.size() + 2);
pid_recv = 3 * threads_per_dim * threads_per_dim + t0 * threads_per_dim + t2;
pid_send = 2 * threads_per_dim * threads_per_dim + t0 * threads_per_dim + t2;
-MPI_Irecv(unpack[3], nloc*nloc, MPI_DOUBLE, rank_plus [1], 201, cart_comm,&local_request[6]);
-MPI_Isend( pack[3], nloc*nloc, MPI_DOUBLE, rank_minus[1], 201, cart_comm,&local_request[7]);
+MPI_Irecv(unpack[pid_recv], nloc*nloc, MPI_DOUBLE, rank_plus [1], pid_recv, cart_comm,&local_request[6]);
+MPI_Isend( pack[pid_send], nloc*nloc, MPI_DOUBLE, rank_minus[1], pid_send, cart_comm,&local_request[7]);
}
if (t2 == 0) {
local_request.resize(local_request.size() + 2);
pid_recv = 4 * threads_per_dim * threads_per_dim + t0 * threads_per_dim + t1;
pid_send = 5 * threads_per_dim * threads_per_dim + t0 * threads_per_dim + t1;
-MPI_Irecv(unpack[4], nloc*nloc, MPI_DOUBLE, rank_minus[2], 300, cart_comm,&local_request[8]);
-MPI_Isend( pack[4], nloc*nloc, MPI_DOUBLE, rank_plus [2], 300, cart_comm,&local_request[9]);
+MPI_Irecv(unpack[pid_recv], nloc*nloc, MPI_DOUBLE, rank_minus[2], pid_recv, cart_comm,&local_request[8]);
+MPI_Isend( pack[pid_send], nloc*nloc, MPI_DOUBLE, rank_plus [2], pid_send, cart_comm,&local_request[9]);
}
if (t2 == p - 1) {
local_request.resize(local_request.size() + 2);
pid_recv = 5 * threads_per_dim * threads_per_dim + t0 * threads_per_dim + t1;
pid_send = 4 * threads_per_dim * threads_per_dim + t0 * threads_per_dim + t1;
-MPI_Irecv(unpack[5], nloc*nloc, MPI_DOUBLE, rank_plus [2], 301, cart_comm,&local_request[10]);
-MPI_Isend( pack[5], nloc*nloc, MPI_DOUBLE, rank_minus[2], 301, cart_comm,&local_request[11]);
+MPI_Irecv(unpack[pid_recv], nloc*nloc, MPI_DOUBLE, rank_plus [2], pid_recv, cart_comm,&local_request[10]);
+MPI_Isend( pack[pid_send], nloc*nloc, MPI_DOUBLE, rank_minus[2], pid_send, cart_comm,&local_request[11]);
}
// uncomment when you complete question 2
MPI_Waitall(local_request.size(),
......
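
In the changed calls, the fixed message tags (100, 101, 200, 201, 300, 301) and the fixed buffer indices 0-5 are replaced by pid_recv and pid_send, which encode both the face and the thread coordinates (t0, t1, t2). Below is a minimal sketch of that indexing and the resulting non-blocking exchange, assuming the hybrid MPI + threads decomposition visible in the diff; the helper names face_tag and exchange_face are hypothetical and not part of the repository.

```cpp
// Sketch only: per-thread tag/buffer indexing for the halo exchange, assuming
// 'threads_per_dim' threads per dimension and per-face pack/unpack buffers of
// nloc*nloc doubles. face_tag() and exchange_face() are illustrative names.
#include <mpi.h>
#include <vector>

// Encode (face, thread row, thread column) into one integer; the commit uses
// this value both as the pack/unpack buffer index and as the MPI message tag.
int face_tag(int face, int ta, int tb, int threads_per_dim) {
    return face * threads_per_dim * threads_per_dim + ta * threads_per_dim + tb;
}

// Post one non-blocking receive/send pair for a single face of one thread's block.
void exchange_face(double *unpack_buf, double *pack_buf, int nloc,
                   int recv_tag, int send_tag, int rank_from, int rank_to,
                   MPI_Comm cart_comm, std::vector<MPI_Request> &requests) {
    requests.resize(requests.size() + 2);
    MPI_Irecv(unpack_buf, nloc * nloc, MPI_DOUBLE, rank_from, recv_tag,
              cart_comm, &requests[requests.size() - 2]);
    MPI_Isend(pack_buf, nloc * nloc, MPI_DOUBLE, rank_to, send_tag,
              cart_comm, &requests[requests.size() - 1]);
}

// After all faces of all threads have been posted, a single call completes them:
//   MPI_Waitall(static_cast<int>(requests.size()), requests.data(), MPI_STATUSES_IGNORE);
```

With one tag per (face, thread) pair, concurrent messages between the same two ranks can no longer be matched against the wrong MPI_Irecv, which appears to be the bug this commit addresses.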