Commit bd033fdf authored by chatzidp's avatar chatzidp
Browse files

Examples for MPI part2

parent d0329454
......@@ -10,13 +10,13 @@ int main(int argc, char** argv) {
int num;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD,&num);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
double ds=3.1415927; // to send
double dr; // to receive
int tag=99;
if(num==0) {
if(rank==0) {
MPI_Send(&ds,1,MPI_DOUBLE,1,tag,MPI_COMM_WORLD);
MPI_Recv (&dr,1,MPI_DOUBLE,1,tag,MPI_COMM_WORLD,&status);
}
......
// Example codes for HPC course
// (c) 2016 Panos Hadjidoukas, ETH Zurich
#include <stdio.h>
#include <math.h>
#include <mpi.h>
#ifndef NITERS
#define NITERS 10000
#endif
int main(int argc, char **argv)
{
    // Benchmark: measure the average latency of MPI_Barrier by timing
    // NITERS back-to-back barrier calls and dividing by the count.
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    const int n_iters = NITERS;

    /* do performance benchmark */
    if (rank == 0) printf("Entering MPI_Barrier() benchmark\n");

    MPI_Barrier(MPI_COMM_WORLD);        // synchronize everyone before timing
    double t0 = MPI_Wtime();
    for (int it = 0; it < n_iters; ++it)
        MPI_Barrier(MPI_COMM_WORLD);
    double elapsed = MPI_Wtime() - t0;
    MPI_Barrier(MPI_COMM_WORLD);        // synchronize again before reporting

    if (rank == 0) printf("MPI_Barrier() benchmark: Average Barrier Latency = %lf usec\n", (elapsed/(double)n_iters)*1000.0*1000.0);
    MPI_Finalize();
    return 0;
}
// Example codes for HPC course
// (c) 2016 Panos Hadjidoukas, ETH Zurich
#include <stdio.h>
#include <math.h>
#include <mpi.h>
// NOTE(review): deliberately broken example. Only the odd-numbered ranks
// enter the barrier below, so with two or more processes the collective is
// never matched by the even ranks and the odd ranks deadlock inside
// MPI_Barrier. Kept as-is: the bug is the teaching point.
int main(int argc, char *argv[])
{
MPI_Init(&argc, &argv);
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank % 2)               // only odd ranks participate -> mismatched collective
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}
// Example codes for HPC course
// (c) 2016 Panos Hadjidoukas, ETH Zurich
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
int main(int argc , char **argv)
{
    // MPI_Bcast demo: each step, rank 0 draws a random double and
    // broadcasts it; all other ranks sleep for a second so the output
    // of the steps is visibly staggered.
    int rank, size;
    double data;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    srand48(rank);   // per-rank seed

    for (int step = 0; step < 10; ++step) {
        if (rank == 0)
            data = drand48();           // value originates on the root
        MPI_Bcast(&data, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
        if (rank != 0)
            sleep(1);
        printf("Step %d: I am Process %d Data = %f\n", step, rank, data);
    }

    MPI_Finalize();
    return 0;
}
// Example codes for HPC course
// (c) 2016 Panos Hadjidoukas, ETH Zurich
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
// Hand-rolled "broadcast": rank 0 draws a random value each step and sends
// it to every other rank with a synchronous MPI_Ssend; receivers sleep a
// random amount first, illustrating how the root is serialized against
// each receiver.
int main(int argc , char **argv)
{
    int rank, size;
    double data;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    srand48(rank);   // per-rank seed so the sleeps below differ across ranks

    for (int k = 0; k < 10; k++) {
        if (!rank) data = drand48();   // only the "root" generates the value
        if (!rank) {
            // one synchronous send per receiver: each Ssend completes only
            // once the matching receive has started
            for (int i = 1; i < size; i++)
                MPI_Ssend(&data, 1, MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
        }
        else {
            sleep(lrand48()%2); // random sleep - what can I do?
            MPI_Status status;
            MPI_Recv(&data, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &status);
        }
        printf("Step %d: I am Process %d Data = %f\n", k, rank, data);
    }
    MPI_Finalize();
    return 0;   // fix: was missing; keeps exit status explicit and consistent with sibling examples
}
// Example codes for HPC course
// (c) 2012 Matthias Troyer, ETH Zurich
#include <iostream>
#include <string>
#include <mpi.h>
// NOTE(review): unsafe-ordering example, kept as-is on purpose. Both ranks
// call MPI_Send BEFORE MPI_Recv; this only works because the implementation
// buffers the small 8-byte message. With an unbuffered/synchronous send
// both ranks would block in MPI_Send and deadlock.
// Also assumes exactly 2 ranks: every rank != 0 exchanges with rank 0, but
// rank 0 only exchanges with rank 1, so any rank >= 2 blocks in MPI_Recv.
int main(int argc, char** argv) {
MPI_Status status;
int rank;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
double ds=3.1415927; // to send
double dr; // to receive
int tag=99;
if(rank==0) {
MPI_Send(&ds,1,MPI_DOUBLE,1,tag,MPI_COMM_WORLD);
MPI_Recv (&dr,1,MPI_DOUBLE,1,tag,MPI_COMM_WORLD,&status);
}
else {
MPI_Send(&ds,1,MPI_DOUBLE,0,tag,MPI_COMM_WORLD);
MPI_Recv (&dr,1,MPI_DOUBLE,0,tag,MPI_COMM_WORLD,&status);
}
MPI_Finalize();
return 0;
}
// Example codes for HPC course
// (c) 2016 Panos Hadjidoukas, ETH Zurich
#include <iostream>
#include <string>
#include <mpi.h>
#ifndef N
#define N 1024
#endif
// Two-rank exchange of an N-byte buffer; rank != 0 then inspects the
// MPI_Status fields (source, tag) and the actual length via MPI_Get_count.
// NOTE(review): both ranks MPI_Send before MPI_Recv -- this relies on the
// implementation buffering an N-byte message and can deadlock for large N;
// it also assumes exactly 2 ranks (same pattern as the example above).
int main(int argc, char** argv) {
    MPI_Status status;
    int num;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD,&num);
    char *ds=(char *)calloc(1, N*sizeof(char)); // to send
    char *dr=(char *)calloc(1, N*sizeof(char)); // to recv
    int tag=99;
    if(num==0) {
        MPI_Send(ds,N,MPI_CHAR,1,tag,MPI_COMM_WORLD);
        MPI_Recv(dr,N,MPI_CHAR,1,tag,MPI_COMM_WORLD,&status);
    }
    else {
        MPI_Send(ds,N,MPI_CHAR,0,tag,MPI_COMM_WORLD);
        MPI_Recv(dr,N,MPI_CHAR,0,tag,MPI_COMM_WORLD,&status);
        int count;
        MPI_Get_count(&status,MPI_CHAR,&count);
        printf("status.MPI_SOURCE = %d, status.MPI_TAG = %d, count = %d\n",
        status.MPI_SOURCE, status.MPI_TAG, count);
    }
    free(ds);   // fix: buffers were leaked in the original
    free(dr);
    MPI_Finalize();
    return 0;
}
// Example codes for HPC course
// (c) 2016 Panos Hadjidoukas, ETH Zurich
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Fill x[0..N-1] with the values 1000, 1001, ..., 1000+N-1. */
void init_data(double *x, int N)
{
    printf("initializing %d elements\n", N);
    for (int idx = 0; idx < N; idx++) {
        x[idx] = 1000 + idx;
    }
}
/* Dump the first N entries of x to stdout, one per line. */
void print_data(double *x, int N)
{
    printf("printing %d elements\n", N);
    for (int j = 0; j < N; j++) {
        printf("x[%d] = %f\n", j, x[j]);
    }
}
// Unknown-length receive, variant 1: rank 0 sends a random-length message;
// rank 1 receives into a MAX_N-sized buffer (the count argument of
// MPI_Recv is only an upper bound) and recovers the true length with
// MPI_Get_count.
int main(int argc, char **argv)
{
    int rank, nranks;
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nranks);

    int MAX_N = 100;
    int N;
    if (rank == 0) {
        // NOTE(review): srand48() is never called, so lrand48() uses its
        // default seed and N is identical on every run.
        N = lrand48()%MAX_N;
        double *x = (double *)calloc(1, N*sizeof(double));
        init_data(x, N);
        MPI_Send(x, N, MPI_DOUBLE, 1, 123, MPI_COMM_WORLD);
        free(x);   // fix: buffer was leaked in the original
    }
    if (rank == 1) {
        double *y = (double *)calloc(1, MAX_N*sizeof(double));
        MPI_Recv(y, MAX_N, MPI_DOUBLE, 0, 123, MPI_COMM_WORLD, &status);
        MPI_Get_count(&status, MPI_DOUBLE, &N);   // actual number of doubles received
        print_data(y, N);
        free(y);   // fix: buffer was leaked in the original
    }
    MPI_Finalize();
    return 0;
}
// Example codes for HPC course
// (c) 2016 Panos Hadjidoukas, ETH Zurich
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Fill x[0..N-1] with the values 1000, 1001, ..., 1000+N-1. */
void init_data(double *x, int N)
{
    printf("initializing %d elements\n", N);
    for (int idx = 0; idx < N; idx++) {
        x[idx] = 1000 + idx;
    }
}
/* Dump the first N entries of x to stdout, one per line. */
void print_data(double *x, int N)
{
    printf("printing %d elements\n", N);
    for (int j = 0; j < N; j++) {
        printf("x[%d] = %f\n", j, x[j]);
    }
}
// Unknown-length receive, variant 2: rank 1 first calls MPI_Probe to learn
// the incoming message length, allocates an exact-size buffer, and only
// then receives. Avoids the MAX_N over-allocation of variant 1.
int main(int argc, char **argv)
{
    int rank, nranks;
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nranks);

    int MAX_N = 100;
    int N;
    if (rank == 0) {
        // NOTE(review): lrand48() is unseeded here, so N is the same every run.
        N = lrand48()%MAX_N;
        double *x = (double *)calloc(1, N*sizeof(double));
        init_data(x, N);
        MPI_Send(x, N, MPI_DOUBLE, 1, 123, MPI_COMM_WORLD);
        free(x);   // fix: buffer was leaked in the original
    }
    if (rank == 1) {
        MPI_Probe(0, 123, MPI_COMM_WORLD, &status);     // peek at the pending message
        MPI_Get_count(&status, MPI_DOUBLE, &N);         // its length in doubles
        double *y = (double *)calloc(1, N*sizeof(double));
        MPI_Recv(y, N, MPI_DOUBLE, 0, 123, MPI_COMM_WORLD, &status);
        print_data(y, N);
        free(y);   // fix: buffer was leaked in the original
    }
    MPI_Finalize();
    return 0;
}
// Example codes for HPC course
// (c) 2016 Panos Hadjidoukas, ETH Zurich
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Fill x[0..N-1] with the values 1000, 1001, ..., 1000+N-1. */
void init_data(double *x, int N)
{
    printf("initializing %d elements\n", N);
    for (int idx = 0; idx < N; idx++) {
        x[idx] = 1000 + idx;
    }
}
/* Dump the first N entries of x to stdout, one per line. */
void print_data(double *x, int N)
{
    printf("printing %d elements\n", N);
    for (int j = 0; j < N; j++) {
        printf("x[%d] = %f\n", j, x[j]);
    }
}
// Unknown-length receive, variant 3: a two-message protocol. Rank 0 first
// sends the element count N (tag 123), then the N doubles (tag 124); rank 1
// allocates an exact-size buffer between the two receives.
int main(int argc, char **argv)
{
    int rank, nranks;
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nranks);

    int MAX_N = 100;
    int N;
    if (rank == 0) {
        // NOTE(review): lrand48() is unseeded here, so N is the same every run.
        N = lrand48()%MAX_N;
        double *x = (double *)calloc(1, N*sizeof(double));
        init_data(x, N);
        MPI_Send(&N, 1, MPI_INT, 1, 123, MPI_COMM_WORLD);    // header: the length
        MPI_Send(x, N, MPI_DOUBLE, 1, 124, MPI_COMM_WORLD);  // payload
        free(x);   // fix: buffer was leaked in the original
    }
    if (rank == 1) {
        MPI_Recv(&N, 1, MPI_INT, 0, 123, MPI_COMM_WORLD, &status);
        double *y = (double *)calloc(1, N*sizeof(double));   // exact-size buffer
        MPI_Recv(y, N, MPI_DOUBLE, 0, 124, MPI_COMM_WORLD, &status);
        print_data(y, N);
        free(y);   // fix: buffer was leaked in the original
    }
    MPI_Finalize();
    return 0;
}
// Example codes for HPC course
// (c) 2016 Panos Hadjidoukas, ETH Zurich
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Fill x[0..N-1] with the values 1000, 1001, ..., 1000+N-1. */
void init_data(double *x, int N)
{
    printf("initializing %d elements\n", N);
    for (int idx = 0; idx < N; idx++) {
        x[idx] = 1000 + idx;
    }
}
/* Dump the first N entries of x to stdout, one per line. */
void print_data(double *x, int N)
{
    printf("printing %d elements\n", N);
    for (int j = 0; j < N; j++) {
        printf("x[%d] = %f\n", j, x[j]);
    }
}
// Three-message protocol: send the element count (tag 123), then the
// datatype handle encoded as raw bytes (tag 124), then the data (tag 125).
// The receiver verifies the datatype before accepting the payload.
int main(int argc, char **argv)
{
int rank, nranks;
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &nranks);
int MAX_N = 100;
int N;
if (rank == 0) {
N = lrand48()%MAX_N; // lrand48() is never seeded, so N is the same every run
double *x = (double *)calloc(1, N*sizeof(double)); // NOTE(review): never freed
init_data(x, N);
MPI_Send(&N, 1, MPI_INT, 1, 123, MPI_COMM_WORLD);
// NOTE(review): assumes an MPI_Datatype handle converts to long. That
// holds for int-handle implementations (e.g. MPICH) but Open MPI uses
// pointer handles, where this conversion/comparison is implementation-
// specific and may not even compile -- verify against the target MPI.
long type = MPI_DOUBLE;
printf("sending type = %ld\n", type);
// ship the handle as raw bytes; only meaningful when both ranks run the
// same MPI implementation on the same ABI
MPI_Send(&type, sizeof(long), MPI_BYTE, 1, 124, MPI_COMM_WORLD);
MPI_Send(x, N, MPI_DOUBLE, 1, 125, MPI_COMM_WORLD);
}
if (rank == 1) {
MPI_Recv(&N, 1, MPI_INT, 0, 123, MPI_COMM_WORLD, &status);
double *y = (double *)calloc(1, N*sizeof(double)); // NOTE(review): never freed
long type;
MPI_Recv(&type, sizeof(long), MPI_BYTE, 0, 124, MPI_COMM_WORLD, &status);
printf("received type = %ld\n", type);
if (type == MPI_DOUBLE) { // accept the payload only if the announced type matches
MPI_Recv(y, N, MPI_DOUBLE, 0, 125, MPI_COMM_WORLD, &status);
} else {
MPI_Abort(MPI_COMM_WORLD, 911); // from Deino MPI
}
print_data(y, N);
}
MPI_Finalize();
return 0;
}
// Example codes for HPC course
// (c) 2016 Panos Hadjidoukas, ETH Zurich
#include <mpi.h>
#include <vector>
#include <stdio.h>
#include <numeric>
#include <iostream>
using namespace std;
// NOTE(review): demo of a common MPI_Gather mistake, kept as-is. The
// rank-0 guard on the final print is commented out on purpose: MPI_Gather
// only fills fullz on the root (rank 0), so every other rank prints the
// sum of its still zero-initialized fullz buffer (i.e. 0).
int main( int argc, char** argv )
{
// vector size
const int N = 1600;
int num_processes, rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD,&num_processes);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
// initialize local parts of the vectors and do the sum z = x + y
int nlocal = N / num_processes;
std::vector<float> x(nlocal,-1.2), y(nlocal,3.4), z(nlocal);
for( int i = 0; i < nlocal; i++ ) z[i] = x[i] + y[i];
std::vector<float> fullz(N); // value-initialized to 0.0 on every rank
MPI_Gather(&z[0],nlocal,MPI_FLOAT,&fullz[0],nlocal,MPI_FLOAT, 0,MPI_COMM_WORLD);
// if (rank == 0)   <-- the missing guard: only the root's fullz is meaningful
std::cout << std::accumulate( fullz.begin(), fullz.end(), 0. ) << std::endl;
MPI_Finalize();
return 0;
}
// Example codes for HPC course
// (c) 2016 Panos Hadjidoukas, ETH Zurich
#include <mpi.h>
#include <vector>
#include <stdio.h>
#include <numeric>
#include <iostream>
using namespace std;
int main( int argc, char** argv )
{
    // Corrected gather: local sums z = x + y are collected on rank 0,
    // which alone reduces and prints the global total.
    const int N = 1600;   // global vector size

    MPI_Init(&argc, &argv);
    int num_processes, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &num_processes);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // local slices of x and y, and their elementwise sum z
    const int nlocal = N / num_processes;
    std::vector<float> x(nlocal, -1.2), y(nlocal, 3.4), z(nlocal);
    for (int i = 0; i < nlocal; ++i)
        z[i] = x[i] + y[i];

    if (rank != 0) {
        // non-root ranks only contribute; no receive buffer needed
        MPI_Gather(z.data(), nlocal, MPI_FLOAT, NULL, nlocal, MPI_FLOAT, 0, MPI_COMM_WORLD);
    } else {
        // the root collects every slice and reduces to a single sum
        std::vector<float> fullz(N);
        MPI_Gather(z.data(), nlocal, MPI_FLOAT, fullz.data(), nlocal, MPI_FLOAT, 0, MPI_COMM_WORLD);
        std::cout << std::accumulate( fullz.begin(), fullz.end(), 0. ) << std::endl;
    }

    MPI_Finalize();
    return 0;
}
// Example codes for HPC course
// (c) 2016 Panos Hadjidoukas, ETH Zurich
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Collective MPI-IO write: every rank writes its nlocal doubles to a
// contiguous, rank-ordered slice of a shared binary file.
int main(int argc, char **argv)
{
    int rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // each rank contributes the values 100*rank + 0 .. 100*rank + 9
    const int nlocal = 10;
    double data[nlocal];
    for (int i = 0; i < nlocal; i++) data[i] = 100*rank + i;

    int step = 0;
    char filename[256];
    // fix: bounded snprintf instead of sprintf (no overflow even if the
    // format ever grows)
    snprintf(filename, sizeof(filename), "mydata_%05d.bin", step);

    MPI_File f;
    MPI_File_open(MPI_COMM_WORLD, filename , MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &f);
    MPI_File_set_size (f, 0);   // truncate any contents from a previous run

    MPI_Offset base;
    MPI_File_get_position(f, &base);

    // rank r writes its slice at byte offset r * nlocal * sizeof(double)
    MPI_Offset len = nlocal*sizeof(double);
    MPI_Offset offset = rank*len;
    MPI_Status status;
    MPI_File_write_at_all(f, base + offset, data, nlocal, MPI_DOUBLE, &status);

    MPI_File_close(&f);
    MPI_Finalize();
    return 0;   // fix: was missing
}
// Example codes for HPC course
// (c) 2016 Panos Hadjidoukas, ETH Zurich
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int main(int argc, char **argv)
{
int rank;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
int rank_to_print = 0;
if (argc == 2) rank_to_print = atoi(argv[1]);
const int nlocal = 10;
double data[nlocal];
int step = 0;
char filename[256];
sprintf(filename, "mydata_%05d.bin", step);
MPI_File f;
MPI_File_open(MPI_COMM_WORLD, filename , MPI_MODE_RDONLY, MPI_INFO_NULL, &f);
MPI_Offset base;
MPI_File_get_position(f, &base);