To receive notifications about scheduled maintenance, subscribe to the gitlab-operations@sympa.ethz.ch mailing list at https://sympa.ethz.ch.

Commit 02fdb4c3 authored by chatzidp

Merge branch 'master' of gitlab.ethz.ch:hpcse_hs16/lecture

parents 3f9f1c3e c396e4f5
// Example codes for HPC course
// (c) 2012 Matthias Troyer, ETH Zurich
#include <iostream>
// Compute the n-th Fibonacci number, spawning the two recursive calls
// as OpenMP tasks. The 'final' clause runs small subproblems (n<=5)
// inline so task-creation overhead does not swamp the computation.
int fibonacci(int n)
{
    if (n < 2)
        return n;
    int left;   // fib(n-1), written by the first task
    int right;  // fib(n-2), written by the second task
#pragma omp task shared(left) firstprivate(n) untied final (n<=5)
    left = fibonacci(n-1);
#pragma omp task shared(right) firstprivate(n) untied final (n<=5)
    right = fibonacci(n-2);
    // both child tasks must finish before their results are combined
#pragma omp taskwait
    return left + right;
}
// Read n from stdin and print fibonacci(n). The recursion is started by
// exactly one thread ('single'); the rest of the team executes the tasks.
int main()
{
    int n;
    std::cin >> n;
#pragma omp parallel shared(n)
    {
        // 'nowait': the starting thread need not wait at the single's barrier
#pragma omp single nowait
        std::cout << fibonacci(n) << std::endl;
    }
}
\ No newline at end of file
// Example codes for HPC course
// (c) 2012 Matthias Troyer, ETH Zurich
#include <iostream>
#include <mutex>
#include "omp_mutex.hpp"
int main()
{
omp_mutex m;
#pragma omp parallel for
for (int i=0; i < 100; ++i) {
{
std::lock_guard<omp_mutex> lock(m);
std::cout << "Hello from the " << i << "-th iteration\n";
}
}
}
\ No newline at end of file
// Example codes for HPC course
// (c) 2012 Matthias Troyer, ETH Zurich
#include <iostream>
#include <omp.h>
// Demonstrate the 'master' construct: of all threads in the parallel
// region, only the master thread (id 0) executes the print statement.
int main() {
#pragma omp parallel
#pragma omp master
    {
        std::cout << "Only thread " << omp_get_thread_num()
                  << " of " << omp_get_num_threads() << " is printing.\n";
    }
    return 0;
}
#include <omp.h>
// A std::mutex look-alike built on an OpenMP simple lock.
// Fix: deleted copy operations. The original was copyable, so two copies
// would both call omp_destroy_lock on the same underlying lock (and a
// copied lock state is meaningless) -- same hardening as omp_mutex below.
class mutex {
public:
    mutex() { omp_init_lock(&mutex_);}
    ~mutex() { omp_destroy_lock(&mutex_);}
    mutex(mutex const&) = delete;
    mutex& operator=(mutex const&) = delete;
    // Block until the lock is acquired.
    void lock() { omp_set_lock(&mutex_);}
    // Release the lock; the calling thread must currently hold it.
    void unlock() { omp_unset_lock(&mutex_);}
private:
    omp_lock_t mutex_;
};
// Scoped lock ownership (a minimal std::lock_guard): acquires the mutex
// on construction and releases it on destruction, so the lock cannot be
// leaked on early return or exception.
// Fixes: constructor made explicit (a guard must never be created by an
// implicit conversion) and copy operations deleted (a copied guard would
// unlock the same mutex twice).
template <class Mutex>
class lock_guard {
public:
    explicit lock_guard(Mutex& m)
    : mutex_(m)
    {
        mutex_.lock();
    }
    ~lock_guard()
    {
        mutex_.unlock();
    }
    lock_guard(lock_guard const&) = delete;
    lock_guard& operator=(lock_guard const&) = delete;
private:
    Mutex& mutex_;
};
// Example codes for HPC course
// (c) 2012 Matthias Troyer, ETH Zurich
#include <omp.h>
// Mutex built on an OpenMP simple lock. Satisfies BasicLockable
// (lock/unlock), so it works with std::lock_guard. Non-copyable:
// each object uniquely owns its omp_lock_t.
class omp_mutex
{
public:
    omp_mutex() { omp_init_lock(&lock_); }
    ~omp_mutex() { omp_destroy_lock(&lock_); }
    omp_mutex(omp_mutex const&) = delete;
    omp_mutex& operator=(omp_mutex const&) = delete;
    // Block until the lock becomes available, then acquire it.
    void lock() { omp_set_lock(&lock_); }
    // Release the lock; the calling thread must hold it.
    void unlock() { omp_unset_lock(&lock_); }
private:
    omp_lock_t lock_;
};
// Recursive mutex built on an OpenMP nestable lock: the owning thread
// may lock it repeatedly and must unlock it the same number of times.
// Non-copyable, like omp_mutex above.
class omp_recursive_mutex
{
public:
    omp_recursive_mutex() { omp_init_nest_lock(&lock_); }
    ~omp_recursive_mutex() { omp_destroy_nest_lock(&lock_); }
    omp_recursive_mutex(omp_recursive_mutex const&) = delete;
    omp_recursive_mutex& operator=(omp_recursive_mutex const&) = delete;
    // Acquire (or re-acquire, if already held by this thread) the lock.
    void lock() { omp_set_nest_lock(&lock_); }
    // Undo one level of locking.
    void unlock() { omp_unset_nest_lock(&lock_); }
private:
    omp_nest_lock_t lock_;
};
#include <iostream>
#include <omp.h>
// No parallel region here: the program runs with a single thread, so it
// prints exactly one line (thread 0 of 1) even when OpenMP is enabled.
int main()
{
    int const id = omp_get_thread_num();
    int const count = omp_get_num_threads();
    std::cout << "I am thread " << id << " of " << count << " threads." << std::endl;
}
\ No newline at end of file
#include <iostream>
#include <omp.h>
// Every thread of the parallel region prints its own greeting. There is
// no synchronization, so output from different threads may interleave.
int main()
{
#pragma omp parallel
    {
        int const id = omp_get_thread_num();
        int const count = omp_get_num_threads();
        std::cout << "I am thread " << id << " of " << count << " threads." << std::endl;
    }
}
\ No newline at end of file
#include <iostream>
#include <omp.h>
// Like the plain parallel-hello example, but the print statement sits in
// a critical section so the output lines of different threads cannot
// interleave.
int main()
{
#pragma omp parallel
    {
        int const id = omp_get_thread_num();
        int const count = omp_get_num_threads();
        // only one thread at a time may execute the statement below
#pragma omp critical
        std::cout << "I am thread " << id << " of " << count << " threads." << std::endl;
    }
}
\ No newline at end of file
// Example codes for HPC course
// (c) 2012 Matthias Troyer, ETH Zurich
#include <iostream>
// 'ordered' demo: iterations run in parallel, but the block marked
// 'ordered' executes in original loop order, so the output appears in
// sequence 0..99 despite the parallel execution.
int main()
{
#pragma omp parallel for ordered
    for (int iter = 0; iter < 100; ++iter) {
        int result = iter;  // stand-in for real per-iteration work
#pragma omp ordered
        {
            std::cout << "Hello from the " << result << "-th iteration\n";
        }
    }
    return 0;
}
\ No newline at end of file
// Example codes for HPC course
// (c) 2012 Matthias Troyer, ETH Zurich
#include <iostream>
#include <vector>
#include <algorithm>
#include <random>
#include <iterator>
// this may not be optimal but it is simple and short
// Sequential quicksort over a random-access range [first, last).
// Simple rather than optimal: the pivot is always the last element.
template <class It>
void quicksort(It first, It last)
{
    // ranges of length 0 or 1 are already sorted
    if (last - first <= 1)
        return;
    using value_type = typename std::iterator_traits<It>::value_type;
    // choose the last element of the range as the pivot
    value_type pivot = *(last - 1);
    // move everything smaller than the pivot in front of 'split'
    It split = std::partition(first, last, [=](value_type x) { return x < pivot; });
    // put the pivot into its final sorted position
    std::swap(*(last - 1), *split);
    // recurse on both sides; the pivot itself is already placed
    quicksort(first, split);
    quicksort(split + 1, last);
}
// Driver: read n, fill a vector with n random ints, quicksort it and
// report whether the data is sorted before and after.
// Fix: the original used std::bind(dist,mt), which requires <functional>
// (not included in this file); a lambda produces the same value sequence
// with no extra include.
int main()
{
    int n;
    std::cin >> n;
    // create random numbers
    std::mt19937 mt;
    std::uniform_int_distribution<int> dist(0, std::numeric_limits<int>::max());
    std::vector<int> data(n);
    std::generate(data.begin(), data.end(), [&]() { return dist(mt); });
    // check if it is sorted
    if (std::is_sorted(data.begin(), data.end()))
        std::cout << "Initial data is sorted.\n";
    else
        std::cout << "Initial data is not sorted.\n";
    // call quicksort
    quicksort(data.begin(), data.end());
    // check if it is sorted
    if (std::is_sorted(data.begin(), data.end()))
        std::cout << "Final data is sorted.\n";
    else
        std::cout << "Final data is not sorted.\n";
    //std::copy(data.begin(),data.end(),std::ostream_iterator<int>(std::cout,"\n"));
}
\ No newline at end of file
// Example codes for HPC course
// (c) 2012 Matthias Troyer, ETH Zurich
#include <iostream>
#include <vector>
#include <algorithm>
#include <random>
#include <iterator>
// this may not be optimal but it is simple and short
// Task-parallel quicksort: each recursive call runs as an OpenMP task.
// The 'final' clause stops spawning tasks for tiny subranges, whose
// scheduling overhead would exceed the sorting work.
// Bug fix: the second task's final clause tested the size of the LEFT
// partition (split-first) -- a copy-paste error; it must test the size
// of the right partition it actually sorts.
template <class It>
void quicksort(It first, It last)
{
    // ranges of length 0 or 1 are already sorted
    if (last-first <= 1)
        return;
    // pick a pivot, here choose the last value
    typedef typename std::iterator_traits<It>::value_type value_type;
    value_type pivot = *(last-1);
    // partition the sequence
    It split = std::partition(first,last,[=](value_type x) { return x < pivot;});
    // move the pivot to the center
    std::swap(*(last-1),*split);
    // sort the two partitions individually, each as its own task
#pragma omp task final (split-first<=1)
    quicksort(first,split);
#pragma omp task final (last-(split+1)<=1)
    quicksort(split+1,last);
}
// Driver: read n, fill a vector with random ints, sort with the
// task-parallel quicksort. One thread starts the recursion ('single');
// the other team members execute the spawned tasks.
// Fix: the original used std::bind(dist,mt), which requires <functional>
// (not included in this file); a lambda produces the same value sequence
// with no extra include.
int main()
{
    int n;
    std::cin >> n;
    // create random numbers
    std::mt19937 mt;
    std::uniform_int_distribution<int> dist(0, std::numeric_limits<int>::max());
    std::vector<int> data(n);
    std::generate(data.begin(), data.end(), [&]() { return dist(mt); });
    // check if it is sorted
    if (std::is_sorted(data.begin(), data.end()))
        std::cout << "Initial data is sorted.\n";
    else
        std::cout << "Initial data is not sorted.\n";
    // call quicksort in parallel
#pragma omp parallel
#pragma omp single nowait
    quicksort(data.begin(), data.end());
    // check if it is sorted (the parallel region's barrier guarantees
    // all sorting tasks have finished by this point)
    if (std::is_sorted(data.begin(), data.end()))
        std::cout << "Final data is sorted.\n";
    else
        std::cout << "Final data is not sorted.\n";
    //std::copy(data.begin(),data.end(),std::ostream_iterator<int>(std::cout,"\n"));
}
\ No newline at end of file
// Example codes for HPC course
// (c) 2012 Matthias Troyer, ETH Zurich
#include <iomanip>
#include <iostream>
// Approximate pi with the Leibniz series pi/4 = 1 - 1/3 + 1/5 - ...
// (serial baseline version).
int main()
{
    unsigned long const nterms = 100000000;
    long double sum = 0.;
    // term t contributes (+1 or -1) / (2t+1)
    for (std::size_t term = 0; term < nterms; ++term)
        sum += (1.0 - 2* (term % 2)) / (2*term + 1);
    std::cout << "pi=" << std::setprecision(18) << 4.*sum << std::endl;
    return 0;
}
\ No newline at end of file
// Example codes for HPC course
// (c) 2012 Matthias Troyer, ETH Zurich
#include <iostream>
#include <iomanip>
#include <omp.h>
// Approximate pi with the Leibniz series, partitioning the terms by
// hand: each thread sums its own contiguous chunk of terms, and the
// per-thread sums are combined by the OpenMP reduction.
int main()
{
unsigned long const nterms = 100000000;
long double sum=0.;
// each thread gets a private 'sum' (initialized to 0); the private
// copies are added into the shared sum when the region ends
#pragma omp parallel reduction(+:sum)
{
int i = omp_get_thread_num();
int nthreads = omp_get_num_threads();
// chunk size per thread; the +0.5 nudges the rounding so the last
// chunk's truncated upper bound still reaches nterms
long double const step = (nterms+0.5l) / nthreads;
// NOTE(review): 'j' is int while 't' is std::size_t -- fine for this
// nterms, but the mixed signed/unsigned comparison and the int range
// would bite for much larger term counts; confirm before scaling up
int j = (i+1) * step;
for (std::size_t t = i * step; t < j; ++t)
sum += (1.0 - 2* (t % 2)) / (2*t + 1);
}
std::cout << "pi=" << std::setprecision(18) << 4.*sum << std::endl;
return 0;
}
\ No newline at end of file
// Example codes for HPC course
// (c) 2012 Matthias Troyer, ETH Zurich
#include <iomanip>
#include <iostream>
// Leibniz-series approximation of pi; the loop iterations are
// work-shared across the team with 'omp for' and the partial sums are
// combined with a reduction.
int main()
{
    unsigned long const nterms = 100000000;
    long double sum = 0.;
#pragma omp parallel shared(sum)
    {
        // each thread reduces into a private copy of sum
#pragma omp for reduction(+:sum)
        for (std::size_t term = 0; term < nterms; ++term)
            sum += (1.0 - 2* (term % 2)) / (2*term + 1);
    }
    std::cout << "pi=" << std::setprecision(18) << 4.*sum << std::endl;
    return 0;
}
\ No newline at end of file
// Example codes for HPC course
// (c) 2012 Matthias Troyer, ETH Zurich
#include <iomanip>
#include <iostream>
// Leibniz-series approximation of pi using the combined 'parallel for'
// construct with a sum reduction -- the most compact parallel variant.
int main()
{
    unsigned long const nterms = 100000000;
    long double sum = 0.;
#pragma omp parallel for reduction(+:sum)
    for (std::size_t term = 0; term < nterms; ++term)
        sum += (1.0 - 2* (term % 2)) / (2*term + 1);
    std::cout << "pi=" << std::setprecision(18) << 4.*sum << std::endl;
    return 0;
}
// Example codes for HPC course
// (c) 2012 Matthias Troyer, ETH Zurich
#include <cassert>
#include <functional>
// Composite Simpson integration of f over [a,b] with N subintervals.
// Requires b >= a and N > 0; exact for polynomials up to degree 3.
inline double simpson(double (*f) (double), double a, double b, unsigned int N)
{
    assert (b>=a);
    assert (N!=0u);
    double h=(b-a)/N;
    // endpoints plus the first midpoint carry the boundary weights
    double acc = ( f(a) + 4*f(a+h/2) + f(b) ) / 2.0;
    // interior grid points (weight 1) and the remaining midpoints (weight 2)
    for ( unsigned int i = 1; i <= N-1; ++i )
        acc += f(a+i*h) + 2*f(a+(i+0.5)*h);
    return acc * h / 3.0;
}
// Example codes for HPC course
// (c) 2012 Matthias Troyer, ETH Zurich
#include "simpson.hpp"
#include <omp.h>
#include <cmath>
#include <iostream>
// The function to integrate
// Integrand: f(x) = x sin(x).
double func(double x)
{
    return std::sin(x) * x;
}
// Integrate func over [a,b] by giving each thread one equal subinterval.
// WARNING: this is the deliberately buggy teaching example in the
// race/critical/atomic/reduction sequence -- the accumulation of
// 'result' below is an unsynchronized read-modify-write on a shared
// variable, i.e. a data race that can lose contributions. See the
// companion versions for the correct fixes.
int main()
{
double a; // lower bound of integration
double b; // upper bound of integration
unsigned int nsteps; // number of subintervals for integration
// read the parameters
std::cin >> a >> b >> nsteps;
double result=0.;
#pragma omp parallel
{
int i = omp_get_thread_num();
int n = omp_get_num_threads();
double delta = (b-a)/n;
// integrate just one part in each thread
double r = simpson(func,a+i*delta,a+(i+1)*delta,nsteps/n);
// DATA RACE: concurrent '+=' on the shared 'result' is unsynchronized
result += r;
}
std::cout << result << std::endl;
return 0;
}
\ No newline at end of file
// Example codes for HPC course
// (c) 2012 Matthias Troyer, ETH Zurich
#include "simpson.hpp"
#include <omp.h>
#include <cmath>
#include <iostream>
// The function to integrate
// Integrand: f(x) = x sin(x).
double func(double x)
{
    return std::sin(x) * x;
}
// Integrate func over [a,b]: each thread integrates one equal
// subinterval and the partial results are summed inside a named
// critical section, so no contribution can be lost.
int main()
{
    double a; // lower bound of integration
    double b; // upper bound of integration
    unsigned int nsteps; // number of subintervals for integration
    // read the parameters
    std::cin >> a >> b >> nsteps;
    double result = 0.;
#pragma omp parallel
    {
        int const tid = omp_get_thread_num();
        int const nthr = omp_get_num_threads();
        double const slice = (b-a)/nthr;
        // integrate just one part in each thread
        double const partial = simpson(func, a+tid*slice, a+(tid+1)*slice, nsteps/nthr);
        // only the accumulation is serialized, not the integration work
#pragma omp critical (simpsonresult)
        result += partial;
    }
    std::cout << result << std::endl;
    return 0;
}
\ No newline at end of file
// Example codes for HPC course
// (c) 2012 Matthias Troyer, ETH Zurich
#include "simpson.hpp"
#include <omp.h>
#include <cmath>
#include <iostream>
// The function to integrate
// Integrand: f(x) = x sin(x).
double func(double x)
{
    return std::sin(x) * x;
}
// Integrate func over [a,b]: each thread integrates one equal
// subinterval and the partial results are accumulated with an atomic
// update -- cheaper than a critical section for a single '+='.
int main()
{
    double a; // lower bound of integration
    double b; // upper bound of integration
    unsigned int nsteps; // number of subintervals for integration
    // read the parameters
    std::cin >> a >> b >> nsteps;
    double result = 0.;
#pragma omp parallel
    {
        int const tid = omp_get_thread_num();
        int const nthr = omp_get_num_threads();
        double const slice = (b-a)/nthr;
        // integrate just one part in each thread
        double const partial = simpson(func, a+tid*slice, a+(tid+1)*slice, nsteps/nthr);
        // atomic add protects only this single update
#pragma omp atomic
        result += partial;
    }
    std::cout << result << std::endl;
    return 0;
}
\ No newline at end of file
// Example codes for HPC course
// (c) 2012 Matthias Troyer, ETH Zurich
#include "simpson.hpp"
#include <omp.h>
#include <cmath>
#include <iostream>
// The function to integrate
// Integrand: f(x) = x sin(x).
double func(double x)
{
    return std::sin(x) * x;
}
// Integrate func over [a,b]: each thread integrates one equal
// subinterval and the per-thread results are combined with an OpenMP
// sum reduction -- no explicit synchronization needed.
// Bug fix: 'result' was uninitialized. reduction(+:result) adds the
// threads' partial sums onto the variable's ORIGINAL value, so starting
// from an indeterminate value made the printed result garbage.
int main()
{
    double a; // lower bound of integration
    double b; // upper bound of integration
    unsigned int nsteps; // number of subintervals for integration
    // read the parameters
    std::cin >> a >> b >> nsteps;
    double result = 0.;
#pragma omp parallel reduction(+:result)
    {
        int i = omp_get_thread_num();
        int n = omp_get_num_threads();
        double delta = (b-a)/n;
        // integrate just one part in each thread (writes the private copy)
        result = simpson(func, a+i*delta, a+(i+1)*delta, nsteps/n);
    }
    std::cout << result << std::endl;
    return 0;
}
\ No newline at end of file
// Example codes for HPC course
// (c) 2012 Matthias Troyer, ETH Zurich
#include "simpson.hpp"
#include <omp.h>
#include <cmath>
#include <iostream>
// The function to integrate
// Integrand: f(x) = x sin(x).
double func(double x)
{
    return std::sin(x) * x;
}
int main()
{
double a; // lower bound of integration
double b; // upper bound of integration
unsigned int nsteps; // number of subintervals for integration
// read the parameters
std::cin >> a >> b >> nsteps;
double result=0.;
#pragma omp parallel shared(result)
{