Commit b7827fc3 authored by kicici's avatar kicici

Removing coupling framework stuff from the repo

parent df5875fa
# Cubism - Uniform resolution parallel block processing library
Cubism is a library for uniform block-structured grids, for both single-node
and multi-node usage.
## Repository structure
- `applications` - Various application-specific codes.
- `source` - The core of the library (C++ code).
- `pywrapper` - Python wrapper for the [coupling framework][1].
## Running Python examples
To run the Python example (based on the coupling framework), do the following
steps:
- Install the coupling framework, as described [here][1].
- Go to `pywrapper/examples/<example_name>`.
- Run `./example.py 1`. The `1` stands for the number of MPI nodes.
Currently won't work with more than 1 node.
- Run `./plot.sh` to generate the plots.
[1]: https://gitlab.ethz.ch/mavt-cse/coupling-framework
# Cubism - Distributed block-structured uniform grid.
This diff is collapsed.
from coupling import StdVector
from coupling.methods.operators import assign
from coupling.workflows.branching import If
from coupling.workflows.decorators import make_method
from coupling.workflows.linear import InlineWorkflow
from coupling.workflows.loops import ForRange
from coupling.contrib.cpp.cstdio import fopen, fprintf, fclose
from coupling.contrib.cpp.string import StdString
def map_to_vector(cubism, func):
    """Run `func` for each grid point and save results into a single vector.

    Arguments:
        cubism - Cubism library instance; `.init()` must already have been
                 called (checked via `cubism.block_num`).
        func   - Callable `(p, info) -> value expression`; the type of its
                 result determines the vector's value_type.

    Returns:
        An InlineWorkflow whose return value is an StdVector of nx * ny
        elements, indexed row-major as `global_y * nx + global_x`.
        Only 2D grids are supported (block_size[2] must be 1).
    """
    class G(object):  # Container for the vector (mutable closure state).
        vector = None
    assert cubism.block_num, ".init() not yet called?"
    # Global grid extents: points per block * blocks per node * nodes.
    nx = cubism.block_size[0] * cubism.block_num[0] * cubism.node_dims[0]
    ny = cubism.block_size[1] * cubism.block_num[1] * cubism.node_dims[1]

    def inner(p, info, ijk, global_ijk, **kwargs):
        result = func(p, info)
        # We found out what the value_type is, declare the array now.
        G.vector = StdVector(result.method.output)(const=False)
        assert cubism.block_size[2] == 1, "Sorry, 3d not implemented."
        index = global_ijk[1] * nx + global_ijk[0]
        return assign(G.vector[index], result)

    # NOTE(review): process_pointwise presumably invokes `inner` during code
    # generation, which is what populates G.vector before it is used below --
    # confirm against the coupling framework before reordering these lines.
    foreach = cubism.process_pointwise(inner)
    return InlineWorkflow(
        G.vector.resize(nx * ny),
        foreach,
        return_value=G.vector,
        localvars=G.vector,
        name='Cubism.extra.map_to_vector',
    )
# TODO: Make some decorator for this cache thing. It's popping out all over the
# place.
_func_cache = {}  # Maps (cubism, getter) -> generated save method.


def save_to_txt_file(cubism, getter, filename):
    """Function generator for save-to-txt.

    Arguments:
        cubism   - Cubism library instance.
        getter   - Either a field name (str) or a callable
                   `(p, info) -> value expression` selecting what to save.
        filename - Expression convertible to StdString with the output path.

    Returns:
        A workflow that writes the 2D grid as whitespace-separated text,
        one grid row (fixed y) per output line. Generated methods are cached
        per (cubism, getter) pair so repeated calls reuse the same C++ method.
    """
    assert cubism.block_size[2] == 1, "Not implemented for 3D."
    key = cubism, getter
    if key in _func_cache:
        return _func_cache[key](filename)
    if isinstance(getter, str):
        getter = lambda p, info, field_name=getter: getattr(p, field_name)
    # TODO: ConstPointer(Char)
    @make_method
    def _save_to_txt_file(filename: StdString):
        vector = map_to_vector(cubism, getter)
        f = fopen(filename.c_str(), '"w"')
        assert cubism.block_num, ".init() not yet called?"
        nx = cubism.block_size[0] * cubism.block_num[0] * cubism.node_dims[0]
        ny = cubism.block_size[1] * cubism.block_num[1] * cubism.node_dims[1]
        # `vector` is laid out row-major with index = iy * nx + ix, i.e.
        # ny rows of nx values each. The outer loop therefore runs over the
        # ny rows and the inner loop over the nx columns. (Iterating nx
        # outside and ny inside would read out of bounds whenever nx != ny.)
        code = InlineWorkflow(
            vector,
            f,
            ForRange(ny)(
                lambda iy:
                ForRange(nx)(
                    lambda ix:
                    fprintf(f, '"%lf "', vector[iy * nx + ix]),
                ),
                fprintf(f, r'"\n"'),
            ),
            fclose(f),
            localvars=(vector, f),
            name='Cubism.extra.save_to_txt_file',
        )
        # TODO: This should be lazily generated, maybe linker is not set at
        # this point yet!
        if cubism.linker:
            # Distributed run: only rank 0 writes the file.
            return If(cubism.linker.get_rank(cubism) == 0).Then(code)
        else:
            return code
    _func_cache[key] = _save_to_txt_file
    return _save_to_txt_file(filename)
__tmp.cpp
__bin
__build
__src
output
plots
#!/usr/bin/env coupling_compile_and_run
"""
Example of using Cubism through the coupling framework.
The example implements the 2D advection equation
dphi / dt = -(ax * dphi / dx + ay * dphi / dy),
for some constant velocity (ax, ay).
There are three parts of the simulation:
a) initialization
b) calculation of dphi / dx (gradient)
c) updating phi (phi += -a * gradient * dt).
Steps b) and c) are performed in each timestep.
Thus, the example below implements the simple 1st order Forward-time
backwards-space Euler scheme.
"""
import os
import sys
# Add folder ../.., to be able to import Cubism.
example_dir = os.path.dirname(__file__)
sys.path.insert(1, os.path.normpath(os.path.join(example_dir, '..', '..')))
import Cubism
import Cubism.misc
from coupling import * # Imports most common things.
from coupling.contrib.cpp import cmath # <cmath> methods.
from coupling.contrib.cpp.extra import sqr, strprintf # Utility methods.
from coupling.contrib.cpp.cstdio import printf
# Definition of the grid.
# We want our grid to contain two values:
#     `phi`  (a scalar, `double` by default)
#     `grad` (a 2D vector, as `std::array<double, 2>`)
# Cubism assumes that all items in the GridPoint have the same (base) type.
# Therefore, to make sure the user doesn't mix for example Doubles and Floats,
# the entry format is such that the user specifies only the names and
# dimensionality of the items.
GP = Cubism.GridPoint('phi', ('grad', 2))

# Instantiate Cubism library given the GridPoint and the block_size.
# These arguments affect the generated code, either as just template arguments
# of internal structures and methods, or directly by generating code that
# depends on the structure of GridPoint or on the block_size.
# Technically, this instantiates a Library instance, used later for links and
# communication (not in this example).
C = Cubism.Cubism(GP, block_size=(32, 32, 1))

# With 32x32x1 points per block and 4x4x1 blocks, the full grid is 128x128x1.
BLOCK_NUM = (4, 4, 1)  # Number of blocks per each dimension.
VEL = (0.5, 0.1)       # Simulation parameter. (velocity)
MU = 2                 # Simulation parameter. (for initial condition).
@make_method  # Create a C++-method `void initial_state(void) { ... }`.
def initial_state():
    """Initializes the grid with a Gaussian."""
    def set_phi(p, xyz, **kwargs):
        """Assigns the initial `phi` value for one grid point.

        Arguments:
            p        - GridPoint reference.
            xyz      - Absolute x, y, z coordinates attributed to `p`.
            **kwargs - Other arguments / values we don't need here.
        """
        # Squared distance from the domain center (0.5, 0.5); the z
        # coordinate is deliberately ignored.
        dist_sq = sqr(xyz[0] - .5) + sqr(xyz[1] - .5)
        # Plain Python assignment (`p.phi = exp(-MU * rr)`) cannot be
        # captured by the framework yet, so the C++ statement is spelled
        # out through the `expression` helper instead.
        return expression(p.phi, '=', cmath.exp(-MU * dist_sq))

    # C.process_pointwise runs `set_phi` once per grid point. It supplies one
    # positional argument (the GridPoint reference) and several keyword
    # arguments (e.g. ijk, xyz, ...). Since future Cubism versions may add
    # further keyword arguments, the callback must always accept and ignore
    # **kwargs.
    return C.process_pointwise(set_phi)
@make_method  # Creates `void timestep(const double &dt) { ... }`.
def timestep(dt: Double):
    """Implementation of a single time step."""
    def calc_grad(p, lab, info, **kwargs):
        """Calculates the gradient `.grad` of the field `.phi`.

        Arguments:
            p        - GridPoint reference (for updating).
            lab      - Wrapper for BlockLab (used for accessing neighboring
                       GridPoints)
            info     - (const) BlockInfo reference.
            **kwargs - Other unused arguments.

        When processing a stencil, i.e. when updating a single grid point
        with values of neighboring grid points, Cubism must make sure the
        data is available. It has to take the data from the neighboring
        blocks, such that the data is available for fast access, by storing
        it in a contiguous chunk of memory. Also, if distributed, Cubism
        must acquire data from other machines.

        `lab` here is a wrapper for BlockLab, in the sense that it properly
        generates the code for accessing the BlockLab, while keeping note of
        how wide a stencil has to be used.

        Here, the stencil is (x0, x1) = (-1, 0), (y0, y1) = (-1, 0),
        (z0, z1) = (0, 0), inclusive.
        """
        # 1 / h turns the finite difference below into a derivative.
        fac = 1. / info.h_gridpoint
        # Backward differences (upwinding) are only stable for vel >= 0.
        assert VEL[0] >= 0 and VEL[1] >= 0, "Scheme unstable for vel < 0."
        # [fac]           --> Code that should be executed before nested for
        #                     loops (see generated C++ code).
        # [expression...] --> Code inside the nested for loops.
        # Because in Python it's not possible to capture assignment as in C++,
        # we write manually "expression(a, '=', b)" instead of "a = b".
        # Note: It is possible to capture assignment if LHS is not a variable
        # but an item like "x.a = b", by overloading __setitem__ such that
        # it keeps track of all operations. That format is available using
        # StructuredWorkflow (still in development).
        return [fac], [
            expression(p.grad[0], '=', fac * (lab(0, 0).phi - lab(-1, 0).phi)),
            expression(p.grad[1], '=', fac * (lab(0, 0).phi - lab(0, -1).phi)),
        ]

    def advance(p, **kwargs):
        """Performs a single step of Euler time integration.

        Updates `.phi` from the already calculated values of `.grad`.
        """
        fac0 = dt * VEL[0]
        fac1 = dt * VEL[1]
        # p.phi -= dt * dot(VEL, p.grad)
        return expression(p.phi, '-=', fac0 * p.grad[0] + fac1 * p.grad[1])

    # Here we list all substeps of one time step, in the order of execution.
    return [
        C.process_stencil(calc_grad),
        C.process_pointwise(advance),
    ]
# Immediately create a folder containing output files.
# Note: This is Python code, and it will be executed during compilation.
os.makedirs(os.path.join(os.path.dirname(__file__), 'output'), exist_ok=True)

# Specification of the complete workflow, in the order of execution.
workflow = [
    C.init(BLOCK_NUM),  # Initialize Cubism.
    initial_state(),    # Initial condition.
    # Do 50 time steps. After each, save the result in a file.
    ForRange(50)(lambda step: [
        timestep(0.01),
        # Save `.phi` values from the given library C.
        Cubism.misc.save_to_txt_file(
            C, 'phi', strprintf('"output/save_%03d.txt"', step)),
        # Log message fixed to match the actual file name (`save_%03d.txt`;
        # it previously printed `save%03d.txt`, without the underscore).
        printf('"Saved \'output/save_%03d.txt\'.\\n"', step),
    ]),
]
#!/bin/bash
# Render each saved text matrix (output/save_NNN.txt) as a PNG heat map.

# heat_plot <source_txt> <output_png>
# Pipes a small gnuplot script (here-document) that plots the whitespace-
# separated matrix as an image with a fixed color range [0, 1].
heat_plot() {
SOURCE=$1
OUTPUT=$2
gnuplot <<-EOFMarker
set terminal png
set output "${OUTPUT}"
set view map
set cbrange [0:1]
set palette defined (0 0 0 0.5, 1 0 0 1, 2 0 0.5 1, 3 0 1 1, 4 0.5 1 0.5, 5 1 1 0, 6 1 0.5 0, 7 1 0 0, 8 0.5 0 0)
# set palette gray
plot "${SOURCE}" matrix with image
EOFMarker
}

mkdir -p plots
# 50 snapshots: save_000.txt .. save_049.txt (matches ForRange(50) above).
N=50
for i in $(seq 0 $(expr $N - 1))
do
index=$(printf "%03d" $i)
heat_plot "output/save_${index}.txt" "plots/plot_${index}.png"
done
#!/usr/bin/env coupling_compile_and_run
"""
Example of using Cubism through the coupling framework.
The example implements the 2D advection equation
dphi / dt = -(ax * dphi / dx + ay * dphi / dy),
for some constant velocity (ax, ay).
There are three parts of the simulation:
a) initialization
b) calculation of dphi / dx (gradient)
c) updating phi (phi += -a * gradient * dt).
Steps b) and c) are performed in each timestep.
Thus, the example below implements the simple 1st order Forward-time
backwards-space Euler scheme.
"""
import os
import sys
# Add folder ../.., to be able to import Cubism.
example_dir = os.path.dirname(__file__)
sys.path.insert(1, os.path.normpath(os.path.join(example_dir, '..', '..')))
import Cubism
import Cubism.misc
from coupling import * # Imports most common things.
from coupling.contrib.cpp import cmath # <cmath> methods.
from coupling.contrib.cpp.cassert import cassert
from coupling.contrib.cpp.cstdio import printf
from coupling.contrib.cpp.extra import sqr, strprintf # Utility methods.
from coupling.methods.reduce import log_reduce_max
# Definition of the grid.
# We want our grid to contain two values:
#     `phi`  (a scalar, `double` by default)
#     `grad` (a 2D vector, as `std::array<double, 2>`)
# Cubism assumes that all items in the GridPoint have the same (base) type.
# Therefore, to make sure the user doesn't mix for example Doubles and Floats,
# the entry format is such that the user specifies only the names and
# dimensionality of the items.
GP = Cubism.GridPoint('phi', ('grad', 2))

# Instantiate Cubism library given the GridPoint and the block_size.
# These arguments affect the generated code, either as just template arguments
# of internal structures and methods, or directly by generating code that
# depends on the structure of GridPoint or on the block_size.
# Technically, this instantiates a Library instance, used later for links and
# communication (not in this example).
# NOTE: unlike the single-node example, this uses the MPI variant.
C = Cubism.CubismMPI(GP, block_size=(32, 32, 1))

BLOCK_NUM = (4, 4, 1)  # Number of blocks per each dimension (per node).
VEL = (0.5, 0.1)       # Simulation parameter. (velocity)
MU = 2                 # Simulation parameter. (for initial condition).

# NOTE(review): Linker presumably provides the inter-node (MPI) communication
# layer for library C (rank/size queries are used below) -- confirm against
# the coupling framework documentation.
linker = Linker(C)
@make_method  # Create a C++-method `void initial_state(void) { ... }`.
def initial_state():
    """Initializes the grid with a Gaussian."""
    def inner(p, xyz, **kwargs):
        """Sets the initial value of `phi` for the given grid point `p`.

        Arguments:
            p        - GridPoint reference.
            xyz      - Absolute x, y, z coordinates attributed to `p`.
            **kwargs - Other arguments / values we don't need here.
        """
        # Calculate (x - 0.5)^2 + (y - 0.5)^2. (ignores z coordinate!)
        rr = sqr(xyz[0] - .5) + sqr(xyz[1] - .5)
        # We do not support (yet) the typical format
        #     p.phi = exp(-MU * rr)
        # Instead, we manually write expressions using a special method.
        return expression(p.phi, '=', cmath.exp(-MU * rr))

    # Run function `inner` for each point.
    # C.process_pointwise passes to the given argument one positional argument
    # (GridPoint reference), and multiple keyword arguments (e.g. ijk, xyz...),
    # which might or might not be important to the user. As over time
    # additional keyword arguments could be added to the Cubism API, the passed
    # function therefore must always be defined to be able to ignore unused
    # keyword arguments.
    return C.process_pointwise(inner)
@make_method  # Creates `void timestep(const double &dt) { ... }`.
def timestep(dt: Double):
    """Implementation of a single time step."""
    def calc_grad(p, lab, info, **kwargs):
        """Calculates the gradient `.grad` of the field `.phi`.

        Arguments:
            p        - GridPoint reference (for updating).
            lab      - Wrapper for BlockLab (used for accessing neighboring
                       GridPoints)
            info     - (const) BlockInfo reference.
            **kwargs - Other unused arguments.

        When processing a stencil, i.e. when updating a single grid point
        with values of neighboring grid points, Cubism must make sure the
        data is available. It has to take the data from the neighboring
        blocks, such that the data is available for fast access, by storing
        it in a contiguous chunk of memory. Also, if distributed, Cubism
        must acquire data from other machines.

        `lab` here is a wrapper for BlockLab, in the sense that it properly
        generates the code for accessing the BlockLab, while keeping note of
        how wide a stencil has to be used.

        Here, the stencil is (x0, x1) = (-1, 0), (y0, y1) = (-1, 0),
        (z0, z1) = (0, 0), inclusive.
        """
        # 1 / h turns the finite difference below into a derivative.
        fac = 1. / info.h_gridpoint
        # Backward differences (upwinding) are only stable for vel >= 0.
        assert VEL[0] >= 0 and VEL[1] >= 0, "Scheme unstable for vel < 0."
        # [fac]           --> Code that should be executed before nested for
        #                     loops (see generated C++ code).
        # [expression...] --> Code inside the nested for loops.
        # Because in Python it's not possible to capture assignment as in C++,
        # we write manually "expression(a, '=', b)" instead of "a = b".
        # Note: It is possible to capture assignment if LHS is not a variable
        # but an item like "x.a = b", by overloading __setitem__ such that
        # it keeps track of all operations. That format is available using
        # StructuredWorkflow (still in development).
        return [fac], [
            expression(p.grad[0], '=', fac * (lab(0, 0).phi - lab(-1, 0).phi)),
            expression(p.grad[1], '=', fac * (lab(0, 0).phi - lab(0, -1).phi)),
        ]

    def advance(p, **kwargs):
        """Performs a single step of Euler time integration.

        Updates `.phi` from the already calculated values of `.grad`.
        """
        fac0 = dt * VEL[0]
        fac1 = dt * VEL[1]
        # p.phi -= dt * dot(VEL, p.grad)
        return expression(p.phi, '-=', fac0 * p.grad[0] + fac1 * p.grad[1])

    # Here we list all substeps of one time step, in the order of execution.
    return [
        C.process_stencil(calc_grad),
        C.process_pointwise(advance),
    ]
# Immediately create a folder containing output files.
# Note: This is Python code, and it will be executed during compilation.
os.makedirs(os.path.join(os.path.dirname(__file__), 'output'), exist_ok=True)


def determine_dim(linker):
    """Automatically calculate the division of nodes in a 2D Cartesian grid.

    Splits the N available nodes into sqrt(N) x sqrt(N) x 1. The generated
    code aborts at runtime (via cassert) if N is not a perfect square.
    Returns an InlineWorkflow whose return value is a StdArray<int, 3>.
    """
    N = linker.get_size(C)
    sqrt = Int(cmath.sqrt(Double(N)))
    dims = StdArray(Int, 3).from_values(sqrt, sqrt, 1)
    return InlineWorkflow(
        # Classic C assert-with-message idiom: the string literal is always
        # truthy, so the assertion fails (and shows the message) exactly
        # when sqrt * sqrt != N.
        cassert(expression('"Works only with N=1, 4, 9, 16, ..."',
                           '&&', sqrt * sqrt == N)),
        printf(r'"Nodes split into %dx%dx%d\n"', *dims),
        return_value=dims,
        localvars=(N, sqrt, dims),
        name='determine_dim',
    )

# NOTE(review): presumably evaluated once and stored in a global C++ variable
# named 'dims' -- confirm `lazy_global_variable` semantics.
dims = lazy_global_variable(determine_dim(linker), 'dims')
# Specification of the complete workflow, in the order of execution.
workflow = [
    C.init(linker, dims, BLOCK_NUM),  # Initialize Cubism.
    initial_state(),                  # Initial condition.
    # Do 50 time steps. After each, save the result in a file.
    ForRange(50)(lambda step: [
        # Scale dt down with the node-grid size to keep the scheme stable.
        timestep(Double(0.01) / Double(log_reduce_max([*dims]))),
        # Save `.phi` values from the given library C, ONLY RANK 0!
        If(linker.get_rank(C) == 0).Then(
            Cubism.misc.save_to_txt_file(
                C, 'phi', strprintf('"output/save_%03d.txt"', step)),
            # Log message fixed to match the actual file name
            # (`save_%03d.txt`; it previously printed `save%03d.txt`).
            printf('"Saved \'output/save_%03d.txt\'.\\n"', step),
        ),
    ]),
]
#!/bin/bash
# Render each saved text matrix (output/save_NNN.txt) as a PNG heat map.

# heat_plot <source_txt> <output_png>
# Pipes a small gnuplot script (here-document) that plots the whitespace-
# separated matrix as an image with a fixed color range [0, 1].
heat_plot() {
SOURCE=$1
OUTPUT=$2
gnuplot <<-EOFMarker
set terminal png
set output "${OUTPUT}"
set view map
set cbrange [0:1]
set palette defined (0 0 0 0.5, 1 0 0 1, 2 0 0.5 1, 3 0 1 1, 4 0.5 1 0.5, 5 1 1 0, 6 1 0.5 0, 7 1 0 0, 8 0.5 0 0)
# set palette gray
plot "${SOURCE}" matrix with image
EOFMarker
}

mkdir -p plots
echo "Note: The plots show only the part of the domain handled by the master node."
# 50 snapshots: save_000.txt .. save_049.txt (matches ForRange(50) above).
N=50
for i in $(seq 0 $(expr $N - 1))
do
index=$(printf "%03d" $i)
heat_plot "output/save_${index}.txt" "plots/plot_${index}.png"
done
/*
* M2P.h
* Cubism
*
* Created by Ivica Kicic on 07/05/17.
* Copyright 2017 ETH Zurich. All rights reserved.
*
*/
#ifndef _CUBISM_UTILS_M2P_H_
#define _CUBISM_UTILS_M2P_H_
#include <vector>
namespace cubism {
namespace utils {
/*
* Mesh to particle (interpolation) algorithm.
*
* For each given point (particle position), interpolate the value of the field.
*
* Mandatory template arguments:
* - DIM - Dimensionality of the grid (only 2 and 3 supported).
*
* Arguments:
* - block_processing - Block processing functor.
* - grid - Reference to the grid.
* - points - Array of the points. (*)
* - getter - Lambda of a single argument (BlockLab), returning the value
* to be interpolated (**).
* - setter - Lambda of two arguments (point ID, interpolated value).
*
* (*) Points should support operator [] for accessing x, y, z coordinates.
* (**) Returned value type X must support operators <double> * X and X + X.
*/
template <int DIM, // <-- Must be specified manually.
typename BlockProcessing,
typename Grid,
typename Array,
typename Getter,
typename Setter>
void linear_m2p(BlockProcessing block_processing,
Grid &grid,
const Array &points,
Getter getter,
Setter setter) {
/* Based on Fabian's Cubism-LAMMPS example. */
static_assert(DIM == 2 || DIM == 3);
typedef typename Grid::BlockType Block;
// BEGIN Particle storage.
struct Particle {
int id;
double pos[DIM];
};
const int N[3] = {
grid.getBlocksPerDimension(0),
grid.getBlocksPerDimension(1),
grid.getBlocksPerDimension(2)
};
std::vector<std::vector<Particle>> particles(N[0] * N[1] * N[2]);
// Map particles to CUBISM domain [0, 1] and put them in different blocks.
for (decltype(points.size()) i = 0; i < points.size(); ++i) {
const auto &point = points[i];
// Map position to [0, 1].
const double pos[3] = {
point[0],
point[1],
DIM == 3 ? point[2] : 0
};
// Find block.
const int index[3] = {
std::max(std::min(int(pos[0] * N[0]), N[0] - 1), 0),
std::max(std::min(int(pos[1] * N[1]), N[1] - 1), 0),
std::max(std::min(int(pos[2] * N[2]), N[2] - 1), 0)
};
const int idx = index[0] + N[0] * (index[1] + N[1] * index[2]);
Particle part;
part.id = i;
part.pos[0] = pos[0];
part.pos[1] = pos[1];
if (DIM == 3) part.pos[2] = pos[2];
particles[idx].push_back(part);
}
// END Particle storage.
// BEGIN Linear M2P (mesh-to-particle interpolation).
const auto rhs = [&particles, &N, &getter, &setter](
const auto &lab,