From c8e531cb134d01466f2fda57e35c9f863b040a0e Mon Sep 17 00:00:00 2001
From: Charles McGrath <mcgrath@biomed.ee.ethz.ch>
Date: Tue, 22 Mar 2022 11:47:43 +0100
Subject: [PATCH] Initial changes to allow turbulence particle tracking using
 Langevin equation

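The velocity update implemented in `_increment_particles_turb` relaxes the
previous particle velocity with the local time scale Tau and adds a random
contribution scaled by the Cholesky components stored in the look-up table
(presumably the Cholesky factor of the turbulent stresses), i.e. per step

    v_new = v * exp(-dt / tau) + sqrt(1 - exp(-2 * dt / tau)) * C @ z
    x_new = x + v_new * dt

with z a standard-normal vector per particle and C the upper-triangular
matrix built from the Chol11..Chol33 channels.

Minimal usage sketch; `turbulence_field` (X, Y, Z, 10), `map_dimensions`
(3, 2) and the seed points are placeholders, not part of this change:

    import numpy as np
    import tensorflow as tf
    from cmrsim.datasets.flow import TurbulentTrajectory

    module = TurbulentTrajectory(turbulence_field.astype(np.float32),
                                 map_dimensions.astype(np.float32))
    seeds = tf.random.uniform((1000, 3), -0.1, 0.1)   # (#particles, 3)
    trajectory = module(initial_positions=seeds,
                        initial_velocity=tf.zeros_like(seeds),
                        dt=tf.constant(1e-3, tf.float32), n_steps=100)
    # trajectory: (n_steps + 1, #particles, 3)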
---
 cmrsim/datasets/flow.py | 66 +++++++++++++++++++++++++++++++++++++----
 1 file changed, 61 insertions(+), 5 deletions(-)

diff --git a/cmrsim/datasets/flow.py b/cmrsim/datasets/flow.py
index 5dca557..58843ba 100644
--- a/cmrsim/datasets/flow.py
+++ b/cmrsim/datasets/flow.py
@@ -69,6 +69,62 @@ class FlowTrajectory(LookUpTableModule):
         return positions, velocities
 
 
+class TurbulentTrajectory(LookUpTableModule):
+    """ Evaluates particle trajectories in a turbulent flow field by integrating a
+    Langevin equation for the particle velocities on top of the gridded look-up table. """
+
+    def __init__(self, flow_field: np.ndarray, map_dimensions: np.ndarray, **kwargs):
+        """
+        :param flow_field: (X, Y, Z, 10) 3D map of gridded quantities; the last axis stores
+                           the velocity components in m/s followed by the Cholesky components
+                           Chol11, Chol12, Chol13, Chol22, Chol23, Chol33 and the
+                           time scale Tau.
+        :param map_dimensions: (3, 2) -> [(xlow, xhigh), (ylow, yhigh), (zlow, zhigh)]
+                                         Physical extent of the gridded velocity fields.
+        :param kwargs: - device: str defaults to CPU:0
+        """
+        super().__init__(flow_field, map_dimensions, name="flow_trajectory_evaluation",
+                         **kwargs)
+
+    def __call__(self, initial_positions: tf.Tensor, initial_velocity: tf.Tensor, dt: tf.Tensor, n_steps: int,
+                 verbose: bool = True):
+        """ Integrates the particle positions over n_steps steps of length dt using the
+        Langevin velocity update.
+
+        :param initial_positions: (#particles, 3)
+        :param initial_velocity: (#particles, 3)
+        :param dt: float - time-step duration in seconds
+        :param n_steps: int - number of integration steps
+        :param verbose: bool - if True, displays a progress bar over the integration steps
+        :return: (n_steps + 1, #particles, 3) - tf.Tensor - Trajectory, including the
+                 initial positions as first entry
+        """
+        positions = tf.TensorArray(dtype=tf.float32, size=n_steps + 1,
+                                   element_shape=(initial_positions.shape[0], 3))
+        positions = positions.write(0, initial_positions)
+
+        particle_positions = initial_positions
+        particle_velocity = initial_velocity
+
+        n_particles = tf.shape(initial_positions)[0]
+
+        for step in tqdm(range(n_steps), disable=not verbose):
+            # Draw one standard-normal impulse vector per particle
+            impuls = tf.random.normal([n_particles, 3])
+
+            particle_positions, particle_velocity = self._increment_particles_turb(particle_positions,
+                                                                                   particle_velocity, impuls, dt)
+            positions = positions.write(step + 1, particle_positions)
+        return positions.stack()
+
+    @tf.function
+    def _increment_particles_turb(self, particle_positions, particle_velocity, impuls, dt):
+        data = super().linear_interpolation(r_vectors=particle_positions)
+        tau = data[:, 9:10]  # local time scale, kept 2D to broadcast over the 3 components
+
+        # Langevin equation, may need optimization/cleanup:
+        # the random impulse is scaled by the Cholesky components (channels 3:9, read as an
+        # upper-triangular matrix) to obtain the turbulent velocity contribution
+        vtu = tf.reduce_sum(impuls * data[:, 3:6], axis=1)
+        vtv = tf.reduce_sum(impuls[:, 1:3] * data[:, 6:8], axis=1)
+        vtw = impuls[:, 2] * data[:, 8]
+
+        v_turb = (1. - tf.exp(-2. * dt / tau)) ** 0.5 * tf.stack([vtu, vtv, vtw], axis=1)
+        v = particle_velocity * tf.exp(-dt / tau) + v_turb
+        return particle_positions + v * dt, v
+
+
 class RefillingFlowDataset(tf.Module):
     """ Wraps a vtk-mesh containing a velocity field. Functionality tackles the problem of in-/
     out-flow of particles for a given imaging slice.
@@ -215,7 +271,7 @@ class RefillingFlowDataset(tf.Module):
                 np.tile(np.array([[0 + 0j, 0 + 0j, 1 + 0j], ], dtype=np.complex64), [n_new, 1]),
                 default_t1 * np.ones(n_new, dtype=np.float32),
                 default_t2 * np.ones(n_new, dtype=np.float32),
-                np.ones(n_new, dtype=np.float32)   # M0
+                np.ones(n_new, dtype=np.float32)  # M0
             )
         else:
             residual_mag, t1, t2, m0 = particle_properties
@@ -229,14 +285,14 @@ class RefillingFlowDataset(tf.Module):
             is_accepted = np.where(np.array(mc_samples > decision_boundaries, dtype=bool))
 
             n_new = len(is_accepted[0])
-            initial_magnetization = np.tile(np.array([[0+0j, 0+0j, 1+0j], ], dtype=np.complex64),
+            initial_magnetization = np.tile(np.array([[0 + 0j, 0 + 0j, 1 + 0j], ], dtype=np.complex64),
                                             [n_new, 1])
             return_pos = np.concatenate([residual_particle_pos[in_tolerance],
                                          sampled_particle_positions.points[is_accepted]])
             return_properties = (np.concatenate([residual_mag[in_tolerance], initial_magnetization]),
-                np.concatenate([t1[in_tolerance], default_t1 * np.ones(n_new, dtype=np.float32)]),
-                np.concatenate([t2[in_tolerance], default_t2 * np.ones(n_new, dtype=np.float32)]),
-                np.concatenate([m0[in_tolerance], np.ones(n_new, dtype=np.float32)]))
+                                 np.concatenate([t1[in_tolerance], default_t1 * np.ones(n_new, dtype=np.float32)]),
+                                 np.concatenate([t2[in_tolerance], default_t2 * np.ones(n_new, dtype=np.float32)]),
+                                 np.concatenate([m0[in_tolerance], np.ones(n_new, dtype=np.float32)]))
 
         return return_pos, return_properties
 
-- 
GitLab