
Commit 5c73d990 authored by mgassner

First
basis:
  number_spins: 8
  symmetries: []
hamiltonian:
  name: XXZ Hamiltonian
  terms:
    - matrix:
        - [ 1.5,  0.0,  0.0, 0.0]
        - [ 0.0, -1.5,  2.0, 0.0]
        - [ 0.0,  2.0, -1.5, 0.0]
        - [ 0.0,  0.0,  0.0, 1.5]
      sites:
        - [0, 1]
        - [1, 2]
        - [2, 3]
        - [3, 4]
        - [4, 5]
        - [5, 6]
        - [6, 7]
        - [7, 0]
observables: []
output: configurations/xxz8.h5
import numpy as np
import yaml
# Spin operators: Pauli matrices and the 2x2 identity
sigma_x = np.array([[0., 1.], [1., 0.]])
sigma_y = np.array([[0., -1.0j], [1.0j, 0.]])
sigma_z = np.diag([1., -1.])
sigma_0 = np.diag([1., 1.])  # identity
def create_model_input_yaml(inputyaml):
    # Load the input YAML and dispatch on the requested model name.
    with open('inputs/' + inputyaml, 'r') as file:
        data = yaml.safe_load(file)
    if data['model']['name'] == 'xxz':
        return 0  # TODO: construct and return the corresponding model
def create_sites(length, periodic):
sites = []
for i in range(length-1):
sites.append([i, i+1])
if periodic:
sites.append([length-1, 0])
return sites # list(map(str, sites))
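# e.g. create_sites(8, True) yields the nearest-neighbour bond list
# [[0, 1], [1, 2], ..., [6, 7], [7, 0]] used in the YAML configuration above.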
class spin_model:
def __init__(self, model):
self.length = model.length
self.symmetries = model.symmetries
self.name = model.name
self.matrix = model.matrix
self.sites = create_sites(self.length, model.periodic)
self.output = model.output
self.dictionary = self.create_dict()
self.file_name = 'test' # TODO: find a good way to name the yaml file
self.eigenvalue = None
self.eigenvector = None
def create_dict(self):
return {'basis':{'number_spins': self.length, 'symmetries': self.symmetries},
'hamiltonian':{'name': self.name, 'terms': [{'matrix': self.matrix, 'sites': self.sites}]},
'observables':[],
'output': self.output}
def dump_yaml(self):
with open(r'configurations/'+self.file_name+'.yaml', 'w') as file:
yaml.dump(self.dictionary, file)
def update_output(self, new_output):
self.output = new_output
self.dictionary = self.create_dict()
print(self.output, self.dictionary)
class xxz:
    def __init__(self, length, periodic, delta_over_J, symmetries=[]):
        self.length = length              # number of sites (int)
        self.periodic = periodic          # periodic boundary conditions? (bool)
        self.delta_over_J = delta_over_J  # anisotropy of the coupling in z-direction (float)
        self.symmetries = symmetries      # to be added!
self.matrix = self.create_matrix()
self.name = "XXZ Hamiltonian"
if self.delta_over_J == 1.0:
self.name = 'Heisenberg Hamiltonian'
elif self.delta_over_J == 0.0:
self.name = 'XY Hamiltonian'
self.output = 'configurations/xxz.h5'
def create_matrix(self):
return (np.kron(sigma_x, sigma_x) + np.kron(sigma_y, sigma_y) +
self.delta_over_J * np.kron(sigma_z, sigma_z)).astype('float64').tolist()
class tfim:
def __init__(self, length, periodic, h_over_J, symmetries = []):
        self.length = length          # number of sites (int)
        self.periodic = periodic      # periodic boundary conditions? (bool)
        self.h_over_J = h_over_J      # transverse external field (float)
        self.symmetries = symmetries  # to be added!
self.matrix = self.create_matrix()
self.output = 'configurations/tfim.h5'
self.name = 'TFIM Hamiltonian'
def create_matrix(self):
return (np.kron(sigma_z, sigma_z) - 0.5 * self.h_over_J *
(np.kron(sigma_x, sigma_0) + np.kron(sigma_0, sigma_x))).astype('float64').tolist()
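# A minimal usage sketch (assumption: the actual driver script is not part of
# this commit). With delta_over_J = 1.5 this reproduces the 4x4 matrix and the
# periodic 8-site bond list of the YAML configuration shown at the top.
if __name__ == '__main__':
    model = xxz(length=8, periodic=True, delta_over_J=1.5)
    wrapper = spin_model(model)
    wrapper.update_output('configurations/xxz8.h5')
    wrapper.dump_yaml()  # writes configurations/test.yaml (see file_name TODO)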
import math
import os
import pickle
import sys

import numba
from numba import uint64, float32, float64
import numpy as np
import scipy.special
from joblib import delayed, Parallel
from lattice_symmetries import *
Lx = 6
Ly = 6
NUMBER_SPINS = Lx * Ly
@numba.jit("uint64(uint64, uint64)", nogil=True, nopython=True)
def _binom(n, k):
r"""Compute the number of ways to choose k elements out of a pile of n.
:param n: the size of the pile of elements
:param k: the number of elements to take from the pile
:return: the number of ways to choose k elements out of a pile of n
"""
assert 0 <= n and n < 40
assert 0 <= k and k <= n
if k == 0 or k == n:
return 1
total_ways = 1
for i in range(min(k, n - k)):
total_ways = total_ways * (n - i) // (i + 1)
return total_ways
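# e.g. _binom(8, 3) == 56: the number of ways to place three up-spins on eight sites.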
@numba.jit("uint64[:, :](uint64, uint64)", nogil=True, nopython=True)
def make_binom_cache(max_n, max_k):
assert 0 < max_k and max_k <= max_n
assert max_n < 40
cache = np.zeros((max_n + 1, max_k + 2), dtype=np.uint64)
for i in range(max_n + 1):
for j in range(min(i, max_k) + 2):
cache[i, j] = _binom(i, j) if j <= i else 0
return cache
@numba.jit("uint64(uint64)", nogil=True, nopython=True)
def _hamming_weight(n: int):
# See https://stackoverflow.com/a/9830282
n = (n & 0x5555555555555555) + ((n & 0xAAAAAAAAAAAAAAAA) >> 1)
n = (n & 0x3333333333333333) + ((n & 0xCCCCCCCCCCCCCCCC) >> 2)
n = (n & 0x0F0F0F0F0F0F0F0F) + ((n & 0xF0F0F0F0F0F0F0F0) >> 4)
n = (n & 0x00FF00FF00FF00FF) + ((n & 0xFF00FF00FF00FF00) >> 8)
n = (n & 0x0000FFFF0000FFFF) + ((n & 0xFFFF0000FFFF0000) >> 16)
n = (n & 0x00000000FFFFFFFF) + ((n & 0xFFFFFFFF00000000) >> 32)
return n
def make_full_basis():
symmetry_group = Group(
[
]
)
# print('size of the symmetry group', len(symmetry_group), flush = True)
return SpinBasis(symmetry_group, number_spins=NUMBER_SPINS, hamming_weight=NUMBER_SPINS // 2)
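# Note: with an empty symmetry group this is the full zero-magnetization sector,
# i.e. all C(36, 18) = 9,075,135,300 states with 18 up-spins
# (hamming_weight = NUMBER_SPINS // 2); basis_full.build() enumerates them all.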
@numba.jit("uint64[:](uint64, uint64)", nogil=True, nopython=True)
def generate_binaries(length: int, hamming: int):
assert length > 0
assert hamming >= 0 and hamming <= length
if hamming == 0:
return np.zeros(1, dtype=np.uint64)
    size = _binom(length, hamming)  # number of length-bit words with `hamming` set bits
set_of_vectors = np.empty(size, dtype=np.uint64)
val = (1 << hamming) - 1
for i in range(size):
set_of_vectors[i] = val
        c = val & -val  # isolate the lowest set bit
        r = val + c
        # Gosper's hack: the next larger integer with the same number of set bits
        val = (((r ^ val) >> 2) // c) | r
return set_of_vectors
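# Quick sanity check (illustrative, not in the original script): Gosper's hack
# yields the C(4, 2) = 6 four-bit words with two set bits in increasing order,
# which is exactly the ordering _get_index() below ranks against.
assert np.array_equal(generate_binaries(4, 2),
                      np.array([3, 5, 6, 9, 10, 12], dtype=np.uint64))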
@numba.jit("uint64(uint64, uint64, uint64)", nogil=True, nopython=True)
def _merge(vec1, vec2, dim2):
return (vec1 << dim2) | vec2
@numba.jit("uint64(uint64, uint64, uint64[:, :])", nogil=True, nopython=True)
def _get_index(x, dim, binom_cache):
n = 0
h = 0
if x & 1 == 1:
h += 1
x >>= 1
for i in range(1, dim):
if x & 1 == 1:
h += 1
n += binom_cache[i, h]
x >>= 1
return n
@numba.jit(
"float64(uint64, uint64, uint64, uint64, uint64, float64[::1], uint64[:, :])",
nogil=True,
nopython=True,
)
def _density_matrix_element(dim1, dim, hamming, vec1, vec2, amplitudes, binom_cache):
hamming1 = _hamming_weight(vec1)
smallest_number = 2 ** (hamming - hamming1) - 1
k = dim - dim1
index1 = _get_index(_merge(vec1, smallest_number, k), dim, binom_cache)
index2 = _get_index(_merge(vec2, smallest_number, k), dim, binom_cache)
size_of_traced = _binom(k, hamming - hamming1)
matrix_element = np.dot(
amplitudes[index1 : index1 + size_of_traced],
amplitudes[index2 : index2 + size_of_traced],
)
return matrix_element
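# Explanatory note (added): _merge() places the subsystem bits in the high
# positions, so for fixed subsystem bits all environment configurations of the
# complementary Hamming weight are contiguous in the sorted fixed-weight basis.
# The element rho[v1, v2] = sum_w psi(v1, w) * psi(v2, w) over traced-out
# configurations w is therefore a single dot product over a contiguous slice.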
@numba.jit(
"float64[:, :](uint64, uint64, uint64, uint64, float64[::1])",
nogil=True,
nopython=True,
parallel=False,
)
def sector_density_matrix(sector_dim, dim, sector_hamming, hamming, amplitudes):
assert sector_hamming <= hamming and sector_dim <= dim
assert hamming - sector_hamming <= dim - sector_dim
sector_basis = generate_binaries(sector_dim, sector_hamming)
binom_cache = make_binom_cache(dim, hamming)
n = len(sector_basis)
matrix = np.empty((n, n), dtype=np.float64)
for i in range(len(sector_basis)):
for j in range(i, len(sector_basis)):
matrix[i, j] = _density_matrix_element(
sector_dim,
dim,
hamming,
sector_basis[i],
sector_basis[j],
amplitudes,
binom_cache,
)
matrix[j, i] = np.conj(matrix[i, j])
return matrix
def density_matrix(sub_dim, dim, hamming, amplitudes):
return [
sector_density_matrix(sub_dim, dim, sub_hamming, hamming, amplitudes)
for sub_hamming in range(
max(0, hamming - (dim - sub_dim)), min(hamming, sub_dim) + 1
)
]
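# Hypothetical convenience wrapper (not in the original script) mirroring the
# entropy loops further below: the reduced density matrix is block diagonal in
# the subsystem magnetization, so the von Neumann entropy is a sum over blocks.
def entanglement_entropy(sub_dim, dim, hamming, amplitudes):
    entropy = 0.0
    for block in density_matrix(sub_dim, dim, hamming, amplitudes):
        spectrum = np.linalg.eigvalsh(block)
        entropy -= np.sum(np.where(spectrum > 1e-7, spectrum * np.log(spectrum), 0.0))
    return entropy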
def index_to_spin(index, number_spins=36):
    # Unpack integer-encoded basis states into boolean spin arrays, chunk-wise
    # to bound peak memory.
    spin = np.zeros(shape=(len(index), number_spins), dtype=bool)
    l = 0
    step = int(1e+7)
    bits = (1 << np.arange(number_spins)).astype(np.uint64)
    while l < len(index):
        to = min(l + step, len(index))
        spin[l:to, ...] = (index[l:to].reshape(-1, 1).astype(np.uint64) & bits) > 0
        l += step
    return spin
def spin_to_index(spin, number_spins = 36):
a = 2 ** np.arange(number_spins).astype(np.uint64)
return spin.dot(a).astype(np.uint64)
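# Round-trip sanity check (illustrative): spin_to_index inverts index_to_spin.
_idx = np.array([5], dtype=np.uint64)  # 0b0101: spins 0 and 2 up
assert np.array_equal(index_to_spin(_idx, 4),
                      np.array([[True, False, True, False]]))
assert spin_to_index(index_to_spin(_idx, 4), 4)[0] == 5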
def idxs_rearrangement(old_states, rearrangement):
    # Apply a site permutation to a batch of basis states (as bit patterns).
    new_states = []
    batch_size = 1000000
    n_batches = len(old_states) // batch_size + 1
    for i in range(n_batches):
        print(i / n_batches, flush=True)
        batch = old_states[i * batch_size : min((i + 1) * batch_size, len(old_states))]
        new_states.append(spin_to_index(index_to_spin(batch)[:, rearrangement]))
    return np.concatenate(new_states)
#ground_state = np.load('/home/cluster/niastr/scratch/ed_data_flatiron_qsl_cavity/1.000_-0.900_3.500_6_0_0/state_full.npy')
#print('loaded gs', flush=True)
basis_full = make_full_basis()  # build the full basis: `states` is read from it below
basis_full.build()
#print('basis built', flush=True)
x = np.array([0, 5, 4, 3, 2, 1, 6, 11, 10, 9, 8, 7, 12, 17, 16, 15, 14, 13, 18, 23, 22, 21, 20, 19, 24, 29, 28, 27, 26, 25, 30, 35, 34, 33, 32, 31])
y = np.array([0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 34, 35, 24, 25, 26, 27, 28, 29, 18, 19, 20, 21, 22, 23, 12, 13, 14, 15, 16, 17, 6, 7, 8, 9, 10, 11])
rot = np.array([0, 6, 12, 18, 24, 30, 5, 11, 17, 23, 29, 35, 4, 10, 16, 22, 28, 34, 3, 9, 15, 21, 27, 33, 2, 8, 14, 20, 26, 32, 1, 7, 13, 19, 25, 31])
mir = np.array([0, 6, 12, 18, 24, 30, 1, 7, 13, 19, 25, 31, 2, 8, 14, 20, 26, 32, 3, 9, 15, 21, 27, 33, 4, 10, 16, 22, 28, 34, 5, 11, 17, 23, 29, 35])
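# Interpretation (assumption, inferred from the index patterns): with
# site = 6*row + col on the 6x6 torus, `x` reflects columns (c -> -c mod 6),
# `y` reflects rows (r -> -r mod 6), `rot` is a 90-degree rotation, and `mir`
# is the diagonal mirror (transpose). Only `mir` is referenced, in the
# quoted-out block below.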
states = basis_full.states
#np.save('/mnt/home/nastrakhantsev/ceph/fullbasis.npy', states)
#exit(-1)
# states = np.load('/mnt/home/nastrakhantsev/ceph/fullbasis.npy')
#print('states loaded', flush=True)
'''
#
##################################
# rearrangement = np.array([12, 13, 18, 19, 14, 15, 20, 21, 0, 1, 2, 3, 6, 7, 8, 9, 4, 5, 10, 11, 16, 17, 22, 23, 24, 25, 26, 27, 28, 29])[::-1]
# rearrangement = np.array([19, 20, 25, 26, 21, 22, 27, 28, 7, 8, 9, 10, 13, 14, 15, 16, 0, 1, 2, 3, 4, 5, 6, 11, 12, 17, 18, 23, 24, 29, 30, 31, 32, 33, 34, 35])[::-1]
rearrangement = mir #np.array([12, 13, 14, 18, 19, 20, 15, 16, 21, 22, 0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 5, 11, 17, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35])#[::-1]
assert len(np.unique(rearrangement)) == Lx * Ly
assert np.allclose(np.arange(Lx * Ly), np.sort(rearrangement))
# new_idxs = idxs_rearrangement(states, rearrangement)
batch_size = len(states) // 48 + 1
new_idxs = Parallel(n_jobs=48)(delayed(idxs_rearrangement)(states[i * batch_size:np.min([(i + 1) * batch_size, len(states)])], rearrangement) for i in range(48))
print('obtained results from threads, concatenating', flush=True)
new_idxs = np.concatenate(new_idxs)
print('concatenated', flush=True)
#assert np.allclose(new_idxs, states)
#for _ in range(1000):
# idx = np.random.randint(low=0, high=len(new_idxs))
# assert new_idxs[idx] == states[idx]
#print('asserted', flush=True)
idx = np.argsort(new_idxs)
np.save('/mnt/home/nastrakhantsev/ceph/36_mir.npy', idx)
exit(-1)
'''
###################################
#rearrangement = np.array([14, 15, 20, 21, 0, 1, 2, 3, 6, 7, 8, 9, 4, 5, 10, 11, 16, 17, 22, 23, 24, 25, 26, 27, 28, 29, 12, 13, 18, 19])[::-1]
#rearrangement = np.array([21, 22, 27, 28, 7, 8, 9, 10, 13, 14, 15, 16, 0, 1, 2, 3, 4, 5, 6, 11, 12, 17, 18, 23, 24, 29, 30, 31, 32, 33, 34, 35, 19, 20, 25, 26])[::-1]
rearrangement = np.array([15, 16, 21, 22, 0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 5, 11, 17, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 12, 13, 14, 18, 19, 20])[::-1]
assert len(np.unique(rearrangement)) == Lx * Ly
assert np.allclose(np.arange(Lx * Ly), np.sort(rearrangement))
# new_idxs = idxs_rearrangement(states, rearrangement)
batch_size = len(states) // 48 + 1
new_idxs = Parallel(n_jobs=48)(
    delayed(idxs_rearrangement)(
        states[i * batch_size : min((i + 1) * batch_size, len(states))],
        rearrangement,
    )
    for i in range(48)
)
print('obtained results from threads, concatenating', flush=True)
new_idxs = np.concatenate(new_idxs)
print('concatenated', flush=True)
#assert np.allclose(new_idxs, states)
#for _ in range(1000):
# idx = np.random.randint(low=0, high=len(new_idxs))
# assert new_idxs[idx] == states[idx]
#print('asserted', flush=True)
idx = np.argsort(new_idxs)
np.save('/home/cluster/niastr/scratch/ed_data_flatiron_qsl_cavity/BC_rearrangement_36_large.npy', idx)
print('saved BC rearrangement', flush=True)
exit(-1)  # NB: one-shot section; the code below only runs with this exit removed
##################################
#rearrangement = np.array([0, 1, 2, 3, 6, 7, 8, 9, 12, 13, 18, 19, 4, 5, 10, 11, 16, 17, 22, 23, 24, 25, 26, 27, 28, 29, 14, 15, 20, 21, ])[::-1]
#rearrangement = np.array([7, 8, 9, 10, 13, 14, 15, 16, 19, 20, 25, 26, 0, 1, 2, 3, 4, 5, 6, 11, 12, 17, 18, 23, 24, 29, 30, 31, 32, 33, 34, 35, 21, 22, 27, 28])[::-1]
rearrangement = np.array([0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 18, 19, 20, 5, 11, 17, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 15, 16, 21, 22])[::-1]
assert len(np.unique(rearrangement)) == Lx * Ly
assert np.allclose(np.arange(Lx * Ly), np.sort(rearrangement))
# new_idxs = idxs_rearrangement(states, rearrangement)
batch_size = len(states) // 48 + 1
new_idxs = Parallel(n_jobs=48)(
    delayed(idxs_rearrangement)(
        states[i * batch_size : min((i + 1) * batch_size, len(states))],
        rearrangement,
    )
    for i in range(48)
)
print('obtained results from threads, concatenating', flush=True)
new_idxs = np.concatenate(new_idxs)
print('concatenated', flush=True)
#assert np.allclose(new_idxs, states)
#for _ in range(1000):
# idx = np.random.randint(low=0, high=len(new_idxs))
# assert new_idxs[idx] == states[idx]
#print('asserted', flush=True)
idx = np.argsort(new_idxs)
np.save('/home/cluster/niastr/scratch/ed_data_flatiron_qsl_cavity/CA_rearrangement_36_large.npy', idx)
print('saved CA rearrangement', flush=True)
exit(-1)
def lambda_log_lambda(x):
y = np.where(x > 1e-7, np.log(x), 0.0)
y *= x
return y
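# lambda_log_lambda computes x*ln(x) with the 0*ln(0) = 0 convention
# (eigenvalues below 1e-7 are treated as zero), so for a Schmidt spectrum
# {l_i} the von Neumann entropy below is S = -sum_i l_i ln(l_i).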
logfile = open('/home/cluster/niastr/data/qsl_cavity/logs/log_30_{:s}.dat'.format(sys.argv[1]), 'w')
#ABC = np.load('/home/cluster/niastr/scratch/ed_data_flatiron_qsl_cavity/ABC_rearrangement_36_large.npy')
#ABC_inv = np.load('/home/cluster/niastr/scratch/ed_data_flatiron_qsl_cavity/ABC_rearrangement_36_large_inv.npy')
#BC = np.load('/home/cluster/niastr/scratch/ed_data_flatiron_qsl_cavity/BC_rearrangement_36_large.npy')
#CA = np.load('/home/cluster/niastr/scratch/ed_data_flatiron_qsl_cavity/CA_rearrangement_36_large.npy')
ground_state = np.load('/home/cluster/niastr/scratch/ed_data_flatiron_qsl_cavity/1.000_-0.900_{:s}_6_0_0/state_full.npy'.format(sys.argv[1]))
gs_rearr = ground_state[np.load('/home/cluster/niastr/scratch/ed_data_flatiron_qsl_cavity/ABC_rearrangement_36_large.npy')]
dim = Lx * Ly
hamming = dim // 2
for sub_dim in range(6, 1 + 6 + 4): #dim // 2 + 1):
print(sub_dim, flush=True)
rho = density_matrix(sub_dim, dim, hamming, gs_rearr.astype(np.float64))
entropy = 0
for iloop in range(len(rho)):
# np.save(sys.argv[3] + '_rho_{:d}_{:d}.npy'.format(sub_dim, iloop), rho)
spectrum, _ = np.linalg.eigh(rho[iloop])
entropy -= lambda_log_lambda(spectrum).sum()
# np.save('/home/cluster/niastr/data/qsl_cavity/logs/schmidt/spectrum_{:d}_{:d}.npy'.format(sub_dim, iloop), spectrum)
# np.save('/home/cluster/niastr/data/qsl_cavity/logs/schmidt/vectors_{:d}_{:d}.npy'.format(sub_dim, iloop), vectors)
if sub_dim == 6:
logfile.write('entropy_A = {:.10f}\n'.format(entropy))
if sub_dim == 10:
logfile.write('entropy_AB = {:.10f}\n'.format(entropy))
#if sub_dim == 16:
# logfile.write('entropy_ABC = {:.10f}\n'.format(entropy))
gs_rearr = ground_state[np.load('/home/cluster/niastr/scratch/ed_data_flatiron_qsl_cavity/ABC_rearrangement_36_large_inv.npy')]
for sub_dim in range(16, 1 + 16): #dim // 2 + 1):
print(sub_dim, flush=True)
rho = density_matrix(sub_dim, dim, hamming, gs_rearr.astype(np.float64))
entropy = 0
for iloop in range(len(rho)):
# np.save(sys.argv[3] + '_rho_{:d}_{:d}.npy'.format(sub_dim, iloop), rho)
spectrum, _ = np.linalg.eigh(rho[iloop])
entropy -= lambda_log_lambda(spectrum).sum()
# np.save('/home/cluster/niastr/data/qsl_cavity/logs/schmidt/spectrum_{:d}_{:d}.npy'.format(sub_dim, iloop), spectrum)
# np.save('/home/cluster/niastr/data/qsl_cavity/logs/schmidt/vectors_{:d}_{:d}.npy'.format(sub_dim, iloop), vectors)
if sub_dim == 16:
logfile.write('entropy_ABC = {:.10f}\n'.format(entropy))
gs_rearr = ground_state[np.load('/home/cluster/niastr/scratch/ed_data_flatiron_qsl_cavity/BC_rearrangement_36_large.npy')]
dim = Lx * Ly
hamming = dim // 2
for sub_dim in range(4, 1 + 4 + 8): #dim // 2 + 1):
rho = density_matrix(sub_dim, dim, hamming, gs_rearr.astype(np.float64))
entropy = 0
for iloop in range(len(rho)):
# np.save(sys.argv[3] + '_rho_{:d}_{:d}.npy'.format(sub_dim, iloop), rho)
spectrum, _ = np.linalg.eigh(rho[iloop])
entropy -= lambda_log_lambda(spectrum).sum()
# np.save('/home/cluster/niastr/data/qsl_cavity/logs/schmidt/spectrum_{:d}_{:d}.npy'.format(sub_dim, iloop), spectrum)
# np.save('/home/cluster/niastr/data/qsl_cavity/logs/schmidt/vectors_{:d}_{:d}.npy'.format(sub_dim, iloop), vectors)
if sub_dim == 4:
logfile.write('entropy_B = {:.10f}\n'.format(entropy))
if sub_dim == 14:
logfile.write('entropy_BC = {:.10f}\n'.format(entropy))
gs_rearr = ground_state[np.load('/home/cluster/niastr/scratch/ed_data_flatiron_qsl_cavity/CA_rearrangement_36_large.npy')]
dim = Lx * Ly
hamming = dim // 2
for sub_dim in range(10, 1 + 10 + 6): #dim // 2 + 1):
rho = density_matrix(sub_dim, dim, hamming, gs_rearr.astype(np.float64))
entropy = 0
for iloop in range(len(rho)):
# np.save(sys.argv[3] + '_rho_{:d}_{:d}.npy'.format(sub_dim, iloop), rho)
spectrum, _ = np.linalg.eigh(rho[iloop])
entropy -= lambda_log_lambda(spectrum).sum()
# np.save('/home/cluster/niastr/data/qsl_cavity/logs/schmidt/spectrum_{:d}_{:d}.npy'.format(sub_dim, iloop), spectrum)
# np.save('/home/cluster/niastr/data/qsl_cavity/logs/schmidt/vectors_{:d}_{:d}.npy'.format(sub_dim, iloop), vectors)
if sub_dim == 10:
logfile.write('entropy_C = {:.10f}\n'.format(entropy))
if sub_dim == 16:
logfile.write('entropy_CA = {:.10f}\n'.format(entropy))
logfile.close()
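# Presumably (assumption: not stated in this file) the seven entropies written
# above feed the Kitaev-Preskill combination for the topological entanglement
# entropy of regions A, B, C:
#   S_topo = S_A + S_B + S_C - S_AB - S_BC - S_CA + S_ABC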
import math
import os
import pickle
import sys

import numba
from numba import uint64, float32, float64
import numpy as np
import scipy.special
from joblib import delayed, Parallel
from lattice_symmetries import *
# TODO: import spin lengths?
NUMBER_SPINS = 16
# rewriting functions to make them faster
@numba.jit("uint64(uint64, uint64)", nogil=True, nopython=True)
def _binom(n, k):
r"""Compute the number of ways to choose k elements out of a pile of n.
:param n: the size of the pile of elements
:param k: the number of elements to take from the pile
:return: the number of ways to choose k elements from a pile of n.
"""
    assert 0 <= n and n < 40  # 40 is the maximum supported number of spins
    assert 0 <= k and k <= n
    if k == 0 or k == n:
        return 1
    total_ways = 1
    for i in range(min(k, n - k)):
        total_ways = total_ways * (n - i) // (i + 1)
    return total_ways
@numba.jit("uint64[:,:](uint64, uint64)", nogil=True, nopython=True)
def make_binom_cache(max_n, max_k):
"""
:max_n: maximum spin chain length
:max_k: maximum hamming weigth
:return: matrix of the number of ways to choose k=0,....,max_k up spins from
from n=1,..., max_n total spins.
"""
assert 0 < max_k and max_k <= max_n
assert max_n < 40
cache = np.zeros((max_n + 1, max_k +2), dtype=np.uint64)
for i in range(max_n + 1):
for j in range(min(i, max_k) + 2):
cache[i, j] = _binom(i,j) if j <= i else 0
return cache
@numba.jit("uint64(uint64)", nogil=True, nopython=True)
def _hamming_weight(n: int):
"""The Hamming weight counts the non zero-values in a bit string.
In the case of spin-chains this is the amount of up-spins.
Example: 10 -> 1010 -> hamming weight = 2
See https://stackoverflow.com/a/9830282
"""
n = (n & 0x5555555555555555) + ((n & 0xAAAAAAAAAAAAAAAA) >> 1)
n = (n & 0x3333333333333333) + ((n & 0xCCCCCCCCCCCCCCCC) >> 2)
n = (n & 0x0F0F0F0F0F0F0F0F) + ((n & 0xF0F0F0F0F0F0F0F0) >> 4)
n = (n & 0x00FF00FF00FF00FF) + ((n & 0xFF00FF00FF00FF00) >> 8)
n = (n & 0x0000FFFF0000FFFF) + ((n & 0xFFFF0000FFFF0000) >> 16)
n = (n & 0x00000000FFFFFFFF) + ((n & 0xFFFFFFFF00000000) >> 32)