Commit e63aeb09 authored by Marcus Haberland

Changed the integration method to Monte Carlo (MC) integration

We replaced scipy.integrate.quad with vegas Monte Carlo integration.
parent 09b222c0
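Note on the change: vegas performs adaptive Monte Carlo integration; an Integrator is built from the integration region and called with an integrand plus the number of iterations (nitn) and evaluations per iteration (neval), and the result carries mean, sdev and Q attributes. The following is a minimal sketch, not the repository's code, contrasting the old scipy.integrate.quad call with a vegas call for a one-dimensional integral of the form int_0^T f(t)^2 dt; the signal f and the values of T_obs and f_GW are illustrative stand-ins for the waveform derivatives produced by binary.h_i().

import numpy as np
from scipy import integrate
import vegas

T_obs = 1.0e5   # shortened toy observation window in seconds (assumed value)
f_GW = 1e-3     # toy gravitational-wave frequency in Hz (assumed value)

def f(t):
    # toy oscillatory signal standing in for a waveform derivative dh/dtheta_i
    return np.cos(2 * np.pi * f_GW * t)

# deterministic quadrature: quad returns (value, error estimate)
quad_val, quad_err = integrate.quad(lambda t: f(t)**2, 0, T_obs,
                                    limit=int(T_obs * f_GW * 20), epsrel=1e-2)

# Monte Carlo integration with vegas over the same 1-D domain;
# vegas hands the integrand each sample point as an array, hence x[0]
integ = vegas.Integrator([[0, T_obs]])
result = integ(lambda x: f(x[0])**2, nitn=10, neval=10**4)

print('quad : {:.3e} +- {:.1e}'.format(quad_val, quad_err))
print('vegas: {:.3e} +- {:.1e}, Q = {:.2f}'.format(result.mean, result.sdev, result.Q))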
@@ -10,6 +10,7 @@ import matplotlib.pyplot as plt
from scipy import integrate, optimize
import LISA
import pickle
import vegas
pi = np.pi
AU = 1495978707e2 # m
@@ -28,7 +29,6 @@ labels=['K','P','phi_0', 'theta_S_Ec', 'phi_S_Ec', 'theta_L', 'phi_L', 'ln(A)',
keys = enumerate(labels)
ArcTan = lambda x,y: np.arctan2(y,x)
file = 'dict_binaries.txt'
def isclose(a, b, rel_tol=1e-04, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
@@ -129,7 +129,7 @@ class binary:
self.gw_amplitude = self.a0*np.array([(1.+self.cos_inclination**2)/2, -self.cos_inclination]) # [A_plus, A_cross]
def sep(self):
return (G*(self.m1+self.m2)/(pi*freq)**2)**(1/6) #m
return (G*(self.m1+self.m2)/(pi*self.f_GW)**2)**(1/6) #m
def S_n(self):
return LISA.LISA().Sn(self.f_GW)
@@ -263,16 +263,27 @@ class binary:
print('Error in d_strain')
raise ValueError
def h_ixh_j(self,i,j,diff=1e-6):
def h_ixh_j(self,i,j,diff=1e-6,num=10**4,epsrel=1e-2):
print('Integrating dh/d{} * dh/d{} ...\n'.format(labels[i],labels[j]))
if i == j:
funcA = self.h_i(i,diff)
return integrate.quad(lambda t: (funcA(t)*1e20)**2,0,self.T_obs,limit=int(self.T_obs*self.f_GW*20),epsrel=self.epsrel,epsabs=0)[0]*1e-40
integ = vegas.Integrator([[0,self.T_obs]])
result = integ(lambda t: (funcA(t))**2,nitn=10,neval=num,adapt=False)
it = 0
while(result[0].sdev/result[0].mean > epsrel and it < 1): # try again, but harder
it += 1
print('Trying again, because Q = {:.3f} and relative error = {:.3f}'.format(result.Q, result[0].sdev/result[0].mean))
result = integ(lambda t: (funcA(t))**2,nitn=10,neval=num*10**it,adapt=False)
print('Integral = {:.3e} +- {:.3e}, rel. uncertainty = {:.3f}, Q = {:.3f} \n'.format(result[0].mean, result[0].sdev,result[0].sdev/result[0].mean, result.Q))
return result[0].mean
if i != j:
funcA = self.h_i(i,diff)
funcB = self.h_i(j,diff)
return integrate.quad(lambda t: funcA(t)*1e20*funcB(t)*1e20,0,self.T_obs,limit=int(self.T_obs*self.f_GW*20),epsrel=self.epsrel,epsabs=np.sqrt(self.Fishers[self.key-1,i,i]*1e40*self.Fishers[self.key-1,j,j]*1e40)*self.epsabs)[0]*1e-40
integ = vegas.Integrator([[0,self.T_obs]])
result = integ(lambda t: funcA(t)*funcB(t),nitn=6,neval=num*10,adapt=False,rtol=epsrel,atol=np.sqrt(self.Fishers[self.key-1,i,i] * self.Fishers[self.key-1,j,j])*self.epsrel/10)
print(result.summary(),' \n Integral = {:.3e} +- {:.3e}, rel. uncertainty = {:.3f}, Q = {:.3f} \n'.format(result[0].mean, result[0].sdev, result[0].sdev/abs(result[0].mean), result.Q))
return result[0].mean
def h_i(self,i,diff=1e-6):
B = None
if i == 3:
@@ -378,16 +389,25 @@ class binary:
self.Fisher *= 2./self.S_n() #2*mat/S_n
return self.Fisher
def rel_uncertainty(self,diff=1e-6,epsrel=1.49e-2,epsabs=1.49e-2,both=False):
def rel_uncertainty(self,diff=1e-6,both=False):
in_dict, js = self.in_dict_bin()
if in_dict:
return js['Tamanini_plot']
try:
unc = np.sqrt(np.abs(np.diag(np.linalg.inv(self.Fisher))[:3])) #np.linalg.eigvals(self.Fisher_mat()) for diagonalized
try:
unc = np.sqrt(np.diag(np.linalg.inv(self.Fisher)[:3]))
except:
print("Error in Uncertainty calculation! Error matrix isn't positive on the diagonal. Please retry with higher accuracy.")
unc = np.sqrt(np.abs(np.diag(np.linalg.inv(self.Fisher))[:3]))
self.add_json()
return unc/np.array([self.K,self.P,1.])
except:
unc = np.sqrt(np.abs(np.diag(np.linalg.inv(self.Fisher_mat(diff)))[:3])) #np.linalg.eigvals(self.Fisher_mat()) for diagonalized
try:
unc = np.sqrt(np.diag(np.linalg.inv(self.Fisher_mat(diff,both))[:3]))
except:
print("Error in Uncertainty calculation! Error matrix isn't positive on the diagonal. Please retry with higher accuracy.")
unc = np.sqrt(np.abs(np.diag(np.linalg.inv(self.Fisher_mat(diff,both)))[:3]))
unc = np.sqrt(np.abs(np.diag(np.linalg.inv(self.Fisher_mat(diff,both)))[:3])) #np.linalg.eigvals(self.Fisher_mat()) for diagonalized
self.add_json()
return unc/np.array([self.K,self.P,1.])
@@ -398,43 +418,45 @@ class binary:
self.key = key
res += integrate.quad(lambda t: self.strain(t)**2,0,self.T_obs,limit=int(self.T_obs*self.f_GW*20))[0]
else:
res += 1e-20*integrate.quad(lambda t: (self.strain(t)*1e10)**2,0,self.T_obs,limit=int(self.T_obs*self.f_GW*20),epsrel=self.epsrel,epsabs=0)[0]
res += integrate.quad(lambda t: (self.strain(t))**2,0,self.T_obs,limit=int(self.T_obs*self.f_GW*20),epsrel=self.epsrel,epsabs=0)[0]
res *= np.sqrt(2)
return res
def signal_to_noise(self):
def signal_to_noise(self,both=False):
if not hasattr(self, 'snr'):
self.snr = np.sqrt(2*self.hxh()/self.S_n())
self.snr = np.sqrt(2*self.hxh(both)/self.S_n())
return self.snr
def json(self):
if not hasattr(self, 'Fisher'):
self.Fisher_mat()
err = np.linalg.inv(self.Fisher)
labels = ['K','P','phi_0', 'theta_S_Ec', 'phi_S_Ec', 'theta_L', 'phi_L', 'ln(A)', 'f_1', 'f_0','M_P','M_c','dist']
vals = [self.K,self.P/yr,self.phi_0,self.theta_S_Ec,self.phi_S_Ec,self.theta_L,self.phi_L,np.log(self.a0),self.f_1,self.f_GW,self.mP/M_J,self.chirp_mass,self.dist/pc]
if self.mode == 'm':
vals2 = [self.K,self.P,1.,1.,1.,1.,1.,np.log(self.a0),self.f_1]
if self.mode == 's':
vals2 = [self.K,self.P,1.]
if self.mode == 'l':
vals2 = [self.K,self.P,1.,1.,1.,1.,1.,np.log(self.a0),self.f_1,self.f_0]
rel_unc = err / np.outer(vals2,vals2)
self.js = {'Fisher' : self.Fisher,'Error' : err,
'pars' : {label : vals[i] for i,label in enumerate(labels)},
'correlation' : rel_unc,
'rel_unc' : np.sqrt(np.diag(rel_unc)),
'exo_rel_unc' : np.sqrt(np.diag(rel_unc))[:3],
'SNR' : self.signal_to_noise(),
'binary' : self
}
self.js['Tamanini_plot'] = self.js['SNR']*self.js['pars']['M_P']*self.js['exo_rel_unc']
if not hasattr(self, 'js'):
err = np.linalg.inv(self.Fisher)
labels = ['K','P','phi_0', 'theta_S_Ec', 'phi_S_Ec', 'theta_L', 'phi_L', 'ln(A)', 'f_1', 'f_0','M_P','M_c','dist']
vals = [self.K,self.P/yr,self.phi_0,self.theta_S_Ec,self.phi_S_Ec,self.theta_L,self.phi_L,np.log(self.a0),self.f_1,self.f_GW,self.mP/M_J,self.chirp_mass,self.dist/pc]
if self.mode == 'm':
vals2 = [self.K,self.P,1.,1.,1.,1.,1.,np.log(self.a0),self.f_1]
if self.mode == 's':
vals2 = [self.K,self.P,1.]
if self.mode == 'l':
vals2 = [self.K,self.P,1.,1.,1.,1.,1.,np.log(self.a0),self.f_1,self.f_0]
rel_unc = err / np.outer(vals2,vals2)
self.js = {'Fisher' : self.Fisher,'Error' : err,
'pars' : {label : vals[i] for i,label in enumerate(labels)},
'correlation' : rel_unc,
'rel_unc' : np.sqrt(np.diag(rel_unc)),
'exo_rel_unc' : np.sqrt(np.diag(rel_unc))[:3],
'SNR' : self.signal_to_noise(),
'binary' : self
}
self.js['Tamanini_plot'] = self.js['SNR']*self.js['pars']['M_P']*self.js['exo_rel_unc']
return self.js
def add_json(self):
in_dict, js = self.in_dict_bin()
if not in_dict:
self.json()
infile = open(file,'rb')
dict_binaries = pickle.load(infile)
infile.close()
@@ -445,6 +467,7 @@ class binary:
outfile.close()
return self.js
else:
self.js = js
return js
def in_dict_bin(self):