This module contains classes and functions for post-processing the output
of the Bayesian parameter estimation codes.
41from math
import cos,ceil,floor,sqrt,pi
as pi_constant
42from xml.dom
import minidom
43from operator
import itemgetter
48from .io
import read_samples
54from matplotlib
import pyplot
as plt,lines
as mpl_lines
55from scipy
import stats
56from scipy
import special
57from scipy
import signal
58from scipy.optimize
import newton
59from scipy
import interpolate
60from scipy
import integrate
63from itertools
import combinations
64from .lalinference
import LALInferenceHDF5PosteriorSamplesDatasetName
as posterior_grp_name
68 import lalsimulation
as lalsim
70 print(
'Cannot import lalsimulation SWIG bindings')
74 from .imrtgr.nrutils
import bbh_average_fits_precessing
76 print(
'Cannot import lalinference.imrtgr.nrutils. Will suppress final parameter and peak luminosity calculations.')
81 hostname_short=socket.gethostbyaddr(socket.gethostname())[0].split(
'.',1)[1]
83 hostname_short=
'Unknown'
84if hostname_short==
'ligo.caltech.edu' or hostname_short==
'cluster.ldas.cit':
85 matplotlib.rcParams.update(
86 {
'mathtext.fontset' :
"custom",
87 'mathtext.fallback' :
'cm'
90from xml.etree.cElementTree
import Element, SubElement, tostring, XMLParser
94from lal
import LIGOTimeGPS, TimeDelayFromEarthCenter
95from .
import git_version
# Module metadata: authorship, version and date, with the version string
# taken from the git commit information recorded at build time.
__author__="Ben Aylott <benjamin.aylott@ligo.org>, Ben Farr <bfarr@u.northwestern.edu>, Will M. Farr <will.farr@ligo.org>, John Veitch <john.veitch@ligo.org>, Vivien Raymond <vivien.raymond@ligo.org>"
__version__="git id %s"%git_version.id
__date__= git_version.date
102 return siminspiral.geocent_end_time + 1e-9*siminspiral.geocent_end_time_ns
105 """Workaround for missing astropy.table.Table.replace_column method,
106 which was added in Astropy 1.1.
108 FIXME: remove this function when LALSuite depends on Astropy >= 1.1.
"""
109 index = table.colnames.index(old)
110 table.remove_column(old)
111 table.add_column(astropy.table.Column(new, name=old), index=index)
114 """Workaround for missing astropy.table.Table.as_array method,
115 which was added in Astropy 1.0.
117 FIXME: remove this function when LALSuite depends on Astropy >= 1.0.
"""
119 return table.as_array()
# Column names that hold log-likelihood / log-evidence style quantities.
logParams = [
    'logl', 'loglh1', 'loglh2', 'logll1', 'loglv1',
    'deltalogl', 'deltaloglh1', 'deltalogll1', 'deltaloglv1',
    'logw', 'logprior', 'logpost', 'nulllogl',
    'chain_log_evidence', 'chain_delta_log_evidence',
    'chain_log_noise_evidence', 'chain_log_bayes_factor',
]
# Relative phase between each pair of detectors, e.g. 'h1l1_relative_phase'.
relativePhaseParams = [
    first + second + '_relative_phase'
    for first, second in combinations(['h1', 'l1', 'v1'], 2)
]
# SNR-related columns: network quantities followed by per-detector
# optimal SNRs, complex-SNR amplitudes/arguments, and relative phases.
snrParams = (
    ['snr', 'optimal_snr', 'matched_filter_snr', 'coherence']
    + ['%s_optimal_snr' % det for det in ['h1', 'l1', 'v1']]
    + ['%s_cplx_snr_amp' % det for det in ['h1', 'l1', 'v1']]
    + ['%s_cplx_snr_arg' % det for det in ['h1', 'l1', 'v1']]
    + relativePhaseParams
)
# Calibration amplitude and phase error columns, one per detector.
calAmpParams = ['calamp_%s' % det for det in ['h1', 'l1', 'v1']]
calPhaseParams = ['calpha_%s' % det for det in ['h1', 'l1', 'v1']]
calParams = calAmpParams + calPhaseParams
# Mass parameters: component, chirp and total masses, mass ratios, and
# the detector-frame final mass in its evolved/non-evolved variants.
massParams = [
    'm1', 'm2', 'chirpmass', 'mchirp', 'mc',
    'eta', 'q', 'massratio', 'asym_massratio', 'mtotal',
    'mf', 'mf_evol', 'mf_nonevol',
]
# Spin parameters for precessing analyses: magnitudes, angles, tilts
# (including at ISCO) and the final-spin variants.
spinParamsPrec = [
    'a1', 'a2',
    'phi1', 'theta1', 'phi2', 'theta2',
    'costilt1', 'costilt2', 'costheta_jn', 'cosbeta',
    'tilt1', 'tilt1_isco', 'tilt2', 'tilt2_isco',
    'phi_jl', 'theta_jn', 'phi12', 'phi12_isco',
    'af', 'af_evol', 'af_nonevol',
    'afz', 'afz_evol', 'afz_nonevol',
]
# Spin parameters for (anti-)aligned-spin analyses.
spinParamsAli = ['spin1', 'spin2', 'a1z', 'a2z']
# Effective-spin combinations.
spinParamsEff = ['chi', 'effectivespin', 'chi_eff', 'chi_tot', 'chi_p']
spinParams = spinParamsPrec + spinParamsEff + spinParamsAli
# Source-frame (redshift-corrected) parameters, including the variants
# computed at the maximum-likelihood distance ('maxldist').
cosmoParam = [
    'm1_source', 'm2_source', 'mtotal_source', 'mc_source',
    'redshift',
    'mf_source', 'mf_source_evol', 'mf_source_nonevol',
    'm1_source_maxldist', 'm2_source_maxldist',
    'mtotal_source_maxldist', 'mc_source_maxldist',
    'redshift_maxldist',
    'mf_source_maxldist', 'mf_source_maxldist_evol',
    'mf_source_maxldist_nonevol',
]
# Parameterized post-Einsteinian (ppE) deviation coefficients.
ppEParams = [
    'ppEalpha', 'ppElowera', 'ppEupperA',
    'ppEbeta', 'ppElowerb', 'ppEupperB',
    'alphaPPE', 'aPPE', 'betaPPE', 'bPPE',
]
# Testing-GR ("TIGER") deviation parameters: post-Newtonian phase
# deviations dchi*, the dxi/dalpha/dbeta/dsigma coefficients, a dipole
# term, the negative-PN dchiminus/dchiMinus terms, and db*/dc* terms.
tigerParams = (
    ['dchi%i' % i for i in range(8)]
    + ['dchi%il' % i for i in [5, 6]]
    + ['dxi%d' % (i + 1) for i in range(6)]
    + ['dalpha%i' % (i + 1) for i in range(5)]
    + ['dbeta%i' % (i + 1) for i in range(3)]
    + ['dsigma%i' % (i + 1) for i in range(4)]
    + ['dipolecoeff']
    + ['dchiminus%i' % i for i in [1, 2]]
    + ['dchiMinus%i' % i for i in [1, 2]]
    + ['db1', 'db2', 'db3', 'db4', 'dc1', 'dc2', 'dc4', 'dcl']
)
# Quasi-normal-mode frequency/damping-time deviations, per (l,m,n) mode.
qnmtestParams = [
    prefix + mode
    for mode in ['220', '210', '330', '440', '550']
    for prefix in ['domega', 'dtau']
]
# Brans-Dicke scalar-tensor parameters.
bransDickeParams = ['omegaBD', 'ScalarCharge1', 'ScalarCharge2']
# Massive-graviton Compton wavelength.
massiveGravitonParams = ['lambdaG']
# Lorentz-invariance-violation dispersion parameters.
lorentzInvarianceViolationParams = [
    'log10lambda_a', 'lambda_a',
    'log10lambda_eff', 'lambda_eff',
    'log10livamp', 'liv_amp',
]
# Tidal-deformability parameters.
tidalParams = [
    'lambda1', 'lambda2',
    'lam_tilde', 'dlam_tilde',
    'lambdat', 'dlambdat',
    'lambdas', 'bluni',
]
# Piecewise-polytrope equation-of-state parameters.
fourPiecePolyParams = ['logp1', 'gamma1', 'gamma2', 'gamma3']
# Spectral equation-of-state expansion coefficients.
spectralParams = ['sdgamma%i' % i for i in range(4)]
# Radiated-energy and peak-luminosity parameters (with maxldist variants).
energyParams = [
    'e_rad', 'e_rad_evol', 'e_rad_nonevol',
    'l_peak', 'l_peak_evol', 'l_peak_nonevol',
    'e_rad_maxldist', 'e_rad_maxldist_evol', 'e_rad_maxldist_nonevol',
]
# Spin-induced quadrupole-moment deviation parameters.
# Fix: the original list repeated 'dquadmona'; the second occurrence was
# evidently intended to be the symmetric combination 'dquadmons' (the
# plot-label table elsewhere in this module defines labels for both
# 'dquadmons' and 'dquadmona').
spininducedquadParams = [
    'dquadmon1',
    'dquadmon2',
    'dquadmons',
    'dquadmona',
]
156 ppEParams + tigerParams + bransDickeParams + massiveGravitonParams
157 + tidalParams + fourPiecePolyParams + spectralParams + energyParams
158 + lorentzInvarianceViolationParams + spininducedquadParams + qnmtestParams
# Extrinsic-parameter column names: distance, inclination, polarisation,
# sky position, orbital phase and arrival times.
distParams = ['distance', 'distMPC', 'dist', 'distance_maxl']
incParams = ['iota', 'inclination', 'cosiota']
polParams = ['psi', 'polarisation', 'polarization']
skyParams = ['ra', 'rightascension', 'declination', 'dec']
phaseParams = ['phase', 'phi0', 'phase_maxl']
timeParams = ['time', 'time_mean']
endTimeParams = ['l1_end_time', 'h1_end_time', 'v1_end_time']
# Sampler-statistics columns.
statsParams = [
    'logprior', 'logl',
    'deltalogl', 'deltaloglh1', 'deltalogll1', 'deltaloglv1',
    'deltaloglh2', 'deltaloglg1',
]
# Calibration-error columns.
calibParams = [
    'calpha_l1', 'calpha_h1', 'calpha_v1',
    'calamp_l1', 'calamp_h1', 'calamp_v1',
]
# Credible-interval levels used throughout the post-processing.
confidenceLevels = [0.67, 0.9, 0.95, 0.99]
# Default bin widths used by the greedy-binning credible-interval code,
# keyed by parameter name.
# Fix: the original dict literal contained the key 'costheta_jn' twice
# (first 0.02, later 0.1); only the later value ever took effect, so the
# dead 0.02 entry has been removed and the effective 0.1 kept.
greedyBinSizes = {
    # Masses (detector frame, source frame, and maxL-distance variants).
    'mc': 0.025, 'm1': 0.1, 'm2': 0.1, 'mass1': 0.1, 'mass2': 0.1,
    'mtotal': 0.1,
    'mc_source': 0.025, 'm1_source': 0.1, 'm2_source': 0.1,
    'mtotal_source': 0.1,
    'mc_source_maxldist': 0.025, 'm1_source_maxldist': 0.1,
    'm2_source_maxldist': 0.1, 'mtotal_source_maxldist': 0.1,
    'eta': 0.001, 'q': 0.01, 'asym_massratio': 0.01,
    # Orientation and timing.
    'iota': 0.01, 'cosiota': 0.02,
    'time': 1e-4, 'time_mean': 1e-4,
    # Distance and redshift.
    'distance': 1.0, 'dist': 1.0, 'distance_maxl': 1.0,
    'redshift': 0.01, 'redshift_maxldist': 0.01,
    'mchirp': 0.025, 'chirpmass': 0.025,
    # Spins.
    'spin1': 0.04, 'spin2': 0.04, 'a1z': 0.04, 'a2z': 0.04,
    'a1': 0.02, 'a2': 0.02,
    'phi1': 0.05, 'phi2': 0.05, 'theta1': 0.05, 'theta2': 0.05,
    # Sky position.
    'ra': 0.05, 'dec': 0.05,
    'chi': 0.05, 'chi_eff': 0.05, 'chi_tot': 0.05, 'chi_p': 0.05,
    'costilt1': 0.02, 'costilt2': 0.02,
    # NOTE(review): 'thatas' looks like a typo (perhaps 'thetas'); the key
    # is kept as-is because downstream look-ups may rely on it — confirm.
    'thatas': 0.05,
    'beta': 0.05, 'omega': 0.05, 'cosbeta': 0.02,
    # ppE deviation parameters.
    'ppealpha': 1.0, 'ppebeta': 1.0,
    'ppelowera': 0.01, 'ppelowerb': 0.01,
    'ppeuppera': 0.01, 'ppeupperb': 0.01,
    'polarisation': 0.04,
    'rightascension': 0.05, 'declination': 0.05,
    'massratio': 0.001, 'inclination': 0.01, 'phase': 0.05,
    'tilt1': 0.05, 'tilt2': 0.05,
    'phi_jl': 0.05, 'theta_jn': 0.05, 'phi12': 0.05,
    'flow': 1.0, 'phase_maxl': 0.05,
    # Calibration errors.
    'calamp_l1': 0.01, 'calamp_h1': 0.01, 'calamp_v1': 0.01,
    'calpha_h1': 0.01, 'calpha_l1': 0.01, 'calpha_v1': 0.01,
    'logdistance': 0.1, 'psi': 0.1,
    'costheta_jn': 0.1,
    # Final mass (all variants).
    'mf': 0.1, 'mf_evol': 0.1, 'mf_nonevol': 0.1,
    'mf_source': 0.1, 'mf_source_evol': 0.1, 'mf_source_nonevol': 0.1,
    'mf_source_maxldist': 0.1, 'mf_source_maxldist_evol': 0.1,
    'mf_source_maxldist_nonevol': 0.1,
    # Final spin (all variants).
    'af': 0.02, 'af_evol': 0.02, 'af_nonevol': 0.02,
    'afz': 0.02, 'afz_evol': 0.01, 'afz_nonevol': 0.01,
    # Radiated energy and peak luminosity (all variants).
    'e_rad': 0.1, 'e_rad_evol': 0.1, 'e_rad_nonevol': 0.1,
    'e_rad_maxldist': 0.1, 'e_rad_maxldist_evol': 0.1,
    'e_rad_maxldist_nonevol': 0.1,
    'l_peak': 0.1, 'l_peak_evol': 0.1, 'l_peak_nonevol': 0.1,
}
179 greedyBinSizes[s]=0.05
# Derived per-detector arrival times and inter-detector delays share the
# bin width of 'time'.
greedyBinSizes.update(
    (name, greedyBinSizes['time'])
    for name in ['h1_end_time', 'l1_end_time', 'v1_end_time',
                 'h1l1_delay', 'l1v1_delay', 'h1v1_delay'])
# Inter-detector relative phases get a fixed angular bin width.
greedyBinSizes.update((name, 0.05) for name in relativePhaseParams)
# Testing-GR deviation parameters share a fine bin width.
greedyBinSizes.update(
    (name, 0.01)
    for name in tigerParams + bransDickeParams + massiveGravitonParams
    + lorentzInvarianceViolationParams + qnmtestParams)
# Matter / equation-of-state parameters use a coarse bin width.
greedyBinSizes.update(
    (name, 2.5)
    for name in tidalParams + fourPiecePolyParams + spectralParams
    + spininducedquadParams)
# Likelihood-statistics columns.
greedyBinSizes.update((name, 0.1) for name in statsParams)
199 location = lal.cached_detector_by_prefix[ifo_prefix].location
202 t_gps = LIGOTimeGPS(inj.geocent_end_time, inj.geocent_end_time_ns)
203 t0 = inj.geocent_end_time + 1e-9*inj.geocent_end_time_ns
204 return t0 + TimeDelayFromEarthCenter(location,ra,dec,t_gps)
# Default matplotlib line styles cycled through when several posteriors
# are drawn on one axis.
__default_line_styles = ['solid', 'dashed', 'dashdot', 'dotted']
# Default colour cycle for one-dimensional posterior plots.
__default_color_lst = ['r', 'b', 'y', 'g', 'c', 'm']
211__default_css_string=
"""
214font-family:"Trebuchet MS", Arial, Helvetica, sans-serif;
241font-family:"Trebuchet MS", Arial, Helvetica, sans-serif;
243border-collapse:collapse;
248border:1px solid #B5C1CF;
249padding:3px 7px 2px 7px;
257background-color:#B3CEEF;
282border-bottom-style:double;
286__default_javascript_string='''
288function toggle_visibility(tbid,lnkid)
291 if(document.all){document.getElementById(tbid).style.display = document.getElementById(tbid).style.display == 'block' ?
'none' :
'block';}
293 else{document.getElementById(tbid).style.display = document.getElementById(tbid).style.display ==
'table' ?
'none' :
'table';}
295 document.getElementById(lnkid).value = document.getElementById(lnkid).value ==
'[-] Collapse' ?
'[+] Expand' :
'[-] Collapse';
303#===============================================================================
304# Function to return the correct prior distribution for selected parameters
305#===============================================================================
316 'mtotal_source':
None,
319 'm1_source_maxldist':
None,
320 'm2_source_maxldist':
None,
321 'mtotal_source_maxldist':
None,
322 'mc_source_maxldist':
None,
323 'redshift_maxldist':
None,
328 'mf_source_evol':
None,
329 'mf_source_nonevol':
None,
330 'mf_source_maxldist':
None,
331 'mf_source_maxldist_evol':
None,
332 'mf_source_maxldist_nonevol':
None,
341 'e_rad_nonevol':
None,
342 'e_rad_maxldist':
None,
343 'e_rad_maxldist_evol':
None,
344 'e_rad_maxldist_nonevol':
None,
347 'l_peak_nonevol':
None,
365 'costilt1':
'uniform',
366 'costilt2':
'uniform',
370 'time_mean':
'uniform',
372 'distance_maxl':
'x**2',
378 'costheta_jn':
'uniform',
393 'lambda1' :
'uniform',
394 'lambda2':
'uniform',
407 'calamp_h1' :
'uniform',
408 'calamp_l1' :
'uniform',
409 'calpha_h1' :
'uniform',
410 'calpha_l1' :
'uniform',
411 'polar_eccentricity':
'uniform',
412 'polar_angle':
'uniform',
416 return distributions(name)
425 A lookup table for plot labels.
427 m1_names = ['mass1',
'm1']
428 m2_names = [
'mass2',
'm2']
429 mc_names = [
'mc',
'mchirp',
'chirpmass']
430 eta_names = [
'eta',
'massratio',
'sym_massratio']
431 q_names = [
'q',
'asym_massratio']
432 iota_names = [
'iota',
'incl',
'inclination']
433 dist_names = [
'dist',
'distance']
434 ra_names = [
'rightascension',
'ra']
435 dec_names = [
'declination',
'dec']
436 phase_names = [
'phi_orb',
'phi',
'phase',
'phi0']
437 gr_test_names = [
'dchiMinus2',
'dchiMinus1'] + [
'dchi%d'%i
for i
in range(8)]+[
'dchil%d'%i
for i
in [5,6]]+[
'dxi%d'%(i+1)
for i
in range(6)]+[
'dalpha%d'%(i+1)
for i
in range(5)]+[
'dbeta%d'%(i+1)
for i
in range(3)]+[
'dsigma%d'%(i+1)
for i
in range(4)] + [
'dipolecoeff'] + [
'db1',
'db2',
'db3',
'db4',
'dc1',
'dc2',
'dc4',
'dcl']
440 'm1':
r'$m_1\,(\mathrm{M}_\odot)$',
441 'm2':
r'$m_2\,(\mathrm{M}_\odot)$',
442 'mc':
r'$\mathcal{M}\,(\mathrm{M}_\odot)$',
445 'mtotal':
r'$M_\mathrm{total}\,(\mathrm{M}_\odot)$',
446 'm1_source':
r'$m_{1}^\mathrm{source}\,(\mathrm{M}_\odot)$',
447 'm2_source':
r'$m_{2}^\mathrm{source}\,(\mathrm{M}_\odot)$',
448 'mtotal_source':
r'$M_\mathrm{total}^\mathrm{source}\,(\mathrm{M}_\odot)$',
449 'mc_source':
r'$\mathcal{M}^\mathrm{source}\,(\mathrm{M}_\odot)$',
451 'm1_source_maxldist':
r'$m_{1}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
452 'm2_source_maxldist':
r'$m_{2}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
453 'mtotal_source_maxldist':
r'$M_\mathrm{total}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
454 'mc_source_maxldist':
r'$\mathcal{M}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
455 'redshift_maxldist':
r'$z^\mathrm{maxLdist}$',
456 'mf':
r'$M_\mathrm{final}\,(\mathrm{M}_\odot)$',
457 'mf_evol':
r'$M_\mathrm{final}^\mathrm{evol}\,(\mathrm{M}_\odot)$',
458 'mf_nonevol':
r'$M_\mathrm{final}^\mathrm{non-evol}\,(\mathrm{M}_\odot)$',
459 'mf_source':
r'$M_\mathrm{final}^\mathrm{source}\,(\mathrm{M}_\odot)$',
460 'mf_source_evol':
r'$M_\mathrm{final}^\mathrm{source, evol}\,(\mathrm{M}_\odot)$',
461 'mf_source_nonevol':
r'$M_\mathrm{final}^\mathrm{source, non-evol}\,(\mathrm{M}_\odot)$',
462 'mf_source_maxldist':
r'$M_\mathrm{final}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
463 'mf_source_maxldist_evol':
r'$M_\mathrm{final}^\mathrm{source, evol - maxLdist}\,(\mathrm{M}_\odot)$',
464 'mf_source_maxldist_nonevol':
r'$M_\mathrm{final}^\mathrm{source, non-evol - maxLdist}\,(\mathrm{M}_\odot)$',
465 'af':
r'$a_\mathrm{final}$',
466 'af_evol':
r'$a_\mathrm{final}^\mathrm{evol}$',
467 'af_nonevol':
r'$a_\mathrm{final}^\mathrm{non-evol}$',
468 'afz':
r'$a_{\mathrm{final}, z}$',
469 'afz_evol':
r'$a_{\mathrm{final}, z}^\mathrm{evol}$',
470 'afz_nonevol':
r'$a_{\mathrm{final}, z}^\mathrm{non-evol}$',
471 'e_rad':
r'$E_\mathrm{rad}\,(\mathrm{M}_\odot)$',
472 'e_rad_evol':
r'$E_\mathrm{rad}^\mathrm{evol}\,(\mathrm{M}_\odot)$',
473 'e_rad_nonevol':
r'$E_\mathrm{rad}^\mathrm{non-evol}\,(\mathrm{M}_\odot)$',
474 'e_rad_maxldist':
r'$E_\mathrm{rad}^\mathrm{maxLdist}\,(\mathrm{M}_\odot)$',
475 'e_rad_maxldist_evol':
r'$E_\mathrm{rad}^\mathrm{evol - maxLdist}\,(\mathrm{M}_\odot)$',
476 'e_rad_maxldist_nonevol':
r'$E_\mathrm{rad}^\mathrm{non-evol - maxLdist}\,(\mathrm{M}_\odot)$',
477 'l_peak':
r'$L_\mathrm{peak}\,(10^{56}\,\mathrm{ergs}\,\mathrm{s}^{-1})$',
478 'l_peak_evol':
r'$L_\mathrm{peak}^\mathrm{evol}\,(10^{56}\,\mathrm{ergs}\,\mathrm{s}^{-1})$',
479 'l_peak_nonevol':
r'$L_\mathrm{peak}^\mathrm{non-evol}\,(10^{56}\,\mathrm{ergs}\,\mathrm{s}^{-1})$',
486 'theta1':
r'$\theta_1\,(\mathrm{rad})$',
487 'theta2':
r'$\theta_2\,(\mathrm{rad})$',
488 'phi1':
r'$\phi_1\,(\mathrm{rad})$',
489 'phi2':
r'$\phi_2\,(\mathrm{rad})$',
490 'chi_eff':
r'$\chi_\mathrm{eff}$',
491 'chi_tot':
r'$\chi_\mathrm{total}$',
492 'chi_p':
r'$\chi_\mathrm{P}$',
493 'tilt1':
r'$t_1\,(\mathrm{rad})$',
494 'tilt2':
r'$t_2\,(\mathrm{rad})$',
495 'tilt1_isco':
r'$t_1^\mathrm{ISCO}\,(\mathrm{rad})$',
496 'tilt2_isco':
r'$t_2^\mathrm{ISCO}\,(\mathrm{rad})$',
497 'costilt1':
r'$\mathrm{cos}(t_1)$',
498 'costilt2':
r'$\mathrm{cos}(t_2)$',
499 'iota':
r'$\iota\,(\mathrm{rad})$',
500 'cosiota':
r'$\mathrm{cos}(\iota)$',
501 'time':
r'$t_\mathrm{c}\,(\mathrm{s})$',
502 'time_mean':
r'$<t>\,(\mathrm{s})$',
503 'dist':
r'$d_\mathrm{L}\,(\mathrm{Mpc})$',
504 'distance_maxl':
r'$d_\mathrm{L}^\mathrm{maxL}\,(\mathrm{Mpc})$',
507 'phase':
r'$\phi\,(\mathrm{rad})$',
508 'phase_maxl':
r'$\phi^\mathrm{maxL}\,(\mathrm{rad})$',
509 'psi':
r'$\psi\,(\mathrm{rad})$',
510 'theta_jn':
r'$\theta_\mathrm{JN}\,(\mathrm{rad})$',
511 'costheta_jn':
r'$\mathrm{cos}(\theta_\mathrm{JN})$',
512 'beta':
r'$\beta\,(\mathrm{rad})$',
513 'cosbeta':
r'$\mathrm{cos}(\beta)$',
514 'phi_jl':
r'$\phi_\mathrm{JL}\,(\mathrm{rad})$',
515 'phi12':
r'$\phi_\mathrm{12}\,(\mathrm{rad})$',
516 'phi12_isco':
r'$\phi_\mathrm{12}^\mathrm{ISCO}\,(\mathrm{rad})$',
517 'logl':
r'$\mathrm{log}(\mathcal{L})$',
518 'h1_end_time':
r'$t_\mathrm{H}$',
519 'l1_end_time':
r'$t_\mathrm{L}$',
520 'v1_end_time':
r'$t_\mathrm{V}$',
521 'h1l1_delay':
r'$\Delta t_\mathrm{HL}$',
522 'h1v1_delay':
r'$\Delta t_\mathrm{HV}$',
523 'l1v1_delay':
r'$\Delta t_\mathrm{LV}$',
524 'lambdat' :
r'$\tilde{\Lambda}$',
525 'dlambdat':
r'$\delta \tilde{\Lambda}$',
526 'lambda1' :
r'$\lambda_1$',
527 'lambda2':
r'$\lambda_2$',
528 'lam_tilde' :
r'$\tilde{\Lambda}$',
529 'dlam_tilde':
r'$\delta \tilde{\Lambda}$',
530 'logp1':
r'$\log(p_1)$',
531 'gamma1':
r'$\Gamma_1$',
532 'gamma2':
r'$\Gamma_2$',
533 'gamma3':
r'$\Gamma_3$',
534 'sdgamma0' :
r'$\gamma_{0}$',
535 'sdgamma1' :
r'$\gamma_{1}$',
536 'sdgamma2' :
r'$\gamma_{2}$',
537 'sdgamma3' :
r'$\gamma_{3}$',
538 'calamp_h1' :
r'$\delta A_{H1}$',
539 'calamp_l1' :
r'$\delta A_{L1}$',
540 'calpha_h1' :
r'$\delta \phi_{H1}$',
541 'calpha_l1' :
r'$\delta \phi_{L1}$',
542 'polar_eccentricity':
r'$\epsilon_{polar}$',
543 'polar_angle':
r'$\alpha_{polar}$',
544 'alpha':
r'$\alpha_{polar}$',
545 'dchiminus1':
r'$d\chi_{-1}$',
546 'dchiminus2':
r'$d\chi_{-2}$',
547 'dchi0':
r'$d\chi_0$',
548 'dchi1':
r'$d\chi_1$',
549 'dchi2':
r'$d\chi_2$',
550 'dchi3':
r'$d\chi_3$',
551 'dchi3S':
r'$d\chi_{3S}$',
552 'dchi3NS':
r'$d\chi_{3NS}$',
553 'dchi4':
r'$d\chi_4$',
554 'dchi4S':
r'$d\chi_{4S}$',
555 'dchi4NS':
r'$d\chi_{4NS}$',
556 'dchi5':
r'$d\chi_5$',
557 'dchi5S':
r'$d\chi_{5S}$',
558 'dchi5NS':
r'$d\chi_{5NS}$',
559 'dchi5l':
r'$d\chi_{5l}$',
560 'dchi5lS':
r'$d\chi_{5lS}$',
561 'dchi5lNS':
r'$d\chi_{5lNS}$',
562 'dchi6':
r'$d\chi_6$',
563 'dchi6S':
r'$d\chi_{6S}$',
564 'dchi6NS':
r'$d\chi_{6NS}$',
565 'dchi6l':
r'$d\chi_{6l}$',
566 'dchi7':
r'$d\chi_7$',
567 'dchi7S':
r'$d\chi_{7S}$',
568 'dchi7NS':
r'$d\chi_{7NS}$',
575 'dalpha1':
r'$d\alpha_1$',
576 'dalpha2':
r'$d\alpha_2$',
577 'dalpha3':
r'$d\alpha_3$',
578 'dalpha4':
r'$d\alpha_4$',
579 'dalpha5':
r'$d\alpha_5$',
580 'dbeta1':
r'$d\beta_1$',
581 'dbeta2':
r'$d\beta_2$',
582 'dbeta3':
r'$d\beta_3$',
583 'dsigma1':
r'$d\sigma_1$',
584 'dsigma2':
r'$d\sigma_2$',
585 'dsigma3':
r'$d\sigma_3$',
586 'dsigma4':
r'$d\sigma_4$',
587 'dquadmon1':
r'$\delta\kappa_1$',
588 'dquadmon2':
r'$\delta\kappa_2$',
589 'dquadmons':
r'$\delta\kappa_s$',
590 'dquadmona':
r'$\delta\kappa_a$',
591 'domega220':
r'$d\omega_{220}$',
592 'dtau220':
r'$d\tau_{220}$',
593 'domega210':
r'$d\omega_{210}$',
594 'dtau210':
r'$d\tau_{210}$',
595 'domega330':
r'$d\omega_{330}$',
596 'dtau330':
r'$d\tau_{330}$',
597 'domega440':
r'$d\omega_{440}$',
598 'dtau440':
r'$d\tau_{440}$',
599 'domega550':
r'$d\omega_{550}$',
600 'dtau550':
r'$d\tau_{550}$',
601 'optimal_snr':
r'$\rho^{opt}$',
602 'h1_optimal_snr':
r'$\rho^{opt}_{H1}$',
603 'l1_optimal_snr':
r'$\rho^{opt}_{L1}$',
604 'v1_optimal_snr':
r'$\rho^{opt}_{V1}$',
605 'matched_filter_snr':
r'$\rho^{MF}$',
606 'lambdas':
r'$\Lambda_S$',
607 'bluni':
r'$BL_{uniform}$',
608 'log10lambda_a':
r'$\log\lambda_{\mathbb{A}} [\mathrm{m}]$',
609 'log10lambda_eff':
r'$\log\lambda_{eff} [\mathrm{m}]$',
610 'lambda_eff':
r'$\lambda_{eff} [\mathrm{m}]$',
611 'lambda_a':
r'$\lambda_{\mathbb{A}} [\mathrm{m}]$',
612 'liv_amp':
r'$\mathbb{A} [\mathrm{{eV}^{2-\alpha}}]$' ,
613 'log10livamp':
r'$\log \mathbb{A}[\mathrm{{eV}^{2-\alpha}}]$',
614 'dchikappaS':
r'$d\chi_{\kappa_{S}}$',
615 'dchikappaA':
r'$d\chi_{\kappa_{A}}$',
616 'dchiMinus1':
r'$d\chi_{-1}$',
617 'dchiMinus2':
r'$d\chi_{-2}$',
618 'db1':
r'$\delta b_1$',
619 'db2':
r'$\delta b_2$',
620 'db3':
r'$\delta b_3$',
621 'db4':
r'$\delta b_4$',
622 'dc1':
r'$\delta c_1$',
623 'dc2':
r'$\delta c_2$',
624 'dc4':
r'$\delta c_4$',
625 'dcl':
r'$\delta c_l$',
630 if param
in m1_names:
632 elif param
in m2_names:
634 elif param
in mc_names:
636 elif param
in eta_names:
638 elif param
in q_names:
640 elif param
in iota_names:
642 elif param
in dist_names:
644 elif param
in ra_names:
646 elif param
in dec_names:
648 elif param
in phase_names:
652 label = labels[param]
665 A data structure representing one parameter in a chain of posterior samples.
666 The Posterior
class generates instances of this class for pivoting onto
a given
667 parameter (the Posterior
class is per-Sampler oriented whereas this
class represents
668 the same one parameter
in successive samples
in the chain).
670 def __init__(self,name,posterior_samples,injected_value=None,injFref=None,trigger_values=None,prior=None):
672 Create an instance of PosteriorOneDPDF based on a table of posterior_samples.
674 @param name: A literal string name
for the parameter.
675 @param posterior_samples: A 1D array of the samples.
676 @param injected_value: The injected
or real value of the parameter.
677 @param injFref: reference frequency
for injection
678 @param trigger_values: The trigger values of the parameter (dictionary w/ IFOs
as keys).
679 @param prior: The prior value corresponding to each sample.
693 Container method. Defined as number of samples.
699 Container method . Returns posterior containing sample idx (allows slicing).
706 Return the string literal name of the parameter.
714 Return the arithmetic mean for the marginal PDF on the parameter.
722 Return the median value for the marginal PDF on the parameter.
730 Return the standard deviation of the marginal PDF on the parameter.
735 if not np.isfinite(stdev):
737 except OverflowError:
745 Return the 'standard accuracy statistic' (stacc) of the marginal
746 posterior of the parameter.
748 stacc
is a standard deviant incorporating information about the
749 accuracy of the waveform recovery. Defined
as the mean of the sum
750 of the squared differences between the points
in the PDF
751 (x_i - sampled according to the posterior)
and the true value
752 (\f$x_{true}\f$). So
for a marginalized one-dimensional PDF:
753 \f$stacc = \sqrt{\frac{1}{N}\sum_{i=1}^N (x_i-x_{\rm true})2}\f$
764 Return the injected value set at construction . If no value was set
773 Return the trigger values set at construction. If no value was set
782 Set the injected/real value of the parameter.
784 @param new_injval: The injected/real value to set.
791 Set the trigger values of the parameter.
793 @param new_trigvals: Dictionary containing trigger values
with IFO keys.
801 Return a 1D numpy.array of the samples.
808 Remove samples from posterior, analagous to numpy.delete but opperates
in place.
810 @param samples: A list containing the indexes of the samples to be removed.
817 Return a SciPy gaussian_kde (representing a Gaussian KDE) of the samples.
820 from numpy
import seterr
as np_seterr
821 from scipy
import seterr
as sp_seterr
823 np_seterr(under=
'ignore')
824 sp_seterr(under=
'ignore')
828 exfile=open(
'exception.out',
'w')
837 """Returns the KL divergence between the prior and the posterior.
838 It measures the relative information content in nats. The prior
is evaluated
839 at run time. It defaults to
None. If
None is passed, it just returns the information content
844 return np.array([1./(np.max(x)-np.min(x))
for _
in x])
846 posterior, dx = np.histogram(self.
samples,bins=36,density=
True)
847 from scipy.stats
import entropy
852 elif prior==
'uniform':
853 prior+=
'(self.samples)'
855 prior.replace(
'x',
'self.samples')
856 elif not(prior.startswith(
'np.')):
858 prior+=
'(self.samples)'
863 prior_dist = eval(prior)
867 return entropy(posterior, qk=prior_dist)
871 Evaluate probability intervals.
873 @param intervals: A list of the probability intervals [0-1]
876 samples_temp=np.sort(np.squeeze(self.samples))
878 for interval
in intervals:
881 N=np.size(samples_temp)
883 lower_idx=
int(floor((N/2.0)*(1-interval)))
887 upper_idx=N-
int(floor((N/2.0)*(1-interval)))
891 list_of_ci.append((float(samples_temp[lower_idx]),float(samples_temp[upper_idx])))
893 list_of_ci.append((
None,
None))
899 Data structure for a table of posterior samples .
901 def __init__(self,commonResultsFormatData,SimInspiralTableEntry=None,inj_spin_frame='OrbitalL', injFref=100,SnglInspiralList=None,name=None,description=None):
905 @param commonResultsFormatData: A 2D array containing the posterior
906 samples
and related data. The samples chains form the columns.
907 @param SimInspiralTableEntry: A SimInspiralTable row containing the injected values.
908 @param SnglInspiralList: A list of SnglInspiral objects containing the triggers.
909 @param inj_spin_frame: spin frame
910 @param injFref: reference frequency
911 @param name: optional name
912 @param description: optional description
915 common_output_table_header,common_output_table_raw =commonResultsFormatData
921 self._loglaliases=['deltalogl',
'posterior',
'logl',
'logL',
'likelihood']
922 self.
_logpaliases=[
'logp',
'logP',
'prior',
'logprior',
'Prior',
'logPrior']
924 common_output_table_header=[i.lower()
for i
in common_output_table_header]
928 'mchirp':
lambda inj:inj.mchirp,
929 'chirpmass':
lambda inj:inj.mchirp,
930 'mc':
lambda inj:inj.mchirp,
931 'mass1':
lambda inj:inj.mass1,
932 'm1':
lambda inj:inj.mass1,
933 'mass2':
lambda inj:inj.mass2,
934 'm2':
lambda inj:inj.mass2,
935 'mtotal':
lambda inj:float(inj.mass1)+float(inj.mass2),
936 'eta':
lambda inj:inj.eta,
938 'asym_massratio':self.
_inj_q,
939 'massratio':
lambda inj:inj.eta,
940 'sym_massratio':
lambda inj:inj.eta,
941 'time':
lambda inj:float(
get_end(inj)),
942 'time_mean':
lambda inj:float(
get_end(inj)),
943 'end_time':
lambda inj:float(
get_end(inj)),
944 'phi0':
lambda inj:inj.phi0,
945 'phi_orb':
lambda inj: inj.coa_phase,
946 'phase':
lambda inj: inj.coa_phase,
947 'dist':
lambda inj:inj.distance,
948 'distance':
lambda inj:inj.distance,
953 'dec':
lambda inj:inj.latitude,
954 'declination':
lambda inj:inj.latitude,
955 'lat':
lambda inj:inj.latitude,
956 'latitude':
lambda inj:inj.latitude,
957 'psi':
lambda inj: np.mod(inj.polarization, np.pi),
959 'polarisation':
lambda inj:inj.polarization,
960 'polarization':
lambda inj:inj.polarization,
964 'lal_amporder':
lambda inj:inj.amp_order}
970 for one_d_posterior_samples,param_name
in zip(np.hsplit(common_output_table_raw,common_output_table_raw.shape[1]),common_output_table_header):
974 if 'mchirp' in common_output_table_header
and 'eta' in common_output_table_header \
975 and (
not 'm1' in common_output_table_header)
and (
not 'm2' in common_output_table_header):
977 print(
'Inferring m1 and m2 from mchirp and eta')
982 print(
'Unable to deduce m1 and m2 from input columns')
989 if loglalias
in common_output_table_header:
993 print(
"No '%s' column in input table!"%loglalias)
998 raise RuntimeError(
"No likelihood/posterior values found!")
1002 if logpalias
in common_output_table_header:
1006 print(
"No '%s' column in input table!"%logpalias)
1008 if not 'log' in logpalias:
1011 if name
is not None:
1014 if description
is not None:
1021 Add some useful derived parameters (such as tilt angles, time delays, etc)
in the Posterior object
1026 if 'mc' in pos.names:
1028 elif 'chirpmass' in pos.names:
1029 mchirp_name =
'chirpmass'
1031 mchirp_name =
'mchirp'
1033 if 'asym_massratio' in pos.names:
1034 q_name =
'asym_massratio'
1038 if 'sym_massratio' in pos.names:
1039 eta_name=
'sym_massratio'
1040 elif 'massratio' in pos.names:
1041 eta_name=
'massratio'
1045 if 'mass1' in pos.names
and 'mass2' in pos.names:
1046 pos.append_mapping((
'm1',
'm2'),
lambda x,y:(x,y), (
'mass1',
'mass2'))
1048 if (mchirp_name
in pos.names
and eta_name
in pos.names)
and \
1049 (
'mass1' not in pos.names
or 'm1' not in pos.names)
and \
1050 (
'mass2' not in pos.names
or 'm2' not in pos.names):
1052 pos.append_mapping((
'm1',
'm2'),mc2ms,(mchirp_name,eta_name))
1054 if (mchirp_name
in pos.names
and q_name
in pos.names)
and \
1055 (
'mass1' not in pos.names
or 'm1' not in pos.names)
and \
1056 (
'mass2' not in pos.names
or 'm2' not in pos.names):
1058 pos.append_mapping((
'm1',
'm2'),q2ms,(mchirp_name,q_name))
1059 pos.append_mapping(
'eta',q2eta,q_name)
1061 if (
'm1' in pos.names
and 'm2' in pos.names
and not 'mtotal' in pos.names ):
1062 pos.append_mapping(
'mtotal',
lambda m1,m2: m1+m2, (
'm1',
'm2') )
1064 if(
'a_spin1' in pos.names): pos.append_mapping(
'a1',
lambda a:a,
'a_spin1')
1065 if(
'a_spin2' in pos.names): pos.append_mapping(
'a2',
lambda a:a,
'a_spin2')
1066 if(
'phi_spin1' in pos.names): pos.append_mapping(
'phi1',
lambda a:a,
'phi_spin1')
1067 if(
'phi_spin2' in pos.names): pos.append_mapping(
'phi2',
lambda a:a,
'phi_spin2')
1068 if(
'theta_spin1' in pos.names): pos.append_mapping(
'theta1',
lambda a:a,
'theta_spin1')
1069 if(
'theta_spin2' in pos.names): pos.append_mapping(
'theta2',
lambda a:a,
'theta_spin2')
1071 my_ifos=[
'h1',
'l1',
'v1']
1072 for ifo1,ifo2
in combinations(my_ifos,2):
1073 p1=ifo1+
'_cplx_snr_arg'
1074 p2=ifo2+
'_cplx_snr_arg'
1075 if p1
in pos.names
and p2
in pos.names:
1076 delta=np.mod(pos[p1].samples - pos[p2].samples + np.pi ,2.0*np.pi)-np.pi
1089 if (
'ra' in pos.names
or 'rightascension' in pos.names) \
1090 and (
'declination' in pos.names
or 'dec' in pos.names) \
1091 and 'time' in pos.names:
1092 from lal
import LIGOTimeGPS, TimeDelayFromEarthCenter
1093 from numpy
import array
1094 detMap = {
'H1':
'LHO_4k',
'H2':
'LHO_2k',
'L1':
'LLO_4k',
1095 'G1':
'GEO_600',
'V1':
'VIRGO',
'T1':
'TAMA_300'}
1096 if 'ra' in pos.names:
1098 else: ra_name=
'rightascension'
1099 if 'dec' in pos.names:
1101 else: dec_name=
'declination'
1103 my_ifos=[
'H1',
'L1',
'V1']
1108 location = lal.cached_detector_by_prefix[ifo].location
1109 ifo_times[ifo]=array(list(map(
lambda ra,dec,time: array([time[0]+TimeDelayFromEarthCenter(location,ra[0],dec[0],LIGOTimeGPS(float(time[0])))]), pos[ra_name].samples,pos[dec_name].samples,pos[
'time'].samples)))
1110 loc_end_time=
PosteriorOneDPDF(ifo.lower()+
'_end_time',ifo_times[ifo],injected_value=inj_time)
1111 pos.append(loc_end_time)
1112 for ifo1
in my_ifos:
1113 for ifo2
in my_ifos:
1114 if ifo1==ifo2:
continue
1115 delay_time=ifo_times[ifo2]-ifo_times[ifo1]
1120 time_delay=
PosteriorOneDPDF(ifo1.lower()+ifo2.lower()+
'_delay',delay_time,inj_delay)
1121 pos.append(time_delay)
1123 print(
'Warning: Could not import lal python bindings, check you ./configured with --enable-swig-python')
1124 print(
'This means I cannot calculate time delays')
1127 new_spin_params = [
'tilt1',
'tilt2',
'theta_jn',
'beta']
1128 if not set(new_spin_params).issubset(set(pos.names)):
1129 old_params = [
'f_ref',mchirp_name,
'eta',
'iota',
'a1',
'theta1',
'phi1']
1130 if 'a2' in pos.names: old_params += [
'a2',
'theta2',
'phi2']
1132 pos.append_mapping(new_spin_params, spin_angles, old_params)
1134 print(
"Warning: Cannot find spin parameters. Skipping spin angle calculations.")
1137 if 'a1' in pos.names:
1138 if 'tilt1' in pos.names:
1139 pos.append_mapping(
'a1z',
lambda a, tilt: a*np.cos(tilt), (
'a1',
'tilt1'))
1141 pos.append_mapping(
'a1z',
lambda x: x,
'a1')
1143 if injection
is not None:
1144 inj_az = injection.spin1z
1145 pos[
'a1z'].set_injval(inj_az)
1147 pos.append_mapping(
'a1',
lambda x: np.abs(x),
'a1z')
1149 if 'a2' in pos.names:
1150 if 'tilt2' in pos.names:
1151 pos.append_mapping(
'a2z',
lambda a, tilt: a*np.cos(tilt), (
'a2',
'tilt2'))
1153 pos.append_mapping(
'a2z',
lambda x: x,
'a2')
1155 if injection
is not None:
1156 inj_az = injection.spin2z
1157 pos[
'a2z'].set_injval(inj_az)
1159 pos.append_mapping(
'a2',
lambda x: np.abs(x),
'a2z')
1162 if (
'm1' in pos.names
and 'a1z' in pos.names)
and (
'm2' in pos.names
and 'a2z' in pos.names):
1163 pos.append_mapping(
'chi_eff',
lambda m1,a1z,m2,a2z: (m1*a1z + m2*a2z) / (m1 + m2), (
'm1',
'a1z',
'm2',
'a2z'))
1166 if (
'm1' in pos.names
and 'a1' in pos.names
and 'tilt1' in pos.names)
and (
'm2' in pos.names
and 'a2' in pos.names
and 'tilt2' in pos.names):
1167 pos.append_mapping(
'chi_tot',
lambda m1,a1,m2,a2: (m1*a1 + m2*a2) / (m1 + m2), (
'm1',
'a1',
'm2',
'a2'))
1170 if (
'm1' in pos.names
and 'a1' in pos.names
and 'tilt1' in pos.names)
and (
'm2' in pos.names
and 'a2' in pos.names
and 'tilt2' in pos.names):
1171 pos.append_mapping(
'chi_p', chi_precessing, [
'm1',
'a1',
'tilt1',
'm2',
'a2',
'tilt2'])
1174 if(
'distance' in pos.names):
1175 pos.append_mapping(
'redshift', calculate_redshift,
'distance')
1176 elif(
'dist' in pos.names):
1177 pos.append_mapping(
'redshift', calculate_redshift,
'dist')
1179 elif(
'distance_maxl' in pos.names):
1180 pos.append_mapping(
'redshift_maxldist', calculate_redshift,
'distance_maxl')
1183 if (
'm1' in pos.names)
and (
'redshift' in pos.names):
1184 pos.append_mapping(
'm1_source', source_mass, [
'm1',
'redshift'])
1186 if (
'm2' in pos.names)
and (
'redshift' in pos.names):
1187 pos.append_mapping(
'm2_source', source_mass, [
'm2',
'redshift'])
1189 if (
'mtotal' in pos.names)
and (
'redshift' in pos.names):
1190 pos.append_mapping(
'mtotal_source', source_mass, [
'mtotal',
'redshift'])
1192 if (
'mc' in pos.names)
and (
'redshift' in pos.names):
1193 pos.append_mapping(
'mc_source', source_mass, [
'mc',
'redshift'])
1196 if (
'm1' in pos.names)
and (
'redshift_maxldist' in pos.names):
1197 pos.append_mapping(
'm1_source_maxldist', source_mass, [
'm1',
'redshift_maxldist'])
1199 if (
'm2' in pos.names)
and (
'redshift_maxldist' in pos.names):
1200 pos.append_mapping(
'm2_source_maxldist', source_mass, [
'm2',
'redshift_maxldist'])
1202 if (
'mtotal' in pos.names)
and (
'redshift_maxldist' in pos.names):
1203 pos.append_mapping(
'mtotal_source_maxldist', source_mass, [
'mtotal',
'redshift_maxldist'])
1205 if (
'mc' in pos.names)
and (
'redshift_maxldist' in pos.names):
1206 pos.append_mapping(
'mc_source_maxldist', source_mass, [
'mc',
'redshift_maxldist'])
1209 if (
'log10lambda_eff' in pos.names)
and (
'redshift' in pos.names):
1210 pos.append_mapping(
'log10lambda_a',
lambda z,nonGR_alpha,wl,dist:np.log10(
lambda_a(z, nonGR_alpha, 10**wl, dist)), [
'redshift',
'nonGR_alpha',
'log10lambda_eff',
'dist'])
1211 if (
'log10lambda_eff' in pos.names)
and (
'redshift' in pos.names):
1212 pos.append_mapping(
'log10livamp',
lambda z,nonGR_alpha,wl,dist:np.log10(
amplitudeMeasure(z, nonGR_alpha, 10**wl, dist)), [
'redshift',
'nonGR_alpha',
'log10lambda_eff',
'dist'])
1213 if (
'lambda_eff' in pos.names)
and (
'redshift' in pos.names):
1214 pos.append_mapping(
'lambda_a', lambda_a, [
'redshift',
'nonGR_alpha',
'log10lambda_eff',
'dist'])
1215 if (
'lambda_eff' in pos.names)
and (
'redshift' in pos.names):
1216 pos.append_mapping(
'liv_amp', amplitudeMeasure, [
'redshift',
'nonGR_alpha',
'lambda_eff',
'dist'])
1219 new_tidal_params = [
'lam_tilde',
'dlam_tilde']
1220 old_tidal_params = [
'lambda1',
'lambda2',
'q']
1221 if 'lambda1' in pos.names
or 'lambda2' in pos.names:
1223 pos.append_mapping(new_tidal_params, symm_tidal_params, old_tidal_params)
1225 print(
"Warning: Cannot find tidal parameters. Skipping tidal calculations.")
1228 old_spin_params = [
'iota',
'theta1',
'phi1',
'theta2',
'phi2',
'beta']
1229 new_spin_params = [
'theta_jn',
'phi_jl',
'tilt1',
'tilt2',
'phi12',
'a1',
'a2',
'm1',
'm2',
'f_ref',
'phase']
1231 if pos[
'f_ref'].samples[0][0]==0.0:
1232 for name
in [
'flow',
'f_lower']:
1233 if name
in pos.names:
1234 new_spin_params = [
'theta_jn',
'phi_jl',
'tilt1',
'tilt2',
'phi12',
'a1',
'a2',
'm1',
'm2', name]
1236 print(
"No f_ref for SimInspiralTransformPrecessingNewInitialConditions().")
1237 if set(new_spin_params).issubset(set(pos.names))
and not set(old_spin_params).issubset(set(pos.names)):
1238 pos.append_mapping(old_spin_params, physical2radiationFrame, new_spin_params)
1241 if 'spin1' in pos.names:
1242 inj_a1 = inj_a2 =
None
1244 inj_a1 = sqrt(injection.spin1x*injection.spin1x + injection.spin1y*injection.spin1y + injection.spin1z*injection.spin1z)
1245 inj_a2 = sqrt(injection.spin2x*injection.spin2x + injection.spin2y*injection.spin2y + injection.spin2z*injection.spin2z)
1248 a1_samps = abs(pos[
'spin1'].samples)
1252 print(
"Warning: problem accessing spin1 values.")
1255 a2_samps = abs(pos[
'spin2'].samples)
1259 print(
"Warning: no spin2 values found.")
1265 if len(np.intersect1d(pos.names,tidalParams)) == 0:
1269 FinalSpinFits = [
'HBR2016',
'UIB2016',
'HL2016']
1270 FinalMassFits = [
'UIB2016',
'HL2016']
1271 LpeakFits = [
'UIB2016',
'HL2016']
1275 spin_angle_suffix =
''
1276 evol_suffix =
'_evol'
1278 if all([x
in pos.names
for x
in [
'tilt1_isco',
'tilt2_isco',
'phi12_isco']]):
1279 spin_angle_suffix =
'_isco'
1280 elif all([x
in pos.names
for x
in [
'tilt1',
'tilt2',
'phi12']]):
1281 evol_suffix =
'_nonevol'
1283 zero_vec = np.array([0.])
1285 tilt1_name =
'tilt1' + spin_angle_suffix
1286 tilt2_name =
'tilt2' + spin_angle_suffix
1287 phi12_name =
'phi12' + spin_angle_suffix
1288 mf_name =
'mf' + evol_suffix
1289 mf_source_name =
'mf_source' + evol_suffix
1290 mf_source_maxldist_name =
'mf_source_maxldist' + evol_suffix
1292 if (
'm1' in pos.names)
and (
'm2' in pos.names):
1293 if (
'a1' in pos.names)
and (
'a2' in pos.names):
1294 if (tilt1_name
in pos.names)
and (tilt2_name
in pos.names)
and (phi12_name
in pos.names):
1296 print(
"Using averages of fit formulae for final mass, final spin, and peak luminosity (on masses and 3D spins).")
1297 if evol_suffix ==
'_evol':
1298 print(
"Applying these to *_isco evolved spin samples and outputting *_evol samples.")
1300 print(
"Applying these to unevolved spin samples and outputting *_nonevol samples.")
1301 print(
"Final mass fits:", FinalMassFits,
"; Final spin fits:", FinalSpinFits,
"; Peak luminosity fits:", LpeakFits)
1303 pos.append_mapping(
'af' + evol_suffix,
lambda m1, m2, chi1, chi2, tilt1, tilt2, phi12:
bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, phi12,
'af', FinalSpinFits), [
'm1',
'm2',
'a1',
'a2', tilt1_name, tilt2_name, phi12_name])
1304 except Exception
as e:
1305 print(
"Could not calculate %s. The error was: %s"%(
'af' + evol_suffix,
str(e)))
1307 pos.append_mapping(
'afz' + evol_suffix,
lambda m1, m2, chi1, chi2, tilt1, tilt2:
bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, zero_vec,
'afz', FinalSpinFits), [
'm1',
'm2',
'a1',
'a2', tilt1_name, tilt2_name])
1308 except Exception
as e:
1309 print(
"Could not calculate %s. The error was: %s"%(
'afz' + evol_suffix,
str(e)))
1311 pos.append_mapping(mf_name,
lambda m1, m2, chi1, chi2, tilt1, tilt2:
bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, zero_vec,
'Mf', FinalMassFits), [
'm1',
'm2',
'a1',
'a2', tilt1_name, tilt2_name])
1312 except Exception
as e:
1313 print(
"Could not calculate %s. The error was: %s"%(mf_name,
str(e)))
1315 pos.append_mapping(
'l_peak' + evol_suffix,
lambda m1, m2, chi1, chi2, tilt1, tilt2:
bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, zero_vec,
'Lpeak', LpeakFits), [
'm1',
'm2',
'a1',
'a2', tilt1_name, tilt2_name])
1316 except Exception
as e:
1317 print(
"Could not calculate %s. The error was: %s"%(
'l_peak' + evol_suffix,
str(e)))
1318 elif (
'a1z' in pos.names)
and (
'a2z' in pos.names):
1320 print(
"Using averages for final mass, final spin, and peak luminosity (on masses and projected spin components).")
1321 print(
"Outputting *_evol samples because spin evolution is trivial in this nonprecessing case.")
1322 print(
"Final mass fits:", FinalMassFits,
"; Final spin fits:", FinalSpinFits,
"; Peak luminosity fits:", LpeakFits)
1325 pos.append_mapping(
'afz_evol',
lambda m1, m2, chi1, chi2:
bbh_average_fits_precessing(m1, m2, abs(chi1), abs(chi2), 0.5*np.pi*(1. - np.sign(chi1)), 0.5*np.pi*(1. - np.sign(chi2)), zero_vec,
'afz', FinalSpinFits), [
'm1',
'm2',
'a1z',
'a2z'])
1326 except Exception
as e:
1327 print(
"Could not calculate afz_evol. The error was: %s"%(
str(e)))
1329 pos.append_mapping(
'af_evol',
lambda a: abs(a),
'afz_evol')
1330 except Exception
as e:
1331 print(
"Could not calculate af_evol. The error was: %s"%(
str(e)))
1333 pos.append_mapping(
'mf_evol',
lambda m1, m2, chi1, chi2:
bbh_average_fits_precessing(m1, m2, abs(chi1), abs(chi2), 0.5*np.pi*(1. - np.sign(chi1)), 0.5*np.pi*(1. - np.sign(chi2)), zero_vec,
'Mf', FinalMassFits), [
'm1',
'm2',
'a1z',
'a2z'])
1334 except Exception
as e:
1335 print(
"Could not calculate mf_evol. The error was: %s"%(
str(e)))
1337 pos.append_mapping(
'l_peak_evol',
lambda m1, m2, chi1, chi2:
bbh_average_fits_precessing(m1, m2, abs(chi1), abs(chi2), 0.5*np.pi*(1. - np.sign(chi1)), 0.5*np.pi*(1. - np.sign(chi2)), zero_vec,
'Lpeak', LpeakFits), [
'm1',
'm2',
'a1z',
'a2z'])
1338 except Exception
as e:
1339 print(
"Could not calculate l_peak_evol. The error was: %s"%(
str(e)))
1341 print(
"Could not calculate final parameters or Lpeak. Found samples for a1 and a2 but not for tilt angles and phi12 or spin components (a1z and a2z).")
1344 print(
"Using averages of fit formulae for final mass, final spin, and peak luminosity (on masses and zero spins).")
1345 print(
"Outputting *_evol samples because spin evolution is trivial in this nonspinning case.")
1346 print(
"Final mass fits:", FinalMassFits,
"; Final spin fits:", FinalSpinFits,
"; Peak luminosity fits:", LpeakFits)
1348 pos.append_mapping(
'afz_evol',
lambda m1, m2:
bbh_average_fits_precessing(m1, m2, zero_vec, zero_vec, zero_vec, zero_vec, zero_vec,
'afz', FinalSpinFits), [
'm1',
'm2'])
1349 except Exception
as e:
1350 print(
"Could not calculate afz_evol. The error was: %s"%(
str(e)))
1352 pos.append_mapping(
'af_evol',
lambda a: abs(a),
'afz_evol')
1353 except Exception
as e:
1354 print(
"Could not calculate af_evol. The error was: %s"%(
str(e)))
1356 pos.append_mapping(
'mf_evol',
lambda m1, m2:
bbh_average_fits_precessing(m1, m2, zero_vec, zero_vec, zero_vec, zero_vec, zero_vec,
'Mf', FinalMassFits), [
'm1',
'm2'])
1357 except Exception
as e:
1358 print(
"Could not calculate mf_evol. The error was: %s"%(
str(e)))
1360 pos.append_mapping(
'l_peak_evol',
lambda m1, m2:
bbh_average_fits_precessing(m1, m2, zero_vec, zero_vec, zero_vec, zero_vec, zero_vec,
'Lpeak', LpeakFits), [
'm1',
'm2'])
1361 except Exception
as e:
1362 print(
"Could not calculate l_peak_evol. The error was: %s"%(
str(e)))
1365 if (mf_name
in pos.names)
and (
'redshift' in pos.names):
1367 pos.append_mapping(mf_source_name, source_mass, [mf_name,
'redshift'])
1368 except Exception
as e:
1369 print(
"Could not calculate final source frame mass. The error was: %s"%(
str(e)))
1371 if (mf_name
in pos.names)
and (
'redshift_maxldist' in pos.names):
1373 pos.append_mapping(mf_source_maxldist_name, source_mass, [mf_name,
'redshift_maxldist'])
1374 except Exception
as e:
1375 print(
"Could not calculate final source frame mass using maxldist redshift. The error was: %s"%(
str(e)))
1378 if (
'mtotal_source' in pos.names)
and (mf_source_name
in pos.names):
1380 pos.append_mapping(
'e_rad' + evol_suffix,
lambda mtot_s, mf_s: mtot_s-mf_s, [
'mtotal_source', mf_source_name])
1381 except Exception
as e:
1382 print(
"Could not calculate radiated energy. The error was: %s"%(
str(e)))
1384 if (
'mtotal_source_maxldist' in pos.names)
and (mf_source_maxldist_name
in pos.names):
1386 pos.append_mapping(
'e_rad_maxldist' + evol_suffix,
lambda mtot_s, mf_s: mtot_s-mf_s, [
'mtotal_source_maxldist', mf_source_maxldist_name])
1387 except Exception
as e:
1388 print(
"Could not calculate radiated energy using maxldist redshift results. The error was: %s"%(
str(e)))
1392 Returns a new Posterior object that contains a bootstrap
1400 samples.append(oneDpos.samples)
1402 samplesBlock=np.hstack(samples)
1404 bootstrapSamples=samplesBlock[:,:]
1405 Nsamp=bootstrapSamples.shape[0]
1407 rows=np.vsplit(samplesBlock,Nsamp)
1409 for i
in range(Nsamp):
1410 bootstrapSamples[i,:]=random.choice(rows)
def delete_samples_by_idx(self, samples):
    """
    Remove samples from all OneDPosteriors.

    @param samples: The indexes of the samples to be removed.
    """
    # Forward the deletion to every one-D marginal so all parameter
    # chains stay aligned row-for-row.
    for _, marginal in self:
        marginal.delete_samples_by_idx(samples)
    return
def delete_NaN_entries(self, param_list):
    """
    Remove samples containing NaN in request params.

    @param param_list: The parameters to be checked for NaNs.
    """
    nan_idxs = np.array(())
    nan_dict = {}
    for param in param_list:
        # Flag any sample row of this parameter that contains a NaN.
        nan_bool_array = np.isnan(self[param].samples).any(1)
        idxs = np.where(nan_bool_array == True)[0]
        if len(idxs) > 0:
            nan_dict[param] = len(idxs)
            nan_idxs = np.append(nan_idxs, idxs)
    total_samps = len(self)
    nan_samps = len(nan_idxs)
    # Use a value comparison, not an identity test against the literal 0:
    # "x is 0" relies on CPython small-int interning and raises a
    # SyntaxWarning on modern Pythons.
    if nan_samps != 0:
        print("WARNING: removing %i of %i total samples due to NaNs:"% (nan_samps,total_samps))
        for param in nan_dict.keys():
            print("\t%i NaNs in %s."%(nan_dict[param],param))
        self.delete_samples_by_idx(nan_idxs)
    return
1449 """Returns the Deviance Information Criterion estimated from the
1450 posterior samples. The DIC is defined
as -2*(<log(L)> -
1451 Var(log(L))); smaller values are
"better."
1455 return -2.0*(np.mean(self.
_logL) - np.var(self.
_logL))
1460 Return the injected values.
1469 Return the trigger values .
1475 def _total_incl_restarts(self, samples):
1478 for x
in samples[1:]:
1482 total += samples[-1]
1487 Returns the number of cycles in the longest chain
1491 header=header.split()
1492 if not (
'cycle' in header):
1493 raise RuntimeError(
"Cannot compute number of cycles in longest chain")
1495 cycle_col=header.index(
'cycle')
1496 if 'chain' in header:
1497 chain_col=header.index(
'chain')
1498 chain_indexes=np.unique(samps[:,chain_col])
1500 for ind
in chain_indexes:
1501 chain_cycle_samps=samps[ samps[:,chain_col] == ind, cycle_col ]
1503 return int(max_cycle)
1510 Set the injected values of the parameters.
1512 @param injection: A SimInspiralTable row object containing the injected parameters.
1514 if injection
is not None:
1516 for name,onepos
in self:
1518 if new_injval
is not None:
1519 self[name].set_injval(new_injval)
1523 Set the trigger values of the parameters.
1525 @param triggers: A list of SnglInspiral objects.
1527 if triggers
is not None:
1529 for name,onepos
in self:
1531 if new_trigvals
is not None:
1532 self[name].set_trigvals(new_trigvals)
1535 def _getinjpar(self,paramname):
1537 Map parameter names to parameters in a SimInspiralTable .
1541 if paramname.lower().strip() == key.lower().strip():
1548 def _gettrigpar(self,paramname):
1550 Map parameter names to parameters in a SnglInspiral.
1555 if paramname.lower().strip() == key.lower().strip():
1560 except AttributeError:
1566 Container method . Returns posterior chain,one_d_pos, with name one_d_pos.name.
def __len__(self):
    """
    Container method. Defined as number of samples.
    """
    # One stored log-likelihood value per posterior sample.
    sample_count = len(self._logL)
    return sample_count
1578 Container method. Returns iterator from self.
forward for use
in
1579 for (...)
in (...) etc.
1585 Generate a forward iterator (in sense of list of names) over Posterior
1586 with name,one_d_pos.
1589 while current_item < self.
dim:
1590 name=list(self.
_posterior.keys())[current_item]
1597 Generate a forward iterator over the list of samples corresponding to
1598 the data stored within the Posterior instance. These are returned as
1599 ParameterSamples instances.
1603 while current_item < len(self):
1604 sample_array=(np.squeeze(pos_array[current_item,:]))
1612 Return number of parameters.
1619 Return list of parameter names.
1622 for key,value
in self:
1623 nameslist.append(key)
1629 Return dict {paramName:paramMean} .
1632 for name,pos
in self:
1633 meansdict[name]=pos.mean
1639 Return dict {paramName:paramMedian} .
1642 for name,pos
in self:
1643 mediansdict[name]=pos.median
1649 Return dict {paramName:paramStandardDeviation} .
1652 for name,pos
in self:
1653 stdsdict[name]=pos.stdev
1659 Return qualified string containing the 'name' of the Posterior instance.
1666 Return qualified string containing a 'description' of the Posterior instance.
def append(self, one_d_posterior):
    """
    Container method. Add a new OneDParameter to the Posterior instance.
    """
    # The internal mapping is keyed by the parameter's own name.
    key = one_d_posterior.name
    self._posterior[key] = one_d_posterior
def pop(self, param_name):
    """
    Container method. Remove PosteriorOneDPDF from the Posterior instance.
    """
    # Delegates to dict.pop; a missing name raises KeyError as before.
    return self._posterior.pop(param_name)
1685 Append posteriors pos1,pos2,...=func(post_names)
1690 if isinstance(post_names, str):
1691 old_post = copy.deepcopy(self[post_names])
1692 old_inj = old_post.injval
1693 old_trigs = old_post.trigvals
1695 new_inj =
func(old_inj)
1700 for IFO
in old_trigs.keys():
1701 new_trigs[IFO] =
func(old_trigs[IFO])
1705 samps =
func(old_post.samples)
1706 new_post =
PosteriorOneDPDF(new_param_names, samps, injected_value=new_inj, trigger_values=new_trigs)
1707 if new_post.samples.ndim == 0:
1708 print(
"WARNING: No posterior calculated for %s ..." % new_post.name)
1713 old_posts = [copy.deepcopy(self[post_name])
for post_name
in post_names]
1714 old_injs = [post.injval
for post
in old_posts]
1715 old_trigs = [post.trigvals
for post
in old_posts]
1716 samps =
func(*[post.samples
for post
in old_posts])
1718 if isinstance(new_param_names, str):
1719 if None not in old_injs:
1720 inj =
func(*old_injs)
1723 if None not in old_trigs:
1725 for IFO
in old_trigs[0].keys():
1726 oldvals = [param[IFO]
for param
in old_trigs]
1727 new_trigs[IFO] =
func(*oldvals)
1730 new_post =
PosteriorOneDPDF(new_param_names, samps, injected_value=inj, trigger_values=new_trigs)
1734 if None not in old_injs:
1735 injs =
func(*old_injs)
1737 injs = [
None for name
in new_param_names]
1738 if None not in old_trigs:
1739 new_trigs = [{}
for param
in range(len(new_param_names))]
1740 for IFO
in old_trigs[0].keys():
1741 oldvals = [param[IFO]
for param
in old_trigs]
1742 newvals =
func(*oldvals)
1743 for param,newval
in enumerate(newvals):
1744 new_trigs[param][IFO] = newval
1746 new_trigs = [
None for param
in range(len(new_param_names))]
1747 if not samps: return()
1748 new_posts = [
PosteriorOneDPDF(new_param_name,samp,injected_value=inj,trigger_values=new_trigs)
for (new_param_name,samp,inj,new_trigs)
in zip(new_param_names,samps,injs,new_trigs)]
1749 for post
in new_posts:
1750 if post.samples.ndim == 0:
1751 print(
"WARNING: No posterior calculated for %s ..." % post.name)
1756 def _average_posterior(self, samples, post_name):
1758 Returns the average value of the 'post_name' column of the
1762 for samp
in samples:
1763 ap = ap + samp[post_name]
1764 return ap / len(samples)
def _average_posterior_like_prior(self, samples, logl_name, prior_name, log_bias = 0):
    """
    Returns the average value of the posterior assuming that the
    'logl_name' column contains log(L) and the 'prior_name' column
    contains the prior (un-logged).
    """
    accum = 0.0
    for entry in samples:
        # De-bias the log-likelihood before exponentiating so the
        # intermediate stays representable as a double.
        accum += np.exp(entry[logl_name] - log_bias) * entry[prior_name]
    return accum / len(samples)
def _bias_factor(self):
    """
    Returns a sensible bias factor for the evidence so that
    integrals are representable as doubles.
    """
    # The mean log-likelihood keeps exp(logL - bias) within double range.
    bias = np.mean(self._logL)
    return bias
1786 Returns the log of the direct-integration evidence for the
1789 allowed_coord_names=["spin1",
"spin2",
"a1",
"phi1",
"theta1",
"a2",
"phi2",
"theta2",
1790 "iota",
"psi",
"ra",
"dec",
1791 "phi_orb",
"phi0",
"dist",
"time",
"mc",
"mchirp",
"chirpmass",
"q"]
1793 header=header.split()
1794 coord_names=[name
for name
in allowed_coord_names
if name
in header]
1795 coordinatized_samples=[
PosteriorSample(row, header, coord_names)
for row
in samples]
1796 tree=
KDTree(coordinatized_samples)
1798 if "prior" in header
and "logl" in header:
1801 elif "prior" in header
and "likelihood" in header:
1804 elif "post" in header:
1805 return np.log(tree.integrate(
lambda samps: self.
_average_posterior(samps,
"post"), boxing))
1806 elif "posterior" in header:
1807 return np.log(tree.integrate(
lambda samps: self.
_average_posterior(samps,
"posterior"), boxing))
1809 raise RuntimeError(
"could not find 'post', 'posterior', 'logl' and 'prior', or 'likelihood' and 'prior' columns in output to compute direct integration evidence")
1812 """Returns an approximation to the log(evidence) obtained by
1813 fitting an ellipse around the highest-posterior samples and
1814 performing the harmonic mean approximation within the ellipse.
1815 Because the ellipse should be well-sampled, this provides a
1816 better approximation to the evidence than the full-domain HM.
"""
1817 allowed_coord_names=["spin1",
"spin2",
"a1",
"phi1",
"theta1",
"a2",
"phi2",
"theta2",
1818 "iota",
"psi",
"ra",
"dec",
1819 "phi_orb",
"phi0",
"dist",
"time",
"mc",
"mchirp",
"chirpmass",
"q"]
1821 header=header.split()
1823 n=
int(0.05*samples.shape[0])
1827 coord_names=[name
for name
in allowed_coord_names
if name
in header]
1828 indexes=np.argsort(self.
_logL[:,0])
1830 my_samples=samples[indexes[-n:], :]
1831 my_samples=np.array([
PosteriorSample(sample,header,coord_names).coord()
for sample
in my_samples])
1833 mu=np.mean(my_samples, axis=0)
1834 cov=np.cov(my_samples, rowvar=0)
1837 for mysample
in my_samples:
1838 d=np.dot(mysample-mu, np.linalg.solve(cov, mysample-mu))
1846 for sample,logl
in zip(samples, self.
_logL):
1848 d=np.dot(coord-mu, np.linalg.solve(cov, coord-mu))
1851 ellipse_logl.append(logl)
1852 ellipse_samples.append(sample)
1854 if len(ellipse_samples) > 5*n:
1855 print(
'WARNING: ellpise evidence region encloses significantly more samples than %d'%n)
1857 ellipse_samples=np.array(ellipse_samples)
1858 ellipse_logl=np.array(ellipse_logl)
1860 ndim = len(coord_names)
1861 ellipse_volume=np.pi**(ndim/2.0)*d0**(ndim/2.0)/special.gamma(ndim/2.0+1)*np.sqrt(np.linalg.det(cov))
1864 prior_index=header.index(
'prior')
1865 pmu=np.mean(ellipse_samples[:,prior_index])
1866 pstd=np.std(ellipse_samples[:,prior_index])
1868 print(
'WARNING: prior variation greater than 100\\% over elliptical volume.')
1869 approx_prior_integral=ellipse_volume*pmu
1872 approx_prior_integral=ellipse_volume
1874 ll_bias=np.mean(ellipse_logl)
1875 ellipse_logl = ellipse_logl - ll_bias
1877 return np.log(approx_prior_integral) - np.log(np.mean(1.0/np.exp(ellipse_logl))) + ll_bias
def harmonic_mean_evidence(self):
    """
    Returns the log of the harmonic mean evidence for the set of
    posterior samples.
    """
    bias = self._bias_factor()
    # Harmonic mean of the de-biased likelihoods, shifted back by the bias.
    shifted = self._logL - bias
    return bias + np.log(1 / np.mean(1 / np.exp(shifted)))
def _posMaxL(self):
    """
    Find the sample with maximum likelihood probability. Returns value
    of likelihood and index of sample .
    """
    logl_vals = self._logL
    best_i = 0
    best_logl = logl_vals[0]
    # Linear scan; strict '>' keeps the first occurrence of the maximum.
    for idx, logl in enumerate(logl_vals):
        if logl > best_logl:
            best_logl = logl
            best_i = idx
    return best_logl, best_i
def _posMap(self):
    """
    Find the sample with maximum a posteriori probability. Returns value
    of posterior and index of sample .
    """
    logl_vals = self._logL
    if self._logP is None:
        # Without prior samples the posterior ordinate is undefined.
        return None
    logp_vals = self._logP
    best_i = 0
    best_pos = logl_vals[0] + logp_vals[0]
    # Strict '>' keeps the first occurrence of the maximum.
    for idx in range(len(logl_vals)):
        candidate = logl_vals[idx] + logp_vals[idx]
        if candidate > best_pos:
            best_pos = candidate
            best_i = idx
    return best_pos, best_i
def _print_table_row(self, name, entries):
    """
    Print a html table row representation of
    name:item1,item2,item3,...
    """
    # Assemble the cells then join once, instead of repeated +=.
    cells = ['<tr><td>%s</td>' % name]
    cells.extend('<td>%s</td>' % entry for entry in entries)
    cells.append('</tr>')
    return ''.join(cells)
1936 Return the maximum likelihood probability and the corresponding
1941 for param_name
in self.
names:
1942 maxLvals[param_name]=self.
_posterior[param_name].samples[max_i][0]
1944 return (max_logl,maxLvals)
1949 Return the maximum a posteriori probability and the corresponding
1954 for param_name
in self.
names:
1955 maxPvals[param_name]=self.
_posterior[param_name].samples[max_i][0]
1957 return (max_pos,maxPvals)
1962 Return an (M,N) numpy.array of posterior samples; M = len(self);
1967 for param_name,one_pos
in self:
1968 column=np.array(one_pos.samples)
1969 header_string+=param_name+
'\t'
1970 posterior_table.append(column)
1971 posterior_table=tuple(posterior_table)
1972 return np.column_stack(posterior_table),header_string
1976 Dump the posterior table to a file in the
'common format'.
1978 posterior_table, header_string = self.samples()
1984 header=header_string,
1989 Returns an approximation to the Gelman-Rubin statistic (see
1990 Gelman, A. and Rubin, D. B., Statistical Science, Vol 7,
1991 No. 4, pp. 457--511 (1992))
for the parameter given, accurate
1992 as the number of samples
in each chain goes to infinity. The
1993 posterior samples must have a column named
'chain' so that the
1994 different chains can be separated.
1996 from numpy
import seterr
as np_seterr
1997 np_seterr(all=
'raise')
1999 if "chain" in self.
names:
2000 chains=np.unique(self[
"chain"].samples)
2001 chain_index=self.
names.index(
"chain")
2002 param_index=self.
names.index(pname)
2004 chainData=[data[ data[:,chain_index] == chain, param_index]
for chain
in chains]
2005 allData=np.concatenate(chainData)
2006 chainMeans=[np.mean(data)
for data
in chainData]
2007 chainVars=[np.var(data)
for data
in chainData]
2008 BoverN=np.var(chainMeans)
2009 W=np.mean(chainVars)
2010 sigmaHat2=W + BoverN
2012 VHat=sigmaHat2 + BoverN/m
2016 print(
"Error when computer Gelman-Rubin R statistic for %s. This may be a fixed parameter"%pname)
2020 raise RuntimeError(
'could not find necessary column header "chain" in posterior samples')
2023 """Returns a healpix map in the pixel ordering that represents the
2024 posterior density (per square degree) on the sky. The pixels
2025 will be chosen to have at least the given resolution (in
2032 while hp.nside2resol(nside, arcmin=
True) > resol*60.0/2.0:
2035 ras = self[
'ra'].samples.squeeze()
2036 decs = self[
'dec'].samples.squeeze()
2039 thetas = np.pi/2.0 - decs
2042 inds = hp.ang2pix(nside, thetas, phis, nest=
False)
2043 counts = np.bincount(inds)
2044 if counts.shape[0] < hp.nside2npix(nside):
2045 counts = np.concatenate((counts, np.zeros(hp.nside2npix(nside) - counts.shape[0])))
2048 hpmap = hp.sphtfunc.smoothing(counts, sigma=resol*np.pi/180.0)
2050 hpmap = hpmap / (np.sum(hpmap)*hp.nside2pixarea(nside, degrees=
True))
2053 hpmap = hp.reorder(hpmap, r2n=
True)
2059 Define a string representation of the Posterior class ; returns
2060 a html formatted table of various properties of posteriors.
2062 return_val='<table border="1" id="statstable"><tr><th/>'
2063 column_names=[
'maP',
'maxL',
'stdev',
'mean',
'median',
'stacc',
'injection value']
2066 IFOs = [trig.ifo
for trig
in self.
_triggers]
2068 column_names.append(IFO+
' trigger values')
2070 for column_name
in column_names:
2071 return_val+=
'<th>%s</th>'%column_name
2075 for name,oned_pos
in self:
2078 maxL=oned_pos.samples[max_i][0]
2080 maP=oned_pos.samples[max_j][0]
2081 mean=
str(oned_pos.mean)
2082 stdev=
str(oned_pos.stdev)
2083 median=
str(np.squeeze(oned_pos.median))
2084 stacc=
str(oned_pos.stacc)
2085 injval=
str(oned_pos.injval)
2086 trigvals=oned_pos.trigvals
2088 row = [maP,maxL,stdev,mean,median,stacc,injval]
2092 row.append(
str(trigvals[IFO]))
2097 return_val+=
'</table>'
2100 parser.feed(return_val)
2104 rough_string = tostring(elem,
'utf-8')
2105 reparsed = minidom.parseString(rough_string)
2106 return_val=reparsed.toprettyxml(indent=
" ")
2107 return return_val[len(
'<?xml version="1.0" ?>')+1:]
def _inj_m1(self, inj):
    """
    Return the mapping of (mchirp,eta)->m1; m1>m2 i.e. return the greater of the mass
    components (m1) calculated from the chirp mass and the symmetric mass ratio.

    @param inj: a custom type with the attributes 'mchirp' and 'eta'.
    """
    # mc2ms (defined elsewhere in this module) returns (m1, m2) with m1 >= m2.
    mass1, mass2 = mc2ms(inj.mchirp, inj.eta)
    return mass1
def _inj_m2(self, inj):
    """
    Return the mapping of (mchirp,eta)->m2; m1>m2 i.e. return the lesser of the mass
    components (m2) calculated from the chirp mass and the symmetric mass ratio.

    @param inj: a custom type with the attributes 'mchirp' and 'eta'.
    """
    # mc2ms (defined elsewhere in this module) returns (m1, m2) with m1 >= m2.
    mass1, mass2 = mc2ms(inj.mchirp, inj.eta)
    return mass2
def _inj_q(self, inj):
    """
    Return the mapping of (mchirp,eta)->q; m1>m2 i.e. return the mass ratio q=m2/m1.

    @param inj: a custom type with the attributes 'mchirp' and 'eta'.
    """
    # mc2ms (defined elsewhere in this module) returns (m1, m2) with m1 >= m2,
    # so the ratio below is <= 1.
    mass1, mass2 = mc2ms(inj.mchirp, inj.eta)
    return mass2 / mass1
def _inj_longitude(self, inj):
    """
    Return the mapping of longitude found in inj to the interval [0,2*pi).

    @param inj: a custom type with the attribute 'longitude'.
    """
    # In-range values pass through untouched.
    if 0.0 <= inj.longitude <= 2 * pi_constant:
        return inj.longitude
    # Wrap out-of-range angles into [0, 2*pi) and warn about the remapping.
    turns = float(inj.longitude) / (2 * pi_constant)
    maplong = 2 * pi_constant * (turns - floor(turns))
    print("Warning: Injected longitude/ra (%s) is not within [0,2\\pi)! Angles are assumed to be in radians so this will be mapped to [0,2\\pi). Mapped value is: %s."%(str(inj.longitude),str(maplong)))
    return maplong
2155 def _inj_spins(self, inj, frame='OrbitalL'):
2157 from lalsimulation
import SimInspiralTransformPrecessingWvf2PE
2166 axis = lalsim.SimInspiralGetFrameAxisFromString(frame)
2173 iota=inj.inclination
2174 m1, m2 = inj.mass1, inj.mass2
2175 mc, eta = inj.mchirp, inj.eta
2177 a1, theta1, phi1 =
cart2sph(s1x, s1y, s1z)
2178 a2, theta2, phi2 =
cart2sph(s2x, s2y, s2z)
2180 spins = {
'a1':a1,
'theta1':theta1,
'phi1':phi1,
2181 'a2':a2,
'theta2':theta2,
'phi2':phi2,
2184 if inj.spin1x == inj.spin1y == inj.spin2x == inj.spin2y == 0.:
2185 spins[
'a1z'] = inj.spin1z
2186 spins[
'a2z'] = inj.spin2z
2189 S1 = np.hstack((s1x, s1y, s1z))
2190 S2 = np.hstack((s2x, s2y, s2z))
2192 zhat = np.array([0., 0., 1.])
2193 aligned_comp_spin1 =
array_dot(S1, zhat)
2194 aligned_comp_spin2 =
array_dot(S2, zhat)
2195 chi = aligned_comp_spin1 + aligned_comp_spin2 + \
2196 np.sqrt(1. - 4.*eta) * (aligned_comp_spin1 - aligned_comp_spin2)
2202 spins[
'beta'] = beta
2203 spins[
'spinchi'] = chi
2207 if not frame==
'OrbitalL':
2208 print(
"I cannot calculate the injected values of the spin angles unless frame is OrbitalL. Skipping...")
2211 theta_jn,phi_jl,tilt1,tilt2,phi12,chi1,chi2=SimInspiralTransformPrecessingWvf2PE(inj.inclination,inj.spin1x, inj.spin1y, inj.spin1z,inj.spin2x, inj.spin2y, inj.spin2z, m1, m2, f_ref, inj.coa_phase)
2212 spins[
'theta_jn']=theta_jn
2213 spins[
'phi12']=phi12
2214 spins[
'tilt1']=tilt1
2215 spins[
'tilt2']=tilt2
2216 spins[
'phi_jl']=phi_jl
2221 iota_back,a1x_back,a1y_back,a1z_back,a2x_back,a2y_back,a2z_back = \
2222 lalsim.SimInspiralTransformPrecessingNewInitialConditions(theta_jn,phi_jl,tilt1,tilt2,phi12,chi1,chi2,m1*lal.MSUN_SI,m2*lal.MSUN_SI,f_ref,inj.coa_phase)
2223 print(a1x_back,a1y_back,a1z_back)
2224 print(a2x_back,a2y_back,a2z_back)
2232 Data structure for a table of posterior samples .
2234 def __init__(self,commonResultsFormatData,SimBurstTableEntry=None,injFref=None,SnglBurstList=None,name=None,description=None):
2238 @param commonResultsFormatData: A 2D array containing the posterior
2239 samples
and related data. The samples chains form the columns.
2240 @param SimBurstTableEntry: A ligolw.lscstables.SimBurst row containing the injected values.
2241 @param SnglBurstList: A list of SnglBurst objects containing the triggers.
2242 @param injFref: reference frequency
in injection
2243 @param name: optional name
for this Posterior
2244 @param description: optional description
for this Posterior
2246 common_output_table_header,common_output_table_raw =commonResultsFormatData
2254 common_output_table_header=[i.lower()
for i
in common_output_table_header]
2258 'f0':
lambda inj:inj.frequency,
2259 'frequency':
lambda inj:inj.frequency,
2260 'centre_frequency':
lambda inj:inj.frequency,
2261 'quality':
lambda inj:inj.q,
2262 'hrss':
lambda inj:inj.hrss,
2263 'loghrss':
lambda inj:np.log(inj.hrss),
2264 'polar_angle':
lambda inj:inj.pol_ellipse_angle,
2265 'pol_ellipse_angle':
lambda inj:inj.pol_ellipse_angle,
2266 'pol_ellipse_e':
lambda inj:inj.pol_ellipse_e,
2267 'alpha':
lambda inj:inj.pol_ellipse_angle,
2268 'polar_eccentricity':
lambda inj:inj.pol_ellipse_e,
2269 'eccentricity':
lambda inj:inj.pol_ellipse_e,
2270 'time':
lambda inj:float(
get_end(inj)),
2271 'end_time':
lambda inj:float(
get_end(inj)),
2276 'dec':
lambda inj:inj.dec,
2277 'declination':
lambda inj:inj.dec,
2278 'lat':
lambda inj:inj.dec,
2279 'latitude':
lambda inj:inj.dec,
2280 'psi':
lambda inj: np.mod(inj.psi, np.pi),
2282 'polarisation':
lambda inj:inj.psi,
2283 'polarization':
lambda inj:inj.psi,
2284 'duration':
lambda inj:inj.duration,
2290 for one_d_posterior_samples,param_name
in zip(np.hsplit(common_output_table_raw,common_output_table_raw.shape[1]),common_output_table_header):
2297 if loglalias
in common_output_table_header:
2301 print(
"No '%s' column in input table!"%loglalias)
2306 raise RuntimeError(
"No likelihood/posterior values found!")
2310 if logpalias
in common_output_table_header:
2314 print(
"No '%s' column in input table!"%logpalias)
2316 if not 'log' in logpalias:
2318 if name
is not None:
2321 if description
is not None:
2329 def _inj_longitude(self,inj):
2331 Return the mapping of longitude found in inj to the interval [0,2*pi).
2333 @param inj: a custom type
with the attribute
'longitude'.
2335 if inj.ra>2*pi_constant
or inj.ra<0.0:
2336 maplong=2*pi_constant*(((float(inj.ra)/(2*pi_constant)) - floor(((float(inj.ra))/(2*pi_constant)))))
2337 print(
"Warning: Injected longitude/ra (%s) is not within [0,2\\pi)! Angles are assumed to be in radians so this will be mapped to [0,2\\pi). Mapped value is: %s."%(
str(inj.ra),
str(maplong)))
2348 Construct a kD-tree from a sequence of objects. Each object
2349 should
return its coordinates using obj.coord().
2351 if len(objects) == 0:
2352 raise RuntimeError(
"cannot construct kD-tree out of zero objects---you may have a repeated sample in your list")
2353 elif len(objects) == 1:
2368 sorted_objects=sorted(self.
_objects, key=
lambda obj: (obj.coord())[longest_dim])
2369 N = len(sorted_objects)
2370 bound=0.5*(sorted_objects[
int(N/2)].coord()[longest_dim] + sorted_objects[
int(N/2)-1].coord()[longest_dim])
2371 low = [obj
for obj
in self.
_objects if obj.coord()[longest_dim] < bound]
2372 high = [obj
for obj
in self.
_objects if obj.coord()[longest_dim] >= bound]
2376 low = [obj
for obj
in self.
_objects if obj.coord()[longest_dim]==bound]
2377 high = [obj
for obj
in self.
_objects if obj.coord()[longest_dim] > bound]
2381 def _same_coords(self, objects):
2383 True if and only
if all the given objects have the same
2386 if len(objects) <= 1:
2388 coords = [obj.coord()
for obj
in objects]
2390 for ci
in coords[1:]:
2391 if not np.all(ci == c0):
2395 def _bounds_of_objects(self):
2397 Bounds of the objects contained in the tree.
2402 low=np.minimum(low,obj.coord())
2403 high=np.maximum(high,obj.coord())
2406 def _longest_dimension(self):
2408 Longest dimension of the tree bounds.
2412 return np.argmax(widths)
2416 Returns the objects in the tree.
2422 Iterator over all the objects contained in the tree.
2428 Returns the left tree.
2434 Returns the right tree.
2440 Returns the dimension along which this level of the kD-tree
2447 Returns the coordinates of the lower-left and upper-right
2448 corners of the bounding box
for this tree: low_left, up_right
2454 Returns the volume of the bounding box of the tree.
2458 for l,h
in zip(low,high):
2464 Returns the integral of f(objects) over the tree. The
2465 optional boxing parameter determines how deep to descend into
2466 the tree before computing f.
2474 return tree.volume()*f(tree._objects)
2479 return self.
operate(x,y,boxing=boxing)
2481 def operate(self,f,g,boxing=64):
2483 Operates on tree nodes exceeding boxing parameter depth.
2494 A kD-tree suitable for splitting parameter spaces
and counting hypervolumes.
2495 Is modified
from the KDTree
class so that bounding boxes are stored. This means that
2496 there are no longer gaps
in the hypervolume once the samples have been split into groups.
2498 def __init__(self, objects,boundingbox,dims=0):
2500 Construct a kD-tree from a sequence of objects. Each object
2501 should
return its coordinates using obj.coord().
2502 the obj should also store the bounds of the hypervolume its found
in.
2503 for non-leaf objects we need the name of the dimension split
and value at split.
2508 if len(objects) == 0:
2509 raise RuntimeError(
"cannot construct kD-tree out of zero objects---you may have a repeated sample in your list")
2510 elif len(objects) == 1:
2519 sorted_objects=sorted(self.
_objects, key=
lambda obj: (obj.coord())[split_dim])
2520 N = len(sorted_objects)
2521 self.
_split_value = 0.5*(sorted_objects[
int(N/2)].coord()[split_dim] + sorted_objects[
int(N/2)-1].coord()[split_dim])
2523 low = [obj
for obj
in self.
_objects if obj.coord()[split_dim] < bound]
2524 high = [obj
for obj
in self.
_objects if obj.coord()[split_dim] >= bound]
2528 low = [obj
for obj
in self.
_objects if obj.coord()[split_dim] == bound]
2529 high = [obj
for obj
in self.
_objects if obj.coord()[split_dim] > bound]
2530 leftBoundingbox = []
2531 rightBoundingbox = []
2533 leftBoundingbox.append(list(i))
2534 rightBoundingbox.append(list(i))
2535 leftBoundingbox[1][split_dim] = bound
2536 rightBoundingbox[0][split_dim] = bound
2540 if (split_dim < (len(self.
_objects[0].coord()) - 1)):
2541 child_dim = split_dim + 1
2546 if (len(high) != 0):
2551 def _same_coords(self, objects):
2553 True if and only
if all the given objects have the same
2556 if len(objects) <= 1:
2558 coords = [obj.coord()
for obj
in objects]
2560 for ci
in coords[1:]:
2561 if not np.all(ci == c0):
2567 Returns the objects in the tree.
2573 Iterator over all the objects contained in the tree.
2579 Returns the left tree.
2585 Returns the right tree.
2591 Returns the dimension along which this level of the kD-tree
2594 return self._split_dim
2598 Returns the coordinates of the lower-left and upper-right
2599 corners of the bounding box
for this tree: low_left, up_right
2605 Returns the volume of the bounding box of the tree.
2609 for l,h
in zip(low,high):
2615 Returns the integral of f(objects) over the tree. The
2616 optional boxing parameter determines how deep to descend into
2617 the tree before computing f.
2620 return tree.volume()*f(tree._objects)
2625 return self.
operate(x,y,boxing=boxing)
2627 def operate(self,f,g,boxing=64):
2629 Operates on tree nodes exceeding boxing parameter depth.
2636 def search(self,coordinates,boxing = 64):
2638 takes a set of coordinates and searches down through the tree untill it gets
2639 to a box
with less than
'boxing' objects
in it
and returns the box bounds,
2640 number of objects
in the box,
and the weighting.
2649 def fillNewTree(self,boxing = 64, isArea = False):
2651 copies tree structure, but with KDSkeleton
as the new nodes.
2655 newNode =
KDSkeleton(self.
bounds(), left_child =
None , right_child =
None)
2673 object to store the structure of a kd tree
2676 def __init__(self, bounding_box, left_child = None, right_child = None):
2679 self.
_left = left_child
2680 self.
_right = right_child
2693 def search(self,coordinates):
2695 takes a set of coordinates and searches down through the tree untill it gets
2696 to a box
with less than
'boxing' objects
in it
and returns the box bounds,
2697 number of objects
in the box,
and the weighting.
2699 if self.
_left is None:
2717 A single parameter sample object, suitable for inclusion
in a
2721 def __init__(self, sample_array, headers, coord_names):
2723 Given the sample array, headers for the values,
and the names
2724 of the desired coordinates, construct a parameter sample
2729 if not (len(sample_array) == len(self.
_headers)):
2730 print(
"Header length = ", len(self.
_headers))
2731 print(
"Sample length = ", len(sample_array))
2732 raise RuntimeError(
"parameter and sample lengths do not agree")
2738 Return the element with the corresponding name.
2745 raise KeyError(
"key not found in posterior sample: %s"%key)
2749 Return the coordinates for the parameter sample.
2759 Return analytic likelihood values.
2762 def __init__(self, covariance_matrix_files, mean_vector_files):
2764 Prepare analytic likelihood for the given parameters.
2767 if isinstance(covariance_matrix_files, str):
2768 covariance_matrix_files = [covariance_matrix_files]
2769 if isinstance(mean_vector_files, str):
2770 mean_vector_files = [mean_vector_files]
2772 covarianceMatrices = [np.loadtxt(csvFile, delimiter=
',')
for csvFile
in covariance_matrix_files]
2773 num_matrices = len(covarianceMatrices)
2775 if num_matrices != len(mean_vector_files):
2776 raise RuntimeError(
'Must give a mean vector list for every covariance matrix')
2778 param_line = open(mean_vector_files[0]).readline()
2779 self.
_params = [param.strip()
for param
in param_line.split(
',')]
2781 converter=
lambda x: eval(x.replace(
'pi',
'%.32f'%pi_constant))
2783 for i
in range(num_matrices):
2784 CM = covarianceMatrices[i]
2785 vecFile = mean_vector_files[i]
2787 param_line = open(vecFile).readline()
2788 params = [param.strip()
for param
in param_line.split(
',')]
2789 if set(params)!=set(self.
_params):
2790 raise RuntimeError(
'Parameters do not agree between mean vector files.')
2792 sigmas = dict(zip(params,np.sqrt(CM.diagonal())))
2793 colNums = range(len(params))
2794 converters = dict(zip(colNums,[converter
for i
in colNums]))
2795 meanVectors = np.loadtxt(vecFile, delimiter=
',', skiprows=1, converters=converters)
2797 for vec
in meanVectors:
2798 means = dict(zip(params,vec))
2799 mode = [(param, stats.norm(loc=means[param],scale=sigmas[param]))
for param
in params]
2800 self.
_modes.append(dict(mode))
2802 means = dict(zip(params,meanVectors))
2803 mode = [(param, stats.norm(loc=means[param],scale=sigmas[param]))
for param
in params]
2804 self.
_modes.append(dict(mode))
2807 def pdf(self, param):
2809 Return PDF function for parameter.
2816 def cdf(self, param):
2818 Return PDF function for parameter.
2828 Return list of parameter names described by analytic likelihood function.
2840 A base class for representing web content using ElementTree .
2844 self.
_html=Element(tag)
2846 for attribname,attribvalue
in attrib.items():
2847 self.
_html.attrib[attribname]=attribvalue
2849 parent.append(self.
_html)
2853 Return a pretty-printed XML string of the htmlPage.
2856 rough_string = tostring(elem)
2857 reparsed = minidom.parseString(rough_string)
2858 return reparsed.toprettyxml(indent=
" ")
2869 def p(self,pstring):
2909 def a(self,url,linktext):
2911 Ea.attrib[
'href']=url
2918 if idtable
is not None:
2921 Etab=Element(
'table',args)
2928 Insert row in table tab.
2929 If given, label used
as id
for the table tag
2933 if label
is not None:
2934 Etr.attrib[
'id']=label
2938 def insert_td(self,row,td,label=None,legend=None):
2940 Insert cell td into row row.
2941 Sets id to label, if given
2950 td=minidom.parseString(td)
2951 td=td.toprettyxml(indent=
" ")
2953 if label
is not None:
2954 Etd.attrib[
'id']=label
2955 if legend
is not None:
2956 legend.a(
'#%s'%label,
'%s'%label)
2968 A concrete class for generating an XHTML(1) document. Inherits
from htmlChunk.
2970 def __init__(self,title=None,css=None,javascript=None,toc=False):
2971 htmlChunk.__init__(self,
'html',attrib={
'xmlns':
"http://www.w3.org/1999/xhtml"})
2972 self.
doctype_str=
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
2975 Etitle=SubElement(self.
_head,
'title')
2979 if title
is not None:
2980 Etitle.text=
str(title)
2983 if javascript
is not None:
2985 self.
_jscript.attrib[
'type']=
"text/javascript"
2988 self.
_css=SubElement(self.
_head,
'style')
2989 self.
_css.attrib[
'type']=
"text/css"
2998 if legend
is not None:
2999 legend.a(
'#%s'%section_name,
'%s'%section_name)
3005 Create a section embedded into a table that can be collapsed with a button
3007 newSection=htmlCollapseSection(section_name,table_id=innertable_id,start_closed=start_closed)
3009 if legend
is not None:
3010 legend.a(
'#%s'%section_name,
'%s'%section_name)
3016 Create a section which is not appended to the body of html, but to the parent Element
3018 newSection=htmlSection(section_name,htmlElement=parent,blank=True)
3019 parent.append(newSection._html)
3034 Represents a block of html fitting within a htmlPage. Inherits from htmlChunk.
3036 def __init__(self,section_name,htmlElement=None,blank=False):
3038 htmlChunk.__init__(self,
'div',attrib={
'class':
'ppsection',
'id':section_name},parent=htmlElement)
3040 htmlChunk.__init__(self,
'div',attrib={
'style':
'"color:#000000"',
'id':section_name},parent=htmlElement)
3041 self.
h2(section_name)
3046 Represents a block of html fitting within a htmlPage. Inherits from htmlChunk.
3049 def __init__(self,section_name,htmlElement=None,table_id=None,start_closed=True):
3050 htmlChunk.__init__(self,
'div',attrib={
'class':
'ppsection',
'id':section_name},parent=htmlElement)
3052 if table_id
is None:
3053 table_id=random.randint(1,10000000)
3058 k=random.randint(1,10000000)
3060 st=
'<table border="0" align="center" cellpadding="5" cellspacing="0"><tr bgcolor="#4682B4" height="50"><td width="5%%"><font size="4" face="tahoma" color="white"><a href="#"> Top</a></font></td><td width="45%%"><font size="4" face="tahoma" color="white"><strong>%s</strong></font></td><td bgcolor="#4682B4" align="center" width="50%%"><input id="lnk%s" type="button" value="[+] Expand" onclick="toggle_visibility(\'%s\',\'lnk%s\');"></input></td></tr><tr><td colspan="7">'%(self.
_html.attrib[
'id'],k,self.
table_id,k)
3061 string=string.replace(
'table ',
'table style="display: none" ')
3063 st=
'<table border="0" align="center" cellpadding="5" cellspacing="0"><tr bgcolor="#4682B4" height="50"><td width="5%%"><font size="4" face="tahoma" color="white"><a href="#"> Top</a></font></td><td width="45%%"><font size="4" face="tahoma" color="white"><strong>%s</strong></font></td><td bgcolor="#4682B4" align="center" width="50%%"><input id="lnk%s" type="button" value="[-] Collapse" onclick="toggle_visibility(\'%s\',\'lnk%s\');"></input></td></tr><tr><td colspan="7">'%(self.
_html.attrib[
'id'],k,self.
table_id,k)
3064 string=string.replace(
'table ',
'table style="display: table" ')
3066 st+=
'</td></tr></table>'
3067 htmlChunk.write(self,st)
3074def _calculate_confidence_levels(hist, points, injBin, NSamples):
3076 Returns (injectionconf, toppoints), where injectionconf is the
3077 confidence level of the injection, contained
in the injBin
and
3078 toppoints
is a list of (pointx, pointy, ptindex, frac),
with
3079 pointx
and pointy the (x,y) coordinates of the corresponding
3080 element of the points array, ptindex the index of the point
in the
3081 array,
and frac the cumulative fraction of points
with larger
3082 posterior probability.
3084 The hist argument should be a one-dimensional array that contains
3085 counts of sample points
in each bin.
3087 The points argument should be a 2-D array storing the sky location
3088 associated
with each bin; the first index runs
from 0 to NBins -
3089 1,
while the second index runs
from 0 to 1.
3091 The injBin argument gives the bin index
in which the injection
is
3094 The NSamples argument
is used to normalize the histogram counts
3095 into fractional probability.
3098 histIndices=np.argsort(hist)[::-1]
3103 for i
in histIndices:
3104 frac+=float(hist[i])/float(NSamples)
3105 toppoints.append((points[i,0], points[i,1], i, frac))
3108 print(
'Injection found at confidence level %g'%injConf)
3110 return (injConf, toppoints)
def _greedy_bin(greedyHist, greedyPoints, injection_bin_index, bin_size, Nsamples, confidence_levels):
    """
    An internal function representing the common, dimensionally-independent
    part of the greedy binning algorithms.

    @param greedyHist: 1-D array of per-bin sample counts.
    @param greedyPoints: 2-D array of the coordinates associated with each bin.
    @param injection_bin_index: bin index containing the injection (or None).
    @param bin_size: the (hyper)volume of a single bin.
    @param Nsamples: total number of samples, used for normalisation.
    @param confidence_levels: list of requested confidence levels in (0,1).
    @return: (toppoints, injectionconfidence, reses, injection_area).
    """
    # Rank bins greedily and locate the injection's confidence level.
    (injectionconfidence, toppoints) = _calculate_confidence_levels(greedyHist, greedyPoints, injection_bin_index, Nsamples)

    # Determine the interval/area contained within each confidence level.
    confidence_levels.sort()
    reses = {}
    toppoints = np.array(toppoints)
    for printcl in confidence_levels:
        # Number of bins needed to reach the cumulative fraction printcl.
        nBins = np.searchsorted(toppoints[:, 3], printcl) + 1
        if nBins >= len(toppoints):
            nBins = len(toppoints)-1
        accl = toppoints[nBins-1, 3]
        reses[printcl] = nBins*bin_size

    # Find the area enclosed at the injection's confidence level, if any.
    injection_area = None
    if injection_bin_index and injectionconfidence:
        i = list(np.nonzero(np.asarray(toppoints)[:, 2] == injection_bin_index))[0]
        injection_area = bin_size*(i+1)

    return toppoints, injectionconfidence, reses, injection_area
3147 return - (cos(pi_constant/2. - bounds[0][1])-cos(pi_constant/2. - bounds[1][1]))*(bounds[1][0] - bounds[0][0])
def random_split(items, fraction):
    """
    Randomly partition a list into two disjoint lists.

    NOTE: shuffles `items` in place as a side effect.

    @param items: the list to split.
    @param fraction: fraction (0-1) of items to place in the first list.
    @return: (first, second) where len(first) == int(len(items)*fraction).
    """
    size = int(len(items)*fraction)
    random.shuffle(items)
    return items[:size], items[size:]
3155 if tree._left
is None:
3157 elif coordinates[tree._splitDim] < tree._splitValue:
3169 confidence_levels.sort()
3171 class Harvester(list):
3177 def __call__(self,tree):
3178 number_density=float(len(tree.objects()))/float(tree.volume())
3179 self.append([number_density,tree.volume(),tree.bounds()])
3180 self.unrho+=number_density
3182 def close_ranks(self):
3184 for i
in range(len(self)):
3185 self[i][0]/=self.unrho
3187 return sorted(self,key=itemgetter(0))
3192 samples,header=posterior.samples()
3193 header=header.split()
3194 coord_names=[
"ra",
"dec",
"dist"]
3195 coordinatized_samples=[PosteriorSample(row, header, coord_names)
for row
in samples]
3196 tree=KDTree(coordinatized_samples)
3200 tree.operate(a,h,boxing=samples_per_bin)
3208 confidence_intervals={}
3209 for rho,vol,bounds
in b:
3213 if acc_rho>confidence_levels[cl_idx]:
3214 confidence_intervals[acc_rho]=acc_vol
3216 if cl_idx==len(confidence_levels):
3219 return confidence_intervals
3223 takes samples and applies a KDTree to them to
return confidence levels
3224 returns confidence_intervals - dictionary of user_provided_CL:calculated_area
3225 b - ordered list of KD leaves
3226 injInfo -
if injection values provided then returns
3227 [Bounds_of_inj_kd_leaf ,number_samples_in_box, weight_of_box,injection_CL ,injection_CL_area]
3228 Not quite sure that the repeated samples case
is fixed, posibility of infinite loop.
3230 confidence_levels.sort()
3231 from math
import cos, pi
3232 class Harvester(list):
3234 when called by kdtree.operate will be used to calculate the density of each bin (sky area)
3240 def __call__(self,tree):
3242 area = - (cos(pi/2. - tree.bounds()[0][1])-cos(pi/2. - tree.bounds()[1][1]))*(tree.bounds()[1][0] - tree.bounds()[0][0])
3243 number_density=float(len(tree.objects()))/float(area)
3244 self.append([number_density,len(tree.objects()),area,tree.bounds()])
3245 self.unrho+=number_density
3247 def close_ranks(self):
3249 for i
in range(len(self)):
3250 self[i][0]/=self.unrho
3252 return sorted(self,key=itemgetter(0))
3257 peparser=PEOutputParser(
'common')
3259 samples,header=posterior.samples()
3260 header=header.split()
3261 coord_names=[
"ra",
"dec"]
3262 initial_dimensions = [[0.,-pi/2.],[2.*pi, pi/2.]]
3263 coordinatized_samples=[PosteriorSample(row, header, coord_names)
for row
in samples]
3264 tree=KDTreeVolume(coordinatized_samples,initial_dimensions)
3267 tree.operate(a,h,boxing=samples_per_bin)
3268 totalSamples = len(tree.objects())
3273 samplecounter += entry[1]
3274 entry[1] = float(samplecounter)/float(totalSamples)
3281 if posterior[
'ra'].injval
is not None and posterior[
'dec'].injval
is not None:
3282 injBound,injNum,injWeight = tree.search([posterior[
'ra'].injval,posterior[
'dec'].injval],boxing = samples_per_bin)
3283 injInfo = [injBound,injNum,injWeight]
3284 inj_area = - (cos(pi/2. - injBound[0][1])-cos(pi/2. - injBound[1][1]))*(injBound[1][0] - injBound[0][0])
3285 inj_number_density=float(injNum)/float(inj_area)
3286 inj_rho = inj_number_density / a.unrho
3290 inj_number_density=
None
3294 confidence_intervals={}
3295 for rho,confidence_level,vol,bounds
in b:
3298 if confidence_level>confidence_levels[cl_idx]:
3299 print(
str(confidence_level))
3301 confidence_intervals[confidence_levels[cl_idx]]=acc_vol
3303 if cl_idx==len(confidence_levels):
3307 for rho,sample_number,vol,bounds
in b:
3309 print(
'total area: ' +
str(acc_vol))
3312 inj_confidence =
None
3313 inj_confidence_area =
None
3314 if inj_rho
is not None:
3316 for rho,confidence_level,vol,bounds
in b:
3319 inj_confidence = confidence_level
3320 inj_confidence_area = acc_vol
3321 injInfo.append(inj_confidence)
3322 injInfo.append(inj_confidence_area)
3323 print(
'inj ' +
str(vol))
3325 return confidence_intervals, b, injInfo
3327def kdtree_bin(posterior,coord_names,confidence_levels,initial_boundingbox = None,samples_per_bin = 10):
3329 takes samples and applies a KDTree to them to
return confidence levels
3330 returns confidence_intervals - dictionary of user_provided_CL:calculated_volume
3331 b - ordered list of KD leaves
3332 initial_boundingbox - list of lists [upperleft_coords,lowerright_coords]
3333 injInfo -
if injection values provided then returns
3334 [Bounds_of_inj_kd_leaf ,number_samples_in_box, weight_of_box,injection_CL ,injection_CL_volume]
3335 Not quite sure that the repeated samples case
is fixed, posibility of infinite loop.
3337 confidence_levels.sort()
3338 print(confidence_levels)
3339 class Harvester(list):
3341 when called by kdtree.operate will be used to calculate the density of each bin
3347 def __call__(self,tree):
3348 number_density=float(len(tree.objects()))/float(tree.volume())
3349 self.append([number_density,len(tree.objects()),tree.volume(),tree.bounds()])
3350 self.unrho+=number_density
3352 def close_ranks(self):
3354 for i
in range(len(self)):
3355 self[i][0]/=self.unrho
3357 return sorted(self,key=itemgetter(0))
3362 peparser=PEOutputParser(
'common')
3364 samples,header=posterior.samples()
3365 header=header.split()
3366 coordinatized_samples=[PosteriorSample(row, header, coord_names)
for row
in samples]
3369 if initial_boundingbox
is None:
3370 low=coordinatized_samples[0].coord()
3371 high=coordinatized_samples[0].coord()
3372 for obj
in coordinatized_samples[1:]:
3373 low=np.minimum(low,obj.coord())
3374 high=np.maximum(high,obj.coord())
3375 initial_boundingbox = [low,high]
3377 tree=KDTreeVolume(coordinatized_samples,initial_boundingbox)
3380 tree.operate(a,h,boxing=samples_per_bin)
3384 totalSamples = len(tree.objects())
3387 samplecounter += entry[1]
3388 entry[1] = float(samplecounter)/float(totalSamples)
3395 def checkNone(listoParams):
3396 for param
in listoParams:
3397 if posterior[param].injval
is None:
3402 if checkNone(coord_names):
3403 injBound,injNum,injWeight = tree.search([posterior[x].injval
for x
in coord_names],boxing = samples_per_bin)
3404 injInfo = [injBound,injNum,injWeight]
3409 for aCoord,bCoord
in zip(low,high):
3410 inj_volume = inj_volume*(bCoord - aCoord)
3411 inj_number_density=float(injNum)/float(inj_volume)
3412 inj_rho = inj_number_density / a.unrho
3413 print(injNum,inj_volume,inj_number_density,a.unrho,injBound)
3417 inj_number_density=
None
3421 confidence_intervals={}
3422 for rho,sample_number,vol,bounds
in b:
3425 if sample_number>confidence_levels[cl_idx]:
3426 confidence_intervals[confidence_levels[cl_idx]]=(acc_vol,sample_number)
3428 if cl_idx==len(confidence_levels):
3432 for rho,sample_number,vol,bounds
in b:
3436 inj_confidence =
None
3437 inj_confidence_area =
None
3438 if inj_rho
is not None:
3439 print(
'calculating cl')
3441 for rho,confidence_level,vol,bounds
in b:
3444 inj_confidence = confidence_level
3445 inj_confidence_area = acc_vol
3446 injInfo.append(inj_confidence)
3447 injInfo.append(inj_confidence_area)
3450 return confidence_intervals, b, initial_boundingbox,injInfo
3452def kdtree_bin2Step(posterior,coord_names,confidence_levels,initial_boundingbox = None,samples_per_bin = 10,injCoords = None,alternate = False, fraction = 0.5, skyCoords=False):
3454 input: posterior class instance, list of confidence
levels, optional choice of inital parameter space,
samples per box in kdtree
3455 note initial_boundingbox
is [[lowerbound of each param][upper bound of each param]],
if not specified will just take limits of samples
3456 fraction
is proportion of samples used
for making the tree structure.
3457 returns: confidence_intervals, sorted list of kd objects, initial_boundingbox, injInfo
3458 where injInfo
is [bounding box injection
is found within, samples
in said box, weighting of box (
in case of repeated samples),inj_confidence, inj_confidence_area]
3460 confidence_levels.sort()
3462 samples,header=posterior.samples()
3463 numberSamples = len(samples)
3464 if alternate ==
False:
3465 samplesStructure, samplesFill =
random_split(samples,fraction)
3467 samplesStructure = samples[:
int(numberSamples*fraction)]
3468 samplesFill = samples[
int(numberSamples*fraction):]
3469 samplesFillLen = len(samplesFill)
3471 header=header.split()
3472 coordinatized_samples=[
PosteriorSample(row, header, coord_names)
for row
in samplesStructure]
3474 if skyCoords ==
True:
3475 initial_boundingbox = [[0,-pi_constant/2.],[2*pi_constant,pi_constant/2.]]
3476 if initial_boundingbox
is None:
3477 low=coordinatized_samples[0].coord()
3478 high=coordinatized_samples[0].coord()
3479 for obj
in coordinatized_samples[1:]:
3480 low=np.minimum(low,obj.coord())
3481 high=np.maximum(high,obj.coord())
3482 initial_boundingbox = [low,high]
3483 tree=
KDTreeVolume(coordinatized_samples,initial_boundingbox)
3484 tree2fill = tree.fillNewTree(boxing=samples_per_bin, isArea = skyCoords)
3487 for name
in coord_names:
3488 columns.append(header.index(name))
3490 for sample
in samplesFill:
3492 for column
in columns:
3493 tempSample.append(sample[column])
3496 def getValues(tree,listing):
3497 if tree._left
is None:
3498 listing.append([tree.bounds(),tree._importance,tree._samples,tree._volume])
3500 getValues(tree._left,listing)
3501 getValues(tree._right,listing)
3504 getValues(tree2fill,listLeaves)
3507 for cl
in confidence_levels:
3508 clSamples.append(samplesFillLen*cl)
3510 sortedLeavesList = sorted(listLeaves, key=
lambda importance: importance[1])
3511 sortedLeavesList.reverse()
3512 runningTotalSamples = 0
3513 for i
in range(len(sortedLeavesList)):
3514 runningTotalSamples += sortedLeavesList[i][2]
3515 sortedLeavesList[i].append(float(runningTotalSamples)/samplesFillLen,)
3521 lencl = len(clSamples)
3523 confidence_intervals={}
3524 interpConfAreas = {}
3526 for leaf
in sortedLeavesList:
3527 countSamples += leaf[2]
3530 if level < lencl
and countSamples >= clSamples[level]:
3531 confidence_intervals[confidence_levels[level]]=(volume,float(countSamples)/samplesFillLen)
3532 interpConfAreas[confidence_levels[level]] = volume-leaf[3]*(countSamples-clSamples[level])/leaf[2]
3535 if injCoords
is not None:
3536 injBound,injNum,injImportance = tree2fill.search(injCoords)
3537 injInfo = [injBound,injNum,injImportance]
3543 inj_confidence =
None
3544 inj_confidence_area =
None
3545 if injInfo
is not None:
3548 for leaf
in sortedLeavesList:
3551 if leaf[1] <= injImportance:
3552 inj_confidence = float(acc_cl)/samplesFillLen
3553 inj_confidence_area = acc_vol
3554 injInfo.append(inj_confidence)
3555 injInfo.append(inj_confidence_area)
3558 return sortedLeavesList, interpConfAreas, injInfo
3560 def checkNone(listoParams):
3561 for param
in listoParams:
3562 if posterior[param].injval
is None:
3568 Determine the 2-parameter Bayesian Confidence Intervals using a greedy
3571 @param posterior: an instance of the Posterior
class.
3573 @param greedy2Params: a dict - {param1Name:param1binSize,param2Name:param2binSize} .
3575 @param confidence_levels: A list of floats of the required confidence intervals [(0-1)].
3579 par1_name,par2_name=greedy2Params.keys()
3582 par1pos=posterior[par1_name.lower()].samples
3583 par2pos=posterior[par2_name.lower()].samples
3586 par1_bin=greedy2Params[par1_name]
3587 par2_bin=greedy2Params[par2_name]
3590 par1_injvalue=posterior[par1_name.lower()].injval
3591 par2_injvalue=posterior[par2_name.lower()].injval
3594 par1pos_min=min(par1pos)[0]
3595 par2pos_min=min(par2pos)[0]
3597 par1pos_max=
max(par1pos)[0]
3598 par2pos_max=
max(par2pos)[0]
3600 par1pos_Nbins=
int(ceil((par1pos_max - par1pos_min)/par1_bin))+1
3602 par2pos_Nbins=
int(ceil((par2pos_max - par2pos_min)/par2_bin))+1
3604 greedyHist = np.zeros(par1pos_Nbins*par2pos_Nbins,dtype=
'i8')
3605 greedyPoints = np.zeros((par1pos_Nbins*par2pos_Nbins,2))
3608 par1_point=par1pos_min
3609 par2_point=par2pos_min
3610 for i
in range(par2pos_Nbins):
3612 par1_point=par1pos_min
3613 for j
in range(par1pos_Nbins):
3615 greedyPoints[j+par1pos_Nbins*i,0]=par1_point
3616 greedyPoints[j+par1pos_Nbins*i,1]=par2_point
3617 par1_point+=par1_bin
3618 par2_point+=par2_bin
3623 if par1_injvalue
is not None and par2_injvalue
is not None:
3625 par1_binNumber=
int(floor((par1_injvalue-par1pos_min)/par1_bin))
3626 par2_binNumber=
int(floor((par2_injvalue-par2pos_min)/par2_bin))
3628 injbin=
int(par1_binNumber+par2_binNumber*par1pos_Nbins)
3629 elif par1_injvalue
is None and par2_injvalue
is not None:
3630 print(
"Injection value not found for %s!"%par1_name)
3632 elif par1_injvalue
is not None and par2_injvalue
is None:
3633 print(
"Injection value not found for %s!"%par2_name)
3636 for par1_samp,par2_samp
in zip(par1pos,par2pos):
3637 par1_samp=par1_samp[0]
3638 par2_samp=par2_samp[0]
3639 par1_binNumber=
int(floor((par1_samp-par1pos_min)/par1_bin))
3640 par2_binNumber=
int(floor((par2_samp-par2pos_min)/par2_bin))
3642 greedyHist[par1_binNumber+par2_binNumber*par1pos_Nbins]+=1
3644 raise RuntimeError(
"Problem binning samples: %i,%i,%i,%i,%i,%f,%f,%f,%f,%f,%f .")%(par1_binNumber,par2_binNumber,par1pos_Nbins,par2pos_Nbins,par1_binNumber+par2_binNumber*par1pos_Nbins,par1_samp,par1pos_min,par1_bin,par1_samp,par2pos_min,par2_bin)
3646 toppoints,injection_cl,reses,injection_area=\
3651 float(par1_bin*par2_bin),
3656 return toppoints,injection_cl,reses,injection_area
3660 Utility function to convert longitude,latitude on a unit sphere to
3661 cartesian co-ordinates.
3664 x=np.cos(lat)*np.cos(long)
3665 y=np.cos(lat)*np.sin(long)
3667 return np.array([x,y,z])
def sph2cart(r, theta, phi):
    """
    Utility function to convert r,theta,phi to cartesian co-ordinates.

    @param r: radial coordinate
    @param theta: polar angle (measured from the +z axis), in radians
    @param phi: azimuthal angle, in radians
    @return: the tuple (x, y, z)
    """
    # Standard spherical -> cartesian; inverse of cart2sph, which
    # recovers theta as arccos(z/r).
    x = r*np.sin(theta)*np.cos(phi)
    y = r*np.sin(theta)*np.sin(phi)
    z = r*np.cos(theta)
    return x, y, z
def cart2sph(x, y, z):
    """
    Utility function to convert cartesian coords to r,theta,phi.

    @param x: cartesian x component
    @param y: cartesian y component
    @param z: cartesian z component
    @return: the tuple (r, theta, phi), with theta the polar angle and
        phi wrapped into [0, 2*pi)
    """
    r = np.sqrt(x*x + y*y + z*z)
    theta = np.arccos(z/r)
    # arctan2 returns values in (-pi, pi]; the fmod shift maps the
    # azimuth into [0, 2*pi) instead.
    phi = np.fmod(2*pi_constant + np.arctan2(y, x), 2*pi_constant)
    return r, theta, phi
3693 """Plots a sky map from a healpix map, optionally including an
3694 injected position. This is a temporary map to display before
3695 ligo.skymap utility
is used to generated a smoother one.
3697 :param hpmap: An array representing a healpix map (
in nested
3698 ordering
if ``nest =
True``).
3700 :param outdir: The output directory.
3702 :param inj: If
not ``
None``, then ``[ra, dec]`` of the injection
3703 associated
with the posterior map.
3705 :param nest: Flag indicating the pixel ordering
in healpix.
3709 fig = plt.figure(frameon=False, figsize=(8,6))
3710 hp.mollview(hpmap, nest=nest, min=0, max=np.max(hpmap), cmap=
'Greys', coord=
'E', fig=fig.number, title=
'Histogrammed skymap' )
3711 plt.grid(
True,color=
'g',figure=fig)
3714 theta = np.pi/2.0 - inj[1]
3715 hp.projplot(theta, inj[0],
'*', markerfacecolor=
'white', markeredgecolor=
'black', markersize=10)
3717 plt.savefig(os.path.join(outdir,
'skymap.png'))
3722 """Returns the area (in square degrees) for each confidence level with
3723 a greedy binning algorithm for the given healpix map.
3727 hpmap = hpmap / np.sum(hpmap)
3729 hpmap = np.sort(hpmap)[::-1]
3730 cum_hpmap = np.cumsum(hpmap)
3732 pixarea = hp.nside2pixarea(hp.npix2nside(hpmap.shape[0]))
3733 pixarea = pixarea*(180.0/np.pi)**2
3737 npix = np.sum(cum_hpmap < cl)
3738 areas.append(npix*pixarea)
3740 return np.array(areas)
3743 """Returns the greedy p-value estimate for the given injection.
3747 nside = hp.npix2nside(hpmap.shape[0])
3748 hpmap = hpmap / np.sum(hpmap)
3750 injpix = hp.ang2pix(nside, np.pi/2.0-inj[1], inj[0], nest=nest)
3751 injvalue = hpmap[injpix]
3753 return np.sum(hpmap[hpmap >= injvalue])
def mc2ms(mc, eta):
    """
    Utility function for converting mchirp,eta to component masses. The
    masses are defined so that m1>m2. The rvalue is a tuple (m1,m2).

    @param mc: chirp mass
    @param eta: symmetric mass ratio, in (0, 0.25]
    @return: the tuple (m1, m2) with m1 >= m2
    """
    root = np.sqrt(0.25 - eta)
    # fraction = m1/m2 >= 1 (equals 1 at eta = 0.25, i.e. equal masses)
    fraction = (0.5 + root) / (0.5 - root)
    invfraction = 1/fraction

    m2 = mc * np.power((1+fraction), 0.2) / np.power(fraction, 0.6)

    m1 = mc * np.power(1+invfraction, 0.2) / np.power(invfraction, 0.6)
    return (m1, m2)
3775 Utility function for converting mchirp,q to component masses. The
3776 masses are defined so that m1>m2. The rvalue
is a tuple (m1,m2).
3778 factor = mc * np.power(1+q, 1.0/5.0);
3779 m1 = factor * np.power(q, -3.0/5.0);
3780 m2 = factor * np.power(q, 2.0/5.0);
3787 Utility function for converting q to eta. The
3790 eta = q/((1+q)*(1+q))
3791 return np.clip(eta,0,0.25)
3797 Utility function for converting mchirp,eta to new mass ratio q (m2/m1).
3799 m1,m2 = mc2ms(mc,eta)
def ang_dist(long1, lat1, long2, lat2):
    """
    Find the angular separation of (long1,lat1) and (long2,lat2), which are
    specified in radians.

    @param long1: longitude of the first point, in radians
    @param lat1: latitude of the first point, in radians
    @param long2: longitude of the second point, in radians
    @param lat2: latitude of the second point, in radians
    @return: the angular separation, in radians
    """
    # Unit vectors on the sphere; the separation is the arccos of their
    # dot product.
    x1 = np.cos(lat1)*np.cos(long1)
    y1 = np.cos(lat1)*np.sin(long1)
    z1 = np.sin(lat1)

    x2 = np.cos(lat2)*np.cos(long2)
    y2 = np.cos(lat2)*np.sin(long2)
    z2 = np.sin(lat2)

    # BUGFIX: was np.acos, which only exists as an alias in NumPy >= 2.0
    # and raises AttributeError on NumPy 1.x; np.arccos is correct on both.
    sep = np.arccos(x1*x2 + y1*y2 + z1*z2)
    return sep
3824 Calculate dot products between vectors in rows of numpy arrays.
3827 product = (vec1*vec2).sum()
3829 product = (vec1*vec2).sum(axis=1).reshape(-1,1)
3836 Find angles between vectors in rows of numpy arrays.
3838 vec1_mag = np.sqrt(array_dot(vec1, vec1))
3839 vec2_mag = np.sqrt(array_dot(vec2, vec2))
3840 return np.arccos(
array_dot(vec1, vec2)/(vec1_mag*vec2_mag))
3846 Find polar angles of vectors in rows of a numpy array.
3851 z = vec[:,2].reshape(-1,1)
3853 return np.arccos(z/norm)
3859 Compute general rotation matrices for a given angles
and direction vectors.
3861 cosa = np.cos(angle)
3862 sina = np.sin(angle)
3863 direction /= np.sqrt(array_dot(direction,direction))
3867 R = np.array( [np.diag([i,i,i])
for i
in cosa.flat] )
3868 R += np.array( [np.outer(direction[i],direction[i])*(1.0-cosa[i])
for i
in range(nSamps)] )
3869 R += np.array( [np.array( [[ 0.0, -direction[i,2], direction[i,1]],
3870 [ direction[i,2], 0.0, -direction[i,0]],
3871 [-direction[i,1], direction[i,0], 0.0 ]] ) * sina[i]
for i
in range(nSamps)] )
3874 R = np.diag([cosa,cosa,cosa])
3875 R += np.outer(direction,direction) * (1.0 - cosa)
3876 R += np.array( [[ 0.0, -direction[2], direction[1]],
3877 [ direction[2], 0.0, -direction[0]],
3878 [-direction[1], direction[0], 0.0 ]] ) * sina
3884 tmp1 = vx*np.cos(angle) - vy*np.sin(angle);
3885 tmp2 = vx*np.sin(angle) + vy*np.cos(angle);
3886 return np.asarray([tmp1,tmp2,vz])
3890 tmp1 = vx*np.cos(angle) + vz*np.sin(angle);
3891 tmp2 = - vx*np.sin(angle) + vz*np.cos(angle);
3892 return np.asarray([tmp1,vy,tmp2])
3896 Calculate orbital angular momentum vector.
3897 Note: The units of Lmag are different than what used in lalsimulation.
3898 Mc must be called
in units of Msun here.
3900 Note that
if one wants to build J=L+S1+S2
with L returned by this function, S1
and S2
3901 must
not get the Msun^2 factor.
3903 eta = m1*m2/( (m1+m2)*(m1+m2) )
3905 Lx, Ly, Lz = sph2cart(Lmag, inclination, 0.0)
3906 return np.hstack((Lx,Ly,Lz))
3910 v0 = np.power((m1+m2) *pi_constant * lal.MTSUN_SI * fref, 1.0/3.0)
3912 PNFirst = (((m1+m2)**2)*eta)/v0
3913 PNSecond = 1+ (v0**2) * (3.0/2.0 +eta/6.0)
3914 Lmag= PNFirst*PNSecond
3919 Calculate BH angular momentum vector.
3921 Sx, Sy, Sz = sph2cart(m**2 * a, theta, phi)
3922 return np.hstack((Sx,Sy,Sz))
3928 Calculate best tidal parameters [Eqs. (5) and (6)
in Wade et al. PRD 89, 103012 (2014)]
3931 lambdap = lambda1 + lambda2
3932 lambdam = lambda1 - lambda2
3936 raise ValueError(
"q > 1, while this function requires q <= 1.")
3938 dmbym = (1. - q)/(1. + q)
3942 lam_tilde = (8./13.)*((1.+7.*eta-31.*eta*eta)*lambdap + dmbym*(1.+9.*eta-11.*eta*eta)*lambdam)
3943 dlam_tilde = (1./2.)*(dmbym*(1.-13272.*eta/1319.+8944.*eta*eta/1319.)*lambdap + (1.-15910.*eta/1319.+32850.*eta*eta/1319.+3380.*eta*eta*eta/1319.)*lambdam)
3944 return lam_tilde, dlam_tilde
3946def spin_angles(fref,mc,eta,incl,a1,theta1,phi1,a2=None,theta2=None,phi2=None):
3948 Calculate physical spin angles.
3950 singleSpin = None in (a2,theta2,phi2)
3951 m1, m2 =
mc2ms(mc,eta)
3966 return tilt1, tilt2, theta_jn, beta
3970 Calculate the magnitude of the effective precessing spin
3971 following convention from Phys. Rev. D 91, 024043 -- arXiv:1408.1810
3972 note: the paper uses naming convention where m1 < m2
3973 (
and similar
for associated spin parameters)
and q > 1
3976 A1 = 2. + (3.*q_inv/2.)
3977 A2 = 2. + 3./(2.*q_inv)
3978 S1_perp = a1*np.sin(tilt1)*m1*m1
3979 S2_perp = a2*np.sin(tilt2)*m2*m2
3980 Sp = np.maximum(A1*S2_perp, A2*S1_perp)
3981 chi_p = Sp/(A2*m1*m1)
3986 Calculate the redshift from the luminosity distance measurement using the
3987 Cosmology Calculator provided
in LAL.
3988 By default assuming cosmological parameters
from arXiv:1502.01589 -
'Planck 2015 results. XIII. Cosmological parameters'
3989 Using parameters
from table 4, column
'TT+lowP+lensing+ext'
3990 This corresponds to Omega_M = 0.3065, Omega_Lambda = 0.6935, H_0 = 67.90 km s^-1 Mpc^-1
3991 Returns an array of redshifts
3993 def find_z_root(z,dl,omega):
3994 return dl - lal.LuminosityDistance(omega,z)
3996 omega = lal.CreateCosmologicalParameters(h,om,ol,w0,0.0,0.0)
3997 if isinstance(distance,float):
3998 z = np.array([newton(find_z_root,np.random.uniform(0.0,2.0),args = (distance,omega))])
4000 z = np.array([newton(find_z_root,np.random.uniform(0.0,2.0),args = (d,omega))
for d
in distance[:,0]])
4001 return z.reshape(z.shape[0],1)
4005 Calculate source mass parameter for mass m
as:
4006 m_source = m / (1.0 + z)
4009 return mass / (1.0 + redshift)
4014 Calculate D_alpha integral; multiplicative factor put later
4015 D_alpha = integral{ ((1+z')^(alpha-2))/sqrt(Omega_m*(1+z')^3 +Omega_lambda) dz
'} # eq.15 of arxiv 1110.2720
4017 omega = lal.CreateCosmologicalParameters(0.6790,0.3065,0.6935,-1.0,0.0,0.0)
4021 return (1.0+redshift)**(nonGR_alpha-2.0)/(np.sqrt(omega_m*(1.0+redshift)**3.0 + omega_l))
4025 D_alpha = ((1+z)^(1-alpha))/H_0 * D_alpha
4026 D_alpha calculated
from integrand
in above function
4028 omega = lal.CreateCosmologicalParameters(0.6790,0.3065,0.6935,-1.0,0.0,0.0)
4029 H0 = omega.h*lal.H0FAC_SI
4030 dist = integrate.quad(integrand_distance, 0, redshift ,args=(nonGR_alpha))[0]
4031 dist *= (1.0 + redshift)**(1.0 - nonGR_alpha)
4034 return dist*lal.C_SI
def lambda_a(redshift, nonGR_alpha, lambda_eff, distance):
    """
    Converting from the effective wavelength-like parameter to lambda_A:
    lambda_A = lambda_{eff}*(D_alpha/D_L)^(1/(2-alpha))*(1/(1+z)^((1-alpha)/(2-alpha)))

    @param redshift: source redshift(s)
    @param nonGR_alpha: exponent alpha of the non-GR dispersion relation
    @param lambda_eff: effective wavelength-like parameter
    @param distance: luminosity distance in Mpc (converted to metres via PC_SI*1e6)
    @return: lambda_A
    """
    # Vectorize DistanceMeasure so array-valued redshifts are handled
    # element-wise.
    measure = np.vectorize(DistanceMeasure)(redshift, nonGR_alpha)
    # Luminosity distance in metres.
    dl_metres = distance * lal.PC_SI * 1e6
    exponent = 1. / (2.0 - nonGR_alpha)
    ratio = measure / (dl_metres * (1.0 + redshift)**(1.0 - nonGR_alpha))
    return lambda_eff * ratio**exponent
4048 Converting to Lorentz violating parameter "A" in dispersion relation
from lambda_A:
4049 A = (lambda_A/h)^(alpha-2)
4051 hPlanck = 4.13567e-15
4052 ampFunc = np.vectorize(lambda_a)
4053 lambdaA = ampFunc(redshift, nonGR_alpha, lambda_eff, distance)/lal.C_SI
4054 return (lambdaA/hPlanck)**(nonGR_alpha-2.0)
4057def physical2radiationFrame(theta_jn, phi_jl, tilt1, tilt2, phi12, a1, a2, m1, m2, fref,phiref):
4059 Wrapper function for SimInspiralTransformPrecessingNewInitialConditions().
4060 Vectorizes function
for use
in append_mapping() methods of the posterior
class.
4063 import lalsimulation
as lalsim
4065 print(
'bayespputils.py: Cannot import lalsimulation SWIG bindings to calculate physical to radiation')
4066 print(
'frame angles, did you remember to use --enable-swig-python when ./configuring lalsimulation?')
4068 from numpy
import shape
4069 transformFunc = lalsim.SimInspiralTransformPrecessingNewInitialConditions
4072 m1_SI = m1*lal.MSUN_SI
4073 m2_SI = m2*lal.MSUN_SI
4076 ins = [theta_jn, phi_jl, tilt1, tilt2, phi12, a1, a2, m1_SI, m2_SI, fref,phiref]
4077 if len(
shape(ins))>1:
4080 for p,param
in enumerate(ins):
4081 ins[p] = param.flatten()
4086 results = np.array([transformFunc(t_jn, p_jl, t1, t2, p12, a1, a2, m1_SI, m2_SI, f,phir)
for (t_jn, p_jl, t1, t2, p12, a1, a2, m1_SI, m2_SI, f,phir)
in zip(*ins)])
4087 iota = results[:,0].reshape(-1,1)
4088 spin1x = results[:,1].reshape(-1,1)
4089 spin1y = results[:,2].reshape(-1,1)
4090 spin1z = results[:,3].reshape(-1,1)
4091 spin2x = results[:,4].reshape(-1,1)
4092 spin2y = results[:,5].reshape(-1,1)
4093 spin2z = results[:,6].reshape(-1,1)
4094 a1,theta1,phi1 =
cart2sph(spin1x,spin1y,spin1z)
4095 a2,theta2,phi2 =
cart2sph(spin2x,spin2y,spin2z)
4097 mc = np.power(m1*m2,3./5.)*np.power(m1+m2,-1./5.)
4099 S1 = np.hstack([m1*m1*spin1x,m1*m1*spin1y,m1*m1*spin1z])
4100 S2 = np.hstack([m2*m2*spin2x,m2*m2*spin2y,m2*m2*spin2z])
4104 return iota, theta1, phi1, theta2, phi2, beta
4109 elif len(
shape(ins))<=1:
4112 for p,param
in enumerate(ins):
4118 results = np.array(transformFunc(theta_jn, phi_jl, tilt1, tilt2, phi12, a1, a2, m1_SI, m2_SI, fref,phiref))
4126 a1,theta1,phi1 =
cart2sph(spin1x,spin1y,spin1z)
4127 a2,theta2,phi2 =
cart2sph(spin2x,spin2y,spin2z)
4129 mc = np.power(m1*m2,3./5.)*np.power(m1+m2,-1./5.)
4131 S1 = m1*m1*np.hstack([spin1x,spin1y,spin1z])
4132 S2 = m2*m2*np.hstack([spin2x,spin2y,spin2z])
4136 return iota, theta1, phi1, theta2, phi2, beta
4146 from scipy
import seterr
as sp_seterr
4148 np.seterr(under=
'ignore')
4149 sp_seterr(under=
'ignore')
4150 pos_samps=onedpos.samples
4152 gkde=onedpos.gaussian_kde
4153 except np.linalg.linalg.LinAlgError:
4154 print(
'Singular matrix in KDE. Skipping')
4156 ind=np.linspace(np.min(pos_samps),np.max(pos_samps),101)
4157 kdepdf=gkde.evaluate(ind)
4158 plt.plot(ind,kdepdf,color=
'green')
4162def plot_one_param_pdf(posterior,plot1DParams,analyticPDF=None,analyticCDF=None,plotkde=False):
4164 Plots a 1D histogram and (gaussian) kernel density estimate of the
4165 distribution of posterior samples
for a given parameter.
4167 @param posterior: an instance of the Posterior
class.
4169 @param plot1DParams: a dict; {paramName:Nbins}
4171 @param analyticPDF: an analytic probability distribution function describing the distribution.
4173 @param analyticCDF: an analytic cumulative distribution function describing the distribution.
4175 @param plotkde: Use KDE to smooth plot (default:
False)
4178 matplotlib.rcParams['text.usetex']=
False
4180 param=list(plot1DParams.keys())[0].lower()
4181 histbins=list(plot1DParams.values())[0]
4183 pos_samps=posterior[param].samples
4184 injpar=posterior[param].injval
4185 trigvals=posterior[param].trigvals
4188 myfig=plt.figure(figsize=(6,4.5),dpi=150)
4190 axes=plt.Axes(myfig,[0.15,0.15,0.6,0.76])
4191 myfig.add_axes(axes)
4192 majorFormatterX=ScalarFormatter(useMathText=
True)
4193 majorFormatterX.format_data=
lambda data:
'%.6g'%(data)
4194 majorFormatterY=ScalarFormatter(useMathText=
True)
4195 majorFormatterY.format_data=
lambda data:
'%.6g'%(data)
4196 majorFormatterX.set_scientific(
True)
4197 majorFormatterY.set_scientific(
True)
4199 if param.find(
'time')!=-1:
4200 offset=floor(min(pos_samps))
4201 pos_samps=pos_samps-offset
4202 if injpar
is not None:
4203 injpar=injpar-offset
4204 ax1_name=param+
' + %i'%(
int(offset))
4205 else: ax1_name=param
4207 (n, bins, patches)=plt.hist(pos_samps,histbins,density=
True,facecolor=
'grey')
4208 Nchars=
max(map(
lambda d:len(majorFormatterX.format_data(d)),axes.get_xticks()))
4217 locatorX=matplotlib.ticker.MaxNLocator(nbins=Nticks)
4218 xmin,xmax=plt.xlim()
4219 if param==
'rightascension' or param==
'ra':
4222 if param==
'declination' or param==
'dec':
4225 axes.xaxis.set_major_formatter(majorFormatterX)
4226 axes.yaxis.set_major_formatter(majorFormatterY)
4228 locatorX.view_limits(bins[0],bins[-1])
4229 axes.xaxis.set_major_locator(locatorX)
4231 histbinSize=bins[1]-bins[0]
4233 (xmin,xmax)=plt.xlim()
4234 x = np.linspace(xmin,xmax,2*len(bins))
4235 plt.plot(x, analyticPDF(x+offset), color=
'r', linewidth=2, linestyle=
'dashed')
4239 from numpy.random
import permutation
4240 samps = permutation(pos_samps)[:max_samps,:].flatten()
4241 D,p = stats.kstest(samps+offset, analyticCDF, mode=
'asymp')
4242 plt.title(
"%s: ks p-value %.3f"%(param,p))
4246 if injpar
is not None:
4248 delta_samps=
max(pos_samps)-min(pos_samps)
4249 minrange=min(pos_samps)-0.05*delta_samps
4250 maxrange=
max(pos_samps)+0.05*delta_samps
4251 if minrange<injpar
and maxrange>injpar:
4253 plt.axvline(injpar, color=
'r', linestyle=
'-.', linewidth=4)
4259 if min(pos_samps)<injpar
and max(pos_samps)>injpar:
4260 bins_to_inj=(injpar-bins[0])/histbinSize
4261 injbinh=
int(floor(bins_to_inj))
4262 injbin_frac=bins_to_inj-float(injbinh)
4265 rbins=(sum(n[0:injbinh-1])+injbin_frac*n[injbinh])*histbinSize
4267 if trigvals
is not None:
4268 for IFO
in [IFO
for IFO
in trigvals.keys()]:
4269 trigval = trigvals[IFO]
4270 if min(pos_samps)<trigval
and max(pos_samps)>trigval:
4271 if IFO==
'H1': color =
'r'
4272 elif IFO==
'L1': color =
'g'
4273 elif IFO==
'V1': color =
'm'
4275 plt.axvline(trigval, color=color, linestyle=
'-.')
4279 plt.ylabel(
'Probability Density')
4282 if(param.lower()==
'ra' or param.lower()==
'rightascension'):
4283 xmin,xmax=plt.xlim()
4297class RALocator(matplotlib.ticker.MultipleLocator):
4299 RA tick locations with some intelligence
4302 hour=pi_constant/12.0
4303 if(max-min)>12.0*hour:
4305 elif(max-min)>6.0*hour:
4308 elif (max-min)>3.0*pi_constant/12.0:
4310 elif (max-min)>hour:
4315 matplotlib.ticker.MultipleLocator.__init__(self,base=base)
4317class DecLocator(matplotlib.ticker.MultipleLocator):
4319 Dec tick locations with some intelligence
4321 def __init__(self, min=-pi_constant/2.0,max=pi_constant/2.0):
4322 deg=pi_constant/180.0
4323 if (max-min)>60*deg:
4325 elif (max-min)>20*deg:
4327 elif (max-min)>10*deg:
4331 matplotlib.ticker.MultipleLocator.__init__(self,base=base)
4335 matplotlib.ticker.FuncFormatter.__init__(self,getRAString)
4339 matplotlib.ticker.FuncFormatter.__init__(self,getDecString)
4344 secs=radians*12.0*3600/pi_constant
4345 hours, rem = divmod(secs, 3600 )
4346 mins,rem = divmod(rem, 60 )
4354 if accuracy==
'hour':
return r'%ih'%(hours)
4355 if accuracy==
'min':
return r'%ih%im'%(hours,mins)
4356 if accuracy==
'sec':
return r'%ih%im%2.0fs'%(hours,mins,secs)
4358 if abs(fmod(secs,60.0))>=0.5: return(
getRAString(radians,accuracy=
'sec'))
4359 if abs(fmod(mins,60.0))>=0.5: return(
getRAString(radians,accuracy=
'min'))
4360 else: return(
getRAString(radians,accuracy=
'hour'))
4364 if matplotlib.rcParams[
'text.usetex']:
4376 deg,rem=divmod(radians,pi_constant/180.0)
4377 mins, rem = divmod(rem, pi_constant/(180.0*60.0))
4378 secs = rem * (180.0*3600.0)/pi_constant
4385 if (accuracy==
'arcmin' or accuracy==
'deg')
and secs>30: mins=mins+1
4386 if accuracy==
'deg' and mins>30: deg=deg+1
4387 if accuracy==
'deg':
return '%i'%(sign*deg)+degsymb
4388 if accuracy==
'arcmin':
return '%i%s%i%s'%(sign*deg,degsymb,mins,minsymb)
4389 if accuracy==
'arcsec':
return '%i%s%i%s%2.0f%s'%(sign*deg,degsymb,mins,minsymb,secs,secsymb)
4398 Make a corner plot using the triangle module
4399 (See http://github.com/dfm/corner.py)
4400 @param posterior: The Posterior object
4401 @param levels: a list of confidence levels
4402 @param parnames: list of parameters to include
4408 import triangle
as corner
4410 print(
'Cannot load corner module. Try running\n\t$ pip install corner')
4412 parnames=list(filter(
lambda x: x
in posterior.names, parnames))
4413 labels = [
plot_label(parname)
for parname
in parnames]
4414 data = np.hstack([posterior[p].samples
for p
in parnames])
4415 if posterior.injection:
4416 injvals=[posterior[p].injval
for p
in parnames]
4417 myfig=corner.corner(data,labels=labels,truths=injvals,quantiles=levels,plot_datapoints=
False,bins=20)
4419 myfig=corner.corner(data,labels=labels,quantiles=levels,plot_datapoints=
False,bins=20)
4423def plot_two_param_kde_greedy_levels(posteriors_by_name,plot2DkdeParams,levels,colors_by_name,line_styles=__default_line_styles,figsize=(4,3),dpi=250,figposition=[0.2,0.2,0.48,0.75],legend=
'right',hatches_by_name=
None,Npixels=50):
4425 Plots a 2D kernel density estimate of the 2-parameter marginal posterior.
4427 @param posteriors_by_name: dictionary of Posterior objects, indexed by name
4429 @param plot2DkdeParams: a dict {param1Name:Nparam1Bins,param2Name:Nparam2Bins}
4431 @param levels: list of credible levels
4433 @param colors_by_name: dict of colors, indexed by name
4435 @param line_styles: list of line styles
for the credible levels
4437 @param figsize: figsize to
pass to matplotlib
4439 @param dpi: dpi to
pass to matplotlib
4441 @param figposition: figposition to
pass to matplotlib
4443 @param legend: position of legend
4445 @param hatches_by_name: dict of hatch styles indexed by name
4447 @param Npixels: Number of pixels
in each axis of the 2D grid
4450 from scipy
import seterr
as sp_seterr
4451 confidence_levels=levels
4455 par2_name,par1_name=plot2DkdeParams.keys()
4456 xbin=plot2DkdeParams[par1_name]
4457 ybin=plot2DkdeParams[par2_name]
4459 np.seterr(under=
'ignore')
4460 sp_seterr(under=
'ignore')
4462 fig=plt.figure(1,figsize=figsize,dpi=dpi)
4464 axes=fig.add_axes(figposition)
4468 if len(line_styles)<len(levels):
4469 raise RuntimeError(
"Error: Need as many or more line styles to choose from as confidence levels to plot!")
4472 for name,posterior
in posteriors_by_name.items():
4473 print(
'Plotting '+name)
4474 name_list.append(name)
4475 par1_injvalue=posterior[par1_name].injval
4476 par2_injvalue=posterior[par2_name].injval
4478 par_trigvalues1=posterior[par1_name].trigvals
4479 par_trigvalues2=posterior[par2_name].trigvals
4480 xdat=posterior[par1_name].samples
4481 ydat=posterior[par2_name].samples
4482 a=np.squeeze(posterior[par1_name].samples)
4483 b=np.squeeze(posterior[par2_name].samples)
4485 if par1_name.find(
'time')!=-1:
4486 offset=floor(min(a))
4489 par1_injvalue=par1_injvalue-offset
4490 ax1_name=par1_name+
' + %i'%(
int(offset))
4491 else: ax1_name=par1_name
4493 if par2_name.find(
'time')!=-1:
4494 offset=floor(min(b))
4497 par2_injvalue=par2_injvalue-offset
4498 ax2_name=par2_name+
' + %i'%(
int(offset))
4499 else: ax2_name=par2_name
4501 samp=np.transpose(np.column_stack((xdat,ydat)))
4504 kde=stats.kde.gaussian_kde(samp)
4515 xsorted=np.sort(xdat,axis=
None)
4516 ysorted=np.sort(ydat,axis=
None)
4517 imin=
int(0.01*len(xsorted))
4518 imax=
int(0.99*len(xsorted))
4523 xmid=0.5*(xmin+xmax)
4524 ymid=0.5*(ymin+ymax)
4525 xmin=xmid+(xmin-xmid)/0.95
4526 xmax=xmid+(xmax-xmid)/0.95
4527 ymin=ymid+(ymin-ymid)/0.95
4528 ymax=ymid+(ymax-ymid)/0.95
4529 xax=np.linspace(xmin,xmax,Nx)
4530 yax=np.linspace(ymin,ymax,Ny)
4538 x,y = np.meshgrid(xax,yax)
4539 grid_coords = np.vstack( (x.flatten(),y.flatten()) )
4540 z = kde(grid_coords)
4541 z = z.reshape(Nx,Ny)
4542 densort=np.sort(den)[::-1]
4546 for level
in levels:
4547 ilevel =
int(Npts*level + 0.5)
4550 zvalues.append(densort[ilevel])
4551 CS=plt.contour(x, y, z, zvalues,colors=[colors_by_name[name]],linestyles=line_styles )
4554 if par1_injvalue
is not None and par2_injvalue
is not None:
4555 plt.plot([par1_injvalue],[par2_injvalue],
'b*',scalex=
False,scaley=
False,markersize=12)
4557 if par_trigvalues1
is not None and par_trigvalues2
is not None:
4558 par1IFOs = set([IFO
for IFO
in par_trigvalues1.keys()])
4559 par2IFOs = set([IFO
for IFO
in par_trigvalues2.keys()])
4560 IFOs = par1IFOs.intersection(par2IFOs)
4562 if IFO==
'H1': color =
'r'
4563 elif IFO==
'L1': color =
'g'
4564 elif IFO==
'V1': color =
'm'
4566 plt.plot([par_trigvalues1[IFO]],[par_trigvalues2[IFO]],color=color,marker=
'o',scalex=
False,scaley=
False)
4572 if len(name_list)!=len(CSlst):
4573 raise RuntimeError(
"Error number of contour objects does not equal number of names! Use only *one* contour from each set to associate a name.")
4577 for plot_name
in name_list:
4578 full_name_list.append(plot_name)
4579 if len(confidence_levels)>1:
4580 for ls_,cl
in zip(line_styles[0:len(confidence_levels)],confidence_levels):
4581 dummy_lines.append(mpl_lines.Line2D(np.array([0.,1.]),np.array([0.,1.]),ls=ls_,color=
'k'))
4582 full_name_list.append(
'%s%%'%
str(
int(cl*100)))
4584 fig_actor_lst = [cs.collections[0]
for cs
in CSlst]
4585 fig_actor_lst.extend(dummy_lines)
4586 if legend
is not None:
4588 twodcontour_legend=plt.figlegend(tuple(fig_actor_lst), tuple(full_name_list), loc=
'right',framealpha=0.1)
4590 twodcontour_legend=plt.figlegend(tuple(fig_actor_lst), tuple(full_name_list), loc=
'right')
4591 for text
in twodcontour_legend.get_texts():
4592 text.set_fontsize(
'small')
4594 majorFormatterX=ScalarFormatter(useMathText=
True)
4595 majorFormatterX.format_data=
lambda data:
'%.4g'%(data)
4596 majorFormatterY=ScalarFormatter(useMathText=
True)
4597 majorFormatterY.format_data=
lambda data:
'%.4g'%(data)
4598 majorFormatterX.set_scientific(
True)
4599 majorFormatterY.set_scientific(
True)
4600 axes.xaxis.set_major_formatter(majorFormatterX)
4601 axes.yaxis.set_major_formatter(majorFormatterY)
4602 Nchars=
max(map(
lambda d:len(majorFormatterX.format_data(d)),axes.get_xticks()))
4611 locatorX=matplotlib.ticker.MaxNLocator(nbins=Nticks-1)
4612 if par1_name==
'rightascension' or par1_name==
'ra':
4613 (ramin,ramax)=plt.xlim()
4616 if par1_name==
'declination' or par1_name==
'dec':
4617 (decmin,decmax)=plt.xlim()
4620 axes.xaxis.set_major_formatter(majorFormatterX)
4621 if par2_name==
'rightascension' or par2_name==
'ra':
4622 (ramin,ramax)=plt.ylim()
4624 axes.yaxis.set_major_locator(locatorY)
4626 if par2_name==
'declination' or par2_name==
'dec':
4627 (decmin,decmax)=plt.ylim()
4630 axes.yaxis.set_major_locator(locatorY)
4632 axes.yaxis.set_major_formatter(majorFormatterY)
4634 axes.xaxis.set_major_locator(locatorX)
4637 if(par1_name.lower()==
'ra' or par1_name.lower()==
'rightascension'):
4638 xmin,xmax=plt.xlim()
4639 if(xmin<0.0): xmin=0.0
4640 if(xmax>2.0*pi_constant): xmax=2.0*pi_constant
4647 Plots a 2D kernel density estimate of the 2-parameter marginal posterior.
4649 @param posterior: an instance of the Posterior
class.
4651 @param plot2DkdeParams: a dict {param1Name:Nparam1Bins,param2Name:Nparam2Bins}
4654 from scipy
import seterr
as sp_seterr
4656 par1_name,par2_name=plot2DkdeParams.keys()
4657 Nx=plot2DkdeParams[par1_name]
4658 Ny=plot2DkdeParams[par2_name]
4660 xdat=posterior[par1_name].samples
4661 ydat=posterior[par2_name].samples
4663 par_injvalue1=posterior[par1_name].injval
4664 par_injvalue2=posterior[par2_name].injval
4666 par_trigvalues1=posterior[par1_name].trigvals
4667 par_trigvalues2=posterior[par2_name].trigvals
4669 np.seterr(under=
'ignore')
4670 sp_seterr(under=
'ignore')
4672 myfig=plt.figure(1,figsize=(6,4),dpi=200)
4673 myfig.add_axes(plt.Axes(myfig,[0.20,0.20,0.75,0.7]))
4676 xax=np.linspace(min(xdat),
max(xdat),Nx)
4677 yax=np.linspace(min(ydat),
max(ydat),Ny)
4678 x,y=np.meshgrid(xax,yax)
4680 samp=np.transpose(np.column_stack((xdat,ydat)))
4682 kde=stats.kde.gaussian_kde(samp)
4684 grid_coords = np.append(x.reshape(-1,1),y.reshape(-1,1),axis=1)
4686 z = kde(grid_coords.T)
4687 z = z.reshape(Nx,Ny)
4690 asp=xax.ptp()/yax.ptp()
4692 plt.imshow(z,extent=(xax[0],xax[-1],yax[0],yax[-1]),aspect=asp,origin=
'lower')
4695 if par_injvalue1
is not None and par_injvalue2
is not None:
4696 plt.plot([par_injvalue1],[par_injvalue2],
'bo',scalex=
False,scaley=
False)
4698 if par_trigvalues1
is not None and par_trigvalues2
is not None:
4699 par1IFOs = set([IFO
for IFO
in par_trigvalues1.keys()])
4700 par2IFOs = set([IFO
for IFO
in par_trigvalues2.keys()])
4701 IFOs = par1IFOs.intersection(par2IFOs)
4703 if IFO==
'H1': color =
'r'
4704 elif IFO==
'L1': color =
'g'
4705 elif IFO==
'V1': color =
'm'
4707 plt.plot([par_trigvalues1[IFO]],[par_trigvalues2[IFO]],color=color,marker=
'o',scalex=
False,scaley=
False)
4714 if(par1_name.lower()==
'ra' or par1_name.lower()==
'rightascension'):
4715 xmin,xmax=plt.xlim()
4723 Filter injections to find the injection with end time given by time +/- 0.1s
4726 injection = itertools.ifilter(
lambda a: abs(float(
get_end(a)) - time) < 0.1, injections).next()
4729def histogram2D(posterior,greedy2Params,confidence_levels):
4731 Returns a 2D histogram and edges
for the two parameters passed
in greedy2Params, plus the actual discrete confidence levels
4732 imposed by the finite number of samples.
4733 H,xedges,yedges,Hlasts =
histogram2D(posterior,greedy2Params,confidence_levels)
4734 @param posterior: Posterior instance
4735 @param greedy2Params: a dict ;{param1Name:param1binSize,param2Name:param2binSize}
4736 @param confidence_levels: a list of the required confidence levels to plot on the contour map.
4739 par1_name,par2_name=greedy2Params.keys()
4740 par1_bin=greedy2Params[par1_name]
4741 par2_bin=greedy2Params[par2_name]
4742 par1_injvalue=posterior[par1_name.lower()].injval
4743 par2_injvalue=posterior[par2_name.lower()].injval
4744 a=np.squeeze(posterior[par1_name].samples)
4745 b=np.squeeze(posterior[par2_name].samples)
4750 par1pos_Nbins= int(ceil((par1pos_max - par1pos_min)/par1_bin))+1
4751 par2pos_Nbins= int(ceil((par2pos_max - par2pos_min)/par2_bin))+1
4752 H, xedges, yedges = np.histogram2d(a,b, bins=(par1pos_Nbins, par2pos_Nbins),density=True)
4755 confidence_levels.sort()
4758 idxes=np.argsort(temp)
4760 for cl
in confidence_levels:
4761 while float(Hsum/np.sum(H))<cl:
4769 Hlasts.append(Hlast)
4770 return (H,xedges,yedges,Hlasts)
4772def plot_two_param_greedy_bins_contourf(posteriors_by_name,greedy2Params,confidence_levels,colors_by_name,figsize=(7,6),dpi=120,figposition=[0.3,0.3,0.5,0.5],legend=
'right',hatches_by_name=
None):
4774 @param posteriors_by_name A dictionary of posterior objects indexed by name
4775 @param greedy2Params: a dict ;{param1Name:param1binSize,param2Name:param2binSize}
4776 @param confidence_levels: a list of the required confidence levels to plot on the contour map.
4777 @param colors_by_name: dict of colors, indexed by name
4778 @param figsize: figsize to
pass to matplotlib
4779 @param dpi: dpi to
pass to matplotlib
4780 @param figposition: figposition to
pass to matplotlib
4781 @param legend: Legend position to
pass to matplotlib
4782 @param hatches_by_name: dict of hatch styles, indexed by name
4784 fig=plt.figure(1,figsize=figsize,dpi=dpi)
4786 fig.add_axes(figposition)
4789 par1_name,par2_name=greedy2Params.keys()
4790 for name,posterior
in posteriors_by_name.items():
4791 name_list.append(name)
4792 H,xedges,yedges,Hlasts=
histogram2D(posterior,greedy2Params,confidence_levels+[0.99999999999999])
4793 extent= [xedges[0], yedges[-1], xedges[-1], xedges[0]]
4794 CS2=plt.contourf(yedges[:-1],xedges[:-1],H,Hlasts,extend=
'max',colors=[colors_by_name[name]] ,alpha=0.3 )
4795 CS=plt.contour(yedges[:-1],xedges[:-1],H,Hlasts,extend=
'max',colors=[colors_by_name[name]] )
4798 plt.title(
"%s-%s confidence contours (greedy binning)"%(par1_name,par2_name))
4801 if len(name_list)!=len(CSlst):
4802 raise RuntimeError(
"Error number of contour objects does not equal number of names! Use only *one* contour from each set to associate a name.")
4805 for plot_name
in name_list:
4806 full_name_list.append(plot_name)
4807 if len(confidence_levels)>1:
4808 for cl
in confidence_levels+[1]:
4809 dummy_lines.append(mpl_lines.Line2D(np.array([0.,1.]),np.array([0.,1.]),color=
'k'))
4810 full_name_list.append(
'%s%%'%
str(
int(cl*100)))
4811 fig_actor_lst = [cs.collections[0]
for cs
in CSlst]
4812 fig_actor_lst.extend(dummy_lines)
4813 if legend
is not None:
4815 twodcontour_legend=plt.figlegend(tuple(fig_actor_lst), tuple(full_name_list), loc=
'right',framealpha=0.1)
4817 twodcontour_legend=plt.figlegend(tuple(fig_actor_lst), tuple(full_name_list), loc=
'right')
4818 for text
in twodcontour_legend.get_texts():
4819 text.set_fontsize(
'small')
4827 Histograms of the ranked pixels produced by the 2-parameter greedy
4828 binning algorithm colured by their confidence level.
4830 @param posterior: an instance of the Posterior
class.
4832 @param greedy2Params: a dict ;{param1Name:param1binSize,param2Name:param2binSize}
4834 @param confidence_levels: list of confidence levels to show
4837 from scipy
import seterr
as sp_seterr
4839 np.seterr(under=
'ignore')
4840 sp_seterr(under=
'ignore')
4843 par1_name,par2_name=greedy2Params.keys()
4845 par1_bin=greedy2Params[par1_name]
4846 par2_bin=greedy2Params[par2_name]
4848 a=np.squeeze(posterior[par1_name].samples)
4849 b=np.squeeze(posterior[par2_name].samples)
4852 par1_injvalue=posterior[par1_name.lower()].injval
4853 par2_injvalue=posterior[par2_name.lower()].injval
4862 par1pos_Nbins=
int(ceil((par1pos_max - par1pos_min)/par1_bin))+1
4863 par2pos_Nbins=
int(ceil((par2pos_max - par2pos_min)/par2_bin))+1
4866 if par1_name.find(
'time')!=-1:
4867 offset=floor(min(a))
4870 par1_injvalue=par1_injvalue-offset
4871 ax1_name=par1_name+
' + %i'%(
int(offset))
4872 else: ax1_name=par1_name
4874 if par2_name.find(
'time')!=-1:
4875 offset=floor(min(b))
4878 par2_injvalue=par2_injvalue-offset
4879 ax2_name=par2_name+
' + %i'%(
int(offset))
4880 else: ax2_name=par2_name
4884 par1_trigvalues=posterior[par1_name.lower()].trigvals
4885 par2_trigvalues=posterior[par2_name.lower()].trigvals
4888 axes=plt.Axes(myfig,[0.3,0.3,0.95-0.3,0.90-0.3])
4889 myfig.add_axes(axes)
4898 majorFormatterX=ScalarFormatter(useMathText=
True)
4899 majorFormatterX.format_data=
lambda data:
'%.4g'%(data)
4900 majorFormatterY=ScalarFormatter(useMathText=
True)
4901 majorFormatterY.format_data=
lambda data:
'%.4g'%(data)
4902 majorFormatterX.set_scientific(
True)
4903 majorFormatterY.set_scientific(
True)
4904 axes.xaxis.set_major_formatter(majorFormatterX)
4905 axes.yaxis.set_major_formatter(majorFormatterY)
4906 H, xedges, yedges = np.histogram2d(a,b, bins,density=
False)
4914 Hsum_actual=np.sum(H)
4916 idxes=np.argsort(temp)
4918 while Hsum<Hsum_actual:
4927 H.flat[max_i]=1-float(Hsum)/float(Hsum_actual)
4929 extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
4930 plt.imshow(np.flipud(H), axes=axes, aspect=
'auto', extent=extent, interpolation=
'nearest',cmap=
'gray_r')
4931 plt.gca().autoscale_view()
4936 Nchars=
max(map(
lambda d:len(majorFormatterX.format_data(d)),axes.get_xticks()))
4945 locatorX=matplotlib.ticker.MaxNLocator(nbins=Nticks-1)
4946 (xmin,xmax)=plt.xlim()
4947 (ymin,ymax)=plt.ylim()
4948 if par2_name==
'rightascension' or par2_name==
'ra':
4951 if par2_name==
'declination' or par2_name==
'dec':
4954 if par1_name==
'rightascension' or par1_name==
'ra':
4956 axes.yaxis.set_major_locator(locatorY)
4958 if par1_name==
'declination' or par1_name==
'dec':
4960 axes.yaxis.set_major_locator(locatorY)
4963 axes.xaxis.set_major_formatter(majorFormatterX)
4964 axes.yaxis.set_major_formatter(majorFormatterY)
4966 axes.xaxis.set_major_locator(locatorX)
4968 if par1_injvalue
is not None and par2_injvalue
is not None:
4969 plt.plot([par2_injvalue],[par1_injvalue],
'bo',scalex=
False,scaley=
False)
4973 if(par2_name.lower()==
'ra' or par2_name.lower()==
'rightascension'):
4974 xmin,xmax=plt.xlim()
4975 if(xmin)<0.0: xmin=0.0
4976 if(xmax>2.0*pi_constant): xmax=2.0*pi_constant
4984 Determine the 1-parameter Bayesian Confidence Interval using a greedy
4987 @param posterior: an instance of the posterior
class.
4989 @param greedy1Param: a dict; {paramName:paramBinSize}.
4991 @param confidence_levels: A list of floats of the required confidence intervals [(0-1)].
4994 paramName=list(greedy1Param.keys())[0]
4995 par_bin=list(greedy1Param.values())[0]
4996 par_samps=posterior[paramName.lower()].samples
4998 parpos_min=min(par_samps)[0]
4999 parpos_max=max(par_samps)[0]
5001 par_point=parpos_min
5003 parpos_Nbins= int(ceil((parpos_max - parpos_min)/par_bin))+1
5005 greedyPoints=np.zeros((parpos_Nbins,2))
5007 greedyHist=np.zeros(parpos_Nbins,dtype=
'i8')
5010 for i
in range(parpos_Nbins):
5011 greedyPoints[i,0]=par_point
5012 greedyPoints[i,1]=par_point
5015 for par_samp
in par_samps:
5016 par_samp=par_samp[0]
5017 par_binNumber=
int(floor((par_samp-parpos_min)/par_bin))
5019 greedyHist[par_binNumber]+=1
5021 print(
"IndexError: bin number: %i total bins: %i parsamp: %f "\
5022 %(par_binNumber,parpos_Nbins,par_samp))
5026 par_injvalue=posterior[paramName].injval
5028 par_binNumber=floor((par_injvalue-parpos_min)/par_bin)
5029 injbin=par_binNumber
5031 toppoints,injectionconfidence,reses,injection_area=_greedy_bin(greedyHist,greedyPoints,injbin,float(par_bin),
int(len(par_samps)),confidence_levels)
5033 confidence_levels.sort()
5034 for cl
in confidence_levels:
5035 ind=np.nonzero(toppoints[:,-1]<cl)
5038 cl_intervals.append((np.min(toppoints[ind,0]),np.max(toppoints[ind,0])))
5042 cl_intervals.append((toppoints[ind[0],0],toppoints[ind[0],0]))
5044 return toppoints,injectionconfidence,reses,injection_area,cl_intervals
5049 Calculates the smallest contigious 1-parameter confidence interval for a
5050 set of given confidence levels.
5052 @param posterior: an instance of the Posterior
class.
5054 @param contInt1Params: a dict {paramName:paramBinSize}.
5056 @param confidence_levels: Required confidence intervals.
5062 paramName=list(contInt1Params.keys())[0]
5063 par_bin=list(contInt1Params.values())[0]
5065 par_injvalue=posterior[paramName].injval
5067 par_samps=posterior[paramName].samples
5069 parpos_min=min(par_samps)
5070 parpos_max=max(par_samps)
5072 par_point=parpos_min
5073 parpos_Nbins= int(ceil((parpos_max - parpos_min)/par_bin))+1
5075 greedyHist=np.zeros(parpos_Nbins,dtype='i8')
5077 for par_samp
in par_samps:
5078 par_binNumber=
int(floor((par_samp-parpos_min)/par_bin))
5080 greedyHist[par_binNumber]+=1
5082 print(
"IndexError: bin number: %i total bins: %i parsamp: %f bin: %i"\
5093 par_binNumber=floor((par_injvalue-parpos_min)/par_bin)
5094 injbin=par_binNumber
5098 len_par_samps=len(par_samps)
5103 while j < len(confidence_levels):
5104 confidence_level=confidence_levels[j]
5109 for i
in range(len(greedyHist)):
5116 while right<len(greedyHist):
5117 Npoints=sum(greedyHist[left:right])
5118 frac=float(Npoints)/float(len_par_samps)
5121 if max_frac
is None:
5134 if injbin
is not None and injinterval
is None:
5135 if injbin
in range(max_left,max_right):
5136 injinterval=(max_right-max_left)*par_bin
5137 oneDContInj[
'interval']=injinterval
5138 oneDContInj[
'confidence']=1-frac
5139 if max_frac > confidence_level:
5144 if max_frac
is None:
5145 print(
"Cant determine intervals at %f confidence!"%confidence_level)
5148 oneDContCL[
'left']=max_left*par_bin
5149 oneDContCL[
'right']=max_right*par_bin
5150 oneDContCL[
'width']=(max_right-max_left)*par_bin
5152 while k+1<len(confidence_levels) :
5153 if confidence_levels[k+1]<max_frac:
5158 return oneDContCL,oneDContInj
5163 super(ACLError, self).
__init__(*args)
5167 """Returns an estimate of the autocorrelation function of a given
5168 series. Returns only the positive-lag portion of the ACF,
5169 normalized so that the zero-th element is 1.
"""
5170 x=series-np.mean(series)
5173 acf=np.fft.ifftshift(signal.fftconvolve(y,x,mode='full'))
5183 """Attempts to find a self-consistent estimate of the
5184 autocorrelation length of a given series.
5186 If C(tau) is the autocorrelation function (normalized so C(0) = 1,
5187 for example
from the autocorrelation procedure
in this module),
5188 then the autocorrelation length
is the smallest s such that
5190 1 + 2*C(1) + 2*C(2) + ... + 2*C(M*s) < s
5192 In words: the autocorrelation length
is the shortest length so
5193 that the sum of the autocorrelation function
is smaller than that
5194 length over a window of M times that length.
5196 The maximum window length
is restricted to be len(series)/K
as a
5197 safety precaution against relying on data near the extreme of the
5198 lags
in the ACF, where there
is a lot of noise. Note that this
5199 implies that the series must be at least M*K*s samples long
in
5200 order to get a reliable estimate of the ACL.
5202 If no such s can be found, raises ACLError;
in this case it
is
5203 likely that the series
is too short relative to its true
5204 autocorrelation length to obtain a consistent ACL estimate.
"""
5210 imax=
int(acf.shape[0]/K)
5214 s=np.arange(1, cacf.shape[0]+1)/float(M)
5218 estimates=np.flatnonzero(cacf[:imax] < s[:imax])
5220 if estimates.shape[0] > 0:
5223 return s[estimates[0]]
5226 raise ACLError(
'autocorrelation length too short for consistent estimate')
5231 Compute the effective sample size, calculating the ACL using only
5232 the second half of the samples to avoid ACL overestimation due to
5233 chains equilibrating after adaptation.
5241 Neffective = floor(N/acl)
5243 return (Neffective, acl, acf)
5249 from igwn_ligolw
import lsctables
5250 from igwn_ligolw
import utils
5251 coincXML = utils.load_filename(xml_file)
5252 coinc = lsctables.CoincTable.get_table(coincXML)
5253 coincMap = lsctables.CoincMapTable.get_table(coincXML)
5254 snglInsps = lsctables.SnglInspiralTable.get_table(coincXML)
5256 if (trignum>len(coinc)):
5257 raise RuntimeError(
"Error: You asked for trigger %d, but %s contains only %d triggers" %(trignum,xml_file,len(coinc)))
5259 coincEventID = coinc.getColumnByName(
'coinc_event_id')[trignum]
5260 eventIDs = [row.event_id
for row
in coincMap
if row.coinc_event_id == coincEventID]
5261 triggers = [row
for row
in snglInsps
if row.event_id
in eventIDs]
5270 Given a list of files, threshold value, and a desired
5271 number of outputs posterior samples,
return the skip number to
5272 achieve the desired number of posterior samples.
5274 if nDownsample
is None:
5275 print(
"Max ACL(s):")
5276 splineParams=[
"spcal_npts",
"spcal_active",
"constantcal_active"]
5277 for i
in np.arange(25):
5278 for k
in lal.cached_detector_by_prefix:
5279 splineParams.append(k.lower()+
'_spcal_freq_'+
str(i))
5280 splineParams.append(k.lower()+
'_spcal_logfreq_'+
str(i))
5282 nonParams = [
"logpost",
"post",
"cycle",
"timestamp",
"snrh1",
"snrl1",
"snrv1",
5283 "margtime",
"margtimephi",
"margtime",
"time_max",
"time_min",
5284 "time_mean",
"time_maxl",
"sky_frame",
"psdscaleflag",
"logdeltaf",
"flow",
"f_ref",
5285 "lal_amporder",
"lal_pnorder",
"lal_approximant",
"tideo",
"spino",
"signalmodelflag",
5286 "temperature",
"nifo",
"nlocaltemps",
"ntemps",
"randomseed",
"samplerate",
"segmentlength",
"segmentstart",
5287 "t0",
"phase_maxl",
"azimuth",
"cosalpha",
"lal_amporder",
"bluni"] + logParams + snrParams + splineParams
5288 fixedParams = [p
for p
in samples.colnames
if all(x==samples[p][0]
for x
in samples[p])]
5289 print(
"Fixed parameters: "+
str(fixedParams))
5290 nonParams.extend(fixedParams)
5291 params = [p
for p
in samples.colnames
if p.lower()
not in nonParams]
5292 stride=np.diff(samples[
'cycle'])[0]
5293 results = np.array([np.array(
effectiveSampleSize(samples[param])[:2])
for param
in params])
5294 nEffs = results[:,0]
5295 nEffective = min(nEffs)
5297 maxACLind = np.argmax(ACLs)
5298 maxACL = ACLs[maxACLind]
5300 print(
"%i (%s)." %(stride*maxACL,params[maxACLind]))
5303 if nDownsample
is not None:
5304 if len(samples) > nDownsample:
5305 nskip *= floor(len(samples)/nDownsample)
5310 if len(samples) > nEff:
5311 nskip =
int(ceil(len(samples)/nEff))
5318 A parser for the output of Bayesian parameter estimation codes.
5320 TODO: Will be abstract
class when LDG moves over to Python >2.6,
5321 inherited by each method .
5324 if inputtype ==
'ns':
5326 elif inputtype ==
'common':
5328 elif inputtype ==
'fm':
5330 elif inputtype ==
"inf_mcmc":
5332 elif inputtype ==
"xml":
5334 elif inputtype ==
'hdf5':
5336 elif inputtype ==
'hdf5s':
5339 raise ValueError(
'Invalid value for "inputtype": %r' % inputtype)
def parse(self, files, **kwargs):
    """
    Parse `files` with the input-format parser selected when this
    object was constructed, forwarding any extra keyword arguments.
    """
    parser_fn = self._parser
    return parser_fn(files, **kwargs)
5347 def _infmcmc_to_pos(self,files,outdir=None,deltaLogP=None,fixedBurnins=None,nDownsample=None,oldMassConvention=False,**kwargs):
5349 Parser for lalinference_mcmcmpi output.
5351 if not (fixedBurnins
is None):
5352 if not (deltaLogP
is None):
5353 print(
"Warning: using deltaLogP criteria in addition to fixed burnin")
5354 if len(fixedBurnins) == 1
and len(files) > 1:
5355 print(
"Only one fixedBurnin criteria given for more than one output. Applying this to all outputs.")
5356 fixedBurnins = np.ones(len(files),
'int')*fixedBurnins[0]
5357 elif len(fixedBurnins) != len(files):
5358 raise RuntimeError(
"Inconsistent number of fixed burnin criteria and output files specified.")
5359 print(
"Fixed burning criteria: ",fixedBurnins)
5361 fixedBurnins = np.zeros(len(files))
5362 logPThreshold=-np.inf
5363 if not (deltaLogP
is None):
5364 logPThreshold= - deltaLogP
5365 print(
"Eliminating any samples before log(Post) = ", logPThreshold)
5367 if nDownsample
is None:
5368 print(
"Downsampling to take only uncorrelated posterior samples from each file.")
5369 if len(nskips) == 1
and np.isnan(nskips[0]):
5370 print(
"WARNING: All samples in chain are correlated. Downsampling to 10000 samples for inspection!!!")
5373 for i
in range(len(nskips)):
5374 if np.isnan(nskips[i]):
5375 print(
"%s eliminated since all samples are correlated.")
5377 print(
"Downsampling by a factor of ", nskips[0],
" to achieve approximately ", nDownsample,
" posterior samples")
5380 runfileName=os.path.join(outdir,
"lalinfmcmc_headers.dat")
5381 postName=
"posterior_samples.dat"
5382 runfile=open(runfileName,
'w')
5383 outfile=open(postName,
'w')
5392 def _infmcmc_output_posterior_samples(self, files, runfile, outfile, logPThreshold, fixedBurnins, nskips=None, oldMassConvention=False):
5394 Concatenate all the samples from the given files into outfile.
5395 For each file, only those samples past the point where the
5396 log(post) > logPThreshold are concatenated after eliminating
5403 nskips = np.ones(len(files),
'int')
5404 for infilename,i,nskip,fixedBurnin
in zip(files,range(1,len(files)+1),nskips,fixedBurnins):
5405 infile=open(infilename,
'r')
5407 print(
"Writing header of %s to %s"%(infilename,runfile.name))
5409 runfile.write(
'Chain '+
str(i)+
':\n')
5410 runfile.writelines(runInfo)
5411 print(
"Processing file %s to %s"%(infilename,outfile.name))
5413 if 'f_ref' not in header:
5416 if oldMassConvention:
5421 if not outputHeader:
5422 for label
in header:
5423 outfile.write(label)
5426 outfile.write(
"f_ref")
5428 outfile.write(
"chain")
5431 iterindex=header.index(
"cycle")
5432 logpindex=header.index(
"logpost")
5436 lineParams=line.split()
5437 iter=
int(lineParams[iterindex])
5438 logP=float(lineParams[logpindex])
5439 if (iter > fixedBurnin)
and (logP >= logPThreshold):
5442 if nRead % nskip == 0:
5443 for label
in outputHeader:
5449 outfile.write(lineParams[header.index(label)])
5452 outfile.write(f_ref)
5454 outfile.write(
str(i))
5457 if output: acceptedChains += 1
5460 print(
"%i of %i chains accepted."%(acceptedChains,len(files)))
5462 def _swaplabel12(self, label):
5463 if label[-1] ==
'1':
5464 return label[0:-1] +
'2'
5465 elif label[-1] ==
'2':
5466 return label[0:-1] +
'1'
5470 def _find_max_logP(self, files):
5472 Given a list of files, reads them, finding the maximum log(post)
5475 for inpname
in files:
5476 infile=open(inpname,
'r')
5479 logpindex=header.index(
"logpost")
5481 line=line.lstrip().split()
5482 logP=float(line[logpindex])
5487 print(
"Found max log(post) = ", maxLogP)
5490 def _find_ndownsample(self, files, logPthreshold, fixedBurnins, nDownsample):
5492 Given a list of files, threshold value, and a desired
5493 number of outputs posterior samples,
return the skip number to
5494 achieve the desired number of posterior samples.
5499 if nDownsample
is None: print(
"Max ACL(s):")
5500 for inpname,fixedBurnin
in zip(files,fixedBurnins):
5501 infile = open(inpname,
'r')
5504 header = [name.lower()
for name
in header]
5505 logpindex = header.index(
"logpost")
5506 iterindex = header.index(
"cycle")
5507 deltaLburnedIn =
False
5508 fixedBurnedIn =
False
5513 line = line.lstrip().split()
5514 iter =
int(line[iterindex])
5515 logP = float(line[logpindex])
5516 if iter > fixedBurnin:
5517 fixedBurnedIn =
True
5520 fixedBurnedIn =
False
5523 if logP > logPthreshold:
5524 deltaLburnedIn =
True
5527 if fixedBurnedIn
and deltaLburnedIn
and not adapting:
5531 if nDownsample
is None:
5533 splineParams=[
"spcal_npts",
"spcal_active",
"constantcal_active"]
5534 for i
in np.arange(5):
5535 for k
in [
'h1',
'l1']:
5536 splineParams.append(k+
'_spcal_freq'+
str(i))
5537 splineParams.append(k+
'_spcal_logfreq'+
str(i))
5539 nonParams = [
"logpost",
"cycle",
"timestamp",
"snrh1",
"snrl1",
"snrv1",
5540 "margtime",
"margtimephi",
"margtime",
"time_max",
"time_min",
5541 "time_mean",
"time_maxl",
"sky_frame",
"psdscaleflag",
"logdeltaf",
"flow",
"f_ref",
5542 "lal_amporder",
"lal_pnorder",
"lal_approximant",
"tideo",
"spino",
"signalmodelflag",
5543 "temperature",
"nifo",
"nlocaltemps",
"ntemps",
"randomseed",
"samplerate",
"segmentlength",
"segmentstart",
5544 "t0",
"phase_maxl",
"azimuth",
"cosalpha"] + logParams + snrParams + splineParams
5545 nonParamsIdxs = [header.index(name)
for name
in nonParams
if name
in header]
5546 samps = np.array(lines).astype(float)
5547 fixedIdxs = np.where(np.amin(samps,axis=0)-np.amax(samps,axis=0) == 0.0)[0]
5548 nonParamsIdxs.extend(fixedIdxs)
5549 paramIdxs = [i
for i
in range(len(header))
if i
not in nonParamsIdxs]
5550 stride=samps[1,iterindex] - samps[0,iterindex]
5552 nEffs = results[:,0]
5553 nEffectives.append(min(nEffs))
5555 maxACLind = np.argmax(ACLs)
5556 maxACL = ACLs[maxACLind]
5558 maxACLind = paramIdxs[maxACLind]
5559 print(
"%i (%s) for chain %s." %(stride*maxACL,header[maxACLind],inpname))
5561 nEffectives.append(
None)
5562 print(
"Error computing effective sample size of %s!"%inpname)
5566 nskips = np.ones(nfiles)
5568 if nDownsample
is not None:
5569 if ntot > nDownsample:
5570 nskips *=
int(floor(ntot/nDownsample))
5572 for i
in range(nfiles):
5573 nEff = nEffectives[i]
5577 nskips[i] =
int(ceil(ntot/nEff))
5582 def _find_infmcmc_f_ref(self, runInfo):
5584 Searches through header to determine reference frequency of waveforms.
5585 If no fRef given, calls _find_infmcmc_f_lower to get the lower frequency
5586 bound, which is the default reference frequency
for LALInference.
5589 runInfoIter = iter(runInfo)
5590 for line
in runInfoIter:
5591 headers=line.lstrip().lower().split()
5593 fRefColNum = headers.index(
'f_ref')
5594 info = runInfoIter.next().lstrip().lower().split()
5603 runInfoIter = iter(runInfo)
5604 for line
in runInfoIter:
5605 headers=line.lstrip().lower().split()
5607 if headers[0]==
"command":
5609 fRefInd = headers.index(
'--fref')+1
5610 fRef = headers[fRefInd]
5623 def _find_infmcmc_f_lower(self, runInfo):
5625 Searches through header to determine starting frequency of waveforms.
5626 Assumes same for all IFOs.
5628 runInfo = iter(runInfo)
5629 for line
in runInfo:
5630 headers=line.lstrip().lower().split()
5632 flowColNum = headers.index(
'f_low')
5633 IFOinfo = runInfo.next().lstrip().lower().split()
5634 f_lower = IFOinfo[flowColNum]
5640 def _clear_infmcmc_header(self, infile):
5642 Reads lalinference_mcmcmpi file given, returning the run info and
5643 common output header information.
5647 runInfo.append(line)
5648 headers=line.lstrip().lower().split()
5650 headers.index(
'cycle')
5655 raise RuntimeError(
"couldn't find line with 'cycle' in LALInferenceMCMC input")
5656 return runInfo[:-1],headers
5659 def _ns_to_pos(self,files,Nlive=None,Npost=None,posfilename='posterior_samples.dat'):
5661 Parser for nested sampling output.
5662 files : list of input NS files
5663 Nlive : Number of live points
5664 Npost : Desired number of posterior samples
5665 posfilename : Posterior output file name (default:
'posterior_samples.dat')
5670 print(
"Need lalinference.nest2pos to convert nested sampling output!")
5674 raise RuntimeError(
"Need to specify number of live points in positional arguments of parse!")
5682 parsfilename = (it.next()).strip(
'.gz')+
'_params.txt'
5684 if os.path.isfile(parsfilename):
5685 print(
'Looking for '+parsfilename)
5687 if os.access(parsfilename,os.R_OK):
5689 with open(parsfilename,
'r')
as parsfile:
5690 outpars=parsfile.readline()+
'\n'
5692 raise RuntimeError(
'Cannot open parameters file %s!'%(parsfilename))
5695 outpars=
'mchirp \t eta \t time \t phi0 \t dist \t RA \t \
5696 dec \t psi \t iota \t logl \n'
5699 parsvec=outpars.split()
5701 for i
in range(len(parsvec)):
5702 if parsvec[i].lower()==
'logl':
5705 print(
'Error! Could not find logL column in parameter list: %s'%(outpars))
5708 inarrays=list(map(np.loadtxt,files))
5710 pos=
draw_posterior_many(inarrays,[Nlive
for f
in files],logLcols=[logLcol
for f
in files])
5714 with open(posfilename,
'w')
as posfile:
5716 posfile.write(outpars)
5720 posfile.write(
'%10.12e\t' %(i))
5723 with open(posfilename,
'r')
as posfile:
5728 def _followupmcmc_to_pos(self,files):
5730 Parser for followupMCMC output.
5735 def _multinest_to_pos(self,files):
5737 Parser for MultiNest output.
5741 def _xml_to_pos(self,infile):
5743 Parser for VOTable XML Using
5745 from xml.etree
import ElementTree
as ET
5746 xmlns=
'http://www.ivoa.net/xml/VOTable/v1.1'
5748 register_namespace=ET.register_namespace
5749 except AttributeError:
5750 def register_namespace(prefix,uri):
5751 ET._namespace_map[uri]=prefix
5752 register_namespace(
'vot',xmlns)
5753 tree = ET.ElementTree()
5757 tables = tree.findall(
'.//{%s}TABLE'%(xmlns))
5758 for table
in tables:
5759 if table.get(
'utype')==
'lalinference:results:posteriorsamples':
5761 for table
in tables:
5762 if table.get(
'utype')==
'lalinference:results:nestedsamples':
5763 nsresource=[node
for node
in tree.findall(
'{%s}RESOURCE'%(xmlns))
if node.get(
'utype')==
'lalinference:results'][0]
5765 raise RuntimeError(
'Cannot find "Posterior Samples" TABLE element in XML input file %s'%(infile))
5767 def _VOTTABLE2pos(self,table):
5769 Parser for a VOT TABLE element
with FIELDs
and TABLEDATA elements
5771 from xml.etree
import ElementTree
as ET
5772 xmlns=
'http://www.ivoa.net/xml/VOTable/v1.1'
5774 register_namespace=ET.register_namespace
5775 except AttributeError:
5776 def register_namespace(prefix,uri):
5777 ET._namespace_map[uri]=prefix
5778 register_namespace(
'vot',xmlns)
5781 for field
in table.findall(
'./{%s}FIELD'%(xmlns)):
5782 header.append(field.attrib[
'name'])
5784 raise RuntimeError(
'Unable to find FIELD nodes for table headers in XML table')
5785 data=table.findall(
'./{%s}DATA'%(xmlns))
5786 tabledata=data[0].find(
'./{%s}TABLEDATA'%(xmlns))
5788 for row
in tabledata:
5789 llines.append(np.array(list(map(
lambda a:float(a.text),row))))
5790 flines=np.array(llines)
5791 for i
in range(0,len(header)):
5792 if header[i].lower().find(
'log')!=-1
and header[i].lower()
not in logParams
and re.sub(
'log',
'', header[i].lower())
not in [h.lower()
for h
in header]
and header[i].lower()
not in lorentzInvarianceViolationParams:
5793 print(
'exponentiating %s'%(header[i]))
5795 flines[:,i]=np.exp(flines[:,i])
5797 header[i]=re.sub(
'log',
'', header[i], flags=re.IGNORECASE)
5798 if header[i].lower().find(
'sin')!=-1
and re.sub(
'sin',
'', header[i].lower())
not in [h.lower()
for h
in header]:
5799 print(
'asining %s'%(header[i]))
5800 flines[:,i]=np.arcsin(flines[:,i])
5801 header[i]=re.sub(
'sin',
'', header[i], flags=re.IGNORECASE)
5802 if header[i].lower().find(
'cos')!=-1
and re.sub(
'cos',
'', header[i].lower())
not in [h.lower()
for h
in header]:
5803 print(
'acosing %s'%(header[i]))
5804 flines[:,i]=np.arccos(flines[:,i])
5805 header[i]=re.sub(
'cos',
'', header[i], flags=re.IGNORECASE)
5806 header[i]=header[i].replace(
'(',
'')
5807 header[i]=header[i].replace(
')',
'')
5808 print(
'Read columns %s'%(
str(header)))
5809 return header,flines
5811 def _hdf5s_to_pos(self, infiles, fixedBurnins=None, deltaLogP=None, nDownsample=None, tablename=None, **kwargs):
5812 from astropy.table
import vstack
5814 if fixedBurnins
is None:
5815 fixedBurnins = np.zeros(len(infiles))
5817 if len(infiles) > 1:
5818 multiple_chains =
True
5821 for i, [infile, fixedBurnin]
in enumerate(zip(infiles, fixedBurnins)):
5822 chain = self.
_hdf5_to_table(infile, fixedBurnin=fixedBurnin, deltaLogP=deltaLogP, nDownsample=nDownsample, multiple_chains=multiple_chains, tablename=tablename, **kwargs)
5823 chain.add_column(astropy.table.Column(i*np.ones(len(chain)), name=
'chain'))
5824 chains.append(chain)
5827 if deltaLogP
is not None:
5828 logPThreshold = -np.inf
5829 for chain
in chains:
5831 logPThreshold =
max([logPThreshold,
max(chain[
'logpost'])- deltaLogP])
5832 print(
"Eliminating any samples before log(L) = {}".format(logPThreshold))
5834 for i, chain
in enumerate(chains):
5835 if deltaLogP
is not None:
5836 above_threshold = np.arange(len(chain))[chain[
'logpost'] > logPThreshold]
5837 burnin_idx = above_threshold[0]
if len(above_threshold) > 0
else len(chain)
5840 chains[i] = chain[burnin_idx:]
5842 samples = vstack(chains)
5845 if nDownsample
is not None:
5847 samples = samples[::nskip]
5849 return samples.colnames,
as_array(samples).view(float).reshape(-1, len(samples.columns))
5851 def _hdf5_to_table(self, infile, deltaLogP=None, fixedBurnin=None, nDownsample=None, multiple_chains=False, tablename=None, **kwargs):
5853 Parse a HDF5 file and return an array of posterior samples ad list of
5854 parameter names. Equivalent to
'_common_to_pos' and work
in progress.
5857 samples =
read_samples(infile, tablename=posterior_grp_name)
5860 params = samples.colnames
5862 for param
in params:
5863 param_low = param.lower()
5864 if param_low.find(
'log') != -1
and param_low
not in logParams
and re.sub(
'log',
'', param_low)
not in [p.lower()
for p
in params]
and param_low
not in lorentzInvarianceViolationParams:
5865 print(
'exponentiating %s' % param)
5866 new_param = re.sub(
'log',
'', param, flags=re.IGNORECASE)
5867 samples[new_param] = np.exp(samples[param])
5870 if param_low.find(
'sin') != -1
and re.sub(
'sin',
'', param_low)
not in [p.lower()
for p
in params]:
5871 print(
'asining %s' % param)
5872 new_param = re.sub(
'sin',
'', param, flags=re.IGNORECASE)
5873 samples[new_param] = np.arcsin(samples[param])
5876 if param_low.find(
'cos') != -1
and re.sub(
'cos',
'', param_low)
not in [p.lower()
for p
in params]:
5877 print(
'acosing %s' % param)
5878 new_param = re.sub(
'cos',
'', param, flags=re.IGNORECASE)
5879 samples[new_param] = np.arccos(samples[param])
5883 if param != param.replace(
'(',
''):
5884 samples.rename_column(param, param.replace(
'(',
''))
5885 if param != param.replace(
')',
''):
5886 samples.rename_column(param, param.replace(
')',
''))
5891 params = samples.colnames
5892 print(
'Read columns %s' %
str(params))
5895 if 'cycle' in params:
5896 if not (fixedBurnin
is None):
5897 if not (deltaLogP
is None):
5898 print(
"Warning: using deltaLogP criteria in addition to fixed burnin")
5899 print(
"Fixed burning criteria: ",fixedBurnin)
5903 burned_in_cycles = np.arange(len(samples))[samples[
'cycle'] > fixedBurnin]
5904 burnin_idx = burned_in_cycles[0]
if len(burned_in_cycles) > 0
else len(samples)
5905 samples = samples[burnin_idx:]
5907 logPThreshold=-np.inf
5908 if len(samples) > 0
and not (deltaLogP
is None):
5909 logPThreshold =
max(samples[
'logpost'])- deltaLogP
5910 print(
"Eliminating any samples before log(post) = ", logPThreshold)
5911 burnin_idx = np.arange(len(samples))[samples[
'logpost'] > logPThreshold][0]
5912 samples = samples[burnin_idx:]
5914 if len(samples) > 0:
5916 if nDownsample
is None:
5917 print(
"Downsampling to take only uncorrelated posterior samples from each file.")
5918 if np.isnan(nskip)
and not multiple_chains:
5919 print(
"WARNING: All samples in chain are correlated. Downsampling to 10000 samples for inspection!!!")
5921 samples = samples[::nskip]
5924 print(
"WARNING: All samples in {} are correlated.".format(infile))
5925 samples = samples[-1:]
5927 print(
"Downsampling by a factor of ", nskip,
" to achieve approximately ", nDownsample,
" posterior samples")
5928 samples = samples[::nskip]
def _hdf5_to_pos(self, infile, fixedBurnins=None, deltaLogP=None, nDownsample=None, tablename=None, **kwargs):
    """
    Read posterior samples from a single HDF5 file and return
    (column-name list, 2-D float array), matching the output shape of
    the other parsers. Burn-in removal and downsampling are delegated
    to _hdf5_to_table.
    """
    table = self._hdf5_to_table(infile, fixedBurnin=fixedBurnins,
                                deltaLogP=deltaLogP,
                                nDownsample=nDownsample,
                                tablename=tablename, **kwargs)
    names = table.colnames
    # Flatten the astropy Table into an (nsamples, ncolumns) float array.
    flat = as_array(table).view(float).reshape(-1, len(table.columns))
    return names, flat
5937 def _common_to_pos(self,infile,info=[None,None]):
5939 Parse a file in the
'common format' and return an array of posterior
5940 samples
and list of parameter names. Will apply inverse functions to
5941 columns
with names containing sin,cos,log.
5944 [headerfile,delimiter]=info
5946 if headerfile==
None:
5947 formatstr=infile.readline().lstrip()
5949 hf=open(headerfile,
'r')
5950 formatstr=hf.readline().lstrip()
5953 formatstr=formatstr.replace(
'#',
'')
5954 formatstr=formatstr.replace(
'"',
'')
5956 header=formatstr.split(delimiter)
5957 header[-1]=header[-1].rstrip(
'\n')
5960 dec=re.compile(
r'^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?$|^inf$')
5962 for line_number,line
in enumerate(infile):
5963 sline=line.split(delimiter)
5964 if sline[-1] ==
'\n':
5968 print(
'Ignoring empty line in input file: %s'%(sline))
5970 elif len(sline)!=nparams:
5971 sys.stderr.write(
'WARNING: Malformed row %i, read %i elements but there is meant to be %i\n'%(line_number,len(sline),nparams))
5974 for elemn,st
in enumerate(sline):
5975 s=st.replace(
'\n',
'')
5976 if dec.search(s)
is None:
5977 print(
'Warning! Ignoring non-numeric data after the header: %s. Row = %i,Element=%i'%(s,line_number,elemn))
5983 llines.append(list(map(float,sline)))
5985 flines=np.array(llines)
5987 if not flines.any():
5988 raise RuntimeError(
"ERROR: no lines read in!")
5991 for i
in range(0,len(header)):
5992 if header[i].lower().find(
'log')!=-1
and header[i].lower()
not in logParams
and re.sub(
'log',
'', header[i].lower())
not in [h.lower()
for h
in header]
and header[i].lower()
not in lorentzInvarianceViolationParams:
5993 print(
'exponentiating %s'%(header[i]))
5995 flines[:,i]=np.exp(flines[:,i])
5997 header[i]=re.sub(
'log',
'', header[i], flags=re.IGNORECASE)
5998 if header[i].lower().find(
'sin')!=-1
and re.sub(
'sin',
'', header[i].lower())
not in [h.lower()
for h
in header]:
5999 print(
'asining %s'%(header[i]))
6000 flines[:,i]=np.arcsin(flines[:,i])
6001 header[i]=re.sub(
'sin',
'', header[i], flags=re.IGNORECASE)
6002 if header[i].lower().find(
'cos')!=-1
and re.sub(
'cos',
'', header[i].lower())
not in [h.lower()
for h
in header]:
6003 print(
'acosing %s'%(header[i]))
6004 flines[:,i]=np.arccos(flines[:,i])
6005 header[i]=re.sub(
'cos',
'', header[i], flags=re.IGNORECASE)
6006 header[i]=header[i].replace(
'(',
'')
6007 header[i]=header[i].replace(
')',
'')
6008 print(
'Read columns %s'%(
str(header)))
6009 return header,flines
6014 lines=fo.split(
'\n')
6019 key=line.replace(
'[1]',
'').strip(
' ').strip(
'"')
6023 if result
is not {}:
6029 key=line.strip(
'"').split()[1]
6034 newline=line.strip(
'"').split()
6035 if newline
is not []:
6036 out2.append(line.strip(
'"').split())
6045 Parse a VO Table RESOURCE containing nested sampling output and
6046 return a VOTable TABLE element
with posterior samples
in it.
6047 This can be added to an existing tree by the user.
6048 Nlive will be read
from the nsresource, unless specified
6050 from xml.etree
import ElementTree
as ET
6052 from math
import log, exp
6053 xmlns=
'http://www.ivoa.net/xml/VOTable/v1.1'
6055 register_namespace=ET.register_namespace
6056 except AttributeError:
6057 def register_namespace(prefix,uri):
6058 ET._namespace_map[uri]=prefix
6059 register_namespace(
'vot',xmlns)
6061 postable=ET.Element(
"{%s}TABLE"%(xmlns),attrib={
'name':
'Posterior Samples',
'utype':
'lalinference:results:posteriorsamples'})
6063 nstables=[resource
for resource
in nsresource.findall(
"./{%s}TABLE"%(xmlns))
if resource.get(
"utype")==
"lalinference:results:nestedsamples"]
6067 runstateResource = [resource
for resource
in nsresource.findall(
"./{%s}RESOURCE"%(xmlns))
if resource.get(
"utype")==
"lalinference:state"][0]
6068 algTable = [table
for table
in runstateResource.findall(
"./{%s}TABLE"%(xmlns))
if table.get(
"utype")==
"lalinference:state:algorithmparams"][0]
6069 Nlive = int ([param
for param
in algTable.findall(
"./{%s}PARAM"%(xmlns))
if param.get(
"name")==
'Nlive'][0].get(
'value'))
6070 print(
'Found Nlive %i'%(Nlive))
6072 raise RuntimeError(
"Cannot find number of live points in XML table, please specify")
6074 for fieldnode
in nstable.findall(
'./{%s}FIELD'%xmlns):
6075 if fieldnode.get(
'name') ==
'logL':
6078 postable.append(copy.deepcopy(fieldnode))
6079 for paramnode
in nstable.findall(
'./{%s}PARAM'%(xmlns)):
6080 postable.append(copy.deepcopy(paramnode))
6082 RuntimeError(
"Unable to find logL column")
6083 posdataNode=ET.Element(
"{%s}DATA"%(xmlns))
6084 postabledataNode=ET.Element(
"{%s}TABLEDATA"%(xmlns))
6085 postable.append(posdataNode)
6086 posdataNode.append(postabledataNode)
6087 nstabledata=nstable.find(
'./{%s}DATA/{%s}TABLEDATA'%(xmlns,xmlns))
6088 logw=log(1.0 - exp(-1.0/float(Nlive)))
6090 for row
in nstabledata:
6091 logL=float(row[logLcol].text)
6092 weights.append(logL-logw)
6093 logw=logw-1.0/float(Nlive)
6095 weights = [w - mw
for w
in weights]
6096 for (row,weight)
in zip(nstabledata,weights):
6097 if weight > log(random.random()):
6098 postabledataNode.append(copy.deepcopy(row))
6101xmlns=
'http://www.ivoa.net/xml/VOTable/v1.1'
6109 if tag==
'{%s}TABLE'%(xmlns):
6110 if attrib[
'utype']==
'lalinference:results:nestedsamples'\
6111 or attrib[
'utype']==
'lalinference:results:posteriorsamples':
6124 if tag==
'{%s}FIELD'%(xmlns):
6126 if tag==
'{%s}TR'%(xmlns):
6128 if tag==
'{%s}TD'%(xmlns):
6130 if tag==
'{%s}PARAM'%(xmlns):
6133 namenode.p(attrib[
'name'])
6135 valnode.p(attrib[
'value'])
6138 if tag==
'{%s}TABLE'%(xmlns):
6141 if tag==
'{%s}FIELD'%(xmlns):
6143 if tag==
'{%s}TD'%(xmlns):
6150 return self.
html.toprettyxml()
6152def _cl_width(cl_bound):
6153 """Returns (high - low), the width of the given confidence
6156 return cl_bound[1] - cl_bound[0]
6158def _cl_count(cl_bound, samples):
6159 """Returns the number of samples within the given confidence
6162 return np.sum((samples >= cl_bound[0]) & (samples <= cl_bound[1]))
6165 """Returns a tuple (relative_change, fractional_uncertainty,
6166 percentile_uncertainty) giving the uncertainty in confidence
6167 intervals
from multiple posteriors.
6169 The uncertainty
in the confidence intervals
is the difference
in
6170 length between the widest interval, formed
from the smallest to
6171 largest values among all the cl_bounds,
and the narrowest
6172 interval, formed
from the largest-small
and smallest-large values
6173 among all the cl_bounds. Note that neither the smallest nor the
6174 largest confidence intervals necessarily correspond to one of the
6177 The relative change relates the confidence interval uncertainty to
6178 the expected value of the parameter, the fractional uncertainty
6179 relates this length to the length of the confidence level
from the
6180 combined posteriors,
and the percentile uncertainty gives the
6181 change
in percentile over the combined posterior between the
6182 smallest
and largest confidence intervals.
6184 @param cl The confidence level (between 0
and 1).
6186 @param cl_bounds A list of (low, high) pairs giving the confidence
6187 interval associated
with each posterior.
6189 @param posteriors A list of PosteriorOneDPDF objects giving the
6192 Ns=[p.samples.shape[0] for p
in posteriors]
6197 all_samples = np.squeeze(np.concatenate([p.samples
for p
in posteriors], axis=0))
6198 weights = np.squeeze(np.concatenate([p.samples*0.0+1.0/(Nsamplers*N)
for (N,p)
in zip(Ns,posteriors)], axis=0))
6200 isort=np.argsort(all_samples)
6202 all_samples = all_samples[isort]
6203 weights = weights[isort]
6205 param_mean = np.average(all_samples, weights=weights)
6207 N=all_samples.shape[0]
6209 alpha = (1.0 - cl)/2.0
6211 wttotal = np.cumsum(weights)
6212 ilow = np.nonzero(wttotal >= alpha)[0][0]
6213 ihigh = np.nonzero(wttotal >= 1.0-alpha)[0][0]
6215 all_cl_bound = (all_samples[ilow], all_samples[ihigh])
6217 low_bounds = np.array([l
for (l,h)
in cl_bounds])
6218 high_bounds = np.array([h
for (l,h)
in cl_bounds])
6220 largest_cl_bound = (np.min(low_bounds), np.max(high_bounds))
6221 smallest_cl_bound = (np.max(low_bounds), np.min(high_bounds))
6223 if smallest_cl_bound[1] < smallest_cl_bound[0]:
6225 smallest_cl_bound = (0.0, 0.0)
6227 ci_uncertainty = _cl_width(largest_cl_bound) - _cl_width(smallest_cl_bound)
6229 relative_change = ci_uncertainty/param_mean
6231 frac_uncertainty = ci_uncertainty/_cl_width(all_cl_bound)
6233 quant_uncertainty = float(_cl_count(largest_cl_bound, all_samples) - _cl_count(smallest_cl_bound, all_samples))/float(N)
6235 return (relative_change, frac_uncertainty, quant_uncertainty)
6238def plot_waveform(pos=None,siminspiral=None,event=0,path=None,ifos=['H1','L1','V1']):
6240 from igwn_ligolw
import lsctables,ligolw
6241 from lalsimulation.lalsimulation
import SimInspiralChooseTDWaveform,SimInspiralChooseFDWaveform
6242 from lalsimulation.lalsimulation
import SimInspiralImplementedTDApproximants,SimInspiralImplementedFDApproximants
6243 from lal.lal
import CreateREAL8TimeSeries,CreateForwardREAL8FFTPlan,CreateTukeyREAL8Window,CreateCOMPLEX16FrequencySeries,DimensionlessUnit,REAL8TimeFreqFFT
6244 from lal.lal
import ComputeDetAMResponse, GreenwichMeanSiderealTime
6245 from lal.lal
import LIGOTimeGPS
6246 from lal.lal
import MSUN_SI
as LAL_MSUN_SI
6247 from lal.lal
import PC_SI
as LAL_PC_SI
6248 import lalsimulation
as lalsim
6249 from math
import cos,sin,sqrt
6250 from igwn_ligolw
import utils
6253 from numpy
import arange
6258 colors_inj={
'H1':
'r',
'L1':
'g',
'V1':
'm',
'I1':
'b',
'J1':
'y'}
6259 colors_rec={
'H1':
'k',
'L1':
'k',
'V1':
'k',
'I1':
'k',
'J1':
'k'}
6265 deltaF = 1.0 / (length* deltaT);
6269 timeToFreqFFTPlan = CreateForwardREAL8FFTPlan(
int(length), 1 );
6270 window=CreateTukeyREAL8Window(
int(length),2.0*pad*srate/length);
6271 WinNorm = sqrt(window.sumofsquares/window.data.length);
6274 strainT=CreateREAL8TimeSeries(
"strainT",segStart,0.0,1.0/srate,DimensionlessUnit,
int(length));
6275 strainF= CreateCOMPLEX16FrequencySeries(
"strainF",segStart, 0.0, deltaF, DimensionlessUnit,
int(length/2. +1));
6282 inj_strains=dict((i,{
"T":{
'x':
None,
'strain':
None},
"F":{
'x':
None,
'strain':
None}})
for i
in ifos)
6283 rec_strains=dict((i,{
"T":{
'x':
None,
'strain':
None},
"F":{
'x':
None,
'strain':
None}})
for i
in ifos)
6288 if siminspiral
is not None:
6291 xmldoc = utils.load_filename(siminspiral)
6292 tbl = lsctables.SimInspiralTable.get_table(xmldoc)
6298 e = sys.exc_info()[0]
6300 print(
"Cannot read event %s from table %s. Won't plot injected waveform \n"%(event,siminspiral))
6303 REAL8time=tbl.geocent_end_time+1e-9*tbl.geocent_end_time_ns
6304 GPStime=LIGOTimeGPS(REAL8time)
6310 phiRef=tbl.coa_phase
6321 iota=tbl.inclination
6322 print(
"WARNING: Defaulting to inj_fref =100Hz to plot the injected WF. This is hardcoded since xml table does not carry this information\n")
6326 wf=
str(tbl.waveform)
6328 injapproximant=lalsim.GetApproximantFromString(wf)
6329 amplitudeO=
int(tbl.amp_order )
6330 phaseO=lalsim.GetOrderFromString(wf)
6332 waveFlags=lal.CreateDict()
6333 lalsim.SimInspiralWaveformParamsInsertPNAmplitudeOrder(waveFlags, amplitudeO)
6334 lalsim.SimInspiralWaveformParamsInsertPNPhaseOrder(waveFlags, phaseO)
6338 psi=tbl.polarization
6340 if SimInspiralImplementedFDApproximants(injapproximant):
6342 [plus,cross]=SimInspiralChooseFDWaveform(m1, m2, s1x, s1y, s1z,s2x,s2y,s2z,r, iota, phiRef,
6344 deltaF, f_min, f_max, f_ref,
6345 waveFlags, injapproximant)
6346 elif SimInspiralImplementedTDApproximants(injapproximant):
6348 [plus,cross]=SimInspiralChooseTDWaveform(m1, m2, s1x, s1y, s1z,s2x,s2y,s2z, r, iota, phiRef,
6350 deltaT, f_min, f_ref,
6351 waveFlags, injapproximant)
6353 print(
"\nThe approximant %s doesn't seem to be recognized by lalsimulation!\n Skipping WF plots\n"%injapproximant)
6357 fp, fc = ComputeDetAMResponse(lal.cached_detector_by_prefix[ifo].response, ra, dec, psi, GreenwichMeanSiderealTime(REAL8time))
6361 for k
in np.arange(strainT.data.length):
6362 if k<plus.data.length:
6363 strainT.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
6365 strainT.data.data[k]=0.0
6366 strainT.data.data[k]*=window.data.data[k]
6368 inj_strains[ifo][
"T"][
'strain']=np.array([strainT.data.data[k]
for k
in arange(plus.data.length)])
6369 inj_strains[ifo][
"T"][
'x']=np.array([REAL8time - deltaT*(plus.data.length-1-k)
for k
in np.arange(plus.data.length)])
6372 for j
in arange(strainF.data.length):
6373 strainF.data.data[j]=0.0
6374 REAL8TimeFreqFFT(strainF,strainT,timeToFreqFFTPlan);
6375 for j
in arange(strainF.data.length):
6376 strainF.data.data[j]/=WinNorm
6378 inj_strains[ifo][
"F"][
'strain']=np.array([strainF.data.data[k]
for k
in arange(
int(strainF.data.length))])
6379 inj_strains[ifo][
"F"][
'x']=np.array([strainF.f0+ k*strainF.deltaF
for k
in arange(
int(strainF.data.length))])
6380 elif inj_domain==
'F':
6381 for k
in np.arange(strainF.data.length):
6382 if k<plus.data.length:
6383 strainF.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
6385 strainF.data.data[k]=0.0
6387 inj_strains[ifo][
"F"][
'strain']=np.array([strainF.data.data[k]
for k
in arange(
int(strainF.data.length))])
6388 inj_strains[ifo][
"F"][
'x']=np.array([strainF.f0+ k*strainF.deltaF
for k
in arange(
int(strainF.data.length))])
6392 _,which=pos._posMap()
6394 if 'time' in pos.names:
6395 REAL8time=pos[
'time'].samples[which][0]
6396 elif 'time_maxl' in pos.names:
6397 REAL8time=pos[
'time_maxl'].samples[which][0]
6398 elif 'time_min' in pos.names
and 'time_max' in pos.names:
6399 REAL8time=pos[
'time_min'].samples[which][0]+0.5*(pos[
'time_max'].samples[which][0]-pos[
'time_min'].samples[which][0])
6401 print(
"ERROR: could not find any time parameter in the posterior file. Not plotting the WF...\n")
6407 approximant=
int(pos[
'LAL_APPROXIMANT'].samples[which][0])
6408 amplitudeO=
int(pos[
'LAL_AMPORDER'].samples[which][0])
6409 phaseO=
int(pos[
'LAL_PNORDER'].samples[which][0])
6413 GPStime=LIGOTimeGPS(REAL8time)
6415 q=pos[
'q'].samples[which][0]
6416 mc=pos[
'mc'].samples[which][0]
6418 if 'dist' in pos.names:
6419 D=pos[
'dist'].samples[which][0]
6420 elif 'distance' in pos.names:
6421 D=pos[
'distance'].samples[which][0]
6422 elif 'logdistance' in pos.names:
6423 D=np.exp(pos[
'distance'].samples[which][0])
6427 if 'phi_orb' in pos.names:
6428 phiRef=pos[
'phi_orb'].samples[which][0]
6429 elif 'phase' in pos.names:
6430 phiRef=pos[
'phase'].samples[which][0]
6431 elif 'phase_maxl' in pos.names:
6432 phiRef=pos[
'phase_maxl'].samples[which][0]
6433 print(
'INFO: phi_orb not estimated, using maximum likelihood value')
6435 print(
'WARNING: phi_orb not found in posterior files. Defaulting to 0.0 which is probably *not* what you want\n')
6439 for name
in [
'flow',
'f_lower']:
6440 if name
in pos.names:
6441 f_min=pos[name].samples[which][0]
6446 for name
in [
'fref',
'f_ref',
'f_Ref',
'fRef']:
6447 if name
in pos.names:
6450 Fref = np.unique(pos[fname].samples)
6452 print(
"ERROR: Expected f_ref to be constant for all samples. Can't tell which value was injected! Defaulting to 100 Hz\n")
6457 print(
"WARNING: Could not read fref from posterior file! Defaulting to 100 Hz\n")
6460 a = pos[
'a1'].samples[which][0]
6461 the = pos[
'theta_spin1'].samples[which][0]
6462 phi = pos[
'phi_spin1'].samples[which][0]
6463 s1x = (a * sin(the) * cos(phi));
6464 s1y = (a * sin(the) * sin(phi));
6465 s1z = (a * cos(the));
6466 a = pos[
'a2'].samples[which][0]
6467 the = pos[
'theta_spin2'].samples[which][0]
6468 phi = pos[
'phi_spin2'].samples[which][0]
6469 s2x = (a * sin(the) * cos(phi));
6470 s2y = (a * sin(the) * sin(phi));
6471 s2z = (a * cos(the));
6472 iota=pos[
'inclination'].samples[which][0]
6475 iota, s1x, s1y, s1z, s2x, s2y, s2z=lalsim.SimInspiralTransformPrecessingNewInitialConditions(pos[
'theta_jn'].samples[which][0], pos[
'phi_JL'].samples[which][0], pos[
'tilt1'].samples[which][0], pos[
'tilt2'].samples[which][0], pos[
'phi12'].samples[which][0], pos[
'a1'].samples[which][0], pos[
'a2'].samples[which][0], m1, m2, f_ref, phiRef)
6477 if 'a1z' in pos.names:
6478 s1z=pos[
'a1z'].samples[which][0]
6479 elif 'a1' in pos.names:
6480 s1z=pos[
'a1'].samples[which][0]
6483 if 'a2z' in pos.names:
6484 s2z=pos[
'a2z'].samples[which][0]
6485 elif 'a2' in pos.names:
6486 s2z=pos[
'a2'].samples[which][0]
6490 if 'inclination' in pos.names:
6491 iota=pos[
'inclination'].samples[which][0]
6493 iota=pos[
'theta_jn'].samples[which][0]
6497 approximant=
int(pos[
'LAL_APPROXIMANT'].samples[which][0])
6498 amplitudeO=
int(pos[
'LAL_AMPORDER'].samples[which][0])
6499 phaseO=
int(pos[
'LAL_PNORDER'].samples[which][0])
6501 waveFlags=lal.CreateDict()
6502 lalsim.SimInspiralWaveformParamsInsertPNAmplitudeOrder(waveFlags, amplitudeO)
6503 lalsim.SimInspiralWaveformParamsInsertPNPhaseOrder(waveFlags, phaseO)
6504 if 'tideO' in pos.names:
6505 tidalO=
int(pos[
'tideO'].samples[which][0])
6506 lalsim.SimInspiralWaveformParamsInsertPNTidalOrder(waveFlags, tidalO)
6507 if 'spinO' in pos.names:
6508 spinO=
int(pos[
'spinO'].samples[which][0])
6509 lalsim.SimInspiralWaveformParamsInsertPNSpinOrder(waveFlags, spinO)
6510 if 'lambda1' in pos.names:
6511 lalsim.SimInspiralWaveformParamsInsertTidalLambda1(waveFlags, pos[
'lambda1'].samples[which][0])
6512 if 'lambda2' in pos.names:
6513 lalsim.SimInspiralWaveformParamsInsertTidalLambda2(waveFlags, pos[
'lambda2'].samples[which][0])
6515 if SimInspiralImplementedFDApproximants(approximant):
6517 [plus,cross]=SimInspiralChooseFDWaveform(m1, m2, s1x, s1y, s1z,s2x,s2y,s2z,r, iota, phiRef,
6519 deltaF, f_min, f_max, f_ref,
6520 waveFlags, approximant)
6521 elif SimInspiralImplementedTDApproximants(approximant):
6523 [plus,cross]=SimInspiralChooseTDWaveform(m1, m2, s1x, s1y, s1z,s2x,s2y,s2z, r, iota, phiRef,
6525 deltaT, f_min, f_ref,
6526 waveFlags, approximant)
6528 print(
"The approximant %s doesn't seem to be recognized by lalsimulation!\n Skipping WF plots\n"%approximant)
6531 ra=pos[
'ra'].samples[which][0]
6532 dec=pos[
'dec'].samples[which][0]
6533 psi=pos[
'psi'].samples[which][0]
6536 fp, fc = ComputeDetAMResponse(lal.cached_detector_by_prefix[ifo].response, ra, dec, psi, GreenwichMeanSiderealTime(REAL8time))
6540 for k
in np.arange(strainT.data.length):
6541 if k<plus.data.length:
6542 strainT.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
6544 strainT.data.data[k]=0.0
6545 strainT.data.data[k]*=window.data.data[k]
6547 rec_strains[ifo][
"T"][
'strain']=np.array([strainT.data.data[k]
for k
in arange(plus.data.length)])
6548 rec_strains[ifo][
"T"][
'x']=np.array([REAL8time - deltaT*(plus.data.length-1-k)
for k
in np.arange(plus.data.length)])
6551 for j
in arange(strainF.data.length):
6552 strainF.data.data[j]=0.0
6553 REAL8TimeFreqFFT(strainF,strainT,timeToFreqFFTPlan);
6554 for j
in arange(strainF.data.length):
6555 strainF.data.data[j]/=WinNorm
6557 rec_strains[ifo][
"F"][
'strain']=np.array([strainF.data.data[k]
for k
in arange(
int(strainF.data.length))])
6558 rec_strains[ifo][
"F"][
'x']=np.array([strainF.f0+ k*strainF.deltaF
for k
in arange(
int(strainF.data.length))])
6559 elif rec_domain==
'F':
6560 for k
in np.arange(strainF.data.length):
6561 if k<plus.data.length:
6562 strainF.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
6564 strainF.data.data[k]=0.0
6566 rec_strains[ifo][
"F"][
'strain']=np.array([strainF.data.data[k]
for k
in arange(
int(strainF.data.length))])
6567 rec_strains[ifo][
"F"][
'x']=np.array([strainF.f0+ k*strainF.deltaF
for k
in arange(
int(strainF.data.length))])
6569 myfig=plt.figure(1,figsize=(23,15))
6577 if rec_domain
is not None and inj_domain
is not None:
6578 if rec_domain==
"T" and inj_domain==
"T":
6580 elif rec_domain
is not None:
6583 elif inj_domain
is not None:
6587 A,axes=plt.subplots(nrows=rows,ncols=cols,sharex=
False,sharey=
False)
6588 plt.setp(A,figwidth=23,figheight=15)
6589 for (r,i)
in zip(np.arange(rows),ifos):
6590 for c
in np.arange(cols):
6592 if type(ax)==np.ndarray:
6596 if rec_strains[i][
"T"][
'strain']
is not None or rec_strains[i][
"F"][
'strain']
is not None:
6598 if global_domain==
"T":
6599 ax.plot(rec_strains[i][
"T"][
'x'],rec_strains[i][
"T"][
'strain'],colors_rec[i],alpha=0.5,label=
'%s maP'%i)
6601 data=rec_strains[i][
"F"][
'strain']
6602 f=rec_strains[i][
"F"][
'x']
6603 mask=np.logical_and(f>=f_min,f<=plot_fmax)
6605 ax.semilogx(f[mask],ys[mask].real,
'.-',color=colors_rec[i],alpha=0.5,label=
'%s maP'%i)
6607 data=rec_strains[i][
"F"][
'strain']
6608 f=rec_strains[i][
"F"][
'x']
6609 mask=np.logical_and(f>=f_min,f<=plot_fmax)
6611 ax.loglog(f[mask],abs(ys[mask]),
'--',color=colors_rec[i],alpha=0.5,linewidth=4)
6612 ax.set_xlim([min(f[mask]),
max(f[mask])])
6613 ax.grid(
True,which=
'both')
6614 if inj_strains[i][
"T"][
'strain']
is not None or inj_strains[i][
"F"][
'strain']
is not None:
6616 if global_domain==
"T":
6617 ax.plot(inj_strains[i][
"T"][
'x'],inj_strains[i][
"T"][
'strain'],colors_inj[i],alpha=0.5,label=
'%s inj'%i)
6619 data=inj_strains[i][
"F"][
'strain']
6620 f=inj_strains[i][
"F"][
'x']
6621 mask=np.logical_and(f>=f_min,f<=plot_fmax)
6623 ax.plot(f[mask],ys[mask].real,
'.-',color=colors_inj[i],alpha=0.5,label=
'%s inj'%i)
6625 data=inj_strains[i][
"F"][
'strain']
6626 f=inj_strains[i][
"F"][
'x']
6627 mask=np.logical_and(f>=f_min,f<=plot_fmax)
6629 ax.loglog(f[mask],abs(ys[mask]),
'--',color=colors_inj[i],alpha=0.5,linewidth=4)
6630 ax.set_xlim([min(f[mask]),
max(f[mask])])
6631 ax.grid(
True,which=
'both')
6635 if global_domain==
"T":
6636 ax.set_title(
r"$h(t)$",fontsize=font_size)
6638 ax.set_title(
r"$\Re[h(f)]$",fontsize=font_size)
6640 ax.set_title(
r"$|h(f)|$",fontsize=font_size)
6643 if global_domain==
"T":
6644 ax.set_xlabel(
"time [s]",fontsize=font_size)
6646 ax.set_xlabel(
"frequency [Hz]",fontsize=font_size)
6648 ax.set_xlabel(
"frequency [Hz]",fontsize=font_size)
6650 ax.legend(loc=
'best')
6654 A.savefig(os.path.join(path,
'WF_DetFrame.png'),bbox_inches=
'tight')
6655 return inj_strains,rec_strains
6659 myfig2=plt.figure(figsize=(15,15),dpi=500)
6660 ax=plt.subplot(1,1,1)
6661 colors={
'H1':
'r',
'L1':
'g',
'V1':
'm',
'I1':
'k',
'J1':
'y'}
6667 if not os.path.isfile(f):
6668 print(
"PSD file %s has not been found and won't be plotted\n"%f)
6681 idx=f.find(
'-PSD.dat')
6683 freqs[ifo.lower()] = freq
6686 for (f,d)
in zip(freq,data):
6687 if f>f_min
and d!=0.0
and np.isfinite(d):
6690 plt.loglog(fr,da,colors[ifo],label=ifo,alpha=0.5,linewidth=3)
6691 plt.xlim([min(fr),
max(fr)])
6692 plt.xlabel(
"Frequency [Hz]",fontsize=26)
6693 plt.ylabel(
"PSD",fontsize=26)
6694 plt.legend(loc=
'best')
6695 plt.grid(which=
'both')
6698 myfig2.savefig(os.path.join(outpath,
'PSD.png'),bbox_inches=
'tight')
6700 myfig2.savefig(os.path.join(outpath,
'PSD.png'))
def cred_level(cl, x):
    """Return the sample value at credible level ``cl``.

    Sorts ``x`` along its first axis and returns the element at index
    ``int(cl * len(x))``, i.e. the empirical ``cl`` quantile of the
    samples.

    @param cl Credible level, between 0 and 1.  Note that ``cl`` close
        enough to 1 that ``int(cl*len(x)) == len(x)`` raises
        ``IndexError``, exactly as the original did.
    @param x Array of samples (sorted along axis 0).
    """
    # PEP 8 (E731) fix: the original bound a lambda to the name
    # cred_level; an equivalent def keeps the same callable interface.
    return np.sort(x, axis=0)[int(cl * len(x))]
6708 """Return location of lower or upper confidence levels
6711 cl: Confidence level to return the bound of.
6712 lower: If ``
True``,
return the lower bound, otherwise
return the upper bound.
def spline_angle_xform(delta_psi):
    """Returns the angle in degrees corresponding to the spline
    calibration parameters delta_psi.

    The phase correction is encoded as the Moebius transform
    ``rot = (2 + i*delta_psi) / (2 - i*delta_psi)``; the returned value
    is the argument of that complex rotation, converted to degrees.
    Works elementwise on arrays as well as on scalars.

    NOTE(review): the ``def`` header was lost in extraction and is
    restored here; the signature is grounded by the call site
    ``xform=spline_angle_xform`` and the parameter name used in the body.
    """
    rot = (2.0 + 1.0j*delta_psi)/(2.0 - 1.0j*delta_psi)

    return 180.0/np.pi*np.arctan2(np.imag(rot), np.real(rot))
6728def plot_spline_pos(logf, ys, nf=100, level=0.9, color='k', label=None, xform=None):
6729 """Plot calibration posterior estimates for a spline model in log space.
6731 logf: The (log) location of spline control points.
6732 ys: List of posterior draws of function at control points ``logf``
6733 nx: Number of points to evaluate spline at for plotting.
6734 level: Credible level to fill
in.
6735 color: Color to plot
with.
6736 label: Label
for plot.
6737 xform: Function to transform the spline into plotted values.
6740 fs = np.linspace(f.min(), f.max(), nf)
6742 data = np.zeros((ys.shape[0], nf))
6749 mu = np.mean(zs, axis=0)
6752 plt.errorbar(np.exp(logf), mu, yerr=[lower_cl, upper_cl], fmt=
'.', color=color, lw=4, alpha=0.5, capsize=0)
6754 for i, samp
in enumerate(ys):
6756 temp = interpolate.spline(logf, samp, np.log(fs))
6757 except AttributeError:
6758 calSpline = interpolate.InterpolatedUnivariateSpline(logf, samp, k=3, ext=2)
6759 temp = calSpline(np.log(fs))
6763 data[i] = xform(temp)
6765 line, = plt.plot(fs, np.mean(data, axis=0), color=color, label=label)
6766 color = line.get_color()
6767 plt.fill_between(fs,
cred_interval(data, level),
cred_interval(data, level, lower=
False), color=color, alpha=.1, linewidth=0.1)
6768 plt.xlim(f.min()-.5, f.max()+50)
6771 fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(15, 15), dpi=500)
6778 ifos = np.unique([param.split(
'_')[0]
for param
in params
if 'spcal_freq' in param])
6780 if ifo==
'h1': color =
'r'
6781 elif ifo==
'l1': color =
'g'
6782 elif ifo==
'v1': color =
'm'
6786 freq_params = np.sort([param
for param
in params
if
6787 '{0}_spcal_freq'.format(ifo)
in param])
6789 logfreqs = np.log([pos[param].median
for param
in freq_params])
6793 amp_params = np.sort([param
for param
in params
if
6794 '{0}_spcal_amp'.format(ifo)
in param])
6795 if len(amp_params) > 0:
6796 amp = 100*np.column_stack([pos[param].samples
for param
in amp_params])
6797 plot_spline_pos(logfreqs, amp, color=color, level=level, label=
"{0} (mean, {1}%)".format(ifo.upper(),
int(level*100)))
6801 phase_params = np.sort([param
for param
in params
if
6802 '{0}_spcal_phase'.format(ifo)
in param])
6803 if len(phase_params) > 0:
6804 phase = np.column_stack([pos[param].samples
for param
in phase_params])
6806 plot_spline_pos(logfreqs, phase, color=color, level=level, label=
"{0} (mean, {1}%)".format(ifo.upper(),
int(level*100)), xform=spline_angle_xform)
6808 ax1.tick_params(labelsize=.75*font_size)
6809 ax2.tick_params(labelsize=.75*font_size)
6811 plt.legend(loc=
'upper right', prop={
'size':.75*font_size}, framealpha=0.1)
6813 plt.legend(loc=
'upper right', prop={
'size':.75*font_size})
6814 ax1.set_xscale(
'log')
6815 ax2.set_xscale(
'log')
6817 ax2.set_xlabel(
'Frequency (Hz)', fontsize=font_size)
6818 ax1.set_ylabel(
'Amplitude (%)', fontsize=font_size)
6819 ax2.set_ylabel(
'Phase (deg)', fontsize=font_size)
6821 outp = os.path.join(outpath,
'calibration.png')
6824 fig.savefig(outp, bbox_inches=
'tight')
6831 from lalinference.lalinference
import SimBurstChooseFDWaveform,SimBurstChooseTDWaveform
6832 from lalinference.lalinference
import SimBurstImplementedFDApproximants,SimBurstImplementedTDApproximants
6833 from lal.lal
import CreateREAL8TimeSeries,CreateForwardREAL8FFTPlan,CreateTukeyREAL8Window,CreateCOMPLEX16FrequencySeries,DimensionlessUnit,REAL8TimeFreqFFT,CreateReverseREAL8FFTPlan
6834 from lal.lal
import LIGOTimeGPS
6835 import lalinference
as lalinf
6836 from lal
import ComputeDetAMResponse, GreenwichMeanSiderealTime, LIGOTimeGPS
6838 from math
import cos,sin,sqrt
6839 from igwn_ligolw
import lsctables
6840 from igwn_ligolw
import utils
6843 from numpy
import arange,real,absolute,fabs,pi
6844 from matplotlib
import pyplot
as plt
6849 colors_inj={
'H1':
'r',
'L1':
'g',
'V1':
'm',
'I1':
'b',
'J1':
'y'}
6850 colors_rec={
'H1':
'k',
'L1':
'k',
'V1':
'k',
'I1':
'k',
'J1':
'k'}
6852 from igwn_ligolw
import ligolw
6853 from igwn_ligolw
import table
6854 class LIGOLWContentHandlerExtractSimBurstTable(ligolw.LIGOLWContentHandler):
6855 def __init__(self,document):
6856 ligolw.LIGOLWContentHandler.__init__(self,document)
6857 self.tabname=lsctables.SimBurstTable.tableName
6859 self.tableElementName=
''
6860 def startElement(self,name,attrs):
6861 if attrs.has_key(
'Name')
and table.Table.TableName(attrs[
'Name'])==self.tabname:
6862 self.tableElementName=name
6864 ligolw.LIGOLWContentHandler.startElement(self,name,attrs)
6867 ligolw.LIGOLWContentHandler.startElement(self,name,attrs)
6868 def endElement(self,name):
6869 if self.intable: ligolw.LIGOLWContentHandler.endElement(self,name)
6870 if self.intable
and name==self.tableElementName: self.intable=
False
6877 deltaF = 1.0 / (length* deltaT);
6881 timeToFreqFFTPlan = CreateForwardREAL8FFTPlan(
int(length), 1 );
6882 freqToTimeFFTPlan = CreateReverseREAL8FFTPlan(
int(length), 1 );
6883 window=CreateTukeyREAL8Window(
int(length),2.0*pad*srate/length);
6885 segStart=939936910.000
6886 strainFinj= CreateCOMPLEX16FrequencySeries(
"strainF",segStart,0.0,deltaF,DimensionlessUnit,
int(length/2. +1));
6887 strainTinj=CreateREAL8TimeSeries(
"strainT",segStart,0.0,1.0/srate,DimensionlessUnit,
int(length));
6888 strainFrec= CreateCOMPLEX16FrequencySeries(
"strainF",segStart,0.0,deltaF,DimensionlessUnit,
int(length/2. +1));
6889 strainTrec=CreateREAL8TimeSeries(
"strainT",segStart,0.0,1.0/srate,DimensionlessUnit,
int(length));
6900 inj_strains=dict((i,{
"T":{
'x':
None,
'strain':
None},
"F":{
'x':
None,
'strain':
None}})
for i
in ifos)
6901 rec_strains=dict((i,{
"T":{
'x':
None,
'strain':
None},
"F":{
'x':
None,
'strain':
None}})
for i
in ifos)
6906 if simburst
is not None:
6909 xmldoc = utils.load_filename(simburst,contenthandler=LIGOLWContentHandlerExtractSimBurstTable)
6910 tbl = lsctables.SimBurstTable.get_table(xmldoc)
6916 print(
"Cannot read event %s from table %s. Won't plot injected waveform \n"%(event,simburst))
6919 REAL8time=tbl.time_geocent_gps+1e-9*tbl.time_geocent_gps_ns
6920 GPStime=LIGOTimeGPS(REAL8time)
6921 GlobREAL8time=(REAL8time)
6922 strainTinj.epoch=LIGOTimeGPS(round(GlobREAL8time,0)-seglen/2.)
6923 strainFinj.epoch=LIGOTimeGPS(round(GlobREAL8time,0)-seglen/2.)
6928 polar_e_angle=tbl.pol_ellipse_angle
6929 polar_e_ecc=tbl.pol_ellipse_e
6931 BurstExtraParams=
None
6932 wf=
str(tbl.waveform)
6934 injapproximant=lalinf.GetBurstApproximantFromString(wf)
6939 if SimBurstImplementedFDApproximants(injapproximant):
6941 [plus,cross]=SimBurstChooseFDWaveform(deltaF, deltaT, f0, q,dur, f_min, f_max,hrss,polar_e_angle ,polar_e_ecc,BurstExtraParams, injapproximant)
6942 elif SimBurstImplementedTDApproximants(injapproximant):
6944 [plus,cross]=SimBurstChooseTDWaveform(deltaT, f0, q,dur, f_min, f_max,hrss,polar_e_angle ,polar_e_ecc,BurstExtraParams, injapproximant)
6946 print(
"\nThe approximant %s doesn't seem to be recognized by lalinference!\n Skipping WF plots\n"%injapproximant)
6950 fp, fc = ComputeDetAMResponse(lal.cached_detector_by_prefix[ifo].response, ra, dec, psi, GreenwichMeanSiderealTime(REAL8time))
6953 tCinstrain=np.floor(REAL8time-float(strainTinj.epoch))/deltaT
6956 tSinstrain=
int( (REAL8time-fabs(float(plus.epoch)) - fabs(float(strainTinj.epoch)))/deltaT)
6957 rem=(REAL8time-fabs(float(plus.epoch)) - fabs(float(strainTinj.epoch)))/deltaT-tSinstrain
6960 for k
in np.arange(tSinstrain):
6961 strainTinj.data.data[k]=0.0
6963 for k
in np.arange(plus.data.length):
6964 strainTinj.data.data[k+tSinstrain]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
6966 for k
in np.arange(strainTinj.data.length- (tSinstrain +plus.data.length)):
6967 strainTinj.data.data[k+tSinstrain+plus.data.length]=0.0
6968 for k
in np.arange(strainTinj.data.length):
6969 strainTinj.data.data[k]*=window.data.data[k]
6970 np.savetxt(
'file.out',zip(np.array([strainTinj.epoch + j*deltaT
for j
in arange(strainTinj.data.length)]),np.array([strainTinj.data.data[j]
for j
in arange(strainTinj.data.length)])))
6972 inj_strains[ifo][
"T"][
'strain']=np.array([strainTinj.data.data[j]
for j
in arange(strainTinj.data.length)])
6973 inj_strains[ifo][
"T"][
'x']=np.array([strainTinj.epoch + j*deltaT
for j
in arange(strainTinj.data.length)])
6975 for j
in arange(strainFinj.data.length):
6976 strainFinj.data.data[j]=0.0
6977 REAL8TimeFreqFFT(strainFinj,strainTinj,timeToFreqFFTPlan);
6978 twopit=2.*np.pi*(rem*deltaT)
6979 for k
in arange(strainFinj.data.length):
6980 re = cos(twopit*deltaF*k)
6981 im = -sin(twopit*deltaF*k)
6982 strainFinj.data.data[k]*= (re + 1j*im);
6984 inj_strains[ifo][
"F"][
'strain']=np.array([strainFinj.data.data[k]
for k
in arange(
int(strainFinj.data.length))])
6985 inj_strains[ifo][
"F"][
'x']=np.array([strainFinj.f0+ k*strainFinj.deltaF
for k
in arange(
int(strainFinj.data.length))])
6986 elif inj_domain==
'F':
6987 for k
in np.arange(strainFinj.data.length):
6988 if k<plus.data.length:
6989 strainFinj.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
6991 strainFinj.data.data[k]=0.0
6992 twopit=2.*np.pi*(REAL8time-float(strainFinj.epoch))
6993 for k
in arange(strainFinj.data.length):
6994 re = cos(twopit*deltaF*k)
6995 im = -sin(twopit*deltaF*k)
6996 strainFinj.data.data[k]*= (re + 1j*im);
6998 inj_strains[ifo][
"F"][
'strain']=np.array([strainFinj.data.data[k]
for k
in arange(
int(strainFinj.data.length))])
6999 inj_strains[ifo][
"F"][
'x']=np.array([strainFinj.f0+ k*strainFinj.deltaF
for k
in arange(
int(strainFinj.data.length))])
7002 if f0
is not None and f0
is not np.nan:
7003 if q
is not None and q
is not np.nan:
7005 if f0-6.*sigmaF>plot_fmin:
7006 plot_fmin=f0-6.*sigmaF
7007 if f0+6.*sigmaF<plot_fmax:
7008 plot_fmax=f0+6.*sigmaF
7010 if REAL8time-6.*sigmaT<plot_tmin:
7011 plot_tmin=REAL8time-6.*sigmaT
7012 if REAL8time+6.*sigmaT>plot_tmax:
7013 plot_tmax=REAL8time+6.*sigmaT
7015 if dur
is not None and dur
is not np.nan:
7016 sigmaF=1./sqrt(2.)/pi/dur
7017 if 0+6.*sigmaF<plot_fmax:
7018 plot_fmax=0+6.*sigmaF
7021 if REAL8time-6.*sigmaT<plot_tmin:
7022 plot_tmin=REAL8time-6.*sigmaT
7023 if REAL8time+6.*sigmaT>plot_tmax:
7024 plot_tmax=REAL8time+6.*sigmaT
7030 _,which=pos._posMap()
7032 if 'time' in pos.names:
7033 REAL8time=pos[
'time'].samples[which][0]
7034 elif 'time_maxl' in pos.names:
7035 REAL8time=pos[
'time_maxl'].samples[which][0]
7036 elif 'time_mean' in pos.names:
7037 REAL8time=pos[
'time_mean'].samples[which][0]
7038 elif 'time_min' in pos.names
and 'time_max' in pos.names:
7039 REAL8time=pos[
'time_min'].samples[which][0]+0.5*(pos[
'time_max'].samples[which][0]-pos[
'time_min'].samples[which][0])
7041 print(
"ERROR: could not find any time parameter in the posterior file. Not plotting the WF...\n")
7048 approximant=
int(pos[
'LAL_APPROXIMANT'].samples[which][0])
7052 GPStime=LIGOTimeGPS(REAL8time)
7053 if GlobREAL8time
is None:
7054 GlobREAL8time=REAL8time
7055 strainTrec.epoch=LIGOTimeGPS(round(GlobREAL8time,0)-seglen/2.)
7056 strainFrec.epoch=LIGOTimeGPS(round(GlobREAL8time,0)-seglen/2.)
7057 if "duration" in pos.names:
7058 dur=pos[
"duration"].samples[which][0]
7061 if "quality" in pos.names:
7062 q=pos[
'quality'].samples[which][0]
7065 if 'frequency' in pos.names:
7066 f0=pos[
'frequency'].samples[which][0]
7070 hrss=pos[
'hrss'].samples[which][0]
7072 hrss=np.exp(pos[
'loghrss'].samples[which][0])
7073 if np.isnan(q)
and not np.isnan(dur):
7076 if 'alpha' in pos.names:
7077 alpha=pos[
'alpha'].samples[which][0]
7079 polar_e_ecc=pos[
'polar_eccentricity'].samples[which][0]
7080 elif 'polar_ellipse_angle' in pos.names:
7081 polar_e_angle=pos[
'polar_ellipse_angle'].samples[which][0]
7082 polar_e_ecc=pos[
'polar_eccentricity'].samples[which][0]
7084 BurstExtraParams=
None
7088 if SimBurstImplementedFDApproximants(approximant):
7090 [plus,cross]=SimBurstChooseFDWaveform(deltaF, deltaT, f0, q,dur, f_min, f_max,hrss,polar_e_angle ,polar_e_ecc,BurstExtraParams, approximant)
7091 elif SimBurstImplementedTDApproximants(approximant):
7093 [plus,cross]=SimBurstChooseTDWaveform(deltaT, f0, q,dur, f_min, f_max,hrss,polar_e_angle ,polar_e_ecc,BurstExtraParams, approximant)
7095 print(
"The approximant %s doesn't seem to be recognized by lalinference!\n Skipping WF plots\n"%approximant)
7097 ra=pos[
'ra'].samples[which][0]
7098 dec=pos[
'dec'].samples[which][0]
7099 psi=pos[
'psi'].samples[which][0]
7102 fp, fc = ComputeDetAMResponse(lal.cached_detector_by_prefix[ifo].response, ra, dec, psi, GreenwichMeanSiderealTime(REAL8time))
7105 tCinstrain=np.floor(REAL8time-float(strainTrec.epoch))/deltaT
7107 tSinstrain=
int( (REAL8time-fabs(float(plus.epoch)) - fabs(float(strainTrec.epoch)))/deltaT)
7110 rem=(REAL8time-fabs(float(plus.epoch)) - fabs(float(strainTrec.epoch)))/deltaT-tSinstrain
7114 for k
in np.arange(tSinstrain):
7115 strainTrec.data.data[k]=0.0
7117 for k
in np.arange(plus.data.length):
7118 strainTrec.data.data[k+tSinstrain]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
7120 for k
in np.arange(strainTrec.data.length- (tSinstrain +plus.data.length)):
7121 strainTrec.data.data[k+tSinstrain+plus.data.length]=0.0
7122 for k
in np.arange(strainTrec.data.length):
7123 strainTrec.data.data[k]*=window.data.data[k]
7125 rec_strains[ifo][
"T"][
'strain']=np.array([strainTrec.data.data[j]
for j
in arange(strainTrec.data.length)])
7126 rec_strains[ifo][
"T"][
'x']=np.array([strainTrec.epoch + j*deltaT
for j
in arange(strainTrec.data.length)])
7128 for j
in arange(strainFrec.data.length):
7129 strainFrec.data.data[j]=0.0
7130 REAL8TimeFreqFFT(strainFrec,strainTrec,timeToFreqFFTPlan);
7131 twopit=2.*np.pi*(rem*deltaT)
7132 for k
in arange(strainFrec.data.length):
7133 re = cos(twopit*deltaF*k)
7134 im = -sin(twopit*deltaF*k)
7135 strainFrec.data.data[k]*= (re + 1j*im);
7137 rec_strains[ifo][
"F"][
'strain']=np.array([strainFrec.data.data[k]
for k
in arange(
int(strainFrec.data.length))])
7138 rec_strains[ifo][
"F"][
'x']=np.array([strainFrec.f0+ k*strainFrec.deltaF
for k
in arange(
int(strainFrec.data.length))])
7139 elif rec_domain==
'F':
7140 for k
in np.arange(strainFrec.data.length):
7141 if k<plus.data.length:
7142 strainFrec.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
7144 strainFrec.data.data[k]=0.0
7145 twopit=2.*np.pi*(REAL8time-float(strainFrec.epoch))
7146 for k
in arange(strainFrec.data.length):
7147 re = cos(twopit*deltaF*k)
7148 im = -sin(twopit*deltaF*k)
7149 strainFrec.data.data[k]*= (re + 1j*im);
7151 rec_strains[ifo][
"F"][
'strain']=np.array([strainFrec.data.data[k]
for k
in arange(
int(strainFrec.data.length))])
7152 rec_strains[ifo][
"F"][
'x']=np.array([strainFrec.f0+ k*strainFrec.deltaF
for k
in arange(
int(strainFrec.data.length))])
7155 if f0
is not None and f0
is not np.nan:
7156 if q
is not None and q
is not np.nan:
7158 if f0-6.*sigmaF>plot_fmin:
7159 plot_fmin=f0-6.*sigmaF
7160 if f0+6.*sigmaF<plot_fmax:
7161 plot_fmax=f0+6.*sigmaF
7163 if REAL8time-6.*sigmaT<plot_tmin:
7164 plot_tmin=REAL8time-6.*sigmaT
7165 if REAL8time+6.*sigmaT>plot_tmax:
7166 plot_tmax=REAL8time+6.*sigmaT
7168 if dur
is not None and dur
is not np.nan:
7169 sigmaF=1./sqrt(2.)/pi/dur
7170 if 0+6.*sigmaF<plot_fmax:
7171 plot_fmax=0+6.*sigmaF
7174 if REAL8time-6.*sigmaT<plot_tmin:
7175 plot_tmin=REAL8time-6.*sigmaT
7176 if REAL8time+6.*sigmaT>plot_tmax:
7177 plot_tmax=REAL8time+6.*sigmaT
7179 myfig=plt.figure(1,figsize=(10,7))
7187 if rec_domain
is not None and inj_domain
is not None:
7188 if rec_domain==
"T" and inj_domain==
"T":
7190 elif rec_domain
is not None:
7193 elif inj_domain
is not None:
7197 A,axes=plt.subplots(nrows=rows,ncols=cols,sharex=
False,sharey=
False)
7198 plt.setp(A,figwidth=10,figheight=7)
7199 for (r,i)
in zip(np.arange(rows),ifos):
7200 for c
in np.arange(cols):
7202 if type(ax)==np.ndarray:
7206 if rec_strains[i][
"T"][
'strain']
is not None or rec_strains[i][
"F"][
'strain']
is not None:
7208 if global_domain==
"T":
7209 ax.plot(rec_strains[i][
"T"][
'x'],rec_strains[i][
"T"][
'strain'],colors_rec[i],label=
'%s maP'%i,linewidth=5)
7210 ax.set_xlim([plot_tmin,plot_tmax])
7213 data=rec_strains[i][
"F"][
'strain']
7214 f=rec_strains[i][
"F"][
'x']
7215 mask=np.logical_and(f>=plot_fmin,f<=plot_fmax)
7217 ax.plot(f[mask],real(ys[mask]),
'-',color=colors_rec[i],label=
'%s maP'%i,linewidth=5)
7218 ax.set_xlim([plot_fmin,plot_fmax])
7220 data=rec_strains[i][
"F"][
'strain']
7221 f=rec_strains[i][
"F"][
'x']
7222 mask=np.logical_and(f>=plot_fmin,f<=plot_fmax)
7224 ax.loglog(f[mask],absolute(ys[mask]),
'--',color=colors_rec[i],linewidth=5)
7225 ax.grid(
True,which=
'both')
7226 ax.set_xlim([plot_fmin,plot_fmax])
7227 if inj_strains[i][
"T"][
'strain']
is not None or inj_strains[i][
"F"][
'strain']
is not None:
7229 if global_domain==
"T":
7230 ax.plot(inj_strains[i][
"T"][
'x'],inj_strains[i][
"T"][
'strain'],colors_inj[i],label=
'%s inj'%i,linewidth=2)
7231 ax.set_xlim([plot_tmin,plot_tmax])
7233 data=inj_strains[i][
"F"][
'strain']
7234 f=inj_strains[i][
"F"][
'x']
7235 mask=np.logical_and(f>=plot_fmin,f<=plot_fmax)
7237 ax.plot(f[mask],real(ys[mask]),
'-',color=colors_inj[i],label=
'%s inj'%i,linewidth=2)
7238 ax.set_xlim([plot_fmin,plot_fmax])
7240 data=inj_strains[i][
"F"][
'strain']
7241 f=inj_strains[i][
"F"][
'x']
7242 mask=np.logical_and(f>=plot_fmin,f<=plot_fmax)
7244 ax.loglog(f[mask],absolute(ys[mask]),
'--',color=colors_inj[i],linewidth=2)
7245 ax.grid(
True,which=
'both')
7246 ax.set_xlim([plot_fmin,plot_fmax])
7249 if global_domain==
"T":
7250 ax.set_title(
r"$h(t)$",fontsize=font_size)
7252 ax.set_title(
r"$\Re[h(f)]$",fontsize=font_size)
7254 ax.set_title(
r"$|h(f)|$",fontsize=font_size)
7257 if global_domain==
"T":
7258 ax.set_xlabel(
"time [s]",fontsize=font_size)
7260 ax.set_xlabel(
"frequency [Hz]",fontsize=font_size)
7262 ax.set_xlabel(
"frequency [Hz]",fontsize=font_size)
7264 ax.legend(loc=
'best')
7268 A.savefig(os.path.join(path,
'WF_DetFrame.png'),bbox_inches=
'tight')
7269 return inj_strains, rec_strains
7271def make_1d_table(html,legend,label,pos,pars,noacf,GreedyRes,onepdfdir,sampsdir,savepdfs,greedy,analyticLikelihood,nDownsample):
# Build the "1D marginal posterior PDFs" section of the results HTML page:
# for each parameter in `pars` that is present in the posterior `pos`, it
# produces (a) a 1D marginal PDF plot, (b) a sample-trace plot, and
# (c) optionally an autocorrelation-function plot, then assembles them
# into an HTML table written into the collapsible section.
# NOTE(review): this view of the function is a garbled extraction — several
# lines and else/try branches are missing, so comments below are hedged.
 7273 from numpy
import unique, sort
 7274 global confidenceLevels
 7275 confidence_levels=confidenceLevels
# If none of the requested parameters appear in the posterior, presumably
# the function bails out early (the branch body is not visible here).
 7280 if set(pos.names)-set(pars)==set(pos.names):
 7284 tabid=
'onedmargtable_'+label.lower()
 7285 html_ompdf=html.add_collapse_section(
'1D marginal posterior PDFs (%s)'%label,legend=legend,innertable_id=tabid)
# Table header: the three-column variant (with an Autocorrelation column)
# vs the two-column variant — selection condition (likely `noacf`) is not
# visible in this extraction.
 7288 html_ompdf_write=
'<table id="%s"><tr><th>Histogram and Kernel Density Estimate</th><th>Samples used</th><th>Autocorrelation</th></tr>'%tabid
 7290 html_ompdf_write=
'<table id="%s"><tr><th>Histogram and Kernel Density Estimate</th><th>Samples used</th></tr>'%tabid
# For MCMC output with multiple chains, compute per-chain cycle counts and
# skip sizes (used later to scale the ACL in the ACF plots).
 7293 if 'chain' in pos.names:
 7294 data,header=pos.samples()
 7295 par_index=pos.names.index(
'cycle')
 7296 chain_index=pos.names.index(
"chain")
 7297 chains=unique(pos[
"chain"].samples)
 7298 chainCycles = [sort(data[ data[:,chain_index] == chain, par_index ])
for chain
in chains]
 7301 for cycles
in chainCycles:
 7303 chainNcycles.append(cycles[-1] - cycles[0])
 7304 chainNskips.append(cycles[1] - cycles[0])
# Fallback values (1) — presumably for degenerate single-sample chains.
 7306 chainNcycles.append(1)
 7307 chainNskips.append(1)
 7308 elif 'cycle' in pos.names:
 7309 cycles = sort(pos[
'cycle'].samples)
 7311 Ncycles = cycles[-1]-cycles[0]
 7312 Nskip = cycles[1]-cycles[0]
# Main loop: one table row per requested parameter.
 7318 for par_name
in pars:
 7319 par_name=par_name.lower()
# Access presumably guarded by try/except (not visible): skip parameters
# absent from the posterior.
 7321 pos[par_name.lower()]
 7326 par_bin=GreedyRes[par_name]
 7328 print(
"Bin size is not set for %s, skipping binning."%par_name)
 7332 binParams={par_name:par_bin}
# Credible-region computation: greedy binning vs 2-step KDE, apparently
# selected by the `greedy` flag (condition lines missing here).
 7337 print(
"Using greedy 1-d binning credible regions\n")
 7339 toppoints,injectionconfidence,reses,injection_area,cl_intervals=
greedy_bin_one_param(pos,binParams,confidence_levels)
 7342 print(
"Using 2-step KDE 1-d credible regions\n")
 7344 if pos[par_name].injval
is None:
 7347 injCoords=[pos[par_name].injval]
 7348 _,reses,injstats=
kdtree_bin2Step(pos,[par_name],confidence_levels,injCoords=injCoords)
 7349 if injstats
is not None:
 7350 injectionconfidence=injstats[3]
 7351 injection_area=injstats[4]
 7353 print(
"Generating 1D plot for %s."%par_name)
# If an analytic likelihood is supplied, overlay its PDF/CDF on the plot.
 7357 if analyticLikelihood:
 7358 pdf = analyticLikelihood.pdf(par_name)
 7359 cdf = analyticLikelihood.cdf(par_name)
 7361 oneDPDFParams={par_name:50}
 7364 except Exception
as exc:
 7366 f
"failed to produce plot for {par_name}: "
 7367 f
"[{type(exc).__name__}] {exc}",
 7371 figname=par_name+
'.png'
 7372 oneDplotPath=os.path.join(onepdfdir,figname)
 7373 plotFig.savefig(oneDplotPath)
 7374 if(savepdfs): plotFig.savefig(os.path.join(onepdfdir,par_name+
'.pdf'))
 7378 print(
"r of injected value of %s (bins) = %f"%(par_name, rbins))
# Sample-trace ("chain") plot: one panel of raw samples vs iteration;
# multi-chain runs get one trace per chain plus a Gelman-Rubin title.
 7381 myfig=plt.figure(figsize=(4,3.5),dpi=200)
 7382 pos_samps=pos[par_name].samples
 7383 if not (
"chain" in pos.names):
 7386 plt.plot(pos_samps,
'k.', markersize=5, alpha=0.5, linewidth=0.0, figure=myfig)
 7387 maxLen=len(pos_samps)
 7392 data,header=pos.samples()
 7393 par_index=pos.names.index(par_name)
 7394 chain_index=pos.names.index(
"chain")
 7395 chains=unique(pos[
"chain"].samples)
 7396 chainData=[data[ data[:,chain_index] == chain, par_index ]
for chain
in chains]
 7397 chainDataRanges=[range(len(cd))
for cd
in chainData]
 7398 maxLen=
max([len(cd)
for cd
in chainData])
 7399 for rng, data
in zip(chainDataRanges, chainData):
 7400 plt.plot(rng, data, marker=
'.', markersize=1, alpha=0.5, linewidth=0.0,figure=myfig)
 7401 plt.title(
"Gelman-Rubin R = %g"%(pos.gelman_rubin(par_name)))
# Mark the injected value on the trace plot if it falls inside a slightly
# padded (±5%) sample range.
 7409 injpar=pos[par_name].injval
 7411 if injpar
is not None:
 7413 minrange=min(pos_samps)-0.05*(
max(pos_samps)-min(pos_samps))
 7414 maxrange=
max(pos_samps)+0.05*(
max(pos_samps)-min(pos_samps))
 7415 if minrange<injpar
and maxrange>injpar:
 7416 plt.axhline(injpar, color=
'r', linestyle=
'-.',linewidth=4)
 7417 myfig.savefig(os.path.join(sampsdir,figname.replace(
'.png',
'_samps.png')))
 7418 if(savepdfs): myfig.savefig(os.path.join(sampsdir,figname.replace(
'.png',
'_samps.pdf')))
# Autocorrelation-function plot (presumably skipped when `noacf` is set —
# the guard is not visible here). Single-chain and multi-chain variants.
 7422 acffig=plt.figure(figsize=(4,3.5),dpi=200)
 7423 if not (
"chain" in pos.names):
 7427 lines=plt.plot(acf,
'k.', marker=
'.', markersize=1, alpha=0.5, linewidth=0.0, figure=acffig)
 7429 if nDownsample
is None:
 7430 plt.title(
'Autocorrelation Function')
 7431 elif 'cycle' in pos.names:
 7432 last_color = lines[-1].get_color()
# Vertical line at the autocorrelation length (in units of sample skips).
 7433 plt.axvline(acl/Nskip, linestyle=
'-.', color=last_color)
 7434 plt.title(
'ACL = %i N = %i'%(acl,Neff))
 7435 acffig.savefig(os.path.join(sampsdir,figname.replace(
'.png',
'_acf.png')))
 7436 if(savepdfs): acffig.savefig(os.path.join(sampsdir,figname.replace(
'.png',
'_acf.pdf')))
# Multi-chain case: one ACF trace per chain, each annotated with its ACL.
 7446 for rng, data, Nskip, Ncycles
in zip(chainDataRanges, chainData, chainNskips, chainNcycles):
 7450 lines=plt.plot(acf,
'k.', marker=
'.', markersize=1, alpha=0.5, linewidth=0.0, figure=acffig)
 7452 if nDownsample
is not None:
 7453 last_color = lines[-1].get_color()
 7454 plt.axvline(acl/Nskip, linestyle=
'-.', color=last_color)
 7455 if nDownsample
is None:
 7456 plt.title(
'Autocorrelation Function')
 7458 plt.title(
'ACL = %i N = %i'%(
max(acls),Nsamps))
 7459 acffig.savefig(os.path.join(sampsdir,figname.replace(
'.png',
'_acf.png')))
 7460 if(savepdfs): acffig.savefig(os.path.join(sampsdir,figname.replace(
'.png',
'_acf.pdf')))
# Assemble the HTML row: PDF image, trace image, and (when generated) the
# ACF cell; a failure placeholder cell is used when ACF generation failed.
 7469 acfhtml=
'<td width="30%"><img width="100%" src="1Dsamps/'+figname.replace(
'.png',
'_acf.png')+
'"/></td>'
 7471 acfhtml=
'<td>ACF generation failed!</td>'
 7472 html_ompdf_write+=
'<tr><td width="30%"><img width="100%" src="1Dpdf/'+figname+
'"/></td><td width="30%"><img width="100%" src="1Dsamps/'+figname.replace(
'.png',
'_samps.png')+
'"/></td>'+acfhtml+
'</tr>'
# Two-column row variant (no ACF column).
 7474 html_ompdf_write+=
'<tr><td width="30%"><img width="100%" src="1Dpdf/'+figname+
'"/></td><td width="30%"><img width="100%" src="1Dsamps/'+figname.replace(
'.png',
'_samps.png')+
'"/></td></tr>'
# Close the table and flush the accumulated HTML into the page section.
 7476 html_ompdf_write+=
'</table>'
 7477 html_ompdf.write(html_ompdf_write)
def __init__(self, *args)
Return analytic likelihood values.
def names(self)
Return list of parameter names described by analytic likelihood function.
def __init__(self, covariance_matrix_files, mean_vector_files)
Prepare analytic likelihood for the given parameters.
def cdf(self, param)
Return PDF function for parameter.
def pdf(self, param)
Return PDF function for parameter.
Data structure for a table of posterior samples .
def __init__(self, commonResultsFormatData, SimBurstTableEntry=None, injFref=None, SnglBurstList=None, name=None, description=None)
Constructor.
def _inj_longitude(self, inj)
Dec tick locations with some intelligence.
def __init__(self, min=-pi_constant/2.0, max=pi_constant/2.0)
object to store the structure of a kd tree
def setSplit(self, dimension, value)
def setImportance(self, sampleNumber, volume)
def __init__(self, bounding_box, left_child=None, right_child=None)
def search(self, coordinates)
takes a set of coordinates and searches down through the tree until it gets to a box with less than ...
def integrate(self, f, boxing=64)
Returns the integral of f(objects) over the tree.
def objects(self)
Returns the objects in the tree.
def __init__(self, objects)
Construct a kD-tree from a sequence of objects.
def bounds(self)
Returns the coordinates of the lower-left and upper-right corners of the bounding box for this tree: ...
def volume(self)
Returns the volume of the bounding box of the tree.
def _bounds_of_objects(self)
def _longest_dimension(self)
def right(self)
Returns the right tree.
def __iter__(self)
Iterator over all the objects contained in the tree.
def left(self)
Returns the left tree.
def operate(self, f, g, boxing=64)
Operates on tree nodes exceeding boxing parameter depth.
def _same_coords(self, objects)
def split_dim(self)
Returns the dimension along which this level of the kD-tree splits.
A kD-tree suitable for splitting parameter spaces and counting hypervolumes.
def integrate(self, f, boxing=64)
Returns the integral of f(objects) over the tree.
def fillNewTree(self, boxing=64, isArea=False)
copies tree structure, but with KDSkeleton as the new nodes.
def __iter__(self)
Iterator over all the objects contained in the tree.
def left(self)
Returns the left tree.
def objects(self)
Returns the objects in the tree.
def split_dim(self)
Returns the dimension along which this level of the kD-tree splits.
def right(self)
Returns the right tree.
def volume(self)
Returns the volume of the bounding box of the tree.
def _same_coords(self, objects)
def bounds(self)
Returns the coordinates of the lower-left and upper-right corners of the bounding box for this tree: ...
def search(self, coordinates, boxing=64)
takes a set of coordinates and searches down through the tree until it gets to a box with less than ...
def __init__(self, objects, boundingbox, dims=0)
Construct a kD-tree from a sequence of objects.
def operate(self, f, g, boxing=64)
Operates on tree nodes exceeding boxing parameter depth.
A parser for the output of Bayesian parameter estimation codes.
def _clear_infmcmc_header(self, infile)
def _find_ndownsample(self, files, logPthreshold, fixedBurnins, nDownsample)
def _common_to_pos(self, infile, info=[None, None])
def __init__(self, inputtype)
def _hdf5_to_pos(self, infile, fixedBurnins=None, deltaLogP=None, nDownsample=None, tablename=None, **kwargs)
def _xml_to_pos(self, infile)
def _hdf5_to_table(self, infile, deltaLogP=None, fixedBurnin=None, nDownsample=None, multiple_chains=False, tablename=None, **kwargs)
def _find_infmcmc_f_lower(self, runInfo)
def _infmcmc_to_pos(self, files, outdir=None, deltaLogP=None, fixedBurnins=None, nDownsample=None, oldMassConvention=False, **kwargs)
def _infmcmc_output_posterior_samples(self, files, runfile, outfile, logPThreshold, fixedBurnins, nskips=None, oldMassConvention=False)
def parse(self, files, **kwargs)
Parse files.
def _swaplabel12(self, label)
def _VOTTABLE2pos(self, table)
def _hdf5s_to_pos(self, infiles, fixedBurnins=None, deltaLogP=None, nDownsample=None, tablename=None, **kwargs)
def _find_infmcmc_f_ref(self, runInfo)
def _ns_to_pos(self, files, Nlive=None, Npost=None, posfilename='posterior_samples.dat')
def _followupmcmc_to_pos(self, files)
Data structure for a table of posterior samples .
def stdevs(self)
Return dict {paramName:paramStandardDeviation} .
def maxP(self)
Return the maximum a posteriori probability and the corresponding set of parameters.
def set_triggers(self, triggers)
Set the trigger values of the parameters.
def __init__(self, commonResultsFormatData, SimInspiralTableEntry=None, inj_spin_frame='OrbitalL', injFref=100, SnglInspiralList=None, name=None, description=None)
Constructor.
def bySample(self)
Generate a forward iterator over the list of samples corresponding to the data stored within the Post...
def __len__(self)
Container method.
def __iter__(self)
Container method.
def extend_posterior(self)
Add some useful derived parameters (such as tilt angles, time delays, etc) in the Posterior object.
def delete_NaN_entries(self, param_list)
Remove samples containing NaN in request params.
def name(self)
Return qualified string containing the 'name' of the Posterior instance.
def gelman_rubin(self, pname)
Returns an approximation to the Gelman-Rubin statistic (see Gelman, A.
def di_evidence(self, boxing=64)
Returns the log of the direct-integration evidence for the posterior samples.
def __getitem__(self, key)
Container method .
def _average_posterior(self, samples, post_name)
def _inj_spins(self, inj, frame='OrbitalL')
def forward(self)
Generate a forward iterator (in sense of list of names) over Posterior with name,one_d_pos.
def triggers(self)
Return the trigger values .
def append(self, one_d_posterior)
Container method.
def _gettrigpar(self, paramname)
def description(self)
Return qualified string containing a 'description' of the Posterior instance.
def bootstrap(self)
Returns a new Posterior object that contains a bootstrap sample of self.
def means(self)
Return dict {paramName:paramMean} .
def _average_posterior_like_prior(self, samples, logl_name, prior_name, log_bias=0)
def delete_samples_by_idx(self, samples)
Remove samples from all OneDPosteriors.
def maxL(self)
Return the maximum likelihood probability and the corresponding set of parameters.
def injection(self)
Return the injected values.
def _getinjpar(self, paramname)
def healpix_map(self, resol, nest=True)
Returns a healpix map in the pixel ordering that represents the posterior density (per square degree)...
def _print_table_row(self, name, entries)
def _total_incl_restarts(self, samples)
def __str__(self)
Define a string representation of the Posterior class ; returns a html formatted table of various pro...
def DIC(self)
Returns the Deviance Information Criterion estimated from the posterior samples.
def write_to_file(self, fname)
Dump the posterior table to a file in the 'common format'.
def names(self)
Return list of parameter names.
def samples(self)
Return an (M,N) numpy.array of posterior samples; M = len(self); N = dim(self) .
def pop(self, param_name)
Container method.
def append_mapping(self, new_param_names, func, post_names)
Append posteriors pos1,pos2,...=func(post_names)
def elliptical_subregion_evidence(self)
Returns an approximation to the log(evidence) obtained by fitting an ellipse around the highest-poste...
def _inj_longitude(self, inj)
def medians(self)
Return dict {paramName:paramMedian} .
def longest_chain_cycles(self)
Returns the number of cycles in the longest chain.
def dim(self)
Return number of parameters.
def set_injection(self, injection)
Set the injected values of the parameters.
def harmonic_mean_evidence(self)
Returns the log of the harmonic mean evidence for the set of posterior samples.
A data structure representing one parameter in a chain of posterior samples.
def name(self)
Return the string literal name of the parameter.
def median(self)
Return the median value for the marginal PDF on the parameter.
def delete_samples_by_idx(self, samples)
Remove samples from posterior, analogous to numpy.delete but operates in place.
def injval(self)
Return the injected value set at construction .
def set_trigvals(self, new_trigvals)
Set the trigger values of the parameter.
def __len__(self)
Container method.
def __init__(self, name, posterior_samples, injected_value=None, injFref=None, trigger_values=None, prior=None)
Create an instance of PosteriorOneDPDF based on a table of posterior_samples.
def stacc(self)
Return the 'standard accuracy statistic' (stacc) of the marginal posterior of the parameter.
def set_injval(self, new_injval)
Set the injected/real value of the parameter.
def trigvals(self)
Return the trigger values set at construction.
def mean(self)
Return the arithmetic mean for the marginal PDF on the parameter.
def __getitem__(self, idx)
Container method .
def prob_interval(self, intervals)
Evaluate probability intervals.
def KL(self)
Returns the KL divergence between the prior and the posterior.
def gaussian_kde(self)
Return a SciPy gaussian_kde (representing a Gaussian KDE) of the samples.
def stdev(self)
Return the standard deviation of the marginal PDF on the parameter.
def samples(self)
Return a 1D numpy.array of the samples.
A single parameter sample object, suitable for inclusion in a kD-tree.
def __getitem__(self, key)
Return the element with the corresponding name.
def coord(self)
Return the coordinates for the parameter sample.
def __init__(self, sample_array, headers, coord_names)
Given the sample array, headers for the values, and the names of the desired coordinates,...
RA tick locations with some intelligence.
def __init__(self, min=0.0, max=2.0 *pi_constant)
def start(self, tag, attrib)
A base class for representing web content using ElementTree .
def tab(self, idtable=None)
def toprettyxml(self)
Return a pretty-printed XML string of the htmlPage.
def insert_row(self, tab, label=None)
Insert row in table tab.
def append(self, element)
def insert_td(self, row, td, label=None, legend=None)
Insert cell td into row row.
def __init__(self, tag, attrib=None, parent=None)
def a(self, url, linktext)
Represents a block of html fitting within a htmlPage.
def __init__(self, section_name, htmlElement=None, table_id=None, start_closed=True)
A concrete class for generating an XHTML(1) document.
def add_section(self, section_name, legend=None)
def add_collapse_section(self, section_name, legend=None, innertable_id=None, start_closed=True)
Create a section embedded into a table that can be collapsed with a button.
def add_section_to_element(self, section_name, parent)
Create a section which is not appended to the body of html, but to the parent Element.
def __init__(self, title=None, css=None, javascript=None, toc=False)
Represents a block of html fitting within a htmlPage.
def __init__(self, section_name, htmlElement=None, blank=False)
def plot_two_param_greedy_bins_hist(posterior, greedy2Params, confidence_levels)
Histograms of the ranked pixels produced by the 2-parameter greedy binning algorithm coloured by their...
def cred_interval(x, cl=.9, lower=True)
Return location of lower or upper confidence levels Args: x: List of samples.
def histogram2D(posterior, greedy2Params, confidence_levels)
Returns a 2D histogram and edges for the two parameters passed in greedy2Params, plus the actual disc...
def plot_spline_pos(logf, ys, nf=100, level=0.9, color='k', label=None, xform=None)
Plot calibration posterior estimates for a spline model in log space.
def lambda_a(redshift, nonGR_alpha, lambda_eff, distance)
Converting from the effective wavelength-like parameter to lambda_A: lambda_A = lambda_{eff}*(D_alpha...
def spline_angle_xform(delta_psi)
Returns the angle in degrees corresponding to the spline calibration parameters delta_psi.
def q2eta(q)
Utility function for converting q to eta.
def plot_one_param_pdf(posterior, plot1DParams, analyticPDF=None, analyticCDF=None, plotkde=False)
Plots a 1D histogram and (gaussian) kernel density estimate of the distribution of posterior samples ...
def det_end_time(ifo_prefix, inj)
def autocorrelation(series)
Returns an estimate of the autocorrelation function of a given series.
def array_ang_sep(vec1, vec2)
Find angles between vectors in rows of numpy arrays.
def parse_converge_output_section(fo)
def physical2radiationFrame(theta_jn, phi_jl, tilt1, tilt2, phi12, a1, a2, m1, m2, fref, phiref)
changes for testing Lorentz violations made till here
def plot_sky_map(hpmap, outdir, inj=None, nest=True)
Plots a sky map from a healpix map, optionally including an injected position.
def mc2q(mc, eta)
Utility function for converting mchirp,eta to new mass ratio q (m2/m1).
def kdtree_bin2Step(posterior, coord_names, confidence_levels, initial_boundingbox=None, samples_per_bin=10, injCoords=None, alternate=False, fraction=0.5, skyCoords=False)
input: posterior class instance, list of confidence levels, optional choice of inital parameter space...
def plot_calibration_pos(pos, level=.9, outpath=None)
def orbital_momentum_mag(fref, m1, m2, eta)
def plot_burst_waveform(pos=None, simburst=None, event=0, path=None, ifos=['H1', 'L1', 'V1'])
def plot_two_param_kde(posterior, plot2DkdeParams)
Plots a 2D kernel density estimate of the 2-parameter marginal posterior.
def make_1d_table(html, legend, label, pos, pars, noacf, GreedyRes, onepdfdir, sampsdir, savepdfs, greedy, analyticLikelihood, nDownsample)
def plot_label(param)
A lookup table for plot labels.
def effectiveSampleSize(samples, Nskip=1)
Compute the effective sample size, calculating the ACL using only the second half of the samples to a...
def random_split(items, fraction)
def plot_waveform(pos=None, siminspiral=None, event=0, path=None, ifos=['H1', 'L1', 'V1'])
def getDecString(radians, accuracy='auto')
def plot_psd(psd_files, outpath=None, f_min=30.)
def autocorrelation_length_estimate(series, acf=None, M=5, K=2)
Attempts to find a self-consistent estimate of the autocorrelation length of a given series.
def contigious_interval_one_param(posterior, contInt1Params, confidence_levels)
Calculates the smallest contiguous 1-parameter confidence interval for a set of given confidence leve...
def as_array(table)
Workaround for missing astropy.table.Table.as_array method, which was added in Astropy 1....
def addSample(tree, coordinates)
def cart2sph(x, y, z)
Utility function to convert cartesian coords to r,theta,phi.
def vo_nest2pos(nsresource, Nlive=None)
Parse a VO Table RESOURCE containing nested sampling output and return a VOTable TABLE element with p...
def find_ndownsample(samples, nDownsample)
Given a list of files, threshold value, and a desired number of outputs posterior samples,...
def skymap_inj_pvalue(hpmap, inj, nest=True)
Returns the greedy p-value estimate for the given injection.
def symm_tidal_params(lambda1, lambda2, q)
Calculate best tidal parameters [Eqs.
def kdtree_bin_sky_area(posterior, confidence_levels, samples_per_bin=10)
takes samples and applies a KDTree to them to return confidence levels returns confidence_intervals -...
def getRAString(radians, accuracy='auto')
def kdtree_bin_sky_volume(posterior, confidence_levels)
def plot_one_param_pdf_kde(fig, onedpos)
def q2ms(mc, q)
Utility function for converting mchirp,q to component masses.
def skyArea(bounds)
functions used in 2-stage kdtree
def amplitudeMeasure(redshift, nonGR_alpha, lambda_eff, distance)
Converting to Lorentz violating parameter "A" in dispersion relation from lambda_A: A = (lambda_A/h)^...
def plot_two_param_greedy_bins_contourf(posteriors_by_name, greedy2Params, confidence_levels, colors_by_name, figsize=(7, 6), dpi=120, figposition=[0.3, 0.3, 0.5, 0.5], legend='right', hatches_by_name=None)
def confidence_interval_uncertainty(cl, cl_bounds, posteriors)
Returns a tuple (relative_change, fractional_uncertainty, percentile_uncertainty) giving the uncertai...
def ang_dist(long1, lat1, long2, lat2)
Find the angular separation of (long1,lat1) and (long2,lat2), which are specified in radians.
def pol2cart(long, lat)
Utility function to convert longitude,latitude on a unit sphere to cartesian co-ordinates.
def kdtree_bin(posterior, coord_names, confidence_levels, initial_boundingbox=None, samples_per_bin=10)
takes samples and applies a KDTree to them to return confidence levels returns confidence_intervals -...
def plot_corner(posterior, levels, parnames=None)
Make a corner plot using the triangle module (See http://github.com/dfm/corner.py)
def sph2cart(r, theta, phi)
Utility function to convert r,theta,phi to cartesian co-ordinates.
def get_inj_by_time(injections, time)
Filter injections to find the injection with end time given by time +/- 0.1s.
def array_dot(vec1, vec2)
Calculate dot products between vectors in rows of numpy arrays.
def rotation_matrix(angle, direction)
Compute general rotation matrices for a given angles and direction vectors.
def greedy_bin_two_param(posterior, greedy2Params, confidence_levels)
Determine the 2-parameter Bayesian Confidence Intervals using a greedy binning algorithm.
def chi_precessing(m1, a1, tilt1, m2, a2, tilt2)
Calculate the magnitude of the effective precessing spin following convention from Phys.
def skymap_confidence_areas(hpmap, cls)
Returns the area (in square degrees) for each confidence level with a greedy binning algorithm for th...
def readCoincXML(xml_file, trignum)
def calculate_redshift(distance, h=0.6790, om=0.3065, ol=0.6935, w0=-1.0)
Calculate the redshift from the luminosity distance measurement using the Cosmology Calculator provid...
def replace_column(table, old, new)
Workaround for missing astropy.table.Table.replace_column method, which was added in Astropy 1....
def mc2ms(mc, eta)
Utility function for converting mchirp,eta to component masses.
def array_polar_ang(vec)
Find polar angles of vectors in rows of a numpy array.
def component_momentum(m, a, theta, phi)
Calculate BH angular momentum vector.
def ROTATEY(angle, vx, vy, vz)
def DistanceMeasure(redshift, nonGR_alpha)
D_alpha = ((1+z)^(1-alpha))/H_0 * D_alpha # from eq.15 of arxiv 1110.2720 D_alpha calculated from int...
def integrand_distance(redshift, nonGR_alpha)
Following functions added for testing Lorentz violations.
def plot_two_param_kde_greedy_levels(posteriors_by_name, plot2DkdeParams, levels, colors_by_name, line_styles=__default_line_styles, figsize=(4, 3), dpi=250, figposition=[0.2, 0.2, 0.48, 0.75], legend='right', hatches_by_name=None, Npixels=50)
Plots a 2D kernel density estimate of the 2-parameter marginal posterior.
def orbital_momentum(fref, m1, m2, inclination)
Calculate orbital angular momentum vector.
def ROTATEZ(angle, vx, vy, vz)
def source_mass(mass, redshift)
Calculate source mass parameter for mass m as: m_source = m / (1.0 + z) For a parameter m.
def greedy_bin_one_param(posterior, greedy1Param, confidence_levels)
Determine the 1-parameter Bayesian Confidence Interval using a greedy binning algorithm.
def spin_angles(fref, mc, eta, incl, a1, theta1, phi1, a2=None, theta2=None, phi2=None)
Calculate physical spin angles.
def bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, phi12, quantity, fits)
Convenience functions.
def read_samples(filename, path=None, tablename=POSTERIOR_SAMPLES)
Read an HDF5 sample chain file.
def draw_posterior_many(datas, Nlives, verbose=False)
Draw samples from the posteriors represented by the (Nruns, Nsamples, Nparams)-shaped array datas,...
def draw_N_posterior_many(datas, Nlives, Npost, verbose=False)
Draw Npost samples from the posteriors represented by the (Nruns, Nsamples, Nparams)-shaped array dat...