Loading [MathJax]/extensions/TeX/AMSsymbols.js
LALInference 4.1.8.1-02cf16d
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
bayespputils.py
Go to the documentation of this file.
1# -*- coding: utf-8 -*-
2#
3# bayespputils.py
4#
5# Copyright 2010
6# Benjamin Aylott <benjamin.aylott@ligo.org>,
7# Benjamin Farr <bfarr@u.northwestern.edu>,
8# Will M. Farr <will.farr@ligo.org>,
9# John Veitch <john.veitch@ligo.org>,
10# Salvatore Vitale <salvatore.vitale@ligo.org>,
11# Vivien Raymond <vivien.raymond@ligo.org>
12#
13# This program is free software; you can redistribute it and/or modify
14# it under the terms of the GNU General Public License as published by
15# the Free Software Foundation; either version 2 of the License, or
16# (at your option) any later version.
17#
18# This program is distributed in the hope that it will be useful,
19# but WITHOUT ANY WARRANTY; without even the implied warranty of
20# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21# GNU General Public License for more details.
22#
23# You should have received a copy of the GNU General Public License
24# along with this program; if not, write to the Free Software
25# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
26# MA 02110-1301, USA.
27
28#===============================================================================
29# Preamble
30#===============================================================================
31
32"""
33This module contains classes and functions for post-processing the output
34of the Bayesian parameter estimation codes.
35"""
36
37#standard library imports
38import os
39import sys
40import warnings
41from math import cos,ceil,floor,sqrt,pi as pi_constant
42from xml.dom import minidom
43from operator import itemgetter
44
45#related third party imports
46import matplotlib
47matplotlib.use('agg')
48from .io import read_samples
49import healpy as hp
50import astropy.table
51import numpy as np
52np.random.seed(42)
53from numpy import fmod
54from matplotlib import pyplot as plt,lines as mpl_lines
55from scipy import stats
56from scipy import special
57from scipy import signal
58from scipy.optimize import newton
59from scipy import interpolate
60from scipy import integrate
61import random
62import socket
63from itertools import combinations
64from .lalinference import LALInferenceHDF5PosteriorSamplesDatasetName as posterior_grp_name
65import re
66
67try:
68 import lalsimulation as lalsim
69except ImportError:
70 print('Cannot import lalsimulation SWIG bindings')
71 raise
72
73try:
74 from .imrtgr.nrutils import bbh_average_fits_precessing
75except ImportError:
76 print('Cannot import lalinference.imrtgr.nrutils. Will suppress final parameter and peak luminosity calculations.')
77
78from matplotlib.ticker import ScalarFormatter
79
80try:
81 hostname_short=socket.gethostbyaddr(socket.gethostname())[0].split('.',1)[1]
82except:
83 hostname_short='Unknown'
84if hostname_short=='ligo.caltech.edu' or hostname_short=='cluster.ldas.cit': #The CIT cluster has troubles with the default 'cm' font. 'custom' has the least troubles, but does not include \odot
85 matplotlib.rcParams.update(
86 {'mathtext.fontset' : "custom",
87 'mathtext.fallback' : 'cm'
88 })
89
90from xml.etree.cElementTree import Element, SubElement, tostring, XMLParser
91
92#local application/library specific imports
93import lal
94from lal import LIGOTimeGPS, TimeDelayFromEarthCenter
95from . import git_version
96
97__author__="Ben Aylott <benjamin.aylott@ligo.org>, Ben Farr <bfarr@u.northwestern.edu>, Will M. Farr <will.farr@ligo.org>, John Veitch <john.veitch@ligo.org>, Vivien Raymond <vivien.raymond@ligo.org>"
98__version__= "git id %s"%git_version.id
99__date__= git_version.date
100
def get_end(siminspiral):
    """
    Return the geocentric end time of a sim_inspiral row in seconds,
    combining the integer-second and nanosecond columns.
    """
    seconds = siminspiral.geocent_end_time
    nanoseconds = siminspiral.geocent_end_time_ns
    return seconds + 1e-9 * nanoseconds
103
def replace_column(table, old, new):
    """Workaround for missing astropy.table.Table.replace_column method,
    which was added in Astropy 1.1.

    Replaces column `old` with the data in `new`, keeping its position
    and name.

    FIXME: remove this function when LALSuite depends on Astropy >= 1.1."""
    position = table.colnames.index(old)
    table.remove_column(old)
    replacement = astropy.table.Column(new, name=old)
    table.add_column(replacement, index=position)
112
def as_array(table):
    """Workaround for missing astropy.table.Table.as_array method,
    which was added in Astropy 1.0.

    FIXME: remove this function when LALSuite depends on Astropy >= 1.0."""
    try:
        return table.as_array()
    except AttributeError:
        # Old Astropy (< 1.0): fall back to the private structured-array
        # attribute.  Catch only AttributeError — the original bare
        # except also hid genuine errors raised inside as_array().
        return table._data
122
123#===============================================================================
124# Constants
125#===============================================================================
#Parameters which are not to be exponentiated when found
logParams=['logl','loglh1','loglh2','logll1','loglv1','deltalogl','deltaloglh1','deltalogll1','deltaloglv1','logw','logprior','logpost','nulllogl','chain_log_evidence','chain_delta_log_evidence','chain_log_noise_evidence','chain_log_bayes_factor']
#Parameters known to cbcBPP
relativePhaseParams=[ a+b+'_relative_phase' for a,b in combinations(['h1','l1','v1'],2)]
snrParams=['snr','optimal_snr','matched_filter_snr','coherence'] + ['%s_optimal_snr'%(i) for i in ['h1','l1','v1']] + ['%s_cplx_snr_amp'%(i) for i in ['h1','l1','v1']] + ['%s_cplx_snr_arg'%(i) for i in ['h1', 'l1', 'v1']] + relativePhaseParams
calAmpParams=['calamp_%s'%(ifo) for ifo in ['h1','l1','v1']]
calPhaseParams=['calpha_%s'%(ifo) for ifo in ['h1','l1','v1']]
calParams = calAmpParams + calPhaseParams
# Masses
massParams=['m1','m2','chirpmass','mchirp','mc','eta','q','massratio','asym_massratio','mtotal','mf','mf_evol','mf_nonevol']
#Spins
spinParamsPrec=['a1','a2','phi1','theta1','phi2','theta2','costilt1','costilt2','costheta_jn','cosbeta','tilt1','tilt1_isco','tilt2','tilt2_isco','phi_jl','theta_jn','phi12','phi12_isco','af','af_evol','af_nonevol','afz','afz_evol','afz_nonevol']
spinParamsAli=['spin1','spin2','a1z','a2z']
spinParamsEff=['chi','effectivespin','chi_eff','chi_tot','chi_p']
spinParams=spinParamsPrec+spinParamsEff+spinParamsAli
# Source frame params
cosmoParam=['m1_source','m2_source','mtotal_source','mc_source','redshift','mf_source','mf_source_evol','mf_source_nonevol','m1_source_maxldist','m2_source_maxldist','mtotal_source_maxldist','mc_source_maxldist','redshift_maxldist','mf_source_maxldist','mf_source_maxldist_evol','mf_source_maxldist_nonevol']
#Strong Field
ppEParams=['ppEalpha','ppElowera','ppEupperA','ppEbeta','ppElowerb','ppEupperB','alphaPPE','aPPE','betaPPE','bPPE']
tigerParams= ['dchi%i'%(i) for i in range(8)] + ['dchi%il'%(i) for i in [5,6] ] + ['dxi%d'%(i+1) for i in range(6)] + ['dalpha%i'%(i+1) for i in range(5)] + ['dbeta%i'%(i+1) for i in range(3)] + ['dsigma%i'%(i+1) for i in range(4)] + ['dipolecoeff'] + ['dchiminus%i'%(i) for i in [1,2]] + ['dchiMinus%i'%(i) for i in [1,2]] + ['db1','db2','db3','db4','dc1','dc2','dc4','dcl']
qnmtestParams=['domega220','dtau220','domega210','dtau210','domega330','dtau330','domega440','dtau440','domega550','dtau550']
bransDickeParams=['omegaBD','ScalarCharge1','ScalarCharge2']
massiveGravitonParams=['lambdaG']
lorentzInvarianceViolationParams=['log10lambda_a','lambda_a','log10lambda_eff','lambda_eff','log10livamp','liv_amp']
tidalParams=['lambda1','lambda2','lam_tilde','dlam_tilde','lambdat','dlambdat','lambdas','bluni']
fourPiecePolyParams=['logp1','gamma1','gamma2','gamma3']
spectralParams=['sdgamma0','sdgamma1','sdgamma2','sdgamma3']
energyParams=['e_rad', 'e_rad_evol', 'e_rad_nonevol', 'l_peak', 'l_peak_evol', 'l_peak_nonevol', 'e_rad_maxldist', 'e_rad_maxldist_evol', 'e_rad_maxldist_nonevol']
# NOTE(review): the original listed 'dquadmona' twice; the duplicate was a
# no-op and has been dropped.  It may have been intended as 'dquadmons'
# (a label exists for it in plot_label) — confirm with the analysis authors.
spininducedquadParams = ['dquadmon1', 'dquadmon2', 'dquadmona']
strongFieldParams = (
    ppEParams + tigerParams + bransDickeParams + massiveGravitonParams
    + tidalParams + fourPiecePolyParams + spectralParams + energyParams
    + lorentzInvarianceViolationParams + spininducedquadParams + qnmtestParams
)

#Extrinsic
distParams=['distance','distMPC','dist','distance_maxl']
incParams=['iota','inclination','cosiota']
polParams=['psi','polarisation','polarization']
skyParams=['ra','rightascension','declination','dec']
phaseParams=['phase', 'phi0','phase_maxl']
#Times
timeParams=['time','time_mean']
endTimeParams=['l1_end_time','h1_end_time','v1_end_time']
#others
statsParams=['logprior','logl','deltalogl','deltaloglh1','deltalogll1','deltaloglv1','deltaloglh2','deltaloglg1']
calibParams=['calpha_l1','calpha_h1','calpha_v1','calamp_l1','calamp_h1','calamp_v1']

## Greedy bin sizes for cbcBPP and confidence levels used for the greedy bin intervals
confidenceLevels=[0.67,0.9,0.95,0.99]

# NOTE(review): the original literal contained a duplicate key
# 'costheta_jn':0.02 that was shadowed by the later 'costheta_jn':0.1;
# the dead entry has been removed (final value is unchanged).
# 'thatas' looks like a typo (for 'thetas'?) but is kept as-is — verify.
greedyBinSizes={'mc':0.025,'m1':0.1,'m2':0.1,'mass1':0.1,'mass2':0.1,'mtotal':0.1,'mc_source':0.025,'m1_source':0.1,'m2_source':0.1,'mtotal_source':0.1,'mc_source_maxldist':0.025,'m1_source_maxldist':0.1,'m2_source_maxldist':0.1,'mtotal_source_maxldist':0.1,'eta':0.001,'q':0.01,'asym_massratio':0.01,'iota':0.01,'cosiota':0.02,'time':1e-4,'time_mean':1e-4,'distance':1.0,'dist':1.0,'distance_maxl':1.0,'redshift':0.01,'redshift_maxldist':0.01,'mchirp':0.025,'chirpmass':0.025,'spin1':0.04,'spin2':0.04,'a1z':0.04,'a2z':0.04,'a1':0.02,'a2':0.02,'phi1':0.05,'phi2':0.05,'theta1':0.05,'theta2':0.05,'ra':0.05,'dec':0.05,'chi':0.05,'chi_eff':0.05,'chi_tot':0.05,'chi_p':0.05,'costilt1':0.02,'costilt2':0.02,'thatas':0.05,'beta':0.05,'omega':0.05,'cosbeta':0.02,'ppealpha':1.0,'ppebeta':1.0,'ppelowera':0.01,'ppelowerb':0.01,'ppeuppera':0.01,'ppeupperb':0.01,'polarisation':0.04,'rightascension':0.05,'declination':0.05,'massratio':0.001,'inclination':0.01,'phase':0.05,'tilt1':0.05,'tilt2':0.05,'phi_jl':0.05,'theta_jn':0.05,'phi12':0.05,'flow':1.0,'phase_maxl':0.05,'calamp_l1':0.01,'calamp_h1':0.01,'calamp_v1':0.01,'calpha_h1':0.01,'calpha_l1':0.01,'calpha_v1':0.01,'logdistance':0.1,'psi':0.1,'costheta_jn':0.1,'mf':0.1,'mf_evol':0.1,'mf_nonevol':0.1,'mf_source':0.1,'mf_source_evol':0.1,'mf_source_nonevol':0.1,'mf_source_maxldist':0.1,'mf_source_maxldist_evol':0.1,'mf_source_maxldist_nonevol':0.1,'af':0.02,'af_evol':0.02,'af_nonevol':0.02,'afz':0.02,'afz_evol':0.01,'afz_nonevol':0.01,'e_rad':0.1,'e_rad_evol':0.1,'e_rad_nonevol':0.1,'e_rad_maxldist':0.1,'e_rad_maxldist_evol':0.1,'e_rad_maxldist_nonevol':0.1,'l_peak':0.1,'l_peak_evol':0.1,'l_peak_nonevol':0.1}
# SNR parameters share a common bin size.
for s in snrParams:
    greedyBinSizes[s]=0.05
# Per-detector end times and inter-detector delays inherit the 'time' bin size.
for derived_time in ['h1_end_time','l1_end_time','v1_end_time','h1l1_delay','l1v1_delay','h1v1_delay']:
    greedyBinSizes[derived_time]=greedyBinSizes['time']
for derived_phase in relativePhaseParams:
    greedyBinSizes[derived_phase]=0.05
for param in tigerParams + bransDickeParams + massiveGravitonParams + lorentzInvarianceViolationParams + qnmtestParams:
    greedyBinSizes[param]=0.01
for param in tidalParams:
    greedyBinSizes[param]=2.5
for param in fourPiecePolyParams:
    greedyBinSizes[param]=2.5
for param in spectralParams:
    greedyBinSizes[param]=2.5
for param in spininducedquadParams:
    greedyBinSizes[param]=2.5
# Likelihood/statistics columns get a coarse bin size.
for loglname in statsParams:
    greedyBinSizes[loglname]=0.1
197
def det_end_time(ifo_prefix, inj):
    """
    Return the end time of injection `inj` as seen at the detector whose
    prefix is `ifo_prefix` (e.g. 'H1'): the geocentric end time plus the
    light travel-time delay from the Earth's centre to that detector.
    """
    detector_location = lal.cached_detector_by_prefix[ifo_prefix].location
    gps_end = LIGOTimeGPS(inj.geocent_end_time, inj.geocent_end_time_ns)
    geocent_seconds = inj.geocent_end_time + 1e-9*inj.geocent_end_time_ns
    delay = TimeDelayFromEarthCenter(detector_location, inj.longitude, inj.latitude, gps_end)
    return geocent_seconds + delay
205
#Pre-defined ordered list of line styles for use in matplotlib contour plots.
__default_line_styles=['solid', 'dashed', 'dashdot', 'dotted']
#Pre-defined ordered list of matplotlib colours for use in plots.
__default_color_lst=['r','b','y','g','c','m']
#A default css string for use in html results pages.
#(Bug fix: the img rule previously read "eight:100%", a typo for "height".)
__default_css_string="""
p,h1,h2,h3,h4,h5
{
font-family:"Trebuchet MS", Arial, Helvetica, sans-serif;
}

p
{
font-size:14px;
}

h1
{
font-size:20px;
}

h2
{
font-size:18px;
}

h3
{
font-size:16px;
}



table
{
font-family:"Trebuchet MS", Arial, Helvetica, sans-serif;
width:100%;
border-collapse:collapse;
}
td,th
{
font-size:12px;
border:1px solid #B5C1CF;
padding:3px 7px 2px 7px;
}
th
{
font-size:14px;
text-align:left;
padding-top:5px;
padding-bottom:4px;
background-color:#B3CEEF;
color:#ffffff;
}
#postable tr:hover
{
background: #DFF4FF;
}
#covtable tr:hover
{
background: #DFF4FF;
}
#statstable tr:hover
{
background: #DFF4FF;
}

img {
 max-width: 510px;
 max-height: 510px;
 width:100%;
 height:100%;
}

.ppsection
{
border-bottom-style:double;
}

"""
#A default javascript string (collapse/expand toggling) for html results pages.
__default_javascript_string='''
//<![CDATA[
function toggle_visibility(tbid,lnkid)
{

  if(document.all){document.getElementById(tbid).style.display = document.getElementById(tbid).style.display == 'block' ? 'none' : 'block';}

  else{document.getElementById(tbid).style.display = document.getElementById(tbid).style.display == 'table' ? 'none' : 'table';}

  document.getElementById(lnkid).value = document.getElementById(lnkid).value == '[-] Collapse' ? '[+] Expand' : '[-] Collapse';

 }
 //]]>

'''
301
302
303#===============================================================================
304# Function to return the correct prior distribution for selected parameters
305#===============================================================================
def get_prior(name):
    """
    Return the prior-distribution expression associated with parameter `name`.

    The returned value is either the string 'uniform', a string expression
    understood by PosteriorOneDPDF.KL (e.g. 'x**2', 'np.cos'), or None when
    no prior is known for the parameter (including unknown names).

    Bug fix: the original called the dict — `distributions(name)` — which
    always raised TypeError, and the bare except then returned None for
    every parameter, silently disabling the KL computation.
    """
    distributions={
        'm1':'uniform',
        'm2':'uniform',
        'mc':None,
        'eta':None,
        'q':None,
        'mtotal':'uniform',
        'm1_source':None,
        'm2_source':None,
        'mtotal_source':None,
        'mc_source':None,
        'redshift':None,
        'm1_source_maxldist':None,
        'm2_source_maxldist':None,
        'mtotal_source_maxldist':None,
        'mc_source_maxldist':None,
        'redshift_maxldist':None,
        'mf':None,
        'mf_evol':None,
        'mf_nonevol':None,
        'mf_source':None,
        'mf_source_evol':None,
        'mf_source_nonevol':None,
        'mf_source_maxldist':None,
        'mf_source_maxldist_evol':None,
        'mf_source_maxldist_nonevol':None,
        'af':None,
        'af_evol':None,
        'af_nonevol':None,
        'afz':None,
        'afz_evol':None,
        'afz_nonevol':None,
        'e_rad':None,
        'e_rad_evol':None,
        'e_rad_nonevol':None,
        'e_rad_maxldist':None,
        'e_rad_maxldist_evol':None,
        'e_rad_maxldist_nonevol':None,
        'l_peak':None,
        'l_peak_evol':None,
        'l_peak_nonevol':None,
        'spin1':'uniform',
        'spin2':'uniform',
        'a1':'uniform',
        'a2':'uniform',
        'a1z':'uniform',
        'a2z':'uniform',
        'theta1':'uniform',
        'theta2':'uniform',
        'phi1':'uniform',
        'phi2':'uniform',
        'chi_eff':None,
        'chi_tot':None,
        'chi_p':None,
        'tilt1':None,
        'tilt2':None,
        'tilt1_isco':None,
        'tilt2_isco':None,
        'costilt1':'uniform',
        'costilt2':'uniform',
        'iota':'np.cos',
        'cosiota':'uniform',
        'time':'uniform',
        'time_mean':'uniform',
        'dist':'x**2',
        'distance_maxl':'x**2',
        'ra':'uniform',
        'dec':'np.cos',
        'phase':'uniform',
        'psi':'uniform',
        'theta_jn':'np.sin',
        'costheta_jn':'uniform',
        'beta':None,
        'cosbeta':None,
        'phi_jl':None,
        'phi12':None,
        'phi12_isco':None,
        'logl':None,
        'h1_end_time':None,
        'l1_end_time':None,
        'v1_end_time':None,
        'h1l1_delay':None,
        'h1v1_delay':None,
        'l1v1_delay':None,
        'lambdat' :None,
        'dlambdat':None,
        'lambda1' : 'uniform',
        'lambda2': 'uniform',
        'lam_tilde' : None,
        'dlam_tilde': None,
        'lambdas':'uniform',
        'bluni':'uniform',
        'logp1':None,
        'gamma1':None,
        'gamma2':None,
        'gamma3':None,
        'sdgamma0': None,
        'sdgamma1': None,
        'sdgamma2': None,
        'sdgamma3': None,
        'calamp_h1' : 'uniform',
        'calamp_l1' : 'uniform',
        'calpha_h1' : 'uniform',
        'calpha_l1' : 'uniform',
        'polar_eccentricity':'uniform',
        'polar_angle':'uniform',
        'alpha':'uniform'
        }
    # Unknown parameter names map to None, matching the old fall-through.
    return distributions.get(name)
419
420#===============================================================================
421# Function used to generate plot labels.
422#===============================================================================
def plot_label(param):
    """
    A lookup table for plot labels.

    @param param: posterior-sample column name (aliases such as 'mass1'
        or 'rightascension' are mapped to their canonical names first).
    @return: a LaTeX label string, or `param` itself when no formatted
        label is known.
    """
    # Map alternative column names onto the canonical key used in `labels`.
    # (The unused `gr_test_names` list present in the original was removed.)
    aliases = {
        'm1': ['mass1', 'm1'],
        'm2': ['mass2', 'm2'],
        'mc': ['mc', 'mchirp', 'chirpmass'],
        'eta': ['eta', 'massratio', 'sym_massratio'],
        'q': ['q', 'asym_massratio'],
        'iota': ['iota', 'incl', 'inclination'],
        'dist': ['dist', 'distance'],
        'ra': ['rightascension', 'ra'],
        'dec': ['declination', 'dec'],
        'phase': ['phi_orb', 'phi', 'phase', 'phi0'],
    }

    labels={
        'm1':r'$m_1\,(\mathrm{M}_\odot)$',
        'm2':r'$m_2\,(\mathrm{M}_\odot)$',
        'mc':r'$\mathcal{M}\,(\mathrm{M}_\odot)$',
        'eta':r'$\eta$',
        'q':r'$q$',
        'mtotal':r'$M_\mathrm{total}\,(\mathrm{M}_\odot)$',
        'm1_source':r'$m_{1}^\mathrm{source}\,(\mathrm{M}_\odot)$',
        'm2_source':r'$m_{2}^\mathrm{source}\,(\mathrm{M}_\odot)$',
        'mtotal_source':r'$M_\mathrm{total}^\mathrm{source}\,(\mathrm{M}_\odot)$',
        'mc_source':r'$\mathcal{M}^\mathrm{source}\,(\mathrm{M}_\odot)$',
        'redshift':r'$z$',
        'm1_source_maxldist':r'$m_{1}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
        'm2_source_maxldist':r'$m_{2}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
        'mtotal_source_maxldist':r'$M_\mathrm{total}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
        'mc_source_maxldist':r'$\mathcal{M}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
        'redshift_maxldist':r'$z^\mathrm{maxLdist}$',
        'mf':r'$M_\mathrm{final}\,(\mathrm{M}_\odot)$',
        'mf_evol':r'$M_\mathrm{final}^\mathrm{evol}\,(\mathrm{M}_\odot)$',
        'mf_nonevol':r'$M_\mathrm{final}^\mathrm{non-evol}\,(\mathrm{M}_\odot)$',
        'mf_source':r'$M_\mathrm{final}^\mathrm{source}\,(\mathrm{M}_\odot)$',
        'mf_source_evol':r'$M_\mathrm{final}^\mathrm{source, evol}\,(\mathrm{M}_\odot)$',
        'mf_source_nonevol':r'$M_\mathrm{final}^\mathrm{source, non-evol}\,(\mathrm{M}_\odot)$',
        'mf_source_maxldist':r'$M_\mathrm{final}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
        'mf_source_maxldist_evol':r'$M_\mathrm{final}^\mathrm{source, evol - maxLdist}\,(\mathrm{M}_\odot)$',
        'mf_source_maxldist_nonevol':r'$M_\mathrm{final}^\mathrm{source, non-evol - maxLdist}\,(\mathrm{M}_\odot)$',
        'af':r'$a_\mathrm{final}$',
        'af_evol':r'$a_\mathrm{final}^\mathrm{evol}$',
        'af_nonevol':r'$a_\mathrm{final}^\mathrm{non-evol}$',
        'afz':r'$a_{\mathrm{final}, z}$',
        'afz_evol':r'$a_{\mathrm{final}, z}^\mathrm{evol}$',
        'afz_nonevol':r'$a_{\mathrm{final}, z}^\mathrm{non-evol}$',
        'e_rad':r'$E_\mathrm{rad}\,(\mathrm{M}_\odot)$',
        'e_rad_evol':r'$E_\mathrm{rad}^\mathrm{evol}\,(\mathrm{M}_\odot)$',
        'e_rad_nonevol':r'$E_\mathrm{rad}^\mathrm{non-evol}\,(\mathrm{M}_\odot)$',
        'e_rad_maxldist':r'$E_\mathrm{rad}^\mathrm{maxLdist}\,(\mathrm{M}_\odot)$',
        'e_rad_maxldist_evol':r'$E_\mathrm{rad}^\mathrm{evol - maxLdist}\,(\mathrm{M}_\odot)$',
        'e_rad_maxldist_nonevol':r'$E_\mathrm{rad}^\mathrm{non-evol - maxLdist}\,(\mathrm{M}_\odot)$',
        'l_peak':r'$L_\mathrm{peak}\,(10^{56}\,\mathrm{ergs}\,\mathrm{s}^{-1})$',
        'l_peak_evol':r'$L_\mathrm{peak}^\mathrm{evol}\,(10^{56}\,\mathrm{ergs}\,\mathrm{s}^{-1})$',
        'l_peak_nonevol':r'$L_\mathrm{peak}^\mathrm{non-evol}\,(10^{56}\,\mathrm{ergs}\,\mathrm{s}^{-1})$',
        'spin1':r'$S_1$',
        'spin2':r'$S_2$',
        'a1':r'$a_1$',
        'a2':r'$a_2$',
        'a1z':r'$a_{1z}$',
        'a2z':r'$a_{2z}$',
        'theta1':r'$\theta_1\,(\mathrm{rad})$',
        'theta2':r'$\theta_2\,(\mathrm{rad})$',
        'phi1':r'$\phi_1\,(\mathrm{rad})$',
        'phi2':r'$\phi_2\,(\mathrm{rad})$',
        'chi_eff':r'$\chi_\mathrm{eff}$',
        'chi_tot':r'$\chi_\mathrm{total}$',
        'chi_p':r'$\chi_\mathrm{P}$',
        'tilt1':r'$t_1\,(\mathrm{rad})$',
        'tilt2':r'$t_2\,(\mathrm{rad})$',
        'tilt1_isco':r'$t_1^\mathrm{ISCO}\,(\mathrm{rad})$',
        'tilt2_isco':r'$t_2^\mathrm{ISCO}\,(\mathrm{rad})$',
        'costilt1':r'$\mathrm{cos}(t_1)$',
        'costilt2':r'$\mathrm{cos}(t_2)$',
        'iota':r'$\iota\,(\mathrm{rad})$',
        'cosiota':r'$\mathrm{cos}(\iota)$',
        'time':r'$t_\mathrm{c}\,(\mathrm{s})$',
        'time_mean':r'$<t>\,(\mathrm{s})$',
        'dist':r'$d_\mathrm{L}\,(\mathrm{Mpc})$',
        'distance_maxl':r'$d_\mathrm{L}^\mathrm{maxL}\,(\mathrm{Mpc})$',
        'ra':r'$\alpha$',
        'dec':r'$\delta$',
        'phase':r'$\phi\,(\mathrm{rad})$',
        'phase_maxl':r'$\phi^\mathrm{maxL}\,(\mathrm{rad})$',
        'psi':r'$\psi\,(\mathrm{rad})$',
        'theta_jn':r'$\theta_\mathrm{JN}\,(\mathrm{rad})$',
        'costheta_jn':r'$\mathrm{cos}(\theta_\mathrm{JN})$',
        'beta':r'$\beta\,(\mathrm{rad})$',
        'cosbeta':r'$\mathrm{cos}(\beta)$',
        'phi_jl':r'$\phi_\mathrm{JL}\,(\mathrm{rad})$',
        'phi12':r'$\phi_\mathrm{12}\,(\mathrm{rad})$',
        'phi12_isco':r'$\phi_\mathrm{12}^\mathrm{ISCO}\,(\mathrm{rad})$',
        'logl':r'$\mathrm{log}(\mathcal{L})$',
        'h1_end_time':r'$t_\mathrm{H}$',
        'l1_end_time':r'$t_\mathrm{L}$',
        'v1_end_time':r'$t_\mathrm{V}$',
        'h1l1_delay':r'$\Delta t_\mathrm{HL}$',
        'h1v1_delay':r'$\Delta t_\mathrm{HV}$',
        'l1v1_delay':r'$\Delta t_\mathrm{LV}$',
        'lambdat' : r'$\tilde{\Lambda}$',
        'dlambdat': r'$\delta \tilde{\Lambda}$',
        'lambda1' : r'$\lambda_1$',
        'lambda2': r'$\lambda_2$',
        'lam_tilde' : r'$\tilde{\Lambda}$',
        'dlam_tilde': r'$\delta \tilde{\Lambda}$',
        'logp1':r'$\log(p_1)$',
        'gamma1':r'$\Gamma_1$',
        'gamma2':r'$\Gamma_2$',
        'gamma3':r'$\Gamma_3$',
        'sdgamma0' : r'$\gamma_{0}$',
        'sdgamma1' : r'$\gamma_{1}$',
        'sdgamma2' : r'$\gamma_{2}$',
        'sdgamma3' : r'$\gamma_{3}$',
        'calamp_h1' : r'$\delta A_{H1}$',
        'calamp_l1' : r'$\delta A_{L1}$',
        'calpha_h1' : r'$\delta \phi_{H1}$',
        'calpha_l1' : r'$\delta \phi_{L1}$',
        'polar_eccentricity':r'$\epsilon_{polar}$',
        'polar_angle':r'$\alpha_{polar}$',
        'alpha':r'$\alpha_{polar}$',
        'dchiminus1':r'$d\chi_{-1}$',
        'dchiminus2':r'$d\chi_{-2}$',
        'dchi0':r'$d\chi_0$',
        'dchi1':r'$d\chi_1$',
        'dchi2':r'$d\chi_2$',
        'dchi3':r'$d\chi_3$',
        'dchi3S':r'$d\chi_{3S}$',
        'dchi3NS':r'$d\chi_{3NS}$',
        'dchi4':r'$d\chi_4$',
        'dchi4S':r'$d\chi_{4S}$',
        'dchi4NS':r'$d\chi_{4NS}$',
        'dchi5':r'$d\chi_5$',
        'dchi5S':r'$d\chi_{5S}$',
        'dchi5NS':r'$d\chi_{5NS}$',
        'dchi5l':r'$d\chi_{5l}$',
        'dchi5lS':r'$d\chi_{5lS}$',
        'dchi5lNS':r'$d\chi_{5lNS}$',
        'dchi6':r'$d\chi_6$',
        'dchi6S':r'$d\chi_{6S}$',
        'dchi6NS':r'$d\chi_{6NS}$',
        'dchi6l':r'$d\chi_{6l}$',
        'dchi7':r'$d\chi_7$',
        'dchi7S':r'$d\chi_{7S}$',
        'dchi7NS':r'$d\chi_{7NS}$',
        'dxi1':r'$d\xi_1$',
        'dxi2':r'$d\xi_2$',
        'dxi3':r'$d\xi_3$',
        'dxi4':r'$d\xi_4$',
        'dxi5':r'$d\xi_5$',
        'dxi6':r'$d\xi_6$',
        'dalpha1':r'$d\alpha_1$',
        'dalpha2':r'$d\alpha_2$',
        'dalpha3':r'$d\alpha_3$',
        'dalpha4':r'$d\alpha_4$',
        'dalpha5':r'$d\alpha_5$',
        'dbeta1':r'$d\beta_1$',
        'dbeta2':r'$d\beta_2$',
        'dbeta3':r'$d\beta_3$',
        'dsigma1':r'$d\sigma_1$',
        'dsigma2':r'$d\sigma_2$',
        'dsigma3':r'$d\sigma_3$',
        'dsigma4':r'$d\sigma_4$',
        'dquadmon1':r'$\delta\kappa_1$',
        'dquadmon2':r'$\delta\kappa_2$',
        'dquadmons':r'$\delta\kappa_s$',
        'dquadmona':r'$\delta\kappa_a$',
        'domega220':r'$d\omega_{220}$',
        'dtau220':r'$d\tau_{220}$',
        'domega210':r'$d\omega_{210}$',
        'dtau210':r'$d\tau_{210}$',
        'domega330':r'$d\omega_{330}$',
        'dtau330':r'$d\tau_{330}$',
        'domega440':r'$d\omega_{440}$',
        'dtau440':r'$d\tau_{440}$',
        'domega550':r'$d\omega_{550}$',
        'dtau550':r'$d\tau_{550}$',
        'optimal_snr':r'$\rho^{opt}$',
        'h1_optimal_snr':r'$\rho^{opt}_{H1}$',
        'l1_optimal_snr':r'$\rho^{opt}_{L1}$',
        'v1_optimal_snr':r'$\rho^{opt}_{V1}$',
        'matched_filter_snr':r'$\rho^{MF}$',
        'lambdas':r'$\Lambda_S$',
        'bluni':r'$BL_{uniform}$',
        'log10lambda_a':r'$\log\lambda_{\mathbb{A}} [\mathrm{m}]$',
        'log10lambda_eff':r'$\log\lambda_{eff} [\mathrm{m}]$',
        'lambda_eff':r'$\lambda_{eff} [\mathrm{m}]$',
        'lambda_a':r'$\lambda_{\mathbb{A}} [\mathrm{m}]$',
        'liv_amp':r'$\mathbb{A} [\mathrm{{eV}^{2-\alpha}}]$' ,
        'log10livamp':r'$\log \mathbb{A}[\mathrm{{eV}^{2-\alpha}}]$',
        'dchikappaS':r'$d\chi_{\kappa_{S}}$',
        'dchikappaA':r'$d\chi_{\kappa_{A}}$',
        'dchiMinus1':r'$d\chi_{-1}$',
        'dchiMinus2':r'$d\chi_{-2}$',
        'db1':r'$\delta b_1$',
        'db2':r'$\delta b_2$',
        'db3':r'$\delta b_3$',
        'db4':r'$\delta b_4$',
        'dc1':r'$\delta c_1$',
        'dc2':r'$\delta c_2$',
        'dc4':r'$\delta c_4$',
        'dcl':r'$\delta c_l$',

        }

    # Handle cases where multiple names have been used
    for canonical, names in aliases.items():
        if param in names:
            param = canonical
            break

    # Use the plain string if no formatted label is available for param
    return labels.get(param, param)
658
659#===============================================================================
660# Class definitions
661#===============================================================================
662
class PosteriorOneDPDF(object):
    """
    A data structure representing one parameter in a chain of posterior samples.
    The Posterior class generates instances of this class for pivoting onto a given
    parameter (the Posterior class is per-Sampler oriented whereas this class represents
    the same one parameter in successive samples in the chain).
    """
    def __init__(self,name,posterior_samples,injected_value=None,injFref=None,trigger_values=None,prior=None):
        """
        Create an instance of PosteriorOneDPDF based on a table of posterior_samples.

        @param name: A literal string name for the parameter.
        @param posterior_samples: A 1D array of the samples.
        @param injected_value: The injected or real value of the parameter.
        @param injFref: reference frequency for injection
        @param trigger_values: The trigger values of the parameter (dictionary w/ IFOs as keys).
        @param prior: The prior value corresponding to each sample.
        """
        self.__name=name
        self.__posterior_samples=np.array(posterior_samples)

        self.__injFref=injFref
        self.__injval=injected_value
        self.__trigvals=trigger_values
        self.__prior=prior

        return

    def __len__(self):
        """
        Container method. Defined as number of samples.
        """
        return len(self.__posterior_samples)

    def __getitem__(self,idx):
        """
        Container method. Returns a new PosteriorOneDPDF containing sample idx
        (allows slicing).

        Bug fix: the original passed the non-existent keyword 'f_ref' and read
        the undefined attribute self.__f_ref, so any indexing raised; it also
        dropped the prior.  The prior is sliced in step with the samples when
        it supports indexing.
        """
        try:
            sliced_prior = self.__prior[idx] if self.__prior is not None else None
        except TypeError:
            sliced_prior = self.__prior
        return PosteriorOneDPDF(self.__name, self.__posterior_samples[idx],
                                injected_value=self.__injval,
                                injFref=self.__injFref,
                                trigger_values=self.__trigvals,
                                prior=sliced_prior)

    @property
    def name(self):
        """
        Return the string literal name of the parameter.
        """
        return self.__name

    @property
    def mean(self):
        """
        Return the arithmetic mean for the marginal PDF on the parameter.
        """
        return np.mean(self.__posterior_samples)

    @property
    def median(self):
        """
        Return the median value for the marginal PDF on the parameter.
        """
        return np.median(self.__posterior_samples)

    @property
    def stdev(self):
        """
        Return the standard deviation of the marginal PDF on the parameter.
        """
        try:
            stdev = sqrt(np.var(self.__posterior_samples))
            if not np.isfinite(stdev):
                raise OverflowError
        except OverflowError:
            # Rescale by the mean to avoid overflow for very large samples.
            mean = np.mean(self.__posterior_samples)
            stdev = mean * sqrt(np.var(self.__posterior_samples/mean))
        return stdev

    @property
    def stacc(self):
        r"""
        Return the 'standard accuracy statistic' (stacc) of the marginal
        posterior of the parameter.

        stacc is a standard deviant incorporating information about the
        accuracy of the waveform recovery. Defined as the mean of the sum
        of the squared differences between the points in the PDF
        (x_i - sampled according to the posterior) and the true value
        (\f$x_{true}\f$). So for a marginalized one-dimensional PDF:
        \f$stacc = \sqrt{\frac{1}{N}\sum_{i=1}^N (x_i-x_{\rm true})2}\f$

        Returns None when no injected value was set.
        """
        if self.__injval is None:
            return None
        else:
            return np.sqrt(np.mean((self.__posterior_samples - self.__injval)**2.0))

    @property
    def injval(self):
        """
        Return the injected value set at construction. If no value was set
        will return None.
        """
        return self.__injval

    @property
    def trigvals(self):
        """
        Return the trigger values set at construction. If no value was set
        will return None.
        """
        return self.__trigvals

    #@injval.setter #Python 2.6+
    def set_injval(self,new_injval):
        """
        Set the injected/real value of the parameter.

        @param new_injval: The injected/real value to set.
        """

        self.__injval=new_injval

    def set_trigvals(self,new_trigvals):
        """
        Set the trigger values of the parameter.

        @param new_trigvals: Dictionary containing trigger values with IFO keys.
        """

        self.__trigvals=new_trigvals

    @property
    def samples(self):
        """
        Return a 1D numpy.array of the samples.
        """
        return self.__posterior_samples

    def delete_samples_by_idx(self,samples):
        """
        Remove samples from posterior, analogous to numpy.delete but operates in place.

        @param samples: A list containing the indexes of the samples to be removed.
        """
        self.__posterior_samples=np.delete(self.__posterior_samples,samples).reshape(-1,1)

    @property
    def gaussian_kde(self):
        """
        Return a SciPy gaussian_kde (representing a Gaussian KDE) of the samples.
        On failure the samples are dumped to 'exception.out' for debugging and
        the exception is re-raised.
        """
        from numpy import seterr as np_seterr
        from scipy import seterr as sp_seterr

        np_seterr(under='ignore')
        sp_seterr(under='ignore')
        try:
            # Use the public scipy.stats.gaussian_kde name; the stats.kde
            # submodule access used originally is deprecated.
            return_value=stats.gaussian_kde(np.transpose(self.__posterior_samples))
        except Exception:
            exfile=open('exception.out','w')
            np.savetxt(exfile,self.__posterior_samples)
            exfile.close()
            raise

        return return_value

    @property
    def KL(self):
        """Returns the KL divergence between the prior and the posterior.
        It measures the relative information content in nats. The prior
        expression is looked up via get_prior at run time; a ValueError is
        raised when no prior is known for this parameter or the expression
        cannot be evaluated.
        """

        def uniform(x):
            return np.array([1./(np.max(x)-np.min(x)) for _ in x])

        posterior, dx = np.histogram(self.samples,bins=36,density=True)
        from scipy.stats import entropy
        # check the kind of prior and process the string accordingly
        prior = get_prior(self.name)
        if prior is None:
            raise ValueError
        elif prior=='uniform':
            prior+='(self.samples)'
        elif 'x' in prior:
            # Bug fix: str.replace returns a new string; the original
            # discarded the result, leaving the expression unevaluable.
            prior = prior.replace('x','self.samples')
        else:
            # e.g. 'np.cos'/'np.sin': turn the function name into a call on
            # the samples.  (The original raised ValueError for any string
            # already prefixed with 'np.', which looks unintended.)
            if not prior.startswith('np.'):
                prior = 'np.'+prior
            prior += '(self.samples)'

        try:
            # NOTE(review): eval of a table-derived expression; acceptable only
            # because get_prior returns fixed literals — never feed it user input.
            prior_dist = eval(prior)
        except Exception:
            raise ValueError

        return entropy(posterior, qk=prior_dist)

    def prob_interval(self,intervals):
        """
        Evaluate probability intervals.

        @param intervals: A list of the probability intervals [0-1]
        @return: a list of (lower, upper) sample-value pairs; (None, None)
            for any requested interval >= 1.
        """
        list_of_ci=[]
        samples_temp=np.sort(np.squeeze(self.samples))

        for interval in intervals:
            if interval<1.0:
                N=np.size(samples_temp)
                #Find index of lower bound
                lower_idx=int(floor((N/2.0)*(1-interval)))
                if lower_idx<0:
                    lower_idx=0
                #Find index of upper bound, clamped to the last valid index.
                #(Bug fix: the original test 'upper_idx>N' could never be
                #true, allowing an out-of-range index of exactly N.)
                upper_idx=N-int(floor((N/2.0)*(1-interval)))
                if upper_idx>N-1:
                    upper_idx=N-1

                list_of_ci.append((float(samples_temp[lower_idx]),float(samples_temp[upper_idx])))
            else:
                list_of_ci.append((None,None))

        return list_of_ci
896
897class Posterior(object):
898 """
899 Data structure for a table of posterior samples .
900 """
    def __init__(self,commonResultsFormatData,SimInspiralTableEntry=None,inj_spin_frame='OrbitalL', injFref=100,SnglInspiralList=None,name=None,description=None):
        """
        Constructor.

        Builds one PosteriorOneDPDF per input column, attaching injected and
        trigger values where they can be derived from the injection/trigger
        tables, and locates the log-likelihood (required) and log-prior
        (optional) chains among the columns.

        @param commonResultsFormatData: A 2D array containing the posterior
            samples and related data. The samples chains form the columns.
        @param SimInspiralTableEntry: A SimInspiralTable row containing the injected values.
        @param SnglInspiralList: A list of SnglInspiral objects containing the triggers.
        @param inj_spin_frame: spin frame used to interpret injected spins (default 'OrbitalL')
        @param injFref: reference frequency reported for the 'f_ref' parameter
        @param name: optional name (only stored when not None)
        @param description: optional description (only stored when not None)

        Raises RuntimeError when no likelihood/posterior column is found.
        """
        common_output_table_header,common_output_table_raw =commonResultsFormatData
        self._posterior={}
        self._injFref=injFref
        self._injection=SimInspiralTableEntry

        self._triggers=SnglInspiralList
        # Column names that may carry the log-likelihood / log-prior chains.
        self._loglaliases=['deltalogl', 'posterior', 'logl','logL','likelihood']
        self._logpaliases=['logp', 'logP','prior','logprior','Prior','logPrior']

        # All parameter-name handling below is case-insensitive.
        common_output_table_header=[i.lower() for i in common_output_table_header]

        # Define XML mapping: lower-case parameter name -> extractor applied
        # to a SimInspiral/SnglInspiral row (or a plain value for constants).
        self._injXMLFuncMap={
                            'mchirp':lambda inj:inj.mchirp,
                            'chirpmass':lambda inj:inj.mchirp,
                            'mc':lambda inj:inj.mchirp,
                            'mass1':lambda inj:inj.mass1,
                            'm1':lambda inj:inj.mass1,
                            'mass2':lambda inj:inj.mass2,
                            'm2':lambda inj:inj.mass2,
                            'mtotal':lambda inj:float(inj.mass1)+float(inj.mass2),
                            'eta':lambda inj:inj.eta,
                            'q':self._inj_q,
                            'asym_massratio':self._inj_q,
                            'massratio':lambda inj:inj.eta,
                            'sym_massratio':lambda inj:inj.eta,
                            'time': lambda inj:float(get_end(inj)),
                            'time_mean': lambda inj:float(get_end(inj)),
                            'end_time': lambda inj:float(get_end(inj)),
                            'phi0':lambda inj:inj.phi0,
                            'phi_orb': lambda inj: inj.coa_phase,
                            'phase': lambda inj: inj.coa_phase,
                            'dist':lambda inj:inj.distance,
                            'distance':lambda inj:inj.distance,
                            'ra':self._inj_longitude,
                            'rightascension':self._inj_longitude,
                            'long':self._inj_longitude,
                            'longitude':self._inj_longitude,
                            'dec':lambda inj:inj.latitude,
                            'declination':lambda inj:inj.latitude,
                            'lat':lambda inj:inj.latitude,
                            'latitude':lambda inj:inj.latitude,
                            'psi': lambda inj: np.mod(inj.polarization, np.pi),
                            'f_ref': lambda inj: self._injFref,
                            'polarisation':lambda inj:inj.polarization,
                            'polarization':lambda inj:inj.polarization,
                            'h1_end_time':lambda inj:det_end_time('H', inj),
                            'l1_end_time':lambda inj:det_end_time('L', inj),
                            'v1_end_time':lambda inj:det_end_time('V', inj),
                            'lal_amporder':lambda inj:inj.amp_order}

        # Add on all spin parameterizations
        for key, val in self._inj_spins(self._injection, frame=inj_spin_frame).items():
            self._injXMLFuncMap[key] = val

        # One PosteriorOneDPDF per input column, keyed by lower-case name.
        for one_d_posterior_samples,param_name in zip(np.hsplit(common_output_table_raw,common_output_table_raw.shape[1]),common_output_table_header):

            self._posterior[param_name]=PosteriorOneDPDF(param_name.lower(),one_d_posterior_samples,injected_value=self._getinjpar(param_name),injFref=self._injFref,trigger_values=self._gettrigpar(param_name))

        # Derive component masses when only (mchirp, eta) were sampled.
        if 'mchirp' in common_output_table_header and 'eta' in common_output_table_header \
        and (not 'm1' in common_output_table_header) and (not 'm2' in common_output_table_header):
            try:
                print('Inferring m1 and m2 from mchirp and eta')
                (m1,m2)=mc2ms(self._posterior['mchirp'].samples, self._posterior['eta'].samples)
                self._posterior['m1']=PosteriorOneDPDF('m1',m1,injected_value=self._getinjpar('m1'),trigger_values=self._gettrigpar('m1'))
                self._posterior['m2']=PosteriorOneDPDF('m2',m2,injected_value=self._getinjpar('m2'),trigger_values=self._gettrigpar('m2'))
            except KeyError:
                print('Unable to deduce m1 and m2 from input columns')


        logLFound=False

        # Bind self._logL to the last matching alias found among the columns.
        for loglalias in self._loglaliases:

            if loglalias in common_output_table_header:
                try:
                    self._logL=self._posterior[loglalias].samples
                except KeyError:
                    print("No '%s' column in input table!"%loglalias)
                    continue
                logLFound=True

        if not logLFound:
            raise RuntimeError("No likelihood/posterior values found!")
        self._logP=None

        # Log-prior chain is optional; non-log aliases are converted to logs.
        for logpalias in self._logpaliases:
            if logpalias in common_output_table_header:
                try:
                    self._logP=self._posterior[logpalias].samples
                except KeyError:
                    print("No '%s' column in input table!"%logpalias)
                    continue
                if not 'log' in logpalias:
                    self._logP=[np.log(i) for i in self._logP]

        if name is not None:
            self.__name=name

        if description is not None:
            self.__description=description

        return
1018
    def extend_posterior(self):
        """
        Add some useful derived parameters (such as tilt angles, time delays,
        etc) in the Posterior object.

        Works in place: new PosteriorOneDPDF columns are appended to self.
        Each section below is best-effort — missing inputs simply skip the
        corresponding derived parameters (usually with a printed warning).
        """
        injection=self._injection
        pos=self
        # Generate component mass posterior samples (if they didnt exist already)
        if 'mc' in pos.names:
            mchirp_name = 'mc'
        elif 'chirpmass' in pos.names:
            mchirp_name = 'chirpmass'
        else:
            mchirp_name = 'mchirp'

        if 'asym_massratio' in pos.names:
            q_name = 'asym_massratio'
        else:
            q_name = 'q'

        if 'sym_massratio' in pos.names:
            eta_name= 'sym_massratio'
        elif 'massratio' in pos.names:
            eta_name= 'massratio'
        else:
            eta_name='eta'

        if 'mass1' in pos.names and 'mass2' in pos.names:
            pos.append_mapping(('m1','m2'), lambda x,y:(x,y), ('mass1','mass2'))

        if (mchirp_name in pos.names and eta_name in pos.names) and \
        ('mass1' not in pos.names or 'm1' not in pos.names) and \
        ('mass2' not in pos.names or 'm2' not in pos.names):

            pos.append_mapping(('m1','m2'),mc2ms,(mchirp_name,eta_name))

        if (mchirp_name in pos.names and q_name in pos.names) and \
        ('mass1' not in pos.names or 'm1' not in pos.names) and \
        ('mass2' not in pos.names or 'm2' not in pos.names):

            pos.append_mapping(('m1','m2'),q2ms,(mchirp_name,q_name))
            pos.append_mapping('eta',q2eta,q_name)

        if ('m1' in pos.names and 'm2' in pos.names and not 'mtotal' in pos.names ):
            pos.append_mapping('mtotal', lambda m1,m2: m1+m2, ('m1','m2') )

        # Identity mappings: expose the *_spin* columns under short aliases.
        if('a_spin1' in pos.names): pos.append_mapping('a1',lambda a:a,'a_spin1')
        if('a_spin2' in pos.names): pos.append_mapping('a2',lambda a:a,'a_spin2')
        if('phi_spin1' in pos.names): pos.append_mapping('phi1',lambda a:a,'phi_spin1')
        if('phi_spin2' in pos.names): pos.append_mapping('phi2',lambda a:a,'phi_spin2')
        if('theta_spin1' in pos.names): pos.append_mapping('theta1',lambda a:a,'theta_spin1')
        if('theta_spin2' in pos.names): pos.append_mapping('theta2',lambda a:a,'theta_spin2')

        # Pairwise complex-SNR phase differences, wrapped into [-pi, pi).
        my_ifos=['h1','l1','v1']
        for ifo1,ifo2 in combinations(my_ifos,2):
            p1=ifo1+'_cplx_snr_arg'
            p2=ifo2+'_cplx_snr_arg'
            if p1 in pos.names and p2 in pos.names:
                delta=np.mod(pos[p1].samples - pos[p2].samples + np.pi ,2.0*np.pi)-np.pi
                pos.append(PosteriorOneDPDF(ifo1+ifo2+'_relative_phase',delta))

        # Ensure that both theta_jn and inclination are output for runs
        # with zero tilt (for runs with tilt, this will be taken care of
        # below when the old spin angles are computed as functions of the
        # new ones
        # Disabled this since the parameters are degenerate and causing problems
        #if ('theta_jn' in pos.names) and (not 'tilt1' in pos.names) and (not 'tilt2' in pos.names):
        #    pos.append_mapping('iota', lambda t:t, 'theta_jn')

        # Compute time delays from sky position
        try:
            if ('ra' in pos.names or 'rightascension' in pos.names) \
            and ('declination' in pos.names or 'dec' in pos.names) \
            and 'time' in pos.names:
                from lal import LIGOTimeGPS, TimeDelayFromEarthCenter
                from numpy import array
                detMap = {'H1': 'LHO_4k', 'H2': 'LHO_2k', 'L1': 'LLO_4k',
                          'G1': 'GEO_600', 'V1': 'VIRGO', 'T1': 'TAMA_300'}
                if 'ra' in pos.names:
                    ra_name='ra'
                else: ra_name='rightascension'
                if 'dec' in pos.names:
                    dec_name='dec'
                else: dec_name='declination'
                ifo_times={}
                my_ifos=['H1','L1','V1']
                for ifo in my_ifos:
                    inj_time=None
                    if injection:
                        inj_time = det_end_time(ifo, injection)
                    location = lal.cached_detector_by_prefix[ifo].location
                    # Per-sample arrival time at this detector: geocentre time
                    # plus the earth-centre time delay for the sampled sky point.
                    ifo_times[ifo]=array(list(map(lambda ra,dec,time: array([time[0]+TimeDelayFromEarthCenter(location,ra[0],dec[0],LIGOTimeGPS(float(time[0])))]), pos[ra_name].samples,pos[dec_name].samples,pos['time'].samples)))
                    loc_end_time=PosteriorOneDPDF(ifo.lower()+'_end_time',ifo_times[ifo],injected_value=inj_time)
                    pos.append(loc_end_time)
                for ifo1 in my_ifos:
                    for ifo2 in my_ifos:
                        if ifo1==ifo2: continue
                        delay_time=ifo_times[ifo2]-ifo_times[ifo1]
                        if injection:
                            inj_delay=float(det_end_time(ifo2, injection)-det_end_time(ifo1, injection))
                        else:
                            inj_delay=None
                        time_delay=PosteriorOneDPDF(ifo1.lower()+ifo2.lower()+'_delay',delay_time,inj_delay)
                        pos.append(time_delay)
        except ImportError:
            print('Warning: Could not import lal python bindings, check you ./configured with --enable-swig-python')
            print('This means I cannot calculate time delays')

        #Calculate new spin angles
        new_spin_params = ['tilt1','tilt2','theta_jn','beta']
        if not set(new_spin_params).issubset(set(pos.names)):
            old_params = ['f_ref',mchirp_name,'eta','iota','a1','theta1','phi1']
            if 'a2' in pos.names: old_params += ['a2','theta2','phi2']
            try:
                pos.append_mapping(new_spin_params, spin_angles, old_params)
            except KeyError:
                print("Warning: Cannot find spin parameters. Skipping spin angle calculations.")

        #Store signed spin magnitudes in separate parameters and make a1,a2 magnitudes
        if 'a1' in pos.names:
            if 'tilt1' in pos.names:
                pos.append_mapping('a1z', lambda a, tilt: a*np.cos(tilt), ('a1','tilt1'))
            else:
                pos.append_mapping('a1z', lambda x: x, 'a1')
                inj_az = None
                if injection is not None:
                    inj_az = injection.spin1z
                pos['a1z'].set_injval(inj_az)
                pos.pop('a1')
                pos.append_mapping('a1', lambda x: np.abs(x), 'a1z')

        if 'a2' in pos.names:
            if 'tilt2' in pos.names:
                pos.append_mapping('a2z', lambda a, tilt: a*np.cos(tilt), ('a2','tilt2'))
            else:
                pos.append_mapping('a2z', lambda x: x, 'a2')
                inj_az = None
                if injection is not None:
                    inj_az = injection.spin2z
                pos['a2z'].set_injval(inj_az)
                pos.pop('a2')
                pos.append_mapping('a2', lambda x: np.abs(x), 'a2z')

        #Calculate effective spin parallel to L
        if ('m1' in pos.names and 'a1z' in pos.names) and ('m2' in pos.names and 'a2z' in pos.names):
            pos.append_mapping('chi_eff', lambda m1,a1z,m2,a2z: (m1*a1z + m2*a2z) / (m1 + m2), ('m1','a1z','m2','a2z'))

        #If precessing spins calculate total effective spin
        if ('m1' in pos.names and 'a1' in pos.names and 'tilt1' in pos.names) and ('m2' in pos.names and 'a2' in pos.names and 'tilt2' in pos.names):
            pos.append_mapping('chi_tot', lambda m1,a1,m2,a2: (m1*a1 + m2*a2) / (m1 + m2), ('m1','a1','m2','a2'))

        #Calculate effective precessing spin magnitude
        if ('m1' in pos.names and 'a1' in pos.names and 'tilt1' in pos.names) and ('m2' in pos.names and 'a2' in pos.names and 'tilt2' in pos.names):
            pos.append_mapping('chi_p', chi_precessing, ['m1', 'a1', 'tilt1', 'm2', 'a2', 'tilt2'])

        # Calculate redshift from luminosity distance measurements
        if('distance' in pos.names):
            pos.append_mapping('redshift', calculate_redshift, 'distance')
        elif('dist' in pos.names):
            pos.append_mapping('redshift', calculate_redshift, 'dist')
        # If using the DistanceMarginalisation, compute the maxL redshift distribution from the maxL d_L
        elif('distance_maxl' in pos.names):
            pos.append_mapping('redshift_maxldist', calculate_redshift, 'distance_maxl')

        # Calculate source mass parameters
        if ('m1' in pos.names) and ('redshift' in pos.names):
            pos.append_mapping('m1_source', source_mass, ['m1', 'redshift'])

        if ('m2' in pos.names) and ('redshift' in pos.names):
            pos.append_mapping('m2_source', source_mass, ['m2', 'redshift'])

        if ('mtotal' in pos.names) and ('redshift' in pos.names):
            pos.append_mapping('mtotal_source', source_mass, ['mtotal', 'redshift'])

        if ('mc' in pos.names) and ('redshift' in pos.names):
            pos.append_mapping('mc_source', source_mass, ['mc', 'redshift'])

        # Calculate source mass parameters if DistanceMarginalisation was used, using the maxL distance and redshift
        if ('m1' in pos.names) and ('redshift_maxldist' in pos.names):
            pos.append_mapping('m1_source_maxldist', source_mass, ['m1', 'redshift_maxldist'])

        if ('m2' in pos.names) and ('redshift_maxldist' in pos.names):
            pos.append_mapping('m2_source_maxldist', source_mass, ['m2', 'redshift_maxldist'])

        if ('mtotal' in pos.names) and ('redshift_maxldist' in pos.names):
            pos.append_mapping('mtotal_source_maxldist', source_mass, ['mtotal', 'redshift_maxldist'])

        if ('mc' in pos.names) and ('redshift_maxldist' in pos.names):
            pos.append_mapping('mc_source_maxldist', source_mass, ['mc', 'redshift_maxldist'])

        # Calling functions testing Lorentz invariance violation
        if ('log10lambda_eff' in pos.names) and ('redshift' in pos.names):
            pos.append_mapping('log10lambda_a', lambda z,nonGR_alpha,wl,dist:np.log10(lambda_a(z, nonGR_alpha, 10**wl, dist)), ['redshift', 'nonGR_alpha', 'log10lambda_eff', 'dist'])
        if ('log10lambda_eff' in pos.names) and ('redshift' in pos.names):
            pos.append_mapping('log10livamp', lambda z,nonGR_alpha,wl,dist:np.log10(amplitudeMeasure(z, nonGR_alpha, 10**wl, dist)), ['redshift','nonGR_alpha','log10lambda_eff', 'dist'])
        if ('lambda_eff' in pos.names) and ('redshift' in pos.names):
            pos.append_mapping('lambda_a', lambda_a, ['redshift', 'nonGR_alpha', 'log10lambda_eff', 'dist'])
        if ('lambda_eff' in pos.names) and ('redshift' in pos.names):
            pos.append_mapping('liv_amp', amplitudeMeasure, ['redshift','nonGR_alpha','lambda_eff', 'dist'])

        #Calculate new tidal parameters
        new_tidal_params = ['lam_tilde','dlam_tilde']
        old_tidal_params = ['lambda1','lambda2','q']
        if 'lambda1' in pos.names or 'lambda2' in pos.names:
            try:
                pos.append_mapping(new_tidal_params, symm_tidal_params, old_tidal_params)
            except KeyError:
                print("Warning: Cannot find tidal parameters. Skipping tidal calculations.")

        #If new spin params present, calculate old ones
        old_spin_params = ['iota', 'theta1', 'phi1', 'theta2', 'phi2', 'beta']
        new_spin_params = ['theta_jn', 'phi_jl', 'tilt1', 'tilt2', 'phi12', 'a1', 'a2', 'm1', 'm2', 'f_ref','phase']
        try:
            # When f_ref is zero, fall back to the low-frequency cutoff column.
            if pos['f_ref'].samples[0][0]==0.0:
                for name in ['flow','f_lower']:
                    if name in pos.names:
                        new_spin_params = ['theta_jn', 'phi_jl', 'tilt1', 'tilt2', 'phi12', 'a1', 'a2', 'm1', 'm2', name]
        except:
            print("No f_ref for SimInspiralTransformPrecessingNewInitialConditions().")
        if set(new_spin_params).issubset(set(pos.names)) and not set(old_spin_params).issubset(set(pos.names)):
            pos.append_mapping(old_spin_params, physical2radiationFrame, new_spin_params)

        #Calculate spin magnitudes for aligned runs
        if 'spin1' in pos.names:
            inj_a1 = inj_a2 = None
            if injection:
                inj_a1 = sqrt(injection.spin1x*injection.spin1x + injection.spin1y*injection.spin1y + injection.spin1z*injection.spin1z)
                inj_a2 = sqrt(injection.spin2x*injection.spin2x + injection.spin2y*injection.spin2y + injection.spin2z*injection.spin2z)

            try:
                a1_samps = abs(pos['spin1'].samples)
                a1_pos = PosteriorOneDPDF('a1',a1_samps,injected_value=inj_a1)
                pos.append(a1_pos)
            except KeyError:
                print("Warning: problem accessing spin1 values.")

            try:
                a2_samps = abs(pos['spin2'].samples)
                a2_pos = PosteriorOneDPDF('a2',a2_samps,injected_value=inj_a2)
                pos.append(a2_pos)
            except KeyError:
                print("Warning: no spin2 values found.")

        # For BBHs: Calculate mass and spin of final merged system, radiated energy, and peak luminosity in GWs

        # Only apply fits if this is a BBH run (with no tidal parameters)

        if len(np.intersect1d(pos.names,tidalParams)) == 0:

            # Set fits to consider (and average over)

            FinalSpinFits = ['HBR2016', 'UIB2016', 'HL2016']
            FinalMassFits = ['UIB2016', 'HL2016']
            LpeakFits = ['UIB2016', 'HL2016']

            # If evolved spin angle samples are present, use those to compute the final mass and spin, peak luminosity, and radiated energy; also, use the _evol suffix in the aligned-spin case, since here the spin angles are trivially evolved

            spin_angle_suffix = ''
            evol_suffix = '_evol'

            if all([x in pos.names for x in ['tilt1_isco','tilt2_isco','phi12_isco']]):
                spin_angle_suffix = '_isco'
            elif all([x in pos.names for x in ['tilt1','tilt2','phi12']]):
                evol_suffix = '_nonevol'

            zero_vec = np.array([0.])

            tilt1_name = 'tilt1' + spin_angle_suffix
            tilt2_name = 'tilt2' + spin_angle_suffix
            phi12_name = 'phi12' + spin_angle_suffix
            mf_name = 'mf' + evol_suffix
            mf_source_name = 'mf_source' + evol_suffix
            mf_source_maxldist_name = 'mf_source_maxldist' + evol_suffix

            if ('m1' in pos.names) and ('m2' in pos.names):
                if ('a1' in pos.names) and ('a2' in pos.names):
                    if (tilt1_name in pos.names) and (tilt2_name in pos.names) and (phi12_name in pos.names):
                        # Precessing case
                        print("Using averages of fit formulae for final mass, final spin, and peak luminosity (on masses and 3D spins).")
                        if evol_suffix == '_evol':
                            print("Applying these to *_isco evolved spin samples and outputting *_evol samples.")
                        else:
                            print("Applying these to unevolved spin samples and outputting *_nonevol samples.")
                        print("Final mass fits:", FinalMassFits, "; Final spin fits:", FinalSpinFits, "; Peak luminosity fits:", LpeakFits)
                        try:
                            pos.append_mapping('af' + evol_suffix, lambda m1, m2, chi1, chi2, tilt1, tilt2, phi12: bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, phi12, 'af', FinalSpinFits), ['m1', 'm2', 'a1', 'a2', tilt1_name, tilt2_name, phi12_name])
                        except Exception as e:
                            print("Could not calculate %s. The error was: %s"%('af' + evol_suffix, str(e)))
                        try:
                            pos.append_mapping('afz' + evol_suffix, lambda m1, m2, chi1, chi2, tilt1, tilt2: bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, zero_vec, 'afz', FinalSpinFits), ['m1', 'm2', 'a1', 'a2', tilt1_name, tilt2_name])
                        except Exception as e:
                            print("Could not calculate %s. The error was: %s"%('afz' + evol_suffix, str(e)))
                        try:
                            pos.append_mapping(mf_name, lambda m1, m2, chi1, chi2, tilt1, tilt2: bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, zero_vec, 'Mf', FinalMassFits), ['m1', 'm2', 'a1', 'a2', tilt1_name, tilt2_name])
                        except Exception as e:
                            print("Could not calculate %s. The error was: %s"%(mf_name, str(e)))
                        try:
                            pos.append_mapping('l_peak' + evol_suffix, lambda m1, m2, chi1, chi2, tilt1, tilt2: bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, zero_vec, 'Lpeak', LpeakFits), ['m1', 'm2', 'a1', 'a2', tilt1_name, tilt2_name])
                        except Exception as e:
                            print("Could not calculate %s. The error was: %s"%('l_peak' + evol_suffix, str(e)))
                    elif ('a1z' in pos.names) and ('a2z' in pos.names):
                        # Aligned-spin case
                        print("Using averages for final mass, final spin, and peak luminosity (on masses and projected spin components).")
                        print("Outputting *_evol samples because spin evolution is trivial in this nonprecessing case.")
                        print("Final mass fits:", FinalMassFits, "; Final spin fits:", FinalSpinFits, "; Peak luminosity fits:", LpeakFits)
                        try:
                            # Compute absolute values of spins and compute tilt angles to allow for negative spin values
                            pos.append_mapping('afz_evol', lambda m1, m2, chi1, chi2: bbh_average_fits_precessing(m1, m2, abs(chi1), abs(chi2), 0.5*np.pi*(1. - np.sign(chi1)), 0.5*np.pi*(1. - np.sign(chi2)), zero_vec, 'afz', FinalSpinFits), ['m1', 'm2', 'a1z', 'a2z'])
                        except Exception as e:
                            print("Could not calculate afz_evol. The error was: %s"%(str(e)))
                        try:
                            pos.append_mapping('af_evol', lambda a: abs(a), 'afz_evol')
                        except Exception as e:
                            print("Could not calculate af_evol. The error was: %s"%(str(e)))
                        try:
                            pos.append_mapping('mf_evol', lambda m1, m2, chi1, chi2: bbh_average_fits_precessing(m1, m2, abs(chi1), abs(chi2), 0.5*np.pi*(1. - np.sign(chi1)), 0.5*np.pi*(1. - np.sign(chi2)), zero_vec, 'Mf', FinalMassFits), ['m1', 'm2', 'a1z', 'a2z'])
                        except Exception as e:
                            print("Could not calculate mf_evol. The error was: %s"%(str(e)))
                        try:
                            pos.append_mapping('l_peak_evol', lambda m1, m2, chi1, chi2: bbh_average_fits_precessing(m1, m2, abs(chi1), abs(chi2), 0.5*np.pi*(1. - np.sign(chi1)), 0.5*np.pi*(1. - np.sign(chi2)), zero_vec, 'Lpeak', LpeakFits), ['m1', 'm2', 'a1z', 'a2z'])
                        except Exception as e:
                            print("Could not calculate l_peak_evol. The error was: %s"%(str(e)))
                    else:
                        print("Could not calculate final parameters or Lpeak. Found samples for a1 and a2 but not for tilt angles and phi12 or spin components (a1z and a2z).")
                else:
                    # Nonspinning case
                    print("Using averages of fit formulae for final mass, final spin, and peak luminosity (on masses and zero spins).")
                    print("Outputting *_evol samples because spin evolution is trivial in this nonspinning case.")
                    print("Final mass fits:", FinalMassFits, "; Final spin fits:", FinalSpinFits, "; Peak luminosity fits:", LpeakFits)
                    try:
                        pos.append_mapping('afz_evol', lambda m1, m2: bbh_average_fits_precessing(m1, m2, zero_vec, zero_vec, zero_vec, zero_vec, zero_vec, 'afz', FinalSpinFits), ['m1', 'm2'])
                    except Exception as e:
                        print("Could not calculate afz_evol. The error was: %s"%(str(e)))
                    try:
                        pos.append_mapping('af_evol', lambda a: abs(a), 'afz_evol')
                    except Exception as e:
                        print("Could not calculate af_evol. The error was: %s"%(str(e)))
                    try:
                        pos.append_mapping('mf_evol', lambda m1, m2: bbh_average_fits_precessing(m1, m2, zero_vec, zero_vec, zero_vec, zero_vec, zero_vec, 'Mf', FinalMassFits), ['m1', 'm2'])
                    except Exception as e:
                        print("Could not calculate mf_evol. The error was: %s"%(str(e)))
                    try:
                        pos.append_mapping('l_peak_evol', lambda m1, m2: bbh_average_fits_precessing(m1, m2, zero_vec, zero_vec, zero_vec, zero_vec, zero_vec, 'Lpeak', LpeakFits), ['m1', 'm2'])
                    except Exception as e:
                        print("Could not calculate l_peak_evol. The error was: %s"%(str(e)))

            # Convert final mass to source frame
            if (mf_name in pos.names) and ('redshift' in pos.names):
                try:
                    pos.append_mapping(mf_source_name, source_mass, [mf_name, 'redshift'])
                except Exception as e:
                    print("Could not calculate final source frame mass. The error was: %s"%(str(e)))

            if (mf_name in pos.names) and ('redshift_maxldist' in pos.names):
                try:
                    pos.append_mapping(mf_source_maxldist_name, source_mass, [mf_name, 'redshift_maxldist'])
                except Exception as e:
                    print("Could not calculate final source frame mass using maxldist redshift. The error was: %s"%(str(e)))

            # Calculate radiated energy
            if ('mtotal_source' in pos.names) and (mf_source_name in pos.names):
                try:
                    pos.append_mapping('e_rad' + evol_suffix, lambda mtot_s, mf_s: mtot_s-mf_s, ['mtotal_source', mf_source_name])
                except Exception as e:
                    print("Could not calculate radiated energy. The error was: %s"%(str(e)))

            if ('mtotal_source_maxldist' in pos.names) and (mf_source_maxldist_name in pos.names):
                try:
                    pos.append_mapping('e_rad_maxldist' + evol_suffix, lambda mtot_s, mf_s: mtot_s-mf_s, ['mtotal_source_maxldist', mf_source_maxldist_name])
                except Exception as e:
                    print("Could not calculate radiated energy using maxldist redshift results. The error was: %s"%(str(e)))
1389
1390 def bootstrap(self):
1391 """
1392 Returns a new Posterior object that contains a bootstrap
1393 sample of self.
1394
1395 """
1396 names=[]
1397 samples=[]
1398 for name,oneDpos in self._posterior.items():
1399 names.append(name)
1400 samples.append(oneDpos.samples)
1401
1402 samplesBlock=np.hstack(samples)
1403
1404 bootstrapSamples=samplesBlock[:,:]
1405 Nsamp=bootstrapSamples.shape[0]
1406
1407 rows=np.vsplit(samplesBlock,Nsamp)
1408
1409 for i in range(Nsamp):
1410 bootstrapSamples[i,:]=random.choice(rows)
1411
1412 return Posterior((names,bootstrapSamples),self._injection,self._triggers)
1413
1414 def delete_samples_by_idx(self,samples):
1415 """
1416 Remove samples from all OneDPosteriors.
1417
1418 @param samples: The indexes of the samples to be removed.
1419 """
1420 for name,pos in self:
1421 pos.delete_samples_by_idx(samples)
1422 return
1423
1424 def delete_NaN_entries(self,param_list):
1425 """
1426 Remove samples containing NaN in request params.
1427
1428 @param param_list: The parameters to be checked for NaNs.
1429 """
1430 nan_idxs = np.array(())
1431 nan_dict = {}
1432 for param in param_list:
1433 nan_bool_array = np.isnan(self[param].samples).any(1)
1434 idxs = np.where(nan_bool_array == True)[0]
1435 if len(idxs) > 0:
1436 nan_dict[param]=len(idxs)
1437 nan_idxs = np.append(nan_idxs, idxs)
1438 total_samps = len(self)
1439 nan_samps = len(nan_idxs)
1440 if nan_samps != 0:
1441 print("WARNING: removing %i of %i total samples due to NaNs:"% (nan_samps,total_samps))
1442 for param in nan_dict.keys():
1443 print("\t%i NaNs in %s."%(nan_dict[param],param))
1444 self.delete_samples_by_idx(nan_idxs)
1445 return
1446
1447 @property
1448 def DIC(self):
1449 """Returns the Deviance Information Criterion estimated from the
1450 posterior samples. The DIC is defined as -2*(<log(L)> -
1451 Var(log(L))); smaller values are "better."
1452
1453 """
1454
1455 return -2.0*(np.mean(self._logL) - np.var(self._logL))
1456
    @property
    def injection(self):
        """
        Return the stored injection: the SimInspiralTable row passed to the
        constructor or to set_injection(), or None when none was supplied.
        """

        return self._injection
1465
    @property
    def triggers(self):
        """
        Return the stored triggers: the list of SnglInspiral objects passed
        to the constructor or to set_triggers(), or None when none was supplied.
        """

        return self._triggers
1474
1475 def _total_incl_restarts(self, samples):
1476 total=0
1477 last=samples[0]
1478 for x in samples[1:]:
1479 if x < last:
1480 total += last
1481 last = x
1482 total += samples[-1]
1483 return total
1484
1485 def longest_chain_cycles(self):
1486 """
1487 Returns the number of cycles in the longest chain
1488
1489 """
1490 samps,header=self.samples()
1491 header=header.split()
1492 if not ('cycle' in header):
1493 raise RuntimeError("Cannot compute number of cycles in longest chain")
1494
1495 cycle_col=header.index('cycle')
1496 if 'chain' in header:
1497 chain_col=header.index('chain')
1498 chain_indexes=np.unique(samps[:,chain_col])
1499 max_cycle=0
1500 for ind in chain_indexes:
1501 chain_cycle_samps=samps[ samps[:,chain_col] == ind, cycle_col ]
1502 max_cycle=max(max_cycle, self._total_incl_restarts(chain_cycle_samps))
1503 return int(max_cycle)
1504 else:
1505 return int(self._total_incl_restarts(samps[:,cycle_col]))
1506
1507 #@injection.setter #Python 2.6+
1508 def set_injection(self,injection):
1509 """
1510 Set the injected values of the parameters.
1511
1512 @param injection: A SimInspiralTable row object containing the injected parameters.
1513 """
1514 if injection is not None:
1515 self._injection=injection
1516 for name,onepos in self:
1517 new_injval=self._getinjpar(name)
1518 if new_injval is not None:
1519 self[name].set_injval(new_injval)
1520
1521 def set_triggers(self,triggers):
1522 """
1523 Set the trigger values of the parameters.
1524
1525 @param triggers: A list of SnglInspiral objects.
1526 """
1527 if triggers is not None:
1528 self._triggers=triggers
1529 for name,onepos in self:
1530 new_trigvals=self._gettrigpar(name)
1531 if new_trigvals is not None:
1532 self[name].set_trigvals(new_trigvals)
1533
1534
1535 def _getinjpar(self,paramname):
1536 """
1537 Map parameter names to parameters in a SimInspiralTable .
1538 """
1539 if self._injection is not None:
1540 for key,value in self._injXMLFuncMap.items():
1541 if paramname.lower().strip() == key.lower().strip():
1542 try:
1543 return self._injXMLFuncMap[key](self._injection)
1544 except TypeError:
1545 return self._injXMLFuncMap[key]
1546 return None
1547
1548 def _gettrigpar(self,paramname):
1549 """
1550 Map parameter names to parameters in a SnglInspiral.
1551 """
1552 vals = None
1553 if self._triggers is not None:
1554 for key,value in self._injXMLFuncMap.items():
1555 if paramname.lower().strip() == key.lower().strip():
1556 try:
1557 vals = dict([(trig.ifo,self._injXMLFuncMap[key](trig)) for trig in self._triggers])
1558 except TypeError:
1559 return self._injXMLFuncMap[key]
1560 except AttributeError:
1561 return None
1562 return vals
1563
1564 def __getitem__(self,key):
1565 """
1566 Container method . Returns posterior chain,one_d_pos, with name one_d_pos.name.
1567 """
1568 return self._posterior[key.lower()]
1569
1570 def __len__(self):
1571 """
1572 Container method. Defined as number of samples.
1573 """
1574 return len(self._logL)
1575
    def __iter__(self):
        """
        Container method: iterate over (name, one_d_pos) pairs by delegating
        to self.forward(), for use in ``for name, pos in posterior`` etc.
        """
        return self.forward()
1582
1583 def forward(self):
1584 """
1585 Generate a forward iterator (in sense of list of names) over Posterior
1586 with name,one_d_pos.
1587 """
1588 current_item = 0
1589 while current_item < self.dim:
1590 name=list(self._posterior.keys())[current_item]
1591 pos=self._posterior[name]
1592 current_item += 1
1593 yield name,pos
1594
1595 def bySample(self):
1596 """
1597 Generate a forward iterator over the list of samples corresponding to
1598 the data stored within the Posterior instance. These are returned as
1599 ParameterSamples instances.
1600 """
1601 current_item=0
1602 pos_array,header=self.samples
1603 while current_item < len(self):
1604 sample_array=(np.squeeze(pos_array[current_item,:]))
1605 yield PosteriorSample(sample_array, header, header)
1606 current_item += 1
1607
1608
1609 @property
1610 def dim(self):
1611 """
1612 Return number of parameters.
1613 """
1614 return len(self._posterior.keys())
1615
1616 @property
1617 def names(self):
1618 """
1619 Return list of parameter names.
1620 """
1621 nameslist=[]
1622 for key,value in self:
1623 nameslist.append(key)
1624 return nameslist
1625
1626 @property
1627 def means(self):
1628 """
1629 Return dict {paramName:paramMean} .
1630 """
1631 meansdict={}
1632 for name,pos in self:
1633 meansdict[name]=pos.mean
1634 return meansdict
1635
1636 @property
1637 def medians(self):
1638 """
1639 Return dict {paramName:paramMedian} .
1640 """
1641 mediansdict={}
1642 for name,pos in self:
1643 mediansdict[name]=pos.median
1644 return mediansdict
1645
1646 @property
1647 def stdevs(self):
1648 """
1649 Return dict {paramName:paramStandardDeviation} .
1650 """
1651 stdsdict={}
1652 for name,pos in self:
1653 stdsdict[name]=pos.stdev
1654 return stdsdict
1655
    @property
    def name(self):
        """
        Return qualified string containing the 'name' of the Posterior instance.
        """
        # Name-mangled attribute, set by the declaring class's __init__.
        return self.__name
1662
    @property
    def description(self):
        """
        Return qualified string containing a 'description' of the Posterior instance.
        """
        # Name-mangled attribute, set by the declaring class's __init__.
        return self.__description
1669
1670 def append(self,one_d_posterior):
1671 """
1672 Container method. Add a new OneDParameter to the Posterior instance.
1673 """
1674 self._posterior[one_d_posterior.name]=one_d_posterior
1675 return
1676
1677 def pop(self,param_name):
1678 """
1679 Container method. Remove PosteriorOneDPDF from the Posterior instance.
1680 """
1681 return self._posterior.pop(param_name)
1682
    def append_mapping(self, new_param_names, func, post_names):
        """
        Append posteriors pos1,pos2,...=func(post_names)

        @param new_param_names: name (str) or sequence of names for the
            derived posterior(s).
        @param func: mapping applied sample-wise to the source posteriors.
        @param post_names: name (str) or sequence of names of the existing
            posteriors fed to ``func``.
        """
        # deepcopy 1D posteriors to ensure mapping function doesn't modify the originals
        import copy
        #1D input
        if isinstance(post_names, str):
            old_post = copy.deepcopy(self[post_names])
            old_inj = old_post.injval
            old_trigs = old_post.trigvals
            # Map the injected value through func, if one was present.
            if old_inj:
                new_inj = func(old_inj)
            else:
                new_inj = None
            # Map each per-IFO trigger value through func, if present.
            if old_trigs:
                new_trigs = {}
                for IFO in old_trigs.keys():
                    new_trigs[IFO] = func(old_trigs[IFO])
            else:
                new_trigs = None

            samps = func(old_post.samples)
            new_post = PosteriorOneDPDF(new_param_names, samps, injected_value=new_inj, trigger_values=new_trigs)
            # A 0-d result means func collapsed the samples; skip it.
            if new_post.samples.ndim == 0:
                print("WARNING: No posterior calculated for %s ..." % new_post.name)
            else:
                self.append(new_post)
        #MultiD input
        else:
            old_posts = [copy.deepcopy(self[post_name]) for post_name in post_names]
            old_injs = [post.injval for post in old_posts]
            old_trigs = [post.trigvals for post in old_posts]
            samps = func(*[post.samples for post in old_posts])
            #1D output
            if isinstance(new_param_names, str):
                # Only map injections/triggers if every source had one.
                if None not in old_injs:
                    inj = func(*old_injs)
                else:
                    inj = None
                if None not in old_trigs:
                    new_trigs = {}
                    for IFO in old_trigs[0].keys():
                        oldvals = [param[IFO] for param in old_trigs]
                        new_trigs[IFO] = func(*oldvals)
                else:
                    new_trigs = None
                new_post = PosteriorOneDPDF(new_param_names, samps, injected_value=inj, trigger_values=new_trigs)
                self.append(new_post)
            #MultiD output
            else:
                if None not in old_injs:
                    injs = func(*old_injs)
                else:
                    injs = [None for name in new_param_names]
                if None not in old_trigs:
                    # One per-IFO dict per output parameter.
                    new_trigs = [{} for param in range(len(new_param_names))]
                    for IFO in old_trigs[0].keys():
                        oldvals = [param[IFO] for param in old_trigs]
                        newvals = func(*oldvals)
                        for param,newval in enumerate(newvals):
                            new_trigs[param][IFO] = newval
                else:
                    new_trigs = [None for param in range(len(new_param_names))]
                if not samps: return() # Something went wrong
                new_posts = [PosteriorOneDPDF(new_param_name,samp,injected_value=inj,trigger_values=new_trigs) for (new_param_name,samp,inj,new_trigs) in zip(new_param_names,samps,injs,new_trigs)]
                for post in new_posts:
                    if post.samples.ndim == 0:
                        print("WARNING: No posterior calculated for %s ..." % post.name)
                    else:
                        self.append(post)
        return
1755
1756 def _average_posterior(self, samples, post_name):
1757 """
1758 Returns the average value of the 'post_name' column of the
1759 given samples.
1760 """
1761 ap = 0.0
1762 for samp in samples:
1763 ap = ap + samp[post_name]
1764 return ap / len(samples)
1765
1766 def _average_posterior_like_prior(self, samples, logl_name, prior_name, log_bias = 0):
1767 """
1768 Returns the average value of the posterior assuming that the
1769 'logl_name' column contains log(L) and the 'prior_name' column
1770 contains the prior (un-logged).
1771 """
1772 ap = 0.0
1773 for samp in samples:
1774 ap += np.exp(samp[logl_name]-log_bias)*samp[prior_name]
1775 return ap / len(samples)
1776
1777 def _bias_factor(self):
1778 """
1779 Returns a sensible bias factor for the evidence so that
1780 integrals are representable as doubles.
1781 """
1782 return np.mean(self._logL)
1783
    def di_evidence(self, boxing=64):
        """
        Returns the log of the direct-integration evidence for the
        posterior samples.

        @param boxing: maximum number of samples per kD-tree leaf box
            over which the posterior is averaged before summing.
        """
        # Only these (bounded, well-behaved) parameters are used as
        # integration coordinates; other columns are ignored.
        allowed_coord_names=["spin1", "spin2", "a1", "phi1", "theta1", "a2", "phi2", "theta2",
                             "iota", "psi", "ra", "dec",
                             "phi_orb", "phi0", "dist", "time", "mc", "mchirp", "chirpmass", "q"]
        samples,header=self.samples()
        header=header.split()
        coord_names=[name for name in allowed_coord_names if name in header]
        coordinatized_samples=[PosteriorSample(row, header, coord_names) for row in samples]
        tree=KDTree(coordinatized_samples)

        # Choose the integrand from the available columns; the bias
        # factor keeps exp(logL) finite in double precision.
        if "prior" in header and "logl" in header:
            bf = self._bias_factor()
            return bf + np.log(tree.integrate(lambda samps: self._average_posterior_like_prior(samps, "logl", "prior", bf), boxing))
        elif "prior" in header and "likelihood" in header:
            bf = self._bias_factor()
            return bf + np.log(tree.integrate(lambda samps: self._average_posterior_like_prior(samps, "likelihood", "prior", bf), boxing))
        elif "post" in header:
            return np.log(tree.integrate(lambda samps: self._average_posterior(samps, "post"), boxing))
        elif "posterior" in header:
            return np.log(tree.integrate(lambda samps: self._average_posterior(samps, "posterior"), boxing))
        else:
            raise RuntimeError("could not find 'post', 'posterior', 'logl' and 'prior', or 'likelihood' and 'prior' columns in output to compute direct integration evidence")
1810
1812 """Returns an approximation to the log(evidence) obtained by
1813 fitting an ellipse around the highest-posterior samples and
1814 performing the harmonic mean approximation within the ellipse.
1815 Because the ellipse should be well-sampled, this provides a
1816 better approximation to the evidence than the full-domain HM."""
1817 allowed_coord_names=["spin1", "spin2", "a1", "phi1", "theta1", "a2", "phi2", "theta2",
1818 "iota", "psi", "ra", "dec",
1819 "phi_orb", "phi0", "dist", "time", "mc", "mchirp", "chirpmass", "q"]
1820 samples,header=self.samples()
1821 header=header.split()
1822
1823 n=int(0.05*samples.shape[0])
1824 if not n > 1:
1825 raise IndexError
1826
1827 coord_names=[name for name in allowed_coord_names if name in header]
1828 indexes=np.argsort(self._logL[:,0])
1829
1830 my_samples=samples[indexes[-n:], :] # The highest posterior samples.
1831 my_samples=np.array([PosteriorSample(sample,header,coord_names).coord() for sample in my_samples])
1832
1833 mu=np.mean(my_samples, axis=0)
1834 cov=np.cov(my_samples, rowvar=0)
1835
1836 d0=None
1837 for mysample in my_samples:
1838 d=np.dot(mysample-mu, np.linalg.solve(cov, mysample-mu))
1839 if d0 is None:
1840 d0 = d
1841 else:
1842 d0=max(d0,d)
1843
1844 ellipse_logl=[]
1845 ellipse_samples=[]
1846 for sample,logl in zip(samples, self._logL):
1847 coord=PosteriorSample(sample, header, coord_names).coord()
1848 d=np.dot(coord-mu, np.linalg.solve(cov, coord-mu))
1849
1850 if d <= d0:
1851 ellipse_logl.append(logl)
1852 ellipse_samples.append(sample)
1853
1854 if len(ellipse_samples) > 5*n:
1855 print('WARNING: ellpise evidence region encloses significantly more samples than %d'%n)
1856
1857 ellipse_samples=np.array(ellipse_samples)
1858 ellipse_logl=np.array(ellipse_logl)
1859
1860 ndim = len(coord_names)
1861 ellipse_volume=np.pi**(ndim/2.0)*d0**(ndim/2.0)/special.gamma(ndim/2.0+1)*np.sqrt(np.linalg.det(cov))
1862
1863 try:
1864 prior_index=header.index('prior')
1865 pmu=np.mean(ellipse_samples[:,prior_index])
1866 pstd=np.std(ellipse_samples[:,prior_index])
1867 if pstd/pmu > 1.0:
1868 print('WARNING: prior variation greater than 100\\% over elliptical volume.')
1869 approx_prior_integral=ellipse_volume*pmu
1870 except KeyError:
1871 # Maybe prior = 1?
1872 approx_prior_integral=ellipse_volume
1873
1874 ll_bias=np.mean(ellipse_logl)
1875 ellipse_logl = ellipse_logl - ll_bias
1876
1877 return np.log(approx_prior_integral) - np.log(np.mean(1.0/np.exp(ellipse_logl))) + ll_bias
1878
1879 def harmonic_mean_evidence(self):
1880 """
1881 Returns the log of the harmonic mean evidence for the set of
1882 posterior samples.
1883 """
1884 bf = self._bias_factor()
1885 return bf + np.log(1/np.mean(1/np.exp(self._logL-bf)))
1886
1887 def _posMaxL(self):
1888 """
1889 Find the sample with maximum likelihood probability. Returns value
1890 of likelihood and index of sample .
1891 """
1892 logl_vals=self._logL
1893 max_i=0
1894 max_logl=logl_vals[0]
1895 for i in range(len(logl_vals)):
1896 if logl_vals[i] > max_logl:
1897 max_logl=logl_vals[i]
1898 max_i=i
1899 return max_logl,max_i
1900
1901 def _posMap(self):
1902 """
1903 Find the sample with maximum a posteriori probability. Returns value
1904 of posterior and index of sample .
1905 """
1906 logl_vals=self._logL
1907 if self._logP is not None:
1908 logp_vals=self._logP
1909 else:
1910 return None
1911
1912 max_i=0
1913 max_pos=logl_vals[0]+logp_vals[0]
1914 for i in range(len(logl_vals)):
1915 if logl_vals[i]+logp_vals[i] > max_pos:
1916 max_pos=logl_vals[i]+logp_vals[i]
1917 max_i=i
1918 return max_pos,max_i
1919
1920 def _print_table_row(self,name,entries):
1921 """
1922 Print a html table row representation of
1923
1924 name:item1,item2,item3,...
1925 """
1926
1927 row_str='<tr><td>%s</td>'%name
1928 for entry in entries:
1929 row_str+='<td>%s</td>'%entry
1930 row_str+='</tr>'
1931 return row_str
1932
1933 @property
1934 def maxL(self):
1935 """
1936 Return the maximum likelihood probability and the corresponding
1937 set of parameters.
1938 """
1939 maxLvals={}
1940 max_logl,max_i=self._posMaxL()
1941 for param_name in self.names:
1942 maxLvals[param_name]=self._posterior[param_name].samples[max_i][0]
1943
1944 return (max_logl,maxLvals)
1945
1946 @property
1947 def maxP(self):
1948 """
1949 Return the maximum a posteriori probability and the corresponding
1950 set of parameters.
1951 """
1952 maxPvals={}
1953 max_pos,max_i=self._posMap()
1954 for param_name in self.names:
1955 maxPvals[param_name]=self._posterior[param_name].samples[max_i][0]
1956
1957 return (max_pos,maxPvals)
1958
1959
1960 def samples(self):
1961 """
1962 Return an (M,N) numpy.array of posterior samples; M = len(self);
1963 N = dim(self) .
1964 """
1965 header_string=''
1966 posterior_table=[]
1967 for param_name,one_pos in self:
1968 column=np.array(one_pos.samples)
1969 header_string+=param_name+'\t'
1970 posterior_table.append(column)
1971 posterior_table=tuple(posterior_table)
1972 return np.column_stack(posterior_table),header_string
1973
1974 def write_to_file(self,fname):
1975 """
1976 Dump the posterior table to a file in the 'common format'.
1977 """
1978 posterior_table, header_string = self.samples()
1979 np.savetxt(
1980 fname,
1981 posterior_table,
1982 comments='',
1983 delimiter='\t',
1984 header=header_string,
1985 )
1986
1987 def gelman_rubin(self, pname):
1988 """
1989 Returns an approximation to the Gelman-Rubin statistic (see
1990 Gelman, A. and Rubin, D. B., Statistical Science, Vol 7,
1991 No. 4, pp. 457--511 (1992)) for the parameter given, accurate
1992 as the number of samples in each chain goes to infinity. The
1993 posterior samples must have a column named 'chain' so that the
1994 different chains can be separated.
1995 """
1996 from numpy import seterr as np_seterr
1997 np_seterr(all='raise')
1998
1999 if "chain" in self.names:
2000 chains=np.unique(self["chain"].samples)
2001 chain_index=self.names.index("chain")
2002 param_index=self.names.index(pname)
2003 data,header=self.samples()
2004 chainData=[data[ data[:,chain_index] == chain, param_index] for chain in chains]
2005 allData=np.concatenate(chainData)
2006 chainMeans=[np.mean(data) for data in chainData]
2007 chainVars=[np.var(data) for data in chainData]
2008 BoverN=np.var(chainMeans)
2009 W=np.mean(chainVars)
2010 sigmaHat2=W + BoverN
2011 m=len(chainData)
2012 VHat=sigmaHat2 + BoverN/m
2013 try:
2014 R = VHat/W
2015 except:
2016 print("Error when computer Gelman-Rubin R statistic for %s. This may be a fixed parameter"%pname)
2017 R = np.nan
2018 return R
2019 else:
2020 raise RuntimeError('could not find necessary column header "chain" in posterior samples')
2021
    def healpix_map(self, resol, nest=True):
        """Returns a healpix map in the pixel ordering that represents the
        posterior density (per square degree) on the sky. The pixels
        will be chosen to have at least the given resolution (in
        degrees).

        @param resol: target angular resolution in degrees.
        @param nest: if True return the map in NESTED pixel ordering,
            otherwise RING ordering.
        """

        # Ensure that the resolution is twice the desired
        nside = 2
        while hp.nside2resol(nside, arcmin=True) > resol*60.0/2.0:
            nside *= 2

        ras = self['ra'].samples.squeeze()
        decs = self['dec'].samples.squeeze()

        # Convert (ra, dec) to healpy spherical angles; theta is measured
        # from the north pole.
        phis = ras
        thetas = np.pi/2.0 - decs

        # Create the map in ring ordering
        inds = hp.ang2pix(nside, thetas, phis, nest=False)
        counts = np.bincount(inds)
        # bincount stops at the highest occupied pixel; pad with zeros so
        # the array covers the whole sky.
        if counts.shape[0] < hp.nside2npix(nside):
            counts = np.concatenate((counts, np.zeros(hp.nside2npix(nside) - counts.shape[0])))

        # Smooth the map a bit (Gaussian sigma = resol)
        hpmap = hp.sphtfunc.smoothing(counts, sigma=resol*np.pi/180.0)

        # Normalise to a probability density per square degree.
        hpmap = hpmap / (np.sum(hpmap)*hp.nside2pixarea(nside, degrees=True))

        if nest:
            hpmap = hp.reorder(hpmap, r2n=True)

        return hpmap
2056
    def __str__(self):
        """
        Define a string representation of the Posterior class ; returns
        a html formatted table of various properties of posteriors.
        """
        return_val='<table border="1" id="statstable"><tr><th/>'
        column_names=['maP','maxL','stdev','mean','median','stacc','injection value']
        IFOs = []
        # One extra column per trigger IFO, when triggers are present.
        if self._triggers is not None:
            IFOs = [trig.ifo for trig in self._triggers]
            for IFO in IFOs:
                column_names.append(IFO+' trigger values')

        for column_name in column_names:
            return_val+='<th>%s</th>'%column_name

        return_val+='</tr>'

        for name,oned_pos in self:

            max_logl,max_i=self._posMaxL()
            maxL=oned_pos.samples[max_i][0]
            max_post,max_j=self._posMap()
            maP=oned_pos.samples[max_j][0]
            mean=str(oned_pos.mean)
            stdev=str(oned_pos.stdev)
            median=str(np.squeeze(oned_pos.median))
            stacc=str(oned_pos.stacc)
            injval=str(oned_pos.injval)
            trigvals=oned_pos.trigvals

            row = [maP,maxL,stdev,mean,median,stacc,injval]
            if self._triggers is not None:
                for IFO in IFOs:
                    try:
                        row.append(str(trigvals[IFO]))
                    except TypeError:
                        # trigvals is None for this parameter.
                        row.append(None)
            return_val+=self._print_table_row(name,row)

        return_val+='</table>'

        # Pretty-print via an XML round trip, then strip the XML
        # declaration that the reparse prepends.
        parser=XMLParser()
        parser.feed(return_val)
        Estr=parser.close()

        elem=Estr
        rough_string = tostring(elem, 'utf-8')
        reparsed = minidom.parseString(rough_string)
        return_val=reparsed.toprettyxml(indent="  ")
        return return_val[len('<?xml version="1.0" ?>')+1:]
2108
2109
2110 #===============================================================================
2111 # Functions used to parse injection structure.
2112 #===============================================================================
2113 def _inj_m1(self,inj):
2114 """
2115 Return the mapping of (mchirp,eta)->m1; m1>m2 i.e. return the greater of the mass
2116 components (m1) calculated from the chirp mass and the symmetric mass ratio.
2117
2118 @param inj: a custom type with the attributes 'mchirp' and 'eta'.
2119 """
2120 (mass1,mass2)=mc2ms(inj.mchirp,inj.eta)
2121 return mass1
2122
2123 def _inj_m2(self,inj):
2124 """
2125 Return the mapping of (mchirp,eta)->m2; m1>m2 i.e. return the lesser of the mass
2126 components (m2) calculated from the chirp mass and the symmetric mass ratio.
2127
2128 @param inj: a custom type with the attributes 'mchirp' and 'eta'.
2129 """
2130 (mass1,mass2)=mc2ms(inj.mchirp,inj.eta)
2131 return mass2
2132
2133 def _inj_q(self,inj):
2134 """
2135 Return the mapping of (mchirp,eta)->q; m1>m2 i.e. return the mass ratio q=m2/m1.
2136
2137 @param inj: a custom type with the attributes 'mchirp' and 'eta'.
2138 """
2139 (mass1,mass2)=mc2ms(inj.mchirp,inj.eta)
2140 return mass2/mass1
2141
2142 def _inj_longitude(self,inj):
2143 """
2144 Return the mapping of longitude found in inj to the interval [0,2*pi).
2145
2146 @param inj: a custom type with the attribute 'longitude'.
2147 """
2148 if inj.longitude>2*pi_constant or inj.longitude<0.0:
2149 maplong=2*pi_constant*(((float(inj.longitude))/(2*pi_constant)) - floor(((float(inj.longitude))/(2*pi_constant))))
2150 print("Warning: Injected longitude/ra (%s) is not within [0,2\\pi)! Angles are assumed to be in radians so this will be mapped to [0,2\\pi). Mapped value is: %s."%(str(inj.longitude),str(maplong)))
2151 return maplong
2152 else:
2153 return inj.longitude
2154
    def _inj_spins(self, inj, frame='OrbitalL'):
        """
        Extract the injected spin-related quantities (magnitudes, tilt
        and azimuth angles, precession quantities) from an injection row.

        @param inj: a SimInspiral-like row with cartesian spin components.
        @param frame: reference frame of the tabulated spins; the
            angle conversion below is only performed for 'OrbitalL'.
        """

        from lalsimulation import SimInspiralTransformPrecessingWvf2PE

        spins = {}
        f_ref = self._injFref

        if not inj:
            spins = {}

        else:
            axis = lalsim.SimInspiralGetFrameAxisFromString(frame)
            s1x=inj.spin1x
            s1y=inj.spin1y
            s1z=inj.spin1z
            s2x=inj.spin2x
            s2y=inj.spin2y
            s2z=inj.spin2z
            iota=inj.inclination
            m1, m2 = inj.mass1, inj.mass2
            mc, eta = inj.mchirp, inj.eta

            # Spin magnitudes and polar/azimuthal angles from the
            # cartesian components.
            a1, theta1, phi1 = cart2sph(s1x, s1y, s1z)
            a2, theta2, phi2 = cart2sph(s2x, s2y, s2z)

            spins = {'a1':a1, 'theta1':theta1, 'phi1':phi1,
                     'a2':a2, 'theta2':theta2, 'phi2':phi2,
                     'iota':iota}
            # If spins are aligned, save the sign of the z-component
            if inj.spin1x == inj.spin1y == inj.spin2x == inj.spin2y == 0.:
                spins['a1z'] = inj.spin1z
                spins['a2z'] = inj.spin2z

            # Newtonian orbital angular momentum and total J = L + S1 + S2.
            L  = orbital_momentum(f_ref, m1,m2, iota)
            S1 = np.hstack((s1x, s1y, s1z))
            S2 = np.hstack((s2x, s2y, s2z))

            zhat = np.array([0., 0., 1.])
            aligned_comp_spin1 = array_dot(S1, zhat)
            aligned_comp_spin2 = array_dot(S2, zhat)
            # Effective aligned spin combination.
            chi = aligned_comp_spin1 + aligned_comp_spin2 + \
                  np.sqrt(1. - 4.*eta) * (aligned_comp_spin1 - aligned_comp_spin2)
            S1 *= m1**2
            S2 *= m2**2
            J = L + S1 + S2

            beta  = array_ang_sep(J, L)
            spins['beta'] = beta
            spins['spinchi'] = chi
            # Huge caveat: SimInspiralTransformPrecessingWvf2PE assumes that the cartesian spins in the XML table are given in the L frame, ie. in a frame where L||z. While this is the default in inspinj these days, other possibilities exist.
            # Unfortunately, we don't have a function (AFIK), that transforms spins from an arbitrary frame to an arbitrary frame, otherwise I'd have called it here to be sure we convert in the L frame.
            # FIXME: add that function here if it ever gets written. For the moment just check
            if not frame=='OrbitalL':
                print("I cannot calculate the injected values of the spin angles unless frame is OrbitalL. Skipping...")
                return spins
            # m1 and m2 here are NOT in SI, but in Msun, this is not a typo.
            theta_jn,phi_jl,tilt1,tilt2,phi12,chi1,chi2=SimInspiralTransformPrecessingWvf2PE(inj.inclination,inj.spin1x, inj.spin1y, inj.spin1z,inj.spin2x, inj.spin2y, inj.spin2z, m1, m2, f_ref, inj.coa_phase)
            spins['theta_jn']=theta_jn
            spins['phi12']=phi12
            spins['tilt1']=tilt1
            spins['tilt2']=tilt2
            spins['phi_jl']=phi_jl

            """
            #If everything is all right, this function should give back the cartesian spins. Uncomment to check
            print("Inverting ")
            iota_back,a1x_back,a1y_back,a1z_back,a2x_back,a2y_back,a2z_back = \
                lalsim.SimInspiralTransformPrecessingNewInitialConditions(theta_jn,phi_jl,tilt1,tilt2,phi12,chi1,chi2,m1*lal.MSUN_SI,m2*lal.MSUN_SI,f_ref,inj.coa_phase)
            print(a1x_back,a1y_back,a1z_back)
            print(a2x_back,a2y_back,a2z_back)
            print(iota_back)
            """

        return spins
2229
class BurstPosterior(Posterior):
    """
    Data structure for a table of posterior samples produced by a burst
    analysis.  Overrides the injection-parsing helpers of Posterior to
    read SimBurst rows instead of SimInspiral rows.

    NOTE(review): this block was reconstructed from a garbled extraction.
    The class header, the ``self._injXMLFuncMap = {`` opener and the
    ``'ra'`` mapping line were missing, and attribute names were mangled
    (e.g. ``_posterior_posterior`` for ``_posterior``) — confirm against
    the upstream source.
    """
    def __init__(self,commonResultsFormatData,SimBurstTableEntry=None,injFref=None,SnglBurstList=None,name=None,description=None):
        """
        Constructor.

        @param commonResultsFormatData: A 2D array containing the posterior
            samples and related data. The samples chains form the columns.
        @param SimBurstTableEntry: A ligolw.lscstables.SimBurst row containing the injected values.
        @param SnglBurstList: A list of SnglBurst objects containing the triggers.
        @param injFref: reference frequency in injection
        @param name: optional name for this Posterior
        @param description: optional description for this Posterior
        """
        common_output_table_header,common_output_table_raw =commonResultsFormatData
        self._posterior={}
        self._injFref=injFref
        self._injection=SimBurstTableEntry
        self._triggers=SnglBurstList
        # Column-name aliases accepted for log-likelihood and log-prior.
        self._loglaliases=['posterior', 'logl','logL','likelihood', 'deltalogl']
        self._logpaliases=['logp', 'logP','prior','logprior','Prior','logPrior']

        common_output_table_header=[i.lower() for i in common_output_table_header]

        # Define XML mapping: posterior column name -> extractor applied
        # to the SimBurst injection row.
        self._injXMLFuncMap={
                            'f0':lambda inj:inj.frequency,
                            'frequency':lambda inj:inj.frequency,
                            'centre_frequency':lambda inj:inj.frequency,
                            'quality':lambda inj:inj.q,
                            'hrss':lambda inj:inj.hrss,
                            'loghrss':lambda inj:np.log(inj.hrss),
                            'polar_angle':lambda inj:inj.pol_ellipse_angle,
                            'pol_ellipse_angle':lambda inj:inj.pol_ellipse_angle,
                            'pol_ellipse_e':lambda inj:inj.pol_ellipse_e,
                            'alpha':lambda inj:inj.pol_ellipse_angle,
                            'polar_eccentricity':lambda inj:inj.pol_ellipse_e,
                            'eccentricity':lambda inj:inj.pol_ellipse_e,
                            'time': lambda inj:float(get_end(inj)),
                            'end_time': lambda inj:float(get_end(inj)),
                            'ra':self._inj_longitude,
                            'rightascension':self._inj_longitude,
                            'long':self._inj_longitude,
                            'longitude':self._inj_longitude,
                            'dec':lambda inj:inj.dec,
                            'declination':lambda inj:inj.dec,
                            'lat':lambda inj:inj.dec,
                            'latitude':lambda inj:inj.dec,
                            'psi': lambda inj: np.mod(inj.psi, np.pi),
                            'f_ref': lambda inj: self._injFref,
                            'polarisation':lambda inj:inj.psi,
                            'polarization':lambda inj:inj.psi,
                            'duration':lambda inj:inj.duration,
                            'h1_end_time':lambda inj:det_end_time('H', inj),
                            'l1_end_time':lambda inj:det_end_time('L', inj),
                            'v1_end_time':lambda inj:det_end_time('V', inj),
                            }

        # Build one PosteriorOneDPDF per table column.
        for one_d_posterior_samples,param_name in zip(np.hsplit(common_output_table_raw,common_output_table_raw.shape[1]),common_output_table_header):

            self._posterior[param_name]=PosteriorOneDPDF(param_name.lower(),one_d_posterior_samples,injected_value=self._getinjpar(param_name),injFref=self._injFref,trigger_values=self._gettrigpar(param_name))

        logLFound=False

        for loglalias in self._loglaliases:
            if loglalias in common_output_table_header:
                try:
                    self._logL=self._posterior[loglalias].samples
                except KeyError:
                    print("No '%s' column in input table!"%loglalias)
                    continue
                logLFound=True

        if not logLFound:
            raise RuntimeError("No likelihood/posterior values found!")

        self._logP=None
        for logpalias in self._logpaliases:
            if logpalias in common_output_table_header:
                try:
                    self._logP=self._posterior[logpalias].samples
                except KeyError:
                    print("No '%s' column in input table!"%logpalias)
                    continue
                # Un-logged prior column: take its log for consistency.
                if not 'log' in logpalias:
                    self._logP=[np.log(i) for i in self._logP]

        if name is not None:
            self.__name=name

        if description is not None:
            self.__description=description

        return
    #===============================================================================
    # Functions used to parse injection structure.
    #===============================================================================

    def _inj_longitude(self,inj):
        """
        Return the mapping of longitude found in inj to the interval [0,2*pi).

        @param inj: a custom type with the attribute 'ra'.
        """
        if inj.ra>2*pi_constant or inj.ra<0.0:
            maplong=2*pi_constant*(((float(inj.ra)/(2*pi_constant)) - floor(((float(inj.ra))/(2*pi_constant)))))
            print("Warning: Injected longitude/ra (%s) is not within [0,2\\pi)! Angles are assumed to be in radians so this will be mapped to [0,2\\pi). Mapped value is: %s."%(str(inj.ra),str(maplong)))
            return maplong
        else:
            return inj.ra
2341
class KDTree(object):
    """
    A kD-tree over objects that expose their coordinates via obj.coord().
    """
    def __init__(self, objects):
        """
        Construct a kD-tree from a sequence of objects.  Each object
        should return its coordinates using obj.coord().

        @param objects: non-empty sequence of coordinate-bearing samples.
        @raise RuntimeError: if ``objects`` is empty.
        """
        if len(objects) == 0:
            raise RuntimeError("cannot construct kD-tree out of zero objects---you may have a repeated sample in your list")
        elif len(objects) == 1:
            # Leaf: degenerate bounding box at the single point.
            self._objects = objects[:]
            coord = self._objects[0].coord()
            self._bounds = coord, coord
        elif self._same_coords(objects):
            # All the same coordinates: collapse to a single-object leaf.
            self._objects = [objects[0]]
            coord = self._objects[0].coord()
            self._bounds = coord, coord
        else:
            # Interior node: split along the longest dimension at the
            # midpoint between the two median coordinates.
            # (A dead "low,high = self._bounds" unpack was removed here:
            # both names were unconditionally rebound before any use.)
            self._objects = objects[:]
            self._bounds = self._bounds_of_objects()
            self._split_dim = self._longest_dimension()
            longest_dim = self._split_dim
            sorted_objects = sorted(self._objects, key=lambda obj: (obj.coord())[longest_dim])
            N = len(sorted_objects)
            bound = 0.5*(sorted_objects[int(N/2)].coord()[longest_dim] + sorted_objects[int(N/2)-1].coord()[longest_dim])
            low = [obj for obj in self._objects if obj.coord()[longest_dim] < bound]
            high = [obj for obj in self._objects if obj.coord()[longest_dim] >= bound]
            if len(low) == 0:
                # Then there must be multiple values with the same
                # coordinate as the minimum element of high
                low = [obj for obj in self._objects if obj.coord()[longest_dim] == bound]
                high = [obj for obj in self._objects if obj.coord()[longest_dim] > bound]
            self._left = KDTree(low)
            self._right = KDTree(high)

    def _same_coords(self, objects):
        """
        True if and only if all the given objects have the same
        coordinates.
        """
        if len(objects) <= 1:
            return True
        coords = [obj.coord() for obj in objects]
        c0 = coords[0]
        for ci in coords[1:]:
            if not np.all(ci == c0):
                return False
        return True

    def _bounds_of_objects(self):
        """
        Componentwise (low, high) bounds of the objects in this node.
        """
        low = self._objects[0].coord()
        high = self._objects[0].coord()
        for obj in self._objects[1:]:
            low = np.minimum(low, obj.coord())
            high = np.maximum(high, obj.coord())
        return low, high

    def _longest_dimension(self):
        """
        Index of the widest dimension of the bounding box.
        """
        low, high = self._bounds
        widths = high - low
        return np.argmax(widths)

    def objects(self):
        """
        Returns (a copy of the list of) the objects in the tree.
        """
        return self._objects[:]

    def __iter__(self):
        """
        Iterator over all the objects contained in the tree.
        """
        return self._objects.__iter__()

    def left(self):
        """
        Returns the left tree.
        """
        return self._left

    def right(self):
        """
        Returns the right tree.
        """
        return self._right

    def split_dim(self):
        """
        Returns the dimension along which this level of the kD-tree
        splits.
        """
        return self._split_dim

    def bounds(self):
        """
        Returns the coordinates of the lower-left and upper-right
        corners of the bounding box for this tree: low_left, up_right
        """
        return self._bounds

    def volume(self):
        """
        Returns the volume of the bounding box of the tree (product of
        the side lengths; zero for a leaf).
        """
        v = 1.0
        low, high = self._bounds
        for l, h in zip(low, high):
            v = v*(h - l)
        return v

    def integrate(self, f, boxing=64):
        """
        Returns the integral of f(objects) over the tree. The
        optional boxing parameter determines how deep to descend into
        the tree before computing f.
        """
        def x(tree):
            return tree.volume()*f(tree._objects)

        def y(a, b):
            return a + b

        return self.operate(x, y, boxing=boxing)

    def operate(self, f, g, boxing=64):
        """
        Apply f at every node holding at most ``boxing`` objects and
        combine sibling results with g up the tree.
        """
        if len(self._objects) <= boxing:
            return f(self)
        else:
            return g(self._left.operate(f, g, boxing), self._right.operate(f, g, boxing))
2491
2492class KDTreeVolume(object):
2493 """
2494 A kD-tree suitable for splitting parameter spaces and counting hypervolumes.
2495 Is modified from the KDTree class so that bounding boxes are stored. This means that
2496 there are no longer gaps in the hypervolume once the samples have been split into groups.
2497 """
    def __init__(self, objects,boundingbox,dims=0):
        """
        Construct a kD-tree from a sequence of objects. Each object
        should return its coordinates using obj.coord().
        the obj should also store the bounds of the hypervolume its found in.
        for non-leaf objects we need the name of the dimension split and value at split.

        @param objects: non-empty sequence of coordinate-bearing samples.
        @param boundingbox: (low, high) bounds of the hypervolume this
            node covers; children partition it with no gaps.
        @param dims: index of the dimension to split along at this level
            (cycles through the dimensions as the tree deepens).
        """
        self._dimension = dims
        self._bounds = boundingbox
        self._weight = 1
        if len(objects) == 0: #for no objects - something is wrong, i think it can only happen in first call
            raise RuntimeError("cannot construct kD-tree out of zero objects---you may have a repeated sample in your list")
        elif len(objects) == 1: #1 object, have reached leaf of tree
            self._objects = objects[:]
        elif self._same_coords(objects): # When ALL samples have the same coordinates in all dimensions
            self._weight = len(objects)
            self._objects = [ objects[0] ] #need to modify kdtree_bin functions to use _weight to get correct number of samples
            coord=self._objects[0].coord()
        else: #construct next level of tree with multiple samples
            self._objects = objects[:]
            split_dim = self._dimension
            sorted_objects=sorted(self._objects, key=lambda obj: (obj.coord())[split_dim])
            N = len(sorted_objects)
            # Split at the midpoint of the two median coordinates.
            self._split_value = 0.5*(sorted_objects[int(N/2)].coord()[split_dim] + sorted_objects[int(N/2)-1].coord()[split_dim])
            bound = self._split_value
            low = [obj for obj in self._objects if obj.coord()[split_dim] < bound]
            high = [obj for obj in self._objects if obj.coord()[split_dim] >= bound]
            if len(low)==0:
                # Then there must be multiple values with the same
                # coordinate as the minimum element of 'high'
                low = [obj for obj in self._objects if obj.coord()[split_dim] == bound]
                high = [obj for obj in self._objects if obj.coord()[split_dim] > bound]
            # Children inherit this node's box, clipped at the split
            # value, so the child boxes tile the parent exactly.
            leftBoundingbox = []
            rightBoundingbox = []
            for i in self._bounds:
                leftBoundingbox.append(list(i))
                rightBoundingbox.append(list(i))
            leftBoundingbox[1][split_dim] = bound
            rightBoundingbox[0][split_dim] = bound
            # designate the next dimension to use for split for sub-trees
            # if has got to the end of the list of dimensions then starts
            # again at dimension = 0
            if (split_dim < (len(self._objects[0].coord()) - 1)):
                child_dim = split_dim + 1
            else:
                child_dim = 0
            self._left = KDTreeVolume(low,leftBoundingbox,dims = child_dim)
            # added in a load of messing about incase there are repeated values in the currently checked dimension
            if (len(high) != 0):
                self._right = KDTreeVolume(high,rightBoundingbox,dims = child_dim)
            else:
                self._right = None
2550
2551 def _same_coords(self, objects):
2552 """
2553 True if and only if all the given objects have the same
2554 coordinates.
2555 """
2556 if len(objects) <= 1:
2557 return True
2558 coords = [obj.coord() for obj in objects]
2559 c0 = coords[0]
2560 for ci in coords[1:]:
2561 if not np.all(ci == c0):
2562 return False
2563 return True
2564
2565 def objects(self):
2566 """
2567 Returns the objects in the tree.
2568 """
2569 return self._objects[:]
2570
2571 def __iter__(self):
2572 """
2573 Iterator over all the objects contained in the tree.
2574 """
2575 return self._objects.__iter__()
2576
2577 def left(self):
2578 """
2579 Returns the left tree.
2580 """
2581 return self._left
2582
2583 def right(self):
2584 """
2585 Returns the right tree.
2586 """
2587 return self._right
2588
2589 def split_dim(self):
2590 """
2591 Returns the dimension along which this level of the kD-tree
2592 splits.
2593 """
2594 return self._split_dim
2595
2596 def bounds(self):
2597 """
2598 Returns the coordinates of the lower-left and upper-right
2599 corners of the bounding box for this tree: low_left, up_right
2600 """
2601 return self._bounds
2602
2603 def volume(self):
2604 """
2605 Returns the volume of the bounding box of the tree.
2606 """
2607 v = 1.0
2608 low,high=self._bounds
2609 for l,h in zip(low,high):
2610 v = v*(h - l)
2611 return v
2612
2613 def integrate(self,f,boxing=64):
2614 """
2615 Returns the integral of f(objects) over the tree. The
2616 optional boxing parameter determines how deep to descend into
2617 the tree before computing f.
2618 """
2619 def x(tree):
2620 return tree.volume()*f(tree._objects)
2621
2622 def y(a,b):
2623 return a+b
2624
2625 return self.operate(x,y,boxing=boxing)
2626
2627 def operate(self,f,g,boxing=64):
2628 """
2629 Operates on tree nodes exceeding boxing parameter depth.
2630 """
2631 if len(self._objects) <= boxing:
2632 return f(self)
2633 else:
2634 return g(self._left.operate(f,g,boxing),self._right.operate(f,g,boxing))
2635
2636 def search(self,coordinates,boxing = 64):
2637 """
2638 takes a set of coordinates and searches down through the tree untill it gets
2639 to a box with less than 'boxing' objects in it and returns the box bounds,
2640 number of objects in the box, and the weighting.
2641 """
2642 if len(self._objects) <= boxing:
2643 return self._bounds,len(self._objects),self._weight
2644 elif coordinates[self._dimension] < self._split_value:
2645 return self._left.search(coordinates,boxing)
2646 else:
2647 return self._right.search(coordinates,boxing)
2648
2649 def fillNewTree(self,boxing = 64, isArea = False):
2650 """
2651 copies tree structure, but with KDSkeleton as the new nodes.
2652 """
2653 boxN = boxing
2654 if len(self._objects) <= boxN:
2655 newNode = KDSkeleton(self.bounds(), left_child = None , right_child = None)
2656 if isArea:
2657 newNode.setImportance(len(self._objects),skyArea(self.bounds()))
2658 else:
2659 newNode.setImportance(len(self._objects),self.volume())
2660 return newNode
2661 else:
2662 if isArea:
2663 newNode = KDSkeleton(self.bounds, left_child = self._left.fillNewTree(boxN,isArea=True), right_child = self._right.fillNewTree(boxN,isArea=True))
2664 newNode.setImportance(len(self._objects),skyArea(self.bounds()))
2665 else:
2666 newNode = KDSkeleton(self.bounds, left_child = self._left.fillNewTree(boxN), right_child = self._right.fillNewTree(boxN))
2667 newNode.setImportance(len(self._objects),self.volume())
2668 newNode.setSplit(self._dimension,self._split_value)
2669 return newNode
2670
class KDSkeleton(object):
    """
    Lightweight node recording the structure of a kD-tree: bounding
    box, children, split information and per-node sample bookkeeping.
    """

    def __init__(self, bounding_box, left_child=None, right_child=None):
        self._bounds = bounding_box
        self._left = left_child
        self._right = right_child
        self._samples = 0
        self._splitValue = None
        self._splitDim = None
        self._importance = None
        self._volume = None

    def addSample(self):
        """Record one more sample as belonging to this node."""
        self._samples = self._samples + 1

    def bounds(self):
        """Return this node's bounding box."""
        return self._bounds

    def search(self, coordinates):
        """
        Walk down the tree following ``coordinates`` until a leaf is
        reached; return (bounds, sample count, importance) of that leaf.
        """
        node = self
        while node._left is not None:
            if coordinates[node._splitDim] < node._splitValue:
                node = node._left
            else:
                node = node._right
        return node._bounds, node._samples, node._importance

    def setImportance(self, sampleNumber, volume):
        """Store the node volume and its importance (samples per unit volume)."""
        self._volume = volume
        self._importance = sampleNumber / volume

    def setSplit(self, dimension, value):
        """Record the dimension index and coordinate value of the split."""
        self._splitDim = dimension
        self._splitValue = value
2713
2714
class PosteriorSample(object):
    """
    One posterior sample wrapped for use in a kD-tree: stores the raw
    sample values, their column headers, and the subset of columns that
    act as kD-tree coordinates.
    """

    def __init__(self, sample_array, headers, coord_names):
        """
        Wrap ``sample_array`` (one value per entry of ``headers``) and
        remember which named columns form the coordinate vector.
        Raises RuntimeError when headers and values disagree in length.
        """
        self._samples = sample_array[:]
        self._headers = headers
        if len(sample_array) != len(self._headers):
            print("Header length = ", len(self._headers))
            print("Sample length = ", len(sample_array))
            raise RuntimeError("parameter and sample lengths do not agree")
        self._coord_names = coord_names
        self._coord_indexes = [self._headers.index(name) for name in coord_names]

    def __getitem__(self, key):
        """
        Look up one sample value by (case-insensitive) header name.
        """
        key = key.lower()
        if key not in self._headers:
            raise KeyError("key not found in posterior sample: %s" % key)
        return self._samples[self._headers.index(key)]

    def coord(self):
        """
        Return the coordinate values, in the order the coordinate names
        were given at construction.
        """
        return self._samples[self._coord_indexes]
2752
2753
2754
2755
2756
class AnalyticLikelihood(object):
    """
    Return analytic likelihood values.

    Modes are read from pairs of CSV files: one covariance matrix plus
    one mean-vector file (header line of parameter names, then mean
    values) per mode group.  Per-parameter marginals are built as
    independent normal distributions using the square roots of the
    covariance diagonal as standard deviations; off-diagonal terms are
    not used by pdf()/cdf().
    """

    def __init__(self, covariance_matrix_files, mean_vector_files):
        """
        Prepare analytic likelihood for the given parameters.

        covariance_matrix_files: CSV path or list of paths, one per mode group.
        mean_vector_files: CSV path or list of paths; must match the number
            of covariance matrices.  Mean values may be expressions in 'pi'
            (e.g. 3.0*pi/2.0).
        """
        # Make sure files names are in a list
        if isinstance(covariance_matrix_files, str):
            covariance_matrix_files = [covariance_matrix_files]
        if isinstance(mean_vector_files, str):
            mean_vector_files = [mean_vector_files]

        covarianceMatrices = [np.loadtxt(csvFile, delimiter=',') for csvFile in covariance_matrix_files]
        num_matrices = len(covarianceMatrices)

        if num_matrices != len(mean_vector_files):
            raise RuntimeError('Must give a mean vector list for every covariance matrix')

        # Parameter names come from the header line of the first mean-vector file.
        param_line = open(mean_vector_files[0]).readline()
        self._params = [param.strip() for param in param_line.split(',')]

        # NOTE(review): eval() on file contents -- only safe with trusted input files.
        converter=lambda x: eval(x.replace('pi','%.32f'%pi_constant)) # converts fractions w/ pi (e.g. 3.0*pi/2.0)
        self._modes = []
        for i in range(num_matrices):
            CM = covarianceMatrices[i]
            vecFile = mean_vector_files[i]

            # Every mean-vector file must describe the same parameter set.
            param_line = open(vecFile).readline()
            params = [param.strip() for param in param_line.split(',')]
            if set(params)!=set(self._params):
                raise RuntimeError('Parameters do not agree between mean vector files.')

            # Per-parameter sigmas from the covariance diagonal.
            sigmas = dict(zip(params,np.sqrt(CM.diagonal())))
            colNums = range(len(params))
            converters = dict(zip(colNums,[converter for i in colNums]))
            meanVectors = np.loadtxt(vecFile, delimiter=',', skiprows=1, converters=converters)
            try:
                # Multiple mean vectors in the file: one mode per row.
                for vec in meanVectors:
                    means = dict(zip(params,vec))
                    mode = [(param, stats.norm(loc=means[param],scale=sigmas[param])) for param in params]
                    self._modes.append(dict(mode))
            except TypeError:
                # Fallback for a single mean vector, where iterating the
                # loadtxt result does not yield per-mode rows -- presumably
                # the 1-D/0-d output case; confirm against loadtxt behaviour.
                means = dict(zip(params,meanVectors))
                mode = [(param, stats.norm(loc=means[param],scale=sigmas[param])) for param in params]
                self._modes.append(dict(mode))
        self._num_modes = len(self._modes)

    def pdf(self, param):
        """
        Return PDF function for parameter: the equal-weight average of
        the per-mode normal PDFs, or None for an unknown parameter.
        """
        pdf = None
        if param in self._params:
            pdf = lambda x: (1.0/self._num_modes) * sum([mode[param].pdf(x) for mode in self._modes])
        return pdf

    def cdf(self, param):
        """
        Return CDF function for parameter: the equal-weight average of
        the per-mode normal CDFs, or None for an unknown parameter.
        """
        cdf = None
        if param in self._params:
            cdf = lambda x: (1.0/self._num_modes) * sum([mode[param].cdf(x) for mode in self._modes])
        return cdf

    @property
    def names(self):
        """
        Return list of parameter names described by analytic likelihood function.
        """
        return self._params
2831
2832
2833
2834#===============================================================================
2835# Web page creation classes (wrap ElementTrees)
2836#===============================================================================
2837
class htmlChunk(object):
    """
    Base class wrapping an ElementTree element so web content can be
    assembled programmatically.
    """
    def __init__(self, tag, attrib=None, parent=None):
        """
        Create the underlying element with the given tag, copy any
        attributes onto it and, if a parent chunk is supplied, attach
        the new element to that parent.
        """
        self._html = Element(tag)
        if attrib:
            for name, value in attrib.items():
                self._html.attrib[name] = value
        if parent:
            parent.append(self._html)

    def toprettyxml(self):
        """
        Return a pretty-printed XML string of the htmlPage.
        """
        reparsed = minidom.parseString(tostring(self._html))
        return reparsed.toprettyxml(indent=" ")

    def __str__(self):
        return self.toprettyxml()

    def write(self, string):
        """Parse an XML fragment and append the resulting element."""
        parser = XMLParser()
        parser.feed(string)
        self._html.append(parser.close())

    def _appendTag(self, tag, text=None):
        # Internal helper: build an element, optionally set its text,
        # attach it to this chunk and hand it back.
        element = Element(tag)
        if text is not None:
            element.text = text
        self._html.append(element)
        return element

    def p(self, pstring):
        """Append a paragraph containing pstring."""
        return self._appendTag('p', pstring)

    def h1(self, h1string):
        """Append an h1 heading."""
        return self._appendTag('h1', h1string)

    def h5(self, h1string):
        """Append an h5 heading."""
        return self._appendTag('h5', h1string)

    def h2(self, h2string):
        """Append an h2 heading."""
        return self._appendTag('h2', h2string)

    def h3(self, h1string):
        """Append an h3 heading."""
        return self._appendTag('h3', h1string)

    def br(self):
        """Append a line break."""
        return self._appendTag('br')

    def hr(self):
        """Append a horizontal rule."""
        return self._appendTag('hr')

    def a(self, url, linktext):
        """Append a hyperlink to url displaying linktext."""
        anchor = self._appendTag('a', linktext)
        anchor.attrib['href'] = url
        return anchor

    def tab(self, idtable=None):
        """Append a table, optionally carrying the given id attribute."""
        attribs = {} if idtable is None else {'id': idtable}
        table = Element('table', attribs)
        self._html.append(table)
        return table

    def insert_row(self, tab, label=None):
        """
        Insert row in table tab.
        If given, label used as id for the table tag
        """
        row = Element('tr')
        if label is not None:
            row.attrib['id'] = label
        tab.append(row)
        return row

    def insert_td(self, row, td, label=None, legend=None):
        """
        Insert cell td into row row.
        Sets id to label, if given
        """
        cell = Element('td')
        if type(td) is str:
            cell.text = td
        else:
            # Non-string content: serialise it and pretty-print the markup.
            cell.text = minidom.parseString(tostring(td)).toprettyxml(indent=" ")
        if label is not None:
            cell.attrib['id'] = label
        if legend is not None:
            # Mirror the cell into the legend as an internal link.
            legend.a('#%s' % label, '%s' % label)
            legend.br()
        row.append(cell)
        return cell

    def append(self, element):
        """Append a raw ElementTree element to this chunk."""
        self._html.append(element)
2963
2964
2965#
class htmlPage(htmlChunk):
    """
    A concrete class for generating an XHTML(1) document. Inherits from htmlChunk.
    """
    def __init__(self, title=None, css=None, javascript=None, toc=False):
        """
        Build the document skeleton: head and body, plus optional page
        title, inline javascript and inline CSS.
        """
        htmlChunk.__init__(self, 'html', attrib={'xmlns': "http://www.w3.org/1999/xhtml"})
        self.doctype_str = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
        self._head = SubElement(self._html, 'head')
        title_element = SubElement(self._head, 'title')
        self._body = SubElement(self._html, 'body')
        self._css = None
        self._jscript = None
        if title is not None:
            # Title appears both in the head and as an h1 at the top of the body.
            title_element.text = str(title)
            self._title = SubElement(self._body, 'h1')
            self._title.text = title
        if javascript is not None:
            self._jscript = SubElement(self._head, 'script')
            self._jscript.attrib['type'] = "text/javascript"
            self._jscript.text = str(javascript)
        if css is not None:
            self._css = SubElement(self._head, 'style')
            self._css.attrib['type'] = "text/css"
            self._css.text = str(css)

    def __str__(self):
        # Prepend the XHTML doctype declaration to the serialised tree.
        return '\n'.join([self.doctype_str, self.toprettyxml()])

    def add_section(self, section_name, legend=None):
        """Append a normal section to the document body."""
        section = htmlSection(section_name)
        self._body.append(section._html)
        if legend is not None:
            legend.a('#%s' % section_name, '%s' % section_name)
            legend.br()
        return section

    def add_collapse_section(self, section_name, legend=None, innertable_id=None, start_closed=True):
        """
        Create a section embedded into a table that can be collapsed with a button
        """
        section = htmlCollapseSection(section_name, table_id=innertable_id, start_closed=start_closed)
        self._body.append(section._html)
        if legend is not None:
            legend.a('#%s' % section_name, '%s' % section_name)
            legend.br()
        return section

    def add_section_to_element(self, section_name, parent):
        """
        Create a section which is not appended to the body of html, but to the parent Element
        """
        section = htmlSection(section_name, htmlElement=parent, blank=True)
        parent.append(section._html)
        return section

    @property
    def body(self):
        return self._body

    @property
    def head(self):
        return self._head
3030
3031
class htmlSection(htmlChunk):
    """
    Represents a block of html fitting within a htmlPage. Inherits from htmlChunk.
    """
    def __init__(self, section_name, htmlElement=None, blank=False):
        """
        Create the section div and head it with an h2 title.  A 'blank'
        section carries inline styling instead of the 'ppsection' class.
        """
        if blank:
            attribs = {'style': '"color:#000000"', 'id': section_name}
        else:
            attribs = {'class': 'ppsection', 'id': section_name}
        htmlChunk.__init__(self, 'div', attrib=attribs, parent=htmlElement)
        self.h2(section_name)
3042
3043
3045 """
3046 Represents a block of html fitting within a htmlPage. Inherits from htmlChunk.
3047 """
3048
3049 def __init__(self,section_name,htmlElement=None,table_id=None,start_closed=True):
3050 htmlChunk.__init__(self,'div',attrib={'class':'ppsection','id':section_name},parent=htmlElement)
3051 # if table id is none, generate a random id:
3052 if table_id is None:
3053 table_id=random.randint(1,10000000)
3054 self.table_id=table_id
3055 self._start_closed=start_closed
3056
    def write(self,string):
        """
        Wrap ``string`` in a styled header table carrying an
        Expand/Collapse button and append the result to this section.

        The button's onclick handler calls the page-level javascript
        function toggle_visibility() with this section's table id; the
        inner table's initial display style is injected by string
        replacement ('none' when the section starts closed, 'table'
        otherwise).
        """
        # Random id for the button element so multiple sections do not collide.
        k=random.randint(1,10000000)
        if self._start_closed:
            st='<table border="0" align="center" cellpadding="5" cellspacing="0"><tr bgcolor="#4682B4" height="50"><td width="5%%"><font size="4" face="tahoma" color="white"><a href="#"> Top</a></font></td><td width="45%%"><font size="4" face="tahoma" color="white"><strong>%s</strong></font></td><td bgcolor="#4682B4" align="center" width="50%%"><input id="lnk%s" type="button" value="[+] Expand" onclick="toggle_visibility(\'%s\',\'lnk%s\');"></input></td></tr><tr><td colspan="7">'%(self._html.attrib['id'],k,self.table_id,k)
            # Start hidden; the Expand button reveals the inner table.
            string=string.replace('table ', 'table style="display: none" ')
        else:
            st='<table border="0" align="center" cellpadding="5" cellspacing="0"><tr bgcolor="#4682B4" height="50"><td width="5%%"><font size="4" face="tahoma" color="white"><a href="#"> Top</a></font></td><td width="45%%"><font size="4" face="tahoma" color="white"><strong>%s</strong></font></td><td bgcolor="#4682B4" align="center" width="50%%"><input id="lnk%s" type="button" value="[-] Collapse" onclick="toggle_visibility(\'%s\',\'lnk%s\');"></input></td></tr><tr><td colspan="7">'%(self._html.attrib['id'],k,self.table_id,k)
            # Start visible; the Collapse button hides the inner table.
            string=string.replace('table ', 'table style="display: table" ')
        st+=string
        st+='</td></tr></table>'
        htmlChunk.write(self,st)
3068
3069#===============================================================================
3070# Internal module functions
3071#===============================================================================
3072
3073
3074def _calculate_confidence_levels(hist, points, injBin, NSamples):
3075 """
3076 Returns (injectionconf, toppoints), where injectionconf is the
3077 confidence level of the injection, contained in the injBin and
3078 toppoints is a list of (pointx, pointy, ptindex, frac), with
3079 pointx and pointy the (x,y) coordinates of the corresponding
3080 element of the points array, ptindex the index of the point in the
3081 array, and frac the cumulative fraction of points with larger
3082 posterior probability.
3083
3084 The hist argument should be a one-dimensional array that contains
3085 counts of sample points in each bin.
3086
3087 The points argument should be a 2-D array storing the sky location
3088 associated with each bin; the first index runs from 0 to NBins -
3089 1, while the second index runs from 0 to 1.
3090
3091 The injBin argument gives the bin index in which the injection is
3092 found.
3093
3094 The NSamples argument is used to normalize the histogram counts
3095 into fractional probability.
3096 """
3097
3098 histIndices=np.argsort(hist)[::-1] # In decreasing order
3099
3100 toppoints=[]
3101 frac=0.0
3102 injConf=None
3103 for i in histIndices:
3104 frac+=float(hist[i])/float(NSamples)
3105 toppoints.append((points[i,0], points[i,1], i, frac))
3106 if i == injBin:
3107 injConf=frac
3108 print('Injection found at confidence level %g'%injConf)
3109
3110 return (injConf, toppoints)
3111
def _greedy_bin(greedyHist, greedyPoints, injection_bin_index, bin_size, Nsamples, confidence_levels):
    """
    Common, dimension-independent core of the greedy binning
    algorithms: rank the bins, then convert each requested confidence
    level into an enclosed interval/area estimate.
    """
    # Rank bins by probability and locate the injection's confidence level.
    (injectionconfidence, toppoints) = _calculate_confidence_levels(greedyHist, greedyPoints, injection_bin_index, Nsamples)

    confidence_levels.sort()
    reses = {}
    toppoints = np.array(toppoints)
    nBins = 0
    for printcl in confidence_levels:
        # Number of top-ranked bins needed to reach this confidence level.
        nBins = np.searchsorted(toppoints[:, 3], printcl) + 1
        if nBins >= len(toppoints):
            nBins = len(toppoints) - 1
        accl = toppoints[nBins - 1, 3]  # cumulative fraction actually enclosed
        reses[printcl] = nBins * bin_size

    # Size of the region enclosing the injection, when one was found.
    injection_area = None
    if injection_bin_index and injectionconfidence:
        i = list(np.nonzero(np.asarray(toppoints)[:, 2] == injection_bin_index))[0]
        injection_area = bin_size * (i + 1)

    return toppoints, injectionconfidence, reses, injection_area
3143#
3144#### functions used in 2stage kdtree
3145
def skyArea(bounds):
    """
    Solid angle (steradians) of the ra/dec box
    bounds = [[ra_min, dec_min], [ra_max, dec_max]]: the ra width times
    the difference of cos(pi/2 - dec) (i.e. sin dec) at the two
    declinations.
    """
    dec_term = cos(pi_constant/2. - bounds[1][1]) - cos(pi_constant/2. - bounds[0][1])
    return dec_term * (bounds[1][0] - bounds[0][0])
3148
def random_split(items, fraction):
    """
    Shuffle ``items`` in place, then split it into two lists: the first
    holding int(len*fraction) elements, the second the remainder.
    """
    cut = int(len(items) * fraction)
    random.shuffle(items)
    return items[:cut], items[cut:]
3153
def addSample(tree, coordinates):
    """
    Drop a single sample at ``coordinates`` down a KDSkeleton tree,
    incrementing the sample counter of the leaf it lands in.
    """
    node = tree
    while node._left is not None:
        if coordinates[node._splitDim] < node._splitValue:
            node = node._left
        else:
            node = node._right
    node.addSample()
3161
3162#
3163#===============================================================================
3164# Public module functions
3165#===============================================================================
3166
def kdtree_bin_sky_volume(posterior,confidence_levels):
    """
    Bin (ra, dec, dist) posterior samples with a kD-tree and return a
    dictionary mapping achieved cumulative probability to the enclosed
    sky volume, one entry per requested confidence level.

    posterior -- object providing samples() -> (samples, header) and
        named one-dimensional posteriors.
    confidence_levels -- list of requested levels (sorted in place).
    """

    confidence_levels.sort()

    class Harvester(list):
        # Collects [number density, volume, bounds] for every harvested
        # tree node; self.unrho accumulates the unnormalised total density.

        def __init__(self):
            list.__init__(self)
            self.unrho=0.

        def __call__(self,tree):
            number_density=float(len(tree.objects()))/float(tree.volume())
            self.append([number_density,tree.volume(),tree.bounds()])
            self.unrho+=number_density

        def close_ranks(self):
            # Normalise densities so they sum to one, then sort ascending.

            for i in range(len(self)):
                self[i][0]/=self.unrho

            return sorted(self,key=itemgetter(0))

    def h(a,b):
        # No-op combiner: tree.operate is used only for its traversal side effect.
        pass

    samples,header=posterior.samples()
    header=header.split()
    coord_names=["ra","dec","dist"]
    # Wrap each sample row so the kD-tree can extract its coordinates.
    coordinatized_samples=[PosteriorSample(row, header, coord_names) for row in samples]
    tree=KDTree(coordinatized_samples)

    a=Harvester()
    samples_per_bin=10
    # Visit every tree node holding <= samples_per_bin samples.
    tree.operate(a,h,boxing=samples_per_bin)

    # Highest-density cells first.
    b=a.close_ranks()
    b.reverse()

    acc_rho=0.
    acc_vol=0.
    cl_idx=0
    confidence_intervals={}
    # Accumulate cells until each requested level is crossed.
    # NOTE(review): the dictionary is keyed by the achieved cumulative
    # probability acc_rho, not the requested confidence level -- confirm
    # callers expect this.
    for rho,vol,bounds in b:
        acc_rho+=rho
        acc_vol+=vol

        if acc_rho>confidence_levels[cl_idx]:
            confidence_intervals[acc_rho]=acc_vol
            cl_idx+=1
            if cl_idx==len(confidence_levels):
                break

    return confidence_intervals
3220
def kdtree_bin_sky_area(posterior,confidence_levels,samples_per_bin=10):
    """
    takes samples and applies a KDTree to them to return confidence levels
    returns confidence_intervals - dictionary of user_provided_CL:calculated_area
            b - ordered list of KD leaves
            injInfo - if injection values provided then returns
                      [Bounds_of_inj_kd_leaf ,number_samples_in_box, weight_of_box,injection_CL ,injection_CL_area]
    Not quite sure that the repeated samples case is fixed, possibility of infinite loop.
    """
    confidence_levels.sort()
    from math import cos, pi
    class Harvester(list):
        """
        when called by kdtree.operate will be used to calculate the density of each bin (sky area)
        """
        def __init__(self):
            list.__init__(self)
            self.unrho=0.

        def __call__(self,tree):
            #area = (cos(tree.bounds()[0][1])-cos(tree.bounds()[1][1]))*(tree.bounds()[1][0] - tree.bounds()[0][0])
            # Solid angle of the cell; cos(pi/2 - dec) = sin(dec).
            area = - (cos(pi/2. - tree.bounds()[0][1])-cos(pi/2. - tree.bounds()[1][1]))*(tree.bounds()[1][0] - tree.bounds()[0][0])
            number_density=float(len(tree.objects()))/float(area)
            self.append([number_density,len(tree.objects()),area,tree.bounds()])
            self.unrho+=number_density

        def close_ranks(self):
            # Normalise densities so they sum to one, then sort ascending.

            for i in range(len(self)):
                self[i][0]/=self.unrho

            return sorted(self,key=itemgetter(0))

    def h(a,b):
        # No-op combiner: tree.operate is used only for its traversal side effect.
        pass

    # NOTE(review): peparser appears unused in this function -- confirm before removing.
    peparser=PEOutputParser('common')

    samples,header=posterior.samples()
    header=header.split()
    coord_names=["ra","dec"]
    # Whole sky in (ra, dec).
    initial_dimensions = [[0.,-pi/2.],[2.*pi, pi/2.]]
    coordinatized_samples=[PosteriorSample(row, header, coord_names) for row in samples]
    tree=KDTreeVolume(coordinatized_samples,initial_dimensions)

    a=Harvester()
    tree.operate(a,h,boxing=samples_per_bin)
    totalSamples = len(tree.objects())
    # Highest-density cells first.
    b=a.close_ranks()
    b.reverse()
    # Replace each cell's raw sample count with the cumulative sample fraction.
    samplecounter=0.0
    for entry in b:
        samplecounter += entry[1]
        entry[1] = float(samplecounter)/float(totalSamples)

    acc_rho=0.
    acc_vol=0.
    cl_idx=0

    #checks for injection and extract details of the node in the tree that the injection is found
    if posterior['ra'].injval is not None and posterior['dec'].injval is not None:
        injBound,injNum,injWeight = tree.search([posterior['ra'].injval,posterior['dec'].injval],boxing = samples_per_bin)
        injInfo = [injBound,injNum,injWeight]
        # Sky area and normalised density of the injection's cell.
        inj_area = - (cos(pi/2. - injBound[0][1])-cos(pi/2. - injBound[1][1]))*(injBound[1][0] - injBound[0][0])
        inj_number_density=float(injNum)/float(inj_area)
        inj_rho = inj_number_density / a.unrho
    else:
        injInfo = None
        inj_area = None
        inj_number_density=None
        inj_rho = None

    #finds the volume contained within the confidence levels requested by user
    confidence_intervals={}
    for rho,confidence_level,vol,bounds in b:
        acc_vol+=vol

        if confidence_level>confidence_levels[cl_idx]:
            print(str(confidence_level))
            print(acc_vol)
            confidence_intervals[confidence_levels[cl_idx]]=acc_vol
            cl_idx+=1
            if cl_idx==len(confidence_levels):
                break

    # Total sky area covered by all cells (diagnostic output).
    acc_vol = 0.
    for rho,sample_number,vol,bounds in b:
        acc_vol+=vol
    print('total area: ' + str(acc_vol))

    #finds the confidence level of the injection and the volume of the associated contained region
    inj_confidence = None
    inj_confidence_area = None
    if inj_rho is not None:
        acc_vol=0.
        for rho,confidence_level,vol,bounds in b:
            acc_vol+=vol
            if rho <= inj_rho:
                inj_confidence = confidence_level
                inj_confidence_area = acc_vol
                injInfo.append(inj_confidence)
                injInfo.append(inj_confidence_area)
                print('inj ' +str(vol))
                break
    return confidence_intervals, b, injInfo
3326
def kdtree_bin(posterior,coord_names,confidence_levels,initial_boundingbox = None,samples_per_bin = 10):
    """
    takes samples and applies a KDTree to them to return confidence levels
    returns confidence_intervals - dictionary of user_provided_CL:calculated_volume
            b - ordered list of KD leaves
            initial_boundingbox - list of lists [upperleft_coords,lowerright_coords]
            injInfo - if injection values provided then returns
                      [Bounds_of_inj_kd_leaf ,number_samples_in_box, weight_of_box,injection_CL ,injection_CL_volume]
    Not quite sure that the repeated samples case is fixed, possibility of infinite loop.
    """
    confidence_levels.sort()
    print(confidence_levels)
    class Harvester(list):
        """
        when called by kdtree.operate will be used to calculate the density of each bin
        """
        def __init__(self):
            list.__init__(self)
            self.unrho=0.

        def __call__(self,tree):
            number_density=float(len(tree.objects()))/float(tree.volume())
            self.append([number_density,len(tree.objects()),tree.volume(),tree.bounds()])
            self.unrho+=number_density

        def close_ranks(self):
            # Normalise densities so they sum to one, then sort ascending.

            for i in range(len(self)):
                self[i][0]/=self.unrho

            return sorted(self,key=itemgetter(0))

    def h(a,b):
        # No-op combiner: tree.operate is used only for its traversal side effect.
        pass

    # NOTE(review): peparser appears unused in this function -- confirm before removing.
    peparser=PEOutputParser('common')

    samples,header=posterior.samples()
    header=header.split()
    coordinatized_samples=[PosteriorSample(row, header, coord_names) for row in samples]

    #if initial bounding box is not provided, create it using max/min of sample coords.
    if initial_boundingbox is None:
        low=coordinatized_samples[0].coord()
        high=coordinatized_samples[0].coord()
        for obj in coordinatized_samples[1:]:
            low=np.minimum(low,obj.coord())
            high=np.maximum(high,obj.coord())
        initial_boundingbox = [low,high]

    tree=KDTreeVolume(coordinatized_samples,initial_boundingbox)

    a=Harvester()
    tree.operate(a,h,boxing=samples_per_bin)

    # Highest-density cells first.
    b=a.close_ranks()
    b.reverse()
    # Replace each cell's raw sample count with the cumulative sample fraction.
    totalSamples = len(tree.objects())
    samplecounter=0.0
    for entry in b:
        samplecounter += entry[1]
        entry[1] = float(samplecounter)/float(totalSamples)

    acc_rho=0.
    acc_vol=0.
    cl_idx=0

    #check that there is an injection value for all dimension names
    def checkNone(listoParams):
        # True only when every requested parameter carries an injected value.
        for param in listoParams:
            if posterior[param].injval is None:
                return False
        return True

    #checks for injection and extract details of the lnode in the tree that the injection is found
    if checkNone(coord_names):
        injBound,injNum,injWeight = tree.search([posterior[x].injval for x in coord_names],boxing = samples_per_bin)
        injInfo = [injBound,injNum,injWeight]
        #calculate volume of injections bin
        inj_volume = 1.
        low = injBound[0]
        high = injBound[1]
        for aCoord,bCoord in zip(low,high):
            inj_volume = inj_volume*(bCoord - aCoord)
        inj_number_density=float(injNum)/float(inj_volume)
        inj_rho = inj_number_density / a.unrho
        print(injNum,inj_volume,inj_number_density,a.unrho,injBound)
    else:
        injInfo = None
        inj_area = None
        inj_number_density=None
        inj_rho = None

    #finds the volume contained within the confidence levels requested by user
    confidence_intervals={}
    for rho,sample_number,vol,bounds in b:
        acc_vol+=vol

        if sample_number>confidence_levels[cl_idx]:
            confidence_intervals[confidence_levels[cl_idx]]=(acc_vol,sample_number)
            cl_idx+=1
            if cl_idx==len(confidence_levels):
                break

    # Total volume covered by all cells.
    acc_vol = 0.
    for rho,sample_number,vol,bounds in b:
        acc_vol+=vol

    #finds the confidence level of the injection and the volume of the associated contained region
    inj_confidence = None
    inj_confidence_area = None
    if inj_rho is not None:
        print('calculating cl')
        acc_vol=0.
        for rho,confidence_level,vol,bounds in b:
            acc_vol+=vol
            if rho <= inj_rho:
                inj_confidence = confidence_level
                inj_confidence_area = acc_vol
                injInfo.append(inj_confidence)
                injInfo.append(inj_confidence_area)
                break

    return confidence_intervals, b, initial_boundingbox,injInfo
3451
def kdtree_bin2Step(posterior,coord_names,confidence_levels,initial_boundingbox = None,samples_per_bin = 10,injCoords = None,alternate = False, fraction = 0.5, skyCoords=False):
    """
    Two-step kd-tree credible-region estimator: one subset of the posterior
    samples builds the tree geometry, the remainder is binned into it to
    measure the credible levels.

    input: posterior class instance, list of confidence levels, optional choice of inital parameter space, samples per box in kdtree
    note initial_boundingbox is [[lowerbound of each param][upper bound of each param]], if not specified will just take limits of samples
    fraction is proportion of samples used for making the tree structure.
    returns: sortedLeavesList (leaves ordered by decreasing importance, each
    entry [bounds, importance, n_samples, volume, cumulative fraction]),
    interpConfAreas (interpolated area/volume per confidence level), injInfo
    where injInfo is [bounding box injection is found within, samples in said box, weighting of box (in case of repeated samples),inj_confidence, inj_confidence_area]
    """
    confidence_levels.sort()

    samples,header=posterior.samples()
    numberSamples = len(samples)
    # Split the samples: one part defines the tree structure, the other is
    # dropped into the finished tree.  random_split shuffles; the alternate
    # path is a deterministic head/tail split.
    if alternate == False:
        samplesStructure, samplesFill = random_split(samples,fraction)
    else:
        samplesStructure = samples[:int(numberSamples*fraction)]
        samplesFill = samples[int(numberSamples*fraction):]
    samplesFillLen = len(samplesFill)

    header=header.split()
    coordinatized_samples=[PosteriorSample(row, header, coord_names) for row in samplesStructure]
    #if initial bounding box is not provided, create it using max/min of sample coords.
    if skyCoords == True:
        # Fixed whole-sky bounding box in (RA, dec) radians.
        initial_boundingbox = [[0,-pi_constant/2.],[2*pi_constant,pi_constant/2.]]
    if initial_boundingbox is None:
        low=coordinatized_samples[0].coord()
        high=coordinatized_samples[0].coord()
        for obj in coordinatized_samples[1:]:
            low=np.minimum(low,obj.coord())
            high=np.maximum(high,obj.coord())
        initial_boundingbox = [low,high]
    tree=KDTreeVolume(coordinatized_samples,initial_boundingbox)
    tree2fill = tree.fillNewTree(boxing=samples_per_bin, isArea = skyCoords)#set isArea True if looking at sky coords(modifies stored volume values

    # Column indices of the requested coordinates in the raw sample table.
    columns = []
    for name in coord_names:
        columns.append(header.index(name))

    # Bin each fill sample into its leaf of the new tree.
    for sample in samplesFill:
        tempSample=[]
        for column in columns:
            tempSample.append(sample[column])
        addSample(tree2fill,tempSample)

    # Recursively collect [bounds, importance, n_samples, volume] per leaf.
    def getValues(tree,listing):
        if tree._left is None:
            listing.append([tree.bounds(),tree._importance,tree._samples,tree._volume])
        else:
            getValues(tree._left,listing)
            getValues(tree._right,listing)

    listLeaves = []
    getValues(tree2fill,listLeaves)

    # Number of fill samples required to reach each credible level.
    clSamples = []
    for cl in confidence_levels:
        clSamples.append(samplesFillLen*cl)

    # Greedy ordering: highest-importance (densest) leaves first.
    sortedLeavesList = sorted(listLeaves, key=lambda importance: importance[1])
    sortedLeavesList.reverse()
    runningTotalSamples = 0
    # Append the cumulative sample fraction to each leaf entry.
    for i in range(len(sortedLeavesList)):
        runningTotalSamples += sortedLeavesList[i][2]
        sortedLeavesList[i].append(float(runningTotalSamples)/samplesFillLen,)


    level = 0
    countSamples = 0
    volume = 0
    lencl = len(clSamples)
    #finds confidence levels
    confidence_intervals={}
    interpConfAreas = {}
    countLeaves = 0
    for leaf in sortedLeavesList:
        countSamples += leaf[2]
        countLeaves += 1
        volume += leaf[3]
        if level < lencl and countSamples >= clSamples[level]:
            confidence_intervals[confidence_levels[level]]=(volume,float(countSamples)/samplesFillLen)
            # Interpolate linearly within the final leaf for a smoother
            # area/volume estimate at the exact credible level.
            interpConfAreas[confidence_levels[level]] = volume-leaf[3]*(countSamples-clSamples[level])/leaf[2]
            level +=1

    if injCoords is not None:
        injBound,injNum,injImportance = tree2fill.search(injCoords)
        injInfo = [injBound,injNum,injImportance]
    else:
        injInfo = None


    #finds the confidence level of the injection and the volume of the associated contained region
    inj_confidence = None
    inj_confidence_area = None
    if injInfo is not None:
        acc_vol=0.
        acc_cl=0.
        for leaf in sortedLeavesList:
            acc_vol+=leaf[3]
            acc_cl+=leaf[2]
            if leaf[1] <= injImportance:
                inj_confidence = float(acc_cl)/samplesFillLen
                inj_confidence_area = acc_vol
                injInfo.append(inj_confidence)
                injInfo.append(inj_confidence_area)
                break

    return sortedLeavesList, interpConfAreas, injInfo
    # NOTE(review): the nested function below is unreachable (defined after
    # the return statement) and is never called — candidate for removal.
    #check that there is an injection value for all dimension names
    def checkNone(listoParams):
        for param in listoParams:
            if posterior[param].injval is None:
                return False
        return True
3565
def greedy_bin_two_param(posterior,greedy2Params,confidence_levels):
    """
    Determine the 2-parameter Bayesian Confidence Intervals using a greedy
    binning algorithm.

    @param posterior: an instance of the Posterior class.

    @param greedy2Params: a dict - {param1Name:param1binSize,param2Name:param2binSize} .

    @param confidence_levels: A list of floats of the required confidence intervals [(0-1)].

    @return: (toppoints, injection_cl, reses, injection_area) as returned
        by the internal _greedy_bin routine.
    """

    #Extract parameter names
    par1_name,par2_name=greedy2Params.keys()

    #Set posterior array columns
    par1pos=posterior[par1_name.lower()].samples
    par2pos=posterior[par2_name.lower()].samples

    #Extract bin sizes
    par1_bin=greedy2Params[par1_name]
    par2_bin=greedy2Params[par2_name]

    #Extract injection information
    par1_injvalue=posterior[par1_name.lower()].injval
    par2_injvalue=posterior[par2_name.lower()].injval

    #Create 2D bin array
    par1pos_min=min(par1pos)[0]
    par2pos_min=min(par2pos)[0]

    par1pos_max=max(par1pos)[0]
    par2pos_max=max(par2pos)[0]

    par1pos_Nbins= int(ceil((par1pos_max - par1pos_min)/par1_bin))+1

    par2pos_Nbins= int(ceil((par2pos_max - par2pos_min)/par2_bin))+1

    greedyHist = np.zeros(par1pos_Nbins*par2pos_Nbins,dtype='i8')
    greedyPoints = np.zeros((par1pos_Nbins*par2pos_Nbins,2))

    #Fill bin values (lower-left corner of each 2D bin, row-major in par2)
    par1_point=par1pos_min
    par2_point=par2pos_min
    for i in range(par2pos_Nbins):

        par1_point=par1pos_min
        for j in range(par1pos_Nbins):

            greedyPoints[j+par1pos_Nbins*i,0]=par1_point
            greedyPoints[j+par1pos_Nbins*i,1]=par2_point
            par1_point+=par1_bin
        par2_point+=par2_bin


    #If injection point given find which bin its in...
    injbin=None
    if par1_injvalue is not None and par2_injvalue is not None:

        par1_binNumber=int(floor((par1_injvalue-par1pos_min)/par1_bin))
        par2_binNumber=int(floor((par2_injvalue-par2pos_min)/par2_bin))

        injbin=int(par1_binNumber+par2_binNumber*par1pos_Nbins)
    elif par1_injvalue is None and par2_injvalue is not None:
        print("Injection value not found for %s!"%par1_name)

    elif par1_injvalue is not None and par2_injvalue is None:
        print("Injection value not found for %s!"%par2_name)

    #Bin posterior samples
    for par1_samp,par2_samp in zip(par1pos,par2pos):
        par1_samp=par1_samp[0]
        par2_samp=par2_samp[0]
        par1_binNumber=int(floor((par1_samp-par1pos_min)/par1_bin))
        par2_binNumber=int(floor((par2_samp-par2pos_min)/par2_bin))
        try:
            greedyHist[par1_binNumber+par2_binNumber*par1pos_Nbins]+=1
        except IndexError:
            # BUGFIX: the original applied the % formatting to the raised
            # RuntimeError object instead of the message string (raising a
            # TypeError), used a bare except, and passed par1_samp twice
            # where par2_samp was intended.
            raise RuntimeError("Problem binning samples: %i,%i,%i,%i,%i,%f,%f,%f,%f,%f,%f ."%(par1_binNumber,par2_binNumber,par1pos_Nbins,par2pos_Nbins,par1_binNumber+par2_binNumber*par1pos_Nbins,par1_samp,par1pos_min,par1_bin,par2_samp,par2pos_min,par2_bin))
    #Call greedy bins routine
    toppoints,injection_cl,reses,injection_area=\
        _greedy_bin(
                greedyHist,
                greedyPoints,
                injbin,
                float(par1_bin*par2_bin),
                int(len(par1pos)),
                confidence_levels
                )

    return toppoints,injection_cl,reses,injection_area
3657
def pol2cart(long,lat):
    """
    Convert a (longitude, latitude) position on the unit sphere into a
    numpy 3-vector of cartesian co-ordinates.
    """
    cos_lat = np.cos(lat)
    return np.array([cos_lat * np.cos(long),
                     cos_lat * np.sin(long),
                     np.sin(lat)])
3668#
3669
def sph2cart(r,theta,phi):
    """
    Convert spherical co-ordinates (r, theta, phi) to cartesian (x, y, z).
    """
    sin_theta = np.sin(theta)
    x = r * sin_theta * np.cos(phi)
    y = r * sin_theta * np.sin(phi)
    z = r * np.cos(theta)
    return x,y,z
3678
3679
def cart2sph(x,y,z):
    """
    Convert cartesian co-ordinates (x, y, z) to spherical (r, theta, phi),
    with phi wrapped into [0, 2*pi).
    """
    r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    theta = np.arccos(z / r)
    phi = np.fmod(2 * pi_constant + np.arctan2(y, x), 2 * pi_constant)
    return r, theta, phi
3689
3690
3691
def plot_sky_map(hpmap, outdir, inj=None, nest=True):
    """Plots a sky map from a healpix map, optionally including an
    injected position. This is a temporary map to display before
    ligo.skymap utility is used to generated a smoother one.

    :param hpmap: An array representing a healpix map (in nested
    ordering if ``nest = True``).

    :param outdir: The output directory.

    :param inj: If not ``None``, then ``[ra, dec]`` of the injection
    associated with the posterior map.

    :param nest: Flag indicating the pixel ordering in healpix.

    :return: The matplotlib figure; the plot is also written to
    ``<outdir>/skymap.png``.
    """

    fig = plt.figure(frameon=False, figsize=(8,6))
    hp.mollview(hpmap, nest=nest, min=0, max=np.max(hpmap), cmap='Greys', coord='E', fig=fig.number, title='Histogrammed skymap' )
    plt.grid(True,color='g',figure=fig)

    if inj is not None:
        # Convert declination to the healpix polar angle before marking
        # the injected position with a star.
        theta = np.pi/2.0 - inj[1]
        hp.projplot(theta, inj[0], '*', markerfacecolor='white', markeredgecolor='black', markersize=10)

    plt.savefig(os.path.join(outdir, 'skymap.png'))

    return fig
3720
def skymap_confidence_areas(hpmap, cls):
    """
    Compute the sky area (in square degrees) enclosed by each credible
    level in ``cls`` via greedy binning of the healpix map ``hpmap``.
    Returns a numpy array of areas, one per requested level.
    """

    # Normalised pixel probabilities, sorted from largest to smallest.
    prob = np.sort(hpmap / np.sum(hpmap))[::-1]
    cum_prob = np.cumsum(prob)

    # Area of one pixel, converted from steradians to square degrees.
    pix_area = hp.nside2pixarea(hp.npix2nside(prob.shape[0]))
    pix_area_deg2 = pix_area * (180.0 / np.pi) ** 2

    # Count how many of the densest pixels fit below each credible level.
    return np.array([np.sum(cum_prob < cl) * pix_area_deg2 for cl in cls])
3741
def skymap_inj_pvalue(hpmap, inj, nest=True):
    """
    Greedy p-value estimate for the injected sky position
    ``inj = [ra, dec]`` given the healpix map ``hpmap``.
    """

    nside = hp.npix2nside(hpmap.shape[0])
    prob = hpmap / np.sum(hpmap)  # normalise to unit total probability

    # Probability of the pixel containing the injection.
    inj_pix = hp.ang2pix(nside, np.pi/2.0 - inj[1], inj[0], nest=nest)
    inj_prob = prob[inj_pix]

    # Total probability in pixels at least as likely as the injection's.
    return np.sum(prob[prob >= inj_prob])
3754
3755#
3756
def mc2ms(mc,eta):
    """
    Convert chirp mass and symmetric mass ratio to component masses.
    The masses are defined so that m1 > m2; returns the tuple (m1, m2).
    """
    root = np.sqrt(0.25 - eta)
    ratio = (0.5 + root) / (0.5 - root)      # m1/m2 >= 1
    inv_ratio = 1 / ratio

    m2 = mc * (1 + ratio) ** 0.2 / ratio ** 0.6
    m1 = mc * (1 + inv_ratio) ** 0.2 / inv_ratio ** 0.6
    return (m1, m2)
3770#
3771#
3772
def q2ms(mc,q):
    """
    Convert chirp mass and mass ratio q = m2/m1 (q <= 1) to component
    masses. The masses are defined so that m1 > m2; returns (m1, m2).
    """
    common = mc * np.power(1 + q, 0.2)
    m1 = common * np.power(q, -0.6)
    m2 = common * np.power(q, 0.4)
    return (m1, m2)
3782#
3783#
3784
def q2eta(q):
    """
    Convert mass ratio q to symmetric mass ratio eta = q/(1+q)^2.
    The result is capped at the physical maximum of 0.25 to guard
    against floating-point overshoot.
    """
    return np.clip(q / np.square(1 + q), 0, 0.25)
3792#
3793#
3794
def mc2q(mc,eta):
    """
    Convert chirp mass and symmetric mass ratio to the mass ratio
    q = m2/m1 (so q <= 1, since mc2ms returns m1 >= m2).
    """
    heavier, lighter = mc2ms(mc, eta)
    return lighter / heavier
3802#
3803#
3804
def ang_dist(long1,lat1,long2,lat2):
    """
    Find the angular separation of (long1,lat1) and (long2,lat2), which are
    specified in radians.

    @param long1: longitude of the first point (radians)
    @param lat1: latitude of the first point (radians)
    @param long2: longitude of the second point (radians)
    @param lat2: latitude of the second point (radians)
    @return: angular separation in radians.
    """

    x1=np.cos(lat1)*np.cos(long1)
    y1=np.cos(lat1)*np.sin(long1)
    z1=np.sin(lat1)
    x2=np.cos(lat2)*np.cos(long2)
    y2=np.cos(lat2)*np.sin(long2)
    z2=np.sin(lat2)
    # BUGFIX: np.acos only exists in numpy >= 2.0 (AttributeError on older
    # numpy); use the portable np.arccos.  Clip the dot product to [-1, 1]
    # so rounding overshoot for (anti)parallel directions cannot yield NaN.
    dot=np.clip(x1*x2+y1*y2+z1*z2,-1.0,1.0)
    sep=np.arccos(dot)
    return(sep)
3819#
3820#
3821
def array_dot(vec1, vec2):
    """
    Row-wise dot product of two numpy arrays: a scalar for 1-D inputs,
    or an (N, 1) column vector of per-row products for 2-D inputs.
    """
    products = vec1 * vec2
    if vec1.ndim == 1:
        return products.sum()
    return products.sum(axis=1).reshape(-1, 1)
3831#
3832#
3833
def array_ang_sep(vec1, vec2):
    """
    Angle in radians between corresponding vectors in the rows of two
    numpy arrays.
    """
    norm1 = np.sqrt(array_dot(vec1, vec1))
    norm2 = np.sqrt(array_dot(vec2, vec2))
    cos_angle = array_dot(vec1, vec2) / (norm1 * norm2)
    return np.arccos(cos_angle)
3841#
3842#
3843
def array_polar_ang(vec):
    """
    Polar angle (radians from the +z axis) of vectors in the rows of a
    numpy array.
    """
    if vec.ndim == 1:
        z_comp = vec[2]
    else:
        z_comp = vec[:, 2].reshape(-1, 1)
    return np.arccos(z_comp / np.sqrt(array_dot(vec, vec)))
3854#
3855#
3856
def rotation_matrix(angle, direction):
    """
    Compute general rotation matrices for a given angles and direction vectors.

    @param angle: rotation angle(s) in radians; a scalar yields one 3x3
        matrix, an array yields an array of matrices.
    @param direction: rotation axis vector(s).
        NOTE(review): normalised IN PLACE, so the caller's array is modified.
    @return: a 3x3 rotation matrix, or an array of them.
    """
    cosa = np.cos(angle)
    sina = np.sin(angle)
    # In-place normalisation of the axis (mutates the caller's array).
    direction /= np.sqrt(array_dot(direction,direction))
    #Assume calculating array of rotation matrices.
    try:
        # Rodrigues' formula per sample:
        # R = cos(a)*I + (1-cos(a))*(d d^T) + sin(a)*[d]_x
        nSamps = len(angle)
        R = np.array( [np.diag([i,i,i]) for i in cosa.flat] )
        R += np.array( [np.outer(direction[i],direction[i])*(1.0-cosa[i]) for i in range(nSamps)] )
        R += np.array( [np.array( [[ 0.0, -direction[i,2], direction[i,1]],
                            [ direction[i,2], 0.0, -direction[i,0]],
                            [-direction[i,1], direction[i,0], 0.0 ]] ) * sina[i] for i in range(nSamps)] )
    #Only computing one rotation matrix.
    except TypeError:
        # Scalar angle: len(angle) raised TypeError, so build one matrix.
        R = np.diag([cosa,cosa,cosa])
        R += np.outer(direction,direction) * (1.0 - cosa)
        R += np.array( [[ 0.0, -direction[2], direction[1]],
                        [ direction[2], 0.0, -direction[0]],
                        [-direction[1], direction[0], 0.0 ]] ) * sina
    return R
3880
3881
def ROTATEZ(angle, vx, vy, vz):
    """Rotate the vector (vx, vy, vz) about the z-axis; mirrors ROTATEZ in LALSimInspiral.c."""
    c = np.cos(angle)
    s = np.sin(angle)
    return np.asarray([vx * c - vy * s, vx * s + vy * c, vz])
3887
def ROTATEY(angle, vx, vy, vz):
    """Rotate the vector (vx, vy, vz) about the y-axis; mirrors ROTATEY in LALSimInspiral.c."""
    c = np.cos(angle)
    s = np.sin(angle)
    return np.asarray([vx * c + vz * s, vy, -vx * s + vz * c])
3893
def orbital_momentum(fref, m1,m2, inclination):
    """
    Calculate the orbital angular momentum vector at reference frequency
    fref for component masses m1, m2 (in Msun) and inclination (radians).

    Note: the units of Lmag differ from those used in lalsimulation;
    masses enter in Msun here.  When building J = L + S1 + S2 with the L
    returned by this function, S1 and S2 must not carry the Msun^2 factor.
    """
    eta = (m1 * m2) / ((m1 + m2) * (m1 + m2))
    Lmag = orbital_momentum_mag(fref, m1, m2, eta)
    Lx, Ly, Lz = sph2cart(Lmag, inclination, 0.0)
    return np.hstack((Lx, Ly, Lz))
3907#
3908#
def orbital_momentum_mag(fref, m1,m2,eta):
    """
    Magnitude of the orbital angular momentum at reference frequency fref,
    to 1PN order, for component masses in Msun.
    """
    # Orbital velocity parameter v0 = (pi M fref)^(1/3) in geometric units.
    v0 = np.power((m1 + m2) * pi_constant * lal.MTSUN_SI * fref, 1.0 / 3.0)
    # Newtonian term Mtot^2 * eta / v, times the 1PN correction factor.
    newtonian = (((m1 + m2) ** 2) * eta) / v0
    pn1 = 1 + (v0 ** 2) * (3.0 / 2.0 + eta / 6.0)
    return newtonian * pn1
3916
def component_momentum(m, a, theta, phi):
    """
    Spin angular momentum vector of a black hole with mass m,
    dimensionless spin magnitude a and spin orientation (theta, phi).
    """
    spin_mag = m ** 2 * a
    return np.hstack(sph2cart(spin_mag, theta, phi))
3923#
3924#
3925
def symm_tidal_params(lambda1,lambda2,q):
    """
    Calculate the best-measured tidal parameters (lam_tilde, dlam_tilde)
    [Eqs. (5) and (6) in Wade et al. PRD 89, 103012 (2014)].
    Requires q <= 1.
    """
    # Check that q <= 1, as expected
    if np.any(q > 1):
        raise ValueError("q > 1, while this function requires q <= 1.")

    lambdap = lambda1 + lambda2
    lambdam = lambda1 - lambda2
    dmbym = (1. - q)/(1. + q) # Equivalent to sqrt(1 - 4*eta) for q <= 1
    eta = q2eta(q)

    lam_tilde = (8./13.)*((1.+7.*eta-31.*eta*eta)*lambdap + dmbym*(1.+9.*eta-11.*eta*eta)*lambdam)
    dlam_tilde = (1./2.)*(dmbym*(1.-13272.*eta/1319.+8944.*eta*eta/1319.)*lambdap + (1.-15910.*eta/1319.+32850.*eta*eta/1319.+3380.*eta*eta*eta/1319.)*lambdam)
    return lam_tilde, dlam_tilde
3945
def spin_angles(fref,mc,eta,incl,a1,theta1,phi1,a2=None,theta2=None,phi2=None):
    """
    Calculate the physical spin angles (tilt1, tilt2, theta_jn, beta).
    When no second spin is supplied, tilt2 is returned as None.
    """
    single_spin = None in (a2, theta2, phi2)
    m1, m2 = mc2ms(mc, eta)
    L = orbital_momentum(fref, m1, m2, incl)
    S1 = component_momentum(m1, a1, theta1, phi1)
    # With only one spin, the second contributes nothing to J.
    S2 = 0.0 if single_spin else component_momentum(m2, a2, theta2, phi2)
    J = L + S1 + S2
    tilt1 = array_ang_sep(L, S1)
    tilt2 = None if single_spin else array_ang_sep(L, S2)
    theta_jn = array_polar_ang(J)
    beta = array_ang_sep(J, L)
    return tilt1, tilt2, theta_jn, beta
3967#
def chi_precessing(m1, a1, tilt1, m2, a2, tilt2):
    """
    Calculate the magnitude of the effective precessing spin chi_p,
    following the convention from Phys. Rev. D 91, 024043 -- arXiv:1408.1810.
    note: the paper uses naming convention where m1 < m2
    (and similar for associated spin parameters) and q > 1
    """
    q_inv = m1 / m2
    coeff1 = 2. + 1.5 * q_inv
    coeff2 = 2. + 1.5 / q_inv
    # In-plane spin components, weighted by mass squared.
    s1_perp = a1 * np.sin(tilt1) * m1 * m1
    s2_perp = a2 * np.sin(tilt2) * m2 * m2
    sp = np.maximum(coeff1 * s2_perp, coeff2 * s1_perp)
    return sp / (coeff2 * m1 * m1)
3983
def calculate_redshift(distance,h=0.6790,om=0.3065,ol=0.6935,w0=-1.0):
    """
    Calculate the redshift from the luminosity distance measurement using the
    Cosmology Calculator provided in LAL.
    By default assuming cosmological parameters from arXiv:1502.01589 - 'Planck 2015 results. XIII. Cosmological parameters'
    Using parameters from table 4, column 'TT+lowP+lensing+ext'
    This corresponds to Omega_M = 0.3065, Omega_Lambda = 0.6935, H_0 = 67.90 km s^-1 Mpc^-1
    Returns an array of redshifts
    """
    def _residual(z, dl, omega):
        # Root of dl - D_L(z) locates the redshift of the measured distance.
        return dl - lal.LuminosityDistance(omega, z)

    omega = lal.CreateCosmologicalParameters(h, om, ol, w0, 0.0, 0.0)
    # Accept either a single float or a column of posterior samples.
    if isinstance(distance, float):
        dists = [distance]
    else:
        dists = distance[:, 0]
    # Each root-find starts from a random initial guess in [0, 2).
    z = np.array([newton(_residual, np.random.uniform(0.0, 2.0), args=(d, omega)) for d in dists])
    return z.reshape(z.shape[0], 1)
4002
def source_mass(mass, redshift):
    """
    Convert a redshifted (detector-frame) mass parameter to the source
    frame: m_source = m / (1.0 + z).
    """
    return mass / (1.0 + redshift)
4010
4011## Following functions added for testing Lorentz violations
def integrand_distance(redshift,nonGR_alpha):
    """
    Integrand of the D_alpha distance integral; the multiplicative factor
    is applied later in DistanceMeasure().
    D_alpha = integral{ ((1+z')^(alpha-2))/sqrt(Omega_m*(1+z')^3 +Omega_lambda) dz'} # eq.15 of arxiv 1110.2720
    """
    omega = lal.CreateCosmologicalParameters(0.6790,0.3065,0.6935,-1.0,0.0,0.0) ## Planck 2015 values
    matter_density = omega.om
    dark_energy_density = omega.ol
    #lal.DestroyCosmologicalParameters(omega)
    zp1 = 1.0 + redshift
    return zp1 ** (nonGR_alpha - 2.0) / (np.sqrt(matter_density * zp1 ** 3.0 + dark_energy_density))
4022
def DistanceMeasure(redshift,nonGR_alpha):
    """
    D_alpha = ((1+z)^(1-alpha))/H_0 * D_alpha # from eq.15 of arxiv 1110.2720
    with the integral evaluated by integrand_distance().
    Returns D_alpha in metres.
    """
    omega = lal.CreateCosmologicalParameters(0.6790,0.3065,0.6935,-1.0,0.0,0.0) ## Planck 2015 values
    hubble_const = omega.h * lal.H0FAC_SI ## Hubble constant in SI units
    dist = integrate.quad(integrand_distance, 0, redshift, args=(nonGR_alpha))[0]
    dist *= (1.0 + redshift) ** (1.0 - nonGR_alpha)
    dist /= hubble_const
    #lal.DestroyCosmologicalParameters(omega)
    return dist * lal.C_SI ## returns D_alpha in metres
4035
def lambda_a(redshift, nonGR_alpha, lambda_eff, distance):
    """
    Convert the effective wavelength-like parameter lambda_eff to lambda_A:
    lambda_A = lambda_{eff}*(D_alpha/D_L)^(1/(2-alpha))*(1/(1+z)^((1-alpha)/(2-alpha)))
    """
    dist_func = np.vectorize(DistanceMeasure)
    D_alpha = dist_func(redshift, nonGR_alpha)
    lum_dist = distance * lal.PC_SI * 1e6  ## luminosity distance in metres
    return lambda_eff * (D_alpha / (lum_dist * (1.0 + redshift) ** (1.0 - nonGR_alpha))) ** (1. / (2.0 - nonGR_alpha))
4045
def amplitudeMeasure(redshift, nonGR_alpha, lambda_eff, distance):
    """
    Convert lambda_A into the Lorentz-violating dispersion parameter "A":
    A = (lambda_A/h)^(alpha-2) # eqn. 13 of arxiv 1110.2720
    """
    hPlanck = 4.13567e-15 # Planck's constant in eV.s
    lambda_func = np.vectorize(lambda_a)
    lambdaA = lambda_func(redshift, nonGR_alpha, lambda_eff, distance) / lal.C_SI # convert to seconds
    return (lambdaA / hPlanck) ** (nonGR_alpha - 2.0)
4055############################ changes for testing Lorentz violations made till here
4056
def physical2radiationFrame(theta_jn, phi_jl, tilt1, tilt2, phi12, a1, a2, m1, m2, fref,phiref):
    """
    Wrapper function for SimInspiralTransformPrecessingNewInitialConditions().
    Vectorizes function for use in append_mapping() methods of the posterior class.

    Returns (iota, theta1, phi1, theta2, phi2, beta), or None if
    lalsimulation is unavailable or the transformation fails.
    """
    try:
        import lalsimulation as lalsim
    except ImportError:
        print('bayespputils.py: Cannot import lalsimulation SWIG bindings to calculate physical to radiation')
        print('frame angles, did you remember to use --enable-swig-python when ./configuring lalsimulation?')
        return None
    from numpy import shape
    transformFunc = lalsim.SimInspiralTransformPrecessingNewInitialConditions

    # Convert component masses to SI units
    m1_SI = m1*lal.MSUN_SI
    m2_SI = m2*lal.MSUN_SI

    # Flatten arrays
    ins = [theta_jn, phi_jl, tilt1, tilt2, phi12, a1, a2, m1_SI, m2_SI, fref,phiref]
    if len(shape(ins))>1:
        # ins is a list of lists (i.e. we are converting full posterior chains)
        try:
            for p,param in enumerate(ins):
                ins[p] = param.flatten()
        except:
            pass

        try:
            # Transform sample-by-sample; each result row is
            # (iota, s1x, s1y, s1z, s2x, s2y, s2z).
            results = np.array([transformFunc(t_jn, p_jl, t1, t2, p12, a1, a2, m1_SI, m2_SI, f,phir) for (t_jn, p_jl, t1, t2, p12, a1, a2, m1_SI, m2_SI, f,phir) in zip(*ins)])
            iota = results[:,0].reshape(-1,1)
            spin1x = results[:,1].reshape(-1,1)
            spin1y = results[:,2].reshape(-1,1)
            spin1z = results[:,3].reshape(-1,1)
            spin2x = results[:,4].reshape(-1,1)
            spin2y = results[:,5].reshape(-1,1)
            spin2z = results[:,6].reshape(-1,1)
            # Spin magnitudes and orientations from cartesian components.
            a1,theta1,phi1 = cart2sph(spin1x,spin1y,spin1z)
            a2,theta2,phi2 = cart2sph(spin2x,spin2y,spin2z)

            mc = np.power(m1*m2,3./5.)*np.power(m1+m2,-1./5.)
            # Build J = L + S1 + S2 (spins carry the m^2 factor here;
            # see the note on orbital_momentum).
            L = orbital_momentum(fref, m1,m2, iota)
            S1 = np.hstack([m1*m1*spin1x,m1*m1*spin1y,m1*m1*spin1z])
            S2 = np.hstack([m2*m2*spin2x,m2*m2*spin2y,m2*m2*spin2z])
            J = L + S1 + S2
            beta = array_ang_sep(J,L)

            return iota, theta1, phi1, theta2, phi2, beta
        except: # Catch all exceptions, including failure for the transformFunc
            # Something went wrong, returning None
            return None

    elif len(shape(ins))<=1:
        # ins is a list of floats (i.e. we are converting the injected values) or empty
        try:
            for p,param in enumerate(ins):
                ins[p] = param
        except:
            pass

        try:
            results = np.array(transformFunc(theta_jn, phi_jl, tilt1, tilt2, phi12, a1, a2, m1_SI, m2_SI, fref,phiref))
            iota = results[0]
            spin1x = results[1]
            spin1y = results[2]
            spin1z = results[3]
            spin2x = results[4]
            spin2y = results[5]
            spin2z = results[6]
            a1,theta1,phi1 = cart2sph(spin1x,spin1y,spin1z)
            a2,theta2,phi2 = cart2sph(spin2x,spin2y,spin2z)

            mc = np.power(m1*m2,3./5.)*np.power(m1+m2,-1./5.)
            L = orbital_momentum(fref, m1,m2, iota)
            S1 = m1*m1*np.hstack([spin1x,spin1y,spin1z])
            S2 = m2*m2*np.hstack([spin2x,spin2y,spin2z])
            J = L + S1 + S2
            beta = array_ang_sep(J,L)

            return iota, theta1, phi1, theta2, phi2, beta

        except: # Catch all exceptions, including failure for the transformFunc
            # Something went wrong, returning None
            return None
4141#
4142#
4143
def plot_one_param_pdf_kde(fig,onedpos):
    """
    Overplot a 1D gaussian kernel density estimate for a one-dimensional
    posterior onto the current pyplot axes.

    @param fig: the matplotlib figure being drawn into (plotting goes
        through the pyplot state machine).
    @param onedpos: object exposing .samples and .gaussian_kde attributes.
    """

    from scipy import seterr as sp_seterr

    # Silence harmless underflow warnings from the KDE evaluation.
    np.seterr(under='ignore')
    sp_seterr(under='ignore')
    pos_samps=onedpos.samples
    try:
        gkde=onedpos.gaussian_kde
    except np.linalg.linalg.LinAlgError:
        print('Singular matrix in KDE. Skipping')
    else:
        # Evaluate the KDE on a regular grid spanning the sample range.
        ind=np.linspace(np.min(pos_samps),np.max(pos_samps),101)
        kdepdf=gkde.evaluate(ind)
        plt.plot(ind,kdepdf,color='green')
    return
4160
4161
def plot_one_param_pdf(posterior,plot1DParams,analyticPDF=None,analyticCDF=None,plotkde=False):
    """
    Plots a 1D histogram and (gaussian) kernel density estimate of the
    distribution of posterior samples for a given parameter.

    @param posterior: an instance of the Posterior class.

    @param plot1DParams: a dict; {paramName:Nbins}

    @param analyticPDF: an analytic probability distribution function describing the distribution.

    @param analyticCDF: an analytic cumulative distribution function describing the distribution.

    @param plotkde: Use KDE to smooth plot (default: False)

    @return: (rbins, fig) where rbins is the integrated posterior mass
        below the injected value (None when none is computed) and fig is
        the matplotlib figure.
    """

    matplotlib.rcParams['text.usetex']=False

    param=list(plot1DParams.keys())[0].lower()
    histbins=list(plot1DParams.values())[0]

    pos_samps=posterior[param].samples
    injpar=posterior[param].injval
    trigvals=posterior[param].trigvals

    #myfig=plt.figure(figsize=(4,3.5),dpi=200)
    myfig=plt.figure(figsize=(6,4.5),dpi=150)
    #axes=plt.Axes(myfig,[0.2, 0.2, 0.7,0.7])
    axes=plt.Axes(myfig,[0.15,0.15,0.6,0.76])
    myfig.add_axes(axes)
    majorFormatterX=ScalarFormatter(useMathText=True)
    majorFormatterX.format_data=lambda data:'%.6g'%(data)
    majorFormatterY=ScalarFormatter(useMathText=True)
    majorFormatterY.format_data=lambda data:'%.6g'%(data)
    majorFormatterX.set_scientific(True)
    majorFormatterY.set_scientific(True)
    offset=0.0
    # Time-like parameters are plotted relative to their floor to keep the
    # tick labels readable; the offset is shown in the axis label.
    if param.find('time')!=-1:
        offset=floor(min(pos_samps))
        pos_samps=pos_samps-offset
        if injpar is not None:
            injpar=injpar-offset
        ax1_name=param+' + %i'%(int(offset))
    else: ax1_name=param

    (n, bins, patches)=plt.hist(pos_samps,histbins,density=True,facecolor='grey')
    # Pick a tick count that avoids label overlap for wide labels.
    Nchars=max(map(lambda d:len(majorFormatterX.format_data(d)),axes.get_xticks()))
    if Nchars>8:
        Nticks=3
    elif Nchars>5:
        Nticks=4
    elif Nchars>4:
        Nticks=6
    else:
        Nticks=6
    locatorX=matplotlib.ticker.MaxNLocator(nbins=Nticks)
    xmin,xmax=plt.xlim()
    # RA/dec axes get specialised locators and h/m/s or degree formatters.
    if param=='rightascension' or param=='ra':
        locatorX=RALocator(min=xmin,max=xmax)
        majorFormatterX=RAFormatter()
    if param=='declination' or param=='dec':
        locatorX=DecLocator(min=xmin,max=xmax)
        majorFormatterX=DecFormatter()
    axes.xaxis.set_major_formatter(majorFormatterX)
    axes.yaxis.set_major_formatter(majorFormatterY)

    locatorX.view_limits(bins[0],bins[-1])
    axes.xaxis.set_major_locator(locatorX)
    if plotkde: plot_one_param_pdf_kde(myfig,posterior[param])
    histbinSize=bins[1]-bins[0]
    if analyticPDF:
        (xmin,xmax)=plt.xlim()
        x = np.linspace(xmin,xmax,2*len(bins))
        plt.plot(x, analyticPDF(x+offset), color='r', linewidth=2, linestyle='dashed')
        if analyticCDF:
            # KS underflows with too many samples
            max_samps=1000
            from numpy.random import permutation
            samps = permutation(pos_samps)[:max_samps,:].flatten()
            D,p = stats.kstest(samps+offset, analyticCDF, mode='asymp')
            plt.title("%s: ks p-value %.3f"%(param,p))

    rbins=None

    if injpar is not None:
        # We will plot the injection if it is <5% outside the posterior
        delta_samps=max(pos_samps)-min(pos_samps)
        minrange=min(pos_samps)-0.05*delta_samps
        maxrange=max(pos_samps)+0.05*delta_samps
        if minrange<injpar and maxrange>injpar:

            plt.axvline(injpar, color='r', linestyle='-.', linewidth=4)

            #rkde=gkde.integrate_box_1d(min(pos[:,i]),getinjpar(injection,i))
            #print "r of injected value of %s (kde) = %f"%(param,rkde)

        #Find which bin the true value is in
        if min(pos_samps)<injpar and max(pos_samps)>injpar:
            bins_to_inj=(injpar-bins[0])/histbinSize
            injbinh=int(floor(bins_to_inj))
            injbin_frac=bins_to_inj-float(injbinh)

            #Integrate over the bins
            rbins=(sum(n[0:injbinh-1])+injbin_frac*n[injbinh])*histbinSize

    if trigvals is not None:
        # Mark per-IFO trigger values with colour-coded vertical lines.
        for IFO in [IFO for IFO in trigvals.keys()]:
            trigval = trigvals[IFO]
            if min(pos_samps)<trigval and max(pos_samps)>trigval:
                if IFO=='H1': color = 'r'
                elif IFO=='L1': color = 'g'
                elif IFO=='V1': color = 'm'
                else: color = 'c'
                plt.axvline(trigval, color=color, linestyle='-.')
    #
    plt.grid()
    plt.xlabel(plot_label(ax1_name))
    plt.ylabel('Probability Density')

    # For RA and dec set custom labels and for RA reverse
    if(param.lower()=='ra' or param.lower()=='rightascension'):
        xmin,xmax=plt.xlim()
        plt.xlim(xmax,xmin)
    #if(param.lower()=='ra' or param.lower()=='rightascension'):
    #    locs, ticks = plt.xticks()
    #    newlocs, newticks = formatRATicks(locs)
    #    plt.xticks(newlocs,newticks,rotation=45)
    #if(param.lower()=='dec' or param.lower()=='declination'):
    #    locs, ticks = plt.xticks()
    #    newlocs, newticks = formatDecTicks(locs)
    #    plt.xticks(newlocs,newticks,rotation=45)

    return rbins,myfig#,rkde
4295#
4296
class RALocator(matplotlib.ticker.MultipleLocator):
    """
    RA tick locator that picks a tick spacing (in hours of right
    ascension) appropriate to the displayed range.
    """
    def __init__(self,min=0.0,max=2.0*pi_constant):
        hour = pi_constant / 12.0
        span = max - min
        # Coarser spacing for wider ranges, down to quarter-hour ticks.
        if span > 12.0 * hour:
            base = 3.0 * hour
        elif span > 6.0 * hour:
            base = 2.0 * hour
        # Put hour ticks if there are more than 3 hours displayed
        elif span > 3.0 * hour:
            base = hour
        elif span > hour:
            base = hour / 2.0
        else:
            base = hour / 4.0

        matplotlib.ticker.MultipleLocator.__init__(self, base=base)
4316
class DecLocator(matplotlib.ticker.MultipleLocator):
    """
    Declination tick locator that picks a tick spacing (in degrees)
    appropriate to the displayed range.
    """
    def __init__(self, min=-pi_constant/2.0,max=pi_constant/2.0):
        deg = pi_constant / 180.0
        span = max - min
        # Coarser spacing for wider ranges, down to one-degree ticks.
        if span > 60 * deg:
            base = 30.0 * deg
        elif span > 20 * deg:
            base = 10 * deg
        elif span > 10 * deg:
            base = 5 * deg
        else:
            base = deg
        matplotlib.ticker.MultipleLocator.__init__(self, base=base)
4332
class RAFormatter(matplotlib.ticker.FuncFormatter):
    """Tick formatter rendering right ascension values via getRAString."""
    def __init__(self,accuracy='auto'):
        # accuracy is accepted for API symmetry but not forwarded;
        # getRAString's own default ('auto') is always used.
        matplotlib.ticker.FuncFormatter.__init__(self,getRAString)
4336
class DecFormatter(matplotlib.ticker.FuncFormatter):
    """Tick formatter rendering declination values via getDecString."""
    def __init__(self,accuracy='auto'):
        # accuracy is accepted for API symmetry but not forwarded;
        # getDecString's own default ('auto') is always used.
        matplotlib.ticker.FuncFormatter.__init__(self,getDecString)
4340
4341
4342
def getRAString(radians,accuracy='auto'):
    """
    Format a right ascension (in radians) as an h/m/s string.

    @param radians: right ascension in radians.
    @param accuracy: 'hour', 'min', 'sec', or 'auto' to pick the coarsest
        representation accurate to about half a unit.
    """
    # BUGFIX: fmod is used below but the module-level math import
    # (cos, ceil, floor, sqrt, pi) does not include it, so the 'auto'
    # branch could raise NameError; import it locally.
    from math import fmod
    secs=radians*12.0*3600/pi_constant
    hours, rem = divmod(secs, 3600 )
    mins,rem = divmod(rem, 60 )
    secs = rem
    # Carry near-sixty values into the next minute/hour so e.g. 59.7s
    # prints as the following minute rather than "60s".
    if secs>=59.5:
        secs=secs-60
        mins=mins+1
    if mins>=59.5:
        mins=mins-60
        hours=hours+1
    if accuracy=='hour': return r'%ih'%(hours)
    if accuracy=='min': return r'%ih%im'%(hours,mins)
    if accuracy=='sec': return r'%ih%im%2.0fs'%(hours,mins,secs)
    else:
        # Auto: use the coarsest representation that loses < 0.5 unit.
        if abs(fmod(secs,60.0))>=0.5: return(getRAString(radians,accuracy='sec'))
        if abs(fmod(mins,60.0))>=0.5: return(getRAString(radians,accuracy='min'))
        else: return(getRAString(radians,accuracy='hour'))
4361
def getDecString(radians,accuracy='auto'):
    """
    Format a declination (in radians) as a degree/arcmin/arcsec string.

    @param radians: declination in radians; may be negative.
    @param accuracy: 'deg', 'arcmin', 'arcsec', or 'auto' (the auto path
        currently always renders whole degrees).
    """
    # LaTeX doesn't like unicode degree symbols etc
    if matplotlib.rcParams['text.usetex']:
        degsymb='$^\\circ$'
        minsymb="'"
        secsymb="''"
    else:
        degsymb=chr(0x0B0)
        minsymb=chr(0x027)
        secsymb=chr(0x2033)
    # Work with the magnitude and restore the sign when formatting.
    if(radians<0):
        radians=-radians
        sign=-1
    else: sign=+1
    deg,rem=divmod(radians,pi_constant/180.0)
    mins, rem = divmod(rem, pi_constant/(180.0*60.0))
    secs = rem * (180.0*3600.0)/pi_constant
    #if secs>=59.5:
    #    secs=secs-60.0
    #    mins=mins+1
    #if mins>=59.5:
    #    mins=mins-60.0
    #    deg=deg+1
    # Round up when the remainder exceeds half of the next-coarser unit.
    if (accuracy=='arcmin' or accuracy=='deg') and secs>30: mins=mins+1
    if accuracy=='deg' and mins>30: deg=deg+1
    if accuracy=='deg': return '%i'%(sign*deg)+degsymb
    if accuracy=='arcmin': return '%i%s%i%s'%(sign*deg,degsymb,mins,minsymb)
    if accuracy=='arcsec': return '%i%s%i%s%2.0f%s'%(sign*deg,degsymb,mins,minsymb,secs,secsymb)
    else:
        # if abs(fmod(secs,60.0))>=0.5 and abs(fmod(secs,60)-60)>=0.5 : return(getDecString(sign*radians,accuracy='arcsec'))
        # if abs(fmod(mins,60.0))>=0.5 and abs(fmod(mins,60)-60)>=0.5: return(getDecString(sign*radians,accuracy='arcmin'))
        # else: return(getDecString(sign*radians,accuracy='deg'))
        return(getDecString(sign*radians,accuracy='deg'))
4395
def plot_corner(posterior,levels,parnames=None):
    """
    Make a corner plot using the corner module
    (See http://github.com/dfm/corner.py)
    @param posterior: The Posterior object
    @param levels: a list of confidence levels
    @param parnames: list of parameters to include; defaults to all of
        the posterior's parameters when None
    """
    try:
        import corner
    except ImportError:
        try:
            import triangle as corner
        except ImportError:
            print('Cannot load corner module. Try running\n\t$ pip install corner')
            return None
    # BUGFIX: the declared default parnames=None previously crashed in
    # filter(); fall back to every parameter in the posterior.
    if parnames is None:
        parnames = posterior.names
    parnames=list(filter(lambda x: x in posterior.names, parnames))
    labels = [plot_label(parname) for parname in parnames]
    data = np.hstack([posterior[p].samples for p in parnames])
    if posterior.injection:
        # Mark the injected values as "truths" on the corner plot.
        injvals=[posterior[p].injval for p in parnames]
        myfig=corner.corner(data,labels=labels,truths=injvals,quantiles=levels,plot_datapoints=False,bins=20)
    else:
        myfig=corner.corner(data,labels=labels,quantiles=levels,plot_datapoints=False,bins=20)
    return(myfig)
4421
4422
def plot_two_param_kde_greedy_levels(posteriors_by_name,plot2DkdeParams,levels,colors_by_name,line_styles=__default_line_styles,figsize=(4,3),dpi=250,figposition=[0.2,0.2,0.48,0.75],legend='right',hatches_by_name=None,Npixels=50):
    """
    Plots a 2D kernel density estimate of the 2-parameter marginal posterior.

    @param posteriors_by_name: dictionary of Posterior objects, indexed by name

    @param plot2DkdeParams: a dict {param1Name:Nparam1Bins,param2Name:Nparam2Bins}

    @param levels: list of credible levels

    @param colors_by_name: dict of colors, indexed by name

    @param line_styles: list of line styles for the credible levels

    @param figsize: figsize to pass to matplotlib

    @param dpi: dpi to pass to matplotlib

    @param figposition: figposition to pass to matplotlib (NOTE: mutable
        default list; it is only read here)

    @param legend: position of legend

    @param hatches_by_name: dict of hatch styles indexed by name (currently
        unused in this function)

    @param Npixels: Number of pixels in each axis of the 2D grid
    """

    from scipy import seterr as sp_seterr
    confidence_levels=levels

    # Reversed parameter order here for consistency with the other
    # plotting functions
    par2_name,par1_name=plot2DkdeParams.keys()
    # NOTE(review): xbin/ybin are read from the dict but never used below;
    # the grid resolution actually comes from Npixels.
    xbin=plot2DkdeParams[par1_name]
    ybin=plot2DkdeParams[par2_name]
    levels= levels
    # KDE evaluation far from the support routinely underflows; silence
    # those warnings in both numpy and scipy.
    np.seterr(under='ignore')
    sp_seterr(under='ignore')

    fig=plt.figure(1,figsize=figsize,dpi=dpi)
    plt.clf()
    axes=fig.add_axes(figposition)
    name_list=[]

    #This fixes the precedence of line styles in the plot
    if len(line_styles)<len(levels):
        raise RuntimeError("Error: Need as many or more line styles to choose from as confidence levels to plot!")

    CSlst=[]
    # Draw one contour set per named posterior, all on the same axes.
    for name,posterior in posteriors_by_name.items():
        print('Plotting '+name)
        name_list.append(name)
        par1_injvalue=posterior[par1_name].injval
        par2_injvalue=posterior[par2_name].injval

        par_trigvalues1=posterior[par1_name].trigvals
        par_trigvalues2=posterior[par2_name].trigvals
        xdat=posterior[par1_name].samples
        ydat=posterior[par2_name].samples
        a=np.squeeze(posterior[par1_name].samples)
        b=np.squeeze(posterior[par2_name].samples)
        offset=0.0
        # Time-like parameters are shifted by their floor so axis tick
        # labels stay readable; the offset is recorded in the axis name.
        if par1_name.find('time')!=-1:
            offset=floor(min(a))
            a=a-offset
            if par1_injvalue:
                par1_injvalue=par1_injvalue-offset
            ax1_name=par1_name+' + %i'%(int(offset))
        else: ax1_name=par1_name

        if par2_name.find('time')!=-1:
            offset=floor(min(b))
            b=b-offset
            if par2_injvalue:
                par2_injvalue=par2_injvalue-offset
            ax2_name=par2_name+' + %i'%(int(offset))
        else: ax2_name=par2_name

        samp=np.transpose(np.column_stack((xdat,ydat)))

        # Gaussian KDE of the joint samples; 'den' is the KDE evaluated at
        # every sample point, used later to derive contour heights.
        try:
            kde=stats.kde.gaussian_kde(samp)
            den=kde(samp)
        except:
            # KDE construction can fail (e.g. singular covariance); the
            # whole plot is abandoned in that case.
            return None

        #grid_coords = np.append(x.reshape(-1,1),y.reshape(-1,1),axis=1)
        Nx=Npixels
        Ny=Npixels
        # Ugly hack to produce plots centred on the main mode:
        # Choose 1%-99% percentile range of the samples
        # Then zoom out by 0.95 to encompass the contour edges
        xsorted=np.sort(xdat,axis=None)
        ysorted=np.sort(ydat,axis=None)
        imin=int(0.01*len(xsorted))
        imax=int(0.99*len(xsorted))
        xmax=xsorted[imax]
        xmin=xsorted[imin]
        ymax=ysorted[imax]
        ymin=ysorted[imin]
        xmid=0.5*(xmin+xmax)
        ymid=0.5*(ymin+ymax)
        xmin=xmid+(xmin-xmid)/0.95
        xmax=xmid+(xmax-xmid)/0.95
        ymin=ymid+(ymin-ymid)/0.95
        ymax=ymid+(ymax-ymid)/0.95
        xax=np.linspace(xmin,xmax,Nx)
        yax=np.linspace(ymin,ymax,Ny)
        #print 'Plot limits %f-%f x %f-%f'%(xmin,xmax,ymin,ymax)

        # Original way which includes all samples
        #xax = np.linspace(np.min(xdat),np.max(xdat),Nx)
        #yax = np.linspace(np.min(ydat),np.max(ydat),Ny)

        # Do the actual plotting
        x,y = np.meshgrid(xax,yax)
        grid_coords = np.vstack( (x.flatten(),y.flatten()) )
        z = kde(grid_coords)
        z = z.reshape(Nx,Ny)
        # Contour heights: sort the per-sample densities in decreasing
        # order; the height for credible level p is the density of the
        # sample at rank p*Npts (greedy credible-region construction).
        densort=np.sort(den)[::-1]
        values=[]
        Npts=xdat.shape[0]
        zvalues=[]
        for level in levels:
            ilevel = int(Npts*level + 0.5)
            if ilevel >= Npts:
                ilevel = Npts-1
            zvalues.append(densort[ilevel])
        CS=plt.contour(x, y, z, zvalues,colors=[colors_by_name[name]],linestyles=line_styles )
        CSlst.append(CS)

        # Mark the injected value, if known for both parameters.
        if par1_injvalue is not None and par2_injvalue is not None:
            plt.plot([par1_injvalue],[par2_injvalue],'b*',scalex=False,scaley=False,markersize=12)

        # Mark per-detector trigger values with one color per IFO.
        if par_trigvalues1 is not None and par_trigvalues2 is not None:
            par1IFOs = set([IFO for IFO in par_trigvalues1.keys()])
            par2IFOs = set([IFO for IFO in par_trigvalues2.keys()])
            IFOs = par1IFOs.intersection(par2IFOs)
            for IFO in IFOs:
                if IFO=='H1': color = 'r'
                elif IFO=='L1': color = 'g'
                elif IFO=='V1': color = 'm'
                else: color = 'c'
                plt.plot([par_trigvalues1[IFO]],[par_trigvalues2[IFO]],color=color,marker='o',scalex=False,scaley=False)

    plt.xlabel(plot_label(par1_name))
    plt.ylabel(plot_label(par2_name))
    plt.grid()

    # Sanity check: every posterior must have contributed exactly one
    # contour set.
    if len(name_list)!=len(CSlst):
        raise RuntimeError("Error number of contour objects does not equal number of names! Use only *one* contour from each set to associate a name.")

    # Legend: one entry per run name, plus (when several levels are drawn)
    # one dummy black line per credible level showing its line style.
    full_name_list=[]
    dummy_lines=[]
    for plot_name in name_list:
        full_name_list.append(plot_name)
    if len(confidence_levels)>1:
        for ls_,cl in zip(line_styles[0:len(confidence_levels)],confidence_levels):
            dummy_lines.append(mpl_lines.Line2D(np.array([0.,1.]),np.array([0.,1.]),ls=ls_,color='k'))
            full_name_list.append('%s%%'%str(int(cl*100)))

    fig_actor_lst = [cs.collections[0] for cs in CSlst]
    fig_actor_lst.extend(dummy_lines)
    if legend is not None:
        # framealpha is not accepted by very old matplotlib versions;
        # fall back to a legend without it.
        try:
            twodcontour_legend=plt.figlegend(tuple(fig_actor_lst), tuple(full_name_list), loc='right',framealpha=0.1)
        except:
            twodcontour_legend=plt.figlegend(tuple(fig_actor_lst), tuple(full_name_list), loc='right')
        for text in twodcontour_legend.get_texts():
            text.set_fontsize('small')

    # Compact scientific tick formatting; the number of ticks adapts to
    # the width of the formatted tick labels.
    majorFormatterX=ScalarFormatter(useMathText=True)
    majorFormatterX.format_data=lambda data:'%.4g'%(data)
    majorFormatterY=ScalarFormatter(useMathText=True)
    majorFormatterY.format_data=lambda data:'%.4g'%(data)
    majorFormatterX.set_scientific(True)
    majorFormatterY.set_scientific(True)
    axes.xaxis.set_major_formatter(majorFormatterX)
    axes.yaxis.set_major_formatter(majorFormatterY)
    Nchars=max(map(lambda d:len(majorFormatterX.format_data(d)),axes.get_xticks()))
    if Nchars>8:
        Nticks=3
    elif Nchars>5:
        Nticks=4
    elif Nchars>4:
        Nticks=5
    else:
        Nticks=6
    locatorX=matplotlib.ticker.MaxNLocator(nbins=Nticks-1)
    # Sky-location parameters get dedicated RA/Dec locators and formatters
    # instead of the generic scientific ones.
    if par1_name=='rightascension' or par1_name=='ra':
        (ramin,ramax)=plt.xlim()
        locatorX=RALocator(min=ramin,max=ramax)
        majorFormatterX=RAFormatter()
    if par1_name=='declination' or par1_name=='dec':
        (decmin,decmax)=plt.xlim()
        locatorX=DecLocator(min=decmin,max=decmax)
        majorFormatterX=DecFormatter()
    axes.xaxis.set_major_formatter(majorFormatterX)
    if par2_name=='rightascension' or par2_name=='ra':
        (ramin,ramax)=plt.ylim()
        locatorY=RALocator(ramin,ramax)
        axes.yaxis.set_major_locator(locatorY)
        majorFormatterY=RAFormatter()
    if par2_name=='declination' or par2_name=='dec':
        (decmin,decmax)=plt.ylim()
        locatorY=DecLocator(min=decmin,max=decmax)
        majorFormatterY=DecFormatter()
        axes.yaxis.set_major_locator(locatorY)

    axes.yaxis.set_major_formatter(majorFormatterY)
    #locatorX.view_limits(bins[0],bins[-1])
    axes.xaxis.set_major_locator(locatorX)


    # RA increases to the left on sky maps: clamp to [0, 2pi] and flip the
    # x axis by passing (xmax, xmin) to xlim.
    if(par1_name.lower()=='ra' or par1_name.lower()=='rightascension'):
        xmin,xmax=plt.xlim()
        if(xmin<0.0): xmin=0.0
        if(xmax>2.0*pi_constant): xmax=2.0*pi_constant
        plt.xlim(xmax,xmin)

    return fig
4644
def plot_two_param_kde(posterior,plot2DkdeParams):
    """
    Plots a 2D kernel density estimate of the 2-parameter marginal posterior.

    @param posterior: an instance of the Posterior class.

    @param plot2DkdeParams: a dict {param1Name:Nparam1Bins,param2Name:Nparam2Bins}
    """

    from scipy import seterr as sp_seterr

    par1_name,par2_name=plot2DkdeParams.keys()
    Nx=plot2DkdeParams[par1_name]
    Ny=plot2DkdeParams[par2_name]

    xdat=posterior[par1_name].samples
    ydat=posterior[par2_name].samples

    par_injvalue1=posterior[par1_name].injval
    par_injvalue2=posterior[par2_name].injval

    par_trigvalues1=posterior[par1_name].trigvals
    par_trigvalues2=posterior[par2_name].trigvals

    # Evaluating the KDE far from its support underflows; ignore it.
    np.seterr(under='ignore')
    sp_seterr(under='ignore')

    myfig=plt.figure(1,figsize=(6,4),dpi=200)
    myfig.add_axes(plt.Axes(myfig,[0.20,0.20,0.75,0.7]))
    plt.clf()

    # Regular Nx x Ny evaluation grid spanning the full sample range.
    xax=np.linspace(min(xdat),max(xdat),Nx)
    yax=np.linspace(min(ydat),max(ydat),Ny)
    x,y=np.meshgrid(xax,yax)

    samp=np.transpose(np.column_stack((xdat,ydat)))

    # Gaussian KDE of the joint samples, evaluated on the grid.
    kde=stats.kde.gaussian_kde(samp)

    grid_coords = np.append(x.reshape(-1,1),y.reshape(-1,1),axis=1)

    z = kde(grid_coords.T)
    z = z.reshape(Nx,Ny)


    # Aspect ratio from the data ranges so the image fills the axes.
    asp=xax.ptp()/yax.ptp()
# if(asp<0.8 or asp > 1.6): asp=1.4
    plt.imshow(z,extent=(xax[0],xax[-1],yax[0],yax[-1]),aspect=asp,origin='lower')
    plt.colorbar()

    # Overplot the injected value, if known for both parameters.
    if par_injvalue1 is not None and par_injvalue2 is not None:
        plt.plot([par_injvalue1],[par_injvalue2],'bo',scalex=False,scaley=False)

    # Overplot per-detector trigger values, one color per IFO.
    if par_trigvalues1 is not None and par_trigvalues2 is not None:
        par1IFOs = set([IFO for IFO in par_trigvalues1.keys()])
        par2IFOs = set([IFO for IFO in par_trigvalues2.keys()])
        IFOs = par1IFOs.intersection(par2IFOs)
        for IFO in IFOs:
            if IFO=='H1': color = 'r'
            elif IFO=='L1': color = 'g'
            elif IFO=='V1': color = 'm'
            else: color = 'c'
            plt.plot([par_trigvalues1[IFO]],[par_trigvalues2[IFO]],color=color,marker='o',scalex=False,scaley=False)

    plt.xlabel(plot_label(par1_name))
    plt.ylabel(plot_label(par2_name))
    plt.grid()

    # For RA and dec set custom labels and for RA reverse
    if(par1_name.lower()=='ra' or par1_name.lower()=='rightascension'):
        xmin,xmax=plt.xlim()
        plt.xlim(xmax,xmin)


    return myfig
4720
def get_inj_by_time(injections,time):
    """
    Filter injections to find the injection with end time given by time +/- 0.1s

    @param injections: iterable of injection rows (passed to get_end)
    @param time: GPS end time to match
    @return: the first injection whose end time is within 0.1 s of time
    @raises StopIteration: if no injection matches
    """
    # Bug fix: itertools.ifilter and the .next() method are Python 2 only;
    # the builtin filter()/next() pair is the Python 3 equivalent.
    return next(filter(lambda a: abs(float(get_end(a)) - time) < 0.1, injections))
4728
def histogram2D(posterior,greedy2Params,confidence_levels):
    """
    Returns a 2D histogram and edges for the two parameters passed in greedy2Params, plus the actual discrete confidence levels
    imposed by the finite number of samples.
    H,xedges,yedges,Hlasts = histogram2D(posterior,greedy2Params,confidence_levels)
    @param posterior: Posterior instance
    @param greedy2Params: a dict ;{param1Name:param1binSize,param2Name:param2binSize}
    @param confidence_levels: a list of the required confidence levels to plot on the contour map.
        NOTE: this list is sorted in place as a side effect.
    """

    par1_name,par2_name=greedy2Params.keys()
    par1_bin=greedy2Params[par1_name]
    par2_bin=greedy2Params[par2_name]
    a=np.squeeze(posterior[par1_name].samples)
    b=np.squeeze(posterior[par2_name].samples)
    # Number of bins needed to cover each parameter's sample range at the
    # requested bin size.
    par1pos_Nbins= int(ceil((a.max() - a.min())/par1_bin))+1
    par2pos_Nbins= int(ceil((b.max() - b.min())/par2_bin))+1
    H, xedges, yedges = np.histogram2d(a,b, bins=(par1pos_Nbins, par2pos_Nbins),density=True)

    # Greedily accumulate the highest-density bins until each requested
    # confidence level is reached; Hlast records the density of the last
    # bin added, i.e. the contour height realising that level.
    temp=np.copy(H).ravel()
    confidence_levels.sort()
    Hsum=0
    Hlasts=[]
    idxes=np.argsort(temp)
    j=len(idxes)-1
    Htotal=np.sum(H)   # loop-invariant; H itself is never modified below
    for cl in confidence_levels:
        while float(Hsum/Htotal)<cl:
            max_i=idxes[j]
            j-=1
            Hlast=temp[max_i]
            Hsum+=Hlast
            temp[max_i]=0
        Hlasts.append(Hlast)
    return (H,xedges,yedges,Hlasts)
4771
def plot_two_param_greedy_bins_contourf(posteriors_by_name,greedy2Params,confidence_levels,colors_by_name,figsize=(7,6),dpi=120,figposition=[0.3,0.3,0.5,0.5],legend='right',hatches_by_name=None):
    """
    Filled-contour plot of 2D greedy-binned credible regions, one contour
    set per named posterior, all on a single figure.

    @param posteriors_by_name A dictionary of posterior objects indexed by name
    @param greedy2Params: a dict ;{param1Name:param1binSize,param2Name:param2binSize}
    @param confidence_levels: a list of the required confidence levels to plot on the contour map.
    @param colors_by_name: dict of colors, indexed by name
    @param figsize: figsize to pass to matplotlib
    @param dpi: dpi to pass to matplotlib
    @param figposition: figposition to pass to matplotlib
    @param legend: Legend position to pass to matplotlib
    @param hatches_by_name: dict of hatch styles, indexed by name (currently
        unused in this function)
    """
    fig=plt.figure(1,figsize=figsize,dpi=dpi)
    plt.clf()
    fig.add_axes(figposition)
    CSlst=[]
    name_list=[]
    par1_name,par2_name=greedy2Params.keys()
    for name,posterior in posteriors_by_name.items():
        name_list.append(name)
        # The extra ~1.0 level closes the outermost filled band.
        H,xedges,yedges,Hlasts=histogram2D(posterior,greedy2Params,confidence_levels+[0.99999999999999])
        # NOTE(review): 'extent' is computed but never used below.
        extent= [xedges[0], yedges[-1], xedges[-1], xedges[0]]
        CS2=plt.contourf(yedges[:-1],xedges[:-1],H,Hlasts,extend='max',colors=[colors_by_name[name]] ,alpha=0.3 )
        CS=plt.contour(yedges[:-1],xedges[:-1],H,Hlasts,extend='max',colors=[colors_by_name[name]] )
        CSlst.append(CS)

    plt.title("%s-%s confidence contours (greedy binning)"%(par1_name,par2_name)) # add a title
    plt.xlabel(plot_label(par2_name))
    plt.ylabel(plot_label(par1_name))
    # Sanity check: one contour set per posterior.
    if len(name_list)!=len(CSlst):
        raise RuntimeError("Error number of contour objects does not equal number of names! Use only *one* contour from each set to associate a name.")
    # Legend: one entry per run name plus dummy black lines labelling the
    # credible levels (in percent).
    full_name_list=[]
    dummy_lines=[]
    for plot_name in name_list:
        full_name_list.append(plot_name)
    if len(confidence_levels)>1:
        for cl in confidence_levels+[1]:
            dummy_lines.append(mpl_lines.Line2D(np.array([0.,1.]),np.array([0.,1.]),color='k'))
            full_name_list.append('%s%%'%str(int(cl*100)))
    fig_actor_lst = [cs.collections[0] for cs in CSlst]
    fig_actor_lst.extend(dummy_lines)
    if legend is not None:
        # framealpha is unavailable in old matplotlib; fall back gracefully.
        try:
            twodcontour_legend=plt.figlegend(tuple(fig_actor_lst), tuple(full_name_list), loc='right',framealpha=0.1)
        except:
            twodcontour_legend=plt.figlegend(tuple(fig_actor_lst), tuple(full_name_list), loc='right')
        for text in twodcontour_legend.get_texts():
            text.set_fontsize('small')

    return fig
4822
4823
4824
def plot_two_param_greedy_bins_hist(posterior,greedy2Params,confidence_levels):
    """
    Histograms of the ranked pixels produced by the 2-parameter greedy
    binning algorithm colured by their confidence level.

    @param posterior: an instance of the Posterior class.

    @param greedy2Params: a dict ;{param1Name:param1binSize,param2Name:param2binSize}

    @param confidence_levels: list of confidence levels to show
    """

    from scipy import seterr as sp_seterr

    np.seterr(under='ignore')
    sp_seterr(under='ignore')

    #Extract parameter names
    par1_name,par2_name=greedy2Params.keys()
    #Extract bin sizes
    par1_bin=greedy2Params[par1_name]
    par2_bin=greedy2Params[par2_name]

    a=np.squeeze(posterior[par1_name].samples)
    b=np.squeeze(posterior[par2_name].samples)

    #Extract injection information
    par1_injvalue=posterior[par1_name.lower()].injval
    par2_injvalue=posterior[par2_name.lower()].injval

    #Create 2D bin array
    par1pos_min=a.min()
    par2pos_min=b.min()

    par1pos_max=a.max()
    par2pos_max=b.max()

    # NOTE(review): these bin counts are computed but the histogram below
    # actually uses a fixed 50x50 grid (see 'bins' further down).
    par1pos_Nbins= int(ceil((par1pos_max - par1pos_min)/par1_bin))+1
    par2pos_Nbins= int(ceil((par2pos_max - par2pos_min)/par2_bin))+1

    # Adjust for time parameter
    # (time-like parameters are shifted by their floor so tick labels
    # remain readable; the offset is recorded in the axis name)
    if par1_name.find('time')!=-1:
        offset=floor(min(a))
        a=a-offset
        if par1_injvalue:
            par1_injvalue=par1_injvalue-offset
        ax1_name=par1_name+' + %i'%(int(offset))
    else: ax1_name=par1_name

    if par2_name.find('time')!=-1:
        offset=floor(min(b))
        b=b-offset
        if par2_injvalue:
            par2_injvalue=par2_injvalue-offset
        ax2_name=par2_name+' + %i'%(int(offset))
    else: ax2_name=par2_name


    #Extract trigger information
    par1_trigvalues=posterior[par1_name.lower()].trigvals
    par2_trigvalues=posterior[par2_name.lower()].trigvals

    myfig=plt.figure()
    axes=plt.Axes(myfig,[0.3,0.3,0.95-0.3,0.90-0.3])
    myfig.add_axes(axes)

    #plt.clf()
    # Note the axis swap: par2 goes on x, par1 on y throughout this plot.
    plt.xlabel(plot_label(ax2_name))
    plt.ylabel(plot_label(ax1_name))

    #bins=(par1pos_Nbins,par2pos_Nbins)
    bins=(50,50) # Matches plot_one_param_pdf

    # Scientific tick formatting on both axes.
    majorFormatterX=ScalarFormatter(useMathText=True)
    majorFormatterX.format_data=lambda data:'%.4g'%(data)
    majorFormatterY=ScalarFormatter(useMathText=True)
    majorFormatterY.format_data=lambda data:'%.4g'%(data)
    majorFormatterX.set_scientific(True)
    majorFormatterY.set_scientific(True)
    axes.xaxis.set_major_formatter(majorFormatterX)
    axes.yaxis.set_major_formatter(majorFormatterY)
    H, xedges, yedges = np.histogram2d(a,b, bins,density=False)


    #Replace H with greedy bin confidence levels at each pixel...
    # Pixels are visited from most- to least-populated; each visited pixel
    # is overwritten with 1 - (cumulative fraction so far), so denser
    # pixels end up darker under the reversed-gray colormap below.
    temp=np.copy(H)
    temp=temp.flatten()

    Hsum=0
    Hsum_actual=np.sum(H)

    idxes=np.argsort(temp)
    j=len(idxes)-1
    while Hsum<Hsum_actual:
        #ind = np.argsort(temp)
        max_i=idxes[j]
        j-=1
        val = temp[max_i]
        Hsum+=int(val)
        temp[max_i]=0

        #print Hsum,Hsum_actual
        H.flat[max_i]=1-float(Hsum)/float(Hsum_actual)

    extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
    plt.imshow(np.flipud(H), axes=axes, aspect='auto', extent=extent, interpolation='nearest',cmap='gray_r')
    plt.gca().autoscale_view()
    plt.colorbar()

    #plt.hexbin(a,b,cmap='gray_r',axes=axes )

    # Pick a tick count that fits the width of the formatted tick labels.
    Nchars=max(map(lambda d:len(majorFormatterX.format_data(d)),axes.get_xticks()))
    if Nchars>8:
        Nticks=3
    elif Nchars>5:
        Nticks=4
    elif Nchars>4:
        Nticks=5
    else:
        Nticks=6
    locatorX=matplotlib.ticker.MaxNLocator(nbins=Nticks-1)
    (xmin,xmax)=plt.xlim()
    (ymin,ymax)=plt.ylim()
    # Sky coordinates get dedicated locators/formatters; remember par2 is
    # on the x axis and par1 on the y axis here.
    if par2_name=='rightascension' or par2_name=='ra':
        locatorX=RALocator(min=xmin,max=xmax)
        majorFormatterX=RAFormatter()
    if par2_name=='declination' or par2_name=='dec':
        locatorX=DecLocator(min=xmin,max=xmax)
        majorFormatterX=DecFormatter()
    if par1_name=='rightascension' or par1_name=='ra':
        locatorY=RALocator(min=ymin,max=ymax)
        axes.yaxis.set_major_locator(locatorY)
        majorFormatterY=RAFormatter()
    if par1_name=='declination' or par1_name=='dec':
        locatorY=DecLocator(min=ymin,max=ymax)
        axes.yaxis.set_major_locator(locatorY)
        majorFormatterY=DecFormatter()

    axes.xaxis.set_major_formatter(majorFormatterX)
    axes.yaxis.set_major_formatter(majorFormatterY)
    #locatorX.view_limits(bins[0],bins[-1])
    axes.xaxis.set_major_locator(locatorX)

    # Mark the injected value (par2 on x, par1 on y).
    if par1_injvalue is not None and par2_injvalue is not None:
        plt.plot([par2_injvalue],[par1_injvalue],'bo',scalex=False,scaley=False)



    # RA axis: clamp to [0, 2pi] and reverse (RA increases to the left).
    if(par2_name.lower()=='ra' or par2_name.lower()=='rightascension'):
        xmin,xmax=plt.xlim()
        if(xmin)<0.0: xmin=0.0
        if(xmax>2.0*pi_constant): xmax=2.0*pi_constant
        plt.xlim(xmax,xmin)


    return myfig
4981
def greedy_bin_one_param(posterior,greedy1Param,confidence_levels):
    """
    Determine the 1-parameter Bayesian Confidence Interval using a greedy
    binning algorithm.

    @param posterior: an instance of the posterior class.

    @param greedy1Param: a dict; {paramName:paramBinSize}.

    @param confidence_levels: A list of floats of the required confidence
        intervals [(0-1)]. Sorted in place as a side effect.
    """

    paramName=next(iter(greedy1Param))
    par_bin=greedy1Param[paramName]
    par_samps=posterior[paramName.lower()].samples

    parpos_min=min(par_samps)[0]
    parpos_max=max(par_samps)[0]

    parpos_Nbins=int(ceil((parpos_max-parpos_min)/par_bin))+1

    # Points array is 2D (same value in both columns) so it can be fed
    # through the same confidence-level machinery as the 2D binners.
    greedyPoints=np.zeros((parpos_Nbins,2))
    greedyHist=np.zeros(parpos_Nbins,dtype='i8')

    # Record the left edge of every bin.
    edge=parpos_min
    for binIndex in range(parpos_Nbins):
        greedyPoints[binIndex,0]=edge
        greedyPoints[binIndex,1]=edge
        edge+=par_bin

    # Histogram the samples, reporting (without aborting) any that land
    # outside the binning range.
    for samp in par_samps:
        samp=samp[0]
        binNumber=int(floor((samp-parpos_min)/par_bin))
        try:
            greedyHist[binNumber]+=1
        except IndexError:
            print("IndexError: bin number: %i total bins: %i parsamp: %f "\
                %(binNumber,parpos_Nbins,samp))

    # Locate the bin containing the injected value, if one is known.
    injbin=None
    par_injvalue=posterior[paramName].injval
    if par_injvalue:
        injbin=floor((par_injvalue-parpos_min)/par_bin)

    toppoints,injectionconfidence,reses,injection_area=_greedy_bin(greedyHist,greedyPoints,injbin,float(par_bin),int(len(par_samps)),confidence_levels)

    # Convert the ranked bins into a (min, max) interval per level.
    cl_intervals=[]
    confidence_levels.sort()
    for cl in confidence_levels:
        ind=np.nonzero(toppoints[:,-1]<cl)
        if len(ind[0]) > 1:
            cl_intervals.append((np.min(toppoints[ind,0]),np.max(toppoints[ind,0])))
        else:
            # Degenerate interval: a single bin satisfies this level.
            cl_intervals.append((toppoints[ind[0],0],toppoints[ind[0],0]))

    return toppoints,injectionconfidence,reses,injection_area,cl_intervals
5045
5046#
def contigious_interval_one_param(posterior,contInt1Params,confidence_levels):
    """
    Calculates the smallest contigious (contiguous) 1-parameter confidence
    interval for a set of given confidence levels.

    @param posterior: an instance of the Posterior class.

    @param contInt1Params: a dict {paramName:paramBinSize}.

    @param confidence_levels: Required confidence intervals.

    @return: (oneDContCL, oneDContInj) dicts with the interval edges/width
        and, when an injection is present, its interval and confidence.
    """
    oneDContCL={}
    oneDContInj={}

    paramName=list(contInt1Params.keys())[0]
    par_bin=list(contInt1Params.values())[0]

    par_injvalue=posterior[paramName].injval

    par_samps=posterior[paramName].samples

    parpos_min=min(par_samps)
    parpos_max=max(par_samps)

    par_point=parpos_min
    parpos_Nbins= int(ceil((parpos_max - parpos_min)/par_bin))+1

    # Histogram the samples onto a regular grid of parpos_Nbins bins.
    greedyHist=np.zeros(parpos_Nbins,dtype='i8')

    for par_samp in par_samps:
        par_binNumber=int(floor((par_samp-parpos_min)/par_bin))
        try:
            greedyHist[par_binNumber]+=1
        except IndexError:
            print("IndexError: bin number: %i total bins: %i parsamp: %f bin: %i"\
            %(
            par_binNumber,
            parpos_Nbins,
            par_samp,
            par_binNumber
            ))

    injbin=None
    #Find injection bin
    if par_injvalue:
        par_binNumber=floor((par_injvalue-parpos_min)/par_bin)
        injbin=par_binNumber

    j=0
    #print "Calculating contigious confidence intervals for %s..."%par_name
    len_par_samps=len(par_samps)

    injinterval=None

    #Determine smallest contigious interval for given confidence levels (brute force)
    # Outer loop: one pass per requested confidence level; inner loops grow
    # a sliding window one bin at a time until it captures enough samples.
    while j < len(confidence_levels):
        confidence_level=confidence_levels[j]
        #Loop over size of interval
        max_left=0
        max_right=0

        for i in range(len(greedyHist)):

            max_frac=None
            left=0
            right=i

            #Slide interval
            # For this window width (i bins), find the placement capturing
            # the largest fraction of the samples.
            while right<len(greedyHist):
                Npoints=sum(greedyHist[left:right])
                frac=float(Npoints)/float(len_par_samps)
                #print "left %i , right %i , frac %f"%(left,right,frac)

                if max_frac is None:
                    max_frac=frac
                    max_left=left
                    max_right=right
                else:
                    if frac>max_frac:
                        max_frac=frac
                        max_left=left
                        max_right=right

                left+=1
                right+=1

            # Record the injection's interval the first time the best
            # window of the current width covers the injection bin.
            if injbin is not None and injinterval is None:
                if injbin in range(max_left,max_right):
                    injinterval=(max_right-max_left)*par_bin
                    oneDContInj['interval']=injinterval
                    # NOTE(review): 'frac' is the fraction from the last
                    # window position, not max_frac -- confirm intended.
                    oneDContInj['confidence']=1-frac
            # Stop growing the window once it captures enough probability.
            if max_frac > confidence_level:
                break

            max_frac=None

        if max_frac is None:
            print("Cant determine intervals at %f confidence!"%confidence_level)
        else:

            oneDContCL['left']=max_left*par_bin
            oneDContCL['right']=max_right*par_bin
            oneDContCL['width']=(max_right-max_left)*par_bin
            # Skip over any further confidence levels that this interval
            # already satisfies (they would yield the same window).
            k=j
            while k+1<len(confidence_levels) :
                if confidence_levels[k+1]<max_frac:
                    j+=1
                    k+=1
        j+=1

    return oneDContCL,oneDContInj
5159
5160
class ACLError(Exception):
    """Raised when a self-consistent autocorrelation length estimate
    cannot be obtained (see autocorrelation_length_estimate)."""

    def __init__(self, *args):
        # Zero-argument super() is equivalent to the old explicit form.
        super().__init__(*args)
5164
5165
def autocorrelation(series):
    """Returns an estimate of the autocorrelation function of a given
    series. Returns only the positive-lag portion of the ACF,
    normalized so that the zero-th element is 1."""
    N=series.shape[0]

    # Correlating the mean-removed series against its reversed conjugate
    # via an FFT convolution yields the full (two-sided) ACF.
    centred=series-np.mean(series)
    reversed_conj=np.conj(centred[::-1])
    full_acf=np.fft.ifftshift(signal.fftconvolve(reversed_conj,centred,mode='full'))

    # Keep lags 0..N-1 and normalize so the zero-lag value is exactly 1.
    positive_lags=full_acf[0:N]
    return positive_lags/positive_lags[0]
5180
5181
def autocorrelation_length_estimate(series, acf=None, M=5, K=2):
    """Attempts to find a self-consistent estimate of the
    autocorrelation length of a given series.

    If C(tau) is the autocorrelation function (normalized so C(0) = 1,
    for example from the autocorrelation procedure in this module),
    then the autocorrelation length is the smallest s such that

    1 + 2*C(1) + 2*C(2) + ... + 2*C(M*s) < s

    In words: the autocorrelation length is the shortest length so
    that the sum of the autocorrelation function is smaller than that
    length over a window of M times that length.

    The maximum window length is restricted to be len(series)/K as a
    safety precaution against relying on data near the extreme of the
    lags in the ACF, where there is a lot of noise. Note that this
    implies that the series must be at least M*K*s samples long in
    order to get a reliable estimate of the ACL.

    If no such s can be found, raises ACLError; in this case it is
    likely that the series is too short relative to its true
    autocorrelation length to obtain a consistent ACL estimate.

    NOTE: a caller-supplied acf array is modified in place (positive
    lags are doubled)."""

    if acf is None:
        acf=autocorrelation(series)
    # Each positive lag contributes twice to the two-sided ACF sum.
    acf[1:] *= 2.0

    # Only the first 1/K of the lags are trusted.
    imax=int(acf.shape[0]/K)

    # Cumulative ACF sum and the candidate ACL for each window length.
    cacf=np.cumsum(acf)
    s=np.arange(1, cacf.shape[0]+1)/float(M)

    # Indices where the windowed sum drops below the associated ACL.
    candidates=np.flatnonzero(cacf[:imax] < s[:imax])

    if candidates.shape[0] == 0:
        # No self-consistent estimate exists for this series.
        raise ACLError('autocorrelation length too short for consistent estimate')
    # First self-consistent window wins.
    return s[candidates[0]]
5227
5228
def effectiveSampleSize(samples, Nskip=1):
    """
    Compute the effective sample size, calculating the ACL using only
    the second half of the samples to avoid ACL overestimation due to
    chains equilibrating after adaptation.

    @param samples: 1D chain of samples
    @param Nskip: stride between stored samples; scales the returned ACL
    @return: tuple (Neffective, acl, acf)
    """
    N = len(samples)
    # Only the second half of the chain feeds the ACL estimate.
    second_half = samples[int(N/2):]
    acf = autocorrelation(second_half)
    try:
        acl = autocorrelation_length_estimate(second_half, acf=acf)
    except ACLError:
        # Chain too short for a consistent estimate: be conservative and
        # treat the whole chain as one correlated block.
        acl = N
    Neffective = floor(N/acl)
    acl *= Nskip
    return (Neffective, acl, acf)
5244
5245
def readCoincXML(xml_file, trignum):
    """
    Read a coinc XML document and return the single-inspiral trigger rows
    belonging to coinc event number trignum.

    @param xml_file: path to the coinc XML file
    @param trignum: zero-based index of the coinc event to extract
    @return: list of SnglInspiral rows for that coinc event
    @raises RuntimeError: if trignum is out of range for the file
    """
    from igwn_ligolw import lsctables
    from igwn_ligolw import utils
    coincXML = utils.load_filename(xml_file)
    coinc = lsctables.CoincTable.get_table(coincXML)
    coincMap = lsctables.CoincMapTable.get_table(coincXML)
    snglInsps = lsctables.SnglInspiralTable.get_table(coincXML)

    # Bug fix: trignum is a zero-based index, so trignum == len(coinc) is
    # also out of range (the old '>' test let it through and IndexError'd).
    if (trignum >= len(coinc)):
        raise RuntimeError("Error: You asked for trigger %d, but %s contains only %d triggers" %(trignum,xml_file,len(coinc)))
    # Map the requested coinc event to its member single-IFO triggers.
    coincEventID = coinc.getColumnByName('coinc_event_id')[trignum]
    eventIDs = [row.event_id for row in coincMap if row.coinc_event_id == coincEventID]
    triggers = [row for row in snglInsps if row.event_id in eventIDs]
    return triggers
5263
5264#===============================================================================
5265# Parameter estimation codes results parser
5266#===============================================================================
5267
def find_ndownsample(samples, nDownsample):
    """
    Given a list of files, threshold value, and a desired
    number of outputs posterior samples, return the skip number to
    achieve the desired number of posterior samples.
    """
    if nDownsample is None:
        # No explicit target: derive the skip from the smallest effective
        # sample size over all varying physical parameters, and report the
        # longest autocorrelation length found.
        print("Max ACL(s):")
        spline_params=["spcal_npts", "spcal_active","constantcal_active"]
        for knot in np.arange(25):
            for ifo in lal.cached_detector_by_prefix:
                spline_params.append(ifo.lower()+'_spcal_freq_'+str(knot))
                spline_params.append(ifo.lower()+'_spcal_logfreq_'+str(knot))

        # Bookkeeping columns that must not enter the ACL computation.
        non_params = ["logpost", "post", "cycle", "timestamp", "snrh1", "snrl1", "snrv1",
                     "margtime","margtimephi","margtime","time_max","time_min",
                     "time_mean", "time_maxl","sky_frame","psdscaleflag","logdeltaf","flow","f_ref",
                     "lal_amporder","lal_pnorder","lal_approximant","tideo","spino","signalmodelflag",
                     "temperature","nifo","nlocaltemps","ntemps","randomseed","samplerate","segmentlength","segmentstart",
                     "t0", "phase_maxl", "azimuth", "cosalpha", "lal_amporder", "bluni"] + logParams + snrParams + spline_params
        # Columns that never change carry no information either.
        fixed_params = [p for p in samples.colnames if all(x==samples[p][0] for x in samples[p])]
        print("Fixed parameters: "+str(fixed_params))
        non_params.extend(fixed_params)
        varying = [p for p in samples.colnames if p.lower() not in non_params]
        stride=np.diff(samples['cycle'])[0]
        # Column 0: effective sample size; column 1: ACL, per parameter.
        ess_acl = np.array([np.array(effectiveSampleSize(samples[p])[:2]) for p in varying])
        nEffective = min(ess_acl[:,0])
        worst = np.argmax(ess_acl[:,1])
        print("%i (%s)." %(stride*ess_acl[worst,1],varying[worst]))

        # Skip by the smallest effective sample size; NaN when no
        # parameter yielded a usable estimate.
        if nEffective > 1:
            if len(samples) > nEffective:
                return int(ceil(len(samples)/nEffective))
            return 1
        return np.nan

    # Explicit target count: plain decimation down to ~nDownsample samples.
    if len(samples) > nDownsample:
        return int(floor(len(samples)/nDownsample))
    return 1
5315
class PEOutputParser(object):
    """
    A parser for the output of Bayesian parameter estimation codes.

    The constructor selects one of the format-specific ``_*_to_pos``
    methods according to ``inputtype``; ``parse()`` then dispatches to it.

    TODO: Will be abstract class when LDG moves over to Python >2.6,
    inherited by each method .
    """
    def __init__(self,inputtype):
        """
        @param inputtype: one of 'ns', 'common', 'fm', 'inf_mcmc', 'xml',
            'hdf5' or 'hdf5s'; selects the parser used by parse().
            Raises ValueError for any other value.
        """
        if inputtype == 'ns':
            self._parser=self._ns_to_pos
        elif inputtype == 'common':
            self._parser=self._common_to_pos
        elif inputtype == 'fm':
            # BUG FIX: this branch had no body (assignment missing), which is
            # a syntax error; 'fm' is the followupMCMC format.
            self._parser=self._followupmcmc_to_pos
        elif inputtype == "inf_mcmc":
            self._parser=self._infmcmc_to_pos
        elif inputtype == "xml":
            self._parser=self._xml_to_pos
        elif inputtype == 'hdf5':
            self._parser = self._hdf5_to_pos
        elif inputtype == 'hdf5s':
            self._parser = self._hdf5s_to_pos
        else:
            raise ValueError('Invalid value for "inputtype": %r' % inputtype)
5340
5341 def parse(self,files,**kwargs):
5342 """
5343 Parse files.
5344 """
5345 return self._parser(files,**kwargs)
5346
5347 def _infmcmc_to_pos(self,files,outdir=None,deltaLogP=None,fixedBurnins=None,nDownsample=None,oldMassConvention=False,**kwargs):
5348 """
5349 Parser for lalinference_mcmcmpi output.
5350 """
5351 if not (fixedBurnins is None):
5352 if not (deltaLogP is None):
5353 print("Warning: using deltaLogP criteria in addition to fixed burnin")
5354 if len(fixedBurnins) == 1 and len(files) > 1:
5355 print("Only one fixedBurnin criteria given for more than one output. Applying this to all outputs.")
5356 fixedBurnins = np.ones(len(files),'int')*fixedBurnins[0]
5357 elif len(fixedBurnins) != len(files):
5358 raise RuntimeError("Inconsistent number of fixed burnin criteria and output files specified.")
5359 print("Fixed burning criteria: ",fixedBurnins)
5360 else:
5361 fixedBurnins = np.zeros(len(files))
5362 logPThreshold=-np.inf
5363 if not (deltaLogP is None):
5364 logPThreshold= - deltaLogP
5365 print("Eliminating any samples before log(Post) = ", logPThreshold)
5366 nskips=self._find_ndownsample(files, logPThreshold, fixedBurnins, nDownsample)
5367 if nDownsample is None:
5368 print("Downsampling to take only uncorrelated posterior samples from each file.")
5369 if len(nskips) == 1 and np.isnan(nskips[0]):
5370 print("WARNING: All samples in chain are correlated. Downsampling to 10000 samples for inspection!!!")
5371 nskips=self._find_ndownsample(files, logPThreshold, fixedBurnins, 10000)
5372 else:
5373 for i in range(len(nskips)):
5374 if np.isnan(nskips[i]):
5375 print("%s eliminated since all samples are correlated.")
5376 else:
5377 print("Downsampling by a factor of ", nskips[0], " to achieve approximately ", nDownsample, " posterior samples")
5378 if outdir is None:
5379 outdir=''
5380 runfileName=os.path.join(outdir,"lalinfmcmc_headers.dat")
5381 postName="posterior_samples.dat"
5382 runfile=open(runfileName, 'w')
5383 outfile=open(postName, 'w')
5384 try:
5385 self._infmcmc_output_posterior_samples(files, runfile, outfile, logPThreshold, fixedBurnins, nskips, oldMassConvention)
5386 finally:
5387 runfile.close()
5388 outfile.close()
5389 return self._common_to_pos(open(postName,'r'))
5390
5391
    def _infmcmc_output_posterior_samples(self, files, runfile, outfile, logPThreshold, fixedBurnins, nskips=None, oldMassConvention=False):
        """
        Concatenate all the samples from the given files into outfile.
        For each file, only those samples past the point where the
        log(post) > logPThreshold are concatenated after eliminating
        fixedBurnin.

        @param files: chain output file names, processed in order.
        @param runfile: open file object receiving each chain's header block.
        @param outfile: open file object receiving the combined samples.
        @param logPThreshold: samples are output once log(post) first exceeds this.
        @param fixedBurnins: per-file cycle count to skip before output starts.
        @param nskips: per-file thinning strides (default: keep every sample).
        @param oldMassConvention: swap '1'/'2' suffixed column labels.
        """
        nRead=0
        outputHeader=False
        acceptedChains=0
        if nskips is None:
            nskips = np.ones(len(files),'int')
        # i is the 1-based chain index written into the trailing 'chain' column.
        for infilename,i,nskip,fixedBurnin in zip(files,range(1,len(files)+1),nskips,fixedBurnins):
            infile=open(infilename,'r')
            try:
                print("Writing header of %s to %s"%(infilename,runfile.name))
                runInfo,header=self._clear_infmcmc_header(infile)
                runfile.write('Chain '+str(i)+':\n')
                runfile.writelines(runInfo)
                print("Processing file %s to %s"%(infilename,outfile.name))
                write_fref = False
                if 'f_ref' not in header:
                    # Older outputs lack an f_ref column; recover the value
                    # from the run header and append it to every sample row.
                    write_fref = True
                    f_ref=self._find_infmcmc_f_ref(runInfo)
                if oldMassConvention:
                    # Swap #1 for #2 because our old mass convention
                    # has m2 > m1, while the common convention has m1
                    # > m2
                    header=[self._swaplabel12(label) for label in header]
                # Only the first file contributes a header line; later files
                # are assumed to share the same column layout.
                if not outputHeader:
                    for label in header:
                        outfile.write(label)
                        outfile.write(" ")
                    if write_fref:
                        outfile.write("f_ref")
                        outfile.write(" ")
                    outfile.write("chain")
                    outfile.write("\n")
                    outputHeader=header
                iterindex=header.index("cycle")
                logpindex=header.index("logpost")
                output=False
                for line in infile:
                    line=line.lstrip()
                    lineParams=line.split()
                    iter=int(lineParams[iterindex])
                    logP=float(lineParams[logpindex])
                    # Once both burnin criteria are met, output stays on for
                    # the remainder of this file.
                    if (iter > fixedBurnin) and (logP >= logPThreshold):
                        output=True
                    if output:
                        # Thin by nskip; nRead counts post-burnin rows across
                        # all files, so the stride continues between files.
                        if nRead % nskip == 0:
                            for label in outputHeader:
                                # Note that the element "a1" in the
                                # *header* actually already
                                # corresponds to the "a2" *column* of
                                # the input because we switched the
                                # names above
                                outfile.write(lineParams[header.index(label)])
                                outfile.write("\t")
                            if write_fref:
                                outfile.write(f_ref)
                                outfile.write("\t")
                            outfile.write(str(i))
                            outfile.write("\n")
                        nRead=nRead+1
                if output: acceptedChains += 1
            finally:
                infile.close()
        print("%i of %i chains accepted."%(acceptedChains,len(files)))
5461
5462 def _swaplabel12(self, label):
5463 if label[-1] == '1':
5464 return label[0:-1] + '2'
5465 elif label[-1] == '2':
5466 return label[0:-1] + '1'
5467 else:
5468 return label[:]
5469
5470 def _find_max_logP(self, files):
5471 """
5472 Given a list of files, reads them, finding the maximum log(post)
5473 """
5474 maxLogP = -np.inf
5475 for inpname in files:
5476 infile=open(inpname, 'r')
5477 try:
5478 runInfo,header=self._clear_infmcmc_header(infile)
5479 logpindex=header.index("logpost")
5480 for line in infile:
5481 line=line.lstrip().split()
5482 logP=float(line[logpindex])
5483 if logP > maxLogP:
5484 maxLogP=logP
5485 finally:
5486 infile.close()
5487 print("Found max log(post) = ", maxLogP)
5488 return maxLogP
5489
5490 def _find_ndownsample(self, files, logPthreshold, fixedBurnins, nDownsample):
5491 """
5492 Given a list of files, threshold value, and a desired
5493 number of outputs posterior samples, return the skip number to
5494 achieve the desired number of posterior samples.
5495 """
5496 nfiles = len(files)
5497 ntots=[]
5498 nEffectives = []
5499 if nDownsample is None: print("Max ACL(s):")
5500 for inpname,fixedBurnin in zip(files,fixedBurnins):
5501 infile = open(inpname, 'r')
5502 try:
5503 runInfo,header = self._clear_infmcmc_header(infile)
5504 header = [name.lower() for name in header]
5505 logpindex = header.index("logpost")
5506 iterindex = header.index("cycle")
5507 deltaLburnedIn = False
5508 fixedBurnedIn = False
5509 adapting = True
5510 lines=[]
5511 ntot=0
5512 for line in infile:
5513 line = line.lstrip().split()
5514 iter = int(line[iterindex])
5515 logP = float(line[logpindex])
5516 if iter > fixedBurnin:
5517 fixedBurnedIn = True
5518 # If adaptation reset, throw out what was collected so far
5519 elif fixedBurnedIn:
5520 fixedBurnedIn = False
5521 ntot = 0
5522 lines = []
5523 if logP > logPthreshold:
5524 deltaLburnedIn = True
5525 if iter > 0:
5526 adapting = False
5527 if fixedBurnedIn and deltaLburnedIn and not adapting:
5528 ntot += 1
5529 lines.append(line)
5530 ntots.append(ntot)
5531 if nDownsample is None:
5532 try:
5533 splineParams=["spcal_npts", "spcal_active","constantcal_active"]
5534 for i in np.arange(5):
5535 for k in ['h1','l1']:
5536 splineParams.append(k+'_spcal_freq'+str(i))
5537 splineParams.append(k+'_spcal_logfreq'+str(i))
5538
5539 nonParams = ["logpost", "cycle", "timestamp", "snrh1", "snrl1", "snrv1",
5540 "margtime","margtimephi","margtime","time_max","time_min",
5541 "time_mean", "time_maxl","sky_frame","psdscaleflag","logdeltaf","flow","f_ref",
5542 "lal_amporder","lal_pnorder","lal_approximant","tideo","spino","signalmodelflag",
5543 "temperature","nifo","nlocaltemps","ntemps","randomseed","samplerate","segmentlength","segmentstart",
5544 "t0", "phase_maxl", "azimuth", "cosalpha"] + logParams + snrParams + splineParams
5545 nonParamsIdxs = [header.index(name) for name in nonParams if name in header]
5546 samps = np.array(lines).astype(float)
5547 fixedIdxs = np.where(np.amin(samps,axis=0)-np.amax(samps,axis=0) == 0.0)[0]
5548 nonParamsIdxs.extend(fixedIdxs)
5549 paramIdxs = [i for i in range(len(header)) if i not in nonParamsIdxs]
5550 stride=samps[1,iterindex] - samps[0,iterindex]
5551 results = np.array([np.array(effectiveSampleSize(samps[:,i])[:2]) for i in paramIdxs])
5552 nEffs = results[:,0]
5553 nEffectives.append(min(nEffs))
5554 ACLs = results[:,1]
5555 maxACLind = np.argmax(ACLs)
5556 maxACL = ACLs[maxACLind]
5557 # Get index in header, which includes "non-params"
5558 maxACLind = paramIdxs[maxACLind]
5559 print("%i (%s) for chain %s." %(stride*maxACL,header[maxACLind],inpname))
5560 except:
5561 nEffectives.append(None)
5562 print("Error computing effective sample size of %s!"%inpname)
5563
5564 finally:
5565 infile.close()
5566 nskips = np.ones(nfiles)
5567 ntot = sum(ntots)
5568 if nDownsample is not None:
5569 if ntot > nDownsample:
5570 nskips *= int(floor(ntot/nDownsample))
5571 else:
5572 for i in range(nfiles):
5573 nEff = nEffectives[i]
5574 ntot = ntots[i]
5575 if nEff > 1:
5576 if ntot > nEff:
5577 nskips[i] = int(ceil(ntot/nEff))
5578 else:
5579 nskips[i] = None
5580 return nskips
5581
5582 def _find_infmcmc_f_ref(self, runInfo):
5583 """
5584 Searches through header to determine reference frequency of waveforms.
5585 If no fRef given, calls _find_infmcmc_f_lower to get the lower frequency
5586 bound, which is the default reference frequency for LALInference.
5587 """
5588 fRef = None
5589 runInfoIter = iter(runInfo)
5590 for line in runInfoIter:
5591 headers=line.lstrip().lower().split()
5592 try:
5593 fRefColNum = headers.index('f_ref') # strings get converted to all lower case
5594 info = runInfoIter.next().lstrip().lower().split()
5595 fRef = info[-1]#fRefColNum] # too many column names with spaces for this way to work. I just grab the last value. Hopefully we will update to xml output files and those messy headers will be gone.
5596 break
5597 except ValueError:
5598 continue
5599
5600 # ***TEMPORARY*** If not in table, check command line.
5601 # ...This is messy, but the only option for dealing with old headers
5602 if not fRef:
5603 runInfoIter = iter(runInfo)
5604 for line in runInfoIter:
5605 headers=line.lstrip().lower().split()
5606 try:
5607 if headers[0]=="command":
5608 try:
5609 fRefInd = headers.index('--fref')+1
5610 fRef = headers[fRefInd]
5611 except ValueError:
5612 pass
5613 break
5614 except IndexError:
5615 continue
5616
5617 # If no fRef is found, use lower frequency bound
5618 if not fRef:
5619 fRef = self._find_infmcmc_f_lower(runInfo)
5620
5621 return fRef
5622
5623 def _find_infmcmc_f_lower(self, runInfo):
5624 """
5625 Searches through header to determine starting frequency of waveforms.
5626 Assumes same for all IFOs.
5627 """
5628 runInfo = iter(runInfo)
5629 for line in runInfo:
5630 headers=line.lstrip().lower().split()
5631 try:
5632 flowColNum = headers.index('f_low')
5633 IFOinfo = runInfo.next().lstrip().lower().split()
5634 f_lower = IFOinfo[flowColNum]
5635 break
5636 except ValueError:
5637 continue
5638 return f_lower
5639
    def _clear_infmcmc_header(self, infile):
        """
        Reads lalinference_mcmcmpi file given, returning the run info and
        common output header information.

        Consumes lines from the open file object *infile* up to and
        including the column-header line (the first line containing the
        token 'cycle'), leaving the file positioned at the first sample.

        @return: (runInfo, headers) where runInfo is every consumed line
            *except* the column-header line, and headers is that line
            lower-cased and split into column names.
        """
        runInfo = []
        for line in infile:
            runInfo.append(line)
            headers=line.lstrip().lower().split()
            try:
                headers.index('cycle')
                break
            except ValueError:
                continue
        # for/else: runs only when the loop exhausted the file without
        # hitting 'break', i.e. no column-header line was found.
        else:
            raise RuntimeError("couldn't find line with 'cycle' in LALInferenceMCMC input")
        # Drop the column-header line itself from the run info.
        return runInfo[:-1],headers
5657
5658
5659 def _ns_to_pos(self,files,Nlive=None,Npost=None,posfilename='posterior_samples.dat'):
5660 """
5661 Parser for nested sampling output.
5662 files : list of input NS files
5663 Nlive : Number of live points
5664 Npost : Desired number of posterior samples
5665 posfilename : Posterior output file name (default: 'posterior_samples.dat')
5666 """
5667 try:
5668 from lalinference.nest2pos import draw_N_posterior_many,draw_posterior_many
5669 except ImportError:
5670 print("Need lalinference.nest2pos to convert nested sampling output!")
5671 raise
5672
5673 if Nlive is None:
5674 raise RuntimeError("Need to specify number of live points in positional arguments of parse!")
5675
5676 #posfile.write('mchirp \t eta \t time \t phi0 \t dist \t RA \t dec \t
5677 #psi \t iota \t likelihood \n')
5678 # get parameter list
5679 it = iter(files)
5680
5681 # check if there's a file containing the parameter names
5682 parsfilename = (it.next()).strip('.gz')+'_params.txt'
5683
5684 if os.path.isfile(parsfilename):
5685 print('Looking for '+parsfilename)
5686
5687 if os.access(parsfilename,os.R_OK):
5688
5689 with open(parsfilename,'r') as parsfile:
5690 outpars=parsfile.readline()+'\n'
5691 else:
5692 raise RuntimeError('Cannot open parameters file %s!'%(parsfilename))
5693
5694 else: # Use hardcoded CBC parameter names
5695 outpars='mchirp \t eta \t time \t phi0 \t dist \t RA \t \
5696 dec \t psi \t iota \t logl \n'
5697
5698 # Find the logL column
5699 parsvec=outpars.split()
5700 logLcol=-1
5701 for i in range(len(parsvec)):
5702 if parsvec[i].lower()=='logl':
5703 logLcol=i
5704 if logLcol==-1:
5705 print('Error! Could not find logL column in parameter list: %s'%(outpars))
5706 raise RuntimeError
5707
5708 inarrays=list(map(np.loadtxt,files))
5709 if Npost is None:
5710 pos=draw_posterior_many(inarrays,[Nlive for f in files],logLcols=[logLcol for f in files])
5711 else:
5712 pos=draw_N_posterior_many(inarrays,[Nlive for f in files],Npost,logLcols=[logLcol for f in files])
5713
5714 with open(posfilename,'w') as posfile:
5715
5716 posfile.write(outpars)
5717
5718 for row in pos:
5719 for i in row:
5720 posfile.write('%10.12e\t' %(i))
5721 posfile.write('\n')
5722
5723 with open(posfilename,'r') as posfile:
5724 return_val=self._common_to_pos(posfile)
5725
5726 return return_val
5727
5728 def _followupmcmc_to_pos(self,files):
5729 """
5730 Parser for followupMCMC output.
5731 """
5732 return self._common_to_pos(open(files[0],'r'),delimiter=',')
5733
5734
5735 def _multinest_to_pos(self,files):
5736 """
5737 Parser for MultiNest output.
5738 """
5739 return self._common_to_pos(open(files[0],'r'))
5740
    def _xml_to_pos(self,infile):
        """
        Parser for VOTable XML input.

        Looks for a TABLE element flagged as posterior samples and converts
        it via _VOTTABLE2pos; failing that, converts a nested-samples TABLE
        through vo_nest2pos first.

        @param infile: path or file object accepted by ElementTree.parse.
        @return: (header, samples) pair from _VOTTABLE2pos.
        """
        from xml.etree import ElementTree as ET
        xmlns='http://www.ivoa.net/xml/VOTable/v1.1'
        try:
            register_namespace=ET.register_namespace
        except AttributeError:
            # Fallback for very old ElementTree without register_namespace.
            def register_namespace(prefix,uri):
                ET._namespace_map[uri]=prefix
        register_namespace('vot',xmlns)
        tree = ET.ElementTree()

        tree.parse(infile)
        # Find the posterior table
        tables = tree.findall('.//{%s}TABLE'%(xmlns))
        for table in tables:
            if table.get('utype')=='lalinference:results:posteriorsamples':
                return(self._VOTTABLE2pos(table))
        # No posterior table: resample nested samples into posteriors.
        for table in tables:
            if table.get('utype')=='lalinference:results:nestedsamples':
                nsresource=[node for node in tree.findall('{%s}RESOURCE'%(xmlns)) if node.get('utype')=='lalinference:results'][0]
                return(self._VOTTABLE2pos(vo_nest2pos(nsresource)))
        raise RuntimeError('Cannot find "Posterior Samples" TABLE element in XML input file %s'%(infile))
5766
    def _VOTTABLE2pos(self,table):
        """
        Parser for a VOT TABLE element with FIELDs and TABLEDATA elements.

        Reads the column names from the FIELD nodes and the numeric rows
        from DATA/TABLEDATA, then undoes log/sin/cos transforms encoded in
        the column names (renaming the columns accordingly).

        @param table: an ElementTree TABLE element in the VOTable namespace.
        @return: (header, flines) where header is the list of column names
            and flines a 2-D float array of samples.
        """
        from xml.etree import ElementTree as ET
        xmlns='http://www.ivoa.net/xml/VOTable/v1.1'
        try:
            register_namespace=ET.register_namespace
        except AttributeError:
            # Fallback for very old ElementTree without register_namespace.
            def register_namespace(prefix,uri):
                ET._namespace_map[uri]=prefix
        register_namespace('vot',xmlns)

        header=[]
        for field in table.findall('./{%s}FIELD'%(xmlns)):
            header.append(field.attrib['name'])
        if(len(header)==0):
            raise RuntimeError('Unable to find FIELD nodes for table headers in XML table')
        data=table.findall('./{%s}DATA'%(xmlns))
        tabledata=data[0].find('./{%s}TABLEDATA'%(xmlns))
        llines=[]
        for row in tabledata:
            llines.append(np.array(list(map(lambda a:float(a.text),row))))
        flines=np.array(llines)
        # Undo sampling-space transforms: a column named log*/sin*/cos* is
        # mapped back to the physical parameter unless the untransformed
        # column already exists or the name is in the exemption lists.
        for i in range(0,len(header)):
            if header[i].lower().find('log')!=-1 and header[i].lower() not in logParams and re.sub('log', '', header[i].lower()) not in [h.lower() for h in header] and header[i].lower() not in lorentzInvarianceViolationParams:
                print('exponentiating %s'%(header[i]))

                flines[:,i]=np.exp(flines[:,i])

                header[i]=re.sub('log', '', header[i], flags=re.IGNORECASE)
            if header[i].lower().find('sin')!=-1 and re.sub('sin', '', header[i].lower()) not in [h.lower() for h in header]:
                print('asining %s'%(header[i]))
                flines[:,i]=np.arcsin(flines[:,i])
                header[i]=re.sub('sin', '', header[i], flags=re.IGNORECASE)
            if header[i].lower().find('cos')!=-1 and re.sub('cos', '', header[i].lower()) not in [h.lower() for h in header]:
                print('acosing %s'%(header[i]))
                flines[:,i]=np.arccos(flines[:,i])
                header[i]=re.sub('cos', '', header[i], flags=re.IGNORECASE)
            header[i]=header[i].replace('(','')
            header[i]=header[i].replace(')','')
        print('Read columns %s'%(str(header)))
        return header,flines
5810
5811 def _hdf5s_to_pos(self, infiles, fixedBurnins=None, deltaLogP=None, nDownsample=None, tablename=None, **kwargs):
5812 from astropy.table import vstack
5813
5814 if fixedBurnins is None:
5815 fixedBurnins = np.zeros(len(infiles))
5816
5817 if len(infiles) > 1:
5818 multiple_chains = True
5819
5820 chains = []
5821 for i, [infile, fixedBurnin] in enumerate(zip(infiles, fixedBurnins)):
5822 chain = self._hdf5_to_table(infile, fixedBurnin=fixedBurnin, deltaLogP=deltaLogP, nDownsample=nDownsample, multiple_chains=multiple_chains, tablename=tablename, **kwargs)
5823 chain.add_column(astropy.table.Column(i*np.ones(len(chain)), name='chain'))
5824 chains.append(chain)
5825
5826 # Apply deltaLogP criteria across chains
5827 if deltaLogP is not None:
5828 logPThreshold = -np.inf
5829 for chain in chains:
5830 if len(chain) > 0:
5831 logPThreshold = max([logPThreshold, max(chain['logpost'])- deltaLogP])
5832 print("Eliminating any samples before log(L) = {}".format(logPThreshold))
5833
5834 for i, chain in enumerate(chains):
5835 if deltaLogP is not None:
5836 above_threshold = np.arange(len(chain))[chain['logpost'] > logPThreshold]
5837 burnin_idx = above_threshold[0] if len(above_threshold) > 0 else len(chain)
5838 else:
5839 burnin_idx = 0
5840 chains[i] = chain[burnin_idx:]
5841
5842 samples = vstack(chains)
5843
5844 # Downsample one more time
5845 if nDownsample is not None:
5846 nskip = find_ndownsample(samples, nDownsample)
5847 samples = samples[::nskip]
5848
5849 return samples.colnames, as_array(samples).view(float).reshape(-1, len(samples.columns))
5850
    def _hdf5_to_table(self, infile, deltaLogP=None, fixedBurnin=None, nDownsample=None, multiple_chains=False, tablename=None, **kwargs):
        """
        Parse a HDF5 file and return an array of posterior samples and list of
        parameter names. Equivalent to '_common_to_pos' and work in progress.

        @param infile: HDF5 file path passed to read_samples.
        @param deltaLogP: discard samples before logpost first exceeds
            max(logpost) - deltaLogP.
        @param fixedBurnin: discard samples with cycle <= fixedBurnin.
        @param nDownsample: approximate number of samples to keep; if None,
            thin to roughly uncorrelated samples.
        @param multiple_chains: True when called as part of a multi-chain
            parse; suppresses the single-chain correlated-samples fallback.
        @param tablename: HDF5 table name (default: module-level
            posterior_grp_name).
        @return: an astropy Table of (possibly thinned) samples.
        """
        if not tablename:
            samples = read_samples(infile, tablename=posterior_grp_name)
        else:
            samples = read_samples(infile, tablename=tablename)
        params = samples.colnames

        # Undo sampling-space transforms encoded in column names
        # (log*/sin*/cos*), renaming columns to the physical parameter.
        for param in params:
            param_low = param.lower()
            if param_low.find('log') != -1 and param_low not in logParams and re.sub('log', '', param_low) not in [p.lower() for p in params] and param_low not in lorentzInvarianceViolationParams:
                print('exponentiating %s' % param)
                new_param = re.sub('log', '', param, flags=re.IGNORECASE)
                samples[new_param] = np.exp(samples[param])
                del samples[param]
                param = new_param
            if param_low.find('sin') != -1 and re.sub('sin', '', param_low) not in [p.lower() for p in params]:
                print('asining %s' % param)
                new_param = re.sub('sin', '', param, flags=re.IGNORECASE)
                samples[new_param] = np.arcsin(samples[param])
                del samples[param]
                param = new_param
            if param_low.find('cos') != -1 and re.sub('cos', '', param_low) not in [p.lower() for p in params]:
                print('acosing %s' % param)
                new_param = re.sub('cos', '', param, flags=re.IGNORECASE)
                samples[new_param] = np.arccos(samples[param])
                del samples[param]
                param = new_param

            # NOTE(review): after a rename here, 'param' still holds the old
            # name when the later astype/replace_column runs — looks like a
            # latent KeyError for names containing parentheses; confirm.
            if param != param.replace('(', ''):
                samples.rename_column(param, param.replace('(', ''))
            if param != param.replace(')', ''):
                samples.rename_column(param, param.replace(')', ''))

            #Make everything a float, since that's what's expected of a CommonResultsObj
            replace_column(samples, param, samples[param].astype(float))

        params = samples.colnames
        print('Read columns %s' % str(params))

        # MCMC burnin and downsampling
        if 'cycle' in params:
            if not (fixedBurnin is None):
                if not (deltaLogP is None):
                    print("Warning: using deltaLogP criteria in addition to fixed burnin")
                print("Fixed burning criteria: ",fixedBurnin)
            else:
                fixedBurnin = 0

            # Drop everything before the first cycle past the fixed burnin.
            burned_in_cycles = np.arange(len(samples))[samples['cycle'] > fixedBurnin]
            burnin_idx = burned_in_cycles[0] if len(burned_in_cycles) > 0 else len(samples)
            samples = samples[burnin_idx:]

            logPThreshold=-np.inf
            if len(samples) > 0 and not (deltaLogP is None):
                logPThreshold = max(samples['logpost'])- deltaLogP
                print("Eliminating any samples before log(post) = ", logPThreshold)
                burnin_idx = np.arange(len(samples))[samples['logpost'] > logPThreshold][0]
                samples = samples[burnin_idx:]

            if len(samples) > 0:
                nskip = find_ndownsample(samples, nDownsample)
                if nDownsample is None:
                    print("Downsampling to take only uncorrelated posterior samples from each file.")
                    if np.isnan(nskip) and not multiple_chains:
                        # Fully-correlated single chain: keep ~10000 samples
                        # anyway so the run can still be inspected.
                        print("WARNING: All samples in chain are correlated. Downsampling to 10000 samples for inspection!!!")
                        nskip = find_ndownsample(samples, 10000)
                    samples = samples[::nskip]
                else:
                    if np.isnan(nskip):
                        print("WARNING: All samples in {} are correlated.".format(infile))
                        samples = samples[-1:]
                    else:
                        print("Downsampling by a factor of ", nskip, " to achieve approximately ", nDownsample, " posterior samples")
                        samples = samples[::nskip]

        return samples
5931
5932 def _hdf5_to_pos(self, infile, fixedBurnins=None, deltaLogP=None, nDownsample=None, tablename=None, **kwargs):
5933 samples = self._hdf5_to_table(infile, fixedBurnin=fixedBurnins, deltaLogP=deltaLogP, nDownsample=nDownsample, tablename=tablename, **kwargs)
5934
5935 return samples.colnames, as_array(samples).view(float).reshape(-1, len(samples.columns))
5936
5937 def _common_to_pos(self,infile,info=[None,None]):
5938 """
5939 Parse a file in the 'common format' and return an array of posterior
5940 samples and list of parameter names. Will apply inverse functions to
5941 columns with names containing sin,cos,log.
5942 """
5943
5944 [headerfile,delimiter]=info
5945
5946 if headerfile==None:
5947 formatstr=infile.readline().lstrip()
5948 else:
5949 hf=open(headerfile,'r')
5950 formatstr=hf.readline().lstrip()
5951 hf.close()
5952
5953 formatstr=formatstr.replace('#','')
5954 formatstr=formatstr.replace('"','')
5955
5956 header=formatstr.split(delimiter)
5957 header[-1]=header[-1].rstrip('\n')
5958 nparams=len(header)
5959 llines=[]
5960 dec=re.compile(r'^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?$|^inf$')
5961
5962 for line_number,line in enumerate(infile):
5963 sline=line.split(delimiter)
5964 if sline[-1] == '\n':
5965 del(sline[-1])
5966 proceed=True
5967 if len(sline)<1:
5968 print('Ignoring empty line in input file: %s'%(sline))
5969 proceed=False
5970 elif len(sline)!=nparams:
5971 sys.stderr.write('WARNING: Malformed row %i, read %i elements but there is meant to be %i\n'%(line_number,len(sline),nparams))
5972 proceed=False
5973
5974 for elemn,st in enumerate(sline):
5975 s=st.replace('\n','')
5976 if dec.search(s) is None:
5977 print('Warning! Ignoring non-numeric data after the header: %s. Row = %i,Element=%i'%(s,line_number,elemn))
5978 proceed=False
5979 elif s == '\n':
5980 proceed=False
5981
5982 if proceed:
5983 llines.append(list(map(float,sline)))
5984
5985 flines=np.array(llines)
5986
5987 if not flines.any():
5988 raise RuntimeError("ERROR: no lines read in!")
5989
5990
5991 for i in range(0,len(header)):
5992 if header[i].lower().find('log')!=-1 and header[i].lower() not in logParams and re.sub('log', '', header[i].lower()) not in [h.lower() for h in header] and header[i].lower() not in lorentzInvarianceViolationParams:
5993 print('exponentiating %s'%(header[i]))
5994
5995 flines[:,i]=np.exp(flines[:,i])
5996
5997 header[i]=re.sub('log', '', header[i], flags=re.IGNORECASE)
5998 if header[i].lower().find('sin')!=-1 and re.sub('sin', '', header[i].lower()) not in [h.lower() for h in header]:
5999 print('asining %s'%(header[i]))
6000 flines[:,i]=np.arcsin(flines[:,i])
6001 header[i]=re.sub('sin', '', header[i], flags=re.IGNORECASE)
6002 if header[i].lower().find('cos')!=-1 and re.sub('cos', '', header[i].lower()) not in [h.lower() for h in header]:
6003 print('acosing %s'%(header[i]))
6004 flines[:,i]=np.arccos(flines[:,i])
6005 header[i]=re.sub('cos', '', header[i], flags=re.IGNORECASE)
6006 header[i]=header[i].replace('(','')
6007 header[i]=header[i].replace(')','')
6008 print('Read columns %s'%(str(header)))
6009 return header,flines
6010
6011
6013 result={}
6014 lines=fo.split('\n')
6015 chain_line=False
6016 for line in lines:
6017
6018 if '[1]' in line:
6019 key=line.replace('[1]','').strip(' ').strip('"')
6020 result[key]={}
6021 out=result[key]
6022 continue
6023 if result is not {}:
6024 if 'chain' in line:
6025 chain_line=True
6026 continue
6027 if chain_line:
6028 chain_line=False
6029 key=line.strip('"').split()[1]
6030 out[key]=[]
6031 out2=out[key]
6032 else:
6033 try:
6034 newline=line.strip('"').split()
6035 if newline is not []:
6036 out2.append(line.strip('"').split())
6037 except:
6038 pass
6039
6040 return result
6041#
6042
def vo_nest2pos(nsresource,Nlive=None):
    """
    Parse a VO Table RESOURCE containing nested sampling output and
    return a VOTable TABLE element with posterior samples in it.
    This can be added to an existing tree by the user.
    Nlive will be read from the nsresource, unless specified.

    @param nsresource: RESOURCE element holding the nested-samples TABLE.
    @param Nlive: number of live points; if None it is read from the
        run-state RESOURCE inside nsresource.
    Raises RuntimeError if no 'logL' FIELD is present.
    """
    from xml.etree import ElementTree as ET
    import copy
    from math import log, exp
    xmlns='http://www.ivoa.net/xml/VOTable/v1.1'
    try:
        register_namespace=ET.register_namespace
    except AttributeError:
        # Fallback for very old ElementTree without register_namespace.
        def register_namespace(prefix,uri):
            ET._namespace_map[uri]=prefix
    register_namespace('vot',xmlns)

    postable=ET.Element("{%s}TABLE"%(xmlns),attrib={'name':'Posterior Samples','utype':'lalinference:results:posteriorsamples'})
    i=0
    nstables=[resource for resource in nsresource.findall("./{%s}TABLE"%(xmlns)) if resource.get("utype")=="lalinference:results:nestedsamples"]

    nstable=nstables[0]
    if Nlive is None:
        runstateResource = [resource for resource in nsresource.findall("./{%s}RESOURCE"%(xmlns)) if resource.get("utype")=="lalinference:state"][0]
        algTable = [table for table in runstateResource.findall("./{%s}TABLE"%(xmlns)) if table.get("utype")=="lalinference:state:algorithmparams"][0]
        Nlive = int ([param for param in algTable.findall("./{%s}PARAM"%(xmlns)) if param.get("name")=='Nlive'][0].get('value'))
        print('Found Nlive %i'%(Nlive))
    if Nlive is None:
        raise RuntimeError("Cannot find number of live points in XML table, please specify")
    logLcol = None
    for fieldnode in nstable.findall('./{%s}FIELD'%xmlns):
        if fieldnode.get('name') == 'logL':
            logLcol=i
        i=i+1
        postable.append(copy.deepcopy(fieldnode))
    for paramnode in nstable.findall('./{%s}PARAM'%(xmlns)):
        postable.append(copy.deepcopy(paramnode))
    if logLcol is None:
        # BUG FIX: the exception object was created but never raised, so the
        # failure surfaced later as an obscure TypeError on row[None].
        raise RuntimeError("Unable to find logL column")
    posdataNode=ET.Element("{%s}DATA"%(xmlns))
    postabledataNode=ET.Element("{%s}TABLEDATA"%(xmlns))
    postable.append(posdataNode)
    posdataNode.append(postabledataNode)
    nstabledata=nstable.find('./{%s}DATA/{%s}TABLEDATA'%(xmlns,xmlns))
    # Nested-sampling prior-volume weights: each sample's posterior weight is
    # logL + logw, with logw shrinking by 1/Nlive per iteration.
    logw=log(1.0 - exp(-1.0/float(Nlive)))
    weights=[]
    for row in nstabledata:
        logL=float(row[logLcol].text)
        weights.append(logL-logw)
        logw=logw-1.0/float(Nlive)
    mw=max(weights)
    weights = [w - mw for w in weights]
    # Rejection-sample rows proportionally to their normalized weights.
    for (row,weight) in zip(nstabledata,weights):
        if weight > log(random.random()):
            postabledataNode.append(copy.deepcopy(row))
    return postable
6100
# Namespace URI for all VOTable v1.1 elements handled by the classes below.
xmlns='http://www.ivoa.net/xml/VOTable/v1.1'
6102
6104 def __init__(self):
6105 self.html=htmlSection("VOTable information")
6107
6108 def start(self,tag,attrib):
6109 if tag=='{%s}TABLE'%(xmlns):
6110 if attrib['utype']=='lalinference:results:nestedsamples'\
6111 or attrib['utype']=='lalinference:results:posteriorsamples':
6112 self.skiptable=1
6113 else:
6114 self.skiptable=0
6116 self.tableouter.h2(attrib['name'])
6117 try:
6118 self.tableouter.p(attrib['utype'])
6119 except KeyError:
6120 pass
6121 self.fixedparams=htmlChunk('table',attrib={'class':'statstable'},parent=self.tableouter)
6122 self.table=htmlChunk('table',attrib={'class':'statstable'},parent=self.tableouter)
6123 self.tabheader=htmlChunk('tr',parent=self.table)
6124 if tag=='{%s}FIELD'%(xmlns):
6125 self.field=htmlChunk('th',{'name':attrib['name']},parent=self.tabheader)
6126 if tag=='{%s}TR'%(xmlns):
6127 self.tabrow=htmlChunk('tr',parent=self.table)
6128 if tag=='{%s}TD'%(xmlns):
6129 self.td=htmlChunk('td',parent=self.tabrow)
6130 if tag=='{%s}PARAM'%(xmlns):
6131 pnode=htmlChunk('tr',parent=self.fixedparams)
6132 namenode=htmlChunk('td',parent=pnode)
6133 namenode.p(attrib['name'])
6134 valnode=htmlChunk('td',parent=pnode)
6135 valnode.p(attrib['value'])
6136
    def end(self,tag):
        """ElementTree TreeBuilder callback: close the element *tag*.

        Emits the buffered HTML table for a finished TABLE (unless it was
        flagged to skip) and flushes the last text node into the pending
        FIELD header cell or TD data cell.
        """
        if tag=='{%s}TABLE'%(xmlns):
            if not self.skiptable:
                self.html.append(self.tableouter._html)
        if tag=='{%s}FIELD'%(xmlns):
            # self.datadata was set by the most recent data() call.
            self.field.p(self.datadata)
        if tag=='{%s}TD'%(xmlns):
            self.td.p(self.datadata)
6145
    def data(self,data):
        """ElementTree TreeBuilder callback: buffer a text node.

        The buffered text is consumed by end() when the enclosing FIELD or
        TD element closes.
        """
        self.datadata=data
6148
    def close(self):
        """Finish parsing and return the accumulated HTML as a string."""
        return self.html.toprettyxml()
6151
6152def _cl_width(cl_bound):
6153 """Returns (high - low), the width of the given confidence
6154 bounds."""
6155
6156 return cl_bound[1] - cl_bound[0]
6157
6158def _cl_count(cl_bound, samples):
6159 """Returns the number of samples within the given confidence
6160 bounds."""
6161
6162 return np.sum((samples >= cl_bound[0]) & (samples <= cl_bound[1]))
6163
def confidence_interval_uncertainty(cl, cl_bounds, posteriors):
    """Returns a tuple (relative_change, fractional_uncertainty,
    percentile_uncertainty) giving the uncertainty in confidence
    intervals from multiple posteriors.

    The uncertainty in the confidence intervals is the difference in
    length between the widest interval, formed from the smallest to
    largest values among all the cl_bounds, and the narrowest
    interval, formed from the largest-small and smallest-large values
    among all the cl_bounds.  Note that neither the smallest nor the
    largest confidence intervals necessarily correspond to one of the
    cl_bounds.

    The relative change relates the confidence interval uncertainty to
    the expected value of the parameter, the fractional uncertainty
    relates this length to the length of the confidence level from the
    combined posteriors, and the percentile uncertainty gives the
    change in percentile over the combined posterior between the
    smallest and largest confidence intervals.

    @param cl The confidence level (between 0 and 1).

    @param cl_bounds A list of (low, high) pairs giving the confidence
    interval associated with each posterior.

    @param posteriors A list of PosteriorOneDPDF objects giving the
    posteriors."""

    counts = [p.samples.shape[0] for p in posteriors]
    nruns = len(counts)

    # Pool all samples; weight each run equally overall, and each sample
    # within a run equally (weight 1/(nruns*n) for a run with n samples).
    pooled = np.squeeze(np.concatenate([p.samples for p in posteriors], axis=0))
    wts = np.squeeze(np.concatenate([p.samples*0.0+1.0/(nruns*n) for (n,p) in zip(counts,posteriors)], axis=0))

    order = np.argsort(pooled)
    pooled = pooled[order]
    wts = wts[order]

    param_mean = np.average(pooled, weights=wts)
    ntotal = pooled.shape[0]

    # Symmetric tails: alpha probability below and above the interval.
    alpha = (1.0 - cl)/2.0

    cumwt = np.cumsum(wts)
    ilow = np.nonzero(cumwt >= alpha)[0][0]
    ihigh = np.nonzero(cumwt >= 1.0-alpha)[0][0]

    # Confidence interval of the combined (pooled) posterior.
    all_cl_bound = (pooled[ilow], pooled[ihigh])

    lows = np.array([b[0] for b in cl_bounds])
    highs = np.array([b[1] for b in cl_bounds])

    largest_cl_bound = (np.min(lows), np.max(highs))
    smallest_cl_bound = (np.max(lows), np.min(highs))

    if smallest_cl_bound[1] < smallest_cl_bound[0]:
        # The intersection of the intervals is empty: treat as NULL.
        smallest_cl_bound = (0.0, 0.0)

    ci_uncertainty = _cl_width(largest_cl_bound) - _cl_width(smallest_cl_bound)

    relative_change = ci_uncertainty/param_mean
    frac_uncertainty = ci_uncertainty/_cl_width(all_cl_bound)
    quant_uncertainty = float(_cl_count(largest_cl_bound, pooled) - _cl_count(smallest_cl_bound, pooled))/float(ntotal)

    return (relative_change, frac_uncertainty, quant_uncertainty)
6236
6237
def plot_waveform(pos=None,siminspiral=None,event=0,path=None,ifos=['H1','L1','V1']):
    """Plot the injected and/or maximum-posterior recovered waveform
    projected onto each detector in ifos, in time and frequency domain.

    @param pos Posterior object; its maxP sample is used for the recovered
    waveform (may be None to plot the injection only)
    @param siminspiral Path to a sim_inspiral XML file with the injection
    (may be None to plot the recovered waveform only)
    @param event Row index of the injection in the sim_inspiral table
    @param path Output directory for WF_DetFrame.png (default: cwd)
    @param ifos List of detector prefixes to project the waveforms onto

    Returns (inj_strains, rec_strains): per-IFO dictionaries holding
    time-domain ("T") and frequency-domain ("F") strain arrays, or None
    if no waveform could be generated.
    """
    #import sim inspiral table content handler
    from igwn_ligolw import lsctables,ligolw
    from lalsimulation.lalsimulation import SimInspiralChooseTDWaveform,SimInspiralChooseFDWaveform
    from lalsimulation.lalsimulation import SimInspiralImplementedTDApproximants,SimInspiralImplementedFDApproximants
    from lal.lal import CreateREAL8TimeSeries,CreateForwardREAL8FFTPlan,CreateTukeyREAL8Window,CreateCOMPLEX16FrequencySeries,DimensionlessUnit,REAL8TimeFreqFFT
    from lal.lal import ComputeDetAMResponse, GreenwichMeanSiderealTime
    from lal.lal import LIGOTimeGPS
    from lal.lal import MSUN_SI as LAL_MSUN_SI
    from lal.lal import PC_SI as LAL_PC_SI
    # Local imports so the function does not depend on module-level state
    # (lal.CreateDict / lal.cached_detector_by_prefix and pyplot are used below)
    import lal
    from matplotlib import pyplot as plt
    import lalsimulation as lalsim
    from math import cos,sin,sqrt
    from igwn_ligolw import utils
    import os
    import numpy as np
    from numpy import arange
    if path is None:
        path=os.getcwd()
    if event is None:
        event=0
    colors_inj={'H1':'r','L1':'g','V1':'m','I1':'b','J1':'y'}
    colors_rec={'H1':'k','L1':'k','V1':'k','I1':'k','J1':'k'}
    # time and freq data handling variables
    srate=4096.0
    seglen=60.
    length=srate*seglen # length of 60 secs, hardcoded. May call a LALSimRoutine to get an idea
    deltaT=1/srate
    deltaF = 1.0 / (length* deltaT);

    # build window for FFT
    pad=0.4
    timeToFreqFFTPlan = CreateForwardREAL8FFTPlan(int(length), 1 );
    window=CreateTukeyREAL8Window(int(length),2.0*pad*srate/length);
    WinNorm = sqrt(window.sumofsquares/window.data.length);
    # time and freq domain strain:
    segStart=100000000
    strainT=CreateREAL8TimeSeries("strainT",segStart,0.0,1.0/srate,DimensionlessUnit,int(length));
    strainF= CreateCOMPLEX16FrequencySeries("strainF",segStart, 0.0, deltaF, DimensionlessUnit,int(length/2. +1));

    f_min=25 # hardcoded default (may be changed below)
    f_ref=100 # hardcoded default (may be changed below)
    f_max=srate/2.0
    plot_fmax=f_max

    inj_strains=dict((i,{"T":{'x':None,'strain':None},"F":{'x':None,'strain':None}}) for i in ifos)
    rec_strains=dict((i,{"T":{'x':None,'strain':None},"F":{'x':None,'strain':None}}) for i in ifos)

    inj_domain=None
    rec_domain=None
    font_size=26
    if siminspiral is not None:
        skip=0
        try:
            xmldoc = utils.load_filename(siminspiral)
            tbl = lsctables.SimInspiralTable.get_table(xmldoc)
            if event>0:
                tbl=tbl[event]
            else:
                tbl=tbl[0]
        except:
            e = sys.exc_info()[0]
            print(e)
            print("Cannot read event %s from table %s. Won't plot injected waveform \n"%(event,siminspiral))
            skip=1
        if not skip:
            REAL8time=tbl.geocent_end_time+1e-9*tbl.geocent_end_time_ns
            GPStime=LIGOTimeGPS(REAL8time)
            M1=tbl.mass1
            M2=tbl.mass2
            D=tbl.distance
            m1=M1*LAL_MSUN_SI
            m2=M2*LAL_MSUN_SI
            phiRef=tbl.coa_phase

            f_min = tbl.f_lower
            s1x = tbl.spin1x
            s1y = tbl.spin1y
            s1z = tbl.spin1z
            s2x = tbl.spin2x
            s2y = tbl.spin2y
            s2z = tbl.spin2z

            r=D*LAL_PC_SI*1.0e6
            iota=tbl.inclination
            print("WARNING: Defaulting to inj_fref =100Hz to plot the injected WF. This is hardcoded since xml table does not carry this information\n")

            lambda1=0
            lambda2=0
            wf=str(tbl.waveform)

            injapproximant=lalsim.GetApproximantFromString(wf)
            amplitudeO=int(tbl.amp_order )
            phaseO=lalsim.GetOrderFromString(wf)

            waveFlags=lal.CreateDict()
            lalsim.SimInspiralWaveformParamsInsertPNAmplitudeOrder(waveFlags, amplitudeO)
            lalsim.SimInspiralWaveformParamsInsertPNPhaseOrder(waveFlags, phaseO)

            ra=tbl.longitude
            dec=tbl.latitude
            psi=tbl.polarization

            if SimInspiralImplementedFDApproximants(injapproximant):
                inj_domain='F'
                [plus,cross]=SimInspiralChooseFDWaveform(m1, m2, s1x, s1y, s1z,s2x,s2y,s2z,r, iota, phiRef,
                                                         0, 0, 0, # Non-circular binary parameters
                                                         deltaF, f_min, f_max, f_ref,
                                                         waveFlags, injapproximant)
            elif SimInspiralImplementedTDApproximants(injapproximant):
                inj_domain='T'
                [plus,cross]=SimInspiralChooseTDWaveform(m1, m2, s1x, s1y, s1z,s2x,s2y,s2z, r, iota, phiRef,
                                                         0, 0, 0, # Non-circular binary parameters
                                                         deltaT, f_min, f_ref,
                                                         waveFlags, injapproximant)
            else:
                print("\nThe approximant %s doesn't seem to be recognized by lalsimulation!\n Skipping WF plots\n"%injapproximant)
                return None

            for ifo in ifos:
                fp, fc = ComputeDetAMResponse(lal.cached_detector_by_prefix[ifo].response, ra, dec, psi, GreenwichMeanSiderealTime(REAL8time))
                if inj_domain=='T':
                    # strain is a temporary container for this IFO strain.
                    # Take antenna pattern into accout and window the data
                    for k in np.arange(strainT.data.length):
                        if k<plus.data.length:
                            strainT.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
                        else:
                            strainT.data.data[k]=0.0
                        strainT.data.data[k]*=window.data.data[k]
                    # now copy in the dictionary only the part of strain which is not null (that is achieved using plus.data.length as length)
                    inj_strains[ifo]["T"]['strain']=np.array([strainT.data.data[k] for k in arange(plus.data.length)])
                    inj_strains[ifo]["T"]['x']=np.array([REAL8time - deltaT*(plus.data.length-1-k) for k in np.arange(plus.data.length)])

                    # Take the FFT
                    for j in arange(strainF.data.length):
                        strainF.data.data[j]=0.0
                    REAL8TimeFreqFFT(strainF,strainT,timeToFreqFFTPlan);
                    for j in arange(strainF.data.length):
                        strainF.data.data[j]/=WinNorm
                    # copy in the dictionary
                    inj_strains[ifo]["F"]['strain']=np.array([strainF.data.data[k] for k in arange(int(strainF.data.length))])
                    inj_strains[ifo]["F"]['x']=np.array([strainF.f0+ k*strainF.deltaF for k in arange(int(strainF.data.length))])
                elif inj_domain=='F':
                    for k in np.arange(strainF.data.length):
                        if k<plus.data.length:
                            strainF.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
                        else:
                            strainF.data.data[k]=0.0
                    # copy in the dictionary
                    inj_strains[ifo]["F"]['strain']=np.array([strainF.data.data[k] for k in arange(int(strainF.data.length))])
                    inj_strains[ifo]["F"]['x']=np.array([strainF.f0+ k*strainF.deltaF for k in arange(int(strainF.data.length))])
    if pos is not None:

        # Select the maxP sample
        _,which=pos._posMap()

        if 'time' in pos.names:
            REAL8time=pos['time'].samples[which][0]
        elif 'time_maxl' in pos.names:
            REAL8time=pos['time_maxl'].samples[which][0]
        elif 'time_min' in pos.names and 'time_max' in pos.names:
            REAL8time=pos['time_min'].samples[which][0]+0.5*(pos['time_max'].samples[which][0]-pos['time_min'].samples[which][0])
        else:
            print("ERROR: could not find any time parameter in the posterior file. Not plotting the WF...\n")
            return None

        # first check we have approx in posterior samples, otherwise skip
        skip=0
        try:
            approximant=int(pos['LAL_APPROXIMANT'].samples[which][0])
            amplitudeO=int(pos['LAL_AMPORDER'].samples[which][0])
            phaseO=int(pos['LAL_PNORDER'].samples[which][0])
        except:
            skip=1
        if skip==0:
            GPStime=LIGOTimeGPS(REAL8time)

            q=pos['q'].samples[which][0]
            mc=pos['mc'].samples[which][0]
            M1,M2=q2ms(mc,q)
            if 'dist' in pos.names:
                D=pos['dist'].samples[which][0]
            elif 'distance' in pos.names:
                D=pos['distance'].samples[which][0]
            elif 'logdistance' in pos.names:
                # BUG FIX: this branch previously read the absent 'distance'
                # key; the sampled parameter here is 'logdistance'.
                D=np.exp(pos['logdistance'].samples[which][0])

            m1=M1*LAL_MSUN_SI
            m2=M2*LAL_MSUN_SI
            if 'phi_orb' in pos.names:
                phiRef=pos['phi_orb'].samples[which][0]
            elif 'phase' in pos.names:
                phiRef=pos['phase'].samples[which][0]
            elif 'phase_maxl' in pos.names:
                phiRef=pos['phase_maxl'].samples[which][0]
                print('INFO: phi_orb not estimated, using maximum likelihood value')
            else:
                print('WARNING: phi_orb not found in posterior files. Defaulting to 0.0 which is probably *not* what you want\n')
                phiRef=0.0

            try:
                for name in ['flow','f_lower']:
                    if name in pos.names:
                        f_min=pos[name].samples[which][0]
            except:
                pass

            try:
                # BUG FIX: fname was previously unbound when no f_ref-like
                # parameter exists, raising an uncaught NameError.
                fname=None
                for name in ['fref','f_ref','f_Ref','fRef']:
                    if name in pos.names:
                        fname=name

                if fname is not None:
                    Fref = np.unique(pos[fname].samples)
                    if len(Fref) > 1:
                        print("ERROR: Expected f_ref to be constant for all samples. Can't tell which value was injected! Defaulting to 100 Hz\n")
                        print(Fref)
                    else:
                        f_ref = Fref[0]
            except ValueError:
                print("WARNING: Could not read fref from posterior file! Defaulting to 100 Hz\n")

            try:
                a = pos['a1'].samples[which][0]
                the = pos['theta_spin1'].samples[which][0]
                phi = pos['phi_spin1'].samples[which][0]
                s1x = (a * sin(the) * cos(phi));
                s1y = (a * sin(the) * sin(phi));
                s1z = (a * cos(the));
                a = pos['a2'].samples[which][0]
                the = pos['theta_spin2'].samples[which][0]
                phi = pos['phi_spin2'].samples[which][0]
                s2x = (a * sin(the) * cos(phi));
                s2y = (a * sin(the) * sin(phi));
                s2z = (a * cos(the));
                iota=pos['inclination'].samples[which][0]
            except:
                try:
                    iota, s1x, s1y, s1z, s2x, s2y, s2z=lalsim.SimInspiralTransformPrecessingNewInitialConditions(pos['theta_jn'].samples[which][0], pos['phi_JL'].samples[which][0], pos['tilt1'].samples[which][0], pos['tilt2'].samples[which][0], pos['phi12'].samples[which][0], pos['a1'].samples[which][0], pos['a2'].samples[which][0], m1, m2, f_ref, phiRef)
                except:
                    if 'a1z' in pos.names:
                        s1z=pos['a1z'].samples[which][0]
                    elif 'a1' in pos.names:
                        s1z=pos['a1'].samples[which][0]
                    else:
                        s1z=0
                    if 'a2z' in pos.names:
                        s2z=pos['a2z'].samples[which][0]
                    elif 'a2' in pos.names:
                        s2z=pos['a2'].samples[which][0]
                    else:
                        s2z=0
                    s1x=s1y=s2x=s2y=0.0
                    if 'inclination' in pos.names:
                        iota=pos['inclination'].samples[which][0]
                    else:
                        iota=pos['theta_jn'].samples[which][0]

            r=D*LAL_PC_SI*1.0e6

            approximant=int(pos['LAL_APPROXIMANT'].samples[which][0])
            amplitudeO=int(pos['LAL_AMPORDER'].samples[which][0])
            phaseO=int(pos['LAL_PNORDER'].samples[which][0])

            waveFlags=lal.CreateDict()
            lalsim.SimInspiralWaveformParamsInsertPNAmplitudeOrder(waveFlags, amplitudeO)
            lalsim.SimInspiralWaveformParamsInsertPNPhaseOrder(waveFlags, phaseO)
            if 'tideO' in pos.names:
                tidalO=int(pos['tideO'].samples[which][0])
                lalsim.SimInspiralWaveformParamsInsertPNTidalOrder(waveFlags, tidalO)
            if 'spinO' in pos.names:
                spinO=int(pos['spinO'].samples[which][0])
                lalsim.SimInspiralWaveformParamsInsertPNSpinOrder(waveFlags, spinO)
            if 'lambda1' in pos.names:
                lalsim.SimInspiralWaveformParamsInsertTidalLambda1(waveFlags, pos['lambda1'].samples[which][0])
            if 'lambda2' in pos.names:
                lalsim.SimInspiralWaveformParamsInsertTidalLambda2(waveFlags, pos['lambda2'].samples[which][0])

            if SimInspiralImplementedFDApproximants(approximant):
                rec_domain='F'
                [plus,cross]=SimInspiralChooseFDWaveform(m1, m2, s1x, s1y, s1z,s2x,s2y,s2z,r, iota, phiRef,
                                                         0, 0, 0, # Non-circular binary parameters
                                                         deltaF, f_min, f_max, f_ref,
                                                         waveFlags, approximant)
            elif SimInspiralImplementedTDApproximants(approximant):
                rec_domain='T'
                [plus,cross]=SimInspiralChooseTDWaveform(m1, m2, s1x, s1y, s1z,s2x,s2y,s2z, r, iota, phiRef,
                                                         0, 0, 0, # Non-circular binary parameters
                                                         deltaT, f_min, f_ref,
                                                         waveFlags, approximant)
            else:
                print("The approximant %s doesn't seem to be recognized by lalsimulation!\n Skipping WF plots\n"%approximant)
                return None

            ra=pos['ra'].samples[which][0]
            dec=pos['dec'].samples[which][0]
            psi=pos['psi'].samples[which][0]
            fs={}
            for ifo in ifos:
                fp, fc = ComputeDetAMResponse(lal.cached_detector_by_prefix[ifo].response, ra, dec, psi, GreenwichMeanSiderealTime(REAL8time))
                if rec_domain=='T':
                    # strain is a temporary container for this IFO strain.
                    # Take antenna pattern into accout and window the data
                    for k in np.arange(strainT.data.length):
                        if k<plus.data.length:
                            strainT.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
                        else:
                            strainT.data.data[k]=0.0
                        strainT.data.data[k]*=window.data.data[k]
                    # now copy in the dictionary only the part of strain which is not null (that is achieved using plus.data.length as length)
                    rec_strains[ifo]["T"]['strain']=np.array([strainT.data.data[k] for k in arange(plus.data.length)])
                    rec_strains[ifo]["T"]['x']=np.array([REAL8time - deltaT*(plus.data.length-1-k) for k in np.arange(plus.data.length)])

                    # Take the FFT
                    for j in arange(strainF.data.length):
                        strainF.data.data[j]=0.0
                    REAL8TimeFreqFFT(strainF,strainT,timeToFreqFFTPlan);
                    for j in arange(strainF.data.length):
                        strainF.data.data[j]/=WinNorm
                    # copy in the dictionary
                    rec_strains[ifo]["F"]['strain']=np.array([strainF.data.data[k] for k in arange(int(strainF.data.length))])
                    rec_strains[ifo]["F"]['x']=np.array([strainF.f0+ k*strainF.deltaF for k in arange(int(strainF.data.length))])
                elif rec_domain=='F':
                    for k in np.arange(strainF.data.length):
                        if k<plus.data.length:
                            strainF.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
                        else:
                            strainF.data.data[k]=0.0
                    # copy in the dictionary
                    rec_strains[ifo]["F"]['strain']=np.array([strainF.data.data[k] for k in arange(int(strainF.data.length))])
                    rec_strains[ifo]["F"]['x']=np.array([strainF.f0+ k*strainF.deltaF for k in arange(int(strainF.data.length))])

    myfig=plt.figure(1,figsize=(23,15))

    rows=len(ifos)
    cols=2

    #this variables decide which domain will be plotted on the left column of the plot.
    # only plot Time domain if both injections and recovery are TD
    global_domain="F"
    if rec_domain is not None and inj_domain is not None:
        if rec_domain=="T" and inj_domain=="T":
            global_domain="T"
    elif rec_domain is not None:
        if rec_domain=="T":
            global_domain="T"
    elif inj_domain is not None:
        if inj_domain=="T":
            global_domain="T"

    A,axes=plt.subplots(nrows=rows,ncols=cols,sharex=False,sharey=False)
    plt.setp(A,figwidth=23,figheight=15)
    for (r,i) in zip(np.arange(rows),ifos):
        for c in np.arange(cols):
            ax=axes[r]
            if type(ax)==np.ndarray:
                ax=ax[c]
            else:
                ax=axes[c]
            if rec_strains[i]["T"]['strain'] is not None or rec_strains[i]["F"]['strain'] is not None:
                if c==0:
                    if global_domain=="T":
                        ax.plot(rec_strains[i]["T"]['x'],rec_strains[i]["T"]['strain'],colors_rec[i],alpha=0.5,label='%s maP'%i)
                    else:
                        data=rec_strains[i]["F"]['strain']
                        f=rec_strains[i]["F"]['x']
                        mask=np.logical_and(f>=f_min,f<=plot_fmax)
                        ys=data
                        ax.semilogx(f[mask],ys[mask].real,'.-',color=colors_rec[i],alpha=0.5,label='%s maP'%i)
                else:
                    data=rec_strains[i]["F"]['strain']
                    f=rec_strains[i]["F"]['x']
                    mask=np.logical_and(f>=f_min,f<=plot_fmax)
                    ys=data
                    ax.loglog(f[mask],abs(ys[mask]),'--',color=colors_rec[i],alpha=0.5,linewidth=4)
                    ax.set_xlim([min(f[mask]),max(f[mask])])
                    ax.grid(True,which='both')
            if inj_strains[i]["T"]['strain'] is not None or inj_strains[i]["F"]['strain'] is not None:
                if c==0:
                    if global_domain=="T":
                        ax.plot(inj_strains[i]["T"]['x'],inj_strains[i]["T"]['strain'],colors_inj[i],alpha=0.5,label='%s inj'%i)
                    else:
                        data=inj_strains[i]["F"]['strain']
                        f=inj_strains[i]["F"]['x']
                        mask=np.logical_and(f>=f_min,f<=plot_fmax)
                        ys=data
                        ax.plot(f[mask],ys[mask].real,'.-',color=colors_inj[i],alpha=0.5,label='%s inj'%i)
                else:
                    data=inj_strains[i]["F"]['strain']
                    f=inj_strains[i]["F"]['x']
                    mask=np.logical_and(f>=f_min,f<=plot_fmax)
                    ys=data
                    ax.loglog(f[mask],abs(ys[mask]),'--',color=colors_inj[i],alpha=0.5,linewidth=4)
                    ax.set_xlim([min(f[mask]),max(f[mask])])
                    ax.grid(True,which='both')

            if r==0:
                if c==0:
                    if global_domain=="T":
                        ax.set_title(r"$h(t)$",fontsize=font_size)
                    else:
                        ax.set_title(r"$\Re[h(f)]$",fontsize=font_size)
                else:
                    ax.set_title(r"$|h(f)|$",fontsize=font_size)
            elif r==rows-1:
                if c==0:
                    if global_domain=="T":
                        ax.set_xlabel("time [s]",fontsize=font_size)
                    else:
                        ax.set_xlabel("frequency [Hz]",fontsize=font_size)
                else:
                    ax.set_xlabel("frequency [Hz]",fontsize=font_size)

            ax.legend(loc='best')
            ax.grid(True)

    #ax.tight_layout()
    A.savefig(os.path.join(path,'WF_DetFrame.png'),bbox_inches='tight')
    return inj_strains,rec_strains
6656
6657
def plot_psd(psd_files,outpath=None,f_min=30.):
    """Plot the power spectral densities listed in psd_files on a single
    log-log figure and save it as PSD.png in outpath (default: cwd).

    Files that do not exist are skipped with a warning; returns None if no
    file could be read, otherwise a dict mapping lower-case IFO prefix to
    its frequency array."""
    fig=plt.figure(figsize=(15,15),dpi=500)
    ax=plt.subplot(1,1,1)
    colors={'H1':'r','L1':'g','V1':'m','I1':'k','J1':'y'}

    if outpath is None:
        outpath=os.getcwd()

    # Drop files that are missing on disk, warning about each one.
    found=[]
    for fname in psd_files:
        if os.path.isfile(fname):
            found.append(fname)
        else:
            print("PSD file %s has not been found and won't be plotted\n"%fname)
    if not found:
        return None
    psd_files=found

    freqs = {}
    for fname in psd_files:
        table=np.loadtxt(fname)
        freq=table[:,0]
        psd=table[:,1]
        # The IFO prefix is the two characters before '-PSD.dat' in the name.
        idx=fname.find('-PSD.dat')
        ifo=fname[idx-2:idx]
        freqs[ifo.lower()] = freq
        # Keep only sensible bins: above f_min, non-zero and finite PSD.
        kept=[(fv,dv) for (fv,dv) in zip(freq,psd) if fv>f_min and dv!=0.0 and np.isfinite(dv)]
        fr=[fv for (fv,dv) in kept]
        da=[dv for (fv,dv) in kept]
        plt.loglog(fr,da,colors[ifo],label=ifo,alpha=0.5,linewidth=3)
    plt.xlim([min(fr),max(fr)])
    plt.xlabel("Frequency [Hz]",fontsize=26)
    plt.ylabel("PSD",fontsize=26)
    plt.legend(loc='best')
    plt.grid(which='both')
    # bbox_inches='tight' is unsupported by some backends; fall back if so.
    try:
        plt.tight_layout()
        fig.savefig(os.path.join(outpath,'PSD.png'),bbox_inches='tight')
    except:
        fig.savefig(os.path.join(outpath,'PSD.png'))
    fig.clf()

    return freqs
6704
6705cred_level = lambda cl, x: np.sort(x, axis=0)[int(cl*len(x))]
6706
def cred_interval(x, cl=.9, lower=True):
    """Return location of lower or upper confidence levels
    Args:
        x: List of samples.
        cl: Confidence level to return the bound of.
        lower: If ``True``, return the lower bound, otherwise return the upper bound.
    """
    # Symmetric interval: (1-cl)/2 mass below the lower bound,
    # (1+cl)/2 below the upper bound.
    quantile = (1.-cl)/2 if lower else (1.+cl)/2
    return cred_level(quantile, x)
6718
def spline_angle_xform(delta_psi):
    """Returns the angle in degrees corresponding to the spline
    calibration parameters delta_psi.

    """
    # Map delta_psi onto the unit circle via a Moebius transform, then
    # read off the phase angle and convert radians to degrees.
    numerator = 2.0 + 1.0j*delta_psi
    denominator = 2.0 - 1.0j*delta_psi
    rot = numerator/denominator
    deg_per_rad = 180.0/np.pi
    return deg_per_rad*np.arctan2(np.imag(rot), np.real(rot))
6727
def plot_spline_pos(logf, ys, nf=100, level=0.9, color='k', label=None, xform=None):
    """Plot calibration posterior estimates for a spline model in log space.

    Draws the posterior mean with symmetric credible-level error bars at the
    spline control points, plus a shaded credible band evaluated on a dense
    frequency grid, onto the current matplotlib axes.

    Args:
        logf: The (log) location of spline control points.
        ys: List of posterior draws of function at control points ``logf``
        nf: Number of points to evaluate spline at for plotting.
        level: Credible level to fill in.
        color: Color to plot with.
        label: Label for plot.
        xform: Function to transform the spline into plotted values.
    """
    f = np.exp(logf)
    fs = np.linspace(f.min(), f.max(), nf)

    # One row per posterior draw, evaluated on the dense grid fs.
    data = np.zeros((ys.shape[0], nf))

    if xform is None:
        zs = ys
    else:
        zs = xform(ys)

    # Error bars at the control points: distance from the mean to the
    # lower/upper credible bounds of the (possibly transformed) draws.
    mu = np.mean(zs, axis=0)
    lower_cl = mu - cred_interval(zs, level, lower=True)
    upper_cl = cred_interval(zs, level, lower=False) - mu
    plt.errorbar(np.exp(logf), mu, yerr=[lower_cl, upper_cl], fmt='.', color=color, lw=4, alpha=0.5, capsize=0)

    # Interpolate each draw onto fs.  interpolate.spline was removed from
    # modern scipy, so the AttributeError fallback is the usual path now.
    for i, samp in enumerate(ys):
        try:
            temp = interpolate.spline(logf, samp, np.log(fs))
        except AttributeError: # scipy < 0.19.0
            calSpline = interpolate.InterpolatedUnivariateSpline(logf, samp, k=3, ext=2) #cubic spline (k=3), raises ValueError in extrapolation
            temp = calSpline(np.log(fs))
        if xform is None:
            data[i] = temp
        else:
            data[i] = xform(temp)

    # Mean curve plus shaded credible band between the lower and upper bounds.
    line, = plt.plot(fs, np.mean(data, axis=0), color=color, label=label)
    color = line.get_color()
    plt.fill_between(fs, cred_interval(data, level), cred_interval(data, level, lower=False), color=color, alpha=.1, linewidth=0.1)
    plt.xlim(f.min()-.5, f.max()+50)
6769
def plot_calibration_pos(pos, level=.9, outpath=None):
    """Plot spline calibration-model posteriors (amplitude and phase versus
    frequency) for every IFO with <ifo>_spcal_* parameters in *pos*, and
    save the figure as calibration.png in *outpath* (default: cwd).

    @param pos Posterior object containing the spcal parameters
    @param level Credible level forwarded to plot_spline_pos
    @param outpath Output directory for the figure
    """
    fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(15, 15), dpi=500)

    font_size = 32
    if outpath is None:
        outpath=os.getcwd()

    # IFO prefixes are recovered from parameter names like 'h1_spcal_freq_0'.
    params = pos.names
    ifos = np.unique([param.split('_')[0] for param in params if 'spcal_freq' in param])
    for ifo in ifos:
        if ifo=='h1': color = 'r'
        elif ifo=='l1': color = 'g'
        elif ifo=='v1': color = 'm'
        else: color = 'c'

        # Assume spline control frequencies are constant
        freq_params = np.sort([param for param in params if
                               '{0}_spcal_freq'.format(ifo) in param])

        # Control-point frequencies (log space), taken from the medians.
        logfreqs = np.log([pos[param].median for param in freq_params])

        # Amplitude calibration model
        plt.sca(ax1)
        amp_params = np.sort([param for param in params if
                              '{0}_spcal_amp'.format(ifo) in param])
        if len(amp_params) > 0:
            # Fractional amplitude deviations, converted to percent.
            amp = 100*np.column_stack([pos[param].samples for param in amp_params])
            plot_spline_pos(logfreqs, amp, color=color, level=level, label="{0} (mean, {1}%)".format(ifo.upper(), int(level*100)))

        # Phase calibration model
        plt.sca(ax2)
        phase_params = np.sort([param for param in params if
                                '{0}_spcal_phase'.format(ifo) in param])
        if len(phase_params) > 0:
            phase = np.column_stack([pos[param].samples for param in phase_params])

            # spline_angle_xform converts the sampled parameters to degrees.
            plot_spline_pos(logfreqs, phase, color=color, level=level, label="{0} (mean, {1}%)".format(ifo.upper(), int(level*100)), xform=spline_angle_xform)

    ax1.tick_params(labelsize=.75*font_size)
    ax2.tick_params(labelsize=.75*font_size)
    # framealpha is unavailable in old matplotlib; retry without it.
    try:
        plt.legend(loc='upper right', prop={'size':.75*font_size}, framealpha=0.1)
    except:
        plt.legend(loc='upper right', prop={'size':.75*font_size})
    ax1.set_xscale('log')
    ax2.set_xscale('log')

    ax2.set_xlabel('Frequency (Hz)', fontsize=font_size)
    ax1.set_ylabel('Amplitude (%)', fontsize=font_size)
    ax2.set_ylabel('Phase (deg)', fontsize=font_size)

    outp = os.path.join(outpath, 'calibration.png')
    # bbox_inches='tight' is unsupported by some backends; fall back if so.
    try:
        fig.tight_layout()
        fig.savefig(outp, bbox_inches='tight')
    except:
        fig.savefig(outp)
    plt.close(fig)
6828
6829
6830def plot_burst_waveform(pos=None,simburst=None,event=0,path=None,ifos=['H1','L1','V1']):
6831 from lalinference.lalinference import SimBurstChooseFDWaveform,SimBurstChooseTDWaveform
6832 from lalinference.lalinference import SimBurstImplementedFDApproximants,SimBurstImplementedTDApproximants
6833 from lal.lal import CreateREAL8TimeSeries,CreateForwardREAL8FFTPlan,CreateTukeyREAL8Window,CreateCOMPLEX16FrequencySeries,DimensionlessUnit,REAL8TimeFreqFFT,CreateReverseREAL8FFTPlan
6834 from lal.lal import LIGOTimeGPS
6835 import lalinference as lalinf
6836 from lal import ComputeDetAMResponse, GreenwichMeanSiderealTime, LIGOTimeGPS
6837
6838 from math import cos,sin,sqrt
6839 from igwn_ligolw import lsctables
6840 from igwn_ligolw import utils
6841 import os
6842 import numpy as np
6843 from numpy import arange,real,absolute,fabs,pi
6844 from matplotlib import pyplot as plt
6845 if path is None:
6846 path=os.getcwd()
6847 if event is None:
6848 event=0
6849 colors_inj={'H1':'r','L1':'g','V1':'m','I1':'b','J1':'y'}
6850 colors_rec={'H1':'k','L1':'k','V1':'k','I1':'k','J1':'k'}
6851 #import sim inspiral table content handler
6852 from igwn_ligolw import ligolw
6853 from igwn_ligolw import table
6854 class LIGOLWContentHandlerExtractSimBurstTable(ligolw.LIGOLWContentHandler):
6855 def __init__(self,document):
6856 ligolw.LIGOLWContentHandler.__init__(self,document)
6857 self.tabname=lsctables.SimBurstTable.tableName
6858 self.intable=False
6859 self.tableElementName=''
6860 def startElement(self,name,attrs):
6861 if attrs.has_key('Name') and table.Table.TableName(attrs['Name'])==self.tabname:
6862 self.tableElementName=name
6863 # Got the right table, let's see if it's the right event
6864 ligolw.LIGOLWContentHandler.startElement(self,name,attrs)
6865 self.intable=True
6866 elif self.intable: # We are in the correct table
6867 ligolw.LIGOLWContentHandler.startElement(self,name,attrs)
6868 def endElement(self,name):
6869 if self.intable: ligolw.LIGOLWContentHandler.endElement(self,name)
6870 if self.intable and name==self.tableElementName: self.intable=False
6871
6872 # time and freq data handling variables
6873 srate=4096.0
6874 seglen=10.
6875 length=srate*seglen # lenght of 10 secs, hardcoded.
6876 deltaT=1/srate
6877 deltaF = 1.0 / (length* deltaT);
6878
6879 # build window for FFT
6880 pad=0.4
6881 timeToFreqFFTPlan = CreateForwardREAL8FFTPlan(int(length), 1 );
6882 freqToTimeFFTPlan = CreateReverseREAL8FFTPlan(int(length), 1 );
6883 window=CreateTukeyREAL8Window(int(length),2.0*pad*srate/length);
6884 # A random GPS time to initialize arrays. Epoch will be overwritten with sensible times further down
6885 segStart=939936910.000
6886 strainFinj= CreateCOMPLEX16FrequencySeries("strainF",segStart,0.0,deltaF,DimensionlessUnit,int(length/2. +1));
6887 strainTinj=CreateREAL8TimeSeries("strainT",segStart,0.0,1.0/srate,DimensionlessUnit,int(length));
6888 strainFrec= CreateCOMPLEX16FrequencySeries("strainF",segStart,0.0,deltaF,DimensionlessUnit,int(length/2. +1));
6889 strainTrec=CreateREAL8TimeSeries("strainT",segStart,0.0,1.0/srate,DimensionlessUnit,int(length));
6890 GlobREAL8time=None
6891 f_min=25 # hardcoded default (may be changed below)
6892 f_ref=100 # hardcoded default (may be changed below)
6893 f_max=srate/2.0
6894
6895 plot_fmax=2048
6896 plot_fmin=0.01
6897 plot_tmin=1e11
6898 plot_tmax=-1e11
6899
6900 inj_strains=dict((i,{"T":{'x':None,'strain':None},"F":{'x':None,'strain':None}}) for i in ifos)
6901 rec_strains=dict((i,{"T":{'x':None,'strain':None},"F":{'x':None,'strain':None}}) for i in ifos)
6902
6903 inj_domain=None
6904 rec_domain=None
6905 font_size=26
6906 if simburst is not None:
6907 skip=0
6908 try:
6909 xmldoc = utils.load_filename(simburst,contenthandler=LIGOLWContentHandlerExtractSimBurstTable)
6910 tbl = lsctables.SimBurstTable.get_table(xmldoc)
6911 if event>0:
6912 tbl=tbl[event]
6913 else:
6914 tbl=tbl[0]
6915 except:
6916 print("Cannot read event %s from table %s. Won't plot injected waveform \n"%(event,simburst))
6917 skip=1
6918 if not skip:
6919 REAL8time=tbl.time_geocent_gps+1e-9*tbl.time_geocent_gps_ns
6920 GPStime=LIGOTimeGPS(REAL8time)
6921 GlobREAL8time=(REAL8time)
6922 strainTinj.epoch=LIGOTimeGPS(round(GlobREAL8time,0)-seglen/2.)
6923 strainFinj.epoch=LIGOTimeGPS(round(GlobREAL8time,0)-seglen/2.)
6924 f0=tbl.frequency
6925 q=tbl.q
6926 dur=tbl.duration
6927 hrss=tbl.hrss
6928 polar_e_angle=tbl.pol_ellipse_angle
6929 polar_e_ecc=tbl.pol_ellipse_e
6930
6931 BurstExtraParams=None
6932 wf=str(tbl.waveform)
6933
6934 injapproximant=lalinf.GetBurstApproximantFromString(wf)
6935 ra=tbl.ra
6936 dec=tbl.dec
6937 psi=tbl.psi
6938
6939 if SimBurstImplementedFDApproximants(injapproximant):
6940 inj_domain='F'
6941 [plus,cross]=SimBurstChooseFDWaveform(deltaF, deltaT, f0, q,dur, f_min, f_max,hrss,polar_e_angle ,polar_e_ecc,BurstExtraParams, injapproximant)
6942 elif SimBurstImplementedTDApproximants(injapproximant):
6943 inj_domain='T'
6944 [plus,cross]=SimBurstChooseTDWaveform(deltaT, f0, q,dur, f_min, f_max,hrss,polar_e_angle ,polar_e_ecc,BurstExtraParams, injapproximant)
6945 else:
6946 print("\nThe approximant %s doesn't seem to be recognized by lalinference!\n Skipping WF plots\n"%injapproximant)
6947 return None
6948
6949 for ifo in ifos:
6950 fp, fc = ComputeDetAMResponse(lal.cached_detector_by_prefix[ifo].response, ra, dec, psi, GreenwichMeanSiderealTime(REAL8time))
6951 if inj_domain=='T':
6952 # bin of ref time as seen in strainT
6953 tCinstrain=np.floor(REAL8time-float(strainTinj.epoch))/deltaT
6954 # bin of strainT where we need to start copying the WF over
6955 #tSinstrain=floor(tCinstrain-float(plus.data.length)/2.)+1
6956 tSinstrain=int( (REAL8time-fabs(float(plus.epoch)) - fabs(float(strainTinj.epoch)))/deltaT)
6957 rem=(REAL8time-fabs(float(plus.epoch)) - fabs(float(strainTinj.epoch)))/deltaT-tSinstrain
6958 # strain is a temporary container for this IFO strain.
6959 # Zero until tSinstrain
6960 for k in np.arange(tSinstrain):
6961 strainTinj.data.data[k]=0.0
6962 # then copy plus/cross over
6963 for k in np.arange(plus.data.length):
6964 strainTinj.data.data[k+tSinstrain]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
6965 # Then zeros till the end (superfluous)
6966 for k in np.arange(strainTinj.data.length- (tSinstrain +plus.data.length)):
6967 strainTinj.data.data[k+tSinstrain+plus.data.length]=0.0
6968 for k in np.arange(strainTinj.data.length):
6969 strainTinj.data.data[k]*=window.data.data[k]
6970 np.savetxt('file.out',zip(np.array([strainTinj.epoch + j*deltaT for j in arange(strainTinj.data.length)]),np.array([strainTinj.data.data[j] for j in arange(strainTinj.data.length)])))
6971 # now copy in the dictionary
6972 inj_strains[ifo]["T"]['strain']=np.array([strainTinj.data.data[j] for j in arange(strainTinj.data.length)])
6973 inj_strains[ifo]["T"]['x']=np.array([strainTinj.epoch + j*deltaT for j in arange(strainTinj.data.length)])
6974 # Take the FFT
6975 for j in arange(strainFinj.data.length):
6976 strainFinj.data.data[j]=0.0
6977 REAL8TimeFreqFFT(strainFinj,strainTinj,timeToFreqFFTPlan);
6978 twopit=2.*np.pi*(rem*deltaT)
6979 for k in arange(strainFinj.data.length):
6980 re = cos(twopit*deltaF*k)
6981 im = -sin(twopit*deltaF*k)
6982 strainFinj.data.data[k]*= (re + 1j*im);
6983 # copy in the dictionary
6984 inj_strains[ifo]["F"]['strain']=np.array([strainFinj.data.data[k] for k in arange(int(strainFinj.data.length))])
6985 inj_strains[ifo]["F"]['x']=np.array([strainFinj.f0+ k*strainFinj.deltaF for k in arange(int(strainFinj.data.length))])
6986 elif inj_domain=='F':
6987 for k in np.arange(strainFinj.data.length):
6988 if k<plus.data.length:
6989 strainFinj.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
6990 else:
6991 strainFinj.data.data[k]=0.0
6992 twopit=2.*np.pi*(REAL8time-float(strainFinj.epoch))
6993 for k in arange(strainFinj.data.length):
6994 re = cos(twopit*deltaF*k)
6995 im = -sin(twopit*deltaF*k)
6996 strainFinj.data.data[k]*= (re + 1j*im);
6997 # copy in the dictionary
6998 inj_strains[ifo]["F"]['strain']=np.array([strainFinj.data.data[k] for k in arange(int(strainFinj.data.length))])
6999 inj_strains[ifo]["F"]['x']=np.array([strainFinj.f0+ k*strainFinj.deltaF for k in arange(int(strainFinj.data.length))])
7000 #update xlimits for plot, go 6 sigmas left and right of f0
7001 # This should work for SineGaussians
7002 if f0 is not None and f0 is not np.nan:
7003 if q is not None and q is not np.nan:
7004 sigmaF=f0/q
7005 if f0-6.*sigmaF>plot_fmin:
7006 plot_fmin=f0-6.*sigmaF
7007 if f0+6.*sigmaF<plot_fmax:
7008 plot_fmax=f0+6.*sigmaF
7009 sigmaT=q/(2.*pi*f0)
7010 if REAL8time-6.*sigmaT<plot_tmin:
7011 plot_tmin=REAL8time-6.*sigmaT
7012 if REAL8time+6.*sigmaT>plot_tmax:
7013 plot_tmax=REAL8time+6.*sigmaT
7014 # Now handle gaussians. For gaussians f0 is nan (FD centered at f=0)
7015 if dur is not None and dur is not np.nan:
7016 sigmaF=1./sqrt(2.)/pi/dur
7017 if 0+6.*sigmaF<plot_fmax:
7018 plot_fmax=0+6.*sigmaF
7019 plot_fmin=0.0
7020 sigmaT=dur/sqrt(2.)
7021 if REAL8time-6.*sigmaT<plot_tmin:
7022 plot_tmin=REAL8time-6.*sigmaT
7023 if REAL8time+6.*sigmaT>plot_tmax:
7024 plot_tmax=REAL8time+6.*sigmaT
7025
7026
7027 if pos is not None:
7028
7029 # Select the maxP sample
7030 _,which=pos._posMap()
7031
7032 if 'time' in pos.names:
7033 REAL8time=pos['time'].samples[which][0]
7034 elif 'time_maxl' in pos.names:
7035 REAL8time=pos['time_maxl'].samples[which][0]
7036 elif 'time_mean' in pos.names:
7037 REAL8time=pos['time_mean'].samples[which][0]
7038 elif 'time_min' in pos.names and 'time_max' in pos.names:
7039 REAL8time=pos['time_min'].samples[which][0]+0.5*(pos['time_max'].samples[which][0]-pos['time_min'].samples[which][0])
7040 else:
7041 print("ERROR: could not find any time parameter in the posterior file. Not plotting the WF...\n")
7042 return None
7043
7044 # first check we have approx in posterior samples, otherwise skip
7045 skip=0
7046
7047 try:
7048 approximant=int(pos['LAL_APPROXIMANT'].samples[which][0])
7049 except:
7050 skip=1
7051 if skip==0:
7052 GPStime=LIGOTimeGPS(REAL8time)
7053 if GlobREAL8time is None:
7054 GlobREAL8time=REAL8time
7055 strainTrec.epoch=LIGOTimeGPS(round(GlobREAL8time,0)-seglen/2.)
7056 strainFrec.epoch=LIGOTimeGPS(round(GlobREAL8time,0)-seglen/2.)
7057 if "duration" in pos.names:
7058 dur=pos["duration"].samples[which][0]
7059 else:
7060 dur=np.nan
7061 if "quality" in pos.names:
7062 q=pos['quality'].samples[which][0]
7063 else:
7064 q=np.nan
7065 if 'frequency' in pos.names:
7066 f0=pos['frequency'].samples[which][0]
7067 else:
7068 f0=np.nan
7069 try:
7070 hrss=pos['hrss'].samples[which][0]
7071 except:
7072 hrss=np.exp(pos['loghrss'].samples[which][0])
7073 if np.isnan(q) and not np.isnan(dur):
7074 q=sqrt(2)*pi*dur
7075 alpha=None
7076 if 'alpha' in pos.names:
7077 alpha=pos['alpha'].samples[which][0]
7078 polar_e_angle=alpha
7079 polar_e_ecc=pos['polar_eccentricity'].samples[which][0]
7080 elif 'polar_ellipse_angle' in pos.names:
7081 polar_e_angle=pos['polar_ellipse_angle'].samples[which][0]
7082 polar_e_ecc=pos['polar_eccentricity'].samples[which][0]
7083
7084 BurstExtraParams=None
7085 #if alpha:
7086 # BurstExtraParams=lalsim.SimBurstCreateExtraParam("alpha",alpha)
7087
7088 if SimBurstImplementedFDApproximants(approximant):
7089 rec_domain='F'
7090 [plus,cross]=SimBurstChooseFDWaveform(deltaF, deltaT, f0, q,dur, f_min, f_max,hrss,polar_e_angle ,polar_e_ecc,BurstExtraParams, approximant)
7091 elif SimBurstImplementedTDApproximants(approximant):
7092 rec_domain='T'
7093 [plus,cross]=SimBurstChooseTDWaveform(deltaT, f0, q,dur, f_min, f_max,hrss,polar_e_angle ,polar_e_ecc,BurstExtraParams, approximant)
7094 else:
7095 print("The approximant %s doesn't seem to be recognized by lalinference!\n Skipping WF plots\n"%approximant)
7096 return None
7097 ra=pos['ra'].samples[which][0]
7098 dec=pos['dec'].samples[which][0]
7099 psi=pos['psi'].samples[which][0]
7100 fs={}
7101 for ifo in ifos:
7102 fp, fc = ComputeDetAMResponse(lal.cached_detector_by_prefix[ifo].response, ra, dec, psi, GreenwichMeanSiderealTime(REAL8time))
7103 if rec_domain=='T':
7104 # bin of ref time as seen in strainT
7105 tCinstrain=np.floor(REAL8time-float(strainTrec.epoch))/deltaT
7106 # bin of strainT where we need to start copying the WF over
7107 tSinstrain=int( (REAL8time-fabs(float(plus.epoch)) - fabs(float(strainTrec.epoch)))/deltaT)
7108 #tSinstrain=floor(tCinstrain-float(plus.data.length)/2.)+1
7109 #reminder for fractions of bin, will be added back in the FD WF
7110 rem=(REAL8time-fabs(float(plus.epoch)) - fabs(float(strainTrec.epoch)))/deltaT-tSinstrain
7111
7112 # strain is a temporary container for this IFO strain.
7113 # Zero until tSinstrain
7114 for k in np.arange(tSinstrain):
7115 strainTrec.data.data[k]=0.0
7116 # then copy plus/cross over
7117 for k in np.arange(plus.data.length):
7118 strainTrec.data.data[k+tSinstrain]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
7119 # Then zeros till the end (superfluous)
7120 for k in np.arange(strainTrec.data.length- (tSinstrain +plus.data.length)):
7121 strainTrec.data.data[k+tSinstrain+plus.data.length]=0.0
7122 for k in np.arange(strainTrec.data.length):
7123 strainTrec.data.data[k]*=window.data.data[k]
7124 # now copy in the dictionary
7125 rec_strains[ifo]["T"]['strain']=np.array([strainTrec.data.data[j] for j in arange(strainTrec.data.length)])
7126 rec_strains[ifo]["T"]['x']=np.array([strainTrec.epoch + j*deltaT for j in arange(strainTrec.data.length)])
7127 # Take the FFT
7128 for j in arange(strainFrec.data.length):
7129 strainFrec.data.data[j]=0.0
7130 REAL8TimeFreqFFT(strainFrec,strainTrec,timeToFreqFFTPlan);
7131 twopit=2.*np.pi*(rem*deltaT)
7132 for k in arange(strainFrec.data.length):
7133 re = cos(twopit*deltaF*k)
7134 im = -sin(twopit*deltaF*k)
7135 strainFrec.data.data[k]*= (re + 1j*im);
7136 # copy in the dictionary
7137 rec_strains[ifo]["F"]['strain']=np.array([strainFrec.data.data[k] for k in arange(int(strainFrec.data.length))])
7138 rec_strains[ifo]["F"]['x']=np.array([strainFrec.f0+ k*strainFrec.deltaF for k in arange(int(strainFrec.data.length))])
7139 elif rec_domain=='F':
7140 for k in np.arange(strainFrec.data.length):
7141 if k<plus.data.length:
7142 strainFrec.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
7143 else:
7144 strainFrec.data.data[k]=0.0
7145 twopit=2.*np.pi*(REAL8time-float(strainFrec.epoch))
7146 for k in arange(strainFrec.data.length):
7147 re = cos(twopit*deltaF*k)
7148 im = -sin(twopit*deltaF*k)
7149 strainFrec.data.data[k]*= (re + 1j*im);
7150 # copy in the dictionary
7151 rec_strains[ifo]["F"]['strain']=np.array([strainFrec.data.data[k] for k in arange(int(strainFrec.data.length))])
7152 rec_strains[ifo]["F"]['x']=np.array([strainFrec.f0+ k*strainFrec.deltaF for k in arange(int(strainFrec.data.length))])
7153 #update xlimits for plot, go 6 sigmas left and right of f0
7154 # This should work for SineGaussians
7155 if f0 is not None and f0 is not np.nan:
7156 if q is not None and q is not np.nan:
7157 sigmaF=f0/q
7158 if f0-6.*sigmaF>plot_fmin:
7159 plot_fmin=f0-6.*sigmaF
7160 if f0+6.*sigmaF<plot_fmax:
7161 plot_fmax=f0+6.*sigmaF
7162 sigmaT=q/(2.*pi*f0)
7163 if REAL8time-6.*sigmaT<plot_tmin:
7164 plot_tmin=REAL8time-6.*sigmaT
7165 if REAL8time+6.*sigmaT>plot_tmax:
7166 plot_tmax=REAL8time+6.*sigmaT
7167 # Now handle gaussians. For gaussians f0 is nan (FD centered at f=0)
7168 if dur is not None and dur is not np.nan:
7169 sigmaF=1./sqrt(2.)/pi/dur
7170 if 0+6.*sigmaF<plot_fmax:
7171 plot_fmax=0+6.*sigmaF
7172 plot_fmin=0.0
7173 sigmaT=dur/sqrt(2.)
7174 if REAL8time-6.*sigmaT<plot_tmin:
7175 plot_tmin=REAL8time-6.*sigmaT
7176 if REAL8time+6.*sigmaT>plot_tmax:
7177 plot_tmax=REAL8time+6.*sigmaT
7178
7179 myfig=plt.figure(1,figsize=(10,7))
7180
7181 rows=len(ifos)
7182 cols=2
7183
7184 #this variables decide which domain will be plotted on the left column of the plot.
7185 # only plot Time domain if both injections and recovery are TD
7186 global_domain="F"
7187 if rec_domain is not None and inj_domain is not None:
7188 if rec_domain=="T" and inj_domain=="T":
7189 global_domain="T"
7190 elif rec_domain is not None:
7191 if rec_domain=="T":
7192 global_domain="T"
7193 elif inj_domain is not None:
7194 if inj_domain=="T":
7195 global_domain="T"
7196
7197 A,axes=plt.subplots(nrows=rows,ncols=cols,sharex=False,sharey=False)
7198 plt.setp(A,figwidth=10,figheight=7)
7199 for (r,i) in zip(np.arange(rows),ifos):
7200 for c in np.arange(cols):
7201 ax=axes[r]
7202 if type(ax)==np.ndarray:
7203 ax=ax[c]
7204 else:
7205 ax=axes[c]
7206 if rec_strains[i]["T"]['strain'] is not None or rec_strains[i]["F"]['strain'] is not None:
7207 if c==0:
7208 if global_domain=="T":
7209 ax.plot(rec_strains[i]["T"]['x'],rec_strains[i]["T"]['strain'],colors_rec[i],label='%s maP'%i,linewidth=5)
7210 ax.set_xlim([plot_tmin,plot_tmax])
7211 #ax.vlines(GlobREAL8time,0.9*min(rec_strains[i]["T"]['strain']),1.1*max(rec_strains[i]["T"]['strain']),'k')
7212 else:
7213 data=rec_strains[i]["F"]['strain']
7214 f=rec_strains[i]["F"]['x']
7215 mask=np.logical_and(f>=plot_fmin,f<=plot_fmax)
7216 ys=data
7217 ax.plot(f[mask],real(ys[mask]),'-',color=colors_rec[i],label='%s maP'%i,linewidth=5)
7218 ax.set_xlim([plot_fmin,plot_fmax])
7219 else:
7220 data=rec_strains[i]["F"]['strain']
7221 f=rec_strains[i]["F"]['x']
7222 mask=np.logical_and(f>=plot_fmin,f<=plot_fmax)
7223 ys=data
7224 ax.loglog(f[mask],absolute(ys[mask]),'--',color=colors_rec[i],linewidth=5)
7225 ax.grid(True,which='both')
7226 ax.set_xlim([plot_fmin,plot_fmax])
7227 if inj_strains[i]["T"]['strain'] is not None or inj_strains[i]["F"]['strain'] is not None:
7228 if c==0:
7229 if global_domain=="T":
7230 ax.plot(inj_strains[i]["T"]['x'],inj_strains[i]["T"]['strain'],colors_inj[i],label='%s inj'%i,linewidth=2)
7231 ax.set_xlim([plot_tmin,plot_tmax])
7232 else:
7233 data=inj_strains[i]["F"]['strain']
7234 f=inj_strains[i]["F"]['x']
7235 mask=np.logical_and(f>=plot_fmin,f<=plot_fmax)
7236 ys=data
7237 ax.plot(f[mask],real(ys[mask]),'-',color=colors_inj[i],label='%s inj'%i,linewidth=2)
7238 ax.set_xlim([plot_fmin,plot_fmax])
7239 else:
7240 data=inj_strains[i]["F"]['strain']
7241 f=inj_strains[i]["F"]['x']
7242 mask=np.logical_and(f>=plot_fmin,f<=plot_fmax)
7243 ys=data
7244 ax.loglog(f[mask],absolute(ys[mask]),'--',color=colors_inj[i],linewidth=2)
7245 ax.grid(True,which='both')
7246 ax.set_xlim([plot_fmin,plot_fmax])
7247 if r==0:
7248 if c==0:
7249 if global_domain=="T":
7250 ax.set_title(r"$h(t)$",fontsize=font_size)
7251 else:
7252 ax.set_title(r"$\Re[h(f)]$",fontsize=font_size)
7253 else:
7254 ax.set_title(r"$|h(f)|$",fontsize=font_size)
7255 elif r==rows-1:
7256 if c==0:
7257 if global_domain=="T":
7258 ax.set_xlabel("time [s]",fontsize=font_size)
7259 else:
7260 ax.set_xlabel("frequency [Hz]",fontsize=font_size)
7261 else:
7262 ax.set_xlabel("frequency [Hz]",fontsize=font_size)
7263
7264 ax.legend(loc='best')
7265 ax.grid(True)
7266
7267 #ax.tight_layout()
7268 A.savefig(os.path.join(path,'WF_DetFrame.png'),bbox_inches='tight')
7269 return inj_strains, rec_strains
7270
def make_1d_table(html,legend,label,pos,pars,noacf,GreedyRes,onepdfdir,sampsdir,savepdfs,greedy,analyticLikelihood,nDownsample):
    """
    Build the '1D marginal posterior PDFs' section of the results page.

    For each parameter name in ``pars`` that is present both in the posterior
    ``pos`` and in the bin-size dictionary ``GreedyRes``, this function:
      1. computes 1-d credible regions, either by greedy binning
         (``greedy=True``, via ``greedy_bin_one_param``) or by a 2-step KDE
         (``kdtree_bin2Step``);
      2. saves a histogram/KDE figure to ``onepdfdir`` (PNG, plus PDF when
         ``savepdfs`` is true);
      3. saves a raw-sample trace plot to ``sampsdir`` (one colored series per
         chain when the posterior has a 'chain' parameter, with a
         Gelman-Rubin R in the title);
      4. unless ``noacf``, saves an autocorrelation-function plot to
         ``sampsdir``;
      5. appends an HTML table row with the three images to a collapsible
         section added to ``html`` via ``html.add_collapse_section``.

    Parameters
    ----------
    html : object with an ``add_collapse_section(name, legend=..., innertable_id=...)``
        method returning a writable section (see htmlChunk in this module).
    legend : legend text passed through to the collapse section.
    label : string used in the section title and to build the table id.
    pos : Posterior instance (provides ``names``, ``samples()``, item access,
        ``gelman_rubin``).
    pars : list of parameter names to process; empty list returns ``{}``.
    noacf : if true, skip autocorrelation plots and the ACF table column.
    GreedyRes : dict mapping parameter name -> greedy bin size.
    onepdfdir, sampsdir : output directories for PDF/histogram figures and
        sample/ACF figures respectively.
    savepdfs : if true, also save .pdf versions of every figure.
    greedy : choose greedy binning (True) or 2-step KDE (False) for the
        credible regions.
    analyticLikelihood : optional object with ``pdf(par)``/``cdf(par)``
        used to overlay analytic curves on the 1D plots.
    nDownsample : if None, chains were not downsampled by ACL and the ACF
        plot is titled plainly; otherwise the ACL is marked on the plot.

    Returns
    -------
    dict mapping parameter name -> the credible-region results object
    (``reses``) returned by the chosen binning routine.
    """

    from numpy import unique, sort
    # Module-level list of credible levels shared across the plotting code.
    global confidenceLevels
    confidence_levels=confidenceLevels

    out={}
    # Nothing requested, or no overlap between requested and available params.
    if pars==[]:
        return out
    if set(pos.names)-set(pars)==set(pos.names):
        return out

    # Collapsible HTML section that will hold one table row per parameter.
    tabid='onedmargtable_'+label.lower()
    html_ompdf=html.add_collapse_section('1D marginal posterior PDFs (%s)'%label,legend=legend,innertable_id=tabid)
    # Table header: the ACF column is only present when ACFs are produced.
    if not noacf:
        html_ompdf_write= '<table id="%s"><tr><th>Histogram and Kernel Density Estimate</th><th>Samples used</th><th>Autocorrelation</th></tr>'%tabid
    else:
        html_ompdf_write= '<table id="%s"><tr><th>Histogram and Kernel Density Estimate</th><th>Samples used</th></tr>'%tabid

    # Determine the MCMC cycle spacing (Nskip) used later to scale the ACL.
    # Multi-chain runs get per-chain spacings; single-chain runs a scalar.
    Nskip=0
    if 'chain' in pos.names:
        data,header=pos.samples()
        par_index=pos.names.index('cycle')
        chain_index=pos.names.index("chain")
        chains=unique(pos["chain"].samples)
        chainCycles = [sort(data[ data[:,chain_index] == chain, par_index ]) for chain in chains]
        chainNcycles = []
        chainNskips = []
        for cycles in chainCycles:
            if len(cycles) > 1:
                chainNcycles.append(cycles[-1] - cycles[0])
                chainNskips.append(cycles[1] - cycles[0])
            else:
                # Degenerate single-cycle chain: avoid zero spacing.
                chainNcycles.append(1)
                chainNskips.append(1)
    elif 'cycle' in pos.names:
        cycles = sort(pos['cycle'].samples)
        if len(cycles) > 1:
            Ncycles = cycles[-1]-cycles[0]
            Nskip = cycles[1]-cycles[0]
        else:
            Ncycles = 1
            Nskip = 1

    # 'printed' ensures the binning-method banner is printed only once.
    printed=0
    for par_name in pars:
        par_name=par_name.lower()
        try:
            pos[par_name.lower()]
        except KeyError:
            #print "No input chain for %s, skipping binning."%par_name
            continue
        try:
            par_bin=GreedyRes[par_name]
        except KeyError:
            print("Bin size is not set for %s, skipping binning."%par_name)
            continue

        # Bin this parameter to determine its credible levels.
        binParams={par_name:par_bin}
        injection_area=None
        # NOTE(review): duplicated assignment below — harmless, likely a
        # leftover from an edit; candidate for removal.
        injection_area=None
        if greedy:
            if printed==0:
                print("Using greedy 1-d binning credible regions\n")
                printed=1
            toppoints,injectionconfidence,reses,injection_area,cl_intervals=greedy_bin_one_param(pos,binParams,confidence_levels)
        else:
            if printed==0:
                print("Using 2-step KDE 1-d credible regions\n")
                printed=1
            if pos[par_name].injval is None:
                injCoords=None
            else:
                injCoords=[pos[par_name].injval]
            _,reses,injstats=kdtree_bin2Step(pos,[par_name],confidence_levels,injCoords=injCoords)
            if injstats is not None:
                # injstats[3]/[4]: injection confidence and its area.
                # NOTE(review): injectionconfidence is assigned but not used
                # later in this function.
                injectionconfidence=injstats[3]
                injection_area=injstats[4]
        #Generate 1D histogram/kde plots
        print("Generating 1D plot for %s."%par_name)
        out[par_name]=reses
        # Optional analytic PDF/CDF overlays for this parameter.
        pdf=cdf=None
        if analyticLikelihood:
            pdf = analyticLikelihood.pdf(par_name)
            cdf = analyticLikelihood.cdf(par_name)

        # 50 histogram bins for the 1D marginal plot.
        oneDPDFParams={par_name:50}
        try:
            rbins,plotFig=plot_one_param_pdf(pos,oneDPDFParams,pdf,cdf,plotkde=False)
        except Exception as exc:
            # A failed plot for one parameter should not abort the table.
            warnings.warn(
                f"failed to produce plot for {par_name}: "
                f"[{type(exc).__name__}] {exc}",
            )
            continue

        figname=par_name+'.png'
        oneDplotPath=os.path.join(onepdfdir,figname)
        plotFig.savefig(oneDplotPath)
        if(savepdfs): plotFig.savefig(os.path.join(onepdfdir,par_name+'.pdf'))
        plt.close(plotFig)

        if rbins:
            print("r of injected value of %s (bins) = %f"%(par_name, rbins))

        ##Produce plot of raw samples
        myfig=plt.figure(figsize=(4,3.5),dpi=200)
        pos_samps=pos[par_name].samples
        if not ("chain" in pos.names):
            # If there is not a parameter named "chain" in the
            # posterior, then just produce a plot of the samples.
            plt.plot(pos_samps,'k.', markersize=5, alpha=0.5, linewidth=0.0, figure=myfig)
            maxLen=len(pos_samps)
        else:
            # If there is a parameter named "chain", then produce a
            # plot of the various chains in different colors, with
            # smaller dots.
            data,header=pos.samples()
            par_index=pos.names.index(par_name)
            chain_index=pos.names.index("chain")
            chains=unique(pos["chain"].samples)
            chainData=[data[ data[:,chain_index] == chain, par_index ] for chain in chains]
            chainDataRanges=[range(len(cd)) for cd in chainData]
            maxLen=max([len(cd) for cd in chainData])
            for rng, data in zip(chainDataRanges, chainData):
                plt.plot(rng, data, marker='.', markersize=1, alpha=0.5, linewidth=0.0,figure=myfig)
            plt.title("Gelman-Rubin R = %g"%(pos.gelman_rubin(par_name)))

            #dataPairs=[ [rng, data] for (rng,data) in zip(chainDataRanges, chainData)]
            #flattenedData=[ item for pair in dataPairs for item in pair ]
            #maxLen=max([len(data) for data in flattenedData])
            #plt.plot(array(flattenedData),marker=',',linewidth=0.0,figure=myfig)


        injpar=pos[par_name].injval

        if injpar is not None:
            # Allow injection to be 5% outside the posterior plot
            minrange=min(pos_samps)-0.05*(max(pos_samps)-min(pos_samps))
            maxrange=max(pos_samps)+0.05*(max(pos_samps)-min(pos_samps))
            if minrange<injpar and maxrange>injpar:
                plt.axhline(injpar, color='r', linestyle='-.',linewidth=4)
        myfig.savefig(os.path.join(sampsdir,figname.replace('.png','_samps.png')))
        if(savepdfs): myfig.savefig(os.path.join(sampsdir,figname.replace('.png','_samps.pdf')))
        plt.close(myfig)
        # acfail flags any exception during ACF generation so the HTML cell
        # can report the failure instead of linking a missing image.
        acfail=0
        if not (noacf):
            acffig=plt.figure(figsize=(4,3.5),dpi=200)
            if not ("chain" in pos.names):
                data=pos_samps[:,0]
                # NOTE(review): bare except below silently swallows all
                # errors (including KeyboardInterrupt); narrowing it would
                # be safer, but is left unchanged here.
                try:
                    (Neff, acl, acf) = effectiveSampleSize(data, Nskip)
                    lines=plt.plot(acf, 'k.', marker='.', markersize=1, alpha=0.5, linewidth=0.0, figure=acffig)
                    # Give ACL info if not already downsampled according to it
                    if nDownsample is None:
                        plt.title('Autocorrelation Function')
                    elif 'cycle' in pos.names:
                        last_color = lines[-1].get_color()
                        plt.axvline(acl/Nskip, linestyle='-.', color=last_color)
                        plt.title('ACL = %i N = %i'%(acl,Neff))
                    acffig.savefig(os.path.join(sampsdir,figname.replace('.png','_acf.png')))
                    if(savepdfs): acffig.savefig(os.path.join(sampsdir,figname.replace('.png','_acf.pdf')))
                    plt.close(acffig)
                except:
                    # Ignore
                    acfail=1
                    pass
            else:
                # Multi-chain case: one ACF per chain on the same figure.
                # Uses chainData/chainDataRanges computed in the trace-plot
                # branch above and chainNskips/chainNcycles from the top of
                # the function (both exist because 'chain' is in pos.names).
                try:
                    acls = []
                    Nsamps = 0.0;
                    # NOTE(review): this loop rebinds the outer Nskip
                    # variable; intentional-looking but worth confirming.
                    for rng, data, Nskip, Ncycles in zip(chainDataRanges, chainData, chainNskips, chainNcycles):
                        (Neff, acl, acf) = effectiveSampleSize(data, Nskip)
                        acls.append(acl)
                        Nsamps += Neff
                        lines=plt.plot(acf,'k.', marker='.', markersize=1, alpha=0.5, linewidth=0.0, figure=acffig)
                        # Give ACL info if not already downsampled according to it
                        if nDownsample is not None:
                            last_color = lines[-1].get_color()
                            plt.axvline(acl/Nskip, linestyle='-.', color=last_color)
                    if nDownsample is None:
                        plt.title('Autocorrelation Function')
                    else:
                        plt.title('ACL = %i N = %i'%(max(acls),Nsamps))
                    acffig.savefig(os.path.join(sampsdir,figname.replace('.png','_acf.png')))
                    if(savepdfs): acffig.savefig(os.path.join(sampsdir,figname.replace('.png','_acf.pdf')))
                    plt.close(acffig)
                except:
                    # Ignore
                    acfail=1
                    pass

        # Emit this parameter's table row; the ACF cell degrades to an
        # error message when ACF generation failed.
        if not noacf:
            if not acfail:
                acfhtml='<td width="30%"><img width="100%" src="1Dsamps/'+figname.replace('.png', '_acf.png')+'"/></td>'
            else:
                acfhtml='<td>ACF generation failed!</td>'
            html_ompdf_write+='<tr><td width="30%"><img width="100%" src="1Dpdf/'+figname+'"/></td><td width="30%"><img width="100%" src="1Dsamps/'+figname.replace('.png','_samps.png')+'"/></td>'+acfhtml+'</tr>'
        else:
            html_ompdf_write+='<tr><td width="30%"><img width="100%" src="1Dpdf/'+figname+'"/></td><td width="30%"><img width="100%" src="1Dsamps/'+figname.replace('.png','_samps.png')+'"/></td></tr>'

    html_ompdf_write+='</table>'
    html_ompdf.write(html_ompdf_write)

    return out
#define max(a, b)
Return analytic likelihood values.
def names(self)
Return list of parameter names described by analytic likelihood function.
def __init__(self, covariance_matrix_files, mean_vector_files)
Prepare analytic likelihood for the given parameters.
def cdf(self, param)
Return CDF function for parameter.
def pdf(self, param)
Return PDF function for parameter.
Data structure for a table of posterior samples .
def __init__(self, commonResultsFormatData, SimBurstTableEntry=None, injFref=None, SnglBurstList=None, name=None, description=None)
Constructor.
def __init__(self, accuracy='auto')
Dec tick locations with some intelligence.
def __init__(self, min=-pi_constant/2.0, max=pi_constant/2.0)
object to store the structure of a kd tree
def setSplit(self, dimension, value)
def setImportance(self, sampleNumber, volume)
def __init__(self, bounding_box, left_child=None, right_child=None)
def search(self, coordinates)
takes a set of coordinates and searches down through the tree until it gets to a box with less than ...
def integrate(self, f, boxing=64)
Returns the integral of f(objects) over the tree.
def objects(self)
Returns the objects in the tree.
def __init__(self, objects)
Construct a kD-tree from a sequence of objects.
def bounds(self)
Returns the coordinates of the lower-left and upper-right corners of the bounding box for this tree: ...
def volume(self)
Returns the volume of the bounding box of the tree.
def right(self)
Returns the right tree.
def __iter__(self)
Iterator over all the objects contained in the tree.
def left(self)
Returns the left tree.
def operate(self, f, g, boxing=64)
Operates on tree nodes exceeding boxing parameter depth.
def split_dim(self)
Returns the dimension along which this level of the kD-tree splits.
A kD-tree suitable for splitting parameter spaces and counting hypervolumes.
def integrate(self, f, boxing=64)
Returns the integral of f(objects) over the tree.
def fillNewTree(self, boxing=64, isArea=False)
copies tree structure, but with KDSkeleton as the new nodes.
def __iter__(self)
Iterator over all the objects contained in the tree.
def left(self)
Returns the left tree.
def objects(self)
Returns the objects in the tree.
def split_dim(self)
Returns the dimension along which this level of the kD-tree splits.
def right(self)
Returns the right tree.
def volume(self)
Returns the volume of the bounding box of the tree.
def bounds(self)
Returns the coordinates of the lower-left and upper-right corners of the bounding box for this tree: ...
def search(self, coordinates, boxing=64)
takes a set of coordinates and searches down through the tree until it gets to a box with less than ...
def __init__(self, objects, boundingbox, dims=0)
Construct a kD-tree from a sequence of objects.
def operate(self, f, g, boxing=64)
Operates on tree nodes exceeding boxing parameter depth.
A parser for the output of Bayesian parameter estimation codes.
def _find_ndownsample(self, files, logPthreshold, fixedBurnins, nDownsample)
def _common_to_pos(self, infile, info=[None, None])
def _hdf5_to_pos(self, infile, fixedBurnins=None, deltaLogP=None, nDownsample=None, tablename=None, **kwargs)
def _hdf5_to_table(self, infile, deltaLogP=None, fixedBurnin=None, nDownsample=None, multiple_chains=False, tablename=None, **kwargs)
def _infmcmc_to_pos(self, files, outdir=None, deltaLogP=None, fixedBurnins=None, nDownsample=None, oldMassConvention=False, **kwargs)
def _infmcmc_output_posterior_samples(self, files, runfile, outfile, logPThreshold, fixedBurnins, nskips=None, oldMassConvention=False)
def parse(self, files, **kwargs)
Parse files.
def _hdf5s_to_pos(self, infiles, fixedBurnins=None, deltaLogP=None, nDownsample=None, tablename=None, **kwargs)
def _ns_to_pos(self, files, Nlive=None, Npost=None, posfilename='posterior_samples.dat')
Data structure for a table of posterior samples .
def stdevs(self)
Return dict {paramName:paramStandardDeviation} .
def maxP(self)
Return the maximum a posteriori probability and the corresponding set of parameters.
def set_triggers(self, triggers)
Set the trigger values of the parameters.
def __init__(self, commonResultsFormatData, SimInspiralTableEntry=None, inj_spin_frame='OrbitalL', injFref=100, SnglInspiralList=None, name=None, description=None)
Constructor.
def bySample(self)
Generate a forward iterator over the list of samples corresponding to the data stored within the Post...
def __len__(self)
Container method.
def __iter__(self)
Container method.
def extend_posterior(self)
Add some useful derived parameters (such as tilt angles, time delays, etc) in the Posterior object.
def delete_NaN_entries(self, param_list)
Remove samples containing NaN in request params.
def name(self)
Return qualified string containing the 'name' of the Posterior instance.
def gelman_rubin(self, pname)
Returns an approximation to the Gelman-Rubin statistic (see Gelman, A.
def di_evidence(self, boxing=64)
Returns the log of the direct-integration evidence for the posterior samples.
def __getitem__(self, key)
Container method .
def _average_posterior(self, samples, post_name)
def _inj_spins(self, inj, frame='OrbitalL')
def forward(self)
Generate a forward iterator (in sense of list of names) over Posterior with name,one_d_pos.
def triggers(self)
Return the trigger values .
def append(self, one_d_posterior)
Container method.
def description(self)
Return qualified string containing a 'description' of the Posterior instance.
def bootstrap(self)
Returns a new Posterior object that contains a bootstrap sample of self.
def means(self)
Return dict {paramName:paramMean} .
def _average_posterior_like_prior(self, samples, logl_name, prior_name, log_bias=0)
def delete_samples_by_idx(self, samples)
Remove samples from all OneDPosteriors.
def maxL(self)
Return the maximum likelihood probability and the corresponding set of parameters.
def injection(self)
Return the injected values.
def healpix_map(self, resol, nest=True)
Returns a healpix map in the pixel ordering that represents the posterior density (per square degree)...
def _print_table_row(self, name, entries)
def _total_incl_restarts(self, samples)
def __str__(self)
Define a string representation of the Posterior class ; returns a html formatted table of various pro...
def DIC(self)
Returns the Deviance Information Criterion estimated from the posterior samples.
def write_to_file(self, fname)
Dump the posterior table to a file in the 'common format'.
def names(self)
Return list of parameter names.
def samples(self)
Return an (M,N) numpy.array of posterior samples; M = len(self); N = dim(self) .
def pop(self, param_name)
Container method.
def append_mapping(self, new_param_names, func, post_names)
Append posteriors pos1,pos2,...=func(post_names)
def elliptical_subregion_evidence(self)
Returns an approximation to the log(evidence) obtained by fitting an ellipse around the highest-poste...
def medians(self)
Return dict {paramName:paramMedian} .
def longest_chain_cycles(self)
Returns the number of cycles in the longest chain.
def dim(self)
Return number of parameters.
def set_injection(self, injection)
Set the injected values of the parameters.
def harmonic_mean_evidence(self)
Returns the log of the harmonic mean evidence for the set of posterior samples.
A data structure representing one parameter in a chain of posterior samples.
def name(self)
Return the string literal name of the parameter.
def median(self)
Return the median value for the marginal PDF on the parameter.
def delete_samples_by_idx(self, samples)
Remove samples from posterior, analogous to numpy.delete but operates in place.
def injval(self)
Return the injected value set at construction .
def set_trigvals(self, new_trigvals)
Set the trigger values of the parameter.
def __len__(self)
Container method.
def __init__(self, name, posterior_samples, injected_value=None, injFref=None, trigger_values=None, prior=None)
Create an instance of PosteriorOneDPDF based on a table of posterior_samples.
def stacc(self)
Return the 'standard accuracy statistic' (stacc) of the marginal posterior of the parameter.
def set_injval(self, new_injval)
Set the injected/real value of the parameter.
def trigvals(self)
Return the trigger values set at construction.
def mean(self)
Return the arithmetic mean for the marginal PDF on the parameter.
def __getitem__(self, idx)
Container method .
def prob_interval(self, intervals)
Evaluate probability intervals.
def KL(self)
Returns the KL divergence between the prior and the posterior.
def gaussian_kde(self)
Return a SciPy gaussian_kde (representing a Gaussian KDE) of the samples.
def stdev(self)
Return the standard deviation of the marginal PDF on the parameter.
def samples(self)
Return a 1D numpy.array of the samples.
A single parameter sample object, suitable for inclusion in a kD-tree.
def __getitem__(self, key)
Return the element with the corresponding name.
def coord(self)
Return the coordinates for the parameter sample.
def __init__(self, sample_array, headers, coord_names)
Given the sample array, headers for the values, and the names of the desired coordinates,...
def __init__(self, accuracy='auto')
RA tick locations with some intelligence.
def __init__(self, min=0.0, max=2.0 *pi_constant)
def start(self, tag, attrib)
A base class for representing web content using ElementTree .
def toprettyxml(self)
Return a pretty-printed XML string of the htmlPage.
def insert_row(self, tab, label=None)
Insert row in table tab.
def insert_td(self, row, td, label=None, legend=None)
Insert cell td into row row.
def __init__(self, tag, attrib=None, parent=None)
def a(self, url, linktext)
Represents a block of html fitting within a htmlPage.
def __init__(self, section_name, htmlElement=None, table_id=None, start_closed=True)
A concrete class for generating an XHTML(1) document.
def add_section(self, section_name, legend=None)
def add_collapse_section(self, section_name, legend=None, innertable_id=None, start_closed=True)
Create a section embedded into a table that can be collapsed with a button.
def add_section_to_element(self, section_name, parent)
Create a section which is not appended to the body of html, but to the parent Element.
def __init__(self, title=None, css=None, javascript=None, toc=False)
Represents a block of html fitting within a htmlPage.
def __init__(self, section_name, htmlElement=None, blank=False)
static const INT4 a
type
def plot_two_param_greedy_bins_hist(posterior, greedy2Params, confidence_levels)
Histograms of the ranked pixels produced by the 2-parameter greedy binning algorithm coloured by their...
def cred_interval(x, cl=.9, lower=True)
Return location of lower or upper confidence levels Args: x: List of samples.
def histogram2D(posterior, greedy2Params, confidence_levels)
Returns a 2D histogram and edges for the two parameters passed in greedy2Params, plus the actual disc...
def plot_spline_pos(logf, ys, nf=100, level=0.9, color='k', label=None, xform=None)
Plot calibration posterior estimates for a spline model in log space.
def lambda_a(redshift, nonGR_alpha, lambda_eff, distance)
Converting from the effective wavelength-like parameter to lambda_A: lambda_A = lambda_{eff}*(D_alpha...
def spline_angle_xform(delta_psi)
Returns the angle in degrees corresponding to the spline calibration parameters delta_psi.
def q2eta(q)
Utility function for converting q to eta.
def plot_one_param_pdf(posterior, plot1DParams, analyticPDF=None, analyticCDF=None, plotkde=False)
Plots a 1D histogram and (gaussian) kernel density estimate of the distribution of posterior samples ...
def det_end_time(ifo_prefix, inj)
def autocorrelation(series)
Returns an estimate of the autocorrelation function of a given series.
def array_ang_sep(vec1, vec2)
Find angles between vectors in rows of numpy arrays.
def physical2radiationFrame(theta_jn, phi_jl, tilt1, tilt2, phi12, a1, a2, m1, m2, fref, phiref)
changes for testing Lorentz violations made till here
def plot_sky_map(hpmap, outdir, inj=None, nest=True)
Plots a sky map from a healpix map, optionally including an injected position.
def mc2q(mc, eta)
Utility function for converting mchirp,eta to new mass ratio q (m2/m1).
def kdtree_bin2Step(posterior, coord_names, confidence_levels, initial_boundingbox=None, samples_per_bin=10, injCoords=None, alternate=False, fraction=0.5, skyCoords=False)
input: posterior class instance, list of confidence levels, optional choice of initial parameter space...
def plot_calibration_pos(pos, level=.9, outpath=None)
def orbital_momentum_mag(fref, m1, m2, eta)
def plot_burst_waveform(pos=None, simburst=None, event=0, path=None, ifos=['H1', 'L1', 'V1'])
def plot_two_param_kde(posterior, plot2DkdeParams)
Plots a 2D kernel density estimate of the 2-parameter marginal posterior.
def make_1d_table(html, legend, label, pos, pars, noacf, GreedyRes, onepdfdir, sampsdir, savepdfs, greedy, analyticLikelihood, nDownsample)
def plot_label(param)
A lookup table for plot labels.
def effectiveSampleSize(samples, Nskip=1)
Compute the effective sample size, calculating the ACL using only the second half of the samples to a...
def random_split(items, fraction)
def plot_waveform(pos=None, siminspiral=None, event=0, path=None, ifos=['H1', 'L1', 'V1'])
def getDecString(radians, accuracy='auto')
def plot_psd(psd_files, outpath=None, f_min=30.)
def autocorrelation_length_estimate(series, acf=None, M=5, K=2)
Attempts to find a self-consistent estimate of the autocorrelation length of a given series.
def contigious_interval_one_param(posterior, contInt1Params, confidence_levels)
Calculates the smallest contiguous 1-parameter confidence interval for a set of given confidence leve...
def as_array(table)
Workaround for missing astropy.table.Table.as_array method, which was added in Astropy 1....
def addSample(tree, coordinates)
def cart2sph(x, y, z)
Utility function to convert cartesian coords to r,theta,phi.
def vo_nest2pos(nsresource, Nlive=None)
Parse a VO Table RESOURCE containing nested sampling output and return a VOTable TABLE element with p...
def find_ndownsample(samples, nDownsample)
Given a list of files, threshold value, and a desired number of outputs posterior samples,...
def skymap_inj_pvalue(hpmap, inj, nest=True)
Returns the greedy p-value estimate for the given injection.
def symm_tidal_params(lambda1, lambda2, q)
Calculate best tidal parameters [Eqs.
def kdtree_bin_sky_area(posterior, confidence_levels, samples_per_bin=10)
takes samples and applies a KDTree to them to return confidence levels returns confidence_intervals -...
def getRAString(radians, accuracy='auto')
def kdtree_bin_sky_volume(posterior, confidence_levels)
def plot_one_param_pdf_kde(fig, onedpos)
def q2ms(mc, q)
Utility function for converting mchirp,q to component masses.
def skyArea(bounds)
functions used in 2stage kdtree
def amplitudeMeasure(redshift, nonGR_alpha, lambda_eff, distance)
Converting to Lorentz violating parameter "A" in dispersion relation from lambda_A: A = (lambda_A/h)^...
def plot_two_param_greedy_bins_contourf(posteriors_by_name, greedy2Params, confidence_levels, colors_by_name, figsize=(7, 6), dpi=120, figposition=[0.3, 0.3, 0.5, 0.5], legend='right', hatches_by_name=None)
def confidence_interval_uncertainty(cl, cl_bounds, posteriors)
Returns a tuple (relative_change, fractional_uncertainty, percentile_uncertainty) giving the uncertai...
def ang_dist(long1, lat1, long2, lat2)
Find the angular separation of (long1,lat1) and (long2,lat2), which are specified in radians.
def pol2cart(long, lat)
Utility function to convert longitude,latitude on a unit sphere to cartesian co-ordinates.
def kdtree_bin(posterior, coord_names, confidence_levels, initial_boundingbox=None, samples_per_bin=10)
takes samples and applies a KDTree to them to return confidence levels returns confidence_intervals -...
def plot_corner(posterior, levels, parnames=None)
Make a corner plot using the triangle module (See http://github.com/dfm/corner.py)
def sph2cart(r, theta, phi)
Utility function to convert r,theta,phi to cartesian co-ordinates.
def get_end(siminspiral)
def get_inj_by_time(injections, time)
Filter injections to find the injection with end time given by time +/- 0.1s.
def array_dot(vec1, vec2)
Calculate dot products between vectors in rows of numpy arrays.
def rotation_matrix(angle, direction)
Compute general rotation matrices for a given angles and direction vectors.
def greedy_bin_two_param(posterior, greedy2Params, confidence_levels)
Determine the 2-parameter Bayesian Confidence Intervals using a greedy binning algorithm.
def chi_precessing(m1, a1, tilt1, m2, a2, tilt2)
Calculate the magnitude of the effective precessing spin following convention from Phys.
def skymap_confidence_areas(hpmap, cls)
Returns the area (in square degrees) for each confidence level with a greedy binning algorithm for th...
def readCoincXML(xml_file, trignum)
def calculate_redshift(distance, h=0.6790, om=0.3065, ol=0.6935, w0=-1.0)
Calculate the redshift from the luminosity distance measurement using the Cosmology Calculator provid...
def replace_column(table, old, new)
Workaround for missing astropy.table.Table.replace_column method, which was added in Astropy 1....
def mc2ms(mc, eta)
Utility function for converting mchirp,eta to component masses.
def array_polar_ang(vec)
Find polar angles of vectors in rows of a numpy array.
def component_momentum(m, a, theta, phi)
Calculate BH angular momentum vector.
def ROTATEY(angle, vx, vy, vz)
def DistanceMeasure(redshift, nonGR_alpha)
D_alpha = ((1+z)^(1-alpha))/H_0 * D_alpha # from eq.15 of arxiv 1110.2720 D_alpha calculated from int...
def integrand_distance(redshift, nonGR_alpha)
Following functions added for testing Lorentz violations.
def plot_two_param_kde_greedy_levels(posteriors_by_name, plot2DkdeParams, levels, colors_by_name, line_styles=__default_line_styles, figsize=(4, 3), dpi=250, figposition=[0.2, 0.2, 0.48, 0.75], legend='right', hatches_by_name=None, Npixels=50)
Plots a 2D kernel density estimate of the 2-parameter marginal posterior.
def orbital_momentum(fref, m1, m2, inclination)
Calculate orbital angular momentum vector.
def ROTATEZ(angle, vx, vy, vz)
def source_mass(mass, redshift)
Calculate source mass parameter for mass m as: m_source = m / (1.0 + z) For a parameter m.
def greedy_bin_one_param(posterior, greedy1Param, confidence_levels)
Determine the 1-parameter Bayesian Confidence Interval using a greedy binning algorithm.
def spin_angles(fref, mc, eta, incl, a1, theta1, phi1, a2=None, theta2=None, phi2=None)
Calculate physical spin angles.
def bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, phi12, quantity, fits)
Convenience functions.
Definition: nrutils.py:1311
def read_samples(filename, path=None, tablename=POSTERIOR_SAMPLES)
Read an HDF5 sample chain file.
Definition: hdf5.py:168
def draw_posterior_many(datas, Nlives, verbose=False)
Draw samples from the posteriors represented by the (Nruns, Nsamples, Nparams)-shaped array datas,...
Definition: nest2pos.py:66
def draw_N_posterior_many(datas, Nlives, Npost, verbose=False)
Draw Npost samples from the posteriors represented by the (Nruns, Nsamples, Nparams)-shaped array dat...
Definition: nest2pos.py:115