# testHeterodyneSearch_flat_evidence_default.py
# Source listing from the LALPulsar 7.1.1.1 documentation.
1"""
2A script to run lalpulsar_parameter_estimation_nested with increasing amplitude prior
3ranges. For a range of h0 ranges the nested sampling will be run and the odds ratio extracted.
4It will calculate the value of the log(odds ratio)-log(prior) and check that it is roughly
5flat as the prior range increases.
6"""
7
8import os
9import sys
10import time
11import numpy as np
12import subprocess as sp
13import h5py
14
# Skip this test (automake exit code 77 == "skipped") unless LALInference
# support was built.  Use .get() so a missing environment variable cannot
# raise KeyError; if the variable is unset we cannot confirm LALInference is
# available, so treat that the same as disabled and skip.
if os.environ.get("LALINFERENCE_ENABLED", "false") == "false":
    print("Skipping test: requires LALInference")
    sys.exit(77)
18
# Plotting is optional: fall back gracefully when matplotlib is unavailable.
# Catch ImportError (the superclass of ModuleNotFoundError) so that any
# failure to import matplotlib — not only a missing module — disables
# plotting instead of crashing the test.
try:
    import matplotlib as mpl

    # non-interactive backend: this script runs headless in CI
    mpl.use("Agg")
    import matplotlib.pyplot as pl

    doplot = True
except ImportError:
    print("matplotlib unavailable; skipping plot")
    doplot = False
29
# overall exit status: flipped to 1 if any flatness check fails below
exit_code = 0

execu = "./lalpulsar_parameter_estimation_nested"  # executable

# lalpulsar_parameter_estimation_nested runs much slower with memory
# debugging, so strip "memdbg" from the debug flags.  Use .get() so an unset
# LAL_DEBUG_LEVEL does not raise KeyError — it simply becomes empty.
os.environ["LAL_DEBUG_LEVEL"] = os.environ.get("LAL_DEBUG_LEVEL", "").replace(
    "memdbg", ""
)
print("Modified LAL_DEBUG_LEVEL='%s'" % os.environ["LAL_DEBUG_LEVEL"])
37
# create files needed to run the code

# Pulsar parameter (.par) file: a nominal pulsar at RA = DEC = 0 with a
# 100 Hz spin frequency and PEPOCH of MJD 54000.
parfile = """\
PSRJ J0000+0000
RAJ 00:00:00.0
DECJ 00:00:00.0
F0 100
PEPOCH 54000"""

parf = "test.par"
# context manager guarantees the file is flushed and closed
with open(parf, "w") as f:
    f.write(parfile)
52
# simulated heterodyned data file
datafile = "data.txt.gz"
dlen = 1440  # number of data points
dt = 600  # number of seconds between each data point
startgps = 900000000.0  # start GPS time
endgps = startgps + dt * (dlen - 1)
noisesigma = 1e-24

# column 0: evenly spaced GPS time stamps; columns 1-2: Gaussian noise
timestamps = np.linspace(startgps, endgps, dlen)
noise = noisesigma * np.random.randn(dlen, 2)
ds = np.column_stack((timestamps, noise))

# rough analytic estimate of the h0 upper limit for pure noise
ulest = 10.8 * np.sqrt(noisesigma**2 / dlen)
print("Estimated upper limit is %.4e" % (ulest))

# write the simulated data to disk
np.savetxt(datafile, ds, fmt="%.12e")

# range of upper limits on h0 in prior file (log-spaced from 5x to 500x
# the estimated upper limit)
h0uls = np.logspace(np.log10(5.0 * ulest), np.log10(500.0 * ulest), 6)

# some default inputs
dets = "H1"
Nlive = 100
Nmcmcinitial = 0
outfile = "test.hdf"
outfile_SNR = "test_SNR"
outfile_Znoise = "test_Znoise"

# two proposal configurations to compare: the code's default proposal
# (currently --ensembleWalk 3 --uniformprop 1) versus the ensemble walk
# proposal on its own
proposals = ["", "--ensembleWalk 1 --uniformprop 0"]
labels = ["Default", "Walk"]
pcolor = ["b", "r"]
max_nsigma = [2.0, 3.0]
87
# If this script was invoked via a proposal-specific filename (e.g.
# "..._walk.py"), narrow all the per-proposal lists to that single entry;
# otherwise run the full proposal list.
chosen = None
for idx, proplabel in enumerate(labels):
    if __file__.endswith("_%s.py" % proplabel.lower()):
        chosen = idx
        break

if chosen is None:
    print(f"Running {__file__} with full proposal list")
else:
    proplabel = labels[chosen]
    print(f"Running {__file__} with proposal={proplabel} extracted from filename")
    proposals = [proposals[chosen]]
    labels = [proplabel]
    pcolor = [pcolor[chosen]]
    max_nsigma = [max_nsigma[chosen]]
98
Ntests = 15  # number of times to run nested sampling for each h0 value to get average

if doplot:
    fig, ax = pl.subplots(1, 1)

walltime = time.time()  # wall-clock reference for progress reporting

for i, prop in enumerate(proposals):
    # per-proposal accumulators over the range of h0 prior upper limits
    odds_prior = []  # mean log(odds) - log(prior) for each h0 upper limit
    std_odds_prior = []  # std. dev. of log(odds) over the Ntests runs
    logpriors = []  # log(prior) for each h0 upper limit

    for h, h0ul in enumerate(h0uls):
        # prior file: H0 uniform between 0 and the current upper limit
        priorfile = "H0 uniform 0 %e\n" % h0ul

        priorf = "test.prior"
        # context manager guarantees the prior file is flushed and closed
        # before the executable reads it
        with open(priorf, "w") as f:
            f.write(priorfile)

        # log of the prior density — presumably the product of the H0,
        # phi0 (2*pi) and psi (pi/2) prior ranges; TODO confirm the angle
        # ranges against the executable's internal priors
        logprior = -np.log(h0ul * np.pi * 2.0 * (np.pi / 2.0))
        logpriors.append(logprior)

        hodds = []
        # run Ntests times to get average
        for j in range(Ntests):
            elapsed_walltime = time.time() - walltime
            print(
                "--- proposal=%i/%i h0=%i/%i test=%i/%i elapsed=%0.1fs ---"
                % (
                    i + 1,
                    len(proposals),
                    h + 1,
                    len(h0uls),
                    j + 1,
                    Ntests,
                    elapsed_walltime,
                ),
                flush=True,
            )

            # run code
            commandline = (
                "%s --detectors %s --par-file %s --input-files %s --outfile %s --prior-file %s --Nlive %d --Nmcmcinitial %d %s"
                % (
                    execu,
                    dets,
                    parf,
                    datafile,
                    outfile,
                    priorf,
                    Nlive,
                    Nmcmcinitial,
                    prop,
                )
            )

            sp.check_call(commandline, shell=True)

            # extract the log odds ratio from the HDF5 output; the context
            # manager ensures the file handle is closed even if the expected
            # dataset or attribute is missing
            with h5py.File(outfile, "r") as f:
                nestdata = f["lalinference"]["lalinference_nest"]
                hodds.append(nestdata.attrs["log_bayes_factor"])

            # clean up per-run temporary files
            for fs in (outfile, outfile_SNR, outfile_Znoise):
                os.remove(fs)

        odds_prior.append(np.mean(hodds) - logprior)
        std_odds_prior.append(np.std(hodds))

    # use reduced chi-squared value to test for "flatness": if
    # log(odds)-log(prior) is constant over the prior ranges the statistic
    # should be close to 1
    ns = np.array(odds_prior)
    p = np.sum((ns - np.mean(ns)) ** 2 / np.array(std_odds_prior) ** 2) / float(
        len(h0uls)
    )
    # NOTE(review): normalized by N rather than N-1 degrees of freedom —
    # presumably deliberate for this heuristic check
    stdchi = np.sqrt(2.0 * float(len(h0uls))) / float(
        len(h0uls)
    )  # standard deviation of chi-squared distribution
    nsigma = np.abs(p - 1.0) / stdchi

    print("Reduced chi-squared test for linear relation = %f" % (p))

    if nsigma > max_nsigma[i]:
        print(
            "This is potentially significantly (%f sigma > %f max sigma) different from a flat line"
            % (nsigma, max_nsigma[i])
        )
        exit_code = 1

    # plot figure
    if doplot:
        ax.errorbar(
            -np.array(logpriors),
            odds_prior,
            yerr=std_odds_prior,
            fmt="o",
            label=labels[i],
            color=pcolor[i],
        )
        ax.set_xlabel("log(prior volume)")
        ax.set_ylabel("log(odds ratio)-log(prior)")
203
# write out the comparison plot, if plotting was available
if doplot:
    pl.legend()
    fig.savefig("odds_prior.png")
    print("Saved plot to 'odds_prior.png'")

# remove the input files generated at the top of the script
for tmpfile in (priorf, parf, datafile):
    os.remove(tmpfile)

sys.exit(exit_code)