Loading [MathJax]/extensions/TeX/AMSsymbols.js
LALPulsar 7.1.1.1-5e288d3
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
pulsarpputils.py
Go to the documentation of this file.
1# -*- coding: utf-8 -*-
2#
3# pulsarpputils.py
4#
5# Copyright 2012
6# Matthew Pitkin <matthew.pitkin@ligo.org>
7#
8#
9# This program is free software; you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation; either version 2 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program; if not, write to the Free Software
21# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
22# MA 02110-1301, USA.
23
24# known pulsar analysis post-processing utilities
25
# Many functions in this are taken from, or derived from, equivalents available in
27# the PRESTO pulsar software package http://www.cv.nrao.edu/~sransom/presto/
28
29from __future__ import print_function, division
30
31import sys
32import math
33import cmath
34import os
35import numpy as np
36import struct
37import re
38import h5py
39
40try:
41 from scipy.integrate import trapezoid, cumulative_trapezoid
42except ImportError:
43 # FIXME: Remove this once we require scipy >=1.6.0.
44 from scipy.integrate import trapz as trapezoid, cumtrapz as cumulative_trapezoid
45
46from scipy.interpolate import interp1d
47
48try:
49 from scipy.special import logsumexp
50except ImportError: # scipy < 0.19.0
51 from scipy.misc import logsumexp
52
# some common constants taken from psr_constants.py in PRESTO
ARCSECTORAD = 4.8481368110953599358991410235794797595635330237270e-6
RADTOARCSEC = 206264.80624709635515647335733077861319665970087963
SECTORAD = 7.2722052166430399038487115353692196393452995355905e-5
RADTOSEC = 13750.987083139757010431557155385240879777313391975
RADTODEG = 57.295779513082320876798154814105170332405472466564
DEGTORAD = 1.7453292519943295769236907684886127134428718885417e-2
RADTOHRS = 3.8197186342054880584532103209403446888270314977710
HRSTORAD = 2.6179938779914943653855361527329190701643078328126e-1
PI = 3.1415926535897932384626433832795028841971693993751
TWOPI = 6.2831853071795864769252867665590057683943387987502
PIBYTWO = 1.5707963267948966192313216916397514420985846996876
SECPERDAY = 86400.0
SECPERJULYR = 31557600.0
KMPERPC = 3.0856776e13
KMPERKPC = 3.0856776e16
Tsun = 4.925490947e-6  # sec
Msun = 1.9891e30  # kg
Mjup = 1.8987e27  # kg
Rsun = 6.9551e8  # m
Rearth = 6.378e6  # m
SOL = 299792458.0  # m/s
MSUN = 1.989e30  # kg
G = 6.673e-11  # m^3/s^2/kg
C = SOL
KPC = 3.0856776e19  # kiloparsec in metres
I38 = 1e38  # moment of inertia kg m^2
80
81
82# some angle conversion functions taken from psr_utils.py in PRESTO
def rad_to_dms(rad):
    """
    rad_to_dms(rad):
    Convert radians to degrees, minutes, and seconds of arc.
    """
    sign = -1 if rad < 0.0 else 1
    arc = RADTODEG * np.fmod(np.fabs(rad), math.pi)
    degs = int(arc)
    arc = (arc - degs) * 60.0
    mins = int(arc)
    secs = (arc - mins) * 60.0
    # when the degrees field is zero it cannot carry the sign, so put the
    # sign on the minutes and seconds fields instead
    if sign == -1 and degs == 0:
        return (sign * degs, sign * mins, sign * secs)
    return (sign * degs, mins, secs)
101
102
def dms_to_rad(deg, mins, sec):
    """
    dms_to_rad(deg, min, sec):
    Convert degrees, minutes, and seconds of arc to radians.
    """
    # the sign is negative when the degrees field is negative, or when the
    # degrees field is zero and the minutes or seconds carry the sign
    if deg < 0.0 or (deg == 0.0 and (mins < 0.0 or sec < 0.0)):
        sign = -1
    else:
        sign = 1
    total_arcsec = 60.0 * (60.0 * np.fabs(deg) + np.fabs(mins)) + np.fabs(sec)
    return sign * ARCSECTORAD * total_arcsec
119
120
def dms_to_deg(deg, mins, sec):
    """
    dms_to_deg(deg, min, sec):
    Convert degrees, minutes, and seconds of arc to degrees.
    """
    # convert to radians first, then scale back to decimal degrees
    rad = dms_to_rad(deg, mins, sec)
    return RADTODEG * rad
127
128
def rad_to_hms(rad):
    """
    rad_to_hms(rad):
    Convert radians to hours, minutes, and seconds of arc.
    """
    # wrap the angle into [0, 2*pi)
    wrapped = np.fmod(rad, 2.0 * math.pi)
    if wrapped < 0.0:
        wrapped += 2.0 * math.pi
    hours_float = RADTOHRS * wrapped
    hours = int(hours_float)
    rem = (hours_float - hours) * 60.0
    mins = int(rem)
    secs = (rem - mins) * 60.0
    return (hours, mins, secs)
143
144
def hms_to_rad(hour, mins, sec):
    """
    hms_to_rad(hour, min, sec):
    Convert hours, minutes, and seconds of arc to radians
    """
    sign = -1 if hour < 0.0 else 1
    total_sec = 60.0 * (60.0 * np.fabs(hour) + np.fabs(mins)) + np.fabs(sec)
    return sign * SECTORAD * total_sec
157
158
def coord_to_string(h_or_d, m, s):
    """
    coord_to_string(h_or_d, m, s):
    Return a formatted string of RA or DEC values as
    'hh:mm:ss.ssss' if RA, or 'dd:mm:ss.ssss' if DEC.
    """
    # a leading '-' is needed when the value is negative, including when the
    # zero-valued leading field cannot carry the sign itself
    sign = ""
    if h_or_d < 0:
        sign = "-"
    elif abs(h_or_d) == 0 and ((m < 0.0) or (s < 0.0)):
        sign = "-"

    h_or_d, m, s = abs(h_or_d), abs(m), abs(s)
    # below ~10 the seconds need an explicit leading zero: %.4f won't pad
    if s >= 9.9995:
        return sign + "%.2d:%.2d:%.4f" % (h_or_d, m, s)
    return sign + "%.2d:%.2d:0%.4f" % (h_or_d, m, s)
177
178
def rad_to_string(rad, ra_or_dec):
    """
    rad_to_string(rad, ra_or_dec):
    Convert an angle in radians to hours/degrees, minutes seconds and output
    it as a string in the format 'hh:mm:ss.ssss' if RA, or 'dd:mm:ss.ssss' if DEC.
    Whether to use hours or degrees is set by whether ra_or_dec is 'RA' or 'DEC'
    """
    kind = ra_or_dec.upper()
    if kind == "RA":
        parts = rad_to_hms(rad)
    elif kind == "DEC":
        parts = rad_to_dms(rad)
    else:
        raise ValueError(
            "Unrecognised option: Expected 'ra_or_dec' to be 'RA' or 'DEC'"
        )

    return coord_to_string(*parts)
196
197
def ra_to_rad(ra_string):
    """
    ra_to_rad(ar_string):
    Given a string containing RA information as
    'hh:mm:ss.ssss', return the equivalent decimal
    radians. Also deal with cases where input
    string is just hh:mm, or hh.
    """
    fields = ra_string.split(":")
    nfields = len(fields)
    if nfields == 3:
        return hms_to_rad(int(fields[0]), int(fields[1]), float(fields[2]))
    if nfields == 2:
        return hms_to_rad(int(fields[0]), int(fields[1]), 0.0)
    if nfields == 1:
        return hms_to_rad(float(fields[0]), 0.0, 0.0)
    # str.split always yields at least one field, so this is unreachable in
    # practice, but is kept for parity with the DEC parser
    print("Problem parsing RA string %s" % ra_string, file=sys.stderr)
    sys.exit(1)
216
217
def dec_to_rad(dec_string):
    """
    dec_to_rad(dec_string):
    Given a string containing DEC information as
    'dd:mm:ss.ssss', return the equivalent decimal
    radians. Also deal with cases where input string
    is just dd:mm or dd
    """
    parts = dec_string.split(":")
    # a degrees field of "-0" (e.g. "-00:12:30") parses to zero, losing the
    # sign, so carry the '-' over to the minutes and seconds fields instead
    if "-" in parts[0] and float(parts[0]) == 0.0:
        neg = "-"
    else:
        neg = ""

    nfields = len(parts)
    if nfields == 3:
        return dms_to_rad(int(parts[0]), int(neg + parts[1]), float(neg + parts[2]))
    if nfields == 2:
        return dms_to_rad(int(parts[0]), int(neg + parts[1]), 0.0)
    if nfields == 1:
        return dms_to_rad(float(parts[0]), 0.0, 0.0)
    print("Problem parsing DEC string %s" % dec_string, file=sys.stderr)
    sys.exit(1)
241
242
def p_to_f(p, pd, pdd=None):
    """
    p_to_f(p, pd, pdd=None):
    Convert period, period derivative and period second
    derivative to the equivalent frequency counterparts.
    Will also convert from f to p (the transformation is
    its own inverse).

    Returns [f, fd] if pdd is None, else [f, fd, fdd].
    """
    f = 1.0 / p
    fd = -pd / (p * p)
    # compare with None by identity rather than equality (PEP 8)
    if pdd is None:
        return [f, fd]
    if pdd == 0.0:
        fdd = 0.0
    else:
        # fdd = 2*pd^2/p^3 - pdd/p^2
        fdd = 2.0 * pd * pd / (p**3.0) - pdd / (p * p)

    return [f, fd, fdd]
261
262
def pferrs(porf, porferr, pdorfd=None, pdorfderr=None):
    """
    pferrs(porf, porferr, pdorfd=None, pdorfderr=None):
    Calculate the period or frequency errors and
    the pdot or fdot errors from the opposite one.

    Returns [f-or-p, its error] when no derivative is given, otherwise
    [f-or-p, its error, fdot-or-pdot, its error].
    """
    # compare with None by identity rather than equality (PEP 8)
    if pdorfd is None:
        return [1.0 / porf, porferr / porf**2.0]

    # standard error propagation through f = 1/p and fd = -pd/p^2
    forperr = porferr / porf**2.0
    fdorpderr = np.sqrt(
        (4.0 * pdorfd**2.0 * porferr**2.0) / porf**6.0
        + pdorfderr**2.0 / porf**4.0
    )
    [forp, fdorpd] = p_to_f(porf, pdorfd)

    return [forp, forperr, fdorpd, fdorpderr]
280
281
282# class to read in a pulsar par file - this is heavily based on the function
283# in parfile.py in PRESTO
# keys of the TEMPO(2) .par file parameters that are parsed as floats,
# followed by those kept as strings (heavily based on parfile.py in PRESTO)
float_keys = (
    "F F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 "
    "PEPOCH POSEPOCH DM START FINISH NTOA TRES TZRMJD TZRFRQ TZRSITE NITS "
    "A1 XDOT E ECC EDOT T0 PB PBDOT OM OMDOT "
    "EPS1 EPS2 EPS1DOT EPS2DOT TASC LAMBDAPIN BETA RA_RAD DEC_RAD "
    "GAMMA SINI M2 MTOT FB0 FB1 FB2 ELAT ELONG PMRA PMDEC DIST "
    "PB_2 PB_3 T0_2 T0_3 A1_2 A1_3 OM_2 OM_3 ECC_2 ECC_3 "
    "PX KIN KOM A0 B0 D_AOP "
    # GW parameters
    "H0 COSIOTA PSI PHI0 THETA I21 I31 C22 HPLUS HCROSS "
    "C21 PHI22 PHI21 SNR COSTHETA IOTA Q22"
).split()
str_keys = "FILE PSR PSRJ NAME RAJ DECJ RA DEC EPHEM CLK BINARY UNITS".split()
390
391
393 def __init__(self, parfilenm):
394 """
395 This class parses a TEMPO(2)-style pulsar parameter file. If possible all parameters
396 are converted into SI units and angles are in radians. Epochs will be converted from
397 MJD values into GPS times.
398 """
399 self.FILE = parfilenm
400 pf = open(parfilenm)
401 for line in pf.readlines():
402 # ignore empty lines (i.e. containing only whitespace)
403 if not line.strip():
404 continue
405
406 # Convert any 'D-' or 'D+' to 'E-' or 'E+'
407 line = line.replace("D-", "E-")
408 line = line.replace("D+", "E+")
409 # also check for lower case
410 line = line.replace("d-", "e-")
411 line = line.replace("d+", "e+")
412 splitline = line.split()
413
414 # get all upper case version in case lower case in par file
415 key = splitline[0].upper()
416
417 if key in str_keys:
418 setattr(self, key, splitline[1])
419 elif key in float_keys:
420 try:
421 setattr(self, key, float(splitline[1]))
422 except:
423 continue
424
425 if (
426 len(splitline) == 3
427 ): # Some parfiles don't have flags, but do have errors
428 if splitline[2] not in ["0", "1"]:
429 setattr(self, key + "_ERR", float(splitline[2]))
430 setattr(self, key + "_FIT", 0) # parameter was not fit
431
432 if len(splitline) == 4:
433 if splitline[2] == "1": # parameter was fit
434 setattr(self, key + "_FIT", 1)
435 else:
436 setattr(self, key + "_FIT", 0)
437 setattr(self, key + "_ERR", float(splitline[3]))
438
439 # sky position
440 if hasattr(self, "RAJ"):
441 setattr(self, "RA_RAD", ra_to_rad(self.RAJ))
442
443 # set RA error in rads (rather than secs)
444 if hasattr(self, "RAJ_ERR"):
445 setattr(self, "RA_RAD_ERR", hms_to_rad(0, 0, self.RAJ_ERR))
446
447 if hasattr(self, "RAJ_FIT"):
448 setattr(self, "RA_RAD_FIT", self["RAJ_FIT"])
449 if hasattr(self, "DECJ"):
450 setattr(self, "DEC_RAD", dec_to_rad(self.DECJ))
451
452 # set DEC error in rads (rather than arcsecs)
453 if hasattr(self, "DECJ_ERR"):
454 setattr(self, "DEC_RAD_ERR", dms_to_rad(0, 0, self.DECJ_ERR))
455
456 if hasattr(self, "DECJ_FIT"):
457 setattr(self, "DEC_RAD_FIT", self["DECJ_FIT"])
458
459 # convert proper motions to rads/sec from mas/year
460 for pv in ["RA", "DEC"]:
461 if hasattr(self, "PM" + pv):
462 pmv = self["PM" + pv]
463 setattr(self, "PM" + pv + "_ORIGINAL", pmv) # save original value
464 setattr(
465 self, "PM" + pv, pmv * np.pi / (180.0 * 3600.0e3 * 365.25 * 86400.0)
466 )
467
468 if hasattr(self, "PM" + pv + "_ERR"):
469 pmve = self["PM" + pv + "_ERR"]
470 setattr(
471 self, "PM" + pv + "_ERR_ORIGINAL", pmv
472 ) # save original value
473 setattr(
474 self,
475 "PM" + pv + "_ERR",
476 pmve * np.pi / (180.0 * 3600.0e3 * 365.25 * 86400.0),
477 )
478
479 # periods and frequencies
480 if hasattr(self, "P"):
481 setattr(self, "P0", self.P)
482 if hasattr(self, "P0"):
483 setattr(self, "F0", 1.0 / self.P0)
484 if hasattr(self, "F0"):
485 setattr(self, "P0", 1.0 / self.F0)
486 if hasattr(self, "FB0"):
487 setattr(self, "PB", (1.0 / self.FB0) / 86400.0)
488 if hasattr(self, "P0_ERR"):
489 if hasattr(self, "P1_ERR"):
490 f, ferr, fd, fderr = pferrs(self.P0, self.P0_ERR, self.P1, self.P1_ERR)
491 setattr(self, "F0_ERR", ferr)
492 setattr(self, "F1", fd)
493 setattr(self, "F1_ERR", fderr)
494 else:
495 if hasattr(self, "P1"):
496 (
497 f,
498 fd,
499 ) = p_to_f(self.P0, self.P1)
500 setattr(self, "F0_ERR", self.P0_ERR / (self.P0 * self.P0))
501 setattr(self, "F1", fd)
502 if hasattr(self, "F0_ERR"):
503 if hasattr(self, "F1_ERR"):
504 p, perr, pd, pderr = pferrs(self.F0, self.F0_ERR, self.F1, self.F1_ERR)
505 setattr(self, "P0_ERR", perr)
506 setattr(self, "P1", pd)
507 setattr(self, "P1_ERR", pderr)
508 else:
509 if hasattr(self, "F1"):
510 (
511 p,
512 pd,
513 ) = p_to_f(self.F0, self.F1)
514 setattr(self, "P0_ERR", self.F0_ERR / (self.F0 * self.F0))
515 setattr(self, "P1", pd)
516
517 # convert epochs (including binary epochs) to GPS if possible
518 try:
519 from lalpulsar import TTMJDtoGPS
520
521 for epoch in [
522 "PEPOCH",
523 "POSEPOCH",
524 "DMEPOCH",
525 "T0",
526 "TASC",
527 "T0_2",
528 "T0_3",
529 ]:
530 if hasattr(self, epoch):
531 setattr(
532 self, epoch + "_ORIGINAL", self[epoch]
533 ) # save original value
534 setattr(self, epoch, TTMJDtoGPS(self[epoch]))
535
536 if hasattr(
537 self, epoch + "_ERR"
538 ): # convert errors from days to seconds
539 setattr(
540 self, epoch + "_ERR_ORIGINAL", self[epoch + "_ERR"]
541 ) # save original value
542 setattr(self, epoch + "_ERR", self[epoch + "_ERR"] * SECPERDAY)
543 except:
544 print(
545 "Could not convert epochs to GPS times. They are all still MJD values.",
546 file=sys.stderr,
547 )
548
549 # distance and parallax (distance: kpc -> metres, parallax: mas -> rads)
550 convfacs = {"DIST": KPC, "PX": 1e-3 * ARCSECTORAD}
551 for item in convfacs:
552 if hasattr(self, item): # convert kpc to metres
553 setattr(self, item + "_ORIGINAL", self[item]) # save original value
554 setattr(self, item, self[item] * convfacs[item])
555
556 if hasattr(self, item + "_ERR"):
557 setattr(
558 self, item + "_ERR_ORIGINAL", self[item + "_ERR"]
559 ) # save original value
560 setattr(self, item + "_ERR", self[item + "_ERR"] * convfacs[item])
561
562 # binary parameters
563 # omega (angle of periastron) parameters (or others requiring conversion from degs to rads)
564 for om in ["OM", "OM_2", "OM_3", "KIN", "KOM"]:
565 if hasattr(self, om): # convert OM from degs to rads
566 setattr(self, om, self[om] / RADTODEG)
567
568 if hasattr(self, om + "_ERR"):
569 setattr(self, om + "_ERR", self[om + "_ERR"] / RADTODEG)
570
571 # period
572 for pb in ["PB", "PB_2", "PB_3"]:
573 if hasattr(self, pb): # convert PB from days to seconds
574 setattr(self, pb + "_ORIGINAL", self[pb]) # save original value
575 setattr(self, pb, self[pb] * SECPERDAY)
576
577 if hasattr(self, pb + "_ERR"):
578 setattr(
579 self, pb + "_ERR_ORIGINAL", self[pb + "_ERR"]
580 ) # save original value
581 setattr(self, pb + "_ERR", self[pb + "_ERR"] * SECPERDAY)
582
583 # OMDOT
584 if hasattr(self, "OMDOT"): # convert from deg/year to rad/sec
585 setattr(self, "OMDOT_ORIGINAL", self["OMDOT"]) # save original value
586 setattr(self, "OMDOT", self["OMDOT"] / (RADTODEG * 365.25 * SECPERDAY))
587
588 if hasattr(self, "OMDOT_ERR"):
589 setattr(
590 self, "OMDOT_ERR_ORIGINAL", self["OMDOT_ERR"]
591 ) # save original value
592 setattr(
593 self,
594 "OMDOT_ERR",
595 self["OMDOT_ERR"] / (RADTODEG * 365.25 * SECPERDAY),
596 )
597
598 if hasattr(self, "EPS1") and hasattr(self, "EPS2"):
599 ecc = math.sqrt(self.EPS1 * self.EPS1 + self.EPS2 * self.EPS2)
600 omega = math.atan2(self.EPS1, self.EPS2)
601 setattr(self, "E", ecc)
602 setattr(self, "OM", omega) # omega in rads
603 setattr(self, "T0", self.TASC + self.PB * omega / TWOPI)
604 if (
605 hasattr(self, "PB")
606 and hasattr(self, "A1")
607 and not (hasattr(self, "E") or hasattr(self, "ECC"))
608 ):
609 setattr(self, "E", 0.0)
610 if (
611 hasattr(self, "T0")
612 and not hasattr(self, "TASC")
613 and hasattr(self, "OM")
614 and hasattr(self, "PB")
615 ):
616 setattr(self, "TASC", self.T0 - self.PB * self.OM / TWOPI)
617
618 # binary unit conversion for small numbers (TEMPO2 checks if these are > 1e-7 and if so then the units are in 1e-12) - errors are not converted
619 for binu in ["XDOT", "PBDOT", "EDOT", "EPS1DOT", "EPS2DOT", "XPBDOT"]:
620 if hasattr(self, binu):
621 setattr(self, binu + "_ORIGINAL", self[binu]) # save original value
622 if np.abs(self[binu]) > 1e-7:
623 setattr(self, binu, self[binu] * 1.0e-12)
624
625 # check value is not extremely large due to ATNF catalogue error
626 if self[binu] > 10000.0: # set to zero
627 setattr(self, binu, 0.0)
628
629 # masses
630 for mass in ["M2", "MTOT"]:
631 if hasattr(self, mass): # convert solar masses to kg
632 setattr(self, mass + "_ORIGINAL", self[mass]) # save original value
633 setattr(self, mass, self[mass] * MSUN)
634
635 if hasattr(self, mass + "_ERR"):
636 setattr(
637 self, mass + "_ERR_ORIGINAL", self[mass + "_ERR"]
638 ) # save original value
639 setattr(self, mass + "_ERR", self[mass + "_ERR"] * MSUN)
640
641 # D_AOP
642 if hasattr(self, "D_AOP"): # convert from inverse arcsec to inverse radians
643 setattr(self, "D_AOP_ORIGINAL", self["D_AOP"]) # save original value
644 setattr(self, "D_AOP", self["D_AOP"] * RADTODEG * 3600.0)
645
646 pf.close()
647
648 def __getitem__(self, key):
649 try:
650 par = getattr(self, key)
651 except:
652 par = None
653
654 return par
655
656 def __str__(self):
657 out = ""
658 for k, v in self.__dict__.items():
659 if k[:2] != "__":
660 if isinstance(self.__dict__[k], string_type):
661 out += "%10s = '%s'\n" % (k, v)
662 else:
663 out += "%10s = %-20.15g\n" % (k, v)
664
665 return out
666
667
668# class to read in a nested sampling prior file
    def __init__(self, priorfilenm):
        """
        Parse a nested sampling prior file.  Each recognised parameter is
        stored as a two-element list attribute [lower, upper] on this
        object; RA and DEC ranges additionally get formatted string
        versions in RA_STR/DEC_STR.
        """
        # remember which file the priors came from
        self.FILE = priorfilenm
        pf = open(priorfilenm)
        for line in pf.readlines():
            splitline = line.split()

            # get all upper case version in case lower case in par file
            key = splitline[0].upper()

            # columns 2 and 3 hold the lower and upper prior bounds
            # (column 1 — presumably the prior type — is not stored)
            if key in str_keys:
                # everything in a prior files should be numeric
                setattr(self, key, [float(splitline[2]), float(splitline[3])])
            elif key in float_keys:
                setattr(self, key, [float(splitline[2]), float(splitline[3])])

        # get sky positions in rads as strings 'dd/hh:mm:ss.s'
        if hasattr(self, "RA"):
            hl, ml, sl = rad_to_hms(self.RA[0])
            rastrl = coord_to_string(hl, ml, sl)
            hu, mu, su = rad_to_hms(self.RA[1])
            rastru = coord_to_string(hu, mu, su)
            setattr(self, "RA_STR", [rastrl, rastru])

        if hasattr(self, "DEC"):
            dl, ml, sl = rad_to_dms(self.DEC[0])
            decstrl = coord_to_string(dl, ml, sl)
            du, mu, su = rad_to_dms(self.DEC[1])
            decstru = coord_to_string(du, mu, su)
            setattr(self, "DEC_STR", [decstrl, decstru])

        pf.close()
701
702 def __getitem__(self, key):
703 try:
704 atr = getattr(self, key)
705 except:
706 atr = None
707
708 return atr
709
710 def __str__(self):
711 out = ""
712 for k, v in self.__dict__.items():
713 if k[:2] != "__":
714 out += "%10s = %-20.15g, %-20.15g\n" % (k, float(v[0]), float(v[1]))
715
716 return out
717
718
719# Function to return a pulsar's strain spin-down limit given its spin frequency
720# (Hz), spin-down (Hz/s) and distance (kpc). The canonical value of moment of
721# inertia of 1e38 kg m^2 is used
def spin_down_limit(freq, fdot, dist):
    """Return the spin-down strain limit for a pulsar with spin frequency
    freq (Hz), spin-down fdot (Hz/s) and distance dist (kpc), assuming the
    canonical moment of inertia of 1e38 kg m^2."""
    inner = (5.0 / 2.0) * (G / C**3) * I38 * np.fabs(fdot) / freq
    return np.sqrt(inner) / (dist * KPC)
728
729
# Function to convert a pulsar strain into ellipticity assuming the canonical
731# moment of inertia
def h0_to_ellipticity(h0, freq, dist):
    """Convert a strain amplitude h0 at rotation frequency freq (Hz) and
    distance dist (kpc) into a fiducial ellipticity (canonical moment of
    inertia of 1e38 kg m^2)."""
    return (
        h0 * C**4.0 * dist * KPC / (16.0 * np.pi**2 * G * I38 * freq**2)
    )
736
737
738# Function to convert a pulsar strain into a mass quadrupole moment
def h0_to_quadrupole(h0, freq, dist):
    """Convert a strain amplitude h0 at rotation frequency freq (Hz) and
    distance dist (kpc) into a mass quadrupole moment Q22."""
    return (
        np.sqrt(15.0 / (8.0 * np.pi))
        * h0 * C**4.0 * dist * KPC
        / (16.0 * np.pi**2 * G * freq**2)
    )
750
751
# Function to convert quadrupole moment to strain
def quadrupole_to_h0(q22, freq, dist):
    """Convert a mass quadrupole moment Q22 at rotation frequency freq (Hz)
    and distance dist (kpc) into a strain amplitude h0 (the inverse of
    h0_to_quadrupole)."""
    return (
        q22 * np.sqrt((8.0 * np.pi) / 15.0)
        * 16.0 * np.pi**2 * G * freq**2
        / (C**4.0 * dist * KPC)
    )
765
766
767# function to convert the psi' and phi0' coordinates used in nested sampling
# into the standard psi and phi0 coordinates (using vectors of those parameters)
def phipsiconvert(phipchain, psipchain):
    """
    Convert chains of the rotated (phi0', psi') nested-sampling coordinates
    back into chains of the standard (phi0, psi) coordinates, wrapping psi
    into [-pi/4, pi/4] and phi0 into [0, 2*pi].
    """
    theta = math.atan2(1, 2)
    sin_t = math.sin(theta)
    cos_t = math.cos(theta)

    phichain = []
    psichain = []

    for idx in range(len(phipchain)):
        phip = phipchain[idx]
        psip = psipchain[idx]

        # invert the rotation into the primed coordinates
        phi0 = (phip - psip) / (2.0 * sin_t)
        psi = (phip + psip) / (2.0 * cos_t)

        # fold psi back into +/- pi/4, compensating phi0 by pi
        if math.fabs(psi) > math.pi / 4.0:
            phi0 += math.pi

            if psi > math.pi / 4.0:
                psi = -(math.pi / 4.0) + math.fmod(psi + (math.pi / 4.0), math.pi / 2.0)
            else:
                psi = (math.pi / 4.0) - math.fmod((math.pi / 4.0) - psi, math.pi / 2.0)

        # map phi0 into the 0 -> 2pi range
        if phi0 > 2.0 * math.pi:
            phi0 = math.fmod(phi0, 2.0 * math.pi)
        else:
            phi0 = 2.0 * math.pi - math.fmod(2.0 * math.pi - phi0, 2.0 * math.pi)

        phichain.append(phi0)
        psichain.append(psi)

    return phichain, psichain
804
805
806# function to create histogram plot of the 1D posterior (potentially for
807# multiple IFOs) for a parameter (param). If an upper limit is given then
808# that will be output
810 poslist,
811 param,
812 ifos,
813 parambounds=[float("-inf"), float("inf")],
814 nbins=50,
815 upperlimit=0,
816 overplot=False,
817 parfile=None,
818 mplparams=False,
819):
820 import matplotlib
821 from matplotlib import pyplot as plt
822 from lalpulsar.pulsarhtmlutils import paramlatexdict
823
824 # create list of figures
825 myfigs = []
826
827 # create a list of upper limits
828 ulvals = []
829
830 # set some matplotlib defaults for hist
831 if not mplparams:
832 mplparams = {
833 "backend": "Agg",
834 "text.usetex": True, # use LaTeX for all text
835 "axes.linewidth": 0.5, # set axes linewidths to 0.5
836 "axes.grid": True, # add a grid
837 "grid.linewidth": 0.5,
838 "font.family": "serif",
839 "font.size": 12,
840 }
841
842 matplotlib.rcParams.update(mplparams)
843
844 # ifos line colour specs
845 coldict = {"H1": "r", "H2": "c", "L1": "g", "V1": "b", "G1": "m", "Joint": "k"}
846
847 # param name for axis label
848 try:
849 paraxis = paramlatexdict[param.upper()]
850 except:
851 paraxis = param
852
853 ymax = []
854
855 # if a par file object is given expect that we have an injection file
856 # containing the injected value to overplot on the histgram
857 parval = None
858 if parfile:
859 parval = parfile[param.upper()]
860
861 if ifos == None:
862 # default to just output colour for H1
863 ifos = ["H1"]
864
865 # loop over ifos
866 for idx, ifo in enumerate(ifos):
867 # check whether to plot all figures on top of each other
868 if overplot and idx == 0:
869 myfig = plt.figure(figsize=(4, 4), dpi=200)
870 plt.hold(True)
871 elif not overplot:
872 myfig = plt.figure(figsize=(4, 4), dpi=200)
873
874 pos = poslist[idx]
875
876 pos_samps = pos[param].samples
877
878 # get a normalised histogram for each
879 n, bins = hist_norm_bounds(
880 pos_samps, int(nbins), parambounds[0], parambounds[1]
881 )
882
883 # plot histogram
884 plt.step(bins, n, color=coldict[ifo])
885
886 if "h0" not in param:
887 plt.xlim(parambounds[0], parambounds[1])
888
889 plt.xlabel(r"" + paraxis, fontsize=14, fontweight=100)
890 plt.ylabel(r"Probability Density", fontsize=14, fontweight=100)
891 myfig.subplots_adjust(left=0.18, bottom=0.15) # adjust size
892
893 if not overplot:
894 plt.ylim(0, n.max() + 0.1 * n.max())
895 # plt.legend(ifo)
896 # set background colour of axes
897 ax = plt.gca()
898 ax.set_axis_bgcolor("#F2F1F0")
899 myfigs.append(myfig)
900 else:
901 ymax.append(n.max() + 0.1 * n.max())
902
903 # if upper limit is needed then integrate posterior using trapezium rule
904 if upperlimit != 0:
905 ct = cumulative_trapezoid(n, bins)
906
907 # prepend a zero to ct
908 ct = np.insert(ct, 0, 0)
909
910 # use linear interpolation to find the value at 'upper limit'
911 ctu, ui = np.unique(ct, return_index=True)
912 intf = interp1d(ctu, bins[ui], kind="linear")
913 ulvals.append(intf(float(upperlimit)))
914
915 # plot parameter values
916 if parval:
917 if not overplot:
918 plt.hold(True)
919 plt.axvline(parval, color="k", ls="--", linewidth=1.5)
920 else:
921 plt.axvline(parval, color="k", ls="--", linewidth=1.5)
922
923 if overplot:
924 plt.ylim(0, max(ymax))
925 # plt.legend(ifos)
926 ax = plt.gca()
927 ax.set_axis_bgcolor("#F2F1F0")
928 plt.hold(False)
929 myfigs.append(myfig)
930
931 return myfigs, ulvals
932
933
934# function to return an upper limit from a posteriors: pos is an array of posteriors samples for a
935# particular parameter
937 pos, upperlimit=0.95, parambounds=[float("-inf"), float("inf")], nbins=50
938):
939 ulval = 0
940
941 # get a normalised histogram of posterior samples
942 n, bins = hist_norm_bounds(pos, int(nbins), parambounds[0], parambounds[1])
943
944 # if upper limit is needed then integrate posterior using trapezium rule
945 if upperlimit != 0:
946 ct = cumulative_trapezoid(n, bins)
947
948 # prepend a zero to ct
949 ct = np.insert(ct, 0, 0)
950
951 # use linear interpolation to find the value at 'upper limit'
952 ctu, ui = np.unique(ct, return_index=True)
953 intf = interp1d(ctu, bins[ui], kind="linear")
954 ulval = intf(float(upperlimit))
955
956 return ulval
957
958
def upper_limit_greedy(pos, upperlimit=0.95, nbins=100):
    """
    Compute an upper limit from posterior samples 'pos' by histogramming
    them into nbins bins and accumulating probability bin-by-bin until the
    requested credible fraction 'upperlimit' is exceeded, then linearly
    interpolating within the final bin.
    """
    counts, edges = np.histogram(pos, bins=nbins)
    binwidth = edges[1] - edges[0]  # width of a histogram bin

    total = len(pos)
    cumfrac = 0.0
    lastfrac = 0.0
    nused = 0
    for c in counts:
        lastfrac = cumfrac
        cumfrac += float(c) / total
        nused += 1
        if cumfrac > upperlimit:
            break

    # linearly interpolate across the bin in which the limit was crossed
    return edges[nused - 1] + (upperlimit - lastfrac) * (
        binwidth / (cumfrac - lastfrac)
    )
976
977
978# function to plot a posterior chain (be it MCMC chains or nested samples)
979# the input should be a list of posteriors for each IFO, and the parameter
980# required, the list of IFO. grr is a list of dictionaries giving
981# the Gelman-Rubins statistics for the given parameter for each IFO.
# If withhist is set then it will also output a histogram, with withhist number
983# of bins
def plot_posterior_chain(poslist, param, ifos, grr=None, withhist=0, mplparams=False):
    """
    Plot the posterior sample chain (MCMC chain or nested samples) of the
    parameter 'param' for each detector in 'ifos'.  'poslist' is a list of
    posterior objects, one per IFO; 'grr', if given, is a list of dicts of
    Gelman-Rubin statistics per IFO used to build a legend; if 'withhist'
    is non-zero a side histogram with that many bins is added; 'mplparams'
    optionally overrides the matplotlib rc defaults.  Returns the figure,
    or None if matplotlib's gridspec cannot be imported.
    """
    import matplotlib
    from matplotlib import pyplot as plt
    from lalpulsar.pulsarhtmlutils import paramlatexdict

    try:
        from matplotlib import gridspec
    except:
        return None

    # default plotting setup (LaTeX text on the Agg backend)
    if not mplparams:
        mplparams = {
            "backend": "Agg",
            "text.usetex": True,  # use LaTeX for all text
            "axes.linewidth": 0.5,  # set axes linewidths to 0.5
            "axes.grid": True,  # add a grid
            "grid.linewidth": 0.5,
            "font.family": "sans-serif",
            "font.sans-serif": "Avant Garde, Helvetica, Computer Modern Sans serif",
            "font.size": 15,
        }

    matplotlib.rcParams.update(mplparams)

    # fixed line colour for each detector
    coldict = {"H1": "r", "H2": "c", "L1": "g", "V1": "b", "G1": "m", "Joint": "k"}

    # param name for axis label
    try:
        # iota is plotted as cos(iota) below, so use the cosiota label
        if param == "iota":
            p = "cosiota"
        else:
            p = param

        paryaxis = paramlatexdict[p.upper()]
    except:
        # fall back to the raw parameter name if no LaTeX label exists
        paryaxis = param

    if grr:
        legendvals = []

    # running extrema across all IFOs, used for the final axis limits
    maxiter = 0
    maxn = 0
    minsamp = float("inf")
    maxsamp = -float("inf")

    for idx, ifo in enumerate(ifos):
        if idx == 0:
            myfig = plt.figure(figsize=(12, 4), dpi=200)
            myfig.subplots_adjust(bottom=0.15)

            if withhist:
                # chain occupies 3/4 of the width, histogram the final 1/4
                gs = gridspec.GridSpec(1, 4, wspace=0)
                ax1 = plt.subplot(gs[:-1])
                ax2 = plt.subplot(gs[-1])

        pos = list(poslist)[idx]

        # check for cosiota
        if "iota" == param:
            pos_samps = np.cos(pos["iota"].samples)
        else:
            pos_samps = pos[param].samples

        if np.min(pos_samps) < minsamp:
            minsamp = np.min(pos_samps)
        if np.max(pos_samps) > maxsamp:
            maxsamp = np.max(pos_samps)

        if withhist:
            ax1.plot(pos_samps, ".", color=coldict[ifo], markersize=1)

            # normalised histogram, drawn sideways next to the chain;
            # a trailing zero closes the final step
            n, binedges = np.histogram(pos_samps, withhist, density=True)
            n = np.append(n, 0)
            ax2.step(n, binedges, color=coldict[ifo])

            if np.max(n) > maxn:
                maxn = np.max(n)
        else:
            plt.plot(pos_samps, ".", color=coldict[ifo], markersize=1)

        if grr:
            try:
                legendvals.append(r"$R = %.2f$" % grr[idx][param])
            except:
                # missing statistic for any IFO disables the legend entirely
                legendvals = []
        else:
            legendvals = None

        if len(pos_samps) > maxiter:
            maxiter = len(pos_samps)

    if not withhist:
        ax1 = plt.gca()

    bounds = [minsamp, maxsamp]

    ax1.set_ylabel(r"" + paryaxis, fontsize=16, fontweight=100)
    ax1.set_xlabel(r"Iterations", fontsize=16, fontweight=100)

    ax1.set_xlim(0, maxiter)
    ax1.set_ylim(bounds[0], bounds[1])

    if withhist:
        ax2.set_ylim(bounds[0], bounds[1])
        ax2.set_xlim(0, maxn + 0.1 * maxn)
        ax2.set_yticklabels([])
        # NOTE(review): Axes.set_axis_bgcolor was removed in matplotlib 2.2
        # (set_facecolor is the replacement) — confirm the supported
        # matplotlib version before relying on this call
        ax2.set_axis_bgcolor("#F2F1F0")

    # add gelman-rubins stat data
    if legendvals:
        ax1.legend(legendvals, title="Gelman-Rubins test")

    return myfig
1097
1098
1099# function to read in and plot a 2D histogram from a binary file, where the files
1100# structure is of the form:
1101# a header of six doubles with:
1102# - minimum of N-dim
1103# - the step size in N-dim
1104# - N - number of N-dim values
1105# - minimum of M-dim
1106# - the step size in M-dim
1107# - M - number of M-dim values
1108# followed by an NxM double array of values.
1109# The information returned are two lists of the x and y-axis bin centres, along
1110# with a numpy array containing the histogram values.
    # Read the whole 2D binary histogram file described in the comment above:
    # a header of six doubles followed by an NxM array of doubles.
    # Returns (xbins, ybins, histarr), or (None, None, None) on any error.
    try:
        fp = open(histfile, "rb")
    except:
        print("Could not open prior file %s" % histfile, file=sys.stderr)
        return None, None, None

    try:
        pd = fp.read()  # read in all the data
    except:
        # NOTE(review): fp is left open on this error path
        print("Could not read in data from prior file %s" % histfile, file=sys.stderr)
        return None, None, None

    fp.close()

    # read in the header (6 doubles): minimum, step size and number of bins
    # for each of the two dimensions
    # try:
    header = struct.unpack("d" * 6, pd[: 6 * 8])
    # except:
    #  print >> sys.stderr, "Could not read in header data from prior file %s" % histfile
    #  return None, None, None

    # read in histogram grid (NxM doubles) following the 48-byte header
    # try:
    grid = struct.unpack("d" * int(header[2]) * int(header[5]), pd[6 * 8 :])
    # except:
    #  print >> sys.stderr, "Could not read in header data from prior file %s" % histfile
    #  return None, None, None

    header = list(header)

    # convert grid into numpy array, one row of M values at a time
    g = list(grid)  # convert to list
    histarr = np.array([g[: int(header[5])]], ndmin=2)
    for i in range(int(header[2]) - 1):
        histarr = np.append(
            histarr, [g[(i + 1) * int(header[5]) : (i + 2) * int(header[5])]], axis=0
        )

    # bin centres in each dimension: minimum + step * index
    xbins = np.linspace(
        header[0], header[0] + header[1] * (header[2] - 1), int(header[2])
    )
    ybins = np.linspace(
        header[3], header[3] + header[4] * (header[5] - 1), int(header[5])
    )

    return xbins, ybins, histarr
1159
1160
1161# Function to plot a 2D histogram of from a binary file containing it.
1162# Also supply the label names for the n-dimension (x-axis) and m-dimension
1163# (y-axis). If margpars is true marginalised plots of both dimensions will
1164# also be returned.
1166 histfile, ndimlabel, mdimlabel, margpars=True, mplparams=False
1167):
1168 import matplotlib
1169 from matplotlib import pyplot as plt
1170 from lalpulsar.pulsarhtmlutils import paramlatexdict
1171
1172 # read in 2D h0 vs cos(iota) binary prior file
1173 xbins, ybins, histarr = read_hist_from_file(histfile)
1174
1175 if not xbins.any():
1176 print("Could not read binary histogram file", file=sys.stderr)
1177 return None
1178
1179 figs = []
1180
1181 # set some matplotlib defaults for amplitude spectral density
1182 if not mplparams:
1183 mplparams = {
1184 "backend": "Agg",
1185 "text.usetex": True, # use LaTeX for all text
1186 "axes.linewidth": 0.5, # set axes linewidths to 0.5
1187 "axes.grid": True, # add a grid
1188 "grid.linewidth": 0.5,
1189 "font.family": "serif",
1190 "font.size": 12,
1191 }
1192
1193 matplotlib.rcParams.update(mplparams)
1194
1195 fig = plt.figure(figsize=(4, 4), dpi=200)
1196
1197 # param name for axis label
1198 try:
1199 parxaxis = paramlatexdict[ndimlabel.upper()]
1200 except:
1201 parxaxis = ndimlabel
1202
1203 try:
1204 paryaxis = paramlatexdict[mdimlabel.upper()]
1205 except:
1206 paryaxis = mdimlabel
1207
1208 plt.xlabel(r"" + parxaxis, fontsize=14, fontweight=100)
1209 plt.ylabel(r"" + paryaxis, fontsize=14, fontweight=100, rotation=270)
1210
1211 dx = xbins[1] - xbins[0]
1212 dy = ybins[1] - ybins[0]
1213
1214 extent = [
1215 xbins[0] - dx / 2.0,
1216 xbins[-1] + dx / 2.0,
1217 ybins[-1] + dy / 2.0,
1218 ybins[0] - dy / 2.0,
1219 ]
1220
1221 plt.imshow(
1222 np.transpose(histarr),
1223 aspect="auto",
1224 extent=extent,
1225 interpolation="bicubic",
1226 cmap="gray_r",
1227 )
1228
1229 fig.subplots_adjust(left=0.18, bottom=0.15) # adjust size
1230 fax = (fig.get_axes())[0].axis()
1231
1232 figs.append(fig)
1233
1234 if margpars:
1235 # marginalise over y-axes and produce plots
1236 xmarg = []
1237 for i in range(len(xbins)):
1238 xmarg.append(trapezoid(histarr[:][i], x=ybins))
1239
1240 # normalise
1241 xarea = trapezoid(xmarg, x=xbins)
1242 xmarg = map(lambda x: x / xarea, xmarg)
1243
1244 ymarg = []
1245 for i in range(len(ybins)):
1246 ymarg.append(trapezoid(np.transpose(histarr)[:][i], x=xbins))
1247
1248 # normalise
1249 yarea = trapezoid(ymarg, x=ybins)
1250 ymarg = map(lambda x: x / yarea, ymarg)
1251
1252 # plot x histogram
1253 figx = plt.figure(figsize=(4, 4), dpi=200)
1254 plt.step(xbins, xmarg, color="k")
1255 plt.ylim(0, max(xmarg) + 0.1 * max(xmarg))
1256 plt.xlim(fax[0], fax[1])
1257 plt.xlabel(r"" + parxaxis, fontsize=14, fontweight=100)
1258 plt.ylabel(r"Probability Density", fontsize=14, fontweight=100)
1259 figx.subplots_adjust(left=0.18, bottom=0.15) # adjust size
1260 ax = plt.gca()
1261 ax.set_axis_bgcolor("#F2F1F0")
1262
1263 # plot y histogram
1264 figy = plt.figure(figsize=(4, 4), dpi=200)
1265 plt.step(ybins, ymarg, color="k")
1266 plt.ylim(0, max(ymarg) + 0.1 * max(ymarg))
1267 plt.xlim(fax[3], fax[2])
1268 plt.xlabel(r"" + paryaxis, fontsize=14, fontweight=100)
1269 plt.ylabel(r"Probability Density", fontsize=14, fontweight=100)
1270 figy.subplots_adjust(left=0.18, bottom=0.15) # adjust size
1271 ax = plt.gca()
1272 ax.set_axis_bgcolor("#F2F1F0")
1273
1274 figs.append(figx)
1275 figs.append(figy)
1276
1277 return figs
1278
1279
# using a binary file containing a histogram of h0 vs cos(iota) calculate the
# upper limit on h0
def h0ul_from_prior_file(priorfile, ulval=0.95):
    """
    Calculate the h0 upper limit at credible level `ulval` (default 0.95)
    from a binary file containing a 2D histogram of h0 vs cos(iota) (as
    read by read_hist_from_file).  Returns None if the file cannot be
    read.
    """
    # read in 2D h0 vs cos(iota) binary prior file
    h0bins, cibins, histarr = read_hist_from_file(priorfile)

    if not h0bins.any():
        print("Could not read binary histogram file", file=sys.stderr)
        return None

    # marginalise over cos(iota)
    h0marg = [trapezoid(histarr[i], x=cibins) for i in range(len(h0bins))]

    # normalise the h0 posterior (shift bin centres to lower bin edges)
    h0bins = h0bins - (h0bins[1] - h0bins[0]) / 2
    h0area = trapezoid(h0marg, x=h0bins)
    # must be a real sequence, not a map() iterator, for
    # cumulative_trapezoid to work under Python 3
    h0margnorm = np.asarray(h0marg) / h0area

    # get cumulative probability
    ct = cumulative_trapezoid(h0margnorm, h0bins)

    # prepend a zero to ct so it matches h0bins in length
    ct = np.insert(ct, 0, 0)

    # use linear interpolation over the unique cumulative values to find
    # the h0 value at the requested credible level
    ctu, ui = np.unique(ct, return_index=True)
    intf = interp1d(ctu, h0bins[ui], kind="linear")
    return intf(float(ulval))
1310
1311
1312# function to create a histogram plot of the 2D posterior
1314 poslist,
1315 params,
1316 ifos,
1317 bounds=None,
1318 nbins=[50, 50],
1319 parfile=None,
1320 overplot=False,
1321 mplparams=False,
1322):
1323 import matplotlib
1324 from matplotlib import pyplot as plt
1325 from lalpulsar.pulsarhtmlutils import paramlatexdict
1326
1327 if len(params) != 2:
1328 print("Require 2 parameters", file=sys.stderr)
1329 sys.exit(1)
1330
1331 # set some matplotlib defaults for amplitude spectral density
1332 if not mplparams:
1333 mplparams = {
1334 "backend": "Agg",
1335 "text.usetex": True, # use LaTeX for all text
1336 "axes.linewidth": 0.5, # set axes linewidths to 0.5
1337 "axes.grid": True, # add a grid
1338 "grid.linewidth": 0.5,
1339 "font.family": "serif",
1340 "font.size": 12,
1341 }
1342
1343 matplotlib.rcParams.update(mplparams)
1344
1345 if overplot:
1346 coldict = {
1347 "H1": "Reds",
1348 "H2": "GnBu",
1349 "L1": "Greens",
1350 "V1": "Blues",
1351 "G1": "BuPu",
1352 "Joint": "Greys",
1353 }
1354
1355 myfigs = []
1356
1357 # param name for axis label
1358 try:
1359 parxaxis = paramlatexdict[params[0].upper()]
1360 except:
1361 parxaxis = params[0]
1362
1363 try:
1364 paryaxis = paramlatexdict[params[1].upper()]
1365 except:
1366 paryaxis = params[1]
1367
1368 parval1 = None
1369 parval2 = None
1370
1371 if parfile:
1372 parval1 = parfile[params[0].upper()]
1373 parval2 = parfile[params[1].upper()]
1374
1375 if ifos == None:
1376 ifos = ["H1"]
1377
1378 for idx, ifo in enumerate(ifos):
1379 if overplot:
1380 if idx == 0:
1381 # if overplotting just have one figure
1382 myfig = plt.figure(figsize=(4, 4), dpi=200)
1383
1384 if len(ifos) == 1:
1385 alpha = 1.0
1386 cmap = plt.cm.get_cmap("Greys")
1387 else:
1388 alpha = 0.5 # set transparency
1389 cmap = plt.cm.get_cmap(coldict[ifo]) # set colormap
1390 # cmap.set_bad(alpha=0.) # set 'bad' masked values to be transparent
1391 else:
1392 myfig = plt.figure(figsize=(4, 4), dpi=200)
1393 alpha = 1.0 # no transparency
1394 cmap = plt.cm.get_cmap("Greys")
1395
1396 posterior = poslist[idx]
1397
1398 a = np.squeeze(posterior[params[0]].samples)
1399 b = np.squeeze(posterior[params[1]].samples)
1400
1401 # Create 2D bin array
1402 par1pos_min = a.min()
1403 par2pos_min = b.min()
1404
1405 par1pos_max = a.max()
1406 par2pos_max = b.max()
1407
1408 plt.xlabel(r"" + parxaxis, fontsize=14, fontweight=100)
1409 plt.ylabel(r"" + paryaxis, fontsize=14, fontweight=100, rotation=270)
1410
1411 H, xedges, yedges = np.histogram2d(a, b, nbins, normed=True)
1412
1413 # if overplot and len(ifos) > 1:
1414 # # mask values that are zero, so that they are
1415 # Hm = np.ma.masked_where(np.transpose(H) == 0, np.transpose(H))
1416 # else:
1417 # Hm = np.transpose(H)
1418 Hm = np.transpose(H)
1419
1420 extent = [xedges[0], xedges[-1], yedges[-1], yedges[0]]
1421 plt.imshow(
1422 Hm,
1423 aspect="auto",
1424 extent=extent,
1425 interpolation="bilinear",
1426 cmap=cmap,
1427 alpha=alpha,
1428 )
1429 # plt.colorbar()
1430
1431 if bounds:
1432 plt.xlim(bounds[0][0], bounds[0][1])
1433 plt.ylim(bounds[1][0], bounds[1][1])
1434
1435 # plot injection values if given
1436 if parval1 and parval2:
1437 if overplot and idx == len(ifos) - 1:
1438 # only plot after final posterior has been plotted
1439 plt.hold(True)
1440 plt.plot(parval1, parval2, "rx", markersize=8, mew=2)
1441 else:
1442 plt.hold(True)
1443 plt.plot(parval1, parval2, "rx", markersize=8, mew=2)
1444
1445 if overplot:
1446 plt.hold(True)
1447 if not overplot:
1448 myfig.subplots_adjust(left=0.18, bottom=0.15) # adjust size
1449 myfigs.append(myfig)
1450
1451 if overplot:
1452 myfig.subplots_adjust(left=0.18, bottom=0.15) # adjust size
1453 myfigs.append(myfig)
1454
1455 return myfigs
1456
1457
# a function that creates and normalises a histogram of samples, using nbins
# bins between an upper and a lower bound. The values at the bin centres
# (with length nbins+2) will be returned as numpy arrays
def hist_norm_bounds(samples, nbins, low=float("-inf"), high=float("inf")):
    """
    Create a normalised histogram of `samples` with `nbins` bins.

    If a bin centre is more than one bin width away from the `low`/`high`
    boundary a zero-valued bin is added just outside the data range;
    otherwise a bin is placed exactly on the boundary with a value
    linearly extrapolated from the gradient of the two adjacent bins.
    The result is normalised so it integrates (trapezium rule) to one.

    Returns (ns, bincentres): the normalised bin values and the bin
    centres, both numpy arrays of length nbins + 2.
    """
    # get histogram
    n, binedges = np.histogram(samples, nbins)

    # get bin width
    binwidth = binedges[1] - binedges[0]

    # bin centres (vectorised rather than an append loop)
    bincentres = binedges[:-1] + binwidth / 2

    # if histogram points are not close to boundaries (i.e. within a bin of
    # the boundaries) then add zeros to the histogram edges
    if bincentres[0] - binwidth > low:
        # prepend a zero and a new bin centre at bincentres[0] - binwidth
        n = np.insert(n, 0, 0)
        bincentres = np.insert(bincentres, 0, bincentres[0] - binwidth)
    else:
        # closer to the boundary than one bin width: set a new bin on the
        # boundary with a value linearly extrapolated from the gradient of
        # the adjacent points (may be negative - matches original behaviour)
        dx = bincentres[0] - low

        # prepend low value to bins
        bincentres = np.insert(bincentres, 0, low)

        dn = n[1] - n[0]
        nbound = n[0] - (dn / binwidth) * dx

        n = n.astype(float)  # convert to floats
        n = np.insert(n, 0, nbound)

    # now the other end!
    if bincentres[-1] + binwidth < high:
        # append a zero and a new bin centre at bincentres[-1] + binwidth
        n = np.append(n, 0)
        bincentres = np.append(bincentres, bincentres[-1] + binwidth)
    else:
        dx = high - bincentres[-1]

        # append high value to bins
        bincentres = np.append(bincentres, high)

        dn = n[-1] - n[-2]
        nbound = n[-1] + (dn / binwidth) * dx

        n = n.astype(float)  # convert to floats
        n = np.append(n, nbound)

    # calculate area and normalise (vectorised - the original element-wise
    # append loop was O(n^2))
    area = trapezoid(n, x=bincentres)
    ns = np.asarray(n, dtype=float) / area

    return ns, bincentres
1529
1530
1531# create a Tukey window of length N
def tukey_window(N, alpha=0.5):
    """
    Return an N-point Tukey (tapered-cosine) window with taper fraction
    alpha.  The cosine tapers occupy the first and last alpha/2 fractions
    of the window with a flat top in between; alpha >= 1 degenerates to a
    Hann window.
    """
    # the fully-tapered limit is simply a Hann window
    if alpha >= 1:
        return np.hanning(N)

    # normalised sample positions on [0, 1]
    positions = np.linspace(0, 1, N)

    # start from a flat (rectangular) window
    window = np.ones_like(positions)

    half_taper = alpha / 2

    # rising cosine edge over 0 <= x < alpha/2
    left = positions < half_taper
    window[left] = 0.5 * (
        1 + np.cos(2 * np.pi / alpha * (positions[left] - half_taper))
    )

    # falling cosine edge over 1 - alpha/2 <= x <= 1
    right = positions >= (1 - half_taper)
    window[right] = 0.5 * (
        1 + np.cos(2 * np.pi / alpha * (positions[right] - 1 + half_taper))
    )

    return window
1552
1553
1554# create a function for plotting the absolute value of Bk data (read in from
1555# data files) and an averaged 1 day "two-sided" amplitude spectral density
1556# spectrogram for each IFO. Bkdata should be a dictionary contains the files
1557# keyed to the IFO
    Bkdata,
    delt=86400,
    plotpsds=True,
    plotfscan=False,
    removeoutlier=None,
    mplparams=False,
):
    """
    Plot the absolute value of heterodyned Bk data and, optionally, an
    averaged `delt`-second "two-sided" amplitude spectral density and a
    spectrogram for each detector.

    `Bkdata` is a dictionary of data-file paths keyed on detector name;
    each file holds columns of GPS time and the real and imaginary Bk
    parts.  `removeoutlier`, if given, discards points more than that
    many estimated standard deviations from zero.  Returns
    (Bkfigs, psdfigs, fscanfigs, asdlist, sampledt).
    """
    import matplotlib
    from matplotlib.mlab import specgram
    from matplotlib import colors
    from matplotlib import pyplot as plt

    # create list of figures
    Bkfigs = []
    psdfigs = []
    fscanfigs = []
    asdlist = []
    sampledt = []

    # set some matplotlib defaults for amplitude spectral density
    if not mplparams:
        mplparams = {
            "backend": "Agg",
            "text.usetex": True,  # use LaTeX for all text
            "axes.linewidth": 0.5,  # set axes linewidths to 0.5
            "axes.grid": True,  # add a grid
            "grid.linewidth": 0.5,
            "font.family": "sans-serif",
            "font.sans-serif": "Avant Garde, Helvetica, Computer Modern Sans serif",
            "font.size": 15,
        }

    matplotlib.rcParams.update(mplparams)
    # xfrac provides the \sfrac fractions used for the axis tick labels
    matplotlib.rcParams["text.latex.preamble"] = r"\usepackage{xfrac}"

    # ifos line colour specs
    coldict = {"H1": "r", "H2": "c", "L1": "g", "V1": "b", "G1": "m"}
    colmapdic = {
        "H1": "Reds",
        "H2": "PuBu",
        "L1": "Greens",
        "V1": "Blues",
        "G1": "PuRd",
    }

    # there should be data for each ifo
    for ifo in Bkdata:
        # get data for given ifo
        try:
            Bk = np.loadtxt(Bkdata[ifo], comments=["#", "%"])
        except:
            print("Could not open file %s" % Bkdata[ifo], file=sys.stderr)
            sys.exit(-1)

        # sort the array in time
        Bk = Bk[Bk[:, 0].argsort()]

        # make sure times are unique
        uargs = np.unique(Bk[:, 0], return_index=True)[1]
        Bk = Bk[uargs, :]

        # should be three lines in file
        gpstime = []

        # remove outliers at X sigma by working out sigma from the peak of the
        # distribution of the log absolute value
        if removeoutlier:
            n, binedges = np.histogram(
                np.log(np.fabs(np.concatenate((Bk[:, 1], Bk[:, 2])))), 50
            )
            j = n.argmax(0)

            # standard deviation estimate from the histogram peak
            stdest = math.exp((binedges[j] + binedges[j + 1]) / 2)

            # get values within +/- removeoutlier sigma
            # test real parts
            vals = np.where(np.fabs(Bk[:, 1]) < removeoutlier * stdest)
            Bknew = Bk[vals, :][-1]
            # test imag parts
            vals = np.where(np.fabs(Bknew[:, 2]) < removeoutlier * stdest)
            Bk = Bknew[vals, :][-1]

        gpstime = Bk[:, 0]

        # minimum time step between points (should generally be 60 seconds)
        mindt = min(np.diff(gpstime))
        sampledt.append(mindt)

        # |B_k| magnitude time series
        Bkabs = np.sqrt(Bk[:, 1] ** 2 + Bk[:, 2] ** 2)

        # plot the time series of the data
        Bkfig = plt.figure(figsize=(11, 3.5), dpi=200)
        Bkfig.subplots_adjust(bottom=0.15, left=0.09, right=0.94)

        # times relative to the first sample
        tms = list(map(lambda x: x - gpstime[0], gpstime))

        plt.plot(tms, Bkabs, ".", color=coldict[ifo], markersize=1)
        plt.xlabel(r"GPS - %d" % int(gpstime[0]), fontsize=14, fontweight=100)
        plt.ylabel(r"$|B_k|$", fontsize=14, fontweight=100)
        # plt.title(r'$B_k$s for ' + ifo.upper(), fontsize=14)
        plt.xlim(tms[0], tms[-1])

        Bkfigs.append(Bkfig)

        if plotpsds or plotfscan:
            # create PSD by splitting data into days, padding with zeros to give a
            # sample a second, getting the PSD for each day and combining them
            totlen = gpstime[-1] - gpstime[0]  # total data length

            # check mindt is an integer and greater than 1
            if math.fmod(mindt, 1) != 0.0 or mindt < 1:
                print(
                    "Error... time steps between data points must be integers",
                    file=sys.stderr,
                )
                sys.exit(-1)

            count = 0  # (unused)

            # zero pad the data and bin each point in the nearest bin
            datazeropad = np.zeros(int(math.ceil(totlen / mindt)) + 1, dtype=complex)

            idx = list(map(lambda x: int(math.floor((x / mindt) + 0.5)), tms))
            for i in range(0, len(idx)):
                datazeropad[idx[i]] = complex(Bk[i, 1], Bk[i, 2])

            # Tukey window with a 10% taper for each delt-long segment
            win = tukey_window(math.floor(delt / mindt), alpha=0.1)

            Fs = 1.0 / mindt  # sample rate in Hz

            # spectrogram of the zero-padded complex data, 50% overlap
            fscan, freqs, t = specgram(
                datazeropad,
                NFFT=int(math.floor(delt / mindt)),
                Fs=Fs,
                window=win,
                noverlap=int(math.floor(delt / (2.0 * mindt))),
            )

            if plotpsds:
                fshape = fscan.shape

                totalasd = np.zeros(fshape[0])

                # per-frequency median ASD over the non-zero segments
                for i in range(0, fshape[0]):
                    scanasd = np.sqrt(fscan[i, :])
                    nonzasd = np.nonzero(scanasd)
                    scanasd = scanasd[nonzasd]

                    # median amplitude spectral density
                    # totalasd[i] = hmean(scanasd)
                    totalasd[i] = np.median(scanasd)

                # average amplitude spectral density
                asdlist.append(totalasd)

                # plot PSD
                psdfig = plt.figure(figsize=(4, 3.5), dpi=200)
                psdfig.subplots_adjust(left=0.18, bottom=0.15)

                plt.plot(freqs, totalasd, color=coldict[ifo])
                plt.xlim(freqs[0], freqs[-1])
                plt.xlabel(r"Frequency (Hz)", fontsize=14, fontweight=100)
                plt.ylabel(r"$h/\sqrt{\rm Hz}$", fontsize=14, fontweight=100)

                # convert frequency labels to fractions (two-sided spectrum)
                ax = plt.gca()
                xt = [-Fs / 2.0, -Fs / 4.0, 0.0, Fs / 2.0, Fs / 4.0]
                ax.set_xticks(xt)
                xl = []
                for item in xt:
                    if item == 0:
                        xl.append("0")
                    else:
                        if item < 0:
                            xl.append(r"$-\sfrac{1}{%d}$" % (-1.0 / item))
                        else:
                            xl.append(r"$\sfrac{1}{%d}$" % (1.0 / item))
                ax.set_xticklabels(xl)
                # plt.setp(ax.get_xticklabels(), fontsize=16) # increase font size
                plt.tick_params(axis="x", which="major", labelsize=14)

                psdfigs.append(psdfig)

            if plotfscan:
                fscanfig = plt.figure(figsize=(11, 3.5), dpi=200)
                fscanfig.subplots_adjust(bottom=0.15, left=0.09, right=0.94)

                extent = [tms[0], tms[-1], freqs[0], freqs[-1]]
                # NOTE(review): interpolation=None means "use the rcParams
                # default", not "no interpolation"; 'none' may have been intended
                plt.imshow(
                    np.sqrt(np.flipud(fscan)),
                    aspect="auto",
                    extent=extent,
                    interpolation=None,
                    cmap=colmapdic[ifo],
                    norm=colors.Normalize(),
                )
                plt.ylabel(r"Frequency (Hz)", fontsize=14, fontweight=100)
                plt.xlabel(r"GPS - %d" % int(gpstime[0]), fontsize=14, fontweight=100)

                # convert frequency labels to fractions (two-sided spectrum)
                ax = plt.gca()
                yt = [-Fs / 2.0, -Fs / 4.0, 0.0, Fs / 2.0, Fs / 4.0]
                ax.set_yticks(yt)
                yl = []
                for item in yt:
                    if item == 0:
                        yl.append("0")
                    else:
                        if item < 0:
                            yl.append(r"$-\sfrac{1}{%d}$" % (-1.0 / item))
                        else:
                            yl.append(r"$\sfrac{1}{%d}$" % (1.0 / item))
                ax.set_yticklabels(yl)
                plt.tick_params(axis="y", which="major", labelsize=14)

                fscanfigs.append(fscanfig)

    return Bkfigs, psdfigs, fscanfigs, asdlist, sampledt
1778
1779
1780# a function to create a histogram of log10(results) for a list of a given parameter
1781# (if a list with previous value is given they will be plotted as well)
1782# - lims is a dictionary of lists for each IFO
1784 lims, param, ifos, prevlims=None, bins=20, overplot=False, mplparams=False
1785):
1786 import matplotlib
1787 from matplotlib import pyplot as plt
1788 from lalpulsar.pulsarhtmlutils import paramlatexdict
1789
1790 if not mplparams:
1791 mplparams = {
1792 "backend": "Agg",
1793 "text.usetex": True, # use LaTeX for all text
1794 "axes.linewidth": 0.5, # set axes linewidths to 0.5
1795 "axes.grid": True, # add a grid
1796 "grid.linewidth": 0.5,
1797 "font.family": "serif",
1798 "font.size": 12,
1799 }
1800
1801 matplotlib.rcParams.update(mplparams)
1802
1803 myfigs = []
1804
1805 # ifos line colour specs
1806 coldict = {"H1": "r", "H2": "c", "L1": "g", "V1": "b", "G1": "m", "Joint": "k"}
1807
1808 try:
1809 parxaxis = paramlatexdict[param.upper()]
1810 except:
1811 parxaxis = param
1812
1813 paryaxis = "Number of Pulsars"
1814
1815 # get log10 of previous results
1816 logprevlims = None
1817 if prevlims is not None:
1818 logprevlims = np.log10(prevlims)
1819
1820 # get the limits of the histogram range
1821 hrange = None
1822 stacked = False
1823 if overplot:
1824 highbins = []
1825 lowbins = []
1826
1827 # combine dataset to plot as stacked
1828 stackdata = []
1829 stacked = True
1830
1831 for j, ifo in enumerate(ifos):
1832 theselims = lims[ifo]
1833
1834 # remove any None's
1835 for i, val in enumerate(theselims):
1836 if val == None:
1837 del theselims[i]
1838
1839 loglims = np.log10(theselims)
1840
1841 stackdata.append(loglims)
1842
1843 highbins.append(np.max(loglims))
1844 lowbins.append(np.min(loglims))
1845
1846 if logprevlims is not None:
1847 highbins.append(max(logprevlims))
1848 lowbins.append(min(logprevlims))
1849
1850 hrange = (min(lowbins), max(highbins))
1851
1852 for j, ifo in enumerate(ifos):
1853 # get log10 of results
1854 theselims = lims[ifo]
1855
1856 # remove any None's
1857 for i, val in enumerate(theselims):
1858 if val == None:
1859 del theselims[i]
1860
1861 loglims = np.log10(theselims)
1862
1863 if not overplot:
1864 stackdata = loglims
1865
1866 # if not overplot or (overplot and j == 0):
1867 myfig = plt.figure(figsize=(4, 4), dpi=200)
1868 maxlims = []
1869 minlims = []
1870
1871 if overplot:
1872 edgecolor = []
1873 facecolor = []
1874 for ifoname in ifos:
1875 edgecolor.append(coldict[ifoname])
1876 facecolor.append(coldict[ifoname])
1877 else:
1878 edgecolor = [coldict[ifo]]
1879 facecolor = [coldict[ifo]]
1880
1881 # plt.hist(loglims, bins, range=hrange, histtype='step', fill=True, edgecolor=coldict[ifo], facecolor=coldict[ifo], alpha=0.6)
1882 n, bins, patches = plt.hist(
1883 stackdata,
1884 bins,
1885 range=hrange,
1886 histtype="step",
1887 stacked=stacked,
1888 fill=True,
1889 color=edgecolor,
1890 alpha=0.6,
1891 )
1892 for i, patch in enumerate(patches):
1893 plt.setp(patch, "facecolor", facecolor[i])
1894
1895 if not overplot:
1896 maxlims.append(np.max(loglims))
1897 minlims.append(np.min(loglims))
1898
1899 if logprevlims is not None:
1900 if not overplot:
1901 maxlims.append(max(logprevlims))
1902 minlims.append(min(logprevlims))
1903
1904 maxlim = max(maxlims)
1905 minlim = min(minlims)
1906 else:
1907 maxlim = hrange[1]
1908 minlim = hrange[0]
1909
1910 plt.hold(True)
1911 plt.hist(
1912 logprevlims,
1913 bins,
1914 range=hrange,
1915 edgecolor="k",
1916 lw=2,
1917 histtype="step",
1918 fill=False,
1919 )
1920 plt.hold(False)
1921 else:
1922 if not overplot:
1923 maxlim = max(maxlims)
1924 minlim = min(minlims)
1925 else:
1926 maxlim = hrange[1]
1927 minlim = hrange[0]
1928
1929 # set xlabels to 10^x
1930 ax = plt.gca()
1931
1932 # work out how many ticks to set
1933 tickvals = range(int(math.ceil(maxlim) - math.floor(minlim)))
1934 tickvals = map(lambda x: x + int(math.floor(minlim)), tickvals)
1935 ax.set_xticks(tickvals)
1936 tls = map(lambda x: "$10^{%d}$" % x, tickvals)
1937 ax.set_xticklabels(tls)
1938
1939 plt.ylabel(r"" + paryaxis, fontsize=14, fontweight=100)
1940 plt.xlabel(r"" + parxaxis, fontsize=14, fontweight=100)
1941
1942 myfig.subplots_adjust(left=0.18, bottom=0.15) # adjust size
1943
1944 myfigs.append(myfig)
1945
1946 if overplot:
1947 break
1948
1949 return myfigs
1950
1951
1952# a function plot upper limits verses GW frequency in log-log space. If upper and
1953# lower estimates for the upper limit are supplied these will also be plotted as a
1954# band. If previous limits are supplied these will be plotted as well.
1955# h0lims is a dictionary of lists of h0 upper limits for each of the IFOs given in
1956# the list of ifos
1957# ulesttop and ulestbot are the upper and lower ranges of the estimates limit given
1958# as a dictionary if list for each ifo.
1959# prevlim is a list of previous upper limits at the frequencies prevlimf0gw
1960# xlims is the frequency range for the plot
1961# overplot - set to true to plot different IFOs on same plot
1963 h0lims,
1964 f0gw,
1965 ifos,
1966 xlims=[10, 1500],
1967 ulesttop=None,
1968 ulestbot=None,
1969 prevlim=None,
1970 prevlimf0gw=None,
1971 overplot=False,
1972 mplparams=False,
1973):
1974 import matplotlib
1975 from matplotlib import pyplot as plt
1976
1977 if not mplparams:
1978 mplparams = {
1979 "backend": "Agg",
1980 "text.usetex": True, # use LaTeX for all text
1981 "axes.linewidth": 0.5, # set axes linewidths to 0.5
1982 "axes.grid": True, # add a grid
1983 "grid.linewidth": 0.5,
1984 "font.family": "serif",
1985 "font.size": 12,
1986 }
1987
1988 matplotlib.rcParams.update(mplparams)
1989
1990 myfigs = []
1991
1992 # ifos line colour specs
1993 coldict = {"H1": "r", "H2": "c", "L1": "g", "V1": "b", "G1": "m", "Joint": "k"}
1994
1995 parxaxis = "Frequency (Hz)"
1996 paryaxis = "$h_0$"
1997
1998 for j, ifo in enumerate(ifos):
1999 h0lim = h0lims[ifo]
2000
2001 if len(ifos) > 1:
2002 f0s = f0gw[ifo]
2003 else:
2004 f0s = f0gw
2005
2006 if len(h0lim) != len(f0s):
2007 return None # exit if lengths aren't correct
2008
2009 if not overplot or (overplot and j == 0):
2010 myfig = plt.figure(figsize=(7, 5.5), dpi=200)
2011
2012 plt.hold(True)
2013 if ulesttop is not None and ulestbot is not None:
2014 ult = ulesttop[ifo]
2015 ulb = ulestbot[ifo]
2016
2017 if len(ult) == len(ulb) and len(ult) == len(f0s):
2018 for i in range(len(ult)):
2019 plt.loglog(
2020 [f0s[i], f0s[i]], [ulb[i], ult[i]], ls="-", lw=3, c="lightgrey"
2021 )
2022
2023 if (
2024 prevlim is not None
2025 and prevlimf0gw is not None
2026 and len(prevlim) == len(prevlimf0gw)
2027 ):
2028 if not overplot or (overplot and j == len(ifos) - 1):
2029 plt.loglog(
2030 prevlimf0gw,
2031 prevlim,
2032 marker="*",
2033 ms=10,
2034 alpha=0.7,
2035 mfc="None",
2036 mec="k",
2037 ls="None",
2038 )
2039
2040 # plot current limits
2041 plt.loglog(
2042 f0s, h0lim, marker="*", ms=10, mfc=coldict[ifo], mec=coldict[ifo], ls="None"
2043 )
2044
2045 plt.ylabel(r"" + paryaxis, fontsize=14, fontweight=100)
2046 plt.xlabel(r"" + parxaxis, fontsize=14, fontweight=100)
2047
2048 plt.xlim(xlims[0], xlims[1])
2049
2050 if not overplot or (overplot and j == len(ifos) - 1):
2051 plt.hold(False)
2052 myfig.subplots_adjust(left=0.12, bottom=0.10) # adjust size
2053 myfigs.append(myfig)
2054
2055 return myfigs
2056
2057
2058# a function to create the signal model for a heterodyned triaxial pulsar given
2059# a signal GPS start time, signal duration (in seconds), sample interval
2060# (seconds), detector, and the parameters h0, cos(iota), psi (rads), initial
2061# phase phi0 (rads), right ascension (rads) and declination (rads) in a
2062# dictionary. The list of time stamps, and the real and imaginary parts of the
2063# signal are returned
def heterodyned_triaxial_pulsar(starttime, duration, dt, detector, pardict):
    """
    Create the heterodyned signal model for a triaxial pulsar given a GPS
    start time, signal duration (seconds), sample interval (seconds), a
    detector name, and a parameter dictionary containing h0, cosiota,
    psi, phi0, ra and dec.  Returns (ts, s): the list of time stamps and
    a complex array holding the real and imaginary signal parts.
    """
    sin_phi0 = np.sin(pardict["phi0"])
    cos_phi0 = np.cos(pardict["phi0"])

    # plus/cross polarisation amplitude factors
    amp_plus = 0.25 * (1.0 + pardict["cosiota"] * pardict["cosiota"]) * pardict["h0"]
    amp_cross = 0.5 * pardict["cosiota"] * pardict["h0"]

    # precompute the four amplitude/phase products used per sample
    plus_sin = amp_plus * sin_phi0
    cross_sin = amp_cross * sin_phi0
    plus_cos = amp_plus * cos_phi0
    cross_cos = amp_cross * cos_phi0

    ts = []  # time stamps
    vals = []  # complex signal samples

    end_time = starttime + duration
    k = 0
    t_next = starttime
    while t_next < end_time:
        tk = starttime + (dt * k)
        ts.append(tk)

        # antenna pattern at this time stamp
        fp, fc = antenna_response(
            tk, pardict["ra"], pardict["dec"], pardict["psi"], detector
        )

        # real and imaginary parts of the heterodyned signal
        vals.append((fp * plus_cos + fc * cross_sin) + 1j * (fp * plus_sin - fc * cross_cos))

        t_next = tk + dt
        k += 1

    return ts, np.array(vals, dtype=complex)
2101
2102
2103# a function to create a heterodyned signal model for a neutron star using the complex
2104# amplitude parameters C22, phi22, C21 and phi21 and the orientation parameters cos(iota)
2105# and psi. If both C22 and C21 are non-zero then a signal at both the rotation frequency
2106# and twice the rotation frequency will be generated.
2107#
2108# Note that for the purely triaxial signal the waveform, as defined in Jones (2015)
2109# http://arxiv.org/abs/1501.05832 (and subsequently Pitkin et al (2015) http://arxiv.org/abs/1508.00416),
2110# has the opposite sign to the convention in e.g. JKS (which is used for signal generation in hardware
2111# injections). Hence, when converting h0, from the conventional model, to C22 a minus sign needs to be
2112# introduced. Also, phi0 here is defined as the rotational phase of the source, so when converting to phi22
2113# a factor of 2 is needed.
    pardict, detector, starttime=900000000.0, duration=86400.0, dt=60.0, datatimes=None
):
    """
    Create a heterodyned signal model for a neutron star from the complex
    amplitude parameters C22/phi22 and C21/phi21 and the orientation
    parameters cos(iota) and psi (see the conversion notes in the comment
    above this function).  If `datatimes` is not given, time stamps are
    generated from `starttime`, `duration` and `dt`.

    Returns (ts, s): lists of time-stamp arrays and complex signal
    arrays, one entry per emitted harmonic (the rotation frequency and/or
    twice the rotation frequency).

    Raises KeyError if "cosiota" or "psi" is missing from `pardict`.
    """
    if "cosiota" in pardict:
        cosiota = pardict["cosiota"]
        iota = math.acos(cosiota)
        siniota = math.sin(iota)
    else:
        raise KeyError("cos(iota) not defined!")

    if "psi" in pardict:
        # NOTE(review): psi is unused below - antenna_response reads
        # pardict["psi"] directly; this branch only validates its presence
        psi = pardict["psi"]
    else:
        raise KeyError("psi not defined!")

    # C22 amplitude: fall back to -h0/2 (the sign flip noted in the comment
    # above) when only h0 is supplied
    if "C22" in pardict:
        C22 = pardict["C22"]
    else:
        if "h0" in pardict:
            C22 = -pardict["h0"] / 2.0
        else:
            C22 = 0.0

    # phi22: fall back to twice the rotational phase phi0 when supplied
    if "phi22" in pardict:
        phi22 = pardict["phi22"]
    else:
        if "phi0" in pardict:
            phi22 = 2.0 * pardict["phi0"]
        else:
            phi22 = 0.0

    ePhi22 = cmath.exp(phi22 * 1j)

    if "C21" in pardict:
        C21 = pardict["C21"]
    else:
        C21 = 0.0

    if "phi21" in pardict:
        phi21 = pardict["phi21"]
    else:
        phi21 = 0.0

    ePhi21 = cmath.exp(phi21 * 1j)

    s = []  # signal
    ts = []  # times

    # emit at both the rotation frequency and twice it only when C21 and
    # C22 are both explicitly supplied (rather than derived from h0)
    if "C21" in pardict and "C22" in pardict and "h0" not in pardict:
        freqs = [1.0, 2.0]
    else:
        freqs = [2.0]

    for f in freqs:
        i = 0  # (unused)
        if datatimes is None:
            datatimes = np.arange(starttime, starttime + duration, dt)

        # get the antenna response
        fp, fc = antenna_response(
            datatimes, pardict["ra"], pardict["dec"], pardict["psi"], detector
        )

        if f == 1.0:
            # l=2, m=1 mode at the rotation frequency
            sf = (
                -(C21 / 4.0) * ePhi21 * siniota * cosiota * fp
                + 1j * (C21 / 4.0) * ePhi21 * siniota * fc
            )
        elif f == 2.0:
            # l=m=2 mode at twice the rotation frequency
            sf = (
                -(C22 / 2.0) * ePhi22 * (1.0 + cosiota**2.0) * fp
                + 1j * (C22) * ePhi22 * cosiota * fc
            )

        ts.append(datatimes)
        s.append(sf)

    return ts, s
2192
2193
2194# a function to convert the pinned superfluid model parameters to the complex
2195# amplitude parameters using the Eqns 76-79 defined in Jones 2012 LIGO DCC T1200265-v3
    """
    Convert pinned-superfluid model parameters (I21, I31, costheta,
    lambdapin, phi0 in `pardict`) into the complex amplitude parameters
    using Eqns 76-79 of Jones 2012 (LIGO DCC T1200265-v3).  Returns a
    dictionary with keys "C22", "C21", "phi22" and "phi21".
    """
    costheta = pardict["costheta"]
    theta = np.arccos(costheta)
    sintheta = math.sin(theta)
    sin2theta = math.sin(2.0 * theta)
    sinlambda = math.sin(pardict["lambdapin"])
    coslambda = math.cos(pardict["lambdapin"])
    sin2lambda = math.sin(2.0 * pardict["lambdapin"])

    phi0 = pardict["phi0"]

    I21 = pardict["I21"]
    I31 = pardict["I31"]

    # quadrature components of the l=m=2 mode
    A22 = I21 * (sinlambda**2 - (coslambda * costheta) ** 2) - I31 * sintheta**2
    B22 = I21 * sin2lambda * costheta

    C22 = 2.0 * math.sqrt(A22**2 + B22**2)

    # quadrature components of the l=2, m=1 mode
    A21 = I21 * sin2lambda * sintheta
    B21 = (I21 * coslambda**2 - I31) * sin2theta

    C21 = 2.0 * math.sqrt(A21**2 + B21**2)

    # phases folded with fmod; NOTE(review): fmod keeps the sign of its
    # argument, so these can be negative rather than strictly in [0, 2pi)
    phi22 = np.fmod(2.0 * phi0 - math.atan2(B22, A22), 2.0 * np.pi)
    phi21 = np.fmod(phi0 - math.atan2(B21, A21), 2.0 * np.pi)

    outvals = {"C22": C22, "C21": C21, "phi22": phi22, "phi21": phi21}

    return outvals
2226
2227
2228# a function to create the signal model for a heterodyned pinned superfluid
# model pulsar given a signal GPS start time, signal duration (in seconds),
2230# sample interval (seconds), detector, and the parameters I21, I31,
2231# cos(theta), lambda, cos(iota), psi (rads), initial phase phi0 (for l=m=2 mode in rads), right
2232# ascension (rads), declination (rads), distance (kpc) and frequency (f0) in a
2233# dictionary. The list of time stamps, and the real and imaginary parts of the
2234# 1f and 2f signals are returned in an array
def heterodyned_pinsf_pulsar(starttime, duration, dt, detector, pardict):
    """
    Create a complex heterodyned time series for a pinned superfluid neutron
    star emitting at both its rotation frequency (1f) and twice its rotation
    frequency (2f), as defined in Eqns 35-38 of Jones 2012 LIGO DCC
    T1200265-v3.

    Inputs
    ------
    starttime - GPS start time of the signal
    duration  - signal duration (seconds)
    dt        - sample interval (seconds)
    detector  - detector name, e.g. 'H1'
    pardict   - dictionary with 'I21', 'I31', 'costheta', 'lambdapin',
                'cosiota', 'psi', 'phi0' (phase of the l=m=2 mode, rads),
                'ra' (rads), 'dec' (rads), 'dist' (kpc) and 'f0' (Hz)

    Returns
    -------
    ts - a (2, N) array of the time stamps of the 1f and 2f streams
    s  - a (2, N) complex array of the 1f and 2f signal models
    """

    iota = np.arccos(pardict["cosiota"])
    theta = np.arccos(pardict["costheta"])
    siniota = math.sin(iota)
    sintheta = math.sin(theta)
    sin2theta = math.sin(2.0 * theta)
    sinlambda = math.sin(pardict["lambdapin"])
    coslambda = math.cos(pardict["lambdapin"])
    sin2lambda = math.sin(2.0 * pardict["lambdapin"])

    # phase factors for the 1f (phi0/2) and 2f (phi0) data streams
    ePhi = cmath.exp(0.5 * pardict["phi0"] * 1j)
    e2Phi = cmath.exp(pardict["phi0"] * 1j)

    # overall amplitude scale: f0^2 / distance
    f2_r = pardict["f0"] ** 2 / pardict["dist"]

    # amplitude factors for the plus/cross polarisations at 1f and 2f
    Xplusf = -(f2_r / 2.0) * siniota * pardict["cosiota"]
    Xcrossf = (f2_r / 2.0) * siniota

    Xplus2f = -f2_r * (1.0 + pardict["cosiota"] ** 2)
    Xcross2f = 2.0 * f2_r * pardict["cosiota"]

    A21 = pardict["I21"] * sin2lambda * sintheta
    B21 = (pardict["I21"] * coslambda**2 - pardict["I31"]) * sin2theta

    A22 = (
        pardict["I21"] * (sinlambda**2 - coslambda**2 * pardict["costheta"] ** 2)
        - pardict["I31"] * sintheta**2
    )
    B22 = pardict["I21"] * sin2lambda * pardict["costheta"]

    # regularly spaced time stamps; equivalent to looping while
    # starttime + i*dt < starttime + duration
    nsamps = int(np.ceil(duration / dt))
    ts1 = starttime + dt * np.arange(nsamps)

    # get the antenna responses for all (regularly spaced) time stamps in a
    # single call, rather than one LAL call per sample
    fp, fc = antenna_response(
        ts1, pardict["ra"], pardict["dec"], pardict["psi"], detector
    )

    # complex signal amplitude model at 1f
    s1 = (fp * Xplusf * ePhi * (A21 - 1j * B21)) + (
        fc * Xcrossf * ePhi * (B21 + 1j * A21)
    )

    # complex signal amplitude model at 2f
    s2 = (fp * Xplus2f * e2Phi * (A22 - 1j * B22)) + (
        fc * Xcross2f * e2Phi * (B22 + 1j * A22)
    )

    # combine data into single (2, N) arrays (1f and 2f share time stamps)
    ts = np.vstack([ts1, ts1])
    s = np.vstack([s1, s2])

    return ts, s
2313
2314
2315# function to get the antenna response for a given detector. It takes in a GPS time or list (or numpy array)
2316# of GPS times, right ascension (rads), declination (rads), polarisation angle (rads) and a detector name
2317# e.g. H1, L1, V1. The plus and cross polarisations are returned.
def antenna_response(gpsTime, ra, dec, psi, det):
    """
    Get the plus and cross polarisation antenna responses for a detector.

    Inputs
    ------
    gpsTime - a GPS time, or a list/numpy array of GPS times
    ra      - right ascension in rads, or a string in 'hh:mm:ss.s' format
    dec     - declination in rads, or a string in 'dd:mm:ss.s' format
    psi     - the polarisation angle (rads)
    det     - a detector name, e.g. 'H1', 'L1', 'V1'

    Returns
    -------
    fp, fc - numpy arrays of the plus and cross responses at each time
    """
    import lal

    # create detector-name map
    detMap = {
        "H1": lal.LALDetectorIndexLHODIFF,
        "H2": lal.LALDetectorIndexLHODIFF,
        "L1": lal.LALDetectorIndexLLODIFF,
        "G1": lal.LALDetectorIndexGEO600DIFF,
        "V1": lal.LALDetectorIndexVIRGODIFF,
        "T1": lal.LALDetectorIndexTAMA300DIFF,
        "AL1": lal.LALDetectorIndexLLODIFF,
        "AH1": lal.LALDetectorIndexLHODIFF,
        "AV1": lal.LALDetectorIndexVIRGODIFF,
        "E1": lal.LALDetectorIndexE1DIFF,
        "E2": lal.LALDetectorIndexE2DIFF,
        "E3": lal.LALDetectorIndexE3DIFF,
    }

    try:
        detector = detMap[det]
    except KeyError:
        raise KeyError("ERROR. Key {} is not a valid detector name.".format(det))

    # get detector response tensor
    detval = lal.CachedDetectors[detector]
    response = detval.response

    # check if ra and dec are floats or strings (if strings in hh/dd:mm:ss.s format then convert to rads)
    if not isinstance(ra, float):
        if isinstance(ra, str):
            try:
                ra = ra_to_rad(ra)
            except Exception:
                raise ValueError(
                    "Right ascension string '{}' not formatted properly".format(ra)
                )
        else:
            raise ValueError(
                "Right ascension must be a 'float' in radians or a string of the format 'hh:mm:ss.s'"
            )

    if not isinstance(dec, float):
        if isinstance(dec, str):
            try:
                dec = dec_to_rad(dec)
            except Exception:
                # NOTE: previously this message wrongly reported the right
                # ascension value; it now reports the declination
                raise ValueError(
                    "Declination string '{}' not formatted properly".format(dec)
                )
        else:
            raise ValueError(
                "Declination must be a 'float' in radians or a string of the format 'dd:mm:ss.s'"
            )

    # check if gpsTime is just a float or int, and if so convert into an array
    if isinstance(gpsTime, (float, int)):
        gpsTime = np.array([gpsTime])
    else:  # make sure it's a numpy array (copied so the input is not modified)
        gpsTime = np.copy(gpsTime)

    # make sure times are floats
    gpsTime = gpsTime.astype("float64")

    # if gpsTime is a list of regularly spaced values then use ComputeDetAMResponseSeries
    if len(gpsTime) == 1 or np.unique(np.diff(gpsTime)).size == 1:
        gpsStart = lal.LIGOTimeGPS(gpsTime[0])
        dt = 0.0
        if len(gpsTime) > 1:
            dt = gpsTime[1] - gpsTime[0]
        fp, fc = lal.ComputeDetAMResponseSeries(
            response, ra, dec, psi, gpsStart, dt, len(gpsTime)
        )

        # return elements from Time Series
        return fp.data.data, fc.data.data
    else:  # we'll have to work out the value at each point in the time series
        fp = np.zeros(len(gpsTime))
        fc = np.zeros(len(gpsTime))

        for i in range(len(gpsTime)):
            gps = lal.LIGOTimeGPS(gpsTime[i])
            gmst_rad = lal.GreenwichMeanSiderealTime(gps)

            # actual computation of antenna factors
            fp[i], fc[i] = lal.ComputeDetAMResponse(response, ra, dec, psi, gmst_rad)

        return fp, fc
2406
2407
# a function to inject a heterodyned pulsar signal into noise of a
# given level for detectors. It will return the signal + noise and the optimal
# SNR. If an snrscale value is passed to the function the signal will be scaled
# to that SNR.
2413 starttime,
2414 duration,
2415 dt,
2416 detectors,
2417 pardict,
2418 freqfac=[2.0],
2419 npsds=None,
2420 snrscale=None,
2421):
2422 # if detectors is just a string (i.e. one detector) then make it a list
2423 if isinstance(detectors, str):
2424 detectors = [detectors]
2425
    # if no noise sigmas are given then generate a noise level from the given
    # detector noise curves
2428 if npsds is None:
2429 npsds = []
2430
2431 for det in detectors:
2432 for frf in freqfac:
2433 psd = detector_noise(det, frf * pardict["f0"])
2434
                # convert to time domain standard deviation shared between real and
                # imaginary signal
2437 ns = np.sqrt((psd / 2.0) / (2.0 * dt))
2438
2439 npsds.append(ns)
2440 else:
2441 # convert input psds into time domain noise standard deviation
2442 tmpnpsds = []
2443
2444 count = 0
2445 for j, det in enumerate(detectors):
2446 for frf in freqfac:
2447 if len(npsds) == 1:
2448 tmpnpsds.append(np.sqrt((npsds[0] / 2.0) / (2.0 * dt)))
2449 else:
2450 tmpnpsds.append(np.sqrt((npsds[count] / 2.0) / (2.0 * dt)))
2451
2452 count = count + 1
2453
2454 npsds = tmpnpsds
2455
2456 if len(freqfac) == 1 and len(detectors) != len(npsds):
2457 raise ValueError(
2458 "Number of detectors {} not the same as number of noises {}".format(
2459 len(detectors), len(npsds)
2460 )
2461 )
2462
2463 if len(freqfac) == 2 and 2 * len(detectors) != len(npsds):
2464 raise ValueError(
2465 "Number of detectors {} not half the number of noises {}".format(
2466 len(detectors), len(npsds)
2467 )
2468 )
2469
2470 tss = np.array([])
2471 ss = np.array([])
2472
2473 snrtot = 0
2474 for j, det in enumerate(detectors):
2475 # create the pulsar signal
2476 if len(freqfac) == 1 and freqfac[0] == 2.0:
2477 ts, s = heterodyned_pulsar_signal(pardict, det, starttime, duration, dt)
2478
2479 if j == 0:
2480 tss = np.append(tss, ts)
2481 ss = np.append(ss, s)
2482 else:
2483 tss = np.vstack([tss, ts])
2484 ss = np.vstack([ss, s])
2485
2486 # get SNR
2487 snrtmp = get_optimal_snr(s[0], npsds[j])
2488 elif len(freqfac) == 2:
2489 ts, s = heterodyned_pulsar_signal(pardict, det, starttime, duration, dt)
2490
2491 snrtmp = 0
2492 for k, frf in enumerate(freqfac):
2493 if j == 0 and k == 0:
2494 tss = np.append(tss, ts[k][:])
2495 ss = np.append(ss, s[k][:])
2496 else:
2497 tss = np.vstack([tss, ts[k][:]])
2498 ss = np.vstack([ss, s[k][:]])
2499
2500 snrtmp2 = get_optimal_snr(s[k][:], npsds[2 * j + k])
2501 snrtmp = snrtmp + snrtmp2 * snrtmp2
2502
2503 snrtmp = np.sqrt(snrtmp)
2504
2505 snrtot = snrtot + snrtmp * snrtmp
2506
2507 # total multidetector/data stream snr
2508 snrtot = np.sqrt(snrtot)
2509
2510 # add noise and rescale signals if necessary
2511 if snrscale is not None:
2512 if snrscale != 0:
2513 snrscale = snrscale / snrtot
2514 # print snrscale
2515 else:
2516 snrscale = 1
2517
2518 signalonly = ss.copy()
2519
2520 i = 0
2521 for det in detectors:
2522 # for triaxial model
2523 if len(freqfac) == 1:
2524 # generate random numbers
2525 rs = np.random.randn(len(ts[0]), 2)
2526
2527 for j, t in enumerate(ts[0]):
2528 if len(tss.shape) == 1:
2529 signalonly[j] = (snrscale * ss[j].real) + 1j * (
2530 snrscale * ss[j].imag
2531 )
2532 ss[j] = (snrscale * ss[j].real + npsds[i] * rs[j][0]) + 1j * (
2533 snrscale * ss[j].imag + npsds[i] * rs[j][1]
2534 )
2535 else:
2536 signalonly[i][j] = (snrscale * ss[i][j].real) + 1j * (
2537 snrscale * ss[i][j].imag
2538 )
2539 ss[i][j] = (snrscale * ss[i][j].real + npsds[i] * rs[j][0]) + 1j * (
2540 snrscale * ss[i][j].imag + npsds[i] * rs[j][1]
2541 )
2542
2543 i = i + 1
2544 elif len(freqfac) == 2:
2545 # generate random numbers
2546 rs = np.random.randn(len(ts[0][:]), 4)
2547
2548 for j, t in enumerate(ts[0][:]):
2549 signalonly[i][j] = (snrscale * ss[i][j].real) + 1j * (
2550 snrscale * ss[i][j].imag
2551 )
2552 ss[i][j] = (snrscale * ss[i][j].real + npsds[i] * rs[j][0]) + 1j * (
2553 snrscale * ss[i][j].imag + npsds[i] * rs[j][1]
2554 )
2555 signalonly[i + 1][j] = (snrscale * ss[i + 1][j].real) + 1j * (
2556 snrscale * ss[i + 1][j].imag
2557 )
2558 ss[i + 1][j] = (
2559 snrscale * ss[i + 1][j].real + npsds[i + 1] * rs[j][2]
2560 ) + 1j * (snrscale * ss[i + 1][j].imag + npsds[i + 1] * rs[j][3])
2561
2562 i = i + 2
2563 else:
2564 print("Something wrong with injection", file=sys.stderr)
2565 sys.exit(1)
2566
2567 snrtot = snrtot * snrscale
2568
2569 return tss, ss, signalonly, snrtot, snrscale, npsds
2570
2571
2572# function to create a time domain PSD from theoretical
2573# detector noise curves. It takes in the detector name and the frequency at
2574# which to generate the noise.
2575#
2576# The noise models are taken from those in lalsimulation/lib/LALSimNoisePSD.c
def detector_noise(det, f):
    """
    Return the theoretical one-sided noise power spectral density for
    detector `det` at frequency `f` (Hz), using the models in
    lalsimulation/lib/LALSimNoisePSD.c.
    """
    import lalsimulation

    # H2 is treated as twice the iLIGO SRD noise
    if det == "H2":
        return lalsimulation.SimNoisePSDiLIGOSRD(f) * 2.0

    # dispatch table mapping detector names to lalsimulation PSD functions
    psdfuncs = {
        "AV1": lalsimulation.SimNoisePSDAdvVirgo,  # Advanced Virgo
        "H1": lalsimulation.SimNoisePSDiLIGOSRD,  # iLIGO SRD
        "L1": lalsimulation.SimNoisePSDiLIGOSRD,
        "G1": lalsimulation.SimNoisePSDGEO,  # GEO_600
        "V1": lalsimulation.SimNoisePSDVirgo,  # initial Virgo
        "T1": lalsimulation.SimNoisePSDTAMA,  # TAMA
        "K1": lalsimulation.SimNoisePSDKAGRA,  # KAGRA
        "AL1": lalsimulation.SimNoisePSDaLIGOZeroDetHighPower,  # advanced LIGO
        "AH1": lalsimulation.SimNoisePSDaLIGOZeroDetHighPower,
    }

    if det not in psdfuncs:
        raise ValueError("{} is not a recognised detector".format(det))

    return psdfuncs[det](f)
2598
2599
2600# function to calculate the optimal SNR of a heterodyned pulsar signal - it
2601# takes in a complex signal model and noise standard deviation
2603 ss = 0
2604 # sum square of signal
2605 for val in s:
2606 ss = ss + val.real**2 + val.imag**2
2607
2608 return np.sqrt(ss / sig**2)
2609
2610
2611# use the Gelman-Rubins convergence test for MCMC chains, where chains is a list
2612# of MCMC numpy chain arrays - this copies the gelman_rubins function in
2613# bayespputils
def gelman_rubins(chains):
    """
    Compute the Gelman-Rubins convergence statistic R for a list of MCMC
    chains (numpy arrays); mirrors the gelman_rubins function in bayespputils.
    """
    m = len(chains)

    # per-chain means and (population) variances
    means = [np.mean(chain) for chain in chains]
    variances = [np.var(chain) for chain in chains]

    # between-chain variance of the means (B/N) and mean within-chain variance (W)
    between = np.var(means)
    within = np.mean(variances)

    # pooled variance estimate, inflated by the between-chain term
    vhat = (within + between) + between / m

    return vhat / within
2630
2631
2632# function to convert MCMC files output from pulsar_parameter_estimation into
2633# a posterior class object - also outputting:
2634# - the mean effective sample size of each parameter chain
2635# - the Gelman-Rubins statistic for each parameter (a dictionary)
2636# - the original length of each chain
# The input is a list of MCMC chain files
2639 from lalinference import bayespputils as bppu
2640
2641 cl = []
2642 neffs = []
2643 grr = {}
2644
2645 mcmc = []
2646
2647 for cfile in chainfiles:
2648 if os.path.isfile(cfile):
2649 # load MCMC chain
2650 mcmcChain = read_pulsar_mcmc_file(cfile)
2651
2652 # if no chain found then exit with None's
2653 if mcmcChain is None:
2654 return None, None, None, None
2655
2656 # find number of effective samples for the chain
2657 neffstmp = []
2658 for j in range(1, mcmcChain.shape[1]):
2659 neff, acl, acf = bppu.effectiveSampleSize(mcmcChain[:, j])
2660 neffstmp.append(neff)
2661
2662 # get the minimum effective sample size
2663 # neffs.append(min(neffstmp))
2664 # get the mean effective sample size
2665 neffs.append(math.floor(np.mean(neffstmp)))
2666
2667 # nskip = math.ceil(mcmcChain.shape[0]/min(neffstmp))
2668 nskip = int(math.ceil(mcmcChain.shape[0] / np.mean(neffstmp)))
2669
2670 # output every nskip (independent) value
2671 mcmc.append(mcmcChain[::nskip, :])
2672 cl.append(mcmcChain.shape[0])
2673 else:
2674 print("File %s does not exist!" % cfile, file=sys.stderr)
2675 return None, None, None, None
2676
2677 # output data to common results format
2678 # get first line of MCMC chain file for header names
2679 cf = open(chainfiles[0], "r")
2680 headers = cf.readline()
2681 headers = cf.readline() # column names are on the second line
2682 # remove % from start
2683 headers = re.sub("%", "", headers)
2684 # remove rads
2685 headers = re.sub("rads", "", headers)
2686 # remove other brackets e.g. around (iota)
2687 headers = re.sub("[()]", "", headers)
2688 cf.close()
2689
2690 # get Gelman-Rubins stat for each parameter
2691 for idx, parv in enumerate(headers.split()):
2692 lgr = []
2693 if parv != "logL":
2694 for j in range(0, len(mcmc)):
2695 achain = mcmc[j]
2696 singlechain = achain[:, idx]
2697 lgr.append(singlechain)
2698 grr[parv.lower()] = gelman_rubins(lgr)
2699
2700 # logL in chain is actually log posterior, so also output the posterior
2701 # values (can be used to estimate the evidence)
2702 headers = headers.replace("\n", "\tpost\n")
2703
2704 # output full data to common format
2705 comfile = chainfiles[0] + "_common_tmp.dat"
2706 try:
2707 cf = open(comfile, "w")
2708 except:
2709 print("Can't open common posterior file!", file=sys.stderr)
2710 sys.exit(0)
2711
2712 cf.write(headers)
2713 for narr in mcmc:
2714 for j in range(0, narr.shape[0]):
2715 mline = narr[j, :]
2716 # add on posterior
2717 mline = np.append(mline, np.exp(mline[0]))
2718
2719 strmline = " ".join(str(x) for x in mline) + "\n"
2720 cf.write(strmline)
2721 cf.close()
2722
2723 # read in as common object
2724 peparser = bppu.PEOutputParser("common")
2725 cf = open(comfile, "r")
2726 commonResultsObj = peparser.parse(cf)
2727 cf.close()
2728
2729 # remove temporary file
2730 os.remove(comfile)
2731
2732 # create posterior class
2733 pos = bppu.Posterior(commonResultsObj, SimInspiralTableEntry=None, votfile=None)
2734
2735 # convert iota back to cos(iota)
2736 # create 1D posterior class of cos(iota) values
2737 cipos = None
2738 cipos = bppu.PosteriorOneDPDF("cosiota", np.cos(pos["iota"].samples))
2739
2740 # add it back to posterior
2741 pos.append(cipos)
2742
2743 # remove iota samples
2744 pos.pop("iota")
2745
2746 return pos, neffs, grr, cl
2747
2748
# a function that attempts to load a pulsar MCMC chain file: first it tries using
# numpy.loadtxt; if it fails it tries reading line by line and checking
2751# for consistent line numbers, skipping lines that are inconsistent; if this
2752# fails it returns None
2754 cfdata = None
2755
2756 # first try reading in with loadtxt (skipping lines starting with %)
2757 try:
2758 cfdata = np.loadtxt(cf, comments="%")
2759 except:
2760 try:
2761 fc = open(cf, "r")
2762
2763 # read in header lines and count how many values each line should have
2764 headers = fc.readline()
2765 headers = fc.readline() # column names are on the second line
2766 fc.close()
2767 # remove % from start
2768 headers = re.sub("%", "", headers)
2769 # remove rads
2770 headers = re.sub("rads", "", headers)
2771 # remove other brackets e.g. around (iota)
2772 headers = re.sub("[()]", "", headers)
2773
2774 lh = len(headers.split())
2775
2776 cfdata = np.array([])
2777
2778 lines = cf.readlines()
2779
2780 for i, line in enumerate(lines):
2781 if "%" in line: # skip lines containing %
2782 continue
2783
2784 lvals = line.split()
2785
2786 # skip line if number of values isn't consistent with header
2787 if len(lvals) != lh:
2788 continue
2789
2790 # convert values to floats
2791 try:
2792 lvalsf = map(float, lvals)
2793 except:
2794 continue
2795
2796 # add values to array
2797 if i == 0:
2798 cfdata = np.array(lvalsf)
2799 else:
2800 cfdata = np.vstack((cfdata, lvalsf))
2801
2802 if cfdata.size == 0:
2803 cfdata = None
2804 except:
2805 cfdata = None
2806
2807 return cfdata
2808
2809
def pulsar_nest_to_posterior(postfile, nestedsamples=False, removeuntrig=True):
    """
    This function will import a posterior sample file created by `lalapps_nest2pos` (or a nested
    sample file). It will be returned as a Posterior class object from `bayespputils`. The
    signal evidence and noise evidence are also returned.

    If there are samples in 'Q22' and not 'H0', and also a distance, or distance samples, then
    the Q22 samples will be converted into equivalent H0 values.

    Parameters
    ----------
    postfile : str, required
        The file name of the posterior or nested sample file. In general this should be a HDF5 file with the extension
        '.hdf' or '.h5', although older format ascii files are still supported at the moment.
    nestedsamples : bool, default: False
        If the file being input contains nested samples, rather than posterior samples, then this flag must be set to
        True
    removeuntrig : bool, default: True
        If this is True then any parameters that are sines or cosines of a value will have the value removed if present
        e.g. if cosiota and iota exist then iota will be removed.

    Returns
    -------
    pos
        The Posterior object (from `bayespputils`)
    sigev
        The log evidence for the signal model
    noiseev
        The log evidence for the noise model
    """

    from lalinference import bayespputils as bppu
    from lalinference.bayespputils import replace_column

    fe = os.path.splitext(postfile)[-1].lower()  # file extension

    # combine multiple nested sample files for an IFO into a single posterior (copied from lalapps_nest2pos)
    if fe == ".h5" or fe == ".hdf":  # HDF5 file
        if nestedsamples:
            # use functions from lalapps_nest2pos to read values from nested sample files i.e. not a posterior file created by lalapps_nest2pos
            try:
                from lalinference.io import read_samples
                from lalinference import LALInferenceHDF5NestedSamplesDatasetName

                samples = read_samples(
                    postfile, tablename=LALInferenceHDF5NestedSamplesDatasetName
                )
                params = samples.colnames

                # make everything a float, since that's what's expected of a CommonResultsObj
                for param in params:
                    replace_column(samples, param, samples[param].astype(float))

                nsResultsObject = (
                    samples.colnames,
                    samples.as_array().view(float).reshape(-1, len(params)),
                )
            except:
                raise IOError("Could not import nested samples")
        else:  # assume posterior file has been created with lalapps_nest2pos
            peparser = bppu.PEOutputParser("hdf5")
            nsResultsObject = peparser.parse(postfile)
    elif fe == ".gz":  # gzipped file
        import gzip

        peparser = bppu.PEOutputParser("common")
        nsResultsObject = peparser.parse(gzip.open(postfile, "r"))
    else:  # assume an ascii text file
        peparser = bppu.PEOutputParser("common")
        nsResultsObject = peparser.parse(open(postfile, "r"))

    pos = bppu.Posterior(nsResultsObject, SimInspiralTableEntry=None)

    # remove any unchanging variables and randomly shuffle the rest
    pnames = pos.names
    nsamps = len(pos[pnames[0]].samples)
    # one random permutation shared by all parameters, so sample rows stay aligned
    permarr = np.arange(nsamps)
    np.random.shuffle(permarr)
    posdist = None
    posfreqs = None
    for pname in pnames:
        # check if all samples are the same
        if pos[pname].samples.tolist().count(pos[pname].samples[0]) == len(
            pos[pname].samples
        ):
            if pname == "f0_fixed":
                # try getting a fixed f0 value (for calculating h0 from Q22)
                posfreqs = pos[pname].samples[0]
            elif pname == "dist":
                # try getting a fixed distance value (for calculating h0 from Q22 if required)
                posdist = pos[pname].samples[0] / KPC

            # fixed parameters carry no posterior information, so drop them
            pos.pop(pname)
        else:
            # shuffle (replace the parameter's samples with the permuted ones)
            shufpos = None
            shufpos = bppu.PosteriorOneDPDF(pname, pos[pname].samples[permarr])
            pos.pop(pname)
            pos.append(shufpos)

    # check whether iota has been used
    posIota = None
    if "iota" in pos.names:
        posIota = pos["iota"].samples

    if posIota is not None:
        # derive cos(iota) samples; optionally remove the original iota ones
        cipos = None
        cipos = bppu.PosteriorOneDPDF("cosiota", np.cos(posIota))
        pos.append(cipos)
        if removeuntrig:
            pos.pop("iota")

    # check whether sin(i) binary parameter has been used
    posI = None
    if "i" in pos.names:
        posI = pos["i"].samples

    if posI is not None:
        # derive sin(i) samples; optionally remove the original i ones
        sinipos = None
        sinipos = bppu.PosteriorOneDPDF("sini", np.sin(posI))
        pos.append(sinipos)
        if removeuntrig:
            pos.pop("i")

    # gather the complex-amplitude parameter samples, if present
    posC21 = None
    if "c21" in pos.names:
        posC21 = pos["c21"].samples

    posC22 = None
    if "c22" in pos.names:
        posC22 = pos["c22"].samples

    posphi22 = None
    if "phi22" in pos.names:
        posphi22 = pos["phi22"].samples

    # convert C22 back into h0, and phi22 back into phi0 if required
    # (only when there is no C21, i.e. a pure l=m=2 mode signal)
    if posC22 is not None and posC21 is None:
        h0pos = None
        h0pos = bppu.PosteriorOneDPDF("h0", 2.0 * pos["c22"].samples)

        pos.append(h0pos)
        pos.pop("c22")

        if posphi22 is not None:
            phi0pos = None
            phi0pos = bppu.PosteriorOneDPDF(
                "phi0", np.fmod(pos["phi22"].samples + math.pi, 2.0 * math.pi)
            )

            pos.append(phi0pos)
            pos.pop("phi22")

    # convert Q22 to h0 if required
    posQ22 = None
    if "q22" in pos.names:
        posQ22 = pos["q22"].samples

    if "dist" in pos.names:
        posdist = (
            pos["dist"].samples / KPC
        )  # distance in kpc (for use in converting Q22 to h0)

        # convert distance samples to kpc
        pos.pop("dist")
        distpos = bppu.PosteriorOneDPDF("dist", posdist)
        pos.append(distpos)

    if "f0" in pos.names:
        posfreqs = pos["f0"].samples
    # if not found this will have been set from the f0_fixed value above

    if posQ22 is not None and posdist is not None and "h0" not in pos.names:
        h0pos = None
        h0pos = bppu.PosteriorOneDPDF("h0", quadrupole_to_h0(posQ22, posfreqs, posdist))

        pos.append(h0pos)

    # get evidence (as in lalapps_nest2pos)
    if fe == ".h5" or fe == ".hdf":
        # read evidence from HDF5 file
        hdf = h5py.File(postfile, "r")
        a = hdf["lalinference"]["lalinference_nest"]
        sigev = a.attrs["log_evidence"]
        noiseev = a.attrs["log_noise_evidence"]
        hdf.close()
    else:
        # older ascii outputs store the evidences in a companion '_B.txt' file
        B = np.loadtxt(postfile.replace(".gz", "") + "_B.txt")
        sigev = B[1]
        noiseev = B[2]

    # return posterior samples, signal evidence (B[1]) and noise evidence (B[2])
    return pos, sigev, noiseev
2995
2996
2997# function to add two exponentiated log values and return the log of the result
def logplus(x, y):
    """Return log(exp(x) + exp(y)), computed without overflow."""
    return np.logaddexp(x, y)
3000
3001
def logtrapz(lnf, dx):
    """
    Perform trapezium rule integration for the logarithm of a function on a regular grid.

    Inputs
    ------
    lnf - a numpy array of values that are the natural logarithm of a function
    dx - a float giving the step size between values in the function

    Returns
    -------
    The natural logarithm of the area under the function.
    """

    # trapezium rule: (dx/2) * (sum of left endpoints + sum of right endpoints),
    # evaluated entirely in log space
    lhs = logsumexp(lnf[:-1])
    rhs = logsumexp(lnf[1:])
    return np.log(dx / 2.0) + np.logaddexp(lhs, rhs)
3017
3018
3019# function to marginalise over a parameter
def marginalise(like, pname, pnames, ranges):
    """
    Marginalise a log-likelihood array over one parameter.

    like   - a copy of the loglikelihood array
    pname  - the parameter to marginalise over
    pnames - a copy of the list of parameters that haven't been marginalised over
    ranges - a copy of the dictionary of ranges that haven't been marginalised over

    The marginalised array is returned; `pname` is removed from both `pnames`
    and `ranges` in place.
    """

    grid = ranges[pname]
    ax = pnames.index(pname)

    if len(grid) > 1:
        # integrate in log space along the chosen axis with the trapezium rule
        marg = np.apply_along_axis(logtrapz, ax, like, grid[1] - grid[0])
    elif len(grid) == 1:
        # a singleton dimension needs no integration: drop that axis by reshaping
        newshape = tuple(n for i, n in enumerate(like.shape) if i != ax)
        marg = np.reshape(like, newshape)

    # remove the marginalised parameter from the bookkeeping structures
    del ranges[pname]
    pnames.remove(pname)

    return marg
3044
3045
3046# return the log marginal posterior on a given parameter (if 'all' marginalise over all parameters)
def marginal(lnlike, pname, pnames, ranges, multflatprior=False):
    """
    Return the log marginal posterior on parameter `pname` (if 'all',
    marginalise over every parameter), optionally multiplying in flat priors
    over each marginalised parameter's range.
    """
    from copy import deepcopy

    # work on copies so the caller's arrays/lists/dicts are untouched
    likecopy = deepcopy(lnlike)
    namescopy = deepcopy(pnames)
    rangescopy = deepcopy(ranges)

    for name in pnames:
        if name == pname:
            continue

        if multflatprior:
            # multiply by a flat prior, 1/(parameter range), before marginalising
            lnprior = np.log(1.0 / (rangescopy[name][-1] - rangescopy[name][0]))
            likecopy = marginalise(likecopy + lnprior, name, namescopy, rangescopy)
        else:
            likecopy = marginalise(likecopy, name, namescopy, rangescopy)

    return likecopy
3069
3070
3071# a function to chop the data time series, ts, up into contigous segments with a maximum length, chunkMax
def get_chunk_lengths(ts, chunkMax):
    """
    Chop the data time series `ts` up into contiguous segments, returning a
    list of segment lengths (in samples).

    Consecutive points within two (minimum) sample times of each other are
    counted as part of the same chunk; a chunk is also closed once it reaches
    `chunkMax` samples.

    (The unused `j` counter from the original implementation has been
    removed; behaviour is otherwise unchanged.)
    """
    i = count = 0

    length = len(ts)

    chunkLengths = []

    dt = np.min(np.diff(ts))  # the (minimum) time step in the data

    # create vector of data segment length
    while True:
        count += 1  # counter

        # break clause
        if i > length - 2:
            # set final value of chunkLength
            chunkLengths.append(count)
            break

        i += 1

        t1 = ts[i - 1]
        t2 = ts[i]

        # if consecutive points are within two sample times of each other count as in the same chunk
        if t2 - t1 > 2.0 * dt or count == chunkMax:
            chunkLengths.append(count)
            count = 0  # reset counter

    return chunkLengths
3104
3105
def pulsar_estimate_snr(source, det, tstart, duration, Sn, dt=1800):
    """
    A function to estimate the signal-to-noise ratio of a pulsar signal (for a triaxial neutron
    star emitting at twice the rotation frequency from the l=m=2 quadrupole mode) in a given
    detector over a given time range, for a given one-sided power spectral density.

    Inputs
    ------
    source - a dictionary containing 'h0', 'cosiota', 'psi', 'ra' (rads or string), 'dec' (rads or string)
    det - the detector, e.g. 'H1'
    tstart - a GPS start time
    duration - a signal duration (seconds)
    Sn - a one-sided power spectral density
    dt - timestep for antenna response calculation [default: 1800s]

    Returns
    -------
    rho - an estimate of the optimal matched filter SNR
    """

    # the antenna pattern repeats with the sidereal day, so for long durations
    # sum its square over one sidereal day and multiply by the number of whole
    # days, then add the remainder
    sidday = 86164.0905  # one sidereal day in seconds
    Ndays = duration / sidday
    tend = tstart + duration

    sFp = 0
    sFc = 0

    # sum the squared antenna pattern over any whole sidereal days
    if Ndays > 1:
        tts = np.arange(tstart, tstart + sidday, dt)

        # get antenna pattern over one sidereal day
        Fp, Fc = antenna_response(tts, source["ra"], source["dec"], source["psi"], det)

        sFp = np.floor(Ndays) * np.sum(Fp**2)
        sFc = np.floor(Ndays) * np.sum(Fc**2)

    # add on the remaining fraction of a sidereal day; this also covers
    # durations shorter than one sidereal day (previously the condition
    # `np.floor(Ndays) * sidday > dt` was never met in that case, giving an
    # SNR of zero)
    if tend - (tstart + np.floor(Ndays) * sidday) > dt:
        tts = np.arange(tstart + sidday * np.floor(Ndays), tend, dt)

        Fp, Fc = antenna_response(tts, source["ra"], source["dec"], source["psi"], det)

        sFp += np.sum(Fp**2)
        sFc += np.sum(Fc**2)

    # get snr squared
    snr2 = (
        (source["h0"] ** 2 / Sn)
        * dt
        * (
            sFp * (0.5 * (1 + source["cosiota"] ** 2)) ** 2
            + sFc * source["cosiota"] ** 2
        )
    )

    return np.sqrt(snr2)
3165
3166
def pulsar_estimate_h0_from_snr(snr, source, det, tstart, duration, Sn, dt=600):
    """
    A function to estimate the signal amplitude of a pulsar signal (for a triaxial neutron star emitting at twice
    the rotation frequency from the l=m=2 quadrupole mode) for a given SNR in a particular detector over
    a given time range, for a given one-sided power spectral density.

    Inputs
    ------
    snr - the optimal matched filter signal-to-noise ratio
    source - a dictionary containing 'cosiota', 'psi', 'ra' (rads or string), 'dec' (rads or string)
    det - the detector, e.g. 'H1'
    tstart - a GPS start time for the signal
    duration - a signal duration (seconds)
    Sn - a one-sided power spectral density
    dt - timestep for antenna response calculation [default: 600s]

    Returns
    -------
    h0 - an estimate of signal amplitude required to give the input SNR
    """

    # the antenna pattern repeats with the sidereal day, so for long durations
    # sum its square over one sidereal day and multiply by the number of whole
    # days, then add the remainder
    sidday = 86164.0905  # one sidereal day in seconds
    Ndays = duration / sidday
    tend = tstart + duration

    sFp = 0
    sFc = 0

    # sum the squared antenna pattern over any whole sidereal days
    if Ndays > 1:
        tts = np.arange(tstart, tstart + sidday, dt)

        # get antenna pattern over one sidereal day
        Fp, Fc = antenna_response(tts, source["ra"], source["dec"], source["psi"], det)

        sFp = np.floor(Ndays) * np.sum(Fp**2)
        sFc = np.floor(Ndays) * np.sum(Fc**2)

    # add on the remaining fraction of a sidereal day; this also covers
    # durations shorter than one sidereal day (previously the condition
    # `np.floor(Ndays) * sidday > dt` was never met in that case, giving a
    # division by zero below)
    if tend - (tstart + np.floor(Ndays) * sidday) > dt:
        tts = np.arange(tstart + sidday * np.floor(Ndays), tend, dt)

        Fp, Fc = antenna_response(tts, source["ra"], source["dec"], source["psi"], det)

        sFp += np.sum(Fp**2)
        sFc += np.sum(Fc**2)

    # invert the optimal SNR formula to get h0 squared
    h02 = (snr**2 * Sn) / (
        dt
        * (
            sFp * (0.5 * (1 + source["cosiota"] ** 2)) ** 2
            + sFc * source["cosiota"] ** 2
        )
    )

    return np.sqrt(h02)
3226
3227
def pulsar_posterior_grid(
    dets,
    ts,
    data,
    ra,
    dec,
    sigmas=None,
    paramranges=None,
    datachunks=30,
    chunkmin=5,
    ngrid=50,
    outputlike=False,
):
    """
    A function to calculate the 4-parameter posterior probability density for a continuous wave signal
    given a set of processed data from a set of detectors.

    Inputs
    ------
           dets - a list of strings containing the detectors being used in the likelihood calculation
             ts - a dictionary of 1d numpy arrays containing the GPS times of the data for each detector
           data - a dictionary of 1d numpy arrays containing the complex data values for each detector
             ra - the right ascension (in rads) of the source
            dec - the declination (in rads) of the source
         sigmas - a dictionary of 1d numpy arrays containing the times series of standard deviation
                  values for each detector. If this is not given a Student's t likelihood will be used,
                  but if it is given a Gaussian likelihood will be used (default: None)
    paramranges - a dictionary of tuples for each parameter ('h0', 'phi0', 'psi' and 'cosiota') giving
                  the lower and upper ranges of the parameter grid and the number of grid points. If not
                  given then defaults will be used. (default: None)
     datachunks - in the calculation split the data into stationary chunks with a maximum length given
                  by this value. If set to 0 or inf then the data will not be split. (default: 30)
       chunkmin - this is the shortest chunk length allowed to be included in the likelihood calculation
                  (default: 5)
          ngrid - the number of grid points to use for each dimension of the likelihood calculation. This
                  is used if the values are not specified in the paramranges argument (default: 50)
     outputlike - output the log likelihoods rather than posteriors (default: False)

    Returns
    -------
    L           - The 4d posterior (or likelihood) over all parameters
    h0pdf       - The 1d marginal posterior for h0
    phi0pdf     - The 1d marginal posterior for phi0 (the rotation frequency, not GW frequency)
    psipdf      - The 1d marginal posterior for psi
    cosiotapdf  - The 1d marginal posterior for cosiota
    lingrids    - A dictionary of the grid points for each parameter
    sigev       - The log evidence for the signal model
    noiseev     - The log evidence for the noise model

    An example would be:
    # set the detectors
    dets = ['H1', 'L1']

    # set the time series and data
    ts = {}
    data = {}
    for det in dets:
        ts[det] = np.arange(900000000., 921000843., 60.)
        data[det] = np.random.randn(len(ts[det])) + 1j*np.random.randn(len(ts[det]))

    # set the parameter ranges
    ra = 0.2
    dec = -0.8
    paramranges = {}
    paramranges['h0'] = (0., 2., 50)
    paramranges['psi'] = (0., np.pi/2., 50)
    paramranges['phi0'] = (0., np.pi, 50)
    paramranges['cosiota'] = (-1., 1., 50)

    L, h0pdf, phi0pdf, psipdf, cosiotapdf, grid, sigev, noiseev = pulsar_posterior_grid(dets, ts, data, ra, dec,
                                                                                        paramranges=paramranges)
    """

    from scipy.special import gammaln

    # work on a local copy so the caller's dictionary is never mutated
    # (previously a mutable default argument {} was used and modified in
    # place below, leaking state between calls and into the caller)
    paramranges = {} if paramranges is None else dict(paramranges)

    # set the likelihood to either Student's t or Gaussian
    liketype = "studentst" if sigmas is None else "gaussian"

    # if dets is just a single string, make it into a list
    if not isinstance(dets, list):
        if isinstance(dets, str):
            dets = [dets]  # make into list
        else:
            print("Detector not, or incorrectly, set", file=sys.stderr)
            return

    # allowed list of detectors
    alloweddets = ["H1", "L1", "V1", "G1", "T1", "H2"]

    # check consistency of arguments
    for det in dets:
        if det not in alloweddets:
            print(
                "Detector not in list of allowed detectors ("
                + ",".join(alloweddets)
                + ")",
                file=sys.stderr,
            )
            return

        # checks on time stamps
        if det not in ts:
            print("No time stamps given for detector %s" % det, file=sys.stderr)
            return

        # checks on data
        if det not in data:
            print("No data time series given for detector %s" % det, file=sys.stderr)
            return

        # checks on sigmas
        if sigmas is not None:
            if det not in sigmas:
                print(
                    "No sigma time series given for detector %s" % det, file=sys.stderr
                )
                return

        # check length consistency (warn only — kept as non-fatal to match
        # the original behaviour)
        if len(ts[det]) != len(data[det]):
            print(
                "Length of times stamps array and data array are inconsistent for %s"
                % det,
                file=sys.stderr,
            )

        if sigmas is not None:
            if len(ts[det]) != len(sigmas[det]):
                print(
                    "Length of times stamps array and sigma array are inconsistent for %s"
                    % det,
                    file=sys.stderr,
                )

    # setup grid on parameter space
    params = ["h0", "phi0", "psi", "cosiota"]
    defaultranges = {}  # default ranges for use if not specified
    defaultranges["phi0"] = (0.0, np.pi, ngrid)  # 0 to pi for phi0
    defaultranges["psi"] = (0.0, np.pi / 2.0, ngrid)  # 0 to pi/2 for psi
    defaultranges["cosiota"] = (-1.0, 1.0, ngrid)  # -1 to 1 for cos(iota)
    # 0 to 2 times the max standard deviation of the data (scaled to the data length)
    defaultranges["h0"] = (
        0.0,
        2.0
        * np.max([np.std(data[dv]) for dv in data])
        * np.sqrt(1440.0 / np.max([len(ts[dtx]) for dtx in ts])),
        ngrid,
    )
    lingrids = {}
    shapetuple = ()
    for param in params:
        if param in paramranges:
            if len(paramranges[param]) != 3:
                paramranges[param] = defaultranges[param]  # set to default range
            else:
                if (
                    paramranges[param][1] < paramranges[param][0]
                    or paramranges[param][2] < 1
                ):
                    print(
                        "Parameter ranges wrong for %s, reverting to defaults" % param,
                        file=sys.stderr,
                    )
                    paramranges[param] = defaultranges[param]
        else:  # use defaults
            paramranges[param] = defaultranges[param]

        lingrids[param] = np.linspace(
            paramranges[param][0], paramranges[param][1], paramranges[param][2]
        )
        shapetuple += (paramranges[param][2],)

    # set up meshgrid on parameter space
    [H0S, PHI0S, PSIS, COSIS] = np.meshgrid(
        lingrids["h0"],
        lingrids["phi0"],
        lingrids["psi"],
        lingrids["cosiota"],
        indexing="ij",
    )

    # precompute amplitude/angle terms used in the likelihood
    APLUS = 0.5 * (1.0 + COSIS**2)
    ACROSS = COSIS
    SINPHI = np.sin(2.0 * PHI0S)  # multiply phi0 by two to get GW frequency
    COSPHI = np.cos(2.0 * PHI0S)
    SIN2PSI = np.sin(2.0 * PSIS)
    COS2PSI = np.cos(2.0 * PSIS)

    Apsinphi_2 = 0.5 * APLUS * SINPHI
    Acsinphi_2 = 0.5 * ACROSS * SINPHI
    Apcosphi_2 = 0.5 * APLUS * COSPHI
    Accosphi_2 = 0.5 * ACROSS * COSPHI
    Acpsphicphi_2 = 0.5 * APLUS * ACROSS * COSPHI * SINPHI

    C2PS2P = COS2PSI * SIN2PSI
    C2P2 = COS2PSI**2
    S2P2 = SIN2PSI**2

    H02 = H0S**2

    # initialise loglikelihood
    like = np.zeros(shapetuple)
    noiselike = 0.0

    # get flat prior
    logprior = 0.0
    for p in params:
        logprior -= np.log(paramranges[p][1] - paramranges[p][0])

    # loop over detectors and calculate the log likelihood
    for det in dets:
        if liketype == "gaussian":
            nstd = sigmas[det]
        else:
            nstd = np.ones(len(ts[det]))

        # get the antenna pattern (psi = 0; polarisation handled on the grid)
        [As, Bs] = antenna_response(ts[det], ra, dec, 0.0, det)

        # split the data into chunks if required
        if np.isfinite(datachunks) and datachunks > 0:
            chunklengths = get_chunk_lengths(ts[det], datachunks)
        else:
            chunklengths = [len(ts[det])]

        startidx = 0
        for cl in chunklengths:
            endidx = startidx + cl

            # skip chunks shorter than the minimum allowed length, but still
            # advance the start index — previously `continue` bypassed the
            # index update, misaligning every chunk after a skipped one
            if cl < chunkmin:
                startidx += cl
                continue

            # data and antenna patterns whitened by the noise standard deviation
            thisdata = data[det][startidx:endidx] / nstd[startidx:endidx]
            ast = As[startidx:endidx] / nstd[startidx:endidx]
            bst = Bs[startidx:endidx] / nstd[startidx:endidx]

            A = np.sum(ast**2)
            B = np.sum(bst**2)
            AB = np.dot(ast, bst)

            dA1real = np.dot(thisdata.real, ast)
            dA1imag = np.dot(thisdata.imag, ast)

            dB1real = np.dot(thisdata.real, bst)
            dB1imag = np.dot(thisdata.imag, bst)

            dd1real = np.sum(thisdata.real**2)
            dd1imag = np.sum(thisdata.imag**2)

            P1 = A * C2P2 + B * S2P2 + 2.0 * AB * C2PS2P
            P2 = B * C2P2 + A * S2P2 - 2.0 * AB * C2PS2P
            P3 = AB * (C2P2 - S2P2) - A * C2PS2P + B * C2PS2P

            hr2 = (Apcosphi_2**2) * P1 + (Acsinphi_2**2) * P2 + Acpsphicphi_2 * P3
            hi2 = (Apsinphi_2**2) * P1 + (Accosphi_2**2) * P2 - Acpsphicphi_2 * P3

            d1hr = Apcosphi_2 * (dA1real * COS2PSI + dB1real * SIN2PSI) + Acsinphi_2 * (
                dB1real * COS2PSI - dA1real * SIN2PSI
            )
            d1hi = Apsinphi_2 * (dA1imag * COS2PSI + dB1imag * SIN2PSI) - Accosphi_2 * (
                dB1imag * COS2PSI - dA1imag * SIN2PSI
            )

            chiSq = dd1real + dd1imag + H02 * (hr2 + hi2) - 2.0 * H0S * (d1hr + d1hi)

            if liketype == "gaussian":
                like -= 0.5 * (chiSq)
                like -= (cl * np.log(2.0 * np.pi)) + 2.0 * np.sum(
                    np.log(nstd[startidx:endidx])
                )
                noiselike -= 0.5 * (dd1real + dd1imag)
                noiselike -= (cl * np.log(2.0 * np.pi)) + 2.0 * np.sum(
                    np.log(nstd[startidx:endidx])
                )
            else:
                # Student's t likelihood (marginalised over an unknown noise level)
                like -= float(cl) * np.log(chiSq)
                like += gammaln(cl) - np.log(2.0) - cl * np.log(np.pi)
                noiselike -= float(cl) * np.log(dd1real + dd1imag)
                noiselike += gammaln(cl) - np.log(2.0) - cl * np.log(np.pi)

            startidx += cl  # updated start index

    # convert to posterior
    like += logprior

    # get signal evidence
    sigev = marginal(like, "all", params, lingrids)

    # normalise posterior
    if not outputlike:
        like -= sigev
    else:
        like -= logprior  # remove prior if outputting likelihood

    # get marginal posteriors for each parameter
    posts = {}
    for p in params:
        if not outputlike:
            posts[p] = np.exp(marginal(like, p, params, lingrids))
        else:
            posts[p] = marginal(
                like, p, params, lingrids, multflatprior=True
            )  # output individual log likelihoods

    return (
        like,
        posts["h0"],
        posts["phi0"],
        posts["psi"],
        posts["cosiota"],
        lingrids,
        sigev,
        noiselike,
    )
3558
3559
# current version of the ATNF pulsar catalogue used when querying it
# in get_atnf_info()
ATNF_VERSION = "1.58"
3562
3563
def get_atnf_info(psr):
    """
    Get the pulsar (psr) distance (DIST in kpc), proper motion corrected period derivative (P1_I) and any association
    (ASSOC e.g. GC) from the ATNF catalogue.

    Returns a tuple (dist, p1_I, assoc, atnfurl), with None for any value not
    found, or None overall if the query failed or the pulsar is not in the
    catalogue.
    """

    import requests

    psrname = re.sub(r"\+", "%2B", psr)  # switch '+' for unicode character

    # build the query URL for the required parameters
    atnfurl = (
        "http://www.atnf.csiro.au/people/pulsar/psrcat/proc_form.php?version="
        + ATNF_VERSION
    )
    atnfurl += "&Dist=Dist&Assoc=Assoc&P1_i=P1_i"  # set parameters to get
    atnfurl += (
        "&startUserDefined=true&c1_val=&c2_val=&c3_val=&c4_val=&sort_attr=jname&sort_order=asc&condition=&pulsar_names="
        + psrname
    )
    atnfurl += "&ephemeris=selected&submit_ephemeris=Get+Ephemeris&coords_unit=raj%2Fdecj&radius=&coords_1=&coords_2="
    atnfurl += "&style=Long+with+last+digit+error&no_value=*&fsize=3&x_axis=&x_scale=linear&y_axis=&y_scale=linear&state=query"

    try:
        urldat = requests.get(atnfurl)  # read ATNF url
        predat = re.search(
            r"<pre[^>]*>([^<]+)</pre>", str(urldat.content)
        )  # to extract data within pre environment (without using BeautifulSoup) see e.g. http://stackoverflow.com/a/3369000/1862861 and http://stackoverflow.com/a/20046030/1862861
        pdat = (
            predat.group(1).strip().split(r"\n")
        )  # remove preceeding and trailing new lines and split lines
    except Exception:  # narrow from a bare except so SystemExit etc. propagate
        print(
            "Warning... could not get information from ATNF pulsar catalogue.",
            file=sys.stderr,
        )
        return None

    # check whether information could be found and get distance, intrinsic
    # period derivative and association from data
    dist = None
    p1_I = None
    assoc = None
    for line in [p for p in pdat if len(p) != 0]:
        if "WARNING" in line or "not in catalogue" in line:
            return None
        vals = line.split()
        if "DIST" in vals[0]:
            dist = float(vals[1])
        if "P1_I" in vals[0]:
            # BUGFIX: this value was previously assigned to an unused local
            # 'age', so p1_I was always returned as None
            p1_I = float(vals[1])
        if "ASSOC" in vals[0]:
            assoc = vals[1]

    return (dist, p1_I, assoc, atnfurl)
3617
3618
def cov_to_cor(cov):
    """
    Convert a covariance matrix to a correlation coefficient matrix.
    Return the correlation coefficient matrix and the standard deviations
    from the covariance matrix.
    """

    cov = np.asarray(cov)

    # get the standard deviations from the covariance matrix
    sigmas = np.sqrt(np.diag(cov))

    # normalise each element by sigma_i * sigma_j; equivalent to the former
    # Dinv @ cov @ Dinv with a diagonal D, but without building and
    # inverting a dense diagonal matrix (O(n^2) instead of O(n^3))
    Ccor = cov / np.outer(sigmas, sigmas)

    return Ccor, sigmas
#define min(a, b)
def __init__(self, parfilenm)
This class parses a TEMPO(2)-style pulsar parameter file.
def __init__(self, priorfilenm)
static const INT4 a
long long count
Definition: hwinject.c:371
def heterodyned_pulsar_signal(pardict, detector, starttime=900000000.0, duration=86400.0, dt=60.0, datatimes=None)
def quadrupole_to_h0(q22, freq, dist)
def rad_to_hms(rad)
rad_to_hms(rad): Convert radians to hours, minutes, and seconds of arc.
def dec_to_rad(dec_string)
dec_to_rad(dec_string): Given a string containing DEC information as 'dd:mm:ss.ssss',...
def hms_to_rad(hour, mins, sec)
hms_to_rad(hour, min, sec): Convert hours, minutes, and seconds of arc to radians
def cov_to_cor(cov)
Convert a covariance matrix to a correlation coefficient matrix.
def pulsar_mcmc_to_posterior(chainfiles)
def heterodyned_pinsf_pulsar(starttime, duration, dt, detector, pardict)
def plot_posterior_hist(poslist, param, ifos, parambounds=[float("-inf"), float("inf")], nbins=50, upperlimit=0, overplot=False, parfile=None, mplparams=False)
def dms_to_rad(deg, mins, sec)
dms_to_rad(deg, min, sec): Convert degrees, minutes, and seconds of arc to radians.
def pulsar_posterior_grid(dets, ts, data, ra, dec, sigmas=None, paramranges={}, datachunks=30, chunkmin=5, ngrid=50, outputlike=False)
A function to calculate the 4-parameter posterior probability density for a continuous wave signal gi...
def rad_to_string(rad, ra_or_dec)
rad_to_string(rad, ra_or_dec): Convert an angle in radians to hours/degrees, minutes seconds and outp...
def ra_to_rad(ra_string)
ra_to_rad(ar_string): Given a string containing RA information as 'hh:mm:ss.ssss',...
def pferrs(porf, porferr, pdorfd=None, pdorfderr=None)
pferrs(porf, porferr, pdorfd=None, pdorfderr=None): Calculate the period or frequency errors and the ...
def plot_limits_hist(lims, param, ifos, prevlims=None, bins=20, overplot=False, mplparams=False)
def marginalise(like, pname, pnames, ranges)
def pulsar_estimate_snr(source, det, tstart, duration, Sn, dt=1800)
A function to estimate the signal-to-noise ratio of a pulsar signal (for a triaxial neutron star emit...
def heterodyned_triaxial_pulsar(starttime, duration, dt, detector, pardict)
def convert_model_parameters(pardict)
def coord_to_string(h_or_d, m, s)
coord_to_string(h_or_d, m, s): Return a formatted string of RA or DEC values as 'hh:mm:ss....
def logtrapz(lnf, dx)
Perform trapezium rule integration for the logarithm of a function on a regular grid.
def pulsar_nest_to_posterior(postfile, nestedsamples=False, removeuntrig=True)
This function will import a posterior sample file created by lalapps_nest2pos (or a nested sample fil...
def phipsiconvert(phipchain, psipchain)
def upper_limit_greedy(pos, upperlimit=0.95, nbins=100)
def marginal(lnlike, pname, pnames, ranges, multflatprior=False)
def pulsar_estimate_h0_from_snr(snr, source, det, tstart, duration, Sn, dt=600)
A function to estimate the signal amplitude of a pulsar signal (for a triaxial neutron star emitting ...
def plot_posterior_hist2D(poslist, params, ifos, bounds=None, nbins=[50, 50], parfile=None, overplot=False, mplparams=False)
def get_chunk_lengths(ts, chunkMax)
def tukey_window(N, alpha=0.5)
def read_hist_from_file(histfile)
def h0_to_quadrupole(h0, freq, dist)
def hist_norm_bounds(samples, nbins, low=float("-inf"), high=float("inf"))
def plot_2Dhist_from_file(histfile, ndimlabel, mdimlabel, margpars=True, mplparams=False)
def spin_down_limit(freq, fdot, dist)
def inject_pulsar_signal(starttime, duration, dt, detectors, pardict, freqfac=[2.0], npsds=None, snrscale=None)
def antenna_response(gpsTime, ra, dec, psi, det)
def upper_limit(pos, upperlimit=0.95, parambounds=[float("-inf"), float("inf")], nbins=50)
def plot_posterior_chain(poslist, param, ifos, grr=None, withhist=0, mplparams=False)
def get_atnf_info(psr)
Get the pulsar (psr) distance (DIST in kpc), proper motion corrected period derivative (P1_I) and any...
def plot_h0_lims(h0lims, f0gw, ifos, xlims=[10, 1500], ulesttop=None, ulestbot=None, prevlim=None, prevlimf0gw=None, overplot=False, mplparams=False)
def dms_to_deg(deg, mins, sec)
dms_to_deg(deg, min, sec): Convert degrees, minutes, and seconds of arc to degrees.
def rad_to_dms(rad)
rad_to_dms(rad): Convert radians to degrees, minutes, and seconds of arc.
def plot_Bks_ASDs(Bkdata, delt=86400, plotpsds=True, plotfscan=False, removeoutlier=None, mplparams=False)
def p_to_f(p, pd, pdd=None)
p_to_f(p, pd, pdd=None): Convert period, period derivative and period second derivative to the equiva...
def h0ul_from_prior_file(priorfile, ulval=0.95)
def h0_to_ellipticity(h0, freq, dist)
#define max(a, b)