LALPulsar  6.1.0.1-89842e6
pulsarpputils.py
Go to the documentation of this file.
1 # -*- coding: utf-8 -*-
2 #
3 # pulsarpputils.py
4 #
5 # Copyright 2012
6 # Matthew Pitkin <matthew.pitkin@ligo.org>
7 #
8 #
9 # This program is free software; you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation; either version 2 of the License, or
12 # (at your option) any later version.
13 #
14 # This program is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the GNU General Public License
20 # along with this program; if not, write to the Free Software
21 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
22 # MA 02110-1301, USA.
23 
24 # known pulsar analysis post-processing utilities
25 
26 # Many functions in this are taken from, or derived from equivalents available in
27 # the PRESTO pulsar software package http://www.cv.nrao.edu/~sransom/presto/
28 
29 from __future__ import print_function, division
30 
31 import sys
32 import math
33 import cmath
34 import os
35 import numpy as np
36 import struct
37 import re
38 import h5py
39 
40 from scipy.integrate import cumtrapz
41 from scipy.interpolate import interp1d
42 
43 try:
44  from scipy.special import logsumexp
45 except ImportError: # scipy < 0.19.0
46  from scipy.misc import logsumexp
47 
48 from six import string_types
49 
# some common constants taken from psr_constants.py in PRESTO
# angle conversion factors
ARCSECTORAD = float("4.8481368110953599358991410235794797595635330237270e-6")  # arcsec -> rad
RADTOARCSEC = float("206264.80624709635515647335733077861319665970087963")  # rad -> arcsec
SECTORAD = float("7.2722052166430399038487115353692196393452995355905e-5")  # time sec -> rad
RADTOSEC = float("13750.987083139757010431557155385240879777313391975")  # rad -> time sec
RADTODEG = float("57.295779513082320876798154814105170332405472466564")  # rad -> deg
DEGTORAD = float("1.7453292519943295769236907684886127134428718885417e-2")  # deg -> rad
RADTOHRS = float("3.8197186342054880584532103209403446888270314977710")  # rad -> hours
HRSTORAD = float("2.6179938779914943653855361527329190701643078328126e-1")  # hours -> rad
PI = float("3.1415926535897932384626433832795028841971693993751")
TWOPI = float("6.2831853071795864769252867665590057683943387987502")
PIBYTWO = float("1.5707963267948966192313216916397514420985846996876")
# time conversion factors
SECPERDAY = float("86400.0")
SECPERJULYR = float("31557600.0")  # seconds per Julian year
KMPERPC = float("3.0856776e13")  # km per parsec
KMPERKPC = float("3.0856776e16")  # km per kiloparsec
# astrophysical constants
Tsun = float("4.925490947e-6")  # sec
Msun = float("1.9891e30")  # kg
Mjup = float("1.8987e27")  # kg
Rsun = float("6.9551e8")  # m
Rearth = float("6.378e6")  # m
SOL = float("299792458.0")  # m/s
MSUN = float("1.989e+30")  # kg
G = float("6.673e-11")  # m^3/s^2/kg
C = SOL
KPC = float("3.0856776e19")  # kiloparsec in metres
I38 = float("1e38")  # moment of inertia kg m^2
77 
78 
79 # some angle conversion functions taken from psr_utils.py in PRESTO
def rad_to_dms(rad):
    """
    rad_to_dms(rad):
    Convert radians to degrees, minutes, and seconds of arc.
    """
    sign = -1 if rad < 0.0 else 1

    # work with the magnitude, folded into [0, pi)
    total = RADTODEG * np.fmod(np.fabs(rad), math.pi)
    degs = int(total)
    total = (total - degs) * 60.0
    mins = int(total)
    secs = (total - mins) * 60.0

    # when the degrees field is zero the sign must be carried on the
    # minutes and seconds instead
    if sign == -1 and degs == 0:
        return (-degs, -mins, -secs)
    return (sign * degs, mins, secs)
98 
99 
def dms_to_rad(deg, mins, sec):
    """
    dms_to_rad(deg, min, sec):
    Convert degrees, minutes, and seconds of arc to radians.
    """
    # the result is negative when the leading non-zero field is negative
    if deg < 0.0 or (deg == 0.0 and (mins < 0.0 or sec < 0.0)):
        sign = -1
    else:
        sign = 1

    totalsec = 60.0 * (60.0 * np.fabs(deg) + np.fabs(mins)) + np.fabs(sec)
    return sign * ARCSECTORAD * totalsec
116 
117 
def dms_to_deg(deg, mins, sec):
    """
    dms_to_deg(deg, min, sec):
    Convert degrees, minutes, and seconds of arc to degrees.
    """
    rad = dms_to_rad(deg, mins, sec)
    return RADTODEG * rad
124 
125 
def rad_to_hms(rad):
    """
    rad_to_hms(rad):
    Convert radians to hours, minutes, and seconds of arc.
    """
    # wrap the angle into [0, 2pi)
    wrapped = np.fmod(rad, 2.0 * math.pi)
    if wrapped < 0.0:
        wrapped = wrapped + 2.0 * math.pi

    total = RADTOHRS * wrapped
    hrs = int(total)
    total = (total - hrs) * 60.0
    mins = int(total)
    secs = (total - mins) * 60.0
    return (hrs, mins, secs)
140 
141 
def hms_to_rad(hour, mins, sec):
    """
    hms_to_rad(hour, min, sec):
    Convert hours, minutes, and seconds of arc to radians
    """
    sign = -1 if hour < 0.0 else 1
    totalsec = 60.0 * (60.0 * np.fabs(hour) + np.fabs(mins)) + np.fabs(sec)
    return sign * SECTORAD * totalsec
154 
155 
def coord_to_string(h_or_d, m, s):
    """
    coord_to_string(h_or_d, m, s):
    Return a formatted string of RA or DEC values as
    'hh:mm:ss.ssss' if RA, or 'dd:mm:ss.ssss' if DEC.
    """
    # a leading minus sign is needed when the first non-zero field is negative
    negative = h_or_d < 0 or (abs(h_or_d) == 0 and (m < 0.0 or s < 0.0))
    prefix = "-" if negative else ""

    h_or_d, m, s = abs(h_or_d), abs(m), abs(s)

    # pad the seconds field with a leading zero when it would print below ten
    if s >= 9.9995:
        body = "%.2d:%.2d:%.4f" % (h_or_d, m, s)
    else:
        body = "%.2d:%.2d:0%.4f" % (h_or_d, m, s)
    return prefix + body
174 
175 
def rad_to_string(rad, ra_or_dec):
    """
    rad_to_string(rad, ra_or_dec):
    Convert an angle in radians to hours/degrees, minutes seconds and output
    it as a string in the format 'hh:mm:ss.ssss' if RA, or 'dd:mm:ss.ssss' if DEC.
    Whether to use hours or degrees is set by whether ra_or_dec is 'RA' or 'DEC'
    """
    mode = ra_or_dec.upper()
    if mode == "RA":
        components = rad_to_hms(rad)
    elif mode == "DEC":
        components = rad_to_dms(rad)
    else:
        raise ValueError(
            "Unrecognised option: Expected 'ra_or_dec' to be 'RA' or 'DEC'"
        )

    return coord_to_string(*components)
193 
194 
def ra_to_rad(ra_string):
    """
    ra_to_rad(ra_string):
    Given a string containing RA information as
    'hh:mm:ss.ssss', return the equivalent decimal
    radians. Also deal with cases where input
    string is just hh:mm, or hh.
    """
    fields = ra_string.split(":")
    nfields = len(fields)
    if nfields == 3:
        return hms_to_rad(int(fields[0]), int(fields[1]), float(fields[2]))
    if nfields == 2:
        return hms_to_rad(int(fields[0]), int(fields[1]), 0.0)
    if nfields == 1:
        return hms_to_rad(float(fields[0]), 0.0, 0.0)

    print("Problem parsing RA string %s" % ra_string, file=sys.stderr)
    sys.exit(1)
213 
214 
def dec_to_rad(dec_string):
    """
    dec_to_rad(dec_string):
    Given a string containing DEC information as
    'dd:mm:ss.ssss', return the equivalent decimal
    radians. Also deal with cases where input string
    is just dd:mm or dd
    """
    fields = dec_string.split(":")

    # preserve the sign for declinations such as '-00:12:34', where the
    # degrees field alone evaluates to zero and would lose the minus
    if "-" in fields[0] and float(fields[0]) == 0.0:
        sgn = "-"
    else:
        sgn = ""

    nfields = len(fields)
    if nfields == 3:
        return dms_to_rad(int(fields[0]), int(sgn + fields[1]), float(sgn + fields[2]))
    if nfields == 2:
        return dms_to_rad(int(fields[0]), int(sgn + fields[1]), 0.0)
    if nfields == 1:
        return dms_to_rad(float(fields[0]), 0.0, 0.0)

    print("Problem parsing DEC string %s" % dec_string, file=sys.stderr)
    sys.exit(1)
238 
239 
def p_to_f(p, pd, pdd=None):
    """
    p_to_f(p, pd, pdd=None):
    Convert period, period derivative and period second
    derivative to the equivalent frequency counterparts.
    Will also convert from f to p (the transformation is its own inverse).

    Returns [f, fd] when pdd is None, otherwise [f, fd, fdd].
    """
    f = 1.0 / p
    fd = -pd / (p * p)
    # use an identity check rather than '== None' (PEP 8; also avoids
    # surprises with objects overriding __eq__)
    if pdd is None:
        return [f, fd]

    if pdd == 0.0:
        fdd = 0.0
    else:
        # second derivative via the chain rule on f = 1/p
        fdd = 2.0 * pd * pd / (p**3.0) - pdd / (p * p)

    return [f, fd, fdd]
258 
259 
def pferrs(porf, porferr, pdorfd=None, pdorfderr=None):
    """
    pferrs(porf, porferr, pdorfd=None, pdorfderr=None):
    Calculate the period or frequency errors and
    the pdot or fdot errors from the opposite one
    (standard first-order error propagation through f = 1/p).

    Returns [forp, forperr] when pdorfd is None, otherwise
    [forp, forperr, fdorpd, fdorpderr].
    """
    # identity check rather than '== None' (PEP 8)
    if pdorfd is None:
        return [1.0 / porf, porferr / porf**2.0]

    forperr = porferr / porf**2.0
    # propagate both input uncertainties into the derivative error
    fdorpderr = np.sqrt(
        (4.0 * pdorfd**2.0 * porferr**2.0) / porf**6.0
        + pdorfderr**2.0 / porf**4.0
    )
    [forp, fdorpd] = p_to_f(porf, pdorfd)

    return [forp, forperr, fdorpd, fdorpderr]
277 
278 
279 # class to read in a pulsar par file - this is heavily based on the function
280 # in parfile.py in PRESTO
# TEMPO(2) par-file parameter names whose values are parsed as floats
# by psr_par below
float_keys = [
    "F",
    "F0",
    "F1",
    "F2",
    "F3",
    "F4",
    "F5",
    "F6",
    "F7",
    "F8",
    "F9",
    "F10",
    "F11",
    "F12",
    "PEPOCH",
    "POSEPOCH",
    "DM",
    "START",
    "FINISH",
    "NTOA",
    "TRES",
    "TZRMJD",
    "TZRFRQ",
    "TZRSITE",
    "NITS",
    "A1",
    "XDOT",
    "E",
    "ECC",
    "EDOT",
    "T0",
    "PB",
    "PBDOT",
    "OM",
    "OMDOT",
    "EPS1",
    "EPS2",
    "EPS1DOT",
    "EPS2DOT",
    "TASC",
    "LAMBDAPIN",
    "BETA",
    "RA_RAD",
    "DEC_RAD",
    "GAMMA",
    "SINI",
    "M2",
    "MTOT",
    "FB0",
    "FB1",
    "FB2",
    "ELAT",
    "ELONG",
    "PMRA",
    "PMDEC",
    "DIST",
    "PB_2",
    "PB_3",
    "T0_2",
    "T0_3",
    "A1_2",
    "A1_3",
    "OM_2",
    "OM_3",
    "ECC_2",
    "ECC_3",
    "PX",
    "KIN",
    "KOM",
    "A0",
    "B0",
    "D_AOP",
    # GW PARAMETERS
    "H0",
    "COSIOTA",
    "PSI",
    "PHI0",
    "THETA",
    "I21",
    "I31",
    "C22",
    "HPLUS",
    "HCROSS",
    "C21",
    "PHI22",
    "PHI21",
    "SNR",
    "COSTHETA",
    "IOTA",
    "Q22",
]
# par-file parameter names whose values are kept as strings by psr_par
str_keys = [
    "FILE",
    "PSR",
    "PSRJ",
    "NAME",
    "RAJ",
    "DECJ",
    "RA",
    "DEC",
    "EPHEM",
    "CLK",
    "BINARY",
    "UNITS",
]
387 
388 
class psr_par:
    def __init__(self, parfilenm):
        """
        This class parses a TEMPO(2)-style pulsar parameter file. If possible all parameters
        are converted into SI units and angles are in radians. Epochs will be converted from
        MJD values into GPS times.

        Each recognised parameter KEY becomes an attribute; where present,
        KEY_ERR holds its uncertainty and KEY_FIT the fit flag. Values that
        are rescaled keep their input value in KEY_ORIGINAL.
        """
        self.FILEFILE = parfilenm
        pf = open(parfilenm)
        for line in pf.readlines():
            # ignore empty lines (i.e. containing only whitespace)
            if not line.strip():
                continue

            # Convert any 'D-' or 'D+' to 'E-' or 'E+' (FORTRAN exponents)
            line = line.replace("D-", "E-")
            line = line.replace("D+", "E+")
            # also check for lower case
            line = line.replace("d-", "e-")
            line = line.replace("d+", "e+")
            splitline = line.split()

            # get all upper case version in case lower case in par file
            key = splitline[0].upper()

            if key in str_keys:
                setattr(self, key, splitline[1])
            elif key in float_keys:
                try:
                    setattr(self, key, float(splitline[1]))
                except (ValueError, IndexError):
                    # unparseable or missing value: skip this line
                    continue

                if (
                    len(splitline) == 3
                ):  # Some parfiles don't have flags, but do have errors
                    if splitline[2] not in ["0", "1"]:
                        setattr(self, key + "_ERR", float(splitline[2]))
                        setattr(self, key + "_FIT", 0)  # parameter was not fit

                if len(splitline) == 4:
                    if splitline[2] == "1":  # parameter was fit
                        setattr(self, key + "_FIT", 1)
                    else:
                        setattr(self, key + "_FIT", 0)
                    setattr(self, key + "_ERR", float(splitline[3]))

        # sky position
        if hasattr(self, "RAJ"):
            setattr(self, "RA_RAD", ra_to_rad(self.RAJ))

            # set RA error in rads (rather than secs)
            if hasattr(self, "RAJ_ERR"):
                setattr(self, "RA_RAD_ERR", hms_to_rad(0, 0, self.RAJ_ERR))

            if hasattr(self, "RAJ_FIT"):
                setattr(self, "RA_RAD_FIT", self["RAJ_FIT"])
        if hasattr(self, "DECJ"):
            setattr(self, "DEC_RAD", dec_to_rad(self.DECJ))

            # set DEC error in rads (rather than arcsecs)
            if hasattr(self, "DECJ_ERR"):
                setattr(self, "DEC_RAD_ERR", dms_to_rad(0, 0, self.DECJ_ERR))

            if hasattr(self, "DECJ_FIT"):
                setattr(self, "DEC_RAD_FIT", self["DECJ_FIT"])

        # convert proper motions to rads/sec from mas/year
        for pv in ["RA", "DEC"]:
            if hasattr(self, "PM" + pv):
                pmv = self["PM" + pv]
                setattr(self, "PM" + pv + "_ORIGINAL", pmv)  # save original value
                setattr(
                    self, "PM" + pv, pmv * np.pi / (180.0 * 3600.0e3 * 365.25 * 86400.0)
                )

                if hasattr(self, "PM" + pv + "_ERR"):
                    pmve = self["PM" + pv + "_ERR"]
                    # save original value (FIX: previously stored pmv, the
                    # proper motion itself, rather than its error pmve)
                    setattr(self, "PM" + pv + "_ERR_ORIGINAL", pmve)
                    setattr(
                        self,
                        "PM" + pv + "_ERR",
                        pmve * np.pi / (180.0 * 3600.0e3 * 365.25 * 86400.0),
                    )

        # periods and frequencies
        if hasattr(self, "P"):
            setattr(self, "P0", self.P)
        if hasattr(self, "P0"):
            setattr(self, "F0", 1.0 / self.P0)
        if hasattr(self, "F0"):
            setattr(self, "P0", 1.0 / self.F0)
        if hasattr(self, "FB0"):
            setattr(self, "PB", (1.0 / self.FB0) / 86400.0)
        if hasattr(self, "P0_ERR"):
            if hasattr(self, "P1_ERR"):
                f, ferr, fd, fderr = pferrs(self.P0, self.P0_ERR, self.P1, self.P1_ERR)
                setattr(self, "F0_ERR", ferr)
                setattr(self, "F1", fd)
                setattr(self, "F1_ERR", fderr)
            else:
                if hasattr(self, "P1"):
                    (
                        f,
                        fd,
                    ) = p_to_f(self.P0, self.P1)
                    setattr(self, "F0_ERR", self.P0_ERR / (self.P0 * self.P0))
                    setattr(self, "F1", fd)
        if hasattr(self, "F0_ERR"):
            if hasattr(self, "F1_ERR"):
                p, perr, pd, pderr = pferrs(self.F0, self.F0_ERR, self.F1, self.F1_ERR)
                setattr(self, "P0_ERR", perr)
                setattr(self, "P1", pd)
                setattr(self, "P1_ERR", pderr)
            else:
                if hasattr(self, "F1"):
                    (
                        p,
                        pd,
                    ) = p_to_f(self.F0, self.F1)
                    setattr(self, "P0_ERR", self.F0_ERR / (self.F0 * self.F0))
                    setattr(self, "P1", pd)

        # convert epochs (including binary epochs) to GPS if possible
        try:
            from lalpulsar import TTMJDtoGPS

            for epoch in [
                "PEPOCH",
                "POSEPOCH",
                "DMEPOCH",
                "T0",
                "TASC",
                "T0_2",
                "T0_3",
            ]:
                if hasattr(self, epoch):
                    setattr(
                        self, epoch + "_ORIGINAL", self[epoch]
                    )  # save original value
                    setattr(self, epoch, TTMJDtoGPS(self[epoch]))

                    if hasattr(
                        self, epoch + "_ERR"
                    ):  # convert errors from days to seconds
                        setattr(
                            self, epoch + "_ERR_ORIGINAL", self[epoch + "_ERR"]
                        )  # save original value
                        setattr(self, epoch + "_ERR", self[epoch + "_ERR"] * SECPERDAY)
        except Exception:
            # lalpulsar may be unavailable (ImportError) or conversion may
            # fail; in either case leave the epochs as MJD values
            print(
                "Could not convert epochs to GPS times. They are all still MJD values.",
                file=sys.stderr,
            )

        # distance and parallax (distance: kpc -> metres, parallax: mas -> rads)
        convfacs = {"DIST": KPC, "PX": 1e-3 * ARCSECTORAD}
        for item in convfacs:
            if hasattr(self, item):  # convert kpc to metres
                setattr(self, item + "_ORIGINAL", self[item])  # save original value
                setattr(self, item, self[item] * convfacs[item])

                if hasattr(self, item + "_ERR"):
                    setattr(
                        self, item + "_ERR_ORIGINAL", self[item + "_ERR"]
                    )  # save original value
                    setattr(self, item + "_ERR", self[item + "_ERR"] * convfacs[item])

        # binary parameters
        # omega (angle of periastron) parameters (or others requiring conversion from degs to rads)
        for om in ["OM", "OM_2", "OM_3", "KIN", "KOM"]:
            if hasattr(self, om):  # convert OM from degs to rads
                setattr(self, om, self[om] / RADTODEG)

                if hasattr(self, om + "_ERR"):
                    setattr(self, om + "_ERR", self[om + "_ERR"] / RADTODEG)

        # period
        for pb in ["PB", "PB_2", "PB_3"]:
            if hasattr(self, pb):  # convert PB from days to seconds
                setattr(self, pb + "_ORIGINAL", self[pb])  # save original value
                setattr(self, pb, self[pb] * SECPERDAY)

                if hasattr(self, pb + "_ERR"):
                    setattr(
                        self, pb + "_ERR_ORIGINAL", self[pb + "_ERR"]
                    )  # save original value
                    setattr(self, pb + "_ERR", self[pb + "_ERR"] * SECPERDAY)

        # OMDOT
        if hasattr(self, "OMDOT"):  # convert from deg/year to rad/sec
            setattr(self, "OMDOT_ORIGINAL", self["OMDOT"])  # save original value
            setattr(self, "OMDOT", self["OMDOT"] / (RADTODEG * 365.25 * SECPERDAY))

            if hasattr(self, "OMDOT_ERR"):
                setattr(
                    self, "OMDOT_ERR_ORIGINAL", self["OMDOT_ERR"]
                )  # save original value
                setattr(
                    self,
                    "OMDOT_ERR",
                    self["OMDOT_ERR"] / (RADTODEG * 365.25 * SECPERDAY),
                )

        # ELL1 parameterisation: derive eccentricity, omega and T0
        if hasattr(self, "EPS1") and hasattr(self, "EPS2"):
            ecc = math.sqrt(self.EPS1 * self.EPS1 + self.EPS2 * self.EPS2)
            omega = math.atan2(self.EPS1, self.EPS2)
            setattr(self, "E", ecc)
            setattr(self, "OM", omega)  # omega in rads
            setattr(self, "T0", self.TASC + self.PB * omega / TWOPI)
        if (
            hasattr(self, "PB")
            and hasattr(self, "A1")
            and not (hasattr(self, "E") or hasattr(self, "ECC"))
        ):
            setattr(self, "E", 0.0)
        if (
            hasattr(self, "T0")
            and not hasattr(self, "TASC")
            and hasattr(self, "OM")
            and hasattr(self, "PB")
        ):
            setattr(self, "TASC", self.T0 - self.PB * self.OM / TWOPI)

        # binary unit conversion for small numbers (TEMPO2 checks if these are > 1e-7 and if so then the units are in 1e-12) - errors are not converted
        for binu in ["XDOT", "PBDOT", "EDOT", "EPS1DOT", "EPS2DOT", "XPBDOT"]:
            if hasattr(self, binu):
                setattr(self, binu + "_ORIGINAL", self[binu])  # save original value
                if np.abs(self[binu]) > 1e-7:
                    setattr(self, binu, self[binu] * 1.0e-12)

                    # check value is not extremely large due to ATNF catalogue error
                    if self[binu] > 10000.0:  # set to zero
                        setattr(self, binu, 0.0)

        # masses
        for mass in ["M2", "MTOT"]:
            if hasattr(self, mass):  # convert solar masses to kg
                setattr(self, mass + "_ORIGINAL", self[mass])  # save original value
                setattr(self, mass, self[mass] * MSUN)

                if hasattr(self, mass + "_ERR"):
                    setattr(
                        self, mass + "_ERR_ORIGINAL", self[mass + "_ERR"]
                    )  # save original value
                    setattr(self, mass + "_ERR", self[mass + "_ERR"] * MSUN)

        # D_AOP
        if hasattr(self, "D_AOP"):  # convert from inverse arcsec to inverse radians
            setattr(self, "D_AOP_ORIGINAL", self["D_AOP"])  # save original value
            setattr(self, "D_AOP", self["D_AOP"] * RADTODEG * 3600.0)

        pf.close()

    def __getitem__(self, key):
        """Return the value of parameter `key`, or None if it is not set."""
        try:
            par = getattr(self, key)
        except AttributeError:
            par = None

        return par

    def __str__(self):
        """Return a formatted listing of all parsed parameters."""
        out = ""
        for k, v in self.__dict__.items():
            if k[:2] != "__":
                # FIX: was 'string_type' (NameError); the module imports
                # 'string_types' from six
                if isinstance(self.__dict__[k], string_types):
                    out += "%10s = '%s'\n" % (k, v)
                else:
                    out += "%10s = %-20.15g\n" % (k, v)

        return out
663 
664 
665 # class to read in a nested sampling prior file
class psr_prior:
    def __init__(self, priorfilenm):
        """
        Parse a nested sampling prior file. Each line holds a parameter name
        followed by prior information, with the lower and upper bounds in the
        third and fourth columns; every recognised parameter becomes an
        attribute set to [low, high].
        """
        self.FILEFILE = priorfilenm
        pf = open(priorfilenm)
        for line in pf.readlines():
            # skip blank lines (consistent with psr_par parsing; previously
            # a blank line raised an IndexError)
            if not line.strip():
                continue

            splitline = line.split()

            # get all upper case version in case lower case in par file
            key = splitline[0].upper()

            # everything in a prior file should be numeric, so string- and
            # float-valued parameter names are treated identically
            if key in str_keys or key in float_keys:
                setattr(self, key, [float(splitline[2]), float(splitline[3])])

        # get sky positions in rads as strings 'dd/hh:mm:ss.s'
        if hasattr(self, "RA"):
            hl, ml, sl = rad_to_hms(self.RA[0])
            rastrl = coord_to_string(hl, ml, sl)
            hu, mu, su = rad_to_hms(self.RA[1])
            rastru = coord_to_string(hu, mu, su)
            setattr(self, "RA_STR", [rastrl, rastru])

        if hasattr(self, "DEC"):
            dl, ml, sl = rad_to_dms(self.DEC[0])
            decstrl = coord_to_string(dl, ml, sl)
            du, mu, su = rad_to_dms(self.DEC[1])
            decstru = coord_to_string(du, mu, su)
            setattr(self, "DEC_STR", [decstrl, decstru])

        pf.close()

    def __getitem__(self, key):
        """Return the prior range for `key`, or None if it is not set."""
        try:
            atr = getattr(self, key)
        except AttributeError:
            atr = None

        return atr

    def __str__(self):
        """Return a formatted listing of all [low, high] prior ranges."""
        out = ""
        for k, v in self.__dict__.items():
            if k[:2] != "__":
                out += "%10s = %-20.15g, %-20.15g\n" % (k, float(v[0]), float(v[1]))

        return out
714 
715 
716 # Function to return a pulsar's strain spin-down limit given its spin frequency
717 # (Hz), spin-down (Hz/s) and distance (kpc). The canonical value of moment of
718 # inertia of 1e38 kg m^2 is used
def spin_down_limit(freq, fdot, dist):
    """
    Return a pulsar's gravitational-wave strain spin-down limit given its
    spin frequency (Hz), spin-down (Hz/s) and distance (kpc), assuming the
    canonical moment of inertia of 1e38 kg m^2.
    """
    factor = (5.0 / 2.0) * (G / C**3) * I38
    return np.sqrt(factor * np.fabs(fdot) / freq) / (dist * KPC)
725 
726 
727 # Function to convert a pulsar strain into ellipticity assuming the canonical
728 # moment of inertia
def h0_to_ellipticity(h0, freq, dist):
    """
    Convert a pulsar gravitational-wave strain h0 at frequency freq (Hz) and
    distance dist (kpc) into a fiducial ellipticity, assuming the canonical
    moment of inertia (1e38 kg m^2).
    """
    numerator = h0 * C**4.0 * dist * KPC
    denominator = 16.0 * np.pi**2 * G * I38 * freq**2
    return numerator / denominator
733 
734 
735 # Function to convert a pulsar strain into a mass quadrupole moment
def h0_to_quadrupole(h0, freq, dist):
    """
    Convert a pulsar gravitational-wave strain h0 at frequency freq (Hz) and
    distance dist (kpc) into the equivalent mass quadrupole moment.
    """
    prefactor = np.sqrt(15.0 / (8.0 * np.pi))
    return prefactor * h0 * C**4.0 * dist * KPC / (16.0 * np.pi**2 * G * freq**2)
747 
748 
749 # Function to convert quadrupole moment to strain
def quadrupole_to_h0(q22, freq, dist):
    """
    Convert a mass quadrupole moment q22 into the equivalent
    gravitational-wave strain amplitude, for spin frequency freq (Hz) and
    distance dist (kpc). Inverse of h0_to_quadrupole.
    """
    strain = (
        q22
        * np.sqrt((8.0 * np.pi) / 15.0)
        * 16.0
        * np.pi**2
        * G
        * freq**2
        / (C**4.0 * dist * KPC)
    )
    return strain
762 
763 
764 # function to convert the psi' and phi0' coordinates used in nested sampling
765 # into the standard psi and phi0 coordinates (using vectors of those parameters)
def phipsiconvert(phipchain, psipchain):
    """
    Convert chains of the psi' and phi0' coordinates used in nested sampling
    into chains of the standard psi and phi0 coordinates. Returns the pair
    (phichain, psichain).
    """
    # rotation angle between the primed and unprimed coordinate frames
    theta = math.atan2(1, 2)
    ct = math.cos(theta)
    st = math.sin(theta)

    phichain = []
    psichain = []

    for phip, psip in zip(phipchain, psipchain):
        phi0 = (1 / (2 * st)) * phip - (1 / (2 * st)) * psip
        psi = (1 / (2 * ct)) * phip + (1 / (2 * ct)) * psip

        # fold psi into +/- pi/4, compensating phi0 by a shift of pi
        if math.fabs(psi) > math.pi / 4.0:
            phi0 = phi0 + math.pi

            if psi > math.pi / 4.0:
                psi = -(math.pi / 4.0) + math.fmod(psi + (math.pi / 4.0), math.pi / 2.0)
            else:
                psi = (math.pi / 4.0) - math.fmod((math.pi / 4.0) - psi, math.pi / 2.0)

        # map phi0 into the 0 -> 2pi range
        if phi0 > 2.0 * math.pi:
            phi0 = math.fmod(phi0, 2.0 * math.pi)
        else:
            phi0 = 2.0 * math.pi - math.fmod(2.0 * math.pi - phi0, 2.0 * math.pi)

        phichain.append(phi0)
        psichain.append(psi)

    return phichain, psichain
801 
802 
803 # function to create histogram plot of the 1D posterior (potentially for
804 # multiple IFOs) for a parameter (param). If an upper limit is given then
805 # that will be output
807  poslist,
808  param,
809  ifos,
810  parambounds=[float("-inf"), float("inf")],
811  nbins=50,
812  upperlimit=0,
813  overplot=False,
814  parfile=None,
815  mplparams=False,
816 ):
817  import matplotlib
818  from matplotlib import pyplot as plt
819  from lalpulsar.pulsarhtmlutils import paramlatexdict
820 
821  # create list of figures
822  myfigs = []
823 
824  # create a list of upper limits
825  ulvals = []
826 
827  # set some matplotlib defaults for hist
828  if not mplparams:
829  mplparams = {
830  "backend": "Agg",
831  "text.usetex": True, # use LaTeX for all text
832  "axes.linewidth": 0.5, # set axes linewidths to 0.5
833  "axes.grid": True, # add a grid
834  "grid.linewidth": 0.5,
835  "font.family": "serif",
836  "font.size": 12,
837  }
838 
839  matplotlib.rcParams.update(mplparams)
840 
841  # ifos line colour specs
842  coldict = {"H1": "r", "H2": "c", "L1": "g", "V1": "b", "G1": "m", "Joint": "k"}
843 
844  # param name for axis label
845  try:
846  paraxis = paramlatexdict[param.upper()]
847  except:
848  paraxis = param
849 
850  ymax = []
851 
852  # if a par file object is given expect that we have an injection file
853  # containing the injected value to overplot on the histgram
854  parval = None
855  if parfile:
856  parval = parfile[param.upper()]
857 
858  if ifos == None:
859  # default to just output colour for H1
860  ifos = ["H1"]
861 
862  # loop over ifos
863  for idx, ifo in enumerate(ifos):
864  # check whether to plot all figures on top of each other
865  if overplot and idx == 0:
866  myfig = plt.figure(figsize=(4, 4), dpi=200)
867  plt.hold(True)
868  elif not overplot:
869  myfig = plt.figure(figsize=(4, 4), dpi=200)
870 
871  pos = poslist[idx]
872 
873  pos_samps = pos[param].samples
874 
875  # get a normalised histogram for each
876  n, bins = hist_norm_bounds(
877  pos_samps, int(nbins), parambounds[0], parambounds[1]
878  )
879 
880  # plot histogram
881  plt.step(bins, n, color=coldict[ifo])
882 
883  if "h0" not in param:
884  plt.xlim(parambounds[0], parambounds[1])
885 
886  plt.xlabel(r"" + paraxis, fontsize=14, fontweight=100)
887  plt.ylabel(r"Probability Density", fontsize=14, fontweight=100)
888  myfig.subplots_adjust(left=0.18, bottom=0.15) # adjust size
889 
890  if not overplot:
891  plt.ylim(0, n.max() + 0.1 * n.max())
892  # plt.legend(ifo)
893  # set background colour of axes
894  ax = plt.gca()
895  ax.set_axis_bgcolor("#F2F1F0")
896  myfigs.append(myfig)
897  else:
898  ymax.append(n.max() + 0.1 * n.max())
899 
900  # if upper limit is needed then integrate posterior using trapezium rule
901  if upperlimit != 0:
902  ct = cumtrapz(n, bins)
903 
904  # prepend a zero to ct
905  ct = np.insert(ct, 0, 0)
906 
907  # use linear interpolation to find the value at 'upper limit'
908  ctu, ui = np.unique(ct, return_index=True)
909  intf = interp1d(ctu, bins[ui], kind="linear")
910  ulvals.append(intf(float(upperlimit)))
911 
912  # plot parameter values
913  if parval:
914  if not overplot:
915  plt.hold(True)
916  plt.axvline(parval, color="k", ls="--", linewidth=1.5)
917  else:
918  plt.axvline(parval, color="k", ls="--", linewidth=1.5)
919 
920  if overplot:
921  plt.ylim(0, max(ymax))
922  # plt.legend(ifos)
923  ax = plt.gca()
924  ax.set_axis_bgcolor("#F2F1F0")
925  plt.hold(False)
926  myfigs.append(myfig)
927 
928  return myfigs, ulvals
929 
930 
931 # function to return an upper limit from a posteriors: pos is an array of posteriors samples for a
932 # particular parameter
934  pos, upperlimit=0.95, parambounds=[float("-inf"), float("inf")], nbins=50
935 ):
936  ulval = 0
937 
938  # get a normalised histogram of posterior samples
939  n, bins = hist_norm_bounds(pos, int(nbins), parambounds[0], parambounds[1])
940 
941  # if upper limit is needed then integrate posterior using trapezium rule
942  if upperlimit != 0:
943  ct = cumtrapz(n, bins)
944 
945  # prepend a zero to ct
946  ct = np.insert(ct, 0, 0)
947 
948  # use linear interpolation to find the value at 'upper limit'
949  ctu, ui = np.unique(ct, return_index=True)
950  intf = interp1d(ctu, bins[ui], kind="linear")
951  ulval = intf(float(upperlimit))
952 
953  return ulval
954 
955 
def upper_limit_greedy(pos, upperlimit=0.95, nbins=100):
    """
    Return the `upperlimit` credible bound on the samples `pos` by greedily
    accumulating histogram bins (nbins of them) until the requested fraction
    is exceeded, then linearly interpolating within the final bin.
    """
    counts, edges = np.histogram(pos, bins=nbins)
    binwidth = edges[1] - edges[0]  # width of a histogram bin

    nsamps = len(pos)
    cumfrac = 0.0
    lastfrac = 0.0
    nused = 0
    for c in counts:
        lastfrac = cumfrac
        cumfrac += float(c) / nsamps
        nused += 1
        if cumfrac > upperlimit:
            break

    # linearly interpolate inside the bin where the threshold was crossed
    return edges[nused - 1] + (upperlimit - lastfrac) * (binwidth / (cumfrac - lastfrac))
973 
974 
975 # function to plot a posterior chain (be it MCMC chains or nested samples)
976 # the input should be a list of posteriors for each IFO, and the parameter
977 # required, the list of IFO. grr is a list of dictionaries giving
978 # the Gelman-Rubins statistics for the given parameter for each IFO.
979 # If withhist is set then it will also output a histgram, with withhist number
980 # of bins
def plot_posterior_chain(poslist, param, ifos, grr=None, withhist=0, mplparams=False):
    """
    Plot the chain of posterior samples of `param` for each IFO in `ifos`,
    with samples drawn from the corresponding entry of `poslist` (each entry
    is indexed by parameter name and exposes a `.samples` array). If `grr`
    is given it is indexed as grr[idx][param] to label each chain with its
    Gelman-Rubin statistic. If `withhist` is non-zero a side panel shows a
    normalised histogram with `withhist` bins. Returns the matplotlib
    figure, or None if matplotlib's gridspec cannot be imported.
    """
    import matplotlib
    from matplotlib import pyplot as plt
    from lalpulsar.pulsarhtmlutils import paramlatexdict

    try:
        from matplotlib import gridspec
    except:
        return None

    # default matplotlib settings, used unless the caller supplies their own
    if not mplparams:
        mplparams = {
            "backend": "Agg",
            "text.usetex": True,  # use LaTeX for all text
            "axes.linewidth": 0.5,  # set axes linewidths to 0.5
            "axes.grid": True,  # add a grid
            "grid.linewidth": 0.5,
            "font.family": "sans-serif",
            "font.sans-serif": "Avant Garde, Helvetica, Computer Modern Sans serif",
            "font.size": 15,
        }

    matplotlib.rcParams.update(mplparams)

    # per-IFO line colours
    coldict = {"H1": "r", "H2": "c", "L1": "g", "V1": "b", "G1": "m", "Joint": "k"}

    # param name for axis label
    try:
        if param == "iota":
            p = "cosiota"
        else:
            p = param

        paryaxis = paramlatexdict[p.upper()]
    except:
        # fall back to the raw parameter name if no LaTeX label exists
        paryaxis = param

    if grr:
        legendvals = []

    maxiter = 0
    maxn = 0
    minsamp = float("inf")
    maxsamp = -float("inf")

    for idx, ifo in enumerate(ifos):
        # create the figure (and optional chain/histogram axes) only once
        if idx == 0:
            myfig = plt.figure(figsize=(12, 4), dpi=200)
            myfig.subplots_adjust(bottom=0.15)

            if withhist:
                gs = gridspec.GridSpec(1, 4, wspace=0)
                ax1 = plt.subplot(gs[:-1])
                ax2 = plt.subplot(gs[-1])

        pos = list(poslist)[idx]

        # check for cosiota: iota chains are plotted as cos(iota)
        if "iota" == param:
            pos_samps = np.cos(pos["iota"].samples)
        else:
            pos_samps = pos[param].samples

        # track the global sample range across all IFOs for the y-limits
        if np.min(pos_samps) < minsamp:
            minsamp = np.min(pos_samps)
        if np.max(pos_samps) > maxsamp:
            maxsamp = np.max(pos_samps)

        if withhist:
            ax1.plot(pos_samps, ".", color=coldict[ifo], markersize=1)

            # sideways histogram: withhist is the number of bins
            n, binedges = np.histogram(pos_samps, withhist, density=True)
            n = np.append(n, 0)
            ax2.step(n, binedges, color=coldict[ifo])

            if np.max(n) > maxn:
                maxn = np.max(n)
        else:
            plt.plot(pos_samps, ".", color=coldict[ifo], markersize=1)

        if grr:
            try:
                legendvals.append(r"$R = %.2f$" % grr[idx][param])
            except:
                # missing statistic for any IFO disables the legend entirely
                legendvals = []
        else:
            legendvals = None
        # NOTE(review): if `ifos` is empty and `grr` is falsy, legendvals is
        # never assigned and the `if legendvals` below raises NameError

        if len(pos_samps) > maxiter:
            maxiter = len(pos_samps)

    if not withhist:
        ax1 = plt.gca()

    bounds = [minsamp, maxsamp]

    ax1.set_ylabel(r"" + paryaxis, fontsize=16, fontweight=100)
    ax1.set_xlabel(r"Iterations", fontsize=16, fontweight=100)

    ax1.set_xlim(0, maxiter)
    ax1.set_ylim(bounds[0], bounds[1])

    if withhist:
        ax2.set_ylim(bounds[0], bounds[1])
        ax2.set_xlim(0, maxn + 0.1 * maxn)
        ax2.set_yticklabels([])
        # NOTE(review): set_axis_bgcolor was removed in matplotlib >= 2.2
        # (set_facecolor is the replacement) - confirm the pinned version
        ax2.set_axis_bgcolor("#F2F1F0")

    # add gelman-rubins stat data
    if legendvals:
        ax1.legend(legendvals, title="Gelman-Rubins test")

    return myfig
1094 
1095 
1096 # function to read in and plot a 2D histogram from a binary file, where the files
1097 # structure is of the form:
1098 # a header of six doubles with:
1099 # - minimum of N-dim
1100 # - the step size in N-dim
1101 # - N - number of N-dim values
1102 # - minimum of M-dim
1103 # - the step size in M-dim
1104 # - M - number of M-dim values
1105 # followed by an NxM double array of values.
1106 # The information returned are two lists of the x and y-axis bin centres, along
1107 # with a numpy array containing the histogram values.
def read_hist_from_file(histfile):
    """
    Read a 2D histogram from a binary file.

    The file layout is a header of six doubles:
      - minimum of the N-dimension
      - the step size in the N-dimension
      - N, the number of N-dimension values
      - minimum of the M-dimension
      - the step size in the M-dimension
      - M, the number of M-dimension values
    followed by an NxM double array of histogram values.

    Returns (xbins, ybins, histarr): the N- and M-dimension bin centres and
    the (N, M) numpy array of values, or (None, None, None) on any error.
    """
    # read in 2D binary file; the context manager guarantees the file is
    # closed even if the read fails
    try:
        with open(histfile, "rb") as fp:
            pd = fp.read()  # read in all the data
    except Exception:
        print("Could not open prior file %s" % histfile, file=sys.stderr)
        return None, None, None

    try:
        # read in the header (6 doubles)
        header = struct.unpack("d" * 6, pd[: 6 * 8])
        nx = int(header[2])
        ny = int(header[5])

        # read in histogram grid (NxM doubles); previously unpack errors
        # escaped uncaught because the handler had been commented out
        grid = struct.unpack("d" * nx * ny, pd[6 * 8 :])
    except struct.error:
        print("Could not read in data from prior file %s" % histfile, file=sys.stderr)
        return None, None, None

    # convert the flat grid into an (N, M) numpy array in one step rather
    # than appending row by row
    histarr = np.array(grid, dtype=float).reshape(nx, ny)

    # bin centres run from the minimum in steps of the header step size
    xbins = np.linspace(header[0], header[0] + header[1] * (nx - 1), nx)
    ybins = np.linspace(header[3], header[3] + header[4] * (ny - 1), ny)

    return xbins, ybins, histarr
1156 
1157 
# Function to plot a 2D histogram from a binary file containing it.
# Also supply the label names for the n-dimension (x-axis) and m-dimension
# (y-axis). If margpars is true, marginalised plots of both dimensions will
# also be returned.
    histfile, ndimlabel, mdimlabel, margpars=True, mplparams=False
):
    """Plot a 2D histogram read from a binary file.

    The file must be in the format read by :func:`read_hist_from_file`.

    Parameters
    ----------
    histfile : str
        Path to the binary histogram file.
    ndimlabel : str
        Parameter name for the x-axis (looked up in ``paramlatexdict`` for a
        LaTeX label; used verbatim if not found).
    mdimlabel : str
        Parameter name for the y-axis.
    margpars : bool
        If True, also produce marginalised 1D histogram plots of both
        dimensions.
    mplparams : dict
        Optional matplotlib rcParams overrides.

    Returns
    -------
    list
        The 2D histogram figure, followed (if ``margpars``) by the x- and
        y-marginal figures. None is returned if the file could not be read.
    """
    import matplotlib
    from matplotlib import pyplot as plt
    from lalpulsar.pulsarhtmlutils import paramlatexdict

    # read in 2D h0 vs cos(iota) binary prior file
    xbins, ybins, histarr = read_hist_from_file(histfile)

    # NOTE(review): if read_hist_from_file failed it returns None, so
    # None.any() here would raise AttributeError rather than printing the
    # intended message — confirm upstream failure handling
    if not xbins.any():
        print("Could not read binary histogram file", file=sys.stderr)
        return None

    figs = []

    # set some matplotlib defaults for amplitude spectral density
    if not mplparams:
        mplparams = {
            "backend": "Agg",
            "text.usetex": True,  # use LaTeX for all text
            "axes.linewidth": 0.5,  # set axes linewidths to 0.5
            "axes.grid": True,  # add a grid
            "grid.linewidth": 0.5,
            "font.family": "serif",
            "font.size": 12,
        }

    matplotlib.rcParams.update(mplparams)

    fig = plt.figure(figsize=(4, 4), dpi=200)

    # param name for axis label (fall back to the raw name if no LaTeX entry)
    try:
        parxaxis = paramlatexdict[ndimlabel.upper()]
    except:
        parxaxis = ndimlabel

    try:
        paryaxis = paramlatexdict[mdimlabel.upper()]
    except:
        paryaxis = mdimlabel

    plt.xlabel(r"" + parxaxis, fontsize=14, fontweight=100)
    plt.ylabel(r"" + paryaxis, fontsize=14, fontweight=100, rotation=270)

    # bin widths in each dimension
    dx = xbins[1] - xbins[0]
    dy = ybins[1] - ybins[0]

    # image extent covers the outer half-bin edges (y axis inverted)
    extent = [
        xbins[0] - dx / 2.0,
        xbins[-1] + dx / 2.0,
        ybins[-1] + dy / 2.0,
        ybins[0] - dy / 2.0,
    ]

    plt.imshow(
        np.transpose(histarr),
        aspect="auto",
        extent=extent,
        interpolation="bicubic",
        cmap="gray_r",
    )

    fig.subplots_adjust(left=0.18, bottom=0.15)  # adjust size
    # remember the 2D plot's axis limits so the marginal plots can match them
    fax = (fig.get_axes())[0].axis()

    figs.append(fig)

    if margpars:
        # marginalise over y-axes and produce plots
        xmarg = []
        for i in range(len(xbins)):
            xmarg.append(np.trapz(histarr[:][i], x=ybins))

        # normalise
        xarea = np.trapz(xmarg, x=xbins)
        # NOTE(review): under Python 3 map() returns a one-shot iterator, so
        # the max(xmarg) and plt.step calls below will not behave as on
        # Python 2 (where map() returned a list) — confirm intended runtime
        xmarg = map(lambda x: x / xarea, xmarg)

        ymarg = []
        for i in range(len(ybins)):
            ymarg.append(np.trapz(np.transpose(histarr)[:][i], x=xbins))

        # normalise
        yarea = np.trapz(ymarg, x=ybins)
        ymarg = map(lambda x: x / yarea, ymarg)

        # plot x histogram
        figx = plt.figure(figsize=(4, 4), dpi=200)
        plt.step(xbins, xmarg, color="k")
        plt.ylim(0, max(xmarg) + 0.1 * max(xmarg))
        plt.xlim(fax[0], fax[1])
        plt.xlabel(r"" + parxaxis, fontsize=14, fontweight=100)
        plt.ylabel(r"Probability Density", fontsize=14, fontweight=100)
        figx.subplots_adjust(left=0.18, bottom=0.15)  # adjust size
        ax = plt.gca()
        # NOTE(review): Axes.set_axis_bgcolor was removed in Matplotlib 2.x
        # (renamed set_facecolor) — confirm the supported Matplotlib version
        ax.set_axis_bgcolor("#F2F1F0")

        # plot y histogram
        figy = plt.figure(figsize=(4, 4), dpi=200)
        plt.step(ybins, ymarg, color="k")
        plt.ylim(0, max(ymarg) + 0.1 * max(ymarg))
        plt.xlim(fax[3], fax[2])
        plt.xlabel(r"" + paryaxis, fontsize=14, fontweight=100)
        plt.ylabel(r"Probability Density", fontsize=14, fontweight=100)
        figy.subplots_adjust(left=0.18, bottom=0.15)  # adjust size
        ax = plt.gca()
        ax.set_axis_bgcolor("#F2F1F0")

        figs.append(figx)
        figs.append(figy)

    return figs
1275 
1276 
# using a binary file containing a histogram of h0 vs cos(iota) calculate the
# upper limit on h0
def h0ul_from_prior_file(priorfile, ulval=0.95):
    """Calculate the h0 upper limit from a binary h0 vs cos(iota) histogram file.

    Parameters
    ----------
    priorfile : str
        Path to a binary file readable by :func:`read_hist_from_file`
        containing the 2D h0 vs cos(iota) histogram.
    ulval : float
        The cumulative probability defining the upper limit (default 0.95).

    Returns
    -------
    float
        The h0 value bounding ``ulval`` of the marginalised probability, or
        None if the file could not be read.
    """
    # read in 2D h0 vs cos(iota) binary prior file
    h0bins, cibins, histarr = read_hist_from_file(priorfile)

    # guard the None return from a failed read as well as empty bins
    if h0bins is None or not h0bins.any():
        print("Could not read binary histogram file", file=sys.stderr)
        return None

    # marginalise over cos(iota) (one integral per h0 bin row)
    h0marg = [np.trapz(histarr[:][i], x=cibins) for i in range(len(h0bins))]

    # shift bins by half a bin width and normalise the h0 posterior
    h0bins = h0bins - (h0bins[1] - h0bins[0]) / 2
    h0area = np.trapz(h0marg, x=h0bins)
    # use an array division rather than map(): under Python 3 map() returns
    # an iterator, which breaks the cumulative integration below
    h0margnorm = np.asarray(h0marg) / h0area

    # get cumulative probability
    ct = cumtrapz(h0margnorm, h0bins)

    # prepend a zero to ct so it has the same length as h0bins
    ct = np.insert(ct, 0, 0)

    # use linear interpolation over the (unique) cumulative values to find
    # the h0 value at the requested upper limit probability
    ctu, ui = np.unique(ct, return_index=True)
    intf = interp1d(ctu, h0bins[ui], kind="linear")
    return intf(float(ulval))
1307 
1308 
1309 # function to create a histogram plot of the 2D posterior
    poslist,
    params,
    ifos,
    bounds=None,
    nbins=[50, 50],
    parfile=None,
    overplot=False,
    mplparams=False,
):
    """Create 2D histogram plots of the posterior for a pair of parameters.

    Parameters
    ----------
    poslist : list
        One posterior object per IFO; each must be indexable by parameter
        name and expose a ``.samples`` attribute (presumably a
        lalinference-style posterior — confirm against callers).
    params : list
        Exactly two parameter names (x then y).
    ifos : list
        IFO names matching the entries of poslist; defaults to ["H1"] if None.
    bounds : list
        Optional [[xmin, xmax], [ymin, ymax]] axis limits.
    nbins : list
        Number of histogram bins in x and y.
    parfile : dict
        Optional dictionary of injected parameter values to overplot.
    overplot : bool
        If True all IFOs are drawn on a single (semi-transparent) figure.
    mplparams : dict
        Optional matplotlib rcParams overrides.

    Returns
    -------
    list
        The created figures (one per IFO, or a single overplotted figure).
    """
    import matplotlib
    from matplotlib import pyplot as plt
    from lalpulsar.pulsarhtmlutils import paramlatexdict

    if len(params) != 2:
        print("Require 2 parameters", file=sys.stderr)
        sys.exit(1)

    # set some matplotlib defaults for amplitude spectral density
    if not mplparams:
        mplparams = {
            "backend": "Agg",
            "text.usetex": True,  # use LaTeX for all text
            "axes.linewidth": 0.5,  # set axes linewidths to 0.5
            "axes.grid": True,  # add a grid
            "grid.linewidth": 0.5,
            "font.family": "serif",
            "font.size": 12,
        }

    matplotlib.rcParams.update(mplparams)

    # per-IFO colour maps used when overplotting
    if overplot:
        coldict = {
            "H1": "Reds",
            "H2": "GnBu",
            "L1": "Greens",
            "V1": "Blues",
            "G1": "BuPu",
            "Joint": "Greys",
        }

    myfigs = []

    # param name for axis label (fall back to the raw name if no LaTeX entry)
    try:
        parxaxis = paramlatexdict[params[0].upper()]
    except:
        parxaxis = params[0]

    try:
        paryaxis = paramlatexdict[params[1].upper()]
    except:
        paryaxis = params[1]

    parval1 = None
    parval2 = None

    if parfile:
        parval1 = parfile[params[0].upper()]
        parval2 = parfile[params[1].upper()]

    if ifos == None:
        ifos = ["H1"]

    for idx, ifo in enumerate(ifos):
        if overplot:
            if idx == 0:
                # if overplotting just have one figure
                myfig = plt.figure(figsize=(4, 4), dpi=200)

            if len(ifos) == 1:
                alpha = 1.0
                cmap = plt.cm.get_cmap("Greys")
            else:
                alpha = 0.5  # set transparency
                cmap = plt.cm.get_cmap(coldict[ifo])  # set colormap
                # cmap.set_bad(alpha=0.) # set 'bad' masked values to be transparent
        else:
            myfig = plt.figure(figsize=(4, 4), dpi=200)
            alpha = 1.0  # no transparency
            cmap = plt.cm.get_cmap("Greys")

        posterior = poslist[idx]

        a = np.squeeze(posterior[params[0]].samples)
        b = np.squeeze(posterior[params[1]].samples)

        # Create 2D bin array
        par1pos_min = a.min()
        par2pos_min = b.min()

        par1pos_max = a.max()
        par2pos_max = b.max()

        plt.xlabel(r"" + parxaxis, fontsize=14, fontweight=100)
        plt.ylabel(r"" + paryaxis, fontsize=14, fontweight=100, rotation=270)

        # NOTE(review): the `normed` keyword was removed from numpy's
        # histogram functions (use `density`) — confirm the pinned numpy
        H, xedges, yedges = np.histogram2d(a, b, nbins, normed=True)

        # if overplot and len(ifos) > 1:
        # # mask values that are zero, so that they are
        # Hm = np.ma.masked_where(np.transpose(H) == 0, np.transpose(H))
        # else:
        # Hm = np.transpose(H)
        Hm = np.transpose(H)

        extent = [xedges[0], xedges[-1], yedges[-1], yedges[0]]
        plt.imshow(
            Hm,
            aspect="auto",
            extent=extent,
            interpolation="bilinear",
            cmap=cmap,
            alpha=alpha,
        )
        # plt.colorbar()

        if bounds:
            plt.xlim(bounds[0][0], bounds[0][1])
            plt.ylim(bounds[1][0], bounds[1][1])

        # plot injection values if given
        # NOTE(review): pyplot.hold was removed in Matplotlib 3.0 — confirm
        # the supported Matplotlib version
        if parval1 and parval2:
            if overplot and idx == len(ifos) - 1:
                # only plot after final posterior has been plotted
                plt.hold(True)
                plt.plot(parval1, parval2, "rx", markersize=8, mew=2)
            else:
                plt.hold(True)
                plt.plot(parval1, parval2, "rx", markersize=8, mew=2)

        if overplot:
            plt.hold(True)
        if not overplot:
            myfig.subplots_adjust(left=0.18, bottom=0.15)  # adjust size
            myfigs.append(myfig)

    if overplot:
        myfig.subplots_adjust(left=0.18, bottom=0.15)  # adjust size
        myfigs.append(myfig)

    return myfigs
1453 
1454 
# a function that creates and normalises a histogram of samples, with nbins
# between an upper and lower bound. The values at the bin points (with
# length nbins+2) will be returned as numpy arrays
def hist_norm_bounds(samples, nbins, low=float("-inf"), high=float("inf")):
    """Create a normalised histogram of samples between two bounds.

    The histogram is padded at each end: with a zero bin if the samples stay
    more than one bin width away from the bound, otherwise with a bin placed
    on the bound whose value is linearly extrapolated from the gradient of
    the two adjacent bins. The result integrates to unity.

    Parameters
    ----------
    samples : array_like
        The samples to histogram.
    nbins : int
        Number of histogram bins.
    low, high : float
        Lower/upper boundary of the allowed range (default +/- infinity).

    Returns
    -------
    ns, bincentres : :class:`numpy.ndarray`
        The normalised bin values and bin centres (each of length nbins+2).
    """
    # get histogram (as floats, ready for the extrapolation/normalisation)
    n, binedges = np.histogram(samples, nbins)
    n = n.astype(float)

    # get bin width
    binwidth = binedges[1] - binedges[0]

    # create bin centres (vectorised instead of an append loop)
    bincentres = binedges[:-1] + binwidth / 2

    # if histogram points are not close to the lower boundary (i.e. within a
    # bin of it) then prepend a zero bin; otherwise extrapolate onto the bound
    if bincentres[0] - binwidth > low:
        # prepend a zero to n and a new bin centre one bin width below
        n = np.insert(n, 0, 0)
        bincentres = np.insert(bincentres, 0, bincentres[0] - binwidth)
    else:
        # we're closer to the boundary edge than the bin width, so set a new
        # bin on the boundary with a value linearly extrapolated from the
        # gradient of the adjacent points
        dx = bincentres[0] - low

        # prepend the low boundary value to the bin centres
        bincentres = np.insert(bincentres, 0, low)

        dn = n[1] - n[0]
        nbound = n[0] - (dn / binwidth) * dx

        # prepend the extrapolated value to n
        n = np.insert(n, 0, nbound)

    # now the other end: pad with a zero bin, or extrapolate onto the bound
    if bincentres[-1] + binwidth < high:
        # append a zero and a new bin centre one bin width above
        n = np.append(n, 0)
        bincentres = np.append(bincentres, bincentres[-1] + binwidth)
    else:
        dx = high - bincentres[-1]

        # append the high boundary value to the bin centres
        bincentres = np.append(bincentres, high)

        dn = n[-1] - n[-2]
        nbound = n[-1] + (dn / binwidth) * dx

        # append the extrapolated value to n
        n = np.append(n, nbound)

    # calculate the area and normalise so the histogram integrates to one
    area = np.trapz(n, x=bincentres)
    ns = n / area

    return ns, bincentres
1526 
1527 
1528 # create a Tukey window of length N
def tukey_window(N, alpha=0.5):
    """Create a Tukey (tapered cosine) window of length N.

    Parameters
    ----------
    N : int
        The window length.
    alpha : float
        The fraction of the window occupied by the cosine tapers; for
        ``alpha >= 1`` the window degenerates to a Hann window.

    Returns
    -------
    :class:`numpy.ndarray`
        The window values.
    """
    # a fully-tapered window is just a Hann window
    if alpha >= 1:
        return np.hanning(N)

    # evenly spaced sample positions on [0, 1]
    positions = np.linspace(0, 1, N)

    # start from a rectangular window
    window = np.ones(positions.shape)

    taper = alpha / 2

    # cosine taper over the leading alpha/2 fraction (0 <= x < alpha/2)
    left = positions < taper
    window[left] = 0.5 * (1 + np.cos(2 * np.pi / alpha * (positions[left] - taper)))

    # cosine taper over the trailing alpha/2 fraction (1 - alpha/2 <= x <= 1)
    right = positions >= (1 - taper)
    window[right] = 0.5 * (1 + np.cos(2 * np.pi / alpha * (positions[right] - 1 + taper)))

    return window
1549 
1550 
# create a function for plotting the absolute value of Bk data (read in from
# data files) and an averaged 1 day "two-sided" amplitude spectral density
# spectrogram for each IFO. Bkdata should be a dictionary containing the
# files keyed to the IFO
    Bkdata,
    delt=86400,
    plotpsds=True,
    plotfscan=False,
    removeoutlier=None,
    mplparams=False,
):
    """Plot |Bk| time series and averaged spectral data for each IFO.

    Parameters
    ----------
    Bkdata : dict
        Dictionary of Bk data file paths keyed by IFO name. Each file is a
        three-column text file: GPS time, real(Bk), imag(Bk).
    delt : int
        The FFT length in seconds used for the spectrogram (default one day).
    plotpsds : bool
        If True, produce a median amplitude spectral density plot per IFO.
    plotfscan : bool
        If True, produce a spectrogram plot per IFO.
    removeoutlier : float
        If given, remove points whose real or imaginary part exceeds this
        many estimated standard deviations.
    mplparams : dict
        Optional matplotlib rcParams overrides.

    Returns
    -------
    tuple
        (Bkfigs, psdfigs, fscanfigs, asdlist, sampledt): lists of the time
        series, PSD and spectrogram figures, the per-IFO median ASD arrays,
        and the per-IFO minimum sample time steps.
    """
    import matplotlib

    # NOTE(review): matplotlib.mlab.specgram has been deprecated/removed in
    # recent Matplotlib versions — confirm the pinned Matplotlib provides it
    from matplotlib.mlab import specgram
    from matplotlib import colors
    from matplotlib import pyplot as plt

    # create list of figures
    Bkfigs = []
    psdfigs = []
    fscanfigs = []
    asdlist = []
    sampledt = []

    # set some matplotlib defaults for amplitude spectral density
    if not mplparams:
        mplparams = {
            "backend": "Agg",
            "text.usetex": True,  # use LaTeX for all text
            "axes.linewidth": 0.5,  # set axes linewidths to 0.5
            "axes.grid": True,  # add a grid
            "grid.linewidth": 0.5,
            "font.family": "sans-serif",
            "font.sans-serif": "Avant Garde, Helvetica, Computer Modern Sans serif",
            "font.size": 15,
        }

    matplotlib.rcParams.update(mplparams)
    matplotlib.rcParams["text.latex.preamble"] = r"\usepackage{xfrac}"

    # ifos line colour specs
    coldict = {"H1": "r", "H2": "c", "L1": "g", "V1": "b", "G1": "m"}
    colmapdic = {
        "H1": "Reds",
        "H2": "PuBu",
        "L1": "Greens",
        "V1": "Blues",
        "G1": "PuRd",
    }

    # there should be data for each ifo
    for ifo in Bkdata:
        # get data for given ifo
        try:
            Bk = np.loadtxt(Bkdata[ifo], comments=["#", "%"])
        except:
            print("Could not open file %s" % Bkdata[ifo], file=sys.stderr)
            sys.exit(-1)

        # sort the array in time
        Bk = Bk[Bk[:, 0].argsort()]

        # make sure times are unique
        uargs = np.unique(Bk[:, 0], return_index=True)[1]
        Bk = Bk[uargs, :]

        # data file has three columns: GPS time, real(Bk), imag(Bk)
        gpstime = []

        # remove outliers at Xsigma by working out sigma from the peak of the
        # distribution of the log absolute value
        if removeoutlier:
            n, binedges = np.histogram(
                np.log(np.fabs(np.concatenate((Bk[:, 1], Bk[:, 2])))), 50
            )
            j = n.argmax(0)

            # standard deviation estimate (from the histogram's peak bin)
            stdest = math.exp((binedges[j] + binedges[j + 1]) / 2)

            # keep values within +/- removeoutlier * sigma
            # test real parts
            vals = np.where(np.fabs(Bk[:, 1]) < removeoutlier * stdest)
            Bknew = Bk[vals, :][-1]
            # test imag parts
            vals = np.where(np.fabs(Bknew[:, 2]) < removeoutlier * stdest)
            Bk = Bknew[vals, :][-1]

        gpstime = Bk[:, 0]

        # minimum time step between points (should generally be 60 seconds)
        mindt = min(np.diff(gpstime))
        sampledt.append(mindt)

        # absolute value of the complex Bk data
        Bkabs = np.sqrt(Bk[:, 1] ** 2 + Bk[:, 2] ** 2)

        # plot the time series of the data
        Bkfig = plt.figure(figsize=(11, 3.5), dpi=200)
        Bkfig.subplots_adjust(bottom=0.15, left=0.09, right=0.94)

        # times relative to the first sample
        tms = list(map(lambda x: x - gpstime[0], gpstime))

        plt.plot(tms, Bkabs, ".", color=coldict[ifo], markersize=1)
        plt.xlabel(r"GPS - %d" % int(gpstime[0]), fontsize=14, fontweight=100)
        plt.ylabel(r"$|B_k|$", fontsize=14, fontweight=100)
        # plt.title(r'$B_k$s for ' + ifo.upper(), fontsize=14)
        plt.xlim(tms[0], tms[-1])

        Bkfigs.append(Bkfig)

        if plotpsds or plotfscan:
            # create PSD by splitting data into days, padding with zeros to give a
            # sample a second, getting the PSD for each day and combining them
            totlen = gpstime[-1] - gpstime[0]  # total data length

            # check mindt is an integer and greater than 1
            if math.fmod(mindt, 1) != 0.0 or mindt < 1:
                print(
                    "Error... time steps between data points must be integers",
                    file=sys.stderr,
                )
                sys.exit(-1)

            count = 0

            # zero pad the data and bin each point in the nearest bin
            datazeropad = np.zeros(int(math.ceil(totlen / mindt)) + 1, dtype=complex)

            idx = list(map(lambda x: int(math.floor((x / mindt) + 0.5)), tms))
            for i in range(0, len(idx)):
                datazeropad[idx[i]] = complex(Bk[i, 1], Bk[i, 2])

            # light (10%) Tukey taper for each FFT segment
            win = tukey_window(math.floor(delt / mindt), alpha=0.1)

            Fs = 1.0 / mindt  # sample rate in Hz

            # spectrogram with 50% overlapping segments of length delt
            fscan, freqs, t = specgram(
                datazeropad,
                NFFT=int(math.floor(delt / mindt)),
                Fs=Fs,
                window=win,
                noverlap=int(math.floor(delt / (2.0 * mindt))),
            )

            if plotpsds:
                fshape = fscan.shape

                totalasd = np.zeros(fshape[0])

                for i in range(0, fshape[0]):
                    scanasd = np.sqrt(fscan[i, :])
                    nonzasd = np.nonzero(scanasd)
                    scanasd = scanasd[nonzasd]

                    # median amplitude spectral density
                    # totalasd[i] = hmean(scanasd)
                    totalasd[i] = np.median(scanasd)

                # average amplitude spectral density
                asdlist.append(totalasd)

                # plot PSD
                psdfig = plt.figure(figsize=(4, 3.5), dpi=200)
                psdfig.subplots_adjust(left=0.18, bottom=0.15)

                plt.plot(freqs, totalasd, color=coldict[ifo])
                plt.xlim(freqs[0], freqs[-1])
                plt.xlabel(r"Frequency (Hz)", fontsize=14, fontweight=100)
                plt.ylabel(r"$h/\sqrt{\rm Hz}$", fontsize=14, fontweight=100)

                # convert frequency labels to fractions
                ax = plt.gca()
                xt = [-Fs / 2.0, -Fs / 4.0, 0.0, Fs / 2.0, Fs / 4.0]
                ax.set_xticks(xt)
                xl = []
                for item in xt:
                    if item == 0:
                        xl.append("0")
                    else:
                        if item < 0:
                            xl.append(r"$-\sfrac{1}{%d}$" % (-1.0 / item))
                        else:
                            xl.append(r"$\sfrac{1}{%d}$" % (1.0 / item))
                ax.set_xticklabels(xl)
                # plt.setp(ax.get_xticklabels(), fontsize=16) # increase font size
                plt.tick_params(axis="x", which="major", labelsize=14)

                psdfigs.append(psdfig)

            if plotfscan:
                fscanfig = plt.figure(figsize=(11, 3.5), dpi=200)
                fscanfig.subplots_adjust(bottom=0.15, left=0.09, right=0.94)

                extent = [tms[0], tms[-1], freqs[0], freqs[-1]]
                plt.imshow(
                    np.sqrt(np.flipud(fscan)),
                    aspect="auto",
                    extent=extent,
                    interpolation=None,
                    cmap=colmapdic[ifo],
                    norm=colors.Normalize(),
                )
                plt.ylabel(r"Frequency (Hz)", fontsize=14, fontweight=100)
                plt.xlabel(r"GPS - %d" % int(gpstime[0]), fontsize=14, fontweight=100)

                # convert frequency labels to fractions
                ax = plt.gca()
                yt = [-Fs / 2.0, -Fs / 4.0, 0.0, Fs / 2.0, Fs / 4.0]
                ax.set_yticks(yt)
                yl = []
                for item in yt:
                    if item == 0:
                        yl.append("0")
                    else:
                        if item < 0:
                            yl.append(r"$-\sfrac{1}{%d}$" % (-1.0 / item))
                        else:
                            yl.append(r"$\sfrac{1}{%d}$" % (1.0 / item))
                ax.set_yticklabels(yl)
                plt.tick_params(axis="y", which="major", labelsize=14)

                fscanfigs.append(fscanfig)

    return Bkfigs, psdfigs, fscanfigs, asdlist, sampledt
1775 
1776 
1777 # a function to create a histogram of log10(results) for a list of a given parameter
1778 # (if a list with previous value is given they will be plotted as well)
1779 # - lims is a dictionary of lists for each IFO
    lims, param, ifos, prevlims=None, bins=20, overplot=False, mplparams=False
):
    """Create histograms of log10(upper limits) for a parameter per IFO.

    Parameters
    ----------
    lims : dict
        Dictionary (keyed by IFO) of lists of limit values; None entries are
        removed before taking log10.
    param : str
        Parameter name, used for the x-axis label.
    ifos : list
        IFO names to plot.
    prevlims : list
        Optional previous limit values, overplotted as an unfilled histogram.
    bins : int
        Number of histogram bins.
    overplot : bool
        If True all IFOs are combined in one stacked histogram figure.
    mplparams : dict
        Optional matplotlib rcParams overrides.

    Returns
    -------
    list
        The created figures.
    """
    import matplotlib
    from matplotlib import pyplot as plt
    from lalpulsar.pulsarhtmlutils import paramlatexdict

    if not mplparams:
        mplparams = {
            "backend": "Agg",
            "text.usetex": True,  # use LaTeX for all text
            "axes.linewidth": 0.5,  # set axes linewidths to 0.5
            "axes.grid": True,  # add a grid
            "grid.linewidth": 0.5,
            "font.family": "serif",
            "font.size": 12,
        }

    matplotlib.rcParams.update(mplparams)

    myfigs = []

    # ifos line colour specs
    coldict = {"H1": "r", "H2": "c", "L1": "g", "V1": "b", "G1": "m", "Joint": "k"}

    # LaTeX label for the parameter (fall back to the raw name)
    try:
        parxaxis = paramlatexdict[param.upper()]
    except:
        parxaxis = param

    paryaxis = "Number of Pulsars"

    # get log10 of previous results
    logprevlims = None
    if prevlims is not None:
        logprevlims = np.log10(prevlims)

    # get the limits of the histogram range
    hrange = None
    stacked = False
    if overplot:
        highbins = []
        lowbins = []

        # combine dataset to plot as stacked
        stackdata = []
        stacked = True

        for j, ifo in enumerate(ifos):
            theselims = lims[ifo]

            # remove any None's
            # NOTE(review): deleting from a list while enumerating it skips
            # the element after each removal — confirm inputs never contain
            # consecutive None values
            for i, val in enumerate(theselims):
                if val == None:
                    del theselims[i]

            loglims = np.log10(theselims)

            stackdata.append(loglims)

            highbins.append(np.max(loglims))
            lowbins.append(np.min(loglims))

        if logprevlims is not None:
            highbins.append(max(logprevlims))
            lowbins.append(min(logprevlims))

        hrange = (min(lowbins), max(highbins))

    for j, ifo in enumerate(ifos):
        # get log10 of results
        theselims = lims[ifo]

        # remove any None's (see NOTE above about in-place deletion)
        for i, val in enumerate(theselims):
            if val == None:
                del theselims[i]

        loglims = np.log10(theselims)

        if not overplot:
            stackdata = loglims

        # if not overplot or (overplot and j == 0):
        myfig = plt.figure(figsize=(4, 4), dpi=200)
        maxlims = []
        minlims = []

        if overplot:
            edgecolor = []
            facecolor = []
            for ifoname in ifos:
                edgecolor.append(coldict[ifoname])
                facecolor.append(coldict[ifoname])
        else:
            edgecolor = [coldict[ifo]]
            facecolor = [coldict[ifo]]

        # plt.hist(loglims, bins, range=hrange, histtype='step', fill=True, edgecolor=coldict[ifo], facecolor=coldict[ifo], alpha=0.6)
        # NOTE(review): this rebinds `bins` (count) to the returned bin edge
        # array, which the prevlims histogram below then reuses
        n, bins, patches = plt.hist(
            stackdata,
            bins,
            range=hrange,
            histtype="step",
            stacked=stacked,
            fill=True,
            color=edgecolor,
            alpha=0.6,
        )
        for i, patch in enumerate(patches):
            plt.setp(patch, "facecolor", facecolor[i])

        if not overplot:
            maxlims.append(np.max(loglims))
            minlims.append(np.min(loglims))

        if logprevlims is not None:
            if not overplot:
                maxlims.append(max(logprevlims))
                minlims.append(min(logprevlims))

                maxlim = max(maxlims)
                minlim = min(minlims)
            else:
                maxlim = hrange[1]
                minlim = hrange[0]

            # NOTE(review): pyplot.hold was removed in Matplotlib 3.0 —
            # confirm the supported Matplotlib version
            plt.hold(True)
            plt.hist(
                logprevlims,
                bins,
                range=hrange,
                edgecolor="k",
                lw=2,
                histtype="step",
                fill=False,
            )
            plt.hold(False)
        else:
            if not overplot:
                maxlim = max(maxlims)
                minlim = min(minlims)
            else:
                maxlim = hrange[1]
                minlim = hrange[0]

        # set xlabels to 10^x
        ax = plt.gca()

        # work out how many ticks to set
        # NOTE(review): under Python 3 map() returns an iterator, consumed by
        # set_xticks; the second map over tickvals would then see it empty —
        # this appears to rely on Python 2 map() returning a list
        tickvals = range(int(math.ceil(maxlim) - math.floor(minlim)))
        tickvals = map(lambda x: x + int(math.floor(minlim)), tickvals)
        ax.set_xticks(tickvals)
        tls = map(lambda x: "$10^{%d}$" % x, tickvals)
        ax.set_xticklabels(tls)

        plt.ylabel(r"" + paryaxis, fontsize=14, fontweight=100)
        plt.xlabel(r"" + parxaxis, fontsize=14, fontweight=100)

        myfig.subplots_adjust(left=0.18, bottom=0.15)  # adjust size

        myfigs.append(myfig)

        # when overplotting, everything was drawn on the first pass
        if overplot:
            break

    return myfigs
1947 
1948 
1949 # a function plot upper limits verses GW frequency in log-log space. If upper and
1950 # lower estimates for the upper limit are supplied these will also be plotted as a
1951 # band. If previous limits are supplied these will be plotted as well.
1952 # h0lims is a dictionary of lists of h0 upper limits for each of the IFOs given in
1953 # the list of ifos
1954 # ulesttop and ulestbot are the upper and lower ranges of the estimates limit given
1955 # as a dictionary if list for each ifo.
1956 # prevlim is a list of previous upper limits at the frequencies prevlimf0gw
1957 # xlims is the frequency range for the plot
1958 # overplot - set to true to plot different IFOs on same plot
    h0lims,
    f0gw,
    ifos,
    xlims=[10, 1500],
    ulesttop=None,
    ulestbot=None,
    prevlim=None,
    prevlimf0gw=None,
    overplot=False,
    mplparams=False,
):
    """Plot h0 upper limits versus GW frequency in log-log space.

    Parameters
    ----------
    h0lims : dict
        Dictionary (keyed by IFO) of lists of h0 upper limits.
    f0gw : dict or list
        GW frequencies for the limits; a dictionary keyed by IFO if more
        than one IFO is given, otherwise a single list.
    ifos : list
        IFO names to plot.
    xlims : list
        The frequency range of the plot.
    ulesttop, ulestbot : dict
        Optional upper/lower estimated-limit ranges per IFO, drawn as grey
        vertical bands.
    prevlim : list
        Optional previous upper limits at frequencies ``prevlimf0gw``.
    prevlimf0gw : list
        Frequencies corresponding to ``prevlim``.
    overplot : bool
        If True all IFOs are drawn on a single figure.
    mplparams : dict
        Optional matplotlib rcParams overrides.

    Returns
    -------
    list
        The created figures, or None if limit/frequency lengths mismatch.
    """
    import matplotlib
    from matplotlib import pyplot as plt

    if not mplparams:
        mplparams = {
            "backend": "Agg",
            "text.usetex": True,  # use LaTeX for all text
            "axes.linewidth": 0.5,  # set axes linewidths to 0.5
            "axes.grid": True,  # add a grid
            "grid.linewidth": 0.5,
            "font.family": "serif",
            "font.size": 12,
        }

    matplotlib.rcParams.update(mplparams)

    myfigs = []

    # ifos line colour specs
    coldict = {"H1": "r", "H2": "c", "L1": "g", "V1": "b", "G1": "m", "Joint": "k"}

    parxaxis = "Frequency (Hz)"
    paryaxis = "$h_0$"

    for j, ifo in enumerate(ifos):
        h0lim = h0lims[ifo]

        # f0gw is a per-IFO dictionary only when there are multiple IFOs
        if len(ifos) > 1:
            f0s = f0gw[ifo]
        else:
            f0s = f0gw

        if len(h0lim) != len(f0s):
            return None  # exit if lengths aren't correct

        if not overplot or (overplot and j == 0):
            myfig = plt.figure(figsize=(7, 5.5), dpi=200)

        # NOTE(review): pyplot.hold was removed in Matplotlib 3.0 — confirm
        # the supported Matplotlib version
        plt.hold(True)
        if ulesttop is not None and ulestbot is not None:
            ult = ulesttop[ifo]
            ulb = ulestbot[ifo]

            # draw a grey band between the lower and upper estimates
            if len(ult) == len(ulb) and len(ult) == len(f0s):
                for i in range(len(ult)):
                    plt.loglog(
                        [f0s[i], f0s[i]], [ulb[i], ult[i]], ls="-", lw=3, c="lightgrey"
                    )

        if (
            prevlim is not None
            and prevlimf0gw is not None
            and len(prevlim) == len(prevlimf0gw)
        ):
            # only draw the previous limits once (on the final overplot pass)
            if not overplot or (overplot and j == len(ifos) - 1):
                plt.loglog(
                    prevlimf0gw,
                    prevlim,
                    marker="*",
                    ms=10,
                    alpha=0.7,
                    mfc="None",
                    mec="k",
                    ls="None",
                )

        # plot current limits
        plt.loglog(
            f0s, h0lim, marker="*", ms=10, mfc=coldict[ifo], mec=coldict[ifo], ls="None"
        )

        plt.ylabel(r"" + paryaxis, fontsize=14, fontweight=100)
        plt.xlabel(r"" + parxaxis, fontsize=14, fontweight=100)

        plt.xlim(xlims[0], xlims[1])

        if not overplot or (overplot and j == len(ifos) - 1):
            plt.hold(False)
            myfig.subplots_adjust(left=0.12, bottom=0.10)  # adjust size
            myfigs.append(myfig)

    return myfigs
2053 
2054 
2055 # a function to create the signal model for a heterodyned triaxial pulsar given
2056 # a signal GPS start time, signal duration (in seconds), sample interval
2057 # (seconds), detector, and the parameters h0, cos(iota), psi (rads), initial
2058 # phase phi0 (rads), right ascension (rads) and declination (rads) in a
2059 # dictionary. The list of time stamps, and the real and imaginary parts of the
2060 # signal are returned
def heterodyned_triaxial_pulsar(starttime, duration, dt, detector, pardict):
    """Create the heterodyned signal model for a triaxial pulsar.

    Parameters
    ----------
    starttime : float
        Signal GPS start time.
    duration : float
        Signal duration in seconds.
    dt : float
        Sample interval in seconds.
    detector : str
        Detector name, passed through to antenna_response.
    pardict : dict
        Parameters "h0", "cosiota", "psi", "phi0", "ra" and "dec".

    Returns
    -------
    ts : list
        The time stamps of the samples.
    s : :class:`numpy.ndarray`
        The complex heterodyned signal at those times.
    """
    # phase factors from the initial phase
    sinphi = np.sin(pardict["phi0"])
    cosphi = np.cos(pardict["phi0"])

    # plus and cross polarisation amplitudes
    amp_plus = 0.25 * (1.0 + pardict["cosiota"] * pardict["cosiota"]) * pardict["h0"]
    amp_cross = 0.5 * pardict["cosiota"] * pardict["h0"]

    # precompute the amplitude/phase products used every sample
    plus_sin = amp_plus * sinphi
    cross_sin = amp_cross * sinphi
    plus_cos = amp_plus * cosphi
    cross_cos = amp_cross * cosphi

    ts = []  # sample time stamps
    s = np.array([], dtype=complex)  # complex signal samples

    k = 0
    nexttime = starttime
    while nexttime < starttime + duration:
        tk = starttime + (dt * k)
        ts.append(tk)

        # antenna pattern at this time stamp
        fp, fc = antenna_response(
            tk, pardict["ra"], pardict["dec"], pardict["psi"], detector
        )

        # real and imaginary parts of the heterodyned signal
        s = np.append(
            s, (fp * plus_cos + fc * cross_sin) + 1j * (fp * plus_sin - fc * cross_cos)
        )

        nexttime = tk + dt
        k += 1

    return ts, s
2098 
2099 
2100 # a function to create a heterodyned signal model for a neutron star using the complex
2101 # amplitude parameters C22, phi22, C21 and phi21 and the orientation parameters cos(iota)
2102 # and psi. If both C22 and C21 are non-zero then a signal at both the rotation frequency
2103 # and twice the rotation frequency will be generated.
2104 #
2105 # Note that for the purely triaxial signal the waveform, as defined in Jones (2015)
2106 # http://arxiv.org/abs/1501.05832 (and subsequently Pitkin et al (2015) http://arxiv.org/abs/1508.00416),
2107 # has the opposite sign to the convention in e.g. JKS (which is used for signal generation in hardware
2108 # injections). Hence, when converting h0, from the conventional model, to C22 a minus sign needs to be
2109 # introduced. Also, phi0 here is defined as the rotational phase of the source, so when converting to phi22
2110 # a factor of 2 is needed.
    pardict, detector, starttime=900000000.0, duration=86400.0, dt=60.0, datatimes=None
):
    """Create a heterodyned signal model using complex amplitude parameters.

    Uses C22/phi22 and C21/phi21 (or h0/phi0, converted to C22/phi22) with
    the orientation parameters cos(iota) and psi. If both C21 and C22 keys
    are present (and h0 is not) signals at both the rotation frequency and
    twice the rotation frequency are generated; otherwise only the 2f signal.

    Parameters
    ----------
    pardict : dict
        Must contain "cosiota" and "psi"; optionally "C22"/"phi22" (or
        "h0"/"phi0"), "C21"/"phi21", plus "ra" and "dec" for the antenna
        response.
    detector : str
        Detector name, passed through to antenna_response.
    starttime, duration, dt : float
        GPS start time, duration and sample interval used to build the time
        stamps when ``datatimes`` is not given.
    datatimes : array_like
        Optional explicit time stamps at which to evaluate the signal.

    Returns
    -------
    ts, s : list
        Per-frequency lists of the time stamp arrays and the complex signal
        arrays.

    Raises
    ------
    KeyError
        If "cosiota" or "psi" is missing from pardict.
    """
    if "cosiota" in pardict:
        cosiota = pardict["cosiota"]
        iota = math.acos(cosiota)
        siniota = math.sin(iota)
    else:
        raise KeyError("cos(iota) not defined!")

    if "psi" in pardict:
        psi = pardict["psi"]
    else:
        raise KeyError("psi not defined!")

    # C22 from the dictionary, or converted from h0 (note the sign flip
    # required by the Jones (2015) waveform convention; see comment above)
    if "C22" in pardict:
        C22 = pardict["C22"]
    else:
        if "h0" in pardict:
            C22 = -pardict["h0"] / 2.0
        else:
            C22 = 0.0

    # phi22 from the dictionary, or twice the rotational phase phi0
    if "phi22" in pardict:
        phi22 = pardict["phi22"]
    else:
        if "phi0" in pardict:
            phi22 = 2.0 * pardict["phi0"]
        else:
            phi22 = 0.0

    ePhi22 = cmath.exp(phi22 * 1j)

    if "C21" in pardict:
        C21 = pardict["C21"]
    else:
        C21 = 0.0

    if "phi21" in pardict:
        phi21 = pardict["phi21"]
    else:
        phi21 = 0.0

    ePhi21 = cmath.exp(phi21 * 1j)

    s = []  # signal
    ts = []  # times

    # emit both harmonics only when both C21 and C22 are explicitly given
    if "C21" in pardict and "C22" in pardict and "h0" not in pardict:
        freqs = [1.0, 2.0]
    else:
        freqs = [2.0]

    for f in freqs:
        i = 0
        # default time stamps if none were supplied
        if datatimes is None:
            datatimes = np.arange(starttime, starttime + duration, dt)

        # get the antenna response
        fp, fc = antenna_response(
            datatimes, pardict["ra"], pardict["dec"], pardict["psi"], detector
        )

        # complex signal at the rotation frequency (1f) or twice it (2f)
        if f == 1.0:
            sf = (
                -(C21 / 4.0) * ePhi21 * siniota * cosiota * fp
                + 1j * (C21 / 4.0) * ePhi21 * siniota * fc
            )
        elif f == 2.0:
            sf = (
                -(C22 / 2.0) * ePhi22 * (1.0 + cosiota**2.0) * fp
                + 1j * (C22) * ePhi22 * cosiota * fc
            )

        ts.append(datatimes)
        s.append(sf)

    return ts, s
2189 
2190 
2191 # a function to convert the pinned superfluid model parameters to the complex
2192 # amplitude parameters using the Eqns 76-79 defined in Jones 2012 LIGO DCC T1200265-v3
    # NOTE(review): the `def ...(pardict):` header line is not visible in this
    # listing. The function converts pinned superfluid model parameters
    # (I21, I31, costheta, lambdapin, phi0) into the complex amplitude
    # parameters C21, C22, phi21, phi22 using Eqns 76-79 of Jones 2012
    # (LIGO DCC T1200265-v3).
    costheta = pardict["costheta"]
    theta = np.arccos(costheta)
    sintheta = math.sin(theta)
    sin2theta = math.sin(2.0 * theta)
    sinlambda = math.sin(pardict["lambdapin"])
    coslambda = math.cos(pardict["lambdapin"])
    sin2lambda = math.sin(2.0 * pardict["lambdapin"])

    # rotational phase of the source
    phi0 = pardict["phi0"]

    # moment-of-inertia asymmetry parameters
    I21 = pardict["I21"]
    I31 = pardict["I31"]

    # quadrature components of the l=m=2 (2f) mode
    A22 = I21 * (sinlambda**2 - (coslambda * costheta) ** 2) - I31 * sintheta**2
    B22 = I21 * sin2lambda * costheta

    C22 = 2.0 * math.sqrt(A22**2 + B22**2)

    # quadrature components of the l=2, m=1 (1f) mode
    A21 = I21 * sin2lambda * sintheta
    B21 = (I21 * coslambda**2 - I31) * sin2theta

    C21 = 2.0 * math.sqrt(A21**2 + B21**2)

    # phases reduced modulo 2*pi (NOTE(review): np.fmod keeps the sign of its
    # first argument, so these values can be negative)
    phi22 = np.fmod(2.0 * phi0 - math.atan2(B22, A22), 2.0 * np.pi)
    phi21 = np.fmod(phi0 - math.atan2(B21, A21), 2.0 * np.pi)

    outvals = {"C22": C22, "C21": C21, "phi22": phi22, "phi21": phi21}

    return outvals
2223 
2224 
2225 # a function to create the signal model for a heterodyned pinned superfluid
2226 # model pulsar a signal GPS start time, signal duration (in seconds),
2227 # sample interval (seconds), detector, and the parameters I21, I31,
2228 # cos(theta), lambda, cos(iota), psi (rads), initial phase phi0 (for l=m=2 mode in rads), right
2229 # ascension (rads), declination (rads), distance (kpc) and frequency (f0) in a
2230 # dictionary. The list of time stamps, and the real and imaginary parts of the
2231 # 1f and 2f signals are returned in an array
def heterodyned_pinsf_pulsar(starttime, duration, dt, detector, pardict):
    """
    Create a complex heterodyned time series for a pinned superfluid neutron
    star emitting at both its rotation frequency (1f) and twice its rotation
    frequency (2f), as defined in Eqns 35-38 of Jones 2012 (LIGO DCC
    T1200265-v3).

    Inputs are a GPS start time, signal duration (seconds), sample interval
    (seconds), a detector name (e.g. 'H1'), and a parameter dictionary
    containing 'I21', 'I31', 'costheta', 'lambdapin', 'cosiota', 'psi',
    'phi0', 'ra', 'dec', 'dist' and 'f0'.

    Returns a (2, N) array of time stamps and a (2, N) complex array holding
    the 1f (row 0) and 2f (row 1) heterodyned signal models.
    """
    iota = np.arccos(pardict["cosiota"])
    theta = np.arccos(pardict["costheta"])
    siniota = math.sin(iota)
    sintheta = math.sin(theta)
    sin2theta = math.sin(2.0 * theta)
    sinlambda = math.sin(pardict["lambdapin"])
    coslambda = math.cos(pardict["lambdapin"])
    sin2lambda = math.sin(2.0 * pardict["lambdapin"])

    # phase factors applied to the 1f and 2f streams
    ePhi = cmath.exp(0.5 * pardict["phi0"] * 1j)
    e2Phi = cmath.exp(pardict["phi0"] * 1j)

    # overall scale: frequency squared over distance
    f2_r = pardict["f0"] ** 2 / pardict["dist"]

    # plus/cross polarisation amplitude factors at 1f and 2f
    Xplusf = -(f2_r / 2.0) * siniota * pardict["cosiota"]
    Xcrossf = (f2_r / 2.0) * siniota

    Xplus2f = -f2_r * (1.0 + pardict["cosiota"] ** 2)
    Xcross2f = 2.0 * f2_r * pardict["cosiota"]

    # quadrature components of the 1f and 2f modes
    A21 = pardict["I21"] * sin2lambda * sintheta
    B21 = (pardict["I21"] * coslambda**2 - pardict["I31"]) * sin2theta

    A22 = (
        pardict["I21"] * (sinlambda**2 - coslambda**2 * pardict["costheta"] ** 2)
        - pardict["I31"] * sintheta**2
    )
    B22 = pardict["I21"] * sin2lambda * pardict["costheta"]

    # create the list of time stamps: consecutive steps of dt from starttime
    # while the next step remains within the requested duration
    ts1 = []
    tmpts = starttime
    i = 0
    while tmpts < starttime + duration:
        ts1.append(starttime + (dt * i))
        tmpts = ts1[i] + dt
        i = i + 1
    ts1 = np.array(ts1)

    # antenna responses for all (regularly spaced) time stamps in a single
    # call, rather than one call and one np.append per sample (O(N^2))
    fp, fc = antenna_response(
        ts1, pardict["ra"], pardict["dec"], pardict["psi"], detector
    )

    # complex signal amplitude model at 1f
    s1 = (fp * Xplusf * ePhi * (A21 - 1j * B21)) + (
        fc * Xcrossf * ePhi * (B21 + 1j * A21)
    )

    # complex signal amplitude model at 2f
    s2 = (fp * Xplus2f * e2Phi * (A22 - 1j * B22)) + (
        fc * Xcross2f * e2Phi * (B22 + 1j * A22)
    )

    # combine the 1f and 2f data into single (2, N) arrays
    ts = np.vstack([ts1, ts1])
    s = np.vstack([s1, s2])

    return ts, s
2310 
2311 
2312 # function to get the antenna response for a given detector. It takes in a GPS time or list (or numpy array)
2313 # of GPS times, right ascension (rads), declination (rads), polarisation angle (rads) and a detector name
2314 # e.g. H1, L1, V1. The plus and cross polarisations are returned.
def antenna_response(gpsTime, ra, dec, psi, det):
    """
    Get the plus and cross polarisation antenna pattern responses for a given
    detector.

    Inputs
    ------
    gpsTime - a GPS time, or a list/numpy array of GPS times
    ra - right ascension in rads, or a string in 'hh:mm:ss.s' format
    dec - declination in rads, or a string in 'dd:mm:ss.s' format
    psi - polarisation angle (rads)
    det - a detector name, e.g. 'H1', 'L1', 'V1'

    Returns
    -------
    (fplus, fcross) - numpy arrays with one response value per input time
    """
    import lal

    # create detector-name map
    detMap = {
        "H1": lal.LALDetectorIndexLHODIFF,
        "H2": lal.LALDetectorIndexLHODIFF,
        "L1": lal.LALDetectorIndexLLODIFF,
        "G1": lal.LALDetectorIndexGEO600DIFF,
        "V1": lal.LALDetectorIndexVIRGODIFF,
        "T1": lal.LALDetectorIndexTAMA300DIFF,
        "AL1": lal.LALDetectorIndexLLODIFF,
        "AH1": lal.LALDetectorIndexLHODIFF,
        "AV1": lal.LALDetectorIndexVIRGODIFF,
        "E1": lal.LALDetectorIndexE1DIFF,
        "E2": lal.LALDetectorIndexE2DIFF,
        "E3": lal.LALDetectorIndexE3DIFF,
    }

    try:
        detector = detMap[det]
    except KeyError:
        raise KeyError("ERROR. Key {} is not a valid detector name.".format(det))

    # get the cached detector and its response matrix
    detval = lal.CachedDetectors[detector]
    response = detval.response

    # check if ra and dec are floats or strings (if strings in hh/dd:mm:ss.s format then convert to rads)
    if not isinstance(ra, float):
        if isinstance(ra, string_types):
            try:
                ra = ra_to_rad(ra)
            except Exception:
                raise ValueError(
                    "Right ascension string '{}' not formatted properly".format(ra)
                )
        else:
            raise ValueError(
                "Right ascension must be a 'float' in radians or a string of the format 'hh:mm:ss.s'"
            )

    if not isinstance(dec, float):
        if isinstance(dec, string_types):
            try:
                dec = dec_to_rad(dec)
            except Exception:
                # fixed: previously this message formatted `ra` rather than `dec`
                raise ValueError(
                    "Declination string '{}' not formatted properly".format(dec)
                )
        else:
            raise ValueError(
                "Declination must be a 'float' in radians or a string of the format 'dd:mm:ss.s'"
            )

    # check if gpsTime is just a float or int, and if so convert into an array
    if isinstance(gpsTime, (float, int)):
        gpsTime = np.array([gpsTime])
    else:  # make sure it's a numpy array
        gpsTime = np.copy(gpsTime)

    # make sure times are floats
    gpsTime = gpsTime.astype("float64")

    # if gpsTime is a list of regularly spaced values then use the faster
    # ComputeDetAMResponseSeries on the whole series at once
    if len(gpsTime) == 1 or np.unique(np.diff(gpsTime)).size == 1:
        gpsStart = lal.LIGOTimeGPS(gpsTime[0])
        dt = 0.0
        if len(gpsTime) > 1:
            dt = gpsTime[1] - gpsTime[0]
        fp, fc = lal.ComputeDetAMResponseSeries(
            response, ra, dec, psi, gpsStart, dt, len(gpsTime)
        )

        # return elements from Time Series
        return fp.data.data, fc.data.data
    else:  # irregular spacing: work out the value at each point in the time series
        fp = np.zeros(len(gpsTime))
        fc = np.zeros(len(gpsTime))

        for i in range(len(gpsTime)):
            gps = lal.LIGOTimeGPS(gpsTime[i])
            gmst_rad = lal.GreenwichMeanSiderealTime(gps)

            # actual computation of antenna factors
            fp[i], fc[i] = lal.ComputeDetAMResponse(response, ra, dec, psi, gmst_rad)

        return fp, fc
2403 
2404 
# a function to inject a heterodyned pulsar signal into noise of a
# given level for detectors. It will return the signal + noise and the optimal
# SNR. If an snrscale value is passed to the function the signal will be scaled
# to that SNR. npsds can be used to supply the noise levels directly.
    # NOTE(review): the `def inject_pulsar_signal(` header line is not visible
    # in this listing; these are the function's parameters.
    starttime,
    duration,
    dt,
    detectors,
    pardict,
    freqfac=[2.0],  # NOTE(review): mutable default argument - shared across calls
    npsds=None,
    snrscale=None,
):
    # if detectors is just a string (i.e. one detector) then make it a list
    if isinstance(detectors, string_types):
        detectors = [detectors]

    # if no noise sigmas are given then generate a noise level from the given
    # detector noise curves
    if npsds is None:
        npsds = []

        for det in detectors:
            for frf in freqfac:
                # one-sided PSD at the emission frequency (frf * rotation frequency)
                psd = detector_noise(det, frf * pardict["f0"])

                # convert to a time domain standard deviation shared between the
                # real and imaginary parts of the signal
                ns = np.sqrt((psd / 2.0) / (2.0 * dt))

                npsds.append(ns)
    else:
        # convert the input PSDs into time domain noise standard deviations
        tmpnpsds = []

        count = 0
        for j, det in enumerate(detectors):
            for frf in freqfac:
                if len(npsds) == 1:
                    # a single input PSD is shared by all detectors/frequencies
                    tmpnpsds.append(np.sqrt((npsds[0] / 2.0) / (2.0 * dt)))
                else:
                    tmpnpsds.append(np.sqrt((npsds[count] / 2.0) / (2.0 * dt)))

                count = count + 1

        npsds = tmpnpsds

    # sanity checks: one noise level per detector for a single frequency
    # factor, or two per detector for the two-harmonic (1f and 2f) case
    if len(freqfac) == 1 and len(detectors) != len(npsds):
        raise ValueError(
            "Number of detectors {} not the same as number of noises {}".format(
                len(detectors), len(npsds)
            )
        )

    if len(freqfac) == 2 and 2 * len(detectors) != len(npsds):
        raise ValueError(
            "Number of detectors {} not half the number of noises {}".format(
                len(detectors), len(npsds)
            )
        )

    tss = np.array([])
    ss = np.array([])

    # build the noise-free signal for each detector, stacking time stamps and
    # signals row-wise, and accumulate the squared optimal SNR over detectors
    snrtot = 0
    for j, det in enumerate(detectors):
        # create the pulsar signal
        if len(freqfac) == 1 and freqfac[0] == 2.0:
            ts, s = heterodyned_pulsar_signal(pardict, det, starttime, duration, dt)

            if j == 0:
                tss = np.append(tss, ts)
                ss = np.append(ss, s)
            else:
                tss = np.vstack([tss, ts])
                ss = np.vstack([ss, s])

            # get SNR
            snrtmp = get_optimal_snr(s[0], npsds[j])
        elif len(freqfac) == 2:
            ts, s = heterodyned_pulsar_signal(pardict, det, starttime, duration, dt)

            # combine the 1f and 2f data-stream SNRs in quadrature
            snrtmp = 0
            for k, frf in enumerate(freqfac):
                if j == 0 and k == 0:
                    tss = np.append(tss, ts[k][:])
                    ss = np.append(ss, s[k][:])
                else:
                    tss = np.vstack([tss, ts[k][:]])
                    ss = np.vstack([ss, s[k][:]])

                snrtmp2 = get_optimal_snr(s[k][:], npsds[2 * j + k])
                snrtmp = snrtmp + snrtmp2 * snrtmp2

            snrtmp = np.sqrt(snrtmp)

        snrtot = snrtot + snrtmp * snrtmp

    # total multidetector/data stream snr
    snrtot = np.sqrt(snrtot)

    # add noise and rescale signals if necessary
    if snrscale is not None:
        if snrscale != 0:
            # scale factor required to reach the requested total SNR
            snrscale = snrscale / snrtot
        # print snrscale
    else:
        snrscale = 1

    # keep a copy of the (rescaled) noise-free signal to return alongside
    signalonly = ss.copy()

    i = 0
    for det in detectors:
        # for triaxial model
        if len(freqfac) == 1:
            # generate random numbers
            rs = np.random.randn(len(ts[0]), 2)

            for j, t in enumerate(ts[0]):
                if len(tss.shape) == 1:
                    # single detector: the data arrays are one-dimensional
                    signalonly[j] = (snrscale * ss[j].real) + 1j * (
                        snrscale * ss[j].imag
                    )
                    ss[j] = (snrscale * ss[j].real + npsds[i] * rs[j][0]) + 1j * (
                        snrscale * ss[j].imag + npsds[i] * rs[j][1]
                    )
                else:
                    signalonly[i][j] = (snrscale * ss[i][j].real) + 1j * (
                        snrscale * ss[i][j].imag
                    )
                    ss[i][j] = (snrscale * ss[i][j].real + npsds[i] * rs[j][0]) + 1j * (
                        snrscale * ss[i][j].imag + npsds[i] * rs[j][1]
                    )

            i = i + 1
        elif len(freqfac) == 2:
            # generate random numbers
            rs = np.random.randn(len(ts[0][:]), 4)

            for j, t in enumerate(ts[0][:]):
                # row i holds the 1f data and row i+1 the 2f data for this detector
                signalonly[i][j] = (snrscale * ss[i][j].real) + 1j * (
                    snrscale * ss[i][j].imag
                )
                ss[i][j] = (snrscale * ss[i][j].real + npsds[i] * rs[j][0]) + 1j * (
                    snrscale * ss[i][j].imag + npsds[i] * rs[j][1]
                )
                signalonly[i + 1][j] = (snrscale * ss[i + 1][j].real) + 1j * (
                    snrscale * ss[i + 1][j].imag
                )
                ss[i + 1][j] = (
                    snrscale * ss[i + 1][j].real + npsds[i + 1] * rs[j][2]
                ) + 1j * (snrscale * ss[i + 1][j].imag + npsds[i + 1] * rs[j][3])

            i = i + 2
        else:
            print("Something wrong with injection", file=sys.stderr)
            sys.exit(1)

    # scale the returned total SNR by the applied factor
    snrtot = snrtot * snrscale

    return tss, ss, signalonly, snrtot, snrscale, npsds
2567 
2568 
2569 # function to create a time domain PSD from theoretical
2570 # detector noise curves. It takes in the detector name and the frequency at
2571 # which to generate the noise.
2572 #
2573 # The noise models are taken from those in lalsimulation/lib/LALSimNoisePSD.c
def detector_noise(det, f):
    """
    Return the theoretical one-sided noise power spectral density for detector
    `det` (e.g. 'H1', 'V1') evaluated at frequency `f` (Hz).

    The noise models are those in lalsimulation/lib/LALSimNoisePSD.c.
    """
    import lalsimulation

    # H2 is modelled as twice the iLIGO SRD noise level
    if det == "H2":
        return lalsimulation.SimNoisePSDiLIGOSRD(f) * 2.0

    # map each remaining known detector name to its lalsimulation PSD function
    psd_funcs = {
        "AV1": lalsimulation.SimNoisePSDAdvVirgo,  # Advanced Virgo
        "H1": lalsimulation.SimNoisePSDiLIGOSRD,  # iLIGO SRD
        "L1": lalsimulation.SimNoisePSDiLIGOSRD,  # iLIGO SRD
        "G1": lalsimulation.SimNoisePSDGEO,  # GEO 600
        "V1": lalsimulation.SimNoisePSDVirgo,  # initial Virgo
        "T1": lalsimulation.SimNoisePSDTAMA,  # TAMA
        "K1": lalsimulation.SimNoisePSDKAGRA,  # KAGRA
        "AL1": lalsimulation.SimNoisePSDaLIGOZeroDetHighPower,  # advanced LIGO
        "AH1": lalsimulation.SimNoisePSDaLIGOZeroDetHighPower,  # advanced LIGO
    }

    if det in psd_funcs:
        return psd_funcs[det](f)

    raise ValueError("{} is not a recognised detector".format(det))
2595 
2596 
2597 # function to calculate the optimal SNR of a heterodyned pulsar signal - it
2598 # takes in a complex signal model and noise standard deviation
def get_optimal_snr(s, sig):
    """
    Calculate the optimal matched-filter signal-to-noise ratio of a
    heterodyned pulsar signal.

    s - a complex signal model (list or numpy array)
    sig - the noise standard deviation (shared by the real and imaginary parts)
    """
    # sum of |s|^2 over all samples divided by the noise variance,
    # vectorised rather than accumulated in a Python loop
    s = np.asarray(s)
    return np.sqrt(np.sum(s.real**2 + s.imag**2) / sig**2)
2606 
2607 
2608 # use the Gelman-Rubins convergence test for MCMC chains, where chains is a list
2609 # of MCMC numpy chain arrays - this copies the gelman_rubins function in
2610 # bayespputils
def gelman_rubins(chains):
    """
    Compute the Gelman-Rubins convergence statistic R for a set of MCMC
    chains, where `chains` is a list of numpy chain arrays. This mirrors the
    gelman_rubins function in bayespputils.
    """
    nchains = len(chains)

    # variance of the per-chain means, and mean of the per-chain variances
    between = np.var([np.mean(chain) for chain in chains])
    within = np.mean([np.var(chain) for chain in chains])

    # pooled variance estimate, corrected for the finite number of chains
    pooled = within + between
    vhat = pooled + between / nchains

    # potential scale reduction factor
    return vhat / within
2627 
2628 
2629 # function to convert MCMC files output from pulsar_parameter_estimation into
2630 # a posterior class object - also outputting:
2631 # - the mean effective sample size of each parameter chain
2632 # - the Gelman-Rubins statistic for each parameter (a dictionary)
2633 # - the original length of each chain
# The input is a list of MCMC chain files
    # NOTE(review): the `def pulsar_mcmc_to_posterior(chainfiles):` header line
    # is not visible in this listing; `chainfiles` is a list of MCMC chain
    # file names.
    from lalinference import bayespputils as bppu

    cl = []  # original length of each chain
    neffs = []  # mean effective sample size of each chain
    grr = {}  # Gelman-Rubins statistic per parameter

    mcmc = []  # thinned chains

    for cfile in chainfiles:
        if os.path.isfile(cfile):
            # load MCMC chain
            mcmcChain = read_pulsar_mcmc_file(cfile)

            # if no chain found then exit with None's
            if mcmcChain is None:
                return None, None, None, None

            # find number of effective samples for the chain (column 0, the
            # logL values, is skipped)
            neffstmp = []
            for j in range(1, mcmcChain.shape[1]):
                neff, acl, acf = bppu.effectiveSampleSize(mcmcChain[:, j])
                neffstmp.append(neff)

            # get the minimum effective sample size
            # neffs.append(min(neffstmp))
            # get the mean effective sample size
            neffs.append(math.floor(np.mean(neffstmp)))

            # nskip = math.ceil(mcmcChain.shape[0]/min(neffstmp))
            nskip = int(math.ceil(mcmcChain.shape[0] / np.mean(neffstmp)))

            # output every nskip (independent) value
            mcmc.append(mcmcChain[::nskip, :])
            cl.append(mcmcChain.shape[0])
        else:
            print("File %s does not exist!" % cfile, file=sys.stderr)
            return None, None, None, None

    # output data to common results format
    # get first line of MCMC chain file for header names
    cf = open(chainfiles[0], "r")
    headers = cf.readline()
    headers = cf.readline()  # column names are on the second line
    # remove % from start
    headers = re.sub("%", "", headers)
    # remove rads
    headers = re.sub("rads", "", headers)
    # remove other brackets e.g. around (iota)
    headers = re.sub("[()]", "", headers)
    cf.close()

    # get Gelman-Rubins stat for each parameter (collect the same column from
    # every thinned chain)
    for idx, parv in enumerate(headers.split()):
        lgr = []
        if parv != "logL":
            for j in range(0, len(mcmc)):
                achain = mcmc[j]
                singlechain = achain[:, idx]
                lgr.append(singlechain)
            grr[parv.lower()] = gelman_rubins(lgr)

    # logL in chain is actually log posterior, so also output the posterior
    # values (can be used to estimate the evidence)
    headers = headers.replace("\n", "\tpost\n")

    # output full data to common format
    comfile = chainfiles[0] + "_common_tmp.dat"
    try:
        cf = open(comfile, "w")
    except:
        # NOTE(review): exiting with status 0 after a failure looks unintended
        print("Can't open common posterior file!", file=sys.stderr)
        sys.exit(0)

    cf.write(headers)
    for narr in mcmc:
        for j in range(0, narr.shape[0]):
            mline = narr[j, :]
            # add on posterior (exponential of the log posterior in column 0)
            mline = np.append(mline, np.exp(mline[0]))

            strmline = " ".join(str(x) for x in mline) + "\n"
            cf.write(strmline)
    cf.close()

    # read in as common object
    peparser = bppu.PEOutputParser("common")
    cf = open(comfile, "r")
    commonResultsObj = peparser.parse(cf)
    cf.close()

    # remove temporary file
    os.remove(comfile)

    # create posterior class
    pos = bppu.Posterior(commonResultsObj, SimInspiralTableEntry=None, votfile=None)

    # convert iota back to cos(iota)
    # create 1D posterior class of cos(iota) values
    cipos = None
    cipos = bppu.PosteriorOneDPDF("cosiota", np.cos(pos["iota"].samples))

    # add it back to posterior
    pos.append(cipos)

    # remove iota samples
    pos.pop("iota")

    return pos, neffs, grr, cl
2744 
2745 
# a function that attempts to load a pulsar MCMC chain file: first it tries using
# numpy.loadtxt; if that fails it tries reading line by line and checking
# for a consistent number of values per line, skipping lines that are inconsistent;
# if this also fails it returns None
    # NOTE(review): the `def read_pulsar_mcmc_file(cf):` header line is not
    # visible in this listing; `cf` is the name of the chain file to read.
    cfdata = None

    # first try reading in with loadtxt (skipping lines starting with %)
    try:
        cfdata = np.loadtxt(cf, comments="%")
    except:
        # fall back to reading line by line, skipping lines whose number of
        # values is inconsistent with the header
        try:
            fc = open(cf, "r")

            # read in header lines and count how many values each line should have
            headers = fc.readline()
            headers = fc.readline()  # column names are on the second line
            fc.close()
            # remove % from start
            headers = re.sub("%", "", headers)
            # remove rads
            headers = re.sub("rads", "", headers)
            # remove other brackets e.g. around (iota)
            headers = re.sub("[()]", "", headers)

            lh = len(headers.split())

            cfdata = np.array([])

            # BUG(review): `cf` is the file *name*, not a file object, so this
            # raises AttributeError; the enclosing except then sets cfdata to
            # None, meaning this fallback path can never succeed as written.
            lines = cf.readlines()

            for i, line in enumerate(lines):
                if "%" in line:  # skip lines containing %
                    continue

                lvals = line.split()

                # skip line if number of values isn't consistent with header
                if len(lvals) != lh:
                    continue

                # convert values to floats
                # BUG(review): on Python 3 `map` returns an iterator, so the
                # np.array/np.vstack calls below would not build numeric rows.
                try:
                    lvalsf = map(float, lvals)
                except:
                    continue

                # add values to array
                # NOTE(review): `i` indexes all lines including the headers, so
                # the first accepted data line will rarely have i == 0.
                if i == 0:
                    cfdata = np.array(lvalsf)
                else:
                    cfdata = np.vstack((cfdata, lvalsf))

            if cfdata.size == 0:
                cfdata = None
        except:
            cfdata = None

    return cfdata
2805 
2806 
def pulsar_nest_to_posterior(postfile, nestedsamples=False, removeuntrig=True):
    """
    This function will import a posterior sample file created by `lalapps_nest2pos` (or a nested
    sample file). It will be returned as a Posterior class object from `bayespputils`. The
    signal evidence and noise evidence are also returned.

    If there are samples in 'Q22' and not 'H0', and also a distance, or distance samples, then
    the Q22 samples will be converted into equivalent H0 values.

    Parameters
    ----------
    postfile : str, required
        The file name of the posterior or nested sample file. In general this should be a HDF5 file with the extension
        '.hdf' or '.h5', although older format ascii files are still supported at the moment.
    nestedsamples : bool, default: False
        If the file being input contains nested samples, rather than posterior samples, then this flag must be set to
        True
    removeuntrig : bool, default: True
        If this is True then any parameters that are sines or cosines of a value will have the value removed if present
        e.g. if cosiota and iota exist then iota will be removed.

    Returns
    -------
    A tuple of the Posterior object, the log signal evidence and the log noise evidence.
    """

    from lalinference import bayespputils as bppu
    from lalinference.bayespputils import replace_column

    fe = os.path.splitext(postfile)[-1].lower()  # file extension

    # combine multiple nested sample files for an IFO into a single posterior (copied from lalapps_nest2pos)
    if fe == ".h5" or fe == ".hdf":  # HDF5 file
        if nestedsamples:
            # use functions from lalapps_nest2pos to read values from nested sample files i.e. not a posterior file created by lalapps_nest2pos
            try:
                from lalinference.io import read_samples
                from lalinference import LALInferenceHDF5NestedSamplesDatasetName

                samples = read_samples(
                    postfile, tablename=LALInferenceHDF5NestedSamplesDatasetName
                )
                params = samples.colnames

                # make everything a float, since that's what's expected of a CommonResultsObj
                for param in params:
                    replace_column(samples, param, samples[param].astype(float))

                nsResultsObject = (
                    samples.colnames,
                    samples.as_array().view(float).reshape(-1, len(params)),
                )
            except:
                raise IOError("Could not import nested samples")
        else:  # assume posterior file has been created with lalapps_nest2pos
            peparser = bppu.PEOutputParser("hdf5")
            nsResultsObject = peparser.parse(postfile)
    elif fe == ".gz":  # gzipped file
        import gzip

        peparser = bppu.PEOutputParser("common")
        nsResultsObject = peparser.parse(gzip.open(postfile, "r"))
    else:  # assume an ascii text file
        peparser = bppu.PEOutputParser("common")
        nsResultsObject = peparser.parse(open(postfile, "r"))

    pos = bppu.Posterior(nsResultsObject, SimInspiralTableEntry=None)

    # remove any unchanging variables and randomly shuffle the rest
    pnames = pos.names
    nsamps = len(pos[pnames[0]].samples)
    permarr = np.arange(nsamps)
    np.random.shuffle(permarr)
    posdist = None  # fixed distance (kpc), if found
    posfreqs = None  # fixed rotation frequency, if found
    for pname in pnames:
        # check if all samples are the same
        if pos[pname].samples.tolist().count(pos[pname].samples[0]) == len(
            pos[pname].samples
        ):
            if pname == "f0_fixed":
                # try getting a fixed f0 value (for calculating h0 from Q22)
                posfreqs = pos[pname].samples[0]
            elif pname == "dist":
                # try getting a fixed distance value (for calculating h0 from Q22 if required)
                # NOTE(review): KPC is a module-level constant defined elsewhere in this file
                posdist = pos[pname].samples[0] / KPC

            pos.pop(pname)
        else:
            # shuffle the samples
            shufpos = None
            shufpos = bppu.PosteriorOneDPDF(pname, pos[pname].samples[permarr])
            pos.pop(pname)
            pos.append(shufpos)

    # check whether iota has been used
    posIota = None
    if "iota" in pos.names:
        posIota = pos["iota"].samples

    # add a cos(iota) posterior derived from iota, optionally dropping iota
    if posIota is not None:
        cipos = None
        cipos = bppu.PosteriorOneDPDF("cosiota", np.cos(posIota))
        pos.append(cipos)
        if removeuntrig:
            pos.pop("iota")

    # check whether sin(i) binary parameter has been used
    posI = None
    if "i" in pos.names:
        posI = pos["i"].samples

    if posI is not None:
        sinipos = None
        sinipos = bppu.PosteriorOneDPDF("sini", np.sin(posI))
        pos.append(sinipos)
        if removeuntrig:
            pos.pop("i")

    # gather the complex amplitude parameters, if present
    posC21 = None
    if "c21" in pos.names:
        posC21 = pos["c21"].samples

    posC22 = None
    if "c22" in pos.names:
        posC22 = pos["c22"].samples

    posphi22 = None
    if "phi22" in pos.names:
        posphi22 = pos["phi22"].samples

    # convert C22 back into h0, and phi22 back into phi0 if required
    # (only when there is no 1f C21 component: h0 = 2*C22)
    if posC22 is not None and posC21 is None:
        h0pos = None
        h0pos = bppu.PosteriorOneDPDF("h0", 2.0 * pos["c22"].samples)

        pos.append(h0pos)
        pos.pop("c22")

        if posphi22 is not None:
            phi0pos = None
            phi0pos = bppu.PosteriorOneDPDF(
                "phi0", np.fmod(pos["phi22"].samples + math.pi, 2.0 * math.pi)
            )

            pos.append(phi0pos)
            pos.pop("phi22")

    # convert Q22 to h0 if required
    posQ22 = None
    if "q22" in pos.names:
        posQ22 = pos["q22"].samples

    if "dist" in pos.names:
        posdist = (
            pos["dist"].samples / KPC
        )  # distance in kpc (for use in converting Q22 to h0)

        # convert distance samples to kpc
        pos.pop("dist")
        distpos = bppu.PosteriorOneDPDF("dist", posdist)
        pos.append(distpos)

    if "f0" in pos.names:
        posfreqs = pos["f0"].samples
        # if not found this will have been set from the f0_fixed value above

    # derive an h0 posterior from the Q22 samples when h0 itself is absent
    if posQ22 is not None and posdist is not None and "h0" not in pos.names:
        h0pos = None
        h0pos = bppu.PosteriorOneDPDF("h0", quadrupole_to_h0(posQ22, posfreqs, posdist))

        pos.append(h0pos)

    # get evidence (as in lalapps_nest2pos)
    if fe == ".h5" or fe == ".hdf":
        # read evidence from HDF5 file
        hdf = h5py.File(postfile, "r")
        a = hdf["lalinference"]["lalinference_nest"]
        sigev = a.attrs["log_evidence"]
        noiseev = a.attrs["log_noise_evidence"]
        hdf.close()
    else:
        # older ascii outputs store the evidences in a companion "_B.txt" file
        B = np.loadtxt(postfile.replace(".gz", "") + "_B.txt")
        sigev = B[1]
        noiseev = B[2]

    # return posterior samples, signal evidence (B[1]) and noise evidence (B[2])
    return pos, sigev, noiseev
2992 
2993 
2994 # function to add two exponentiated log values and return the log of the result
def logplus(x, y):
    """
    Add two values given as natural logarithms, returning the natural
    logarithm of the sum: log(exp(x) + exp(y)).
    """
    return np.logaddexp(x, y)
2997 
2998 
def logtrapz(lnf, dx):
    """
    Perform trapezium rule integration for the logarithm of a function on a
    regular grid.

    Inputs
    ------
    lnf - a numpy array of values that are the natural logarithm of a function
    dx - a float giving the step size between values in the function

    Returns
    -------
    The natural logarithm of the area under the function.
    """

    # log of the summed left and right edges of the trapezia
    lhs = logsumexp(lnf[:-1])
    rhs = logsumexp(lnf[1:])

    # log( (dx / 2) * (sum of left edges + sum of right edges) )
    return np.log(dx / 2.0) + logsumexp([lhs, rhs])
3014 
3015 
3016 # function to marginalise over a parameter
def marginalise(like, pname, pnames, ranges):
    """
    Marginalise a log-likelihood array over a single parameter.

    like - the log-likelihood array
    pname - the name of the parameter to marginalise over
    pnames - the list of parameters not yet marginalised (modified in place)
    ranges - the dict of parameter grids not yet marginalised (modified in place)
    """

    grid = ranges[pname]
    ax = pnames.index(pname)

    if len(grid) > 1:
        # integrate along this parameter's axis using the log-space trapezium rule
        marg = np.apply_along_axis(logtrapz, ax, like, grid[1] - grid[0])
    elif len(grid) == 1:
        # single grid point: just drop the singleton dimension via reshaping
        newshape = tuple(n for d, n in enumerate(like.shape) if d != ax)
        marg = np.reshape(like, newshape)

    # the parameter has been consumed, so remove it from the bookkeeping
    del ranges[pname]
    pnames.remove(pname)

    return marg
3041 
3042 
3043 # return the log marginal posterior on a given parameter (if 'all' marginalise over all parameters)
def marginal(lnlike, pname, pnames, ranges, multflatprior=False):
    """
    Return the log marginal posterior on parameter `pname`, marginalising the
    log-likelihood over every other parameter (pass 'all' to marginalise over
    all parameters). If multflatprior is True, each marginalised parameter is
    first multiplied by a flat prior over its range.
    """
    from copy import deepcopy

    # work on copies so the caller's array/list/dict are left untouched
    marglike = deepcopy(lnlike)
    margnames = deepcopy(pnames)
    margranges = deepcopy(ranges)

    for name in pnames:
        if name == pname:
            continue

        if multflatprior:
            # fold in a flat prior of 1/(range width) before marginalising
            lnprior = np.log(1.0 / (margranges[name][-1] - margranges[name][0]))
            marglike = marginalise(marglike + lnprior, name, margnames, margranges)
        else:
            marglike = marginalise(marglike, name, margnames, margranges)

    return marglike
3066 
3067 
3068 # a function to chop the data time series, ts, up into contigous segments with a maximum length, chunkMax
def get_chunk_lengths(ts, chunkMax):
    """
    Chop the data time series `ts` into contiguous segments, returning a list
    of segment lengths. A new segment starts whenever consecutive time stamps
    are more than two sample times apart, or the current segment reaches the
    maximum length `chunkMax`.
    """
    n = len(ts)

    # the sample time is taken as the smallest gap in the data
    step = np.min(np.diff(ts))

    lengths = []
    seglen = 0
    idx = 0

    while True:
        seglen += 1

        # final point reached: close the last segment and finish
        if idx > n - 2:
            lengths.append(seglen)
            return lengths

        idx += 1
        gap = ts[idx] - ts[idx - 1]

        # points within two sample times of each other count as the same chunk
        if gap > 2.0 * step or seglen == chunkMax:
            lengths.append(seglen)
            seglen = 0
3101 
3102 
def pulsar_estimate_snr(source, det, tstart, duration, Sn, dt=1800):
    """
    A function to estimate the signal-to-noise ratio of a pulsar signal (for a triaxial neutron
    star emitting at twice the rotation frequency from the l=m=2 quadrupole mode) in a given
    detector over a given time range, for a given one-sided power spectral density.

    Inputs
    ------
    source - a dictionary containing 'h0', 'cosiota', 'psi', 'ra' (rads or string), 'dec' (rads or string)
    det - the detector, e.g. 'H1'
    tstart - a GPS start time
    duration - a signal duration (seconds)
    Sn - a one-sided power spectral density
    dt - timestep for antenna response calculation [default: 1800s]

    Returns
    -------
    rho - an estimate of the optimal matched filter SNR
    """

    # the antenna pattern repeats over a sidereal day, so for long durations
    # only compute it over one sidereal day plus the remainder
    sidday = 86164.0905  # one sidereal day in seconds
    Ndays = duration / sidday
    tend = tstart + duration

    sFp = 0
    sFc = 0

    # sum the squared antenna pattern over the whole sidereal days
    if Ndays > 1:
        tts = np.arange(tstart, tstart + sidday, dt)

        # get antenna pattern over one sidereal day
        Fp, Fc = antenna_response(tts, source["ra"], source["dec"], source["psi"], det)

        sFp = np.floor(Ndays) * np.sum(Fp**2)
        sFc = np.floor(Ndays) * np.sum(Fc**2)

    # add on the remaining fractional part of a sidereal day
    # (BUG FIX: the original condition `np.floor(Ndays) * sidday > dt` was
    # never true for durations shorter than a sidereal day, so the antenna
    # pattern was never computed and the SNR came out as zero)
    if tend - (tstart + np.floor(Ndays) * sidday) > dt:
        tts = np.arange(tstart + sidday * np.floor(Ndays), tend, dt)

        Fp, Fc = antenna_response(tts, source["ra"], source["dec"], source["psi"], det)

        sFp += np.sum(Fp**2)
        sFc += np.sum(Fc**2)

    # squared matched filter SNR for a triaxial source emitting at 2f
    snr2 = (
        (source["h0"] ** 2 / Sn)
        * dt
        * (
            sFp * (0.5 * (1 + source["cosiota"] ** 2)) ** 2
            + sFc * source["cosiota"] ** 2
        )
    )

    return np.sqrt(snr2)
3162 
3163 
def pulsar_estimate_h0_from_snr(snr, source, det, tstart, duration, Sn, dt=600):
    """
    A function to estimate the signal amplitude of a pulsar signal (for a triaxial neutron star emitting at twice
    the rotation frequency from the l=m=2 quadrupole mode) for a given SNR in a particular detector over
    a given time range, for a given one-sided power spectral density.

    Inputs
    ------
    snr - the optimal matched filter signal-to-noise ratio
    source - a dictionary containing 'cosiota', 'psi', 'ra' (rads or string), 'dec' (rads or string)
    det - the detector, e.g. 'H1'
    tstart - a GPS start time for the signal
    duration - a signal duration (seconds)
    Sn - a one-sided power spectral density
    dt - timestep for antenna response calculation [default: 600s]

    Returns
    -------
    h0 - an estimate of signal amplitude required to give the input SNR
    """

    # if duration is greater than a sidereal day then just calculate the antenna
    # pattern for 1 sidereal day plus the remainder (the antenna pattern repeats
    # with a sidereal-day period)
    sidday = 86164.0905  # one sidereal day in seconds
    Ndays = duration / sidday
    tend = tstart + duration

    sFp = 0
    sFc = 0

    # get antenna pattern over one whole sidereal day and sum over the square of
    # it, scaled by the number of complete sidereal days observed
    if Ndays > 1:
        tts = np.arange(tstart, tstart + sidday, dt)

        Fp, Fc = antenna_response(tts, source["ra"], source["dec"], source["psi"], det)

        sFp = np.floor(Ndays) * np.sum(Fp**2)
        sFc = np.floor(Ndays) * np.sum(Fc**2)

    # add on the extra fractional part of a day (this is the whole observation
    # when duration < 1 sidereal day).
    # NOTE: this previously tested np.floor(Ndays) * sidday > dt, i.e. the span
    # of the *complete* days rather than the remainder, so for durations shorter
    # than a sidereal day nothing was summed and the result was infinite.
    if duration - np.floor(Ndays) * sidday > dt:
        tts = np.arange(tstart + sidday * np.floor(Ndays), tend, dt)

        Fp, Fc = antenna_response(tts, source["ra"], source["dec"], source["psi"], det)

        sFp += np.sum(Fp**2)
        sFc += np.sum(Fc**2)

    # invert the matched-filter SNR formula to get h0 squared
    h02 = (snr**2 * Sn) / (
        dt
        * (
            sFp * (0.5 * (1 + source["cosiota"] ** 2)) ** 2
            + sFc * source["cosiota"] ** 2
        )
    )

    return np.sqrt(h02)
3223 
3224 
def pulsar_posterior_grid(
    dets,
    ts,
    data,
    ra,
    dec,
    sigmas=None,
    paramranges=None,
    datachunks=30,
    chunkmin=5,
    ngrid=50,
    outputlike=False,
):
    """
    A function to calculate the 4-parameter posterior probability density for a continuous wave signal
    given a set of processed data from a set of detectors.

    Inputs
    ------
    dets - a list of strings containing the detectors being used in the likelihood calculation
    ts - a dictionary of 1d numpy arrays containing the GPS times of the data for each detector
    data - a dictionary of 1d numpy arrays containing the complex data values for each detector
    ra - the right ascension (in rads) of the source
    dec - the declination (in rads) of the source
    sigmas - a dictionary of 1d numpy arrays containing the times series of standard deviation
             values for each detector. If this is not given a Student's t likelihood will be used,
             but if it is given a Gaussian likelihood will be used (default: None)
    paramranges - a dictionary of tuples for each parameter ('h0', 'phi0', 'psi' and 'cosiota') giving
                  the lower and upper ranges of the parameter grid and the number of grid points. If not
                  given then defaults will be used. (default: None)
    datachunks - in the calculation split the data into stationary chunks with a maximum length given
                 by this value. If set to 0 or inf then the data will not be split. (default: 30)
    chunkmin - this is the shortest chunk length allowed to be included in the likelihood calculation
               (default: 5)
    ngrid - the number of grid points to use for each dimension of the likelihood calculation. This
            is used if the values are not specified in the paramranges argument (default: 50)
    outputlike - output the log likelihoods rather than posteriors (default: False)

    Returns
    -------
    L - The 4d posterior (or likelihood) over all parameters
    h0pdf - The 1d marginal posterior for h0
    phi0pdf - The 1d marginal posterior for phi0 (the rotation frequency, not GW frequency)
    psipdf - The 1d marginal posterior for psi
    cosiotapdf - The 1d marginal posterior for cosiota
    lingrids - A dictionary of the grid points for each parameter
    sigev - The log evidence for the signal model
    noiseev - The log evidence for the noise model

    An example would be:
    # set the detectors
    dets = ['H1', 'L1']

    # set the time series and data
    ts = {}
    data = {}
    for det in dets:
        ts[det] = np.arange(900000000., 921000843., 60.)
        data[det] = np.random.randn(len(ts[det])) + 1j*np.random.randn(len(ts[det]))

    # set the parameter ranges
    ra = 0.2
    dec = -0.8
    paramranges = {}
    paramranges['h0'] = (0., 2., 50)
    paramranges['psi'] = (0., np.pi/2., 50)
    paramranges['phi0'] = (0., np.pi, 50)
    paramranges['cosiota'] = (-1., 1., 50)

    L, h0pdf, phi0pdf, psipdf, cosiotapdf, grid, sigev, noiseev = pulsar_posterior_grid(dets, ts, data, ra, dec,
                                                                                        paramranges=paramranges)
    """

    from scipy.special import gammaln

    # work on a copy of paramranges so that neither the caller's dictionary nor
    # a shared default object is ever mutated (previously a mutable {} default
    # argument was modified in place, leaking defaults between calls)
    paramranges = {} if paramranges is None else dict(paramranges)

    # set the likelihood to either Student's t or Gaussian
    if sigmas is None:
        liketype = "studentst"
    else:
        liketype = "gaussian"

    # if dets is just a single string
    if not isinstance(dets, list):
        if isinstance(dets, string_types):
            dets = [dets]  # make into list
        else:
            print("Detector not, or incorrectly, set", file=sys.stderr)
            return

    # allowed list of detectors
    alloweddets = ["H1", "L1", "V1", "G1", "T1", "H2"]

    # check consistency of arguments
    for det in dets:
        if det not in alloweddets:
            print(
                "Detector not in list of allowed detectors ("
                + ",".join(alloweddets)
                + ")",
                file=sys.stderr,
            )
            return

        # checks on time stamps
        if det not in ts:
            print("No time stamps given for detector %s" % det, file=sys.stderr)
            return

        # checks on data
        if det not in data:
            print("No data time series given for detector %s" % det, file=sys.stderr)
            return

        # checks on sigmas
        if sigmas is not None:
            if det not in sigmas:
                print(
                    "No sigma time series given for detector %s" % det, file=sys.stderr
                )
                return

        # check length consistency (bail out: previously this only warned and
        # then carried on, which would produce nonsense results downstream)
        if len(ts[det]) != len(data[det]):
            print(
                "Length of times stamps array and data array are inconsistent for %s"
                % det,
                file=sys.stderr,
            )
            return

        if sigmas is not None:
            if len(ts[det]) != len(sigmas[det]):
                print(
                    "Length of times stamps array and sigma array are inconsistent for %s"
                    % det,
                    file=sys.stderr,
                )
                return

    # setup grid on parameter space
    params = ["h0", "phi0", "psi", "cosiota"]
    defaultranges = {}  # default ranges for use if not specified
    defaultranges["phi0"] = (0.0, np.pi, ngrid)  # 0 to pi for phi0
    defaultranges["psi"] = (0.0, np.pi / 2.0, ngrid)  # 0 to pi/2 for psi
    defaultranges["cosiota"] = (-1.0, 1.0, ngrid)  # -1 to 1 for cos(iota)
    # 0 to 2 times the max standard deviation of the data (scaled to the data length)
    defaultranges["h0"] = (
        0.0,
        2.0
        * np.max([np.std(data[dv]) for dv in data])
        * np.sqrt(1440.0 / np.max([len(ts[dtx]) for dtx in ts])),
        ngrid,
    )
    lingrids = {}
    shapetuple = ()
    for param in params:
        if param in paramranges:
            if len(paramranges[param]) != 3:
                paramranges[param] = defaultranges[param]  # set to default range
            else:
                if (
                    paramranges[param][1] < paramranges[param][0]
                    or paramranges[param][2] < 1
                ):
                    print(
                        "Parameter ranges wrong for %s, reverting to defaults" % param,
                        file=sys.stderr,
                    )
                    paramranges[param] = defaultranges[param]
        else:  # use defaults
            paramranges[param] = defaultranges[param]

        lingrids[param] = np.linspace(
            paramranges[param][0], paramranges[param][1], paramranges[param][2]
        )
        shapetuple += (paramranges[param][2],)

    # set up meshgrid on parameter space
    [H0S, PHI0S, PSIS, COSIS] = np.meshgrid(
        lingrids["h0"],
        lingrids["phi0"],
        lingrids["psi"],
        lingrids["cosiota"],
        indexing="ij",
    )

    # amplitude/phase/polarisation factors precomputed on the full 4-d grid
    APLUS = 0.5 * (1.0 + COSIS**2)
    ACROSS = COSIS
    SINPHI = np.sin(2.0 * PHI0S)  # multiply phi0 by two to get GW frequency
    COSPHI = np.cos(2.0 * PHI0S)
    SIN2PSI = np.sin(2.0 * PSIS)
    COS2PSI = np.cos(2.0 * PSIS)

    Apsinphi_2 = 0.5 * APLUS * SINPHI
    Acsinphi_2 = 0.5 * ACROSS * SINPHI
    Apcosphi_2 = 0.5 * APLUS * COSPHI
    Accosphi_2 = 0.5 * ACROSS * COSPHI
    Acpsphicphi_2 = 0.5 * APLUS * ACROSS * COSPHI * SINPHI

    C2PS2P = COS2PSI * SIN2PSI
    C2P2 = COS2PSI**2
    S2P2 = SIN2PSI**2

    H02 = H0S**2

    # initialise loglikelihood
    like = np.zeros(shapetuple)
    noiselike = 0.0

    # get flat prior
    logprior = 0.0
    for p in params:
        logprior -= np.log(paramranges[p][1] - paramranges[p][0])

    # loop over detectors and calculate the log likelihood
    for det in dets:
        if liketype == "gaussian":
            nstd = sigmas[det]
        else:
            nstd = np.ones(len(ts[det]))

        # get the antenna pattern
        [As, Bs] = antenna_response(ts[det], ra, dec, 0.0, det)

        # split the data into chunks if required
        if np.isfinite(datachunks) and datachunks > 0:
            chunklengths = get_chunk_lengths(ts[det], datachunks)
        else:
            chunklengths = [len(ts[det])]

        startidx = 0
        for cl in chunklengths:
            endidx = startidx + cl

            # check for shortest allowed chunks; the start index must still be
            # advanced, otherwise every subsequent chunk would be read from the
            # wrong (stale) data segment (previously the index was not advanced
            # here)
            if cl < chunkmin:
                startidx += cl
                continue

            # data and antenna patterns whitened by the noise standard deviation
            thisdata = data[det][startidx:endidx] / nstd[startidx:endidx]
            ast = As[startidx:endidx] / nstd[startidx:endidx]
            bst = Bs[startidx:endidx] / nstd[startidx:endidx]

            A = np.sum(ast**2)
            B = np.sum(bst**2)
            AB = np.dot(ast, bst)

            dA1real = np.dot(thisdata.real, ast)
            dA1imag = np.dot(thisdata.imag, ast)

            dB1real = np.dot(thisdata.real, bst)
            dB1imag = np.dot(thisdata.imag, bst)

            dd1real = np.sum(thisdata.real**2)
            dd1imag = np.sum(thisdata.imag**2)

            P1 = A * C2P2 + B * S2P2 + 2.0 * AB * C2PS2P
            P2 = B * C2P2 + A * S2P2 - 2.0 * AB * C2PS2P
            P3 = AB * (C2P2 - S2P2) - A * C2PS2P + B * C2PS2P

            hr2 = (Apcosphi_2**2) * P1 + (Acsinphi_2**2) * P2 + Acpsphicphi_2 * P3
            hi2 = (Apsinphi_2**2) * P1 + (Accosphi_2**2) * P2 - Acpsphicphi_2 * P3

            d1hr = Apcosphi_2 * (dA1real * COS2PSI + dB1real * SIN2PSI) + Acsinphi_2 * (
                dB1real * COS2PSI - dA1real * SIN2PSI
            )
            d1hi = Apsinphi_2 * (dA1imag * COS2PSI + dB1imag * SIN2PSI) - Accosphi_2 * (
                dB1imag * COS2PSI - dA1imag * SIN2PSI
            )

            chiSq = dd1real + dd1imag + H02 * (hr2 + hi2) - 2.0 * H0S * (d1hr + d1hi)

            if liketype == "gaussian":
                like -= 0.5 * (chiSq)
                like -= (cl * np.log(2.0 * np.pi)) + 2.0 * np.sum(
                    np.log(nstd[startidx:endidx])
                )
                noiselike -= 0.5 * (dd1real + dd1imag)
                noiselike -= (cl * np.log(2.0 * np.pi)) + 2.0 * np.sum(
                    np.log(nstd[startidx:endidx])
                )
            else:
                # Student's t likelihood (noise level marginalised per chunk)
                like -= float(cl) * np.log(chiSq)
                like += gammaln(cl) - np.log(2.0) - cl * np.log(np.pi)
                noiselike -= float(cl) * np.log(dd1real + dd1imag)
                noiselike += gammaln(cl) - np.log(2.0) - cl * np.log(np.pi)

            startidx += cl  # updated start index

    # convert to posterior
    like += logprior

    # get signal evidence
    sigev = marginal(like, "all", params, lingrids)

    # normalise posterior
    if not outputlike:
        like -= sigev
    else:
        like -= logprior  # remove prior if outputting likelihood

    # get marginal posteriors for each parameter
    posts = {}
    for p in params:
        if not outputlike:
            posts[p] = np.exp(marginal(like, p, params, lingrids))
        else:
            posts[p] = marginal(
                like, p, params, lingrids, multflatprior=True
            )  # output individual log likelihoods

    return (
        like,
        posts["h0"],
        posts["phi0"],
        posts["psi"],
        posts["cosiota"],
        lingrids,
        sigev,
        noiselike,
    )
3555 
3556 
# current version of the ATNF pulsar catalogue; interpolated into the psrcat
# query URL built by get_atnf_info()
ATNF_VERSION = "1.58"
3559 
3560 
def get_atnf_info(psr):
    """
    Get the pulsar (psr) distance (DIST in kpc), proper motion corrected period derivative (P1_I) and any association
    (ASSOC e.g. GC) from the ATNF catalogue.

    Returns a tuple (dist, p1_I, assoc, atnfurl), with None for any value not
    found, or None overall if the query fails or the pulsar is not in the
    catalogue.
    """

    import requests

    psrname = re.sub(r"\+", "%2B", psr)  # switch '+' for unicode character

    atnfurl = (
        "http://www.atnf.csiro.au/people/pulsar/psrcat/proc_form.php?version="
        + ATNF_VERSION
    )
    atnfurl += "&Dist=Dist&Assoc=Assoc&P1_i=P1_i"  # set parameters to get
    atnfurl += (
        "&startUserDefined=true&c1_val=&c2_val=&c3_val=&c4_val=&sort_attr=jname&sort_order=asc&condition=&pulsar_names="
        + psrname
    )
    atnfurl += "&ephemeris=selected&submit_ephemeris=Get+Ephemeris&coords_unit=raj%2Fdecj&radius=&coords_1=&coords_2="
    atnfurl += "&style=Long+with+last+digit+error&no_value=*&fsize=3&x_axis=&x_scale=linear&y_axis=&y_scale=linear&state=query"

    try:
        urldat = requests.get(atnfurl)  # read ATNF url
        # extract data within the <pre> environment (without using BeautifulSoup),
        # see e.g. http://stackoverflow.com/a/3369000/1862861 and
        # http://stackoverflow.com/a/20046030/1862861
        predat = re.search(r"<pre[^>]*>([^<]+)</pre>", str(urldat.content))
        # remove preceding and trailing new lines and split lines; str() of the
        # bytes content leaves literal backslash-n sequences, hence r"\n"
        pdat = predat.group(1).strip().split(r"\n")
    except Exception:
        # narrow catch: previously a bare except also swallowed KeyboardInterrupt
        print(
            "Warning... could not get information from ATNF pulsar catalogue.",
            file=sys.stderr,
        )
        return None

    # check whether information could be found and get distance, intrinsic period
    # derivative and association from the data
    dist = None
    p1_I = None
    assoc = None
    for line in [p for p in pdat if len(p) != 0]:
        if "WARNING" in line or "not in catalogue" in line:
            return None
        vals = line.split()
        if len(vals) < 2:  # guard against lines with no value field
            continue
        if "DIST" in vals[0]:
            dist = float(vals[1])
        if "P1_I" in vals[0]:
            # fix: this value was previously assigned to an unused 'age'
            # variable, so p1_I was always returned as None
            p1_I = float(vals[1])
        if "ASSOC" in vals[0]:
            assoc = vals[1]

    return (dist, p1_I, assoc, atnfurl)
3614 
3615 
def cov_to_cor(cov):
    """
    Convert a covariance matrix to a correlation coefficient matrix.

    Returns the correlation coefficient matrix along with the vector of
    standard deviations taken from the diagonal of the covariance matrix.
    """

    # the standard deviations are the square roots of the variances on the
    # diagonal of the covariance matrix
    stddevs = np.sqrt(np.diag(cov))

    # build the diagonal standard-deviation matrix, invert it, and rescale the
    # covariance on both sides: C_cor = D^-1 . cov . D^-1
    invD = np.linalg.inv(stddevs * np.identity(cov.shape[0]))
    cormatrix = np.dot(np.dot(invD, cov), invD)

    return cormatrix, stddevs
#define min(a, b)
def __init__(self, parfilenm)
This class parses a TEMPO(2)-style pulsar parameter file.
def __init__(self, priorfilenm)
long long count
Definition: hwinject.c:371
def heterodyned_pulsar_signal(pardict, detector, starttime=900000000.0, duration=86400.0, dt=60.0, datatimes=None)
def quadrupole_to_h0(q22, freq, dist)
def rad_to_hms(rad)
rad_to_hms(rad): Convert radians to hours, minutes, and seconds of arc.
def dec_to_rad(dec_string)
dec_to_rad(dec_string): Given a string containing DEC information as 'dd:mm:ss.ssss',...
def hms_to_rad(hour, mins, sec)
hms_to_rad(hour, min, sec): Convert hours, minutes, and seconds of arc to radians
def cov_to_cor(cov)
Convert a covariance matrix to a correlation coefficient matrix.
def pulsar_mcmc_to_posterior(chainfiles)
def heterodyned_pinsf_pulsar(starttime, duration, dt, detector, pardict)
def plot_posterior_hist(poslist, param, ifos, parambounds=[float("-inf"), float("inf")], nbins=50, upperlimit=0, overplot=False, parfile=None, mplparams=False)
def dms_to_rad(deg, mins, sec)
dms_to_rad(deg, min, sec): Convert degrees, minutes, and seconds of arc to radians.
def pulsar_posterior_grid(dets, ts, data, ra, dec, sigmas=None, paramranges={}, datachunks=30, chunkmin=5, ngrid=50, outputlike=False)
A function to calculate the 4-parameter posterior probability density for a continuous wave signal gi...
def rad_to_string(rad, ra_or_dec)
rad_to_string(rad, ra_or_dec): Convert an angle in radians to hours/degrees, minutes seconds and outp...
def ra_to_rad(ra_string)
ra_to_rad(ar_string): Given a string containing RA information as 'hh:mm:ss.ssss',...
def pferrs(porf, porferr, pdorfd=None, pdorfderr=None)
pferrs(porf, porferr, pdorfd=None, pdorfderr=None): Calculate the period or frequency errors and the ...
def plot_limits_hist(lims, param, ifos, prevlims=None, bins=20, overplot=False, mplparams=False)
def marginalise(like, pname, pnames, ranges)
def pulsar_estimate_snr(source, det, tstart, duration, Sn, dt=1800)
A function to estimate the signal-to-noise ratio of a pulsar signal (for a triaxial neutron star emit...
def heterodyned_triaxial_pulsar(starttime, duration, dt, detector, pardict)
def convert_model_parameters(pardict)
def coord_to_string(h_or_d, m, s)
coord_to_string(h_or_d, m, s): Return a formatted string of RA or DEC values as 'hh:mm:ss....
def logtrapz(lnf, dx)
Perform trapezium rule integration for the logarithm of a function on a regular grid.
def pulsar_nest_to_posterior(postfile, nestedsamples=False, removeuntrig=True)
This function will import a posterior sample file created by lalapps_nest2pos (or a nested sample fil...
def phipsiconvert(phipchain, psipchain)
def upper_limit_greedy(pos, upperlimit=0.95, nbins=100)
def marginal(lnlike, pname, pnames, ranges, multflatprior=False)
def pulsar_estimate_h0_from_snr(snr, source, det, tstart, duration, Sn, dt=600)
A function to estimate the signal amplitude of a pulsar signal (for a triaxial neutron star emitting ...
def plot_posterior_hist2D(poslist, params, ifos, bounds=None, nbins=[50, 50], parfile=None, overplot=False, mplparams=False)
def get_chunk_lengths(ts, chunkMax)
def tukey_window(N, alpha=0.5)
def read_hist_from_file(histfile)
def h0_to_quadrupole(h0, freq, dist)
def hist_norm_bounds(samples, nbins, low=float("-inf"), high=float("inf"))
def plot_2Dhist_from_file(histfile, ndimlabel, mdimlabel, margpars=True, mplparams=False)
def spin_down_limit(freq, fdot, dist)
def inject_pulsar_signal(starttime, duration, dt, detectors, pardict, freqfac=[2.0], npsds=None, snrscale=None)
def antenna_response(gpsTime, ra, dec, psi, det)
def upper_limit(pos, upperlimit=0.95, parambounds=[float("-inf"), float("inf")], nbins=50)
def plot_posterior_chain(poslist, param, ifos, grr=None, withhist=0, mplparams=False)
def get_atnf_info(psr)
Get the pulsar (psr) distance (DIST in kpc), proper motion corrected period derivative (P1_I) and any...
def plot_h0_lims(h0lims, f0gw, ifos, xlims=[10, 1500], ulesttop=None, ulestbot=None, prevlim=None, prevlimf0gw=None, overplot=False, mplparams=False)
def dms_to_deg(deg, mins, sec)
dms_to_deg(deg, min, sec): Convert degrees, minutes, and seconds of arc to degrees.
def rad_to_dms(rad)
rad_to_dms(rad): Convert radians to degrees, minutes, and seconds of arc.
def plot_Bks_ASDs(Bkdata, delt=86400, plotpsds=True, plotfscan=False, removeoutlier=None, mplparams=False)
def p_to_f(p, pd, pdd=None)
p_to_f(p, pd, pdd=None): Convert period, period derivative and period second derivative to the equiva...
def h0ul_from_prior_file(priorfile, ulval=0.95)
def h0_to_ellipticity(h0, freq, dist)
#define max(a, b)