"""
Classes needed for the known pulsar search pipeline.

(C) 2006, 2015 Matthew Pitkin
"""

# make print statements python 3-proof
from __future__ import print_function, division

__author__ = "Matthew Pitkin <matthew.pitkin@ligo.org>"
__version__ = "$Revision$"

# standard library modules used throughout the pipeline classes
import os, sys, ast, json, pickle, re, shutil, socket, smtplib, uuid
import subprocess as sp
import urllib.parse as urlparse
from copy import deepcopy
from collections import OrderedDict
from configparser import RawConfigParser

import numpy as np
from scipy import optimize

from lal import pipeline
from lalpulsar import pulsarpputils as pppu
# set some specific error codes and messages
KNOPE_ERROR_GENERAL = -1
KNOPE_ERROR_NO_SEGMENTS = 2

KNOPE_ERROR_GENERAL_MSG = "Error... an error has occurred during DAG creation"
KNOPE_ERROR_NO_SEGMENTS_MSG = (
    "No required data segments were available to perform the analysis"
)

# map error codes to their messages (dictionary name reconstructed here)
KNOPE_ERROR_MESSAGES = {
    KNOPE_ERROR_GENERAL: KNOPE_ERROR_GENERAL_MSG,
    KNOPE_ERROR_NO_SEGMENTS: KNOPE_ERROR_NO_SEGMENTS_MSG,
}

# a warning (rather than error) code for missing segments
KNOPE_WARNING_NO_SEGMENTS = 102
class knopeDAG(pipeline.CondorDAG):
    """
    Class for setting up the DAG for the whole known pulsar search pipeline
    """

    def __init__(self, cp, configfilename, pulsarlist=None):
        """
        Initialise with a ConfigParser cp object and the filename of the config file.

        If an error occurs the error_code variable will be set to -1. The value will stay
        as 0 on success.
        """

        if pulsarlist is not None:
            if not isinstance(pulsarlist, list):
                print("Error... 'pulsarlist' argument must be 'None' or a list.")
        # check whether only the post-processing part of the pipeline is required
        self.postonly = self.get_config_option(
            "analysis", "postprocessing_only", cftype="boolean", default=False
        )

        prevdag = None
        if self.postonly:
            preprocessed_pickle = self.get_config_option(
                "analysis", "preprocessed_pickle_object"
            )

            if preprocessed_pickle is None:
                print(
                    "Error... trying post-processing only, but no previous pickle file is given",
                    file=sys.stderr,
                )
            else:
                try:
                    fp = open(preprocessed_pickle, "rb")
                    prevdag = pickle.load(fp)
                    fp.close()
                except:
                    print(
                        "Error... trying post-processing only, but previous pickle file '%s' cannot be read in"
                        % preprocessed_pickle,
                        file=sys.stderr,
                    )
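        # Illustrative configuration (the pickle path is a placeholder): a post-processing
        # only run re-uses the DAG information pickled by an earlier pre-processing run, e.g.
        #   [analysis]
        #   postprocessing_only = True
        #   preprocessed_pickle_object = /path/to/previous/run.pickle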
110 "analysis",
"run_dir", cftype=
"dir", default=os.getcwd()
        allowed_ifos = ["H1", "H2", "L1", "G1", "V1", "T1", "K1"]
        for ifo in self.ifos:
            if ifo not in allowed_ifos:
                print(
                    "Error... you have specified an unknown IFO '%s'" % ifo,
                    file=sys.stderr,
                )

        if self.postonly:
            for ifo in self.ifos:
                if ifo not in prevdag.ifos:
                    print(
                        "Error... for 'post-processing-only' the current IFOs must be a subset of those in the previous run",
                        file=sys.stderr,
                    )
147 "analysis",
"freq_factors", cftype=
"list", default=[2.0]
151 "Warning... only up to two frequency factors can be given. Defaulting to [2.]"
157 "Warning... if giving two frequency factors they must be [1., 2.]. Defaulting to this"
163 "Warning... frequency factors cannot be negative. Defaulting to [2.]"
171 if ff
not in prevdag.freq_factors:
173 "Error... for 'post-processing-only' the current frequency factors must be a subset of those in the previous run",
182 "analysis",
"preprocessing_base_dir", cftype=
"dict"
191 for ifo
in self.
ifos:
204 for ifo
in self.
ifos:
208 for ifo
in self.
ifos:
"Error... 'starttime' must either be a single 'int', or a dictionary containing all detectors with an integer or list of integers.",
218 if isinstance(self.
starttime[stkey], int):
221 elif isinstance(self.
starttime[stkey], list):
224 [v
for v
in self.
starttime[stkey]
if isinstance(v, int)]
"Error... 'starttime' must either be a single 'int', or a dictionary containing all detectors with an integer or list of integers.",
"Error... 'starttime' must either be a single 'int', or a dictionary containing all detectors with an integer or list of integers.",
"Error... 'starttime' must either be a single 'int', or a dictionary containing all detectors with an integer or list of integers.",
251 if isinstance(self.
endtime, int):
253 for ifo
in self.
ifos:
256 elif isinstance(self.
endtime, dict):
257 for ifo
in self.
ifos:
"Error... 'endtime' must either be a single 'int', or a dictionary containing all detectors with an integer or list of integers.",
266 for etkey
in dict(self.
endtime):
267 if isinstance(self.
endtime[etkey], int):
270 elif isinstance(self.
endtime[etkey], list):
273 [v
for v
in self.
endtime[etkey]
if isinstance(v, int)]
"Error... 'endtime' must either be a single 'int', or a dictionary containing all detectors with an integer or list of integers.",
"Error... 'endtime' must either be a single 'int', or a dictionary containing all detectors with an integer or list of integers.",
"Error... 'endtime' must either be a single 'int', or a dictionary containing all detectors with an integer or list of integers.",
300 for ifo
in self.
ifos:
303 "Error... 'starttime' and 'endtime' have an inconsistent number of entries.",
314 "analysis",
"preprocessing_engine", default=
"heterodyne"
319 if self.
engine not in [
"heterodyne",
"splinter"]:
321 "Warning... 'preprocessing_engine' value '%s' not recognised. Defaulting to 'heterodyne'."
324 self.
engine =
"heterodyne"
326 self.
engine = prevdag.engine
330 defaultephempath = os.environ[
"LALPULSAR_DATADIR"]
332 defaultephempath =
None
334 "analysis",
"ephem_path", cftype=
"dir", default=defaultephempath
341 "condor",
"accounting_group", default=
"ligo.prod.o1.cw.targeted.bayesian"
348 "Error... the 'accounting_group' should contain 'cw.targeted.bayesian'",
354 if cp.has_option(
"condor",
"accounting_group_user"):
364 "analysis",
"run_dir", default=os.getcwd()
376 uniqueid =
str(uuid.uuid4().hex)
378 "analysis",
"dag_name", default=
"knope-" + uniqueid +
".log"
383 daglog =
"known_pulsar_pipeline-" + uniqueid +
".log"
386 pipeline.CondorDAG.__init__(self, self.
daglogfile)
390 "analysis",
"dag_name", default=
"knope-" + uniqueid
395 dagname =
"known_pulsar_pipeline-" + uniqueid
396 self.set_dag_file(os.path.join(self.
run_dir, dagname))
400 "analysis",
"autonomous", cftype=
"boolean", default=
False
407 "analysis",
"autonomous_initial_start", cftype=
"int"
414 for ifo
in self.
ifos:
429 "Error... pulsar parameter file/directory '%s' does not exist!"
"Error... for 'post-processing-only' the pulsar parameter directory must be the same as in the previous run",
462 and ".mod_" not in pf
467 "Error... no pulsar parameter files found in '%s'"
481 "Error... pulsar parameter file or directory '%s' does not exist"
500 psr = pppu.psr_par(par)
503 "Could not read in parameter file '%s'. Skipping this pulsar."
510 if "PSRJ" not in psr.__dict__:
512 "Could not read 'PSRJ' value from '%s'. Skipping this pulsar."
519 if pulsarlist
is not None:
520 if psr[
"PSRJ"]
not in pulsarlist:
524 if "BINARY" in psr.__dict__:
525 bintype = psr[
"BINARY"]
538 "Binary type '%s' in '%s' is not recognised. Skipping this pulsar."
"Binary type '%s' is currently not recognised" % bintype
547 if psr[
"EPHEM"] !=
None:
548 if psr[
"EPHEM"]
not in [
"Unrecognised ephemeris '%s' in '%s'. Skipping this source"
559 % (psr[
"EPHEM"], par)
"Unrecognised ephemeris '%s'" % psr["EPHEM"]
566 if psr[
"UNITS"] !=
None:
567 if psr[
"UNITS"]
not in [
"TCB",
"TDB"]:
"Unrecognised time units '%s' in '%s'. Skipping this source"
570 % (psr[
"UNITS"], par)
"Unrecognised time units '%s'" % psr["UNITS"]
580 modtimefile = os.path.join(
581 os.path.dirname(par),
".mod_" + os.path.basename(par)
585 if not os.path.isfile(modtimefile):
590 modtime = os.stat(par).st_mtime
592 {
"file": modtimefile,
"time":
str(modtime)}
595 parmodtime =
str(os.stat(par).st_mtime)
596 fm = open(modtimefile,
"r")
598 oldmodtime = fm.readline().strip()
601 "Warning... could not read modification time from '%s'. Assuming file is modified"
604 oldmodtime = -1.23456789
607 if parmodtime == oldmodtime:
611 {
"file": modtimefile,
"time": parmodtime}
615 if pulsarlist
is not None:
618 "Could not find any of the listed pulsars '[%s]' in the .par file directory '%s'."
636 "analysis",
"preprocessing_only", cftype=
"boolean", default=
False
644 if pulsarlist
is None:
652 for psrl
in pulsarlist:
653 if psrl
not in prevdag.analysed_pulsars:
655 "Error... specified pulsar '%s' could not be found in previous run pickle file '%s'."
656 % (psrl, preprocessed_pickle)
661 if psrl
in prevdag.unmodified_pulsars:
663 if psrl
in prevdag.modified_pulsars:
672 fm = open(pitem[
"file"],
"w")
673 fm.write(pitem[
"time"])
678 p = sp.Popen(
"cat " + sfs[0] +
" >> " + sfs[1], shell=
True)
680 if p.returncode != 0:
682 "Warning... could not append segments to previous segments file. No log of previous segments will be available."
698 fm = open(pitem[
"file"],
"w")
699 fm.write(pitem[
"time"])
704 p = sp.Popen(
"cat " + sfs[0] +
" >> " + sfs[1], shell=
True)
706 if p.returncode != 0:
708 "Warning... could not append segments to previous segments file. No log of previous segments will be available."
712 fpa = open(os.path.join(self.
run_dir,
"analysed_pulsars.txt"),
"w")
716 fps = open(os.path.join(self.
run_dir,
"skipped_pulsars.txt"),
"w")
725 "Warning... email address '%s' is invalid. No notification will be sent."
736 HOST = socket.getfqdn()
737 USER = os.environ[
"USER"]
738 FROM = USER +
"@" + HOST
740 FROM =
"matthew.pitkin@ligo.org"
742 subject =
"lalpulsar_knope: successful setup"
744 "Hi User,\n\nYour analysis using configuration file '%s' has successfully setup the analysis. Once complete the results will be found at %s.\n\nRegards\n\nlalpulsar_knope\n"
748 emailtemplate =
"From: {0}\nTo: {1}\nSubject: {2}\n\n{3}"
749 message = emailtemplate.format(FROM, email, subject, messagetxt)
750 server = smtplib.SMTP(
"localhost")
751 server.sendmail(FROM, email, message)
754 print(
"Warning... could not send notification email.")
758 Setup the results webpage creation
781 default=
"/usr/bin/lalpulsar_knope_result_page",
784 "results_page",
"universe", default=
"local"
788 if not os.path.isfile(self.
results_exec)
or not os.access(
792 "Warning... 'results_exec' in '[results_page]' does not exist or is not an executable. Try finding code in path."
796 if resultexec ==
None:
798 "Error... could not find 'lalpulsar_knope_result_page' in 'PATH'",
809 default=
"/usr/bin/lalpulsar_knope_collate_results",
813 if not os.path.isfile(self.
collate_exec)
or not os.access(
817 "Warning... 'collate_exec' in '[results_page]' does not exist or is not an executable. Try finding code in path."
819 collateexec = self.
find_exec_file(
"lalpulsar_knope_collate_results")
821 if collateexec ==
None:
823 "Error... could not find 'lalpulsar_knope_collate_results' in 'PATH'",
833 "analysis",
"injections", cftype=
"boolean", default=
False
836 "pe",
"use_gw_phase", cftype=
"boolean", default=
False
843 "results_page",
"upper_limit", cftype=
"int", default=95
848 "results_page",
"show_all_posteriors", cftype=
"boolean", default=
False
853 "results_page",
"subtract_truths", cftype=
"boolean", default=
False
858 "results_page",
"show_priors", cftype=
"boolean", default=
False
864 "results_page",
"copy_all_files", cftype=
"boolean", default=
False
895 cpc = RawConfigParser()
900 cpc.add_section(
"output")
901 cpc.add_section(
"input")
902 cpc.add_section(
"general")
911 "results_page",
"sort_value", default=
"name"
913 cpc.set(
"general",
"sort_value", sorttype)
915 "results_page",
"sort_direction", default=
"ascending"
917 cpc.set(
"general",
"sort_direction", sortdirection)
919 cpc.set(
"general",
"detectors", [
"Joint"])
921 cpc.set(
"general",
"detectors", self.
ifos)
923 cdets = deepcopy(self.
ifos)
924 cdets.append(
"Joint")
925 cpc.set(
"general",
"detectors", cdets)
929 "results_page",
"parameters", cftype=
"list", default=[
"f0"]
931 cpc.set(
"general",
"parameters", paramout)
935 "results_page",
"results", cftype=
"list", default=[
"h0ul"]
937 cpc.set(
"general",
"results", resout)
941 fp = open(cinifile,
"w")
946 "Error... could not write configuration file '%s' for results collation page"
985 if os.path.isfile(jsonfile):
990 jsonfile +
"_%d" % list(self.
starttime.values())[0],
994 "Warning... could not copy previous results JSON file '%s'. Previous results may get overwritten."
1001 pinfo = pppu.get_atnf_info(pname)
1002 if pinfo
is not None:
1003 dist, p1_I, assoc, _ = pinfo
1005 psrinfo[
"Pulsar data"] = {}
1006 psrinfo[
"Pulsar data"][
"DIST"] = dist
1007 psrinfo[
"Pulsar data"][
"P1_I"] = p1_I
1008 psrinfo[
"Pulsar data"][
"ASSOC"] = assoc
1011 fp = open(jsonfile,
"w")
1012 json.dump(psrinfo, fp, indent=2)
1016 "Warning... could not write out ATNF catalogue information to JSON file '%s'."
1021 cp = RawConfigParser()
1026 cp.add_section(
"general")
1027 cp.add_section(
"parameter_estimation")
1028 cp.add_section(
"data")
1029 cp.add_section(
"output")
1030 cp.add_section(
"plotting")
1044 cp.set(
"general",
"detectors", self.
ifos)
1051 "general",
"joint_only",
True
1054 cp.set(
"general",
"joint_only",
False)
1058 "general",
"with_joint",
False
1062 "general",
"with_joint",
True
1067 "general",
"with_background",
True
1071 "general",
"with_background",
False
1075 cp.set(
"general",
"injection",
True)
1077 cp.set(
"general",
"injection",
False)
1081 "general",
"use_gw_phase",
True
1084 cp.set(
"general",
"use_gw_phase",
False)
1102 posteriorsfiles = {}
1105 if copydir
is not None:
1106 copydirpos = os.path.join(copydir,
"posteriors")
1110 dets = comb[
"detectors"]
1111 detprefix = comb[
"prefix"]
1121 posteriorsfiles[det] = os.path.join(posteriorsfiles[det], detprefix)
1123 if copydirpos
is not None:
1124 self.
mkdirs(os.path.join(copydirpos, detprefix))
1127 backgrounddir[det] = os.path.join(
1130 backgrounddir[det] = os.path.join(backgrounddir[det], detprefix)
1134 dirpostfix =
"multiharmonic"
1143 posteriorsfiles[det] = os.path.join(posteriorsfiles[det], dirpostfix)
1144 posteriorsfiles[det] = os.path.join(
1145 posteriorsfiles[det],
"posterior_samples_%s.hdf" % pname
1148 if copydirpos
is not None:
1149 copydirposp = os.path.join(copydirpos, detprefix)
1150 copydirposp = os.path.join(copydirposp, dirpostfix)
1153 cpnode.set_source(posteriorsfiles[det])
1154 cpnode.set_destination(copydirposp)
1156 cpnode.add_parent(n2pnode)
1157 self.add_node(cpnode)
1161 if os.path.isfile(posteriorsfiles[det]):
1164 posteriorsfiles[det],
1165 posteriorsfiles[det].strip(
".hdf")
1166 +
"_%d.hdf" % list(self.
starttime.values())[0],
1170 "Warning... could not create copy of current posterior samples file '%s'. This will get overwritten on next autonomous run."
1171 % posteriorsfiles[det],
1176 backgrounddir[det] = os.path.join(backgrounddir[det], dirpostfix)
1178 cp.set(
"parameter_estimation",
"posteriors", posteriorsfiles)
1181 cp.set(
"parameter_estimation",
"background", backgrounddir)
1185 if copydir
is not None:
1186 copydirhet = os.path.join(copydir,
"data")
1190 for ifo
in self.
ifos:
1191 if copydir
is not None:
1192 copydirhet = os.path.join(copydirhet, ifo)
1200 if copydir
is not None:
1202 ffdir = os.path.join(copydirhet,
"%df" %
int(ff))
1204 ffdir = os.path.join(copydirhet,
"%.3ff" %
int(ff))
1209 cpnode.set_destination(ffdir)
1219 self.add_node(cpnode)
1222 filelist = filelist[0]
1223 datafiles[ifo] = filelist
1225 if copydir
is not None:
1226 copydirhet = os.path.join(copydir,
"data")
1228 cp.set(
"data",
"files", datafiles)
1235 fp = open(inifile,
"w")
1240 "Error... could not write configuration file '%s' for results page"
1249 resultsnode.set_config(inifile)
1253 resultsnode.add_parent(n2pnode)
1257 resultsnode.add_parent(n2pnode)
1259 self.add_node(resultsnode)
1265 collatenode.set_config(cinifile)
1266 collatenode.add_parent(resultsnode)
1267 self.add_node(collatenode)
1271 Setup the preprocessing analysis: data finding, segment finding and heterodyne/splinter data processing
1280 if self.
engine ==
"heterodyne":
1285 if self.
engine ==
"splinter":
        Setup parameter estimation jobs/nodes for signal and background analyses
1303 "pe",
"pe_exec", default=
"lalpulsar_parameter_estimation_nested"
1309 if not os.path.isfile(self.
pe_exec)
or not os.access(self.
pe_exec, os.X_OK):
1311 "Warning... 'pe_exec' in '[pe]' does not exist or is not an executable. Try finding code in path."
1313 peexec = self.
find_exec_file(
"lalpulsar_parameter_estimation_nested")
1317 "Error... could not find 'lalpulsar_parameter_estimation_nested' in 'PATH'",
1336 if len(self.
ifos) == 1:
1340 "analysis",
"incoherent_only", cftype=
"boolean", default=
False
1345 if len(self.
ifos) > 1:
1347 "analysis",
"coherent_only", cftype=
"boolean", default=
False
1352 "analysis",
"num_background", cftype=
"int", default=0
1356 "Warning... 'num_background' is a negative value. Defaulting to zero background runs"
1362 "pe",
"pe_output_dir", cftype=
"dir"
1372 "pe",
"pe_output_dir_background", cftype=
"dir", allownone=
True
1377 "Error... no background analysis directory has been set",
1389 "pe",
"n_live", cftype=
"int", default=2048
1392 "pe",
"n_runs", cftype=
"int", default=1
1395 "pe",
"tolerance", cftype=
"float", default=0.1
1398 "pe",
"random_seed", cftype=
"int", allownone=
True
1401 "pe",
"n_mcmc", cftype=
"int", allownone=
True
1404 "pe",
"n_mcmc_initial", cftype=
"int", default=500
1407 "pe",
"non_gr", cftype=
"boolean", default=
False
1412 "pe",
"starttime", cftype=
"float", allownone=
True
1415 "pe",
"endtime", cftype=
"float", allownone=
True
1418 "pe",
"truncate_time", cftype=
"float", allownone=
True
1421 "pe",
"truncate_samples", cftype=
"int", allownone=
True
1424 "pe",
"truncate_fraction", cftype=
"float", allownone=
True
1429 "pe",
"n_runs_background", cftype=
"int", default=1
1432 "pe",
"n_live_background", cftype=
"int", default=1024
1437 "pe",
"use_roq", cftype=
"boolean", default=
False
1440 "pe",
"roq_ntraining", cftype=
"int", default=2500
1443 "pe",
"roq_tolerance", cftype=
"float", default=5e-12
1446 "pe",
"roq_uniform", cftype=
"boolean", default=
False
1449 "pe",
"roq_chunkmax", cftype=
"int", default=1440
1455 "Warning... currently this will not run with non-GR parameters. Reverting to GR-mode."
1461 "pe",
"model_type", default=
"waveform"
1465 "Warning... the given 'model_type' '%s' is not allowed. Defaulting to 'waveform'"
1474 "pe",
"biaxial", cftype=
"boolean", default=
False
1479 "pe",
"gaussian_like", cftype=
"boolean", default=
False
1482 self.
engine ==
"splinter"
1488 "pe",
"prior_options", cftype=
"dict"
1491 "pe",
"premade_prior_file", allownone=
True
1497 "Error... pre-made prior file '{}' does not exist!".
format(
1506 "pe",
"derive_amplitude_prior", cftype=
"boolean", default=
False
1512 "pe",
"amplitude_prior_file", allownone=
True
1518 "pe",
"previous_posteriors_file", allownone=
True
1531 "pe",
"amplitude_prior_asds", allownone=
True
1537 "pe",
"amplitude_prior_obstimes", allownone=
True
1542 "pe",
"amplitude_prior_obstimes", allownone=
True
1546 "pe",
"amplitude_prior_type", default=
"fermidirac"
1551 "pe",
"use_parameter_errors", cftype=
"boolean", default=
False
1556 "pe",
"n2p_exec", default=
"lalinference_nest2pos"
1562 if not os.path.isfile(self.
pe_n2p_exec)
or not os.access(
1566 "Warning... 'pe_n2p_exec' in '[pe]' does not exist or is not an executable. Try finding code in path."
1570 if pen2pexec ==
None:
1572 "Error... could not find 'lalinference_nest2pos' in 'PATH'",
1583 "Error... no 'n2p_output_dir' specified in '[pe]' giving path for posterior sample outputs",
1594 "pe",
"n2p_output_dir_background"
1598 "Error... no 'n2p_output_dir_background' specified in '[pe]' giving path for posterior sample outputs",
1609 "pe",
"clean_nest_samples", cftype=
"boolean", default=
False
1614 "pe",
"pe_request_memory", cftype=
"int", allownone=
True
1663 for ifo
in self.
ifos:
1666 {
"detectors": [ifo],
"prefix": ifo}
1671 {
"detectors": self.
ifos,
"prefix":
"".join(self.
ifos)}
1684 for j
in range(njobs):
1699 psrpostdir = os.path.join(
1724 dets = comb[
"detectors"]
1725 detprefix = comb[
"prefix"]
1728 detdir = os.path.join(psrdir, detprefix)
1733 detpostdir = os.path.join(psrpostdir, detprefix)
1740 ffdir = os.path.join(detdir,
"multiharmonic")
1741 ffpostdir = os.path.join(detpostdir,
"multiharmonic")
1746 ffdir = os.path.join(
1749 ffpostdir = os.path.join(
1753 ffdir = os.path.join(detdir,
"%.2ff" % self.
freq_factors[0])
1754 ffpostdir = os.path.join(
1760 ffdir = os.path.join(
1761 ffdir,
"%05d" % (j - 1)
1763 ffpostdir = os.path.join(ffpostdir,
"%05d" % (j - 1))
1790 randomiseseed =
"".join(
1791 [
str(f)
for f
in np.random.randint(1, 10, size=15).tolist()]
1796 roqweightsfile = os.path.join(ffdir,
"roqweights.bin")
1805 counter < nruns + nroqruns
1807 penode =
ppeNode(pejob, psrname=pname)
1809 penode.set_randomseed(
1812 penode.set_detectors(
",".join(dets))
1813 penode.set_par_file(
1817 penode.set_cor_file(
1820 penode.set_prior_file(priorfile)
1821 penode.set_harmonics(
1825 penode.set_Nlive(nlive)
1830 penode.set_Nmcmcinitial(
1833 penode.set_tolerance(
1838 penode.set_start_time(
1843 penode.set_end_time(
1858 ffdir,
"nested_samples_%s_%05d.hdf" % (pname, i)
1861 penode.set_outfile(nestfiles[i])
1870 penode.set_roq_outputweights(roqweightsfile)
1872 penode.set_roq_uniform()
1874 penode.set_roq_inputweights(roqweightsfile)
1878 penode.set_randomise(randomiseseed)
1887 penode.set_input_files(
",".join(inputfiles))
1891 penode.set_gaussian_like()
1900 penode.set_source_model()
1904 penode.set_biaxial()
1908 penode.set_ephem_earth(earthfile)
1909 penode.set_ephem_sun(sunfile)
1910 penode.set_ephem_time(timefile)
1922 if self.
engine ==
"heterodyne":
1931 if self.
engine ==
"splinter":
1954 roqinputnode
is not None
1956 penode.add_parent(roqinputnode)
1961 roqinputnode = penode
1962 self.add_node(penode)
1970 self.add_node(penode)
1971 penodes.append(penode)
1976 os.path.splitext(nestfiles[i])[0] +
"_SNR"
1978 snrdestfile = os.path.join(
1979 ffpostdir,
"SNR_%05d.txt" % i
1981 mvnode.set_source(snrsourcefile)
1982 mvnode.set_destination(snrdestfile)
1983 mvnode.add_parent(penode)
1984 self.add_node(mvnode)
1986 counter = counter + 1
1991 postfile = os.path.join(
1992 ffpostdir,
"posterior_samples_%s.hdf" % pname
1994 n2pnode.set_outfile(postfile)
1995 n2pnode.set_nest_files(nestfiles)
1997 n2pnodes[pname].append(n2pnode)
2001 n2pnode.add_parent(pn)
2003 self.add_node(n2pnode)
2009 rmnode.set_files(nestfiles)
2010 rmnode.add_parent(n2pnode)
2011 self.add_node(rmnode)
    def create_prior_file(
        self, psr, psrdir, detectors, freqfactors, outputpath, scalefactor=25.0
    ):
        """
        Create the prior file to use for a particular job defined by a set of detectors, or single detector, and
        a set of frequency factors, or a single frequency factor. If creating a prior limit based on a set of given
        amplitude spectral densities (by calculating an estimate of the 95% UL they would produce) then it will
        additionally be scaled by a factor of `scalefactor`.

        Return the full output file and the create prior node
        """
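        # Illustrative note (values are examples, format follows the write statements below):
        # each line of the generated prior file has the form "NAME<TAB>TYPE<TAB>VALUE1<TAB>VALUE2",
        # e.g. a uniform prior on COSIOTA over [-1, 1] would be written as
        #   COSIOTA    uniform    -1.0000000000000000e+00    1.0000000000000000e+00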
2036 outfile = os.path.join(outputpath,
"%s.prior" % pname)
2044 "Error... could not create symbolic link to prior file '%s'"
2059 corfile = os.path.join(psrdir,
"%s.cor" % pname)
2060 fp = open(corfile,
"w")
2079 for paritem
in pppu.float_keys:
2080 if paritem
in ignore_pars:
2083 psr[
"%s_ERR" % paritem] !=
None
2084 and psr[
"%s_FIT" % paritem] !=
None
2086 if psr[
"%s_FIT" % paritem] == 1:
2088 if paritem
in [
"RA_RAD",
"DEC_RAD"]:
2089 itemname = paritem.replace(
"_RAD",
"")
2092 prior_options[itemname] = {
2093 "priortype":
"gaussian",
2094 "ranges": [psr[paritem], psr[
"%s_ERR" % paritem]],
2097 fp.write(itemname +
" ")
2098 erritems.append(itemname)
2100 if len(erritems) > 0:
2103 for i, ei
in enumerate(
2107 for j
in range(i + 1):
2115 if psr[
"E"] !=
None:
2117 elif psr[
"ECC"] !=
None:
2120 if ecc >= 0.0
and ecc < 0.001:
2122 (ei ==
"T0" and erritems[j] ==
"OM")
2123 or (ei ==
"OM" and erritems[j] ==
"T0")
2124 )
and (
"T0" in erritems
and "OM" in erritems):
2129 (ei ==
"PB" and erritems[j] ==
"OMDOT")
2130 or (ei ==
"OMDOT" and erritems[j] ==
"PB")
2131 )
and (
"PB" in erritems
and "OMDOT" in erritems):
2144 if len(erritems) > 0:
2152 posteriorfile =
None
2162 "Error... could not open file '%s' listing previous posterior files."
2175 fp = open(outfile,
"w")
2178 "Error... could not open prior file '%s'" % outfile, file=sys.stderr
2184 for prioritem
in prior_options:
2185 if "priortype" not in prior_options[prioritem]:
2187 "Error... no 'priortype' given for parameter '%s'" % prioritem,
2193 ptype = prior_options[prioritem][
"priortype"]
2196 if "ranges" not in prior_options[prioritem]:
2198 "Error... no 'ranges' given for parameter '%s'" % prioritem,
2204 rangevals = prior_options[prioritem][
"ranges"]
2206 if len(rangevals) != 2:
2208 "Error... 'ranges' for parameter '%s' must be a list or tuple with two entries"
2216 if posteriorfile
is not None and prioritem.upper() ==
"COSIOTA":
2220 "%s\t%s\t%.16le\t%.16le\n"
2221 % (prioritem, ptype, rangevals[0], rangevals[1])
2225 npars = len(prioritem.split(
":"))
2228 if "nmodes" not in prior_options[prioritem]:
2230 "Error... no 'nmodes' given for parameter '{}'".
format(
2238 nmodes = prior_options[prioritem][
"nmodes"]
2240 if "means" not in prior_options[prioritem]:
2242 "Error... no 'means' given for parameter '{}'".
format(
2250 means = prior_options[prioritem][
"means"]
2252 if len(means) != nmodes:
2254 "Error... number of mean values must be equal to the number of modes",
2261 if len(mean) != npars:
2263 "Error... number of mean values must be equal to the number of parameters",
2269 if "covs" not in prior_options[prioritem]:
2271 "Error... no 'covs' given for parameter '{}'".
format(
2279 covs = prior_options[prioritem][
"covs"]
2281 if len(means) != nmodes:
2283 "Error... number of covariance matrices values must be equal to the number of modes",
2290 npcov = np.array(cov)
2291 if npcov.shape[0] != npcov.shape[1]
and npcov.shape[1] != npars:
2293 "Error... number of covariance matrices rows/columns must be equal to the number of parameters",
2299 if "weights" not in prior_options[prioritem]:
2301 "Error... no 'weights' given for parameter '{}'".
format(
2309 weights = prior_options[prioritem][
"weights"]
2311 if len(weights) != nmodes:
2313 "Error... number of weights must be equal to the number of modes",
2319 if "ranges" in prior_options[prioritem]:
2320 ranges = prior_options[prioritem][
"ranges"]
2322 if len(ranges) != npars:
2324 "Error... number of ranges must be equal to the number of parameters",
2330 for rangevals
in ranges:
2331 if len(rangevals) != 2:
2333 "Error... ranges must have two values",
2341 fp.write(
"{}\tgmm\t".
format(prioritem))
2342 fp.write(
"{}\t".
format(nmodes))
2343 fp.write(
"{}\t".
format(re.sub(
r"\s+",
"",
str(means))))
2344 fp.write(
"{}\t".
format(re.sub(
r"\s+",
"",
str(covs))))
2345 fp.write(
"{}".
format(re.sub(
r"\s+",
"",
str(weights))))
2347 if ranges
is not None:
2348 for rangevals
in ranges:
2350 fp.write(
"{}".
format(re.sub(
r"\s+",
"",
str(rangevals))))
2356 gmmpars = OrderedDict()
2360 requls[
"C22"] =
None
2361 gmmpars[
"C22"] = [0.0, np.inf]
2363 requls[
"C21"] =
None
2364 gmmpars[
"C21"] = [0.0, np.inf]
2368 gmmpars[
"H0"] = [0.0, np.inf]
2371 requls[
"I21"] =
None
2372 requls[
"I31"] =
None
2373 gmmpars[
"I21"] = [0.0, np.inf]
2374 gmmpars[
"I31"] = [0.0, np.inf]
2375 gmmpars[
"COSIOTA"] = [
2380 if len(requls) == 0:
2382 "Error... unknown frequency factors or model type in configuration file.",
2398 "Error... could not parse prior file '%s'."
2410 if "C22UL" not in uls
and "H0UL" in uls:
2412 requls[
"C22"] = uls[
"H0UL"]
2414 if ult +
"UL" in uls:
2415 requls[ult] = uls[ult +
"UL"]
2420 if None in requls.values()
and freq > 0.0
and posteriorfile
is None:
2433 asdfilestmp[
"det"] = asdfiles
2434 obstimestmp[
"det"] =
float(obstimes)
2435 asdfiles = asdfilestmp
2436 obstimes = obstimestmp
2440 if dk
not in obstimes:
2442 "Error... no corresponding observation times for detector '%s'"
2449 if not isinstance(obstimes[dk], float)
and not isinstance(
2453 "Error... observation time must be a float or int.",
2459 if not os.path.isfile(asdfiles[dk]):
2461 "Error... ASD file '%s' does not exist."
2470 asdfiles[dk], comments=[
"%",
"#"]
2474 "Error... could not load file '%s'."
2484 asd[0, 0] <= freq
and asd[-1, 0] >= freq
2487 np.abs(asd[:, 0] - freq)
2489 asdv.append(asd[idxf, 1])
2491 asd[0, 0] <= 2.0 * freq
and asd[-1, 0] >= 2.0 * freq
2494 np.abs(asd[:, 0] - 2.0 * freq)
2496 asdv.append(asd[idxf, 1])
2500 np.array(asdv) ** 2 / (obstimes[dk] * 86400.0)
2504 "Error... frequency range in ASD file does not span pulsar frequency.",
2512 for asdv
in asdlist:
2514 mspec = mspec + (1.0 / asdv)
2516 mspec = np.sqrt(1.0 / mspec)
2524 if requls[
"C21"] ==
None:
2529 if requls[
"C22"] ==
None:
2535 if requls[
"H0"] ==
None:
2536 requls[
"H0"] = ulspec[0] * scalefactor
2540 if requls[
"I21"] ==
None:
2541 requls[
"I21"] = np.max(ulspec) * scalefactor
2542 if requls[
"I31"] ==
None:
2543 requls[
"I31"] = np.max(ulspec) * scalefactor
2548 and posteriorfile
is None
2551 "Error... prior type must be 'fermidirac' or 'uniform'",
2557 if posteriorfile
is None:
2560 if requls[ult] ==
None:
2562 "Error... a required upper limit for '%s' is not available."
2574 "Error... problem deriving the Fermi-Dirac prior for '%s'."
2582 b = requls[ult] / 0.95
2585 "%s\t%s\t%.16le\t%.16le\n"
2590 means, covs, weights, _ = self.
gmm_prior(
2591 posteriorfile, gmmpars, taper=
"elliptical", decaywidth=1.0
2595 "Error... could not set GMM prior using previous posterior samples.",
2600 parssep =
":".join(gmmpars.keys())
2601 fp.write(
"%s\tgmm\t%d\t" % (parssep, len(means)))
2605 "[" +
",".join([
str(v)
for v
in vs.tolist()]) +
"]"
2609 fp.write(
"[%s]\t" % meanstr)
2616 "[" +
",".join([
str(ca)
for ca
in c]) +
"]"
2617 for c
in cs.tolist()
2624 fp.write(
"[%s]\t" % covstr)
2626 fp.write(
"[%s]\t" %
",".join([
str(w)
for w
in weights]))
2629 fp.write(
"[%s]\t" %
",".join([
str(lim)
for lim
in gmmpars[gp]]))
2636 fp = open(outfile,
"w")
2639 "Error... could not write prior file '%s'" % outfile,
2645 for prioritem
in prior_options:
2646 ptype = prior_options[prioritem][
"priortype"]
2649 rangevals = prior_options[prioritem][
"ranges"]
2651 if len(rangevals) != 2:
2653 "Error... the ranges in the prior for '%s' are not set properly"
2660 "%s\t%s\t%.16e\t%.16e\n"
2661 % (prioritem, ptype, rangevals[0], rangevals[1])
2665 npars = len(prioritem.split(
":"))
2668 if "nmodes" not in prior_options[prioritem]:
2670 "Error... no 'nmodes' given for parameter '{}'".
format(
2678 nmodes = prior_options[prioritem][
"nmodes"]
2680 if "means" not in prior_options[prioritem]:
2682 "Error... no 'means' given for parameter '{}'".
format(
2690 means = prior_options[prioritem][
"means"]
2692 if len(means) != nmodes:
2694 "Error... number of mean values must be equal to the number of modes",
2701 if len(mean) != npars:
2703 "Error... number of mean values must be equal to the number of parameters",
2709 if "covs" not in prior_options[prioritem]:
2711 "Error... no 'covs' given for parameter '{}'".
format(
2719 covs = prior_options[prioritem][
"covs"]
2721 if len(means) != nmodes:
2723 "Error... number of covariance matrices values must be equal to the number of modes",
2730 npcov = np.array(cov)
2731 if npcov.shape[0] != npcov.shape[1]
and npcov.shape[1] != npars:
2733 "Error... number of covariance matrices rows/columns must be equal to the number of parameters",
2739 if "weights" not in prior_options[prioritem]:
2741 "Error... no 'weights' given for parameter '{}'".
format(
2749 weights = prior_options[prioritem][
"weights"]
2751 if len(weights) != nmodes:
2753 "Error... number of weights must be equal to the number of modes",
if "ranges" in prior_options[prioritem]:
2760 ranges = prior_options[prioritem][
"ranges"]
2762 if len(ranges) != npars:
2764 "Error... number of ranges must be equal to the number of parameters",
2770 for rangevals
in ranges:
2771 if len(rangevals) != 2:
2773 "Error... ranges must have two values",
2781 fp.write(
"{}\tgmm\t".
format(prioritem))
2782 fp.write(
"{}\t".
format(nmodes))
2783 fp.write(
"{}\t".
format(re.sub(
r"\s+",
"",
str(means))))
2784 fp.write(
"{}\t".
format(re.sub(
r"\s+",
"",
str(covs))))
2785 fp.write(
"{}".
format(re.sub(
r"\s+",
"",
str(weights))))
2787 if ranges
is not None:
2788 for rangevals
in ranges:
2790 fp.write(
"{}".
format(re.sub(
r"\s+",
"",
str(rangevals))))
    def fermidirac_rsigma(self, ul, mufrac=0.4, cdf=0.95):
        """
        Calculate the r and sigma parameter of the Fermi-Dirac distribution to be used.

        Based on the definition of the distribution given in
        https://www.authorea.com/users/50521/articles/65214/_show_article
        the distribution will be defined by a mu parameter at which the distribution has 50% of its maximum
        probability, and mufrac which is the fraction of mu defining the range over which the distribution falls
        from 97.5% of the maximum down to 2.5%. Using an upper limit defining a given cdf of the distribution the
        parameters r and sigma will be returned.
        """

        Z = 7.33  # approximate width, in units of sigma, of the 97.5% to 2.5% fall-off (assumed value)
        r = 0.5 * Z / mufrac

        # numerically solve for sigma given the upper limit and the required cdf
        solution = optimize.root(
            lambda s: cdf * np.log(1.0 + np.exp(r))
            - np.log(1.0 + np.exp(-r))
            - np.log(1.0 + np.exp((ul / s) - r)),
            ul,  # initial guess for sigma (assumed; not given here)
        )
        sigma = solution.x[0]
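        # Illustrative usage (argument values are examples only): obtain the Fermi-Dirac
        # parameters such that the prior CDF reaches 0.95 at an upper limit of 1e-24:
        #   r, sigma = self.fermidirac_rsigma(1e-24, mufrac=0.4, cdf=0.95)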
    def gmm_prior(self, prevpostfile, pardict, ncomps=20, taper=None, decaywidth=5.0):
        """
        Create an ND Gaussian Mixture Model for use as a prior.

        This will use the BayesianGaussianMixture Model from scikit-learn, which fits a Dirichlet Process Gaussian
        Mixture Model to the input data inferring the number of components required. The input to this should be
        a previously calculated posterior sample file, or numpy array of samples. If a file is given then the
        parameters given as keys in the `pardict` ordered dictionary will be extracted. For each parameter name
        key in the `pardict` ordered dictionary there should be pairs of hard upper and lower limits of the particular parameters.
        If any of these are not +/-infinity then the samples will be duplicated and reflected around that limit. This
        is to avoid edge effects for the inferred Gaussian distributions. `ncomps` sets the hyperparameter used in
        the Dirichlet process related to the number of Gaussian components.

        `taper` sets whether or not to taper-off any reflected samples, and how that tapering happens. Tapering can
        use: a 'gaussian' taper, where the half-width of the Gaussian is set by the range of the samples multiplied
        by `decaywidth`; a 'triangular' taper, which falls from one to zero over the range of the samples; an
        'exponential' taper, where the decay constant is defined by 'decaywidth' multiplied by the range of the
        samples; or, an 'elliptical' taper, where the axis of the ellipse is set by 'decaywidth' multiplied by the
        range of the samples. The default is that no tapering is applied, and it should be noted that tapering can
        still leave artifacts in the final GMM.

        The means, covariance matrices and weights of the Gaussian components will be returned, along with
        the full set of points (including reflected points) used for the estimation.

        An example of using this would be for "H0" versus "COSIOTA", in which case the `pardict` might be:
        >> pardict = OrderedDict()
        >> pardict['H0'] = [0., np.inf]
        >> pardict['COSIOTA'] = [-1., 1.]
        """
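        # Illustrative call using the pardict above (the file name is an example only):
        #   means, covs, weights, samps = self.gmm_prior("posterior_samples_J0534+2200.hdf", pardict, taper="elliptical")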
        try:
            from sklearn import mixture
        except ImportError:
            print("Error... could not import scikit-learn.", file=sys.stderr)
            return None, None, None, None

        means = None
        covs = None
        weights = None

        if not isinstance(pardict, OrderedDict):
            print("Error... Input must be an ordered dictionary", file=sys.stderr)
            return means, covs, weights, None
        npars = len(pardict)

        # get the posterior samples for the required parameters
        allsamples = []
        try:
            if not isinstance(prevpostfile, (np.ndarray, np.generic)):
                # the input should be a posterior sample file
                if not os.path.isfile(prevpostfile):
                    print(
                        "Error... previous posterior sample file '%s' does not exist"
                        % prevpostfile,
                        file=sys.stderr,
                    )
                    return means, covs, weights, None

                possamps, _, _ = pppu.pulsar_nest_to_posterior(prevpostfile)
                for par in pardict:
                    allsamples.append(possamps[par.upper()].samples)
            else:
                # the input is a numpy array of samples
                if prevpostfile.shape[1] == npars:
                    for i in range(npars):
                        allsamples.append(
                            prevpostfile[:, i].reshape(len(prevpostfile), 1)
                        )
                else:
                    print(
                        "Error... input numpy array does not contain correct number of parameters",
                        file=sys.stderr,
                    )
                    return means, covs, weights, None
        except Exception:
            print(
                "Error... could not extract posterior samples from file or numpy array",
                file=sys.stderr,
            )
            return means, covs, weights, None
2911 allsamplesnp = np.copy(allsamples).squeeze().T
2912 for i, p
in enumerate(pardict.keys()):
2915 for lim
in pardict[p]:
2916 if np.isfinite(lim):
2917 maxp = np.max(allsamples[i])
2918 minp = np.min(allsamples[i])
2920 sigmap = decaywidth * (maxp - minp)
2922 dist = lim - allsamplesnp[:, i]
2924 refidxs = np.ones(len(allsamplesnp[:, i]), dtype=bool)
2926 if taper
is not None:
2928 deltav = allsamplesnp[:, i] + 2.0 * dist - maxp
2930 deltav = minp - (allsamplesnp[:, i] + 2.0 * dist)
2933 "Warning... limit is inside the extent of the samples",
2938 probkeep = np.ones(len(allsamplesnp[:, i]))
2939 if taper ==
"gaussian":
2940 probkeep = np.exp(-0.5 * (deltav) ** 2 / sigmap**2)
2941 elif taper ==
"triangular":
2942 probkeep = 1.0 - (deltav) / (maxp - minp)
2943 elif taper ==
"exponential":
2944 probkeep = np.exp(-(deltav) / sigmap)
2945 elif taper ==
"elliptical":
2946 probkeep = np.zeros(len(allsamplesnp[:, i]))
2947 probkeep[deltav < sigmap] = np.sqrt(
2948 1.0 - (deltav[deltav < sigmap] / sigmap) ** 2
2952 "Warning... unknown tapering has been set, so none will be applied",
2957 np.random.rand(len(allsamplesnp[:, i])) < probkeep
2960 thesesamples = allsamplesnp[refidxs, :]
2961 thesesamples[:, i] += 2.0 * dist[refidxs]
2962 if refsamples
is None:
2963 refsamples = np.copy(thesesamples)
2965 refsamples = np.concatenate((refsamples, thesesamples))
2968 if refsamples
is not None:
2969 allsamplesnp = np.concatenate((allsamplesnp, refsamples))
        # scale the samples to unit standard deviation before fitting the mixture model
        parscales = np.std(allsamplesnp, axis=0)
        scalesmat = np.identity(npars) * parscales
        scaledsamples = allsamplesnp / parscales

        # fit a Dirichlet process Gaussian mixture model to the (scaled) samples
        dpgmm = mixture.BayesianGaussianMixture(
            n_components=ncomps, covariance_type="full", tol=5e-2, max_iter=500
        ).fit(scaledsamples)

        # predict the component to which each sample belongs
        parpred = dpgmm.predict(scaledsamples)

        means = []
        covs = []
        weights = []
        for i, (mean, covar, weight) in enumerate(
            zip(dpgmm.means_, dpgmm.covariances_, dpgmm.weights_)
        ):
            # discard components whose means lie well outside the allowed parameter ranges
            outlier = False
            for mus, sigs, lowlim, highlim in zip(
                mean * parscales,  # assumed: component mean rescaled to parameter units
                parscales * np.sqrt(np.diag(covar)),
                [pardict[p][0] for p in pardict],
                [pardict[p][1] for p in pardict],
            ):
                if mus < lowlim - 3.0 * sigs or mus > highlim + 3.0 * sigs:
                    outlier = True
                    break

            if not outlier:
                # convert means and covariances back to the unscaled parameter space
                means.append(mean * parscales)
                covs.append(np.dot(scalesmat, np.dot(covar, scalesmat)))
                weights.append(weight)

        if len(means) == 0:
            print("Error... no GMM components returned", file=sys.stderr)

        return means, covs, weights, allsamplesnp
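    # Minimal standalone sketch (toy values, not part of the pipeline) of the
    # sample-reflection trick used above: samples bounded below by zero are mirrored
    # about the boundary so the fitted mixture does not roll off artificially at the edge.
    #   samps = np.abs(np.random.randn(1000))        # toy samples with a hard limit at 0
    #   reflected = np.concatenate((samps, -samps))  # duplicate and reflect about the limit
    #   # fit the GMM to `reflected`, then keep only components lying within the limit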
3023 Setup the coarse and fine heterodyne jobs/nodes.
3028 "heterodyne",
"heterodyne_exec", default=
"lalpulsar_heterodyne"
3038 "Warning... 'heterodyne_exec' in '[heterodyne]' does not exist or is not an executable. Try finding code in path."
3044 "Error... could not find 'lalpulsar_heterodyne' in 'PATH'",
3054 "heterodyne",
"universe", default=
"vanilla"
3061 "heterodyne",
"filter_knee",
"float", default=0.25
3064 "heterodyne",
"coarse_sample_rate",
"int", default=16384
3067 "heterodyne",
"coarse_resample_rate",
"float", default=1.0
3070 "heterodyne",
"channels",
"dict"
3073 "heterodyne",
"binary_output",
"boolean", default=
True
3076 "heterodyne",
"gzip_coarse_output",
"boolean", default=
False
3079 "heterodyne",
"coarse_request_memory",
"int", allownone=
True
3082 "heterodyne",
"coarse_max_data_length",
"int", default=512
"Warning... cannot output coarse heterodyned data as gzip and binary. Defaulting to binary output."
3093 "heterodyne",
"fine_resample_rate",
"string", default=
"1/60"
3096 "heterodyne",
"stddev_thresh",
"float", default=3.5
3099 "heterodyne",
"gzip_fine_output",
"boolean", default=
True
3102 "heterodyne",
"fine_request_memory",
"int", allownone=
True
3135 getmodsciencesegs = (
3141 for ifo
in self.
ifos:
3142 getmodsciencesegs[ifo] =
True
3143 segfiletimes[ifo] = {}
3146 for ifo
in self.
ifos:
"Error... channel not specified for '{}'".format(ifo),
3162 "Error... channel must be a string or a list of strings",
3171 modified_pulsar = unmodified_pulsar =
False
3175 modified_pulsar =
True
3177 unmodified_pulsar =
True
3180 earthfile, sunfile, timefile = self.
get_ephemeris(pppu.psr_par(par))
3190 for ifo
in self.
ifos[
3198 datadir = os.path.join(psrdir,
"data")
3203 coarsedir = os.path.join(datadir,
"coarse")
3208 finedir = os.path.join(datadir,
"fine")
3213 segfiles[ifo] = os.path.join(psrdir,
"segments.txt")
3217 if getmodsciencesegs[ifo]:
3237 "Error... could not copy segment list into pulsar directory",
3243 if getmodsciencesegs[ifo]:
3244 getmodsciencesegs[ifo] =
False
3247 prevsegs = os.path.join(
3248 os.path.dirname(segfiles[ifo]),
"previous_segments.txt"
3250 if os.path.isfile(prevsegs):
3255 "Warning... previous segment list file '%s' could not be removed"
3258 elif unmodified_pulsar:
3260 if os.path.isfile(segfiles[ifo]):
3262 prevsegs = os.path.join(
3263 os.path.dirname(segfiles[ifo]),
"previous_segments.txt"
3265 if not os.path.isfile(prevsegs):
3267 segfiles[ifo], prevsegs
3270 (segfiles[ifo], prevsegs)
3276 if os.path.isfile(segfiles[ifo]):
3278 p = sp.check_output(
"tail -1 " + segfiles[ifo], shell=
True)
3285 "Error... could not get end time out of previous segment file '%s'"
3293 if self.
starttime[ifo][0]
in segfiletimes[ifo]:
3295 segfiletimes[ifo][self.
starttime[ifo][0]], segfiles[ifo]
3298 segfiletimes[ifo][self.
starttime[ifo][0]] = segfiles[ifo]
3312 self.
ifos.remove(ifo)
3313 if len(self.
ifos) == 0:
3315 "Error... no segments were available for any required IFOs",
3330 not freqfactor % 1.0
3332 freqfacdircoarse = os.path.join(
3333 coarsedir,
"%df" %
int(freqfactor)
3335 freqfacdirfine = os.path.join(finedir,
"%df" %
int(freqfactor))
3337 freqfacdircoarse = os.path.join(coarsedir,
"%.2ff" % freqfactor)
3338 freqfacdirfine = os.path.join(finedir,
"%.2ff" % freqfactor)
3340 self.
mkdirs(freqfacdircoarse)
3343 self.
mkdirs(freqfacdirfine)
3354 fineoutput = os.path.join(
3366 subfile=os.path.join(freqfacdirfine,
"concat.sub"),
3378 coarseoutput = os.path.join(
3380 "coarse-%s-%d-%d.bin"
3388 coarseoutput = os.path.join(
3390 "coarse-%s-%d-%d.txt"
3406 coarsenode.set_data_file(self.
cache_files[ifo][k])
3407 coarsenode.set_max_data_length(
3410 coarsenode.set_seg_list(segfiles[ifo])
3411 coarsenode.set_channel(
3414 coarsenode.set_output_file(coarseoutput)
3416 coarsenode.set_binoutput()
3418 coarsenode.set_gzip_output()
3421 coarsenode.set_ifo(ifo)
3422 coarsenode.set_het_flag(0)
3423 coarsenode.set_pulsar(pname)
3424 coarsenode.set_param_file(par)
3425 coarsenode.set_filter_knee(
3428 coarsenode.set_sample_rate(
3431 coarsenode.set_resample_rate(
3434 coarsenode.set_freq_factor(
3438 self.add_node(coarsenode)
3450 concatnode.add_parent(
3455 finenode.add_parent(coarsenode)
3458 fineoutput = os.path.join(
3467 finetmpfiles.append(fineoutput)
3471 finenode.set_data_file(coarseoutput +
".gz")
3473 finenode.set_data_file(coarseoutput)
3476 finenode.set_bininput()
3478 finenode.set_seg_list(segfiles[ifo])
3479 finenode.set_output_file(fineoutput)
3482 finenode.set_ifo(ifo)
3483 finenode.set_het_flag(1)
3484 finenode.set_pulsar(pname)
3485 finenode.set_param_file(par)
3486 finenode.set_sample_rate(
3489 finenode.set_resample_rate(
3493 finenode.set_freq_factor(freqfactor)
3494 finenode.set_stddev_thresh(
3497 finenode.set_ephem_earth_file(earthfile)
3498 finenode.set_ephem_sun_file(sunfile)
3499 finenode.set_ephem_time_file(timefile)
3502 finenode.set_gzip_output()
3503 finetmpfiles[-1] +=
".gz"
3505 self.add_node(finenode)
3509 concatnode.set_files(finetmpfiles)
3510 self.add_node(concatnode)
3512 fineoutput = os.path.join(
3530 rmnode.set_files(finetmpfiles)
3531 rmnode.add_parent(concatnode)
3532 self.add_node(rmnode)
3551 elif unmodified_pulsar:
3554 os.path.join(freqfacdirfine, hf)
3555 for hf
in os.listdir(freqfacdirfine)
3558 hetfilescheck.sort()
3562 for i, hf
in enumerate(hetfilescheck):
3565 p = sp.Popen(
"gzip " + hf, shell=
True)
3566 out, err = p.communicate()
3567 if p.returncode != 0:
3569 "Error... could not gzip previous fine heterodyned file '%s': %s, %s"
3576 hetfilescheck[i] = hf +
".gz"
3578 for i, hf
in enumerate(hetfilescheck):
3581 p = sp.Popen(
"gunzip " + hf, shell=
True)
3582 out, err = p.communicate()
3583 if p.returncode != 0:
3585 "Error... could not gunzip previous fine heterodyned file '%s': %s, %s"
3596 if len(hetfilescheck) == 0:
3600 if len(self.
ifos) == 0:
3602 "Error... no segments were available for any required IFOs",
3617 for ifo
in self.
ifos:
3623 "Warning... could not remove temporary segment list '%s'"
3629 Setup the Spectral Interpolation jobs/nodes.
3640 "splinter",
"splinter_exec", default=
"lalpulsar_SplInter"
3653 "Warning... 'splinter_exec' in '[splinter]' does not exist or is not an executable. Try finding code in path."
3660 "Error... could not find 'lalpulsar_SplInter' in 'PATH'",
3670 "splinter",
"universe", default=
"vanilla"
3677 "splinter",
"splinter_request_memory",
"int", allownone=
True
3701 "splinter",
"bandwidth",
"float", 0.3
3704 "splinter",
"freq_range",
"list", [30.0, 2000.0]
3707 "splinter",
"stddev_thresh",
"float", 3.5
3710 "splinter",
"min_seg_length",
"int", 1800
3713 "splinter",
"max_seg_length",
"int", 21600
3716 "splinter",
"gzip_output",
"boolean",
False
3723 "Error... minimum segment length ({}) for SplInter is larger than maximum segment length ({})".
format(
3750 for ifo
in self.
ifos[
3762 for modsuffix
in [
"modified",
"unmodified"]:
3765 if modsuffix ==
"modified":
3773 if modsuffix ==
"unmodified":
3774 if unmodpars
is True:
3781 if os.path.isdir(pardir):
3783 for f
in os.listdir(pardir):
3785 os.remove(os.path.join(pardir, f))
3788 "Warning... problem removing par file '%s' from '%s'. This file may be overwritten."
3798 psr = pppu.psr_par(par)
3801 parlink = os.path.join(pardir, os.path.basename(par))
3802 if modsuffix ==
"modified":
3804 if modsuffix ==
"unmodified":
3806 os.symlink(par, parlink)
3822 datadir = os.path.join(psrdir,
"data")
3827 splintercpydir = os.path.join(datadir,
"splinter")
3828 self.
mkdirs(splintercpydir)
3834 not freqfactor % 1.0
3836 ffdir = os.path.join(
3837 splintercpydir,
"%df" %
int(freqfactor)
3840 ffdir = os.path.join(
3841 splintercpydir,
"%.3ff" %
int(freqfactor)
3846 os.path.join(ffdir,
"SplInter_%s_%s" % (pname, ifo))
3860 "Warning... could not create link to par file '%s' in '%s'. This file may be overwritten."
3873 modsegfile = os.path.join(
3883 self.
ifos.remove(ifo)
3884 if len(self.
ifos) == 0:
"Error... no segments were available for any required IFOs",
3898 unmodsegfile = os.path.join(
3905 if os.path.isfile(unmodsegfile):
3907 p = sp.check_output(
"tail -1 " + unmodsegfile, shell=
True)
3912 "Error... could not get end time out of previous segment file '%s'"
3927 self.
ifos.remove(ifo)
3928 if len(self.
ifos) == 0:
3930 "Error... no segments were available for any required IFOs",
3941 prevsegs = os.path.join(
3944 if not os.path.isfile(prevsegs):
3946 unmodsegfile, prevsegs
3949 (unmodsegfile, prevsegs)
3954 if not freqfactor % 1.0:
3955 splinterdir = os.path.join(
3959 splinterdir = os.path.join(self.
splinter_dir,
"%.2ff" % freqfactor)
3966 splsegfiles = [modsegfile, unmodsegfile]
3972 for idx, splbool
in enumerate([modpars, unmodpars]):
3978 splnode.set_sft_lalcache(
3981 splnode.set_output_dir(splinterdir)
3982 splnode.set_param_dir(
3985 splnode.set_seg_list(
3988 splnode.set_freq_factor(freqfactor)
3989 splnode.set_ifo(ifo)
3991 splnode.set_bandwidth(
3994 splnode.set_min_seg_length(
3997 splnode.set_max_seg_length(
4000 splnode.set_start_freq(
4003 splnode.set_end_freq(
4006 splnode.set_stddev_thresh(
4009 splnode.set_ephem_dir(
4013 splnode.set_gzip_output()
4019 self.add_node(splnode)
4023 psr = pppu.psr_par(par)
4026 splinterfile = os.path.join(
4027 splinterdir,
"SplInter_%s_%s" % (pname, ifo)
4030 splinterfile +=
".gz"
4031 mvnode.set_source(splinterfile)
4032 mvnode.set_destination(
4035 mvnode.add_parent(splnode)
4036 self.add_node(mvnode)
4040 Get the ephemeris file information based on the content of the psr file
4044 ephem = psr[
"EPHEM"]
4049 units = psr[
"UNITS"]
4053 earthfile = os.path.join(self.
ephem_path,
"earth00-40-%s.dat.gz" % ephem)
4054 sunfile = os.path.join(self.
ephem_path,
"sun00-40-%s.dat.gz" % ephem)
4057 timefile = os.path.join(self.
ephem_path,
"tdb_2000-2040.dat.gz")
4059 timefile = os.path.join(self.
ephem_path,
"te405_2000-2040.dat.gz")
4061 if not os.path.isfile(earthfile):
4063 "Error... Earth ephemeris file '%s' does not exist" % earthfile,
4069 if not os.path.isfile(sunfile):
4071 "Error... Sun ephemeris file '%s' does not exist" % sunfile,
4077 if not os.path.isfile(timefile):
4079 "Error... time ephemeris file '%s' does not exist" % timefile,
4085 return earthfile, sunfile, timefile
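    # Illustrative example (par-file values are assumptions): for a pulsar with EPHEM=DE405
    # and UNITS=TDB, get_ephemeris returns paths within ephem_path ending in
    # "earth00-40-DE405.dat.gz", "sun00-40-DE405.dat.gz" and "tdb_2000-2040.dat.gz".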
        Create jobs to concatenate fine heterodyned/Splinter data files in cases where multiple files exist (e.g. in an automated search).
        Go through the processed file list and find ones that have more than one file, and also remove previous files.
        """
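        # The concatenation is a file-level append, i.e. a Condor job running something
        # equivalent to (file names are illustrative only):
        #   cat fine-H1-1000000000-1000086400.txt fine-H1-1000086400-1000172800.txt > fine-H1-1000000000-1000172800.txt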
4121 if self.
engine ==
"heterodyne":
4122 if len(finefiles) > 1:
4130 firststart = os.path.basename(finefiles[0]).split(
"-")[2]
4131 lastend = os.path.basename(finefiles[-1]).split(
"-")[3]
4132 lastend = lastend.replace(
".txt",
"")
4133 lastend = lastend.replace(
".gz",
"")
4136 newfinefile = os.path.join(
4137 os.path.dirname(finefiles[0]),
4138 "fine-%s-%s-%s.txt" % (pifo, firststart, lastend),
4141 newfinefile = newfinefile +
".gz"
4143 catfiles = finefiles
4152 subloc = os.path.join(
4153 os.path.dirname(finefiles[0]),
"concat.sub"
4155 elif self.
engine ==
"splinter":
4157 prevfile = os.listdir(
4161 if len(prevfile) > 1:
4163 "Error... more than one previous Splinter file in directory '%s'"
4174 if len(prevfile) == 1:
4176 startname = os.path.basename(prevfile).split(
"-")[1]
4179 "Warning... could not get previous start time from Splinter file name '%s'. Using start time from this run: '%d'"
4201 newfinefile = os.path.join(
4205 (os.path.basename(finefiles[0])).strip(
".gz"),
4211 newfinefile = newfinefile +
".gz"
4214 subloc = os.path.join(
4219 if subloc
is not None and catfiles
is not None:
4228 concatnode.set_files(catfiles)
4229 if parent
is not None:
4230 concatnode.add_parent(parent)
4231 self.add_node(concatnode)
4240 len(finefiles) > 1
or self.
engine ==
"splinter"
4241 )
and concatnode !=
None:
4244 if prevfile !=
None:
4252 rmnode.set_files(finefiles)
4253 if subloc
is not None and catfiles
is not None:
4254 rmnode.add_parent(concatnode)
4255 self.add_node(rmnode)
    def get_config_option(
        self, section, option, cftype=None, default=None, allownone=False
    ):
        """
        Get a value of type cftype ('string', 'int', 'float', 'boolean', 'list', 'dict' or 'dir')
        from the configuration parser object.

        Return the value on success and None on failure.
        """
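        # Illustrative usage (section and option names appear elsewhere in this file):
        #   nlive = self.get_config_option("pe", "n_live", cftype="int", default=2048)
        # returns 2048 if the option is absent from the "[pe]" section.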
4268 if cftype ==
None or cftype ==
"string" or cftype ==
"dir":
4270 value = self.
config.get(section, option)
4273 if not isinstance(default, str):
4275 "Error... could not parse '%s' option from '[%s]' section."
4276 % (option, section),
4282 "Warning... could not parse '%s' option from '[%s]' section. Defaulting to %s."
4283 % (option, section, default)
4286 elif cftype ==
"float":
4288 value = self.
config.getfloat(section, option)
4291 if not isinstance(default, float):
4293 "Error... could not parse '%s' float option from '[%s]' section."
4294 % (option, section),
4300 "Warning... could not parse '%s' float option from '[%s]' section. Defaulting to %f."
4301 % (option, section, default)
4304 elif cftype ==
"boolean":
4306 value = self.
config.getboolean(section, option)
4309 if not isinstance(default, bool):
4311 "Error... could not parse '%s' boolean option from '[%s]' section."
4312 % (option, section),
4318 "Warning... could not parse '%s' boolean option from '[%s]' section. Defaulting to %r."
4319 % (option, section, default)
4322 elif cftype ==
"int":
4324 value = self.
config.getint(section, option)
4327 if not isinstance(default, int):
4329 "Error... could not parse '%s' int option from '[%s]' section."
4330 % (option, section),
4336 "Warning... could not parse '%s' int option from '[%s]' section. Defaulting to %d."
4337 % (option, section, default)
4340 elif cftype ==
"list":
4342 value = ast.literal_eval(self.
config.get(section, option))
4343 if not isinstance(value, list):
4347 if not isinstance(default, list):
4349 "Error... could not parse '%s' list option from '[%s]' section."
4350 % (option, section),
4356 "Warning... could not parse '%s' list option from '[%s]' section. Defaulting to [%s]."
4357 % (option, section,
", ".join(default))
4360 elif cftype ==
"dict":
4362 value = ast.literal_eval(self.
config.get(section, option))
4364 if not isinstance(value, dict)
and isinstance(default, dict):
4366 "Warning... could not parse '%s' dictionary option from '[%s]' section. Defaulting to %s."
4367 % (option, section,
str(default))
4370 elif not isinstance(value, dict)
and not isinstance(default, dict):
4373 "Error... could not parse '%s' dictionary option from '[%s]' section."
4374 % (option, section),
4382 if not isinstance(default, dict):
4384 "Error... could not parse '%s' dictionary option from '[%s]' section."
4385 % (option, section),
4391 "Warning... could not parse '%s' dictionary option from '[%s]' section. Defaulting to %s."
4392 % (option, section,
str(default))
"Error... trying to get unknown type '%s' from configuration file
        Check Condor universe is 'local', 'vanilla' or 'standard'
        """

        if universe not in ["local", "vanilla", "standard"]:
4416 Create and setup the data find jobs.
4423 if self.
config.has_option(
"condor",
"datafind"):
4426 "condor",
"datafind", cftype=
"dict", allownone=
True
4429 if datafind
is not None:
4431 for ifo
in self.
ifos:
4432 if ifo
not in datafind:
4434 "Warning... no frame/SFT cache file given for %s, try using system gw_data_find instead"
4438 if datafindexec
is None:
4440 "Error... could not find 'gw_data_find' in your 'PATH'",
4447 "condor",
"datafind", datafindexec
4450 if not isinstance(datafind[ifo], list):
4451 datafind[ifo] = [datafind[ifo]]
4454 for cachefile
in datafind[ifo]:
4455 if not os.path.isfile(cachefile):
4457 "Warning... frame/SFT cache file '%s' does not exist, try using system gw_data_find instead"
4461 if datafindexec
is None:
4463 "Error... could not find 'gw_data_find' in your 'PATH'",
4470 "condor",
"datafind", datafindexec
4485 if os.path.isfile(datafind)
and os.access(datafind, os.X_OK):
4493 "Warning... data find executable '%s' does not exist, or is not executable, try using system gw_data_find instead"
4497 if datafindexec
is None:
4499 "Error... could not find 'gw_data_find' in your 'PATH'",
4506 "condor",
"datafind", datafindexec
4516 if datafindexec
is None:
4518 "Error... could not find 'gw_data_find' in your 'PATH'",
4525 "condor",
"datafind", datafindexec
4553 Add data find nodes to a dictionary of nodes
4558 if self.
config.has_option(
"datafind",
"type"):
4559 frtypes = ast.literal_eval(self.
config.get(
"datafind",
"type"))
4561 if not isinstance(frtypes, dict):
4563 "Error... the data find 'types' must be a dictionary of values for each detector",
4569 print(
"Error... no frame 'type' specified for data find", file=sys.stderr)
4573 for ifo
in self.
ifos:
4575 if not ifo
in frtypes:
4576 print(
"Error... no data find type for %s" % ifo, file=sys.stderr)
4580 if isinstance(frtypes[ifo], str):
4582 frtypelist.append(frtypes[ifo])
4583 elif isinstance(frtypes[ifo], list):
4584 frtypelist = frtypes[ifo]
4585 if len(frtypelist) != self.
ndatasets[ifo]:
4587 "Error... the number of frame types must be the same as the number of start times",
4594 "Error... frame types for '{}' must be a single string or a list of strings".
format(
4609 if self.
engine ==
"splinter":
4612 subfile=os.path.join(
4623 dfnode.set_observatory(ifo[0])
4624 dfnode.set_type(frtypelist[i])
4626 dfnode.set_end(self.
endtime[ifo][i])
4630 cachefile = os.path.join(
4634 cachefile = os.path.join(
4638 dfnode.set_output(cachefile)
4641 self.add_node(dfnode)
4643 if self.
engine ==
"splinter":
4649 concatnode.add_parent(dfn)
4650 self.add_node(concatnode)
        Find data segments and output them to an ascii text segment file containing the start and end times of
        each segment. Pairs of start and end times in the `starttime` and `endtime` lists will be iterated over
        and all segments within those pairs will be concatenated into the final file.

        Args:
            starttime (list): a list of GPS start times
            endtime (list): a list of GPS end times
            ifo (str): a detector name, e.g., 'H1'
            outfile (str): the file to output the segment list file to
        """
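        # Illustrative usage (GPS times are examples only): called with
        # starttime=1126051217, endtime=1137254417, ifo="H1" and outfile="segments.txt",
        # this writes a two-column ascii list of segment start and end times to "segments.txt".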
"segmentfind", "segfind", cftype="dict", allownone=True
if segfiles is not None:
    if ifo not in segfiles:
        print("Error... No segment file given for '%s'" % ifo)
    segfile = segfiles[ifo]
    if not os.path.isfile(segfile):
        print("Error... segment file '%s' does not exist." % segfile)
    shutil.copyfile(segfile, outfile)
    "Error... could not copy segment file to location of '%s'."
from gwpy.segments import DataQualityFlag
print("Error... gwpy is required for finding segment lists")
"segmentfind", "server", default="https://segments.ligo.org"
"segmentfind", "segmenttype", cftype="dict"
"segmentfind", "excludetype", cftype="dict", allownone=True
if ifo not in segmenttypes:
    print("Error... No segment type for %s" % ifo, file=sys.stderr)
if isinstance(starttime, int) and isinstance(endtime, int):
elif isinstance(starttime, list) and isinstance(endtime, list):
    if len(starttime) != len(endtime):
        "Error... list of start and end times for the segments are not the same lengths",
    for st, et in zip(starttime, endtime):
        print("Error... start time comes before end time!", file=sys.stderr)
print("Error... start and end times must be integers or lists of integers")
segfp = open(outfile, "w")
print("Error... could not open segment list file")
for st, et in zip(sts, ets):
    if isinstance(segmenttypes[ifo], list):
        if not isinstance(segmenttypes[ifo][sidx], str):
            print("Error... segment types must be a string")
        elif len(segmenttypes[ifo]) != len(sts):
            "Error... number of segment types is not the same as the number of start times"
        segmenttype = segmenttypes[ifo][sidx]
    if not isinstance(segmenttypes[ifo], str):
        print("Error... segment types must be a string")
    segmenttype = segmenttypes[ifo]
    sts.strip() for sts in segmenttype.split(",")
    for i in range(len(segtypes)):
        query = DataQualityFlag.query_dqsegdb(segtypes[i], st, et, url=server)
        query = query.active
        if excludesegs is not None:
            if ifo in excludesegs:
                if isinstance(excludesegs[ifo], list):
                    if not isinstance(excludesegs[ifo][sidx], str):
                        print("Error... exclude types must be a string")
                    elif len(excludesegs[ifo]) != len(sts):
                        "Error... number of exclude types is not the same as the number of start times"
                    excludetype = excludesegs[ifo][sidx]
                if not isinstance(excludesegs[ifo], str):
                    print("Error... exclude types must be a string")
                excludetype = excludesegs[ifo]
                if len(excludetype) > 0:
                    sts.strip() for sts in excludetype.split(",")
                    for j in range(len(segextypes)):
                        exquery = DataQualityFlag.query_dqsegdb(segextypes[j], st, et, url=server)
                        query = query & ~exquery.active
        if segquery is None:
            segquery = query.copy()
        segquery = segquery & query
for thisseg in segquery:
    print(int(thisseg[0]), int(thisseg[1]), file=segfp)
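# Illustrative sketch (not part of the pipeline class): querying science-mode
# segments with gwpy and writing "start end" pairs to a text file, mirroring what
# find_data_segments produces. The flag name and GPS times are placeholder values.
from gwpy.segments import DataQualityFlag

flag = "H1:DMT-ANALYSIS_READY:1"       # assumed example segment type
start, end = 1126051217, 1126137617    # assumed example GPS interval
query = DataQualityFlag.query_dqsegdb(flag, start, end, url="https://segments.ligo.org")

with open("segments.txt", "w") as segfp:
    for seg in query.active:
        print(int(seg[0]), int(seg[1]), file=segfp)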
Search through the PATH environment for the first instance of the executable file "filename"
if os.path.isfile(filename) and os.access(filename, os.X_OK):
for d in os.environ["PATH"].split(os.pathsep):
    filecheck = os.path.join(d, filename)
    if os.path.isfile(filecheck) and os.access(filecheck, os.X_OK):
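# Illustrative note (an assumption, not part of the original module): the PATH
# search done by find_exec_file above, and the directory creation done by the
# mkdirs helper just below, have standard-library equivalents.
import os
import shutil

datafindexec = shutil.which("gw_data_find")  # full path to the executable, or None
os.makedirs("/tmp/knope_example/run1", exist_ok=True)  # placeholder path; no error if it already exists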
Helper function. Make the given directory, creating intermediate
dirs if necessary, and don't complain about it already existing.
if os.access(path, os.W_OK) and os.path.isdir(path):
print("Error... cannot make directory '%s'" % path, file=sys.stderr)
class heterodyneJob(pipeline.CondorDAGJob, pipeline.AnalysisJob):
    A lalpulsar_heterodyne job to heterodyne the data.
    pipeline.AnalysisJob.__init__(self, None)
    if accgroup != None:
        self.add_condor_cmd("accounting_group", accgroup)
        self.add_condor_cmd("accounting_group_user", accuser)
    self.add_condor_cmd("getenv", "True")
    if requestmemory is not None:
        if isinstance(requestmemory, int):
            self.add_condor_cmd("request_memory", requestmemory)
    self.set_stdout_file(os.path.join(logdir, "heterodyne-$(cluster).out"))
    self.set_stderr_file(os.path.join(logdir, "heterodyne-$(cluster).err"))
    self.set_stdout_file("heterodyne-$(cluster).out")
    self.set_stderr_file("heterodyne-$(cluster).err")
    self.set_sub_file(os.path.join(rundir, subprefix + "heterodyne.sub"))
    self.set_sub_file(subprefix + "heterodyne.sub")
class heterodyneNode(pipeline.CondorDAGNode, pipeline.AnalysisNode):
    A heterodyneNode runs an instance of lalpulsar_heterodyne in a condor DAG.
    job = A CondorDAGJob that can run an instance of lalpulsar_heterodyne
    pipeline.CondorDAGNode.__init__(self, job)
    pipeline.AnalysisNode.__init__(self)
    self.add_var_opt("data-file", data_file)
    self.add_var_opt("data-chunk-length", maxdatalength)
    self.add_var_opt("output-file", output_file)
    self.add_var_opt("seg-file", seg_list)
    self.add_var_opt("ifo", ifo)
    self.add_var_opt("param-file", param_file)
    self.add_var_opt("freq-factor", freq_factor)
    self.add_var_opt("param-file-update", param_file_update)
    self.add_var_opt("manual-epoch", manual_epoch)
    self.add_var_opt("ephem-earth-file", ephem_earth_file)
    self.add_var_opt("ephem-sun-file", ephem_sun_file)
    self.add_var_opt("ephem-time-file", ephem_time_file)
    self.add_var_opt("pulsar", pulsar)
    self.add_var_opt("heterodyne-flag", het_flag)
    self.add_var_opt("filter-knee", filter_knee)
    self.add_var_opt("channel", channel)
    self.add_var_opt("sample-rate", sample_rate)
    self.add_var_opt("resample-rate", resample_rate)
    self.add_var_opt("stddev-thresh", stddev_thresh)
    self.add_var_opt("calibrate", "")
    self.add_var_opt("verbose", "")
    self.add_var_opt("binary-input", "")
    self.add_var_opt("binary-output", "")
    self.add_var_opt("gzip-output", "")
    self.add_var_opt("response-file", response_function)
    self.add_var_opt("coefficient-file", coefficient_file)
    self.add_var_opt("sensing-function", sensing_function)
    self.add_var_opt("open-loop-gain", open_loop_gain)
    self.add_var_opt("scale-factor", scale_fac)
    self.add_var_opt("high-pass-freq", high_pass)
class splinterJob(pipeline.CondorDAGJob, pipeline.AnalysisJob):
    A lalpulsar_SplInter job to process SFT data.
    pipeline.AnalysisJob.__init__(self, None)
    if accgroup != None:
        self.add_condor_cmd("accounting_group", accgroup)
        self.add_condor_cmd("accounting_group_user", accuser)
    self.add_condor_cmd("getenv", "True")
    if requestmemory is not None:
        if isinstance(requestmemory, int):
            self.add_condor_cmd("request_memory", requestmemory)
    self.set_stdout_file(os.path.join(logdir, "splinter-$(cluster).out"))
    self.set_stderr_file(os.path.join(logdir, "splinter-$(cluster).err"))
    self.set_stdout_file("splinter-$(cluster).out")
    self.set_stderr_file("splinter-$(cluster).err")
    self.set_sub_file(os.path.join(rundir, "splinter.sub"))
    self.set_sub_file("splinter.sub")
class splinterNode(pipeline.CondorDAGNode, pipeline.AnalysisNode):
    A splinterNode runs an instance of lalpulsar_Splinter in a condor DAG.
    job = A CondorDAGJob that can run an instance of lalpulsar_SplInter
    pipeline.CondorDAGNode.__init__(self, job)
    pipeline.AnalysisNode.__init__(self)
    self.add_var_opt("start-freq", f)
    self.add_var_opt("end-freq", f)
    self.add_var_opt("sft-cache", f)
    self.add_var_opt("sft-lalcache", f)
    self.add_var_opt("sft-loc", f)
    self.add_var_opt("output-dir", f)
    self.add_var_opt("seg-file", f)
    self.add_var_opt("ifo", ifo)
    self.add_var_opt("param-file", f)
    self.add_var_opt("param-dir", f)
    self.add_var_opt("freq-factor", f)
    self.add_var_opt("bandwidth", f)
    self.add_var_opt("min-seg-length", f)
    self.add_var_opt("max-seg-length", f)
    self.add_var_opt("ephem-dir", f)
    self.add_var_opt("stddevthresh", f)
    self.add_var_opt("gzip", "")
    self.add_var_opt("starttime", f)
    self.add_var_opt("endtime", f)
Job for concatenating processed (heterodyned or spectrally interpolated) files

class concatJob(pipeline.CondorDAGJob, pipeline.AnalysisJob):
    A concatenation job (using "cat" and output to stdout - a new job is needed for each pulsar)
    def __init__(self, subfile=None, output=None, accgroup=None, accuser=None, logdir=None):
        pipeline.AnalysisJob.__init__(self, None)
        if accgroup != None:
            self.add_condor_cmd("accounting_group", accgroup)
            self.add_condor_cmd("accounting_group_user", accuser)
        "Error... Condor sub file required for concatenation job",
        self.set_sub_file(subfile)
        "Error... output file required for concatenation job", file=sys.stderr
        self.set_stdout_file(output)
        self.set_stderr_file(os.path.join(logdir, "concat-$(cluster).err"))
        self.set_stderr_file("concat-$(cluster).err")
        self.add_arg("$(macrocatfiles)")

class concatNode(pipeline.CondorDAGNode, pipeline.AnalysisNode):
    A node for a concatJob
    job = A CondorDAGJob that can run an instance of a concatenation (cat) job.
    pipeline.CondorDAGNode.__init__(self, job)
    pipeline.AnalysisNode.__init__(self)
    self.add_macro("macrocatfiles", " ".join(files))
Job for removing files

class removeJob(pipeline.CondorDAGJob, pipeline.AnalysisJob):
    A remove job (using "rm" to remove files)
    def __init__(self, accgroup=None, accuser=None, logdir=None, rundir=None):
        pipeline.AnalysisJob.__init__(self, None)
        if accgroup != None:
            self.add_condor_cmd("accounting_group", accgroup)
            self.add_condor_cmd("accounting_group_user", accuser)
        self.set_stdout_file(os.path.join(logdir, "rm-$(cluster).out"))
        self.set_stderr_file(os.path.join(logdir, "rm-$(cluster).err"))
        self.set_stdout_file("rm-$(cluster).out")
        self.set_stderr_file("rm-$(cluster).err")
        "-f $(macrormfiles)"
        self.set_sub_file(os.path.join(rundir, "rm.sub"))
        self.set_sub_file("rm.sub")

class removeNode(pipeline.CondorDAGNode, pipeline.AnalysisNode):
    An instance of a removeJob in a condor DAG.
    job = A CondorDAGJob that can run an instance of rm.
    pipeline.CondorDAGNode.__init__(self, job)
    pipeline.AnalysisNode.__init__(self)
    self.add_macro("macrormfiles", " ".join(files))
Job for moving files

class moveJob(pipeline.CondorDAGJob, pipeline.AnalysisJob):
    A move job (using "mv" to move files)
    def __init__(self, accgroup=None, accuser=None, logdir=None, rundir=None):
        pipeline.AnalysisJob.__init__(self, None)
        if accgroup != None:
            self.add_condor_cmd("accounting_group", accgroup)
            self.add_condor_cmd("accounting_group_user", accuser)
        self.set_stdout_file(os.path.join(logdir, "mv-$(cluster).out"))
        self.set_stderr_file(os.path.join(logdir, "mv-$(cluster).err"))
        self.set_stdout_file("mv-$(cluster).out")
        self.set_stderr_file("mv-$(cluster).err")
        "$(macrosource) $(macrodestination)"
        self.set_sub_file(os.path.join(rundir, "mv.sub"))
        self.set_sub_file("mv.sub")

class moveNode(pipeline.CondorDAGNode, pipeline.AnalysisNode):
    An instance of a moveJob in a condor DAG.
    job = A CondorDAGJob that can run an instance of mv.
    pipeline.CondorDAGNode.__init__(self, job)
    pipeline.AnalysisNode.__init__(self)
    self.add_macro("macrosource", sfile)
    self.add_macro("macrodestination", dfile)
Job for copying files

class copyJob(pipeline.CondorDAGJob, pipeline.AnalysisJob):
    A copy job (using "cp" to copy files)
    def __init__(self, accgroup=None, accuser=None, logdir=None, rundir=None):
        pipeline.AnalysisJob.__init__(self, None)
        if accgroup != None:
            self.add_condor_cmd("accounting_group", accgroup)
            self.add_condor_cmd("accounting_group_user", accuser)
        self.set_stdout_file(os.path.join(logdir, "cp-$(cluster).out"))
        self.set_stderr_file(os.path.join(logdir, "cp-$(cluster).err"))
        self.set_stdout_file("cp-$(cluster).out")
        self.set_stderr_file("cp-$(cluster).err")
        "$(macrosource) $(macrodestination)"
        self.set_sub_file(os.path.join(rundir, "cp.sub"))
        self.set_sub_file("cp.sub")

class copyNode(pipeline.CondorDAGNode, pipeline.AnalysisNode):
    An instance of a copyJob in a condor DAG.
    job = A CondorDAGJob that can run an instance of cp.
    pipeline.CondorDAGNode.__init__(self, job)
    pipeline.AnalysisNode.__init__(self)
    self.add_macro("macrosource", sfile)
    self.add_macro("macrodestination", dfile)
Pulsar parameter estimation pipeline utilities

class ppeJob(pipeline.CondorDAGJob, pipeline.AnalysisJob):
    A parameter estimation job
    pipeline.AnalysisJob.__init__(self, None)
    if accgroup != None:
        self.add_condor_cmd("accounting_group", accgroup)
        self.add_condor_cmd("accounting_group_user", accuser)
    self.add_condor_cmd("getenv", "True")
    if requestmemory is not None:
        if isinstance(requestmemory, int):
            self.add_condor_cmd("request_memory", requestmemory)
    self.set_stdout_file(os.path.join(logdir, "ppe$(logname)-$(cluster).out"))
    self.set_stderr_file(os.path.join(logdir, "ppe$(logname)-$(cluster).err"))
    self.set_stdout_file("ppe$(logname)-$(cluster).out")
    self.set_stderr_file("ppe$(logname)-$(cluster).err")
    self.set_sub_file(os.path.join(rundir, "ppe.sub"))
    self.set_sub_file("ppe.sub")
class ppeNode(pipeline.CondorDAGNode, pipeline.AnalysisNode):
    A ppeNode runs an instance of the parameter estimation code in a condor DAG.
    def __init__(self, job, psrname=""):
        job = A CondorDAGJob that can run an instance of parameter estimation code.
        pipeline.CondorDAGNode.__init__(self, job)
        pipeline.AnalysisNode.__init__(self)
        self.add_macro("macroargs", "")
        if len(psrname) > 0:
            self.add_macro("logname", "-" + psrname)
        else:
            self.add_macro("logname", "")
    self.add_var_opt("detectors", detectors)
    if "logname" in self.get_opts():
        curmacroval = self.get_opts()["logname"]
    curmacroval = curmacroval + "-" + detectors.replace(",", "")
    self.add_macro("logname", curmacroval)
    self.add_var_opt("verbose", "")
    self.add_var_opt("par-file", parfile)
    if "macroargs" in self.get_opts():
        curmacroval = self.get_opts()["macroargs"]
    curmacroval = curmacroval + " --cor-file " + corfile
    self.add_macro("macroargs", curmacroval)
    self.add_var_opt("input-files", inputfiles)
    self.add_var_opt("outfile", of)
    self.add_var_opt("chunk-min", cmin)
    self.add_var_opt("chunk-max", cmax)
    self.add_var_opt("start-time", starttime)
    self.add_var_opt("end-time", endtime)
    self.add_var_opt("truncate-time", trunc)
    self.add_var_opt("truncate-samples", trunc)
    self.add_var_opt("truncate-fraction", trunc)
    self.add_var_opt("veto-threshold", veto)
    self.add_var_opt("psi-bins", pb)
    self.add_var_opt("time-bins", tb)
    self.add_var_opt("prior-file", pf)
    self.add_var_opt("ephem-earth", ee)
    self.add_var_opt("ephem-sun", es)
    self.add_var_opt("ephem-timecorr", et)
    self.add_var_opt("harmonics", h)
    self.add_var_opt("Nlive", nl)
    self.add_var_opt("Nmcmc", nm)
    self.add_var_opt("Nmcmcinitial", nm)
    self.add_var_opt("Nruns", nr)
    self.add_var_opt("tolerance", tol)
    self.add_var_opt("randomseed", rs)
    self.add_var_opt("ensembleStretch", f)
    self.add_var_opt("ensembleWalk", f)
    self.add_var_opt("temperature", temp)
    self.add_var_opt("diffev", de)
    self.add_var_opt("inject-file", ifil)
    self.add_var_opt("inject-output", iout)
    self.add_var_opt("fake-data", fd)
    self.add_var_opt("fake-psd", fp)
    self.add_var_opt("fake-starts", fs)
    self.add_var_opt("fake-lengths", fl)
    self.add_var_opt("fake-dt", fdt)
    self.add_var_opt("scale-snr", ssnr)
    self.add_var_opt("sample-files", ssf)
    self.add_var_opt("sample-nlives", snl)
    self.add_var_opt("prior-cell", pc)
    self.add_var_opt("oldChunks", "")
    if "macroargs" in self.get_opts():
        curmacroval = self.get_opts()["macroargs"]
    curmacroval = curmacroval + " --source-model"
    self.add_macro("macroargs", curmacroval)
    if "macroargs" in self.get_opts():
        curmacroval = self.get_opts()["macroargs"]
    curmacroval = curmacroval + " --biaxial"
    self.add_macro("macroargs", curmacroval)
    self.add_var_opt("gaussian-like", "")
    if "macroargs" in self.get_opts():
        curmacroval = self.get_opts()["macroargs"]
    curmacroval = curmacroval + " --randomise " + f
    self.add_macro("macroargs", curmacroval)
    if "macroargs" in self.get_opts():
        curmacroval = self.get_opts()["macroargs"]
    curmacroval = curmacroval + " --roq"
    self.add_macro("macroargs", curmacroval)
    if "macroargs" in self.get_opts():
        curmacroval = self.get_opts()["macroargs"]
    curmacroval = curmacroval + " --ntraining " + f
    self.add_macro("macroargs", curmacroval)
    if "macroargs" in self.get_opts():
        curmacroval = self.get_opts()["macroargs"]
    curmacroval = curmacroval + " --roq-tolerance " + f
    self.add_macro("macroargs", curmacroval)
    if "macroargs" in self.get_opts():
        curmacroval = self.get_opts()["macroargs"]
    curmacroval = curmacroval + " --roq-uniform"
    self.add_macro("macroargs", curmacroval)
    if "macroargs" in self.get_opts():
        curmacroval = self.get_opts()["macroargs"]
    curmacroval = curmacroval + " --input-weights " + f
    self.add_macro("macroargs", curmacroval)
    if "macroargs" in self.get_opts():
        curmacroval = self.get_opts()["macroargs"]
    curmacroval = curmacroval + " --output-weights " + f
    self.add_macro("macroargs", curmacroval)
    if "macroargs" in self.get_opts():
        curmacroval = self.get_opts()["macroargs"]
    curmacroval = curmacroval + " --chunk-max " + f
    self.add_macro("macroargs", curmacroval)
Job for creating the result page for a particular source

class resultpageJob(pipeline.CondorDAGJob, pipeline.AnalysisJob):
    def __init__(self, execu, univ="local", accgroup=None, accuser=None, logdir=None, rundir=None):
        pipeline.AnalysisJob.__init__(self, None)
        if accgroup != None:
            self.add_condor_cmd("accounting_group", accgroup)
            self.add_condor_cmd("accounting_group_user", accuser)
        self.add_condor_cmd("getenv", "True")
        self.set_stdout_file(os.path.join(logdir, "resultpage-$(cluster).out"))
        self.set_stderr_file(os.path.join(logdir, "resultpage-$(cluster).err"))
        self.set_stdout_file("resultpage-$(cluster).out")
        self.set_stderr_file("resultpage-$(cluster).err")
        self.set_sub_file(os.path.join(rundir, "resultpage.sub"))
        self.set_sub_file("resultpage.sub")
        self.add_arg("$(macroconfigfile)")
class resultpageNode(pipeline.CondorDAGNode, pipeline.AnalysisNode):
    A resultpageNode runs an instance of the result page script in a condor DAG.
    job = A CondorDAGJob that can run an instance of lalpulsar_knope_result_page
    pipeline.CondorDAGNode.__init__(self, job)
    pipeline.AnalysisNode.__init__(self)
    self.add_macro("macroconfigfile", configfile)
Job for creating the collated results page for all sources

class collateJob(pipeline.CondorDAGJob, pipeline.AnalysisJob):
    def __init__(self, execu, univ="local", accgroup=None, accuser=None, logdir=None, rundir=None):
        pipeline.AnalysisJob.__init__(self, None)
        if accgroup != None:
            self.add_condor_cmd("accounting_group", accgroup)
            self.add_condor_cmd("accounting_group_user", accuser)
        self.add_condor_cmd("getenv", "True")
        self.set_stdout_file(os.path.join(logdir, "collate-$(cluster).out"))
        self.set_stderr_file(os.path.join(logdir, "collate-$(cluster).err"))
        self.set_stdout_file("collate-$(cluster).out")
        self.set_stderr_file("collate-$(cluster).err")
        self.set_sub_file(os.path.join(rundir, "collate.sub"))
        self.set_sub_file("collate.sub")
        self.add_arg("$(macroconfigfile)")
class collateNode(pipeline.CondorDAGNode, pipeline.AnalysisNode):
    A collateNode runs an instance of the result page collation script in a condor DAG.
    job = A CondorDAGJob that can run an instance of lalpulsar_knope_collate_results
    pipeline.CondorDAGNode.__init__(self, job)
    pipeline.AnalysisNode.__init__(self)
    self.add_macro("macroconfigfile", configfile)
class nest2posJob(pipeline.CondorDAGJob, pipeline.AnalysisJob):
    A merge nested sampling files job to use lalinference_nest2pos
    def __init__(self, execu, univ="local", accgroup=None, accuser=None, logdir=None, rundir=None):
        pipeline.AnalysisJob.__init__(self, None)
        if accgroup != None:
            self.add_condor_cmd("accounting_group", accgroup)
            self.add_condor_cmd("accounting_group_user", accuser)
        self.add_condor_cmd("getenv", "True")
        self.set_stdout_file(os.path.join(logdir, "n2p-$(cluster).out"))
        self.set_stderr_file(os.path.join(logdir, "n2p-$(cluster).err"))
        self.set_stdout_file("n2p-$(cluster).out")
        self.set_stderr_file("n2p-$(cluster).err")
        self.add_arg("--non-strict-versions")
        self.add_arg("$(macroinputfiles)")
        self.set_sub_file(os.path.join(rundir, "n2p.sub"))
        self.set_sub_file("n2p.sub")
class nest2posNode(pipeline.CondorDAGNode, pipeline.AnalysisNode):
    A nest2posNode runs an instance of lalinference_nest2pos to combine individual nested
    sample files in a condor DAG.
    job = A CondorDAGJob that can run an instance of the nested sample combination script
    pipeline.CondorDAGNode.__init__(self, job)
    pipeline.AnalysisNode.__init__(self)
    fe = os.path.splitext(nestfiles[0])[-1].lower()
    if fe != ".hdf" and fe != ".h5":
        header = nestfiles[0].rstrip(".gz") + "_params.txt"
        self.add_var_opt("headers", header)
    self.add_macro("macroinputfiles", " ".join(nestfiles))
    self.add_var_opt("Nlive", nestlive)
    self.add_var_opt("pos", outfile)
    self.add_var_opt("npos", npos)
    self.add_var_opt("gzip", "")
A job to create an individual pulsar results page
pipeline.AnalysisJob.__init__(self, None)
self.add_condor_cmd("getenv", "True")
self.add_condor_cmd("accounting_group", accgroup)
self.add_condor_cmd("accounting_group_user", accuser)
self.set_stdout_file(logpath + "/create_results_page-$(cluster).out")
self.set_stderr_file(logpath + "/create_results_page-$(cluster).err")
self.set_sub_file("create_results_page.sub")
self.add_arg("$(macrom)")
self.add_arg("$(macrobk)")
self.add_arg("$(macroi)")
self.add_arg("$(macrof)")
self.add_arg("$(macrow)")
self.add_arg("$(macros)")
A createresultspage node to run as part of a condor DAG.
job = A CondorDAGJob that can run the results page creation script
pipeline.CondorDAGNode.__init__(self, job)
pipeline.AnalysisNode.__init__(self)
self.add_var_opt("o", val, short=True)
self.add_var_opt("M", "", short=True)
macroval = "%s-m %s " % (macroval, f)
self.add_macro("macrom", macroval)
self.add_macro("macrof", "")
self.add_var_opt("nested", "")
macroval = "%s-f %s " % (macroval, f)
self.add_macro("macrof", macroval)
self.add_macro("macrom", "")
self.add_var_opt("p", val, short=True)
macroval = "%s-b %s " % (macroval, f)
self.add_macro("macrobk", macroval)
self.add_var_opt("r", val, short=True)
macroval = "%s-i %s " % (macroval, f)
self.add_macro("macroi", macroval)
self.add_var_opt("n", val, short=True)
self.add_var_opt("e", "", short=True)
self.add_macro("macros", "--sw-inj")
self.add_macro("macros", "")
self.add_macro("macrow", "--hw-inj")
self.add_macro("macrow", "")
A job to collate all the individual pulsar results pages
pipeline.AnalysisJob.__init__(self, None)
self.add_condor_cmd("getenv", "True")
self.add_condor_cmd("accounting_group", accgroup)
self.add_condor_cmd("accounting_group_user", accuser)
self.set_stdout_file(logpath + "/collate_results-$(cluster).out")
self.set_stderr_file(logpath + "/collate_results-$(cluster).err")
self.set_sub_file("collate_results.sub")
self.add_arg("$(macroifo)")
A collateresults node to run as part of a condor DAG.
job = A CondorDAGJob that can run the results collation script
pipeline.CondorDAGNode.__init__(self, job)
pipeline.AnalysisNode.__init__(self)
self.add_var_opt("o", val, short=True)
self.add_var_opt("z", val, short=True)
self.add_var_opt("p", val, short=True)
self.add_var_opt("l", "", short=True)
self.add_var_opt("s", val, short=True)
macroval = "%s-i %s " % (macroval, f)
self.add_macro("macroifo", macroval)
macroval = "%s-u %s " % (macroval, f)
self.add_macro("macrou", macroval)
macroval = "%s-n %s " % (macroval, f)
self.add_macro("macron", macroval)
self.add_var_opt("k", "", short=True)
self.add_var_opt("t", "", short=True)
self.add_var_opt("w", "", short=True)
self.add_var_opt("e", "", short=True)
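# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the pipeline): how the job/node classes
# defined above can be wired into a Condor DAG by hand. All executable paths,
# accounting tags and file names below are placeholder assumptions; in practice
# the knopeDAG class assembles these nodes from the configuration file.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import os
    from lal import pipeline

    rundir = "/tmp/knope_example"          # placeholder run directory
    logdir = os.path.join(rundir, "log")
    os.makedirs(logdir, exist_ok=True)

    dag = pipeline.CondorDAG(os.path.join(rundir, "example.log"))
    dag.set_dag_file(os.path.join(rundir, "example"))

    # heterodyne the raw data for one detector and one pulsar
    hetjob = heterodyneJob(
        "/usr/bin/lalpulsar_heterodyne",   # placeholder executable path
        univ="vanilla",
        accgroup="accounting.tag.placeholder",
        accuser="albert.einstein",
        logdir=logdir,
        rundir=rundir,
    )
    hetnode = heterodyneNode(hetjob)
    hetnode.set_freq_factor(2.0)
    hetnode.set_param_file(os.path.join(rundir, "J0534+2200.par"))
    hetnode.set_data_file(os.path.join(rundir, "H1_cache.lcf"))
    hetnode.set_seg_list(os.path.join(rundir, "segments.txt"))
    hetnode.set_channel("H1:GDS-CALIB_STRAIN")
    hetnode.set_output_file(os.path.join(rundir, "fine-H1-J0534+2200.txt"))
    dag.add_node(hetnode)

    # parameter estimation on the heterodyned data
    pejob = ppeJob(
        "/usr/bin/lalpulsar_parameter_estimation_nested",  # placeholder path
        univ="vanilla",
        accgroup="accounting.tag.placeholder",
        accuser="albert.einstein",
        logdir=logdir,
        rundir=rundir,
    )
    penode = ppeNode(pejob, psrname="J0534+2200")
    penode.set_detectors("H1")
    penode.set_par_file(os.path.join(rundir, "J0534+2200.par"))
    penode.set_prior_file(os.path.join(rundir, "J0534+2200.prior"))
    penode.set_input_files(os.path.join(rundir, "fine-H1-J0534+2200.txt"))
    penode.set_outfile(os.path.join(rundir, "nest_J0534+2200.hdf"))
    penode.add_parent(hetnode)
    dag.add_node(penode)

    # merge the nested samples into posterior samples
    n2pjob = nest2posJob(
        "/usr/bin/lalinference_nest2pos",  # placeholder executable path
        univ="local",
        accgroup="accounting.tag.placeholder",
        accuser="albert.einstein",
        logdir=logdir,
        rundir=rundir,
    )
    n2pnode = nest2posNode(n2pjob)
    n2pnode.set_nest_files([os.path.join(rundir, "nest_J0534+2200.hdf")])
    n2pnode.set_nest_live(2048)
    n2pnode.set_outfile(os.path.join(rundir, "posterior_J0534+2200.hdf"))
    n2pnode.add_parent(penode)
    dag.add_node(n2pnode)

    dag.write_sub_files()
    dag.write_dag()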