# Excess power offline pipeline's likelihood stage construction script.


from __future__ import print_function


import itertools
import sys
import tempfile
from functools import reduce
from optparse import OptionParser
from configparser import ConfigParser

from igwn_segments import utils as segmentsUtils
import lal
from lal import pipeline
from lal.utils import CacheEntry
from lalburst import power


__author__ = "Kipp Cannon <kipp@gravity.phys.uwm.edu>"
__version__ = "$Revision$"
def parse_command_line():
    parser = OptionParser(
        version = "%prog CVS $Id$",
        usage = "%prog [options]",
        description = "Constructs the likelihood-ratio based coincidence stage for an excess power analysis. The input consists of one or more LAL caches listing the sqlite database trigger files, and a list of segments giving the time intervals that should be considered to be independent. The LAL caches list all trigger files together, that is injections, time slides, and zero-lag. The individual trigger files are self-describing, so the analysis codes can autodetect their type. Each segment will be analyzed using the files that intersect it: the likelihood ratios will be constructed from the injection and time-slide triggers contained in files that intersect the segment, and that data will be used to assign likelihoods to the injection, time-slide, and zero-lag coincs in all files that intersect the same segment."
    )
    parser.add_option("--input-cache", metavar = "filename", action = "append", default = [], help = "Add the contents of this cache file to the list of files from which to draw statistics.")
    parser.add_option("--round-robin-cache", metavar = "filename", action = "append", default = [], help = "Add the contents of this cache file to the list of files from which to draw injection statistics in a round-robin way.")
    parser.add_option("--condor-log-dir", metavar = "path", default = ".", help = "Set the directory for Condor log files (default = \".\").")
    parser.add_option("--config-file", metavar = "filename", default = "power.ini", help = "Set .ini configuration file name (default = \"power.ini\").")
    parser.add_option("--distribution-segments", metavar = "filename", help = "Read boundaries for distribution data intervals from this segwizard format segments file (required).")
    parser.add_option("-v", "--verbose", action = "store_true", help = "Be verbose.")
    options, filenames = parser.parse_args()
    if options.distribution_segments is None:
        raise ValueError("missing required argument --distribution-segments")
    options.distribution_segments = segmentsUtils.fromsegwizard(open(options.distribution_segments), coltype = lal.LIGOTimeGPS)

    options.input_cache = set([CacheEntry(line) for filename in options.input_cache for line in open(filename)])
    options.round_robin_cache = [set(map(CacheEntry, open(filename))) for filename in options.round_robin_cache]

    return options, (filenames or [])
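

# A usage sketch (the executable and file names here are hypothetical, for
# illustration only):
#
#   power_likelihood_pipe --config-file power.ini \
#       --distribution-segments distribution.seg \
#       --input-cache triggers.cache \
#       --round-robin-cache injections_00.cache \
#       --round-robin-cache injections_01.cache \
#       --verbose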
def parse_config_file(options):
    if options.verbose:
        print("reading %s ..." % options.config_file, file=sys.stderr)
    config = ConfigParser()
    config.read(options.config_file)

    options.tag = config.get("pipeline", "user_tag")
    options.likelihood_data_cache_base = config.get("pipeline", "likelihood_data_cache_base")

    return config
power.init_job_types(config_parser)


power.make_dag_directories(config_parser)
dag = pipeline.CondorDAG(tempfile.mkstemp(".log", "power_likelihood_", options.condor_log_dir)[1])
dag.set_dag_file("power_likelihood")
input_cache_nodes = set()
round_robin_cache_nodes = [set() for cache in options.round_robin_cache]
for seg in options.distribution_segments:
    if options.verbose:
        print("generating distribution measurement jobs for %s ..." % str(seg), file=sys.stderr)
    input_cache_nodes |= power.make_burca_tailor_fragment(dag, set([entry for entry in options.input_cache if entry.segmentlistdict.intersects_segment(seg)]), seg, "LIKELIHOOD_MAIN")
    for i, (nodes, cache) in enumerate(zip(round_robin_cache_nodes, options.round_robin_cache)):
        nodes |= power.make_burca_tailor_fragment(dag, set([entry for entry in cache if entry.segmentlistdict.intersects_segment(seg)]), seg, "LIKELIHOOD_RR%02d" % i)
if options.verbose:
    print("generating likelihood assignment jobs for main group ...", file=sys.stderr)
parents = reduce(lambda a, b: a | b, round_robin_cache_nodes, input_cache_nodes)
nodes = power.make_burca2_fragment(dag, options.input_cache, parents, "LIKELIHOOD_MAIN")


def round_robin(round_robin_cache_nodes, round_robin_cache):
    # pair each round-robin cache with the union of the distribution
    # measurement nodes from all of the *other* caches (leave-one-out)
    parents = list(itertools.combinations(round_robin_cache_nodes, len(round_robin_cache_nodes) - 1))
    # reverse so that the parent set omitting cache i lines up with cache i
    # when the two sequences are zipped together
    parents.reverse()
    parents = [reduce(lambda a, b: a | b, seq) for seq in parents]
    return zip(parents, [cache for (cache,) in itertools.combinations(round_robin_cache, 1)])
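
# For example, with three round-robin caches A, B, C, round_robin() yields
# (nodes(B) | nodes(C), A), (nodes(A) | nodes(C), B), (nodes(A) | nodes(B), C):
# each cache has likelihoods assigned using distribution data measured from
# the other caches only.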
for i, (parents, apply_to_cache) in enumerate(round_robin(round_robin_cache_nodes, options.round_robin_cache)):
    if options.verbose:
        print("generating likelihood assignment jobs for round-robin group %d ..." % i, file=sys.stderr)
    nodes |= power.make_burca2_fragment(dag, apply_to_cache, parents | input_cache_nodes, "LIKELIHOOD_RR%02d" % i)


if options.verbose:
    print("writing dag ...", file=sys.stderr)