23"""Creates DAGs to run jobs that generates SFTs"""
import argparse
import os
import re
from pathlib import Path

from lalpulsar import git_version
from lalpulsar import (
    SFTFilenameSpec,
    FillSFTFilenameSpecStrings,
    BuildSFTFilenameFromSpec,
)
# Module metadata; version/date come from the lalpulsar git build information.
__author__ = "Evan Goetz <evan.goetz@ligo.org>, Greg Mendell"
__version__ = git_version.id
__date__ = git_version.date
103 """Create SFT file name from specification"""
105 spec = SFTFilenameSpec()
107 FillSFTFilenameSpecStrings(
111 detector=channel[:2],
117 spec.pubObsRun = obs
or 0
118 spec.pubRevision = rev
or 0
119 spec.window_param = par
or 0
121 spec.SFTtimebase = Tsft
122 spec.gpsStart = gpsstart
125 return BuildSFTFilenameFromSpec(spec)
def writeToDag(dagFID, nodeCount, startTimeThisNode, endTimeThisNode, site, args):
    """Write one node's datafind and MakeSFTs jobs to the open DAG file.

    Parameters
    ----------
    dagFID : file object
        Open handle to the DAG file being written
    nodeCount : int
        Index of this node's jobs (used to build unique job names)
    startTimeThisNode : int
        GPS start time of the data analyzed by this node
    endTimeThisNode : int
        GPS end time of the data analyzed by this node
    site : str
        Observatory site prefix (first letter of the channel name)
    args : argparse.Namespace
        Parsed command-line options
    """
    datafind = f"datafind_{nodeCount}"
    MakeSFTs = f"MakeSFTs_{nodeCount}"
    startTimeDatafind = startTimeThisNode - args.extra_datafind_time
    endTimeDatafind = endTimeThisNode + args.extra_datafind_time
    tagStringOut = f"{args.tag_string}_{nodeCount}"
    if args.cache_file:
        cacheFile = args.cache_file
    else:
        cacheFile = (
            args.cache_path / f"{site}-{startTimeDatafind}-{endTimeDatafind}.cache"
        )

    # Assemble the lalpulsar_MakeSFTs argument list for this node.
    argList = []
    argList.append(f"-O {args.observing_run}")
    if args.observing_run > 0:
        argList.append(f"-K {args.observing_kind}")
        argList.append(f"-R {args.observing_revision}")
    elif args.misc_desc:
        argList.append(f"-X {args.misc_desc}")
    argList.append(f"-f {args.filter_knee_freq}")
    argList.append(f"-t {args.time_baseline}")
    # SFTs are written to the job's scratch directory ('.') and moved later
    argList.append(f"-p {','.join(['.' for p in args.output_sft_path])}")
    argList.append(f"-C {cacheFile.name}")
    argList.append(f"-s {startTimeThisNode}")
    argList.append(f"-e {endTimeThisNode}")
    argList.append(f"-N {','.join(args.channel_name)}")
    argList.append(f"-F {args.start_freq}")
    argList.append(f"-B {args.band}")
    if args.comment_field:
        argList.append(f"-c {args.comment_field}")

    # Window specification is either "<type>:<param>" or just "<type>".
    # Initialize window_param so it is always bound when passed to
    # sft_name_from_vars below (fix: previously unbound for plain "<type>").
    window_param = None
    if ":" in args.window_type:
        window_type, window_param = args.window_type.split(":")
        window_param = float(window_param)
        argList.append(f"-w {window_type} -r {window_param}")
    else:
        window_type = args.window_type
        argList.append(f"-w {window_type}")

    if args.overlap_fraction:
        argList.append(f"-P {args.overlap_fraction}")
    if args.allow_skipping:
        argList.append("--allow-skipping TRUE")
    argStr = " ".join(argList)

    # Predict the SFT file names this node will create, and the remaps from
    # the scratch directory to the final output paths.
    # NOTE(review): these lists are not referenced by the visible VARS lines;
    # presumably they feed Condor output-file remapping — confirm upstream.
    outputfiles = []
    remap = []
    sft_start = startTimeThisNode
    sft_end = sft_start + args.time_baseline
    while sft_end <= endTimeThisNode:
        for idx, c in enumerate(args.channel_name):
            filename = sft_name_from_vars(
                args.observing_run,
                sft_start,
                args.time_baseline,
                channel=c,
                kind=args.observing_kind,
                rev=args.observing_revision,
                window=window_type,
                par=window_param,
                miscstr=args.misc_desc,
            )
            outputfiles.append(filename)
            # NOTE(review): left-hand side reconstructed as {filename};
            # the source text was garbled at this point.
            remap.append(f"{filename}={args.output_sft_path[idx]/filename}")

        if args.overlap_fraction:
            sft_start += int(round((1 - args.overlap_fraction) * args.time_baseline))
        else:
            sft_start += args.time_baseline
        sft_end = sft_start + args.time_baseline

    # datafind job (skipped entirely when a static cache file was supplied)
    if not args.cache_file:
        dagFID.write(f"JOB {datafind} {Path(dagFID.name).parent / 'datafind.sub'}\n")
        dagFID.write(f"RETRY {datafind} 1\n")
        dagFID.write(
            f'VARS {datafind} gpsstarttime="{startTimeDatafind}" '
            f'gpsendtime="{endTimeDatafind}" observatory="{site}" '
            f'inputdatatype="{args.input_data_type}" tagstring="{tagStringOut}"\n'
        )

    # MakeSFTs job, depending on the datafind job when one was written
    dagFID.write(f"JOB {MakeSFTs} {Path(dagFID.name).parent / 'MakeSFTs.sub'}\n")
    dagFID.write(f"RETRY {MakeSFTs} 1\n")
    dagFID.write(
        f'VARS {MakeSFTs} argList="{argStr}" cachefile="{cacheFile}" '
        f'tagstring="{tagStringOut}"\n'
    )
    if not args.cache_file:
        dagFID.write(f"PARENT {datafind} CHILD {MakeSFTs}\n")
# NOTE(review): This command-line-parser section arrived garbled by text
# extraction: statements are split across lines, the original file's line
# numbers are fused into the text, and most add_argument() option strings,
# types, and defaults are missing entirely. It is left byte-identical below
# to avoid inventing option flags or defaults; restore this section from the
# upstream lalpulsar_MakeSFTDAG script before attempting to run this file.
238parser = argparse.ArgumentParser(
239 description=
"This script creates datafind.sub, MakeSFTs.sub, and a dag \
240 file that generates SFTs based on the options given.",
241 fromfile_prefix_chars=
"@",
248 help=
"For public SFTs, observing run data the SFTs are generated from, or \
249 (in the case of mock data challenge data) the observing \
250 run on which the data is most closely based",
256 choices=[
"RUN",
"AUX",
"SIM",
"DEV"],
257 help=
'For public SFTs, one of: "RUN" for production SFTs of h(t) channels; \
258 "AUX" for SFTs of non-h(t) channels; \
259 "SIM" for mock data challenge or other simulated data; or \
260 "DEV" for development/testing purposes',
264 "--observing-revision",
266 help=
"For public SFTs: revision number starts at 1, and should be incremented once \
267 SFTs have been widely distributed across clusters, advertised \
268 as being ready for use, etc. For example, if mistakes are found \
269 in the initial SFT production run after they have been published, \
270 regenerated SFTs should have a revision number of at least 2",
276 help=
"For private SFTs, miscellaneous part of the SFT \
277 description field in the filename",
281 "--analysis-start-time",
283 help=
"GPS start time of data from which to generate \
284 SFTs (optional and unused if a segment file is given)",
288 "--analysis-end-time",
290 help=
"GPS end time of data from which to generate SFTs \
291 (optional and unused if a segment file is given)",
298 help=
"filename for .dag file (should end in .dag)",
305 help=
"tag string used in names of various files unique to \
306 jobs that will run under the DAG",
313 help=
"input data type for use with the gw_data_find --type \
318 "--extra-datafind-time",
321 help=
"extra time to subtract/add from/to start/end time \
322 arguments of gw_data_find",
328 help=
"string to use with the gw_data_find --match option",
332 "--synchronize-start",
334 help=
"synchronize the start times of the SFTs so that the \
335 start times are synchronized when there are gaps in the \
340 "--filter-knee-freq",
343 help=
"high pass filter knee frequency used on time domain \
344 data before generating SFTs",
351 help=
"time baseline of SFTs (e.g., 60 or 1800 seconds)",
358 help=
"Path where to save the SFT files. Can specify multiple options, \
359 If specifying multiple options then it is required to specify the \
360 same number of output-sft-path options as the number of channels. \
361 The first listed channel will have the SFTs go into the first \
362 listed output-sft-path. Otherwise specify only one output path. \
363 If one path is specified and more than 1 channels are specified \
364 then --observing-run must be >= 1 and --observing-kind and \
365 --observing-revision must be set",
372 help=
"path to cache files that will be produced by \
373 gw_data_find (default is $PWD/cache; this directory is \
374 created if it does not exist and must agree with that \
375 given in .sub files)",
381 help=
"path and filename to frame cache file to use instead \
389 help=
"path to log, output, and error files (default \
390 is $PWD/logs; this directory is created if it does not \
391 exist and usually should be under a local file system)",
398 help=
"Name of input time-domain channel to read from frames. \
399 Can specify multiple options. The number of channels must be \
400 equal to the number of output-sft-path options given. The \
401 first listed channel will have the SFTs go to the first listed \
402 output-sft-path. Can only specify one channel when generating \
403 private SFTs (--observing-run=0)",
408 help=
"allow channels to be skipped if not in frames or too low sampling \
411parser.add_argument(
"-c",
"--comment-field", type=str, help=
"comment for SFT header")
413 "-F",
"--start-freq", type=int, default=10, help=
"start frequency of the SFTs"
416 "-B",
"--band", type=int, default=1990, help=
"frequency band of the SFTs"
422 default=
"tukey:0.001",
423 help=
'type of windowing of time-domain to do \
424 before generating SFTs, e.g. "rectangular", \
425 "hann", "tukey:<beta in [0,1], required>"; \
426 (default is "tukey:0.001", standard choice for LVK production SFTs)',
430 "--overlap-fraction",
433 help=
"overlap fraction (for use with windows; e.g., use \
434 --overlap-fraction 0.5 with --window-type hann windows)",
438 "--max-num-per-node",
441 help=
"maximum number of SFTs to generate on one node",
445 "--max-length-all-jobs",
447 help=
"maximum total amount of data to process, in seconds \
448 (optional and unused if a segment file is given)",
454 help=
"alternative file with segments to use, rather than \
462 help=
"minimum length segments to process in seconds (used \
463 only if a segment file is given)",
469 help=
"file with list of nodes on which to output SFTs",
475 help=
"path to nodes to output SFTs; the node name is \
476 appended to this path, followed by path given by the -p \
477 option; for example, if -q point to file with the list \
478 node1 node2 ... and the -Q /data/ -p /frames/S5/sfts/LHO \
479 options are given, the first output file will go into \
480 /data/node1/frames/S5/sfts/LHO; the next node in the list \
481 is used in constructing the path when the number of jobs \
482 given by the -r option reached, and so on",
486 "--output-jobs-per-node",
489 help=
"number of jobs to output per node in the list of \
490 nodes given with the -q option",
496 help=
"string specifying the gw_data_find executable, \
497 or a path to it; if not set, will use \
498 LSC_DATAFIND_PATH env variable or system default (in \
505 help=
"string specifying the lalpulsar_MakeSFTs executable, \
506 or a path to it; if not set, will use \
507 MAKESFTS_PATH env variable or system default (in that \
513 help=
"string specifying the lalpulsar_MoveSFTs executable, \
514 or a path to it; if not set, will use \
515 MOVESFTS_PATH env variable or system default (in that \
521 action=
"store_false",
522 help=
"do not validate created SFTs",
529 help=
"memory allocation in MB to request from condor for \
530 lalpulsar_MakeSFTs step",
537 help=
"disk space allocation in MB to request from condor \
538 for lalpulsar_MakeSFTs step",
542 "--accounting-group",
545 help=
"Condor tag for the production of SFTs",
549 "--accounting-group-user",
552 help=
"albert.einstein username (do not add @LIGO.ORG)",
558 def __call__(self, parser, namespace, values, option_string=None):
560 f
"Argument {self.option_strings} has been deprecated in lalpulsar_MakeSFTs"
566 "--frame-struct-type",
568 action=DeprecateAction,
569 help=
"DEPRECATED. No longer required; \
570 the frame channel type is determined automatically",
576 action=DeprecateAction,
577 help=
"DEPRECATED. No longer required; \
578 the frame channel type is determined automatically",
584 action=DeprecateAction,
585 help=
"DEPRECATED. No longer required; \
586 the detector prefix is deduced from the channel name",
592 action=DeprecateAction,
593 help=
"DEPRECATED. No longer supported",
599 action=DeprecateAction,
600 help=
"DEPRECATED. Default behaviour",
606 action=DeprecateAction,
607 help=
"DEPRECATED. No longer supported",
613 action=DeprecateAction,
614 help=
"DEPRECATED. No longer supported",
args = parser.parse_args()

# Sanity-check the parsed options before doing any work. parser.error()
# prints the message and exits; the original file's "raise" style before
# each call is preserved (the raise is never reached).
if args.observing_run < 0:
    raise parser.error("--observing-run must be >= 0")

if args.observing_run > 0 and not args.observing_kind:
    raise parser.error("--observing-run requires --observing-kind")

if args.observing_run > 0 and not args.observing_revision:
    raise parser.error("--observing-run requires --observing-revision")

if args.observing_revision and args.observing_revision <= 0:
    raise parser.error("--observing-revision must be > 0")

# --misc-desc only applies to private SFTs (observing run 0)
if args.observing_run > 0 and args.misc_desc:
    raise parser.error(
        f"--observing-run={args.observing_run} incompatible with --misc-desc"
    )

if args.misc_desc and not re.compile(r"^[A-Za-z0-9]+$").match(args.misc_desc):
    raise parser.error("--misc-desc may only contain A-Z, a-z, 0-9 characters")

if args.extra_datafind_time < 0:
    raise parser.error("--extra-datafind-time must be >= 0")

if args.filter_knee_freq < 0:
    raise parser.error("--filter-knee-freq must be >= 0")

if args.time_baseline <= 0:
    raise parser.error("--time-baseline must be > 0")

if args.overlap_fraction < 0.0 or args.overlap_fraction >= 1.0:
    raise parser.error("--overlap-fraction must be in the range [0,1)")

if args.start_freq < 0.0 or args.start_freq >= 7192.0:
    raise parser.error("--start-freq must be in the range [0,7192)")

if args.band <= 0 or args.band >= 8192.0:
    raise parser.error("--band must be in the range (0,8192)")

if args.start_freq + args.band >= 8192.0:
    raise parser.error("--start-freq + --band must be < 8192")

if args.max_num_per_node <= 0:
    raise parser.error("--max-num-per-node must be > 0")

# Channels and output paths must pair up, unless one shared path is given.
if (
    len(args.channel_name) != len(args.output_sft_path)
    and len(args.output_sft_path) != 1
):
    raise parser.error(
        "--channel-name and --output-sft-path must be the "
        "same length or --output-sft-path must be length of 1"
    )

if len(args.channel_name) > 1 and args.observing_run == 0:
    raise parser.error(
        "When creating SFTs from multiple channels, public SFT naming "
        "convention must be used: --observing-run > 0 and set "
        "--observing-kind and --observing-revision"
    )
# Resolve the executables used by the workflow. Each may be given explicitly
# (as a file or a directory), found via an environment variable (left as a
# Condor $ENV(...) macro, expanded at submission time), or fall back to a
# default location.
dataFindExe = "gw_data_find"
if args.datafind_path:
    if args.datafind_path.is_file():
        dataFindExe = args.datafind_path
    else:
        dataFindExe = args.datafind_path / dataFindExe
elif "LSC_DATAFIND_PATH" in os.environ:
    dataFindExe = Path("$ENV(LSC_DATAFIND_PATH)") / dataFindExe
else:
    dataFindExe = Path("/usr/bin") / dataFindExe

makeSFTsExe = "lalpulsar_MakeSFTs"
if args.makesfts_path:
    if args.makesfts_path.is_file():
        makeSFTsExe = args.makesfts_path
    else:
        makeSFTsExe = args.makesfts_path / makeSFTsExe
elif "MAKESFTS_PATH" in os.environ:
    makeSFTsExe = Path("$ENV(MAKESFTS_PATH)") / makeSFTsExe
else:
    # @LALSUITE_BINDIR@ is substituted at build/install time
    makeSFTsExe = Path("@LALSUITE_BINDIR@") / makeSFTsExe

moveSFTsExe = "lalpulsar_MoveSFTs"
if args.movesfts_path:
    if args.movesfts_path.is_file():
        moveSFTsExe = args.movesfts_path
    else:
        moveSFTsExe = args.movesfts_path / moveSFTsExe
elif "MOVESFTS_PATH" in os.environ:
    moveSFTsExe = Path("$ENV(MOVESFTS_PATH)") / moveSFTsExe
else:
    moveSFTsExe = Path("@LALSUITE_BINDIR@") / moveSFTsExe
# Create the log directory (and the cache directory when gw_data_find is
# used) if they do not already exist.
# NOTE(review): the guard conditions were lost to garbling and are
# reconstructed here — confirm against the upstream script.
if not args.log_path.is_dir():
    args.log_path.mkdir()

if not args.cache_file:
    if not args.cache_path.is_dir():
        args.cache_path.mkdir()

# If a list of nodes is given, SFTs are distributed across those nodes.
nodeList = []
nodeListIndex = 0
savedOutputSFTPath = None
if args.list_of_nodes is not None:
    # fixed: argparse module has no error(); use the parser's error method
    if args.node_path is None:
        raise parser.error("Node file list given, but no node path specified")

    if args.output_jobs_per_node < 1:
        raise parser.error(
            "Node file list given, but invalid output jobs per node specified"
        )

    # First whitespace-delimited token on each line is the node name.
    with open(args.list_of_nodes) as fp_nodelist:
        for idx, line in enumerate(fp_nodelist):
            splitLine = line.split()
            nodeList.append(splitLine[0])
    if len(nodeList) < 1:
        raise Exception(
            "No nodes found in node list file: {}".format(args.list_of_nodes)
        )

    # Remember the requested output path(s); per-node paths are built later.
    savedOutputSFTPath = args.output_sft_path
# Build the list of [start, end] segments to process, either from a segment
# file or from the --analysis-start/end-time options.
segList = []
adjustSegExtraTime = False
if args.segment_file is not None:
    # fixed: argparse module has no error(); use the parser's error method
    if args.min_seg_length < 0:
        raise parser.error("--min-seg-length must be >= 0")

    # Segment boundaries from a file get trimmed to whole SFT baselines
    # later (unless --synchronize-start is given).
    adjustSegExtraTime = True
    with open(args.segment_file) as fp_segfile:
        for idx, line in enumerate(fp_segfile):
            splitLine = line.split()
            oneSeg = []
            oneSeg.append(int(splitLine[0]))
            oneSeg.append(int(splitLine[1]))
            if (oneSeg[1] - oneSeg[0]) >= args.min_seg_length:
                segList.append(oneSeg)

    if len(segList) < 1:
        raise Exception(
            "No segments found in segment file: {}".format(args.segment_file)
        )
else:
    if args.analysis_start_time is None:
        raise parser.error(
            "--analysis-start-time must be specified if no segment file is given"
        )

    # fixed: this message previously named --analysis-start-time
    if args.analysis_end_time is None:
        raise parser.error(
            "--analysis-end-time must be specified if no segment file is given"
        )

    if args.max_length_all_jobs is None:
        raise parser.error(
            "--max-length-all-jobs must be specified if no segment file is given"
        )

    # Cap the analysis span at the maximum total length of all jobs.
    if args.analysis_end_time > (args.analysis_start_time + args.max_length_all_jobs):
        args.analysis_end_time = args.analysis_start_time + args.max_length_all_jobs

    oneSeg = []
    oneSeg.append(args.analysis_start_time)
    oneSeg.append(args.analysis_end_time)
    segList.append(oneSeg)
# The observatory site prefix is the first letter of the first channel name.
site = args.channel_name[0][0]

# The submit files are created next to the DAG file.
path_to_dag_file = args.dag_file.parent
dag_filename = args.dag_file.name
datafind_sub = path_to_dag_file / "datafind.sub"
makesfts_sub = path_to_dag_file / "MakeSFTs.sub"
movesfts_sub = path_to_dag_file / "MoveSFTs.sub"
# Create the datafind.sub submit file, unless a pre-made frame cache file
# was supplied (in which case no datafind jobs are needed at all).
if not args.cache_file:
    with open(datafind_sub, "w") as datafindFID:
        datafindLogFile = f"{args.log_path}/datafind_{dag_filename}.log"
        datafindFID.write("universe = vanilla\n")
        datafindFID.write(f"executable = {dataFindExe}\n")
        # $(...) macros are filled in per-job by the VARS lines in the DAG.
        datafindFID.write("arguments = ")
        datafindFID.write("--observatory $(observatory) --url-type file ")
        datafindFID.write("--gps-start-time $(gpsstarttime) ")
        datafindFID.write("--gps-end-time $(gpsendtime) --lal-cache --gaps ")
        datafindFID.write("--type $(inputdatatype)")
        if args.datafind_match:
            datafindFID.write(f" --match {args.datafind_match}\n")
        else:
            datafindFID.write("\n")
        datafindFID.write("getenv = *DATAFIND*\n")
        datafindFID.write("request_disk = 5MB\n")
        datafindFID.write("request_memory = 2000MB\n")
        datafindFID.write(f"accounting_group = {args.accounting_group}\n")
        datafindFID.write(f"accounting_group_user = {args.accounting_group_user}\n")
        datafindFID.write(f"log = {datafindLogFile}\n")
        datafindFID.write(f"error = {args.log_path}/datafind_$(tagstring).err\n")
        # stdout of the job IS the cache file consumed by MakeSFTs
        datafindFID.write(f"output = {args.cache_path}/")
        datafindFID.write("$(observatory)-$(gpsstarttime)-$(gpsendtime).cache\n")
        datafindFID.write("should_transfer_files = yes\n")
        datafindFID.write("notification = never\n")
        datafindFID.write("queue 1\n")
# Create the MakeSFTs.sub submit file; per-job argList/cachefile/tagstring
# are supplied via VARS lines in the DAG.
with open(makesfts_sub, "w") as MakeSFTsFID:
    MakeSFTsLogFile = f"{args.log_path}/MakeSFTs_{dag_filename}.log"
    MakeSFTsFID.write("universe = vanilla\n")
    MakeSFTsFID.write(f"executable = {makeSFTsExe}\n")
    MakeSFTsFID.write("arguments = $(argList)\n")
    MakeSFTsFID.write(f"accounting_group = {args.accounting_group}\n")
    MakeSFTsFID.write(f"accounting_group_user = {args.accounting_group_user}\n")
    MakeSFTsFID.write(f"log = {MakeSFTsLogFile}\n")
    MakeSFTsFID.write(f"error = {args.log_path}/MakeSFTs_$(tagstring).err\n")
    MakeSFTsFID.write(f"output = {args.log_path}/MakeSFTs_$(tagstring).out\n")
    MakeSFTsFID.write("notification = never\n")
    MakeSFTsFID.write(f"request_memory = {args.request_memory}MB\n")
    MakeSFTsFID.write(f"request_disk = {args.request_disk}MB\n")
    MakeSFTsFID.write("RequestCpus = 1\n")
    MakeSFTsFID.write("should_transfer_files = yes\n")
    MakeSFTsFID.write("transfer_input_files = $(cachefile)\n")
    # Propagate MAKESFTS_PATH only when the executable was located via it.
    if "MAKESFTS_PATH" in os.environ and not args.makesfts_path:
        MakeSFTsFID.write("getenv = MAKESFTS_PATH\n")
    MakeSFTsFID.write("queue 1\n")
# Create the MoveSFTs.sub submit file. This job runs in the local universe
# on the submit host, moving SFTs from scratch space to their final paths.
with open(movesfts_sub, "w") as MoveSFTsFID:
    MoveSFTsLogFile = f"{args.log_path}/MoveSFTs_{dag_filename}.log"
    MoveSFTsFID.write("universe = local\n")
    MoveSFTsFID.write(f"executable = {moveSFTsExe}\n")
    MoveSFTsFID.write("arguments = ")
    # args.validate is False when --no-validate behaviour was requested
    # (store_false action), in which case $(opts) carries the flag.
    if not args.validate:
        MoveSFTsFID.write("$(opts) ")
    MoveSFTsFID.write("-s $(sourcedirectory) -c $(channels) -d $(destdirectory)\n")
    MoveSFTsFID.write(f"accounting_group = {args.accounting_group}\n")
    MoveSFTsFID.write(f"accounting_group_user = {args.accounting_group_user}\n")
    MoveSFTsFID.write(f"log = {MoveSFTsLogFile}\n")
    MoveSFTsFID.write(f"error = {args.log_path}/MoveSFTs.err\n")
    MoveSFTsFID.write(f"output = {args.log_path}/MoveSFTs.out\n")
    MoveSFTsFID.write("notification = never\n")
    MoveSFTsFID.write("request_memory = 1GB\n")
    MoveSFTsFID.write("request_disk = 10MB\n")
    MoveSFTsFID.write("RequestCpus = 1\n")
    # Propagate MOVESFTS_PATH only when the executable was located via it.
    if "MOVESFTS_PATH" in os.environ and not args.movesfts_path:
        MoveSFTsFID.write("getenv = MOVESFTS_PATH\n")
    MoveSFTsFID.write("queue 1\n")
# NOTE(review): The main DAG-generation loop below arrived garbled by text
# extraction: the enclosing "for seg in segList:" line, several counter
# initializations, int(...) casts, and closing parentheses are missing, and
# the original file's line numbers are fused into the text. It is left
# byte-identical to avoid guessing at the node-packing arithmetic; restore
# this section from the upstream lalpulsar_MakeSFTDAG script before use.
890with open(args.dag_file,
"w")
as dagFID:
 891 startTimeAllNodes =
None
 892 firstSFTstartTime = 0
 904 if adjustSegExtraTime
and not args.synchronize_start:
 905 segStartTime = seg[0]
 914 segExtraTime = (segEndTime - segStartTime) % args.time_baseline
 920 if args.overlap_fraction != 0.0:
 921 if (segEndTime - segStartTime) > args.time_baseline:
 923 segEndTime - segStartTime - args.time_baseline
 924 ) %
int((1.0 - args.overlap_fraction) * args.time_baseline)
 928 segExtraStart =
int(segExtraTime / 2)
 929 segExtraEnd = segExtraTime - segExtraStart
 930 args.analysis_start_time = segStartTime + segExtraStart
 934 if args.analysis_start_time > segEndTime:
 935 args.analysis_start_time = segEndTime
 939 args.analysis_end_time = segEndTime - segExtraEnd
 943 if args.analysis_end_time < segStartTime:
 944 args.analysis_end_time = segStartTime
 949 elif args.synchronize_start:
 950 segStartTime = seg[0]
 955 if firstSFTstartTime == 0:
 956 firstSFTstartTime = segStartTime
 960 args.analysis_start_time = (
 964 (segStartTime - firstSFTstartTime)
 965 / ((1.0 - args.overlap_fraction) * args.time_baseline)
 967 * (1.0 - args.overlap_fraction)
 976 if args.analysis_start_time > segEndTime:
 977 args.analysis_start_time = segEndTime
 981 args.analysis_end_time = (
 985 (segEndTime - args.analysis_start_time - args.time_baseline)
 986 / ((1.0 - args.overlap_fraction) * args.time_baseline)
 988 * (1.0 - args.overlap_fraction)
 993 + args.analysis_start_time
 998 if args.analysis_end_time < segStartTime:
 999 args.analysis_end_time = segStartTime
 1004 args.analysis_start_time = seg[0]
 1005 args.analysis_end_time = seg[1]
 1009 startTimeThisNode = args.analysis_start_time
 1010 endTimeThisNode = args.analysis_start_time
 1011 endTimeAllNodes = args.analysis_start_time
 1012 while endTimeAllNodes < args.analysis_end_time:
 1015 if args.overlap_fraction != 0.0:
 1018 endTimeAllNodes = endTimeAllNodes + args.time_baseline
 1020 endTimeAllNodes = endTimeAllNodes +
int(
 1021 (1.0 - args.overlap_fraction) * args.time_baseline
 1025 endTimeAllNodes = endTimeAllNodes + args.time_baseline
 1026 if endTimeAllNodes <= args.analysis_end_time:
 1029 numThisNode = numThisNode + 1
 1030 numThisSeg = numThisSeg + 1
 1031 endTimeThisNode = endTimeAllNodes
 1032 if numThisNode < args.max_num_per_node:
 1036 nodeCount = nodeCount + 1
 1039 args.output_sft_path = (
 1041 + nodeList[nodeListIndex]
 1042 + savedOutputSFTPath
 1044 if (nodeCount % args.output_jobs_per_node) == 0:
 1045 nodeListIndex = nodeListIndex + 1
 1050 startTimeAllNodes = startTimeThisNode
 1061 if args.overlap_fraction != 0.0:
 1063 startTimeThisNode = endTimeThisNode -
int(
 1064 (args.overlap_fraction) * args.time_baseline
 1068 startTimeThisNode = endTimeThisNode
 1074 nodeCount = nodeCount + 1
 1077 args.output_sft_path = (
 1078 args.node_path + nodeList[nodeListIndex] + savedOutputSFTPath
 1080 if (nodeCount % args.output_jobs_per_node) == 0:
 1081 nodeListIndex = nodeListIndex + 1
 1086 startTimeAllNodes = startTimeThisNode
 1088 dagFID, nodeCount, startTimeThisNode, endTimeThisNode, site, args
 1095 dagFID.write(f
"JOB MoveSFTs {Path(dagFID.name).parent / 'MoveSFTs.sub'}\n")
 1096 dagFID.write(f
"RETRY MoveSFTs 1\n")
 1097 dagFID.write(f
"VARS MoveSFTs ")
 1098 if not args.validate:
 1099 dagFID.write(
'opts="--no-validate" ')
 1101 f
'sourcedirectory="." '
 1102 f
"channels=\"{' '.join(args.channel_name)}\" "
 1103 f
"destdirectory=\"{' '.join([str(p) for p in args.output_sft_path])}\"\n"
 1106 f
"PARENT {' '.join([f'MakeSFTs_{n}' for n in range(1, nodeCount+1)])} CHILD MoveSFTs\n"
# Record the end time covered by the last node and confirm that the DAG
# actually contains at least one job.
endTimeAllNodes = endTimeThisNode

if startTimeAllNodes is None:
    raise Exception("The startTimeAllNodes == none; the DAG file contains no jobs!")

if endTimeAllNodes <= startTimeAllNodes:
    raise Exception(
        "The endTimeAllNodes <= startTimeAllNodes; the DAG file contains no jobs!"
    )

# Report the total GPS time span covered by the generated jobs.
print(startTimeAllNodes, endTimeAllNodes)
# --- extraction residue (index/signatures appended by the extractor); kept
# --- as comments so it cannot be mistaken for executable code ---
# DEPRECATED OPTIONS
# def __call__(self, parser, namespace, values, option_string=None)
# def writeToDag(dagFID, nodeCount, startTimeThisNode, endTimeThisNode, site, args)
# def sft_name_from_vars(obs, gpsstart, Tsft, channel=None, kind=None, rev=None, window="unknown", par=None, miscstr=None)
# Create SFT file name from specification.