LALInference 4.1.9.1-00ddc7f
lalinference_compute_roq_weights.py
##python
import numpy as np
from math import ceil
from optparse import OptionParser
import os
from scipy.linalg import solve_triangular
import lal
import lalsimulation

#####################################################################

def _block_slices(dim_size, block_size):
    """Generator that yields slice objects for indexing into
    sequential blocks of an array along a particular axis
    """
    count = 0
    while True:
        yield slice(count, count + block_size, 1)
        count += block_size
        if count > dim_size:
            return

def blockwise_dot(A, B, deltaF, max_elements=int(2**27), out=None):
    """
    Computes the dot product of two matrices in a block-wise fashion.
    Only blocks of `A` with a maximum size of `max_elements` will be
    processed simultaneously.
    """

    m, n = A.shape
    n1, o = B.shape

    if n1 != n:
        raise ValueError('matrices are not aligned')

    if A.flags.f_contiguous:
        # prioritize processing as many columns of A as possible
        max_cols = max(1, max_elements // m)
        max_rows = max_elements // max_cols

    else:
        # prioritize processing as many rows of A as possible
        max_rows = max(1, max_elements // n)
        max_cols = max_elements // max_rows

    if out is None:
        out = np.empty((m, o), dtype=np.result_type(A, B))
    elif out.shape != (m, o):
        raise ValueError('output array has incorrect dimensions')

    for mm in _block_slices(m, max_rows):
        out[mm, :] = 0
        for nn in _block_slices(n, max_cols):
            A_block = A[mm, nn].copy()  # copy to force a read
            out[mm, :] += np.dot(A_block, B[nn, :]) * 4 * deltaF
            del A_block

    return out

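# Illustrative sketch (not executed by this script): blockwise_dot reproduces
# 4*deltaF*np.dot(A, B) while touching at most `max_elements` entries of A per block,
# which bounds peak memory when A holds many time-shifted copies of the data.
# The shapes and deltaF below are arbitrary example values.
#
#   A = np.random.randn(256, 4096) + 1j * np.random.randn(256, 4096)
#   C = np.random.randn(4096, 16) + 1j * np.random.randn(4096, 16)
#   df = 0.125
#   assert np.allclose(blockwise_dot(A, C, df, max_elements=2**15), 4 * df * np.dot(A, C))
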
def ehat(j, length):
    """Return a unit vector whose j-th component is 1"""
    ehat = np.zeros(length)
    ehat[j] = 1.0
    return ehat

def DEIM(basis):
    """Calculate interpolation nodes following Algorithm 5 in J Sci Comput
    (2013) 57:604-637.

    Parameters
    ----------
    basis : ndarray
        ndarray whose i-th row is the i-th basis vector

    Return
    ------
    nodes : array
        interpolation nodes
    B : ndarray
        ndarray whose i-th row is the weight for the i-th frequency node.
    """
    vec_num, vec_len = basis.shape
    # Step (1)
    j = abs(basis[0]).argmax()
    # Step (2)
    UT = np.zeros(shape=(vec_num, vec_len), dtype=complex)
    PT = np.zeros(shape=(vec_num, vec_len))
    UT[0] = basis[0]
    PT[0] = ehat(j, vec_len)
    PTU = np.zeros(shape=(vec_num, vec_num), dtype=complex)
    nodes = [j]
    # Step (3)
    for i in range(vec_num - 1):
        i += 1
        ei = basis[i]
        # Step (4)
        PTU[i-1][:i:] = np.array([UT[k][nodes[i-1]] for k in range(i)])
        c = solve_triangular(
            PTU[:i:, :i:], np.array([ei[node] for node in nodes]),
            lower=True, check_finite=False)
        # Step (5)
        r = ei - c.dot(UT[:i:])
        # Step (6)
        j = abs(r).argmax()
        # Step (7)
        UT[i] = r
        PT[i] = ehat(j, vec_len)
        nodes.append(j)

    # Calculate B
    VT = basis
    B = np.linalg.inv(VT.dot(PT.transpose())).dot(VT)

    # ordering
    B = np.array([B[i] for i in np.argsort(nodes)])
    nodes.sort()

    return nodes, B

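# Orientation note (a sketch of the DEIM property, not invoked by this script): for an
# orthonormalized basis `e` whose rows are basis vectors, the returned nodes and weight
# matrix B define the empirical interpolant I[h] = sum_k h[nodes[k]] * B[k], which is
# exact for any vector in the span of the basis, e.g.
#
#   nodes, B = DEIM(e)
#   assert np.allclose(e[0], np.array([e[0][n] for n in nodes]).dot(B))
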
def construct_nodes(
    selected_params, flow, fhigh, deltaF, approximant, quadratic
):
    """Construct frequency nodes and weights from parameter values selected by greedy algorithm.

    Parameters
    ----------
    selected_params : ndarray
        ndarray whose i-th row is the i-th parameter set selected by greedy
        algorithm
    flow : float
    fhigh : float
    deltaF : float
    approximant : string
    quadratic : bool
        construct nodes for products of waveforms when this is True

    Return
    ------
    f_nodes : ndarray
        interpolation frequency nodes
    B : ndarray
        ndarray whose i-th row is the weight for the i-th frequency node.
    """
    # generate waveforms at selected parameters
    start_index = int(flow / deltaF)
    def _waveform(param):
        # FIXME: Need to be fixed when we take into account precession.
        m1, m2, a1z, a2z = param
        hp, _ = lalsimulation.SimInspiralChooseFDWaveform(
            m1 * lal.MSUN_SI, m2 * lal.MSUN_SI,
            0.0, 0.0, a1z, 0.0, 0.0, a2z,
            1.0, 0.0, 0.0, 0.0, 0.0, 0.0,
            deltaF, flow, fhigh, flow, None,
            lalsimulation.GetApproximantFromString(approximant)
        )
        hp = hp.data.data[start_index::]
        if quadratic:
            return np.conj(hp) * hp
        else:
            return hp
    basis = np.array([_waveform(param) for param in selected_params]).transpose()

    # orthogonalize waveforms
    basis, _ = np.linalg.qr(basis)
    basis = basis.transpose()

    # discrete empirical interpolation
    nodes, B = DEIM(basis)
    return np.array([flow + deltaF * node for node in nodes]), B

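# Example usage (sketch only; the masses, spins, frequency band and approximant name
# below are made-up values, not ones used by the pipeline):
#
#   selected = np.array([[35.0, 30.0, 0.0, 0.0],
#                        [10.0,  8.0, 0.2, -0.1]])
#   f_nodes, B_lin = construct_nodes(selected, 20.0, 1024.0, 0.125, "IMRPhenomD", False)
#   # f_nodes holds len(selected) interpolation frequencies between flow and fhigh;
#   # B_lin has one row per node and one column per frequency sample in the band.
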
######################################################################

data_dir = './'
# load in data from file

parser = OptionParser(usage="usage: %prog [options]",
                      version="%prog")
parser.add_option("-d", "--data", type='string',
                  action="append",
                  dest="data_file",
                  help="data file",)
parser.add_option("-p", "--psd", type='string',
                  action="append",
                  dest="psd_file",
                  help="psd file",)
parser.add_option("-t", "--time_prior", type=float,
                  action="store",  # optional because action defaults to "store"
                  dest="dt",
                  help="width of time prior",)
parser.add_option("-i", "--ifo", type='string',
                  action="append",
                  dest="IFOs",
                  help="list of ifos",)
parser.add_option("-a", "--approx", type='string',
                  action="store",
                  dest="approximant",
                  help="approximant name",)
parser.add_option("-s", "--seglen", type=float,
                  action="store",
                  dest="seglen",
                  help="segment length",)
parser.add_option("-f", "--fLow", type=float,
                  action="store",
                  dest="fLow",
                  help="low frequency cut off",)
parser.add_option("-u", "--fHigh", type=float,
                  action="store",
                  dest="fHigh",
                  help="high frequency cut off",)
parser.add_option("-T", "--delta_tc", type=float,
                  action="store",
                  dest="delta_tc",
                  help="width of tc subdomain",)
parser.add_option("-B", "--basis-set", type='string',
                  action="store",
                  dest="b_matrix_directory",
                  help="B matrix directory",)
parser.add_option("-o", "--out", type='string',
                  action="store",
                  dest="outpath",
                  default="./",
                  help="output path",)

(options, args) = parser.parse_args()
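# Example invocation (sketch; the file names and numerical values are hypothetical):
#
#   lalinference_compute_roq_weights.py \
#       --data H1-freqData.dat --psd H1-PSD.dat --ifo H1 \
#       --seglen 4 --fLow 20 --fHigh 1024 \
#       --time_prior 0.1 --delta_tc 0.001 \
#       --approx IMRPhenomPv2 --basis-set /path/to/roq_basis --out ./
#
# --data, --psd and --ifo use action="append", so they can be repeated once per detector.
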

basis_params = np.loadtxt(os.path.join(options.b_matrix_directory, "params.dat")).T
flow_in_params, fhigh_in_params, deltaF_in_params = basis_params[0], basis_params[1], 1. / basis_params[2]

B_linear_path = os.path.join(options.b_matrix_directory, "B_linear.npy")
B_quadratic_path = os.path.join(options.b_matrix_directory, "B_quadratic.npy")
fnodes_linear_path = os.path.join(options.b_matrix_directory, "fnodes_linear.npy")
fnodes_quadratic_path = os.path.join(options.b_matrix_directory, "fnodes_quadratic.npy")
params_linear_path = os.path.join(options.b_matrix_directory, "selected_params_linear.npy")
params_quadratic_path = os.path.join(options.b_matrix_directory, "selected_params_quadratic.npy")
if os.path.exists(B_linear_path) and os.path.exists(B_quadratic_path) and \
        os.path.exists(fnodes_linear_path) and os.path.exists(fnodes_quadratic_path):
    B_linear = np.load(B_linear_path)
    B_quadratic = np.load(B_quadratic_path)
    fnodes_linear = np.load(fnodes_linear_path)
    fnodes_quadratic = np.load(fnodes_quadratic_path)
elif os.path.exists(params_linear_path) and os.path.exists(params_quadratic_path):
    selected_params_linear = np.load(params_linear_path)
    selected_params_quadratic = np.load(params_quadratic_path)
    fnodes_linear, B_linear = construct_nodes(selected_params_linear, flow_in_params, fhigh_in_params, deltaF_in_params, options.approximant, False)
    fnodes_quadratic, B_quadratic = construct_nodes(selected_params_quadratic, flow_in_params, fhigh_in_params, deltaF_in_params, options.approximant, True)
else:
    raise RuntimeError(
        "No ROQ data found. Please make sure that you have B_(linear, quadratic).npy "
        "and fnodes_(linear, quadratic).npy, or selected_params_(linear, quadratic).npy")

def BuildWeights(data, B, deltaF):

    ''' for a data array and reduced basis compute roq weights

    B: (reduced basis element)*invV (the inverse Vandermonde matrix)
    data: data set
    PSD: detector noise power spectral density (must be same shape as data)
    deltaF: integration element df

    '''
    weights = np.dot(data, B.conjugate()) * deltaF * 4.
    #weights = np.einsum('ij,jk->ik', B, data.conjugate() * deltaF * 4)
    return weights
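# Example (sketch with toy shapes): BuildWeights contracts a frequency series with the
# conjugated basis weights, weights[k] = 4*deltaF*sum_f data[f]*conj(B)[f, k]. Below,
# `series` stands in for 1/PSD on the analysis band and `Bq` for a 3-element quadratic basis.
#
#   df = 0.25
#   series = np.ones(100)
#   Bq = np.random.randn(100, 3)
#   w = BuildWeights(series, Bq, df)   # shape (3,)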
##################################

relative_tc_shift = options.seglen - 2.

# loop over ifos

i = 0
scale_factor = 0

for ifo in options.IFOs:

    dat_file = np.column_stack( np.loadtxt(options.data_file[i]) )
    data = dat_file[1] + 1j*dat_file[2]
    fseries = dat_file[0]
    if options.seglen:
        deltaF = 1./options.seglen
    else:
        deltaF = fseries[1] - fseries[0]
    if options.fHigh:
        fHigh = options.fHigh
    else:
        fHigh = fseries[-1]
    fHigh_index = int(fHigh / deltaF)

    print('Desired fHigh is', fHigh, ', actual fHigh is', fseries[fHigh_index])

    if options.fLow:
        fLow = options.fLow
        scale_factor = flow_in_params / fLow

    else:
        fLow = flow_in_params
        assert fHigh == fhigh_in_params
    fLow_index = int(fLow / deltaF)

    print('Desired fLow is', fLow, ', actual fLow is', fseries[fLow_index])

    fseries = fseries[fLow_index:fHigh_index]
    data = data[fLow_index:fHigh_index]

    psdfile = np.column_stack( np.loadtxt(options.psd_file[i]) )
    psd = psdfile[1]

    psd[-1] = psd[-2]
    psd = psd[fLow_index:fHigh_index]
    data /= psd

    # only get frequency components up to fHigh
    B_linear = B_linear.T[0:(fHigh_index - fLow_index)][:].T
    B_quadratic = B_quadratic.T[0:(fHigh_index - fLow_index)][:].T
    print(B_linear.shape[1], B_quadratic.shape[1], len(data), len(psd))
    assert len(data) == len(psd) == B_linear.shape[1] == B_quadratic.shape[1]


    # for the dot product, it's convenient to work with transpose of B:
    B_linear = B_linear.T
    B_quadratic = B_quadratic.T

    for k in range(len(data)):
        if np.isnan(data[k].real):
            data[k] = 0+0j

    # 0.045 comes from the diameter of the earth in light seconds: the maximum time-delay between earth-based observatories
    tcs = np.linspace(relative_tc_shift - options.dt - 0.045, relative_tc_shift + options.dt + 0.045, ceil(2.*(options.dt+0.045) / options.delta_tc))  # array of relative time shifts to be applied to the data
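    # Note (with the usual Fourier sign convention): multiplying by exp(2*pi*i*f*tc) below
    # is a time translation in the frequency domain; if d(f) is the transform of d(t), then
    # d(f)*exp(2*pi*i*f*tc) is the transform of d(t + tc), so each row of tc_shifted_data
    # is the data advanced by one trial coalescence-time offset.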


    tc_shifted_data = np.zeros([len(tcs), len(fseries)], dtype=complex)  # array to be filled with data, shifted by discrete time tc

    print("time steps = " + str(len(tcs)))
    for j in range(len(tcs)):

        tc = tcs[j]

        #exp_2pi_i_tc = np.array(np.exp(1j*2.*np.pi*fseries*tc))
        #data_tc = np.array(data*exp_2pi_i_tc)
        tc_shifted_data[j] = data * np.exp(1j*2.*np.pi*fseries*tc)

    #tc_shifted_data = tc_shifted_data.T#np.array(tc_shifted_data).T


    #*************************************************************************** #
    print("Computing weights for " + ifo)
    weights_path_linear = os.path.join(options.outpath, "weights_linear_%s.dat" % ifo)
    weights_file_linear = open(weights_path_linear, "wb")

    max_block_gigabytes = 4
    max_elements = int((max_block_gigabytes * 2 ** 30) / 8)  # max number of double complex elements

    weights_linear = blockwise_dot(tc_shifted_data, B_linear.conjugate(), deltaF, max_elements).T

    weights_linear.tofile(weights_file_linear)
    weights_file_linear.close()

    del tc_shifted_data

    #*************************************************************************** #
    weights_path_quadratic = os.path.join(options.outpath, "weights_quadratic_%s.dat" % ifo)
    weights_file_quadratic = open(weights_path_quadratic, "wb")
    weights_quadratic = (BuildWeights(1./psd, B_quadratic, deltaF).T).real

    weights_quadratic.tofile(weights_file_quadratic)
    weights_file_quadratic.close()
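    # Orientation note (schematic; this follows the standard ROQ construction, and the
    # weight files are consumed downstream, outside this script): the linear weights
    # approximate the data inner product for each trial time shift tcs[j],
    #     <d|h> ~= Re sum_k weights_linear[k, j] * conj(h(fnodes_linear[k])),
    # and the quadratic weights approximate the template norm,
    #     <h|h> ~= sum_k weights_quadratic[k] * |h(fnodes_quadratic[k])|^2,
    # replacing full-band frequency sums with sums over a few interpolation nodes.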
    size_file_path = os.path.join(options.outpath, "roq_sizes.dat")

    #*************************************************************************** #

    B_linear = B_linear.T
    B_quadratic = B_quadratic.T

    np.savetxt(size_file_path, np.array((len(tcs), B_linear.shape[0], B_quadratic.shape[0], B_linear.shape[1])), fmt='%u')
    print("Weights have been computed for " + ifo)

    i += 1

    # save the fnodes as a dat file if they're not already:

if scale_factor:
    print("scale factor = %f" % scale_factor)
    fnodes_linear /= scale_factor
    fnodes_quadratic /= scale_factor


fnodes_linear_path = os.path.join(options.outpath, "fnodes_linear.dat")
fnodes_linear_file = open(fnodes_linear_path, "wb")

fnodes_linear.tofile(fnodes_linear_file)
fnodes_linear_file.close()

fnodes_quadratic_path = os.path.join(options.outpath, "fnodes_quadratic.dat")
fnodes_quadratic_file = open(fnodes_quadratic_path, "wb")

fnodes_quadratic.tofile(fnodes_quadratic_file)
fnodes_quadratic_file.close()

tcs_file_path = os.path.join(options.outpath, "tcs.dat")
tcs_file = open(tcs_file_path, "wb")

tcs.tofile(tcs_file)
tcs_file.close()